diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/a.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/a.go new file mode 100644 index 0000000000000000000000000000000000000000..56e4292cda9f3a0892677a20722c833165171290 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/a.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Input for TestIssue13566 + +package a + +import "encoding/json" + +type A struct { + a *A + json json.RawMessage +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/b.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/b.go new file mode 100644 index 0000000000000000000000000000000000000000..419667820078e47d4744ee7c809a8f63516c8a42 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/b.go @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Input for TestIssue13566 + +package b + +import "./a" + +type A a.A diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/exports.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/exports.go new file mode 100644 index 0000000000000000000000000000000000000000..91598c03e35c0337edd1f51b4b399135b32be8dd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/exports.go @@ -0,0 +1,91 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is used to generate an object file which +// serves as test file for gcimporter_test.go. 
+ +package exports + +import "go/ast" + +// Issue 3682: Correctly read dotted identifiers from export data. +const init1 = 0 + +func init() {} + +const ( + C0 int = 0 + C1 = 3.14159265 + C2 = 2.718281828i + C3 = -123.456e-789 + C4 = +123.456e+789 + C5 = 1234i + C6 = "foo\n" + C7 = `bar\n` + C8 = 42 + C9 int = 42 + C10 float64 = 42 +) + +type ( + T1 int + T2 [10]int + T3 []int + T4 *int + T5 chan int + T6a chan<- int + T6b chan (<-chan int) + T6c chan<- (chan int) + T7 <-chan *ast.File + T8 struct{} + T9 struct { + a int + b, c float32 + d []string `go:"tag"` + } + T10 struct { + T8 + T9 + _ *T10 + } + T11 map[int]string + T12 interface{} + T13 interface { + m1() + m2(int) float32 + } + T14 interface { + T12 + T13 + m3(x ...struct{}) []T9 + } + T15 func() + T16 func(int) + T17 func(x int) + T18 func() float32 + T19 func() (x float32) + T20 func(...interface{}) + T21 struct{ next *T21 } + T22 struct{ link *T23 } + T23 struct{ link *T22 } + T24 *T24 + T25 *T26 + T26 *T27 + T27 *T25 + T28 func(T28) T28 +) + +var ( + V0 int + V1 = -991.0 + V2 float32 = 1.2 +) + +func F1() {} +func F2(x int) {} +func F3() int { return 0 } +func F4() float32 { return 0 } +func F5(a, b, c int, u, v, w struct{ x, y T1 }, more ...interface{}) (p, q, r chan<- T10) + +func (p *T1) M1() diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/generics.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/generics.go new file mode 100644 index 0000000000000000000000000000000000000000..00bf04000fa06c717f8bad1ab238dd83a8047ebe --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/generics.go @@ -0,0 +1,29 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is used to generate an object file which +// serves as test file for gcimporter_test.go. 
+ +package generics + +type Any any + +var x any + +type T[A, B any] struct { + Left A + Right B +} + +var X T[int, string] = T[int, string]{1, "hi"} + +func ToInt[P interface{ ~int }](p P) int { return int(p) } + +var IntID = ToInt[int] + +type G[C comparable] int + +func ImplicitFunc[T ~int]() {} + +type ImplicitType[T ~int] int diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/issue15920.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/issue15920.go new file mode 100644 index 0000000000000000000000000000000000000000..c70f7d8267b2f9209cf5f52a0ca7fe1bcc303649 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/issue15920.go @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +// The underlying type of Error is the underlying type of error. +// Make sure we can import this again without problems. +type Error error + +func F() Error { return nil } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/issue20046.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/issue20046.go new file mode 100644 index 0000000000000000000000000000000000000000..c63ee821c959dafda6799ccc8df77347221ad46a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/issue20046.go @@ -0,0 +1,9 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +var V interface { + M() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/issue25301.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/issue25301.go new file mode 100644 index 0000000000000000000000000000000000000000..e3dc98b4e1f9acdc9801c6a66e9305206f943cb5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/issue25301.go @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue25301 + +type ( + A = interface { + M() + } + T interface { + A + } + S struct{} +) + +func (S) M() { println("m") } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/issue25596.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/issue25596.go new file mode 100644 index 0000000000000000000000000000000000000000..8923373e5fa44d57d25ce1b4ccdc3a480360c74f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/issue25596.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue25596 + +type E interface { + M() T +} + +type T interface { + E +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/p.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/p.go new file mode 100644 index 0000000000000000000000000000000000000000..9e2e7057653725fc8ce963b5768828b3d9a88b42 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/p.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Input for TestIssue15517 + +package p + +const C = 0 + +var V int + +func F() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/versions/test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/versions/test.go new file mode 100644 index 0000000000000000000000000000000000000000..227fc092519212f30793e1266fe619858ac28bef --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/importer/testdata/versions/test.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// To create a test case for a new export format version, +// build this package with the latest compiler and store +// the resulting .a file appropriately named in the versions +// directory. The VersionHandling test will pick it up. +// +// In the testdata/versions: +// +// go build -o test_go1.$X_$Y.a test.go +// +// with $X = Go version and $Y = export format version +// (add 'b' or 'i' to distinguish between binary and +// indexed format starting with 1.11 as long as both +// formats are supported). +// +// Make sure this source is extended such that it exercises +// whatever export format change has taken place. + +package test + +// Any release before and including Go 1.7 didn't encode +// the package for a blank struct field. 
+type BlankField struct { + _ int +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/actualexprpropbits_string.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/actualexprpropbits_string.go new file mode 100644 index 0000000000000000000000000000000000000000..2faf76f487c06e67bef285665a882093e51148ef --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/actualexprpropbits_string.go @@ -0,0 +1,58 @@ +// Code generated by "stringer -bitset -type ActualExprPropBits"; DO NOT EDIT. + +package inlheur + +import "strconv" +import "bytes" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ActualExprConstant-1] + _ = x[ActualExprIsConcreteConvIface-2] + _ = x[ActualExprIsFunc-4] + _ = x[ActualExprIsInlinableFunc-8] +} + +var _ActualExprPropBits_value = [...]uint64{ + 0x1, /* ActualExprConstant */ + 0x2, /* ActualExprIsConcreteConvIface */ + 0x4, /* ActualExprIsFunc */ + 0x8, /* ActualExprIsInlinableFunc */ +} + +const _ActualExprPropBits_name = "ActualExprConstantActualExprIsConcreteConvIfaceActualExprIsFuncActualExprIsInlinableFunc" + +var _ActualExprPropBits_index = [...]uint8{0, 18, 47, 63, 88} + +func (i ActualExprPropBits) String() string { + var b bytes.Buffer + + remain := uint64(i) + seen := false + + for k, v := range _ActualExprPropBits_value { + x := _ActualExprPropBits_name[_ActualExprPropBits_index[k]:_ActualExprPropBits_index[k+1]] + if v == 0 { + if i == 0 { + b.WriteString(x) + return b.String() + } + continue + } + if (v & remain) == v { + remain &^= v + x := _ActualExprPropBits_name[_ActualExprPropBits_index[k]:_ActualExprPropBits_index[k+1]] + if seen { + b.WriteString("|") + } + seen = true + b.WriteString(x) + } + } + if remain == 0 { + return b.String() + } + return "ActualExprPropBits(0x" + strconv.FormatInt(int64(i), 
16) + ")" +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/analyze.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/analyze.go new file mode 100644 index 0000000000000000000000000000000000000000..a1b6f358e17e8eeda955a40da06ebc612b73f653 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/analyze.go @@ -0,0 +1,370 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inlheur + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "encoding/json" + "fmt" + "internal/buildcfg" + "io" + "os" + "path/filepath" + "sort" + "strings" +) + +const ( + debugTraceFuncs = 1 << iota + debugTraceFuncFlags + debugTraceResults + debugTraceParams + debugTraceExprClassify + debugTraceCalls + debugTraceScoring +) + +// propAnalyzer interface is used for defining one or more analyzer +// helper objects, each tasked with computing some specific subset of +// the properties we're interested in. The assumption is that +// properties are independent, so each new analyzer that implements +// this interface can operate entirely on its own. For a given analyzer +// there will be a sequence of calls to nodeVisitPre and nodeVisitPost +// as the nodes within a function are visited, then a followup call to +// setResults so that the analyzer can transfer its results into the +// final properties object. +type propAnalyzer interface { + nodeVisitPre(n ir.Node) + nodeVisitPost(n ir.Node) + setResults(funcProps *FuncProps) +} + +// fnInlHeur contains inline heuristics state information about a +// specific Go function being analyzed/considered by the inliner. 
Note +// that in addition to constructing a fnInlHeur object by analyzing a +// specific *ir.Func, there is also code in the test harness +// (funcprops_test.go) that builds up fnInlHeur's by reading in and +// parsing a dump. This is the reason why we have file/fname/line +// fields below instead of just an *ir.Func field. +type fnInlHeur struct { + props *FuncProps + cstab CallSiteTab + fname string + file string + line uint +} + +var fpmap = map[*ir.Func]fnInlHeur{} + +// AnalyzeFunc computes function properties for fn and its contained +// closures, updating the global 'fpmap' table. It is assumed that +// "CanInline" has been run on fn and on the closures that feed +// directly into calls; other closures not directly called will also +// be checked inlinability for inlinability here in case they are +// returned as a result. +func AnalyzeFunc(fn *ir.Func, canInline func(*ir.Func), budgetForFunc func(*ir.Func) int32, inlineMaxBudget int) { + if fpmap == nil { + // If fpmap is nil this indicates that the main inliner pass is + // complete and we're doing inlining of wrappers (no heuristics + // used here). + return + } + if fn.OClosure != nil { + // closures will be processed along with their outer enclosing func. + return + } + enableDebugTraceIfEnv() + if debugTrace&debugTraceFuncs != 0 { + fmt.Fprintf(os.Stderr, "=-= AnalyzeFunc(%v)\n", fn) + } + // Build up a list containing 'fn' and any closures it contains. Along + // the way, test to see whether each closure is inlinable in case + // we might be returning it. + funcs := []*ir.Func{fn} + ir.VisitFuncAndClosures(fn, func(n ir.Node) { + if clo, ok := n.(*ir.ClosureExpr); ok { + funcs = append(funcs, clo.Func) + } + }) + + // Analyze the list of functions. We want to visit a given func + // only after the closures it contains have been processed, so + // iterate through the list in reverse order. 
Once a function has + // been analyzed, revisit the question of whether it should be + // inlinable; if it is over the default hairyness limit and it + // doesn't have any interesting properties, then we don't want + // the overhead of writing out its inline body. + nameFinder := newNameFinder(fn) + for i := len(funcs) - 1; i >= 0; i-- { + f := funcs[i] + if f.OClosure != nil && !f.InlinabilityChecked() { + canInline(f) + } + funcProps := analyzeFunc(f, inlineMaxBudget, nameFinder) + revisitInlinability(f, funcProps, budgetForFunc) + if f.Inl != nil { + f.Inl.Properties = funcProps.SerializeToString() + } + } + disableDebugTrace() +} + +// TearDown is invoked at the end of the main inlining pass; doing +// function analysis and call site scoring is unlikely to help a lot +// after this point, so nil out fpmap and other globals to reclaim +// storage. +func TearDown() { + fpmap = nil + scoreCallsCache.tab = nil + scoreCallsCache.csl = nil +} + +func analyzeFunc(fn *ir.Func, inlineMaxBudget int, nf *nameFinder) *FuncProps { + if funcInlHeur, ok := fpmap[fn]; ok { + return funcInlHeur.props + } + funcProps, fcstab := computeFuncProps(fn, inlineMaxBudget, nf) + file, line := fnFileLine(fn) + entry := fnInlHeur{ + fname: fn.Sym().Name, + file: file, + line: line, + props: funcProps, + cstab: fcstab, + } + fn.SetNeverReturns(entry.props.Flags&FuncPropNeverReturns != 0) + fpmap[fn] = entry + if fn.Inl != nil && fn.Inl.Properties == "" { + fn.Inl.Properties = entry.props.SerializeToString() + } + return funcProps +} + +// revisitInlinability revisits the question of whether to continue to +// treat function 'fn' as an inline candidate based on the set of +// properties we've computed for it. If (for example) it has an +// initial size score of 150 and no interesting properties to speak +// of, then there isn't really any point to moving ahead with it as an +// inline candidate. 
+func revisitInlinability(fn *ir.Func, funcProps *FuncProps, budgetForFunc func(*ir.Func) int32) { + if fn.Inl == nil { + return + } + maxAdj := int32(LargestNegativeScoreAdjustment(fn, funcProps)) + budget := budgetForFunc(fn) + if fn.Inl.Cost+maxAdj > budget { + fn.Inl = nil + } +} + +// computeFuncProps examines the Go function 'fn' and computes for it +// a function "properties" object, to be used to drive inlining +// heuristics. See comments on the FuncProps type for more info. +func computeFuncProps(fn *ir.Func, inlineMaxBudget int, nf *nameFinder) (*FuncProps, CallSiteTab) { + if debugTrace&debugTraceFuncs != 0 { + fmt.Fprintf(os.Stderr, "=-= starting analysis of func %v:\n%+v\n", + fn, fn) + } + funcProps := new(FuncProps) + ffa := makeFuncFlagsAnalyzer(fn) + analyzers := []propAnalyzer{ffa} + analyzers = addResultsAnalyzer(fn, analyzers, funcProps, inlineMaxBudget, nf) + analyzers = addParamsAnalyzer(fn, analyzers, funcProps, nf) + runAnalyzersOnFunction(fn, analyzers) + for _, a := range analyzers { + a.setResults(funcProps) + } + cstab := computeCallSiteTable(fn, fn.Body, nil, ffa.panicPathTable(), 0, nf) + return funcProps, cstab +} + +func runAnalyzersOnFunction(fn *ir.Func, analyzers []propAnalyzer) { + var doNode func(ir.Node) bool + doNode = func(n ir.Node) bool { + for _, a := range analyzers { + a.nodeVisitPre(n) + } + ir.DoChildren(n, doNode) + for _, a := range analyzers { + a.nodeVisitPost(n) + } + return false + } + doNode(fn) +} + +func propsForFunc(fn *ir.Func) *FuncProps { + if funcInlHeur, ok := fpmap[fn]; ok { + return funcInlHeur.props + } else if fn.Inl != nil && fn.Inl.Properties != "" { + // FIXME: considering adding some sort of cache or table + // for deserialized properties of imported functions. 
+ return DeserializeFromString(fn.Inl.Properties) + } + return nil +} + +func fnFileLine(fn *ir.Func) (string, uint) { + p := base.Ctxt.InnermostPos(fn.Pos()) + return filepath.Base(p.Filename()), p.Line() +} + +func Enabled() bool { + return buildcfg.Experiment.NewInliner || UnitTesting() +} + +func UnitTesting() bool { + return base.Debug.DumpInlFuncProps != "" || + base.Debug.DumpInlCallSiteScores != 0 +} + +// DumpFuncProps computes and caches function properties for the func +// 'fn', writing out a description of the previously computed set of +// properties to the file given in 'dumpfile'. Used for the +// "-d=dumpinlfuncprops=..." command line flag, intended for use +// primarily in unit testing. +func DumpFuncProps(fn *ir.Func, dumpfile string) { + if fn != nil { + if fn.OClosure != nil { + // closures will be processed along with their outer enclosing func. + return + } + captureFuncDumpEntry(fn) + ir.VisitFuncAndClosures(fn, func(n ir.Node) { + if clo, ok := n.(*ir.ClosureExpr); ok { + captureFuncDumpEntry(clo.Func) + } + }) + } else { + emitDumpToFile(dumpfile) + } +} + +// emitDumpToFile writes out the buffer function property dump entries +// to a file, for unit testing. Dump entries need to be sorted by +// definition line, and due to generics we need to account for the +// possibility that several ir.Func's will have the same def line. +func emitDumpToFile(dumpfile string) { + mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC + if dumpfile[0] == '+' { + dumpfile = dumpfile[1:] + mode = os.O_WRONLY | os.O_APPEND | os.O_CREATE + } + if dumpfile[0] == '%' { + dumpfile = dumpfile[1:] + d, b := filepath.Dir(dumpfile), filepath.Base(dumpfile) + ptag := strings.ReplaceAll(types.LocalPkg.Path, "/", ":") + dumpfile = d + "/" + ptag + "." 
+ b + } + outf, err := os.OpenFile(dumpfile, mode, 0644) + if err != nil { + base.Fatalf("opening function props dump file %q: %v\n", dumpfile, err) + } + defer outf.Close() + dumpFilePreamble(outf) + + atline := map[uint]uint{} + sl := make([]fnInlHeur, 0, len(dumpBuffer)) + for _, e := range dumpBuffer { + sl = append(sl, e) + atline[e.line] = atline[e.line] + 1 + } + sl = sortFnInlHeurSlice(sl) + + prevline := uint(0) + for _, entry := range sl { + idx := uint(0) + if prevline == entry.line { + idx++ + } + prevline = entry.line + atl := atline[entry.line] + if err := dumpFnPreamble(outf, &entry, nil, idx, atl); err != nil { + base.Fatalf("function props dump: %v\n", err) + } + } + dumpBuffer = nil +} + +// captureFuncDumpEntry grabs the function properties object for 'fn' +// and enqueues it for later dumping. Used for the +// "-d=dumpinlfuncprops=..." command line flag, intended for use +// primarily in unit testing. +func captureFuncDumpEntry(fn *ir.Func) { + // avoid capturing compiler-generated equality funcs. + if strings.HasPrefix(fn.Sym().Name, ".eq.") { + return + } + funcInlHeur, ok := fpmap[fn] + if !ok { + // Missing entry is expected for functions that are too large + // to inline. We still want to write out call site scores in + // this case however. + funcInlHeur = fnInlHeur{cstab: callSiteTab} + } + if dumpBuffer == nil { + dumpBuffer = make(map[*ir.Func]fnInlHeur) + } + if _, ok := dumpBuffer[fn]; ok { + return + } + if debugTrace&debugTraceFuncs != 0 { + fmt.Fprintf(os.Stderr, "=-= capturing dump for %v:\n", fn) + } + dumpBuffer[fn] = funcInlHeur +} + +// dumpFilePreamble writes out a file-level preamble for a given +// Go function as part of a function properties dump. 
+func dumpFilePreamble(w io.Writer) { + fmt.Fprintf(w, "// DO NOT EDIT (use 'go test -v -update-expected' instead.)\n") + fmt.Fprintf(w, "// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt\n") + fmt.Fprintf(w, "// for more information on the format of this file.\n") + fmt.Fprintf(w, "// %s\n", preambleDelimiter) +} + +// dumpFnPreamble writes out a function-level preamble for a given +// Go function as part of a function properties dump. See the +// README.txt file in testdata/props for more on the format of +// this preamble. +func dumpFnPreamble(w io.Writer, funcInlHeur *fnInlHeur, ecst encodedCallSiteTab, idx, atl uint) error { + fmt.Fprintf(w, "// %s %s %d %d %d\n", + funcInlHeur.file, funcInlHeur.fname, funcInlHeur.line, idx, atl) + // emit props as comments, followed by delimiter + fmt.Fprintf(w, "%s// %s\n", funcInlHeur.props.ToString("// "), comDelimiter) + data, err := json.Marshal(funcInlHeur.props) + if err != nil { + return fmt.Errorf("marshall error %v\n", err) + } + fmt.Fprintf(w, "// %s\n", string(data)) + dumpCallSiteComments(w, funcInlHeur.cstab, ecst) + fmt.Fprintf(w, "// %s\n", fnDelimiter) + return nil +} + +// sortFnInlHeurSlice sorts a slice of fnInlHeur based on +// the starting line of the function definition, then by name. +func sortFnInlHeurSlice(sl []fnInlHeur) []fnInlHeur { + sort.SliceStable(sl, func(i, j int) bool { + if sl[i].line != sl[j].line { + return sl[i].line < sl[j].line + } + return sl[i].fname < sl[j].fname + }) + return sl +} + +// delimiters written to various preambles to make parsing of +// dumps easier. +const preambleDelimiter = "" +const fnDelimiter = "" +const comDelimiter = "" +const csDelimiter = "" + +// dumpBuffer stores up function properties dumps when +// "-d=dumpinlfuncprops=..." is in effect. 
+var dumpBuffer map[*ir.Func]fnInlHeur diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/analyze_func_callsites.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/analyze_func_callsites.go new file mode 100644 index 0000000000000000000000000000000000000000..36ebe18b82f3fd29b01318bf413423e124382a1d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/analyze_func_callsites.go @@ -0,0 +1,413 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inlheur + +import ( + "cmd/compile/internal/ir" + "cmd/compile/internal/pgo" + "cmd/compile/internal/typecheck" + "fmt" + "os" + "strings" +) + +type callSiteAnalyzer struct { + fn *ir.Func + *nameFinder +} + +type callSiteTableBuilder struct { + fn *ir.Func + *nameFinder + cstab CallSiteTab + ptab map[ir.Node]pstate + nstack []ir.Node + loopNest int + isInit bool +} + +func makeCallSiteAnalyzer(fn *ir.Func) *callSiteAnalyzer { + return &callSiteAnalyzer{ + fn: fn, + nameFinder: newNameFinder(fn), + } +} + +func makeCallSiteTableBuilder(fn *ir.Func, cstab CallSiteTab, ptab map[ir.Node]pstate, loopNestingLevel int, nf *nameFinder) *callSiteTableBuilder { + isInit := fn.IsPackageInit() || strings.HasPrefix(fn.Sym().Name, "init.") + return &callSiteTableBuilder{ + fn: fn, + cstab: cstab, + ptab: ptab, + isInit: isInit, + loopNest: loopNestingLevel, + nstack: []ir.Node{fn}, + nameFinder: nf, + } +} + +// computeCallSiteTable builds and returns a table of call sites for +// the specified region in function fn. A region here corresponds to a +// specific subtree within the AST for a function. The main intended +// use cases are for 'region' to be either A) an entire function body, +// or B) an inlined call expression. 
+func computeCallSiteTable(fn *ir.Func, region ir.Nodes, cstab CallSiteTab, ptab map[ir.Node]pstate, loopNestingLevel int, nf *nameFinder) CallSiteTab { + cstb := makeCallSiteTableBuilder(fn, cstab, ptab, loopNestingLevel, nf) + var doNode func(ir.Node) bool + doNode = func(n ir.Node) bool { + cstb.nodeVisitPre(n) + ir.DoChildren(n, doNode) + cstb.nodeVisitPost(n) + return false + } + for _, n := range region { + doNode(n) + } + return cstb.cstab +} + +func (cstb *callSiteTableBuilder) flagsForNode(call *ir.CallExpr) CSPropBits { + var r CSPropBits + + if debugTrace&debugTraceCalls != 0 { + fmt.Fprintf(os.Stderr, "=-= analyzing call at %s\n", + fmtFullPos(call.Pos())) + } + + // Set a bit if this call is within a loop. + if cstb.loopNest > 0 { + r |= CallSiteInLoop + } + + // Set a bit if the call is within an init function (either + // compiler-generated or user-written). + if cstb.isInit { + r |= CallSiteInInitFunc + } + + // Decide whether to apply the panic path heuristic. Hack: don't + // apply this heuristic in the function "main.main" (mostly just + // to avoid annoying users). + if !isMainMain(cstb.fn) { + r = cstb.determinePanicPathBits(call, r) + } + + return r +} + +// determinePanicPathBits updates the CallSiteOnPanicPath bit within +// "r" if we think this call is on an unconditional path to +// panic/exit. Do this by walking back up the node stack to see if we +// can find either A) an enclosing panic, or B) a statement node that +// we've determined leads to a panic/exit. 
+func (cstb *callSiteTableBuilder) determinePanicPathBits(call ir.Node, r CSPropBits) CSPropBits { + cstb.nstack = append(cstb.nstack, call) + defer func() { + cstb.nstack = cstb.nstack[:len(cstb.nstack)-1] + }() + + for ri := range cstb.nstack[:len(cstb.nstack)-1] { + i := len(cstb.nstack) - ri - 1 + n := cstb.nstack[i] + _, isCallExpr := n.(*ir.CallExpr) + _, isStmt := n.(ir.Stmt) + if isCallExpr { + isStmt = false + } + + if debugTrace&debugTraceCalls != 0 { + ps, inps := cstb.ptab[n] + fmt.Fprintf(os.Stderr, "=-= callpar %d op=%s ps=%s inptab=%v stmt=%v\n", i, n.Op().String(), ps.String(), inps, isStmt) + } + + if n.Op() == ir.OPANIC { + r |= CallSiteOnPanicPath + break + } + if v, ok := cstb.ptab[n]; ok { + if v == psCallsPanic { + r |= CallSiteOnPanicPath + break + } + if isStmt { + break + } + } + } + return r +} + +// propsForArg returns property bits for a given call argument expression arg. +func (cstb *callSiteTableBuilder) propsForArg(arg ir.Node) ActualExprPropBits { + if cval := cstb.constValue(arg); cval != nil { + return ActualExprConstant + } + if cstb.isConcreteConvIface(arg) { + return ActualExprIsConcreteConvIface + } + fname := cstb.funcName(arg) + if fname != nil { + if fn := fname.Func; fn != nil && typecheck.HaveInlineBody(fn) { + return ActualExprIsInlinableFunc + } + return ActualExprIsFunc + } + return 0 +} + +// argPropsForCall returns a slice of argument properties for the +// expressions being passed to the callee in the specific call +// expression; these will be stored in the CallSite object for a given +// call and then consulted when scoring. If no arg has any interesting +// properties we try to save some space and return a nil slice. 
+func (cstb *callSiteTableBuilder) argPropsForCall(ce *ir.CallExpr) []ActualExprPropBits { + rv := make([]ActualExprPropBits, len(ce.Args)) + somethingInteresting := false + for idx := range ce.Args { + argProp := cstb.propsForArg(ce.Args[idx]) + somethingInteresting = somethingInteresting || (argProp != 0) + rv[idx] = argProp + } + if !somethingInteresting { + return nil + } + return rv +} + +func (cstb *callSiteTableBuilder) addCallSite(callee *ir.Func, call *ir.CallExpr) { + flags := cstb.flagsForNode(call) + argProps := cstb.argPropsForCall(call) + if debugTrace&debugTraceCalls != 0 { + fmt.Fprintf(os.Stderr, "=-= props %+v for call %v\n", argProps, call) + } + // FIXME: maybe bulk-allocate these? + cs := &CallSite{ + Call: call, + Callee: callee, + Assign: cstb.containingAssignment(call), + ArgProps: argProps, + Flags: flags, + ID: uint(len(cstb.cstab)), + } + if _, ok := cstb.cstab[call]; ok { + fmt.Fprintf(os.Stderr, "*** cstab duplicate entry at: %s\n", + fmtFullPos(call.Pos())) + fmt.Fprintf(os.Stderr, "*** call: %+v\n", call) + panic("bad") + } + // Set initial score for callsite to the cost computed + // by CanInline; this score will be refined later based + // on heuristics. 
+ cs.Score = int(callee.Inl.Cost) + + if cstb.cstab == nil { + cstb.cstab = make(CallSiteTab) + } + cstb.cstab[call] = cs + if debugTrace&debugTraceCalls != 0 { + fmt.Fprintf(os.Stderr, "=-= added callsite: caller=%v callee=%v n=%s\n", + cstb.fn, callee, fmtFullPos(call.Pos())) + } +} + +func (cstb *callSiteTableBuilder) nodeVisitPre(n ir.Node) { + switch n.Op() { + case ir.ORANGE, ir.OFOR: + if !hasTopLevelLoopBodyReturnOrBreak(loopBody(n)) { + cstb.loopNest++ + } + case ir.OCALLFUNC: + ce := n.(*ir.CallExpr) + callee := pgo.DirectCallee(ce.Fun) + if callee != nil && callee.Inl != nil { + cstb.addCallSite(callee, ce) + } + } + cstb.nstack = append(cstb.nstack, n) +} + +func (cstb *callSiteTableBuilder) nodeVisitPost(n ir.Node) { + cstb.nstack = cstb.nstack[:len(cstb.nstack)-1] + switch n.Op() { + case ir.ORANGE, ir.OFOR: + if !hasTopLevelLoopBodyReturnOrBreak(loopBody(n)) { + cstb.loopNest-- + } + } +} + +func loopBody(n ir.Node) ir.Nodes { + if forst, ok := n.(*ir.ForStmt); ok { + return forst.Body + } + if rst, ok := n.(*ir.RangeStmt); ok { + return rst.Body + } + return nil +} + +// hasTopLevelLoopBodyReturnOrBreak examines the body of a "for" or +// "range" loop to try to verify that it is a real loop, as opposed to +// a construct that is syntactically loopy but doesn't actually iterate +// multiple times, like: +// +// for { +// blah() +// return 1 +// } +// +// [Remark: the pattern above crops up quite a bit in the source code +// for the compiler itself, e.g. the auto-generated rewrite code] +// +// Note that we don't look for GOTO statements here, so it's possible +// we'll get the wrong result for a loop with complicated control +// jumps via gotos. +func hasTopLevelLoopBodyReturnOrBreak(loopBody ir.Nodes) bool { + for _, n := range loopBody { + if n.Op() == ir.ORETURN || n.Op() == ir.OBREAK { + return true + } + } + return false +} + +// containingAssignment returns the top-level assignment statement +// for a statement level function call "n". 
Examples: +// +// x := foo() +// x, y := bar(z, baz()) +// if blah() { ... +// +// Here the top-level assignment statement for the foo() call is the +// statement assigning to "x"; the top-level assignment for "bar()" +// call is the assignment to x,y. For the baz() and blah() calls, +// there is no top level assignment statement. +// +// The unstated goal here is that we want to use the containing +// assignment to establish a connection between a given call and the +// variables to which its results/returns are being assigned. +// +// Note that for the "bar" command above, the front end sometimes +// decomposes this into two assignments, the first one assigning the +// call to a pair of auto-temps, then the second one assigning the +// auto-temps to the user-visible vars. This helper will return the +// second (outer) of these two. +func (cstb *callSiteTableBuilder) containingAssignment(n ir.Node) ir.Node { + parent := cstb.nstack[len(cstb.nstack)-1] + + // assignsOnlyAutoTemps returns TRUE of the specified OAS2FUNC + // node assigns only auto-temps. + assignsOnlyAutoTemps := func(x ir.Node) bool { + alst := x.(*ir.AssignListStmt) + oa2init := alst.Init() + if len(oa2init) == 0 { + return false + } + for _, v := range oa2init { + d := v.(*ir.Decl) + if !ir.IsAutoTmp(d.X) { + return false + } + } + return true + } + + // Simple case: x := foo() + if parent.Op() == ir.OAS { + return parent + } + + // Multi-return case: x, y := bar() + if parent.Op() == ir.OAS2FUNC { + // Hack city: if the result vars are auto-temps, try looking + // for an outer assignment in the tree. 
The code shape we're + // looking for here is: + // + // OAS1({x,y},OCONVNOP(OAS2FUNC({auto1,auto2},OCALLFUNC(bar)))) + // + if assignsOnlyAutoTemps(parent) { + par2 := cstb.nstack[len(cstb.nstack)-2] + if par2.Op() == ir.OAS2 { + return par2 + } + if par2.Op() == ir.OCONVNOP { + par3 := cstb.nstack[len(cstb.nstack)-3] + if par3.Op() == ir.OAS2 { + return par3 + } + } + } + } + + return nil +} + +// UpdateCallsiteTable handles updating of callerfn's call site table +// after an inlined has been carried out, e.g. the call at 'n' as been +// turned into the inlined call expression 'ic' within function +// callerfn. The chief thing of interest here is to make sure that any +// call nodes within 'ic' are added to the call site table for +// 'callerfn' and scored appropriately. +func UpdateCallsiteTable(callerfn *ir.Func, n *ir.CallExpr, ic *ir.InlinedCallExpr) { + enableDebugTraceIfEnv() + defer disableDebugTrace() + + funcInlHeur, ok := fpmap[callerfn] + if !ok { + // This can happen for compiler-generated wrappers. + if debugTrace&debugTraceCalls != 0 { + fmt.Fprintf(os.Stderr, "=-= early exit, no entry for caller fn %v\n", callerfn) + } + return + } + + if debugTrace&debugTraceCalls != 0 { + fmt.Fprintf(os.Stderr, "=-= UpdateCallsiteTable(caller=%v, cs=%s)\n", + callerfn, fmtFullPos(n.Pos())) + } + + // Mark the call in question as inlined. + oldcs, ok := funcInlHeur.cstab[n] + if !ok { + // This can happen for compiler-generated wrappers. + return + } + oldcs.aux |= csAuxInlined + + if debugTrace&debugTraceCalls != 0 { + fmt.Fprintf(os.Stderr, "=-= marked as inlined: callee=%v %s\n", + oldcs.Callee, EncodeCallSiteKey(oldcs)) + } + + // Walk the inlined call region to collect new callsites. 
+ var icp pstate + if oldcs.Flags&CallSiteOnPanicPath != 0 { + icp = psCallsPanic + } + var loopNestLevel int + if oldcs.Flags&CallSiteInLoop != 0 { + loopNestLevel = 1 + } + ptab := map[ir.Node]pstate{ic: icp} + nf := newNameFinder(nil) + icstab := computeCallSiteTable(callerfn, ic.Body, nil, ptab, loopNestLevel, nf) + + // Record parent callsite. This is primarily for debug output. + for _, cs := range icstab { + cs.parent = oldcs + } + + // Score the calls in the inlined body. Note the setting of + // "doCallResults" to false here: at the moment there isn't any + // easy way to localize or region-ize the work done by + // "rescoreBasedOnCallResultUses", which currently does a walk + // over the entire function to look for uses of a given set of + // results. Similarly we're passing nil to makeCallSiteAnalyzer, + // so as to run name finding without the use of static value & + // friends. + csa := makeCallSiteAnalyzer(nil) + const doCallResults = false + csa.scoreCallsRegion(callerfn, ic.Body, icstab, doCallResults, ic) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go new file mode 100644 index 0000000000000000000000000000000000000000..b7403a4f8c18665274b62289e0ed31f235412b83 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go @@ -0,0 +1,356 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inlheur + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "fmt" + "os" +) + +// funcFlagsAnalyzer computes the "Flags" value for the FuncProps +// object we're computing. 
The main item of interest here is "nstate", +// which stores the disposition of a given ir Node with respect to the +// flags/properties we're trying to compute. +type funcFlagsAnalyzer struct { + fn *ir.Func + nstate map[ir.Node]pstate + noInfo bool // set if we see something inscrutable/un-analyzable +} + +// pstate keeps track of the disposition of a given node and its +// children with respect to panic/exit calls. +type pstate int + +const ( + psNoInfo pstate = iota // nothing interesting about this node + psCallsPanic // node causes call to panic or os.Exit + psMayReturn // executing node may trigger a "return" stmt + psTop // dataflow lattice "top" element +) + +func makeFuncFlagsAnalyzer(fn *ir.Func) *funcFlagsAnalyzer { + return &funcFlagsAnalyzer{ + fn: fn, + nstate: make(map[ir.Node]pstate), + } +} + +// setResults transfers func flag results to 'funcProps'. +func (ffa *funcFlagsAnalyzer) setResults(funcProps *FuncProps) { + var rv FuncPropBits + if !ffa.noInfo && ffa.stateForList(ffa.fn.Body) == psCallsPanic { + rv = FuncPropNeverReturns + } + // This is slightly hacky and not at all required, but include a + // special case for main.main, which often ends in a call to + // os.Exit. People who write code like this (very common I + // imagine) + // + // func main() { + // rc = perform() + // ... + // foo() + // os.Exit(rc) + // } + // + // will be constantly surprised when foo() is inlined in many + // other spots in the program but not in main(). 
+ if isMainMain(ffa.fn) { + rv &^= FuncPropNeverReturns + } + funcProps.Flags = rv +} + +func (ffa *funcFlagsAnalyzer) getState(n ir.Node) pstate { + return ffa.nstate[n] +} + +func (ffa *funcFlagsAnalyzer) setState(n ir.Node, st pstate) { + if st != psNoInfo { + ffa.nstate[n] = st + } +} + +func (ffa *funcFlagsAnalyzer) updateState(n ir.Node, st pstate) { + if st == psNoInfo { + delete(ffa.nstate, n) + } else { + ffa.nstate[n] = st + } +} + +func (ffa *funcFlagsAnalyzer) panicPathTable() map[ir.Node]pstate { + return ffa.nstate +} + +// blockCombine merges together states as part of a linear sequence of +// statements, where 'pred' and 'succ' are analysis results for a pair +// of consecutive statements. Examples: +// +// case 1: case 2: +// panic("foo") if q { return x } <-pred +// return x panic("boo") <-succ +// +// In case 1, since the pred state is "always panic" it doesn't matter +// what the succ state is, hence the state for the combination of the +// two blocks is "always panics". In case 2, because there is a path +// to return that avoids the panic in succ, the state for the +// combination of the two statements is "may return". +func blockCombine(pred, succ pstate) pstate { + switch succ { + case psTop: + return pred + case psMayReturn: + if pred == psCallsPanic { + return psCallsPanic + } + return psMayReturn + case psNoInfo: + return pred + case psCallsPanic: + if pred == psMayReturn { + return psMayReturn + } + return psCallsPanic + } + panic("should never execute") +} + +// branchCombine combines two states at a control flow branch point where +// either p1 or p2 executes (as in an "if" statement). 
+func branchCombine(p1, p2 pstate) pstate {
+	if p1 == psCallsPanic && p2 == psCallsPanic {
+		return psCallsPanic
+	}
+	if p1 == psMayReturn || p2 == psMayReturn {
+		return psMayReturn
+	}
+	return psNoInfo
+}
+
+// stateForList walks through a list of statements and computes the
+// state/disposition for the entire list as a whole, as well
+// as updating disposition of intermediate nodes.
+func (ffa *funcFlagsAnalyzer) stateForList(list ir.Nodes) pstate {
+	st := psTop
+	// Walk the list backwards so that we can update the state for
+	// earlier list elements based on what we find out about their
+	// successors. Example:
+	//
+	// if ... {
+	// L10: foo()
+	// L11:
+	// L12: panic(...)
+	// }
+	//
+	// After combining the dispositions for line 11 and 12, we want to
+	// update the state for the call at line 10 based on that combined
+	// disposition (if L11 has no path to "return", then the call at
+	// line 10 will be on a panic path).
+	for i := len(list) - 1; i >= 0; i-- {
+		n := list[i]
+		psi := ffa.getState(n)
+		if debugTrace&debugTraceFuncFlags != 0 {
+			fmt.Fprintf(os.Stderr, "=-= %v: stateForList n=%s ps=%s\n",
+				ir.Line(n), n.Op().String(), psi.String())
+		}
+		st = blockCombine(psi, st)
+		ffa.updateState(n, st)
+	}
+	if st == psTop {
+		st = psNoInfo
+	}
+	return st
+}
+
+// isMainMain reports whether fn is the program entry point, i.e. the
+// function named "main" in the package named "main".
+func isMainMain(fn *ir.Func) bool {
+	s := fn.Sym()
+	return (s.Pkg.Name == "main" && s.Name == "main")
+}
+
+// isWellKnownFunc reports whether symbol s names the function
+// 'pkg.name', matching on package path and symbol name.
+func isWellKnownFunc(s *types.Sym, pkg, name string) bool {
+	return s.Pkg.Path == pkg && s.Name == name
+}
+
+// isExitCall reports TRUE if the node itself is an unconditional
+// call to os.Exit(), a panic, or a function that does likewise. 
+func isExitCall(n ir.Node) bool { + if n.Op() != ir.OCALLFUNC { + return false + } + cx := n.(*ir.CallExpr) + name := ir.StaticCalleeName(cx.Fun) + if name == nil { + return false + } + s := name.Sym() + if isWellKnownFunc(s, "os", "Exit") || + isWellKnownFunc(s, "runtime", "throw") { + return true + } + if funcProps := propsForFunc(name.Func); funcProps != nil { + if funcProps.Flags&FuncPropNeverReturns != 0 { + return true + } + } + return name.Func.NeverReturns() +} + +// pessimize is called to record the fact that we saw something in the +// function that renders it entirely impossible to analyze. +func (ffa *funcFlagsAnalyzer) pessimize() { + ffa.noInfo = true +} + +// shouldVisit reports TRUE if this is an interesting node from the +// perspective of computing function flags. NB: due to the fact that +// ir.CallExpr implements the Stmt interface, we wind up visiting +// a lot of nodes that we don't really need to, but these can +// simply be screened out as part of the visit. +func shouldVisit(n ir.Node) bool { + _, isStmt := n.(ir.Stmt) + return n.Op() != ir.ODCL && + (isStmt || n.Op() == ir.OCALLFUNC || n.Op() == ir.OPANIC) +} + +// nodeVisitPost helps implement the propAnalyzer interface; when +// called on a given node, it decides the disposition of that node +// based on the state(s) of the node's children. +func (ffa *funcFlagsAnalyzer) nodeVisitPost(n ir.Node) { + if debugTrace&debugTraceFuncFlags != 0 { + fmt.Fprintf(os.Stderr, "=+= nodevis %v %s should=%v\n", + ir.Line(n), n.Op().String(), shouldVisit(n)) + } + if !shouldVisit(n) { + return + } + var st pstate + switch n.Op() { + case ir.OCALLFUNC: + if isExitCall(n) { + st = psCallsPanic + } + case ir.OPANIC: + st = psCallsPanic + case ir.ORETURN: + st = psMayReturn + case ir.OBREAK, ir.OCONTINUE: + // FIXME: this handling of break/continue is sub-optimal; we + // have them as "mayReturn" in order to help with this case: + // + // for { + // if q() { break } + // panic(...) 
+	// }
+	//
+	// where the effect of the 'break' is to cause the subsequent
+	// panic to be skipped. One possible improvement would be to
+	// track whether the currently enclosing loop is a "for {" or
+	// a for/range with condition, then use mayReturn only for the
+	// former. Note also that "break X" or "continue X" is treated
+	// the same as "goto", since we don't have a good way to track
+	// the target of the branch.
+	st = psMayReturn
+	n := n.(*ir.BranchStmt)
+	if n.Label != nil {
+		ffa.pessimize()
+	}
+	case ir.OBLOCK:
+		n := n.(*ir.BlockStmt)
+		st = ffa.stateForList(n.List)
+	case ir.OCASE:
+		// A case clause can come from either a switch (CaseClause)
+		// or a select (CommClause); handle both.
+		if ccst, ok := n.(*ir.CaseClause); ok {
+			st = ffa.stateForList(ccst.Body)
+		} else if ccst, ok := n.(*ir.CommClause); ok {
+			st = ffa.stateForList(ccst.Body)
+		} else {
+			panic("unexpected")
+		}
+	case ir.OIF:
+		n := n.(*ir.IfStmt)
+		st = branchCombine(ffa.stateForList(n.Body), ffa.stateForList(n.Else))
+	case ir.OFOR:
+		// Treat for { XXX } like a block.
+		// Treat for <cond> { XXX } like an if statement with no else.
+		n := n.(*ir.ForStmt)
+		bst := ffa.stateForList(n.Body)
+		if n.Cond == nil {
+			st = bst
+		} else {
+			if bst == psMayReturn {
+				st = psMayReturn
+			}
+		}
+	case ir.ORANGE:
+		// Treat for range { XXX } like an if statement with no else.
+		n := n.(*ir.RangeStmt)
+		if ffa.stateForList(n.Body) == psMayReturn {
+			st = psMayReturn
+		}
+	case ir.OGOTO:
+		// punt if we see even one goto. if we built a control
+		// flow graph we could do more, but this is just a tree walk.
+		ffa.pessimize()
+	case ir.OSELECT:
+		// process selects for "may return" but not "always panics",
+		// the latter case seems very improbable. 
+		n := n.(*ir.SelectStmt)
+		if len(n.Cases) != 0 {
+			st = psTop
+			for _, c := range n.Cases {
+				st = branchCombine(ffa.stateForList(c.Body), st)
+			}
+		}
+	case ir.OSWITCH:
+		// Treat a switch as a multi-way branch: combine the
+		// dispositions of all case bodies with branchCombine.
+		// (A previous revision recomputed st here into a variable
+		// shadowed by ":="; that computation was dead code and has
+		// been removed.)
+		n := n.(*ir.SwitchStmt)
+		if len(n.Cases) != 0 {
+			st = psTop
+			for _, c := range n.Cases {
+				st = branchCombine(ffa.stateForList(c.Body), st)
+			}
+		}
+	case ir.OFALL:
+		// Not important.
+	case ir.ODCLFUNC, ir.ORECOVER, ir.OAS, ir.OAS2, ir.OAS2FUNC, ir.OASOP,
+		ir.OPRINTLN, ir.OPRINT, ir.OLABEL, ir.OCALLINTER, ir.ODEFER,
+		ir.OSEND, ir.ORECV, ir.OSELRECV2, ir.OGO, ir.OAPPEND, ir.OAS2DOTTYPE,
+		ir.OAS2MAPR, ir.OGETG, ir.ODELETE, ir.OINLMARK, ir.OAS2RECV,
+		ir.OMIN, ir.OMAX, ir.OMAKE, ir.ORECOVERFP, ir.OGETCALLERSP:
+		// these should all be benign/uninteresting
+	case ir.OTAILCALL, ir.OJUMPTABLE, ir.OTYPESW:
+		// don't expect to see these at all. 
+ base.Fatalf("unexpected op %s in func %s", + n.Op().String(), ir.FuncName(ffa.fn)) + default: + base.Fatalf("%v: unhandled op %s in func %v", + ir.Line(n), n.Op().String(), ir.FuncName(ffa.fn)) + } + if debugTrace&debugTraceFuncFlags != 0 { + fmt.Fprintf(os.Stderr, "=-= %v: visit n=%s returns %s\n", + ir.Line(n), n.Op().String(), st.String()) + } + ffa.setState(n, st) +} + +func (ffa *funcFlagsAnalyzer) nodeVisitPre(n ir.Node) { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/analyze_func_params.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/analyze_func_params.go new file mode 100644 index 0000000000000000000000000000000000000000..d85d73b2efc487a94a78c56589ff91dcfa2fd659 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/analyze_func_params.go @@ -0,0 +1,355 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inlheur + +import ( + "cmd/compile/internal/ir" + "fmt" + "os" +) + +// paramsAnalyzer holds state information for the phase that computes +// flags for a Go functions parameters, for use in inline heuristics. +// Note that the params slice below includes entries for blanks. +type paramsAnalyzer struct { + fname string + values []ParamPropBits + params []*ir.Name + top []bool + *condLevelTracker + *nameFinder +} + +// getParams returns an *ir.Name slice containing all params for the +// function (plus rcvr as well if applicable). +func getParams(fn *ir.Func) []*ir.Name { + sig := fn.Type() + numParams := sig.NumRecvs() + sig.NumParams() + return fn.Dcl[:numParams] +} + +// addParamsAnalyzer creates a new paramsAnalyzer helper object for +// the function fn, appends it to the analyzers list, and returns the +// new list. 
If the function in question doesn't have any interesting +// parameters then the analyzer list is returned unchanged, and the +// params flags in "fp" are updated accordingly. +func addParamsAnalyzer(fn *ir.Func, analyzers []propAnalyzer, fp *FuncProps, nf *nameFinder) []propAnalyzer { + pa, props := makeParamsAnalyzer(fn, nf) + if pa != nil { + analyzers = append(analyzers, pa) + } else { + fp.ParamFlags = props + } + return analyzers +} + +// makeParamAnalyzer creates a new helper object to analyze parameters +// of function fn. If the function doesn't have any interesting +// params, a nil helper is returned along with a set of default param +// flags for the func. +func makeParamsAnalyzer(fn *ir.Func, nf *nameFinder) (*paramsAnalyzer, []ParamPropBits) { + params := getParams(fn) // includes receiver if applicable + if len(params) == 0 { + return nil, nil + } + vals := make([]ParamPropBits, len(params)) + if fn.Inl == nil { + return nil, vals + } + top := make([]bool, len(params)) + interestingToAnalyze := false + for i, pn := range params { + if pn == nil { + continue + } + pt := pn.Type() + if !pt.IsScalar() && !pt.HasNil() { + // existing properties not applicable here (for things + // like structs, arrays, slices, etc). + continue + } + // If param is reassigned, skip it. 
+ if ir.Reassigned(pn) { + continue + } + top[i] = true + interestingToAnalyze = true + } + if !interestingToAnalyze { + return nil, vals + } + + if debugTrace&debugTraceParams != 0 { + fmt.Fprintf(os.Stderr, "=-= param analysis of func %v:\n", + fn.Sym().Name) + for i := range vals { + n := "_" + if params[i] != nil { + n = params[i].Sym().String() + } + fmt.Fprintf(os.Stderr, "=-= %d: %q %s top=%v\n", + i, n, vals[i].String(), top[i]) + } + } + pa := ¶msAnalyzer{ + fname: fn.Sym().Name, + values: vals, + params: params, + top: top, + condLevelTracker: new(condLevelTracker), + nameFinder: nf, + } + return pa, nil +} + +func (pa *paramsAnalyzer) setResults(funcProps *FuncProps) { + funcProps.ParamFlags = pa.values +} + +func (pa *paramsAnalyzer) findParamIdx(n *ir.Name) int { + if n == nil { + panic("bad") + } + for i := range pa.params { + if pa.params[i] == n { + return i + } + } + return -1 +} + +type testfType func(x ir.Node, param *ir.Name, idx int) (bool, bool) + +// paramsAnalyzer invokes function 'testf' on the specified expression +// 'x' for each parameter, and if the result is TRUE, or's 'flag' into +// the flags for that param. +func (pa *paramsAnalyzer) checkParams(x ir.Node, flag ParamPropBits, mayflag ParamPropBits, testf testfType) { + for idx, p := range pa.params { + if !pa.top[idx] && pa.values[idx] == ParamNoInfo { + continue + } + result, may := testf(x, p, idx) + if debugTrace&debugTraceParams != 0 { + fmt.Fprintf(os.Stderr, "=-= test expr %v param %s result=%v flag=%s\n", x, p.Sym().Name, result, flag.String()) + } + if result { + v := flag + if pa.condLevel != 0 || may { + v = mayflag + } + pa.values[idx] |= v + pa.top[idx] = false + } + } +} + +// foldCheckParams checks expression 'x' (an 'if' condition or +// 'switch' stmt expr) to see if the expr would fold away if a +// specific parameter had a constant value. 
+func (pa *paramsAnalyzer) foldCheckParams(x ir.Node) {
+	pa.checkParams(x, ParamFeedsIfOrSwitch, ParamMayFeedIfOrSwitch,
+		func(x ir.Node, p *ir.Name, idx int) (bool, bool) {
+			return ShouldFoldIfNameConstant(x, []*ir.Name{p}), false
+		})
+}
+
+// callCheckParams examines the target of call expression 'ce' to see
+// if it is making a call to the value passed in for some parameter.
+func (pa *paramsAnalyzer) callCheckParams(ce *ir.CallExpr) {
+	switch ce.Op() {
+	case ir.OCALLINTER:
+		// Interface method call: flag the param (if any) whose value
+		// the method is being invoked on.
+		// (A redundant "if ce.Op() != ir.OCALLINTER" guard here could
+		// never trigger — the switch just matched that op — and has
+		// been removed.)
+		sel := ce.Fun.(*ir.SelectorExpr)
+		r := pa.staticValue(sel.X)
+		if r.Op() != ir.ONAME {
+			return
+		}
+		name := r.(*ir.Name)
+		if name.Class != ir.PPARAM {
+			return
+		}
+		pa.checkParams(r, ParamFeedsInterfaceMethodCall,
+			ParamMayFeedInterfaceMethodCall,
+			func(x ir.Node, p *ir.Name, idx int) (bool, bool) {
+				name := x.(*ir.Name)
+				return name == p, false
+			})
+	case ir.OCALLFUNC:
+		if ce.Fun.Op() != ir.ONAME {
+			return
+		}
+		// NOTE(review): this arm uses ir.StaticValue directly while the
+		// OCALLINTER arm above goes through pa.staticValue — confirm the
+		// asymmetry is intentional before unifying.
+		called := ir.StaticValue(ce.Fun)
+		if called.Op() != ir.ONAME {
+			return
+		}
+		name := called.(*ir.Name)
+		if name.Class == ir.PPARAM {
+			// Indirect call through a parameter.
+			pa.checkParams(called, ParamFeedsIndirectCall,
+				ParamMayFeedIndirectCall,
+				func(x ir.Node, p *ir.Name, idx int) (bool, bool) {
+					name := x.(*ir.Name)
+					return name == p, false
+				})
+		} else {
+			// Direct call to a known function: see whether the callee's
+			// param properties tell us something about our own params.
+			cname := pa.funcName(called)
+			if cname != nil {
+				pa.deriveFlagsFromCallee(ce, cname.Func)
+			}
+		}
+	}
+}
+
+// deriveFlagsFromCallee tries to derive flags for the current
+// function based on a call this function makes to some other
+// function. Example:
+//
+//	/* Simple */                /* Derived from callee */
+//	func foo(f func(int)) {     func foo(f func(int)) {
+//	  f(2)                        bar(32, f)
+//	}                           }
+//	                            func bar(x int, f func()) {
+//	                              f(x)
+//	                            }
+//
+// Here we can set the "param feeds indirect call" flag for
+// foo's param 'f' since we know that bar has that flag set for
+// its second param, and we're passing that param a function. 
+func (pa *paramsAnalyzer) deriveFlagsFromCallee(ce *ir.CallExpr, callee *ir.Func) { + calleeProps := propsForFunc(callee) + if calleeProps == nil { + return + } + if debugTrace&debugTraceParams != 0 { + fmt.Fprintf(os.Stderr, "=-= callee props for %v:\n%s", + callee.Sym().Name, calleeProps.String()) + } + + must := []ParamPropBits{ParamFeedsInterfaceMethodCall, ParamFeedsIndirectCall, ParamFeedsIfOrSwitch} + may := []ParamPropBits{ParamMayFeedInterfaceMethodCall, ParamMayFeedIndirectCall, ParamMayFeedIfOrSwitch} + + for pidx, arg := range ce.Args { + // Does the callee param have any interesting properties? + // If not we can skip this one. + pflag := calleeProps.ParamFlags[pidx] + if pflag == 0 { + continue + } + // See if one of the caller's parameters is flowing unmodified + // into this actual expression. + r := pa.staticValue(arg) + if r.Op() != ir.ONAME { + return + } + name := r.(*ir.Name) + if name.Class != ir.PPARAM { + return + } + callerParamIdx := pa.findParamIdx(name) + // note that callerParamIdx may return -1 in the case where + // the param belongs not to the current closure func we're + // analyzing but to an outer enclosing func. 
+ if callerParamIdx == -1 { + return + } + if pa.params[callerParamIdx] == nil { + panic("something went wrong") + } + if !pa.top[callerParamIdx] && + pa.values[callerParamIdx] == ParamNoInfo { + continue + } + if debugTrace&debugTraceParams != 0 { + fmt.Fprintf(os.Stderr, "=-= pflag for arg %d is %s\n", + pidx, pflag.String()) + } + for i := range must { + mayv := may[i] + mustv := must[i] + if pflag&mustv != 0 && pa.condLevel == 0 { + pa.values[callerParamIdx] |= mustv + } else if pflag&(mustv|mayv) != 0 { + pa.values[callerParamIdx] |= mayv + } + } + pa.top[callerParamIdx] = false + } +} + +func (pa *paramsAnalyzer) nodeVisitPost(n ir.Node) { + if len(pa.values) == 0 { + return + } + pa.condLevelTracker.post(n) + switch n.Op() { + case ir.OCALLFUNC: + ce := n.(*ir.CallExpr) + pa.callCheckParams(ce) + case ir.OCALLINTER: + ce := n.(*ir.CallExpr) + pa.callCheckParams(ce) + case ir.OIF: + ifst := n.(*ir.IfStmt) + pa.foldCheckParams(ifst.Cond) + case ir.OSWITCH: + swst := n.(*ir.SwitchStmt) + if swst.Tag != nil { + pa.foldCheckParams(swst.Tag) + } + } +} + +func (pa *paramsAnalyzer) nodeVisitPre(n ir.Node) { + if len(pa.values) == 0 { + return + } + pa.condLevelTracker.pre(n) +} + +// condLevelTracker helps keeps track very roughly of "level of conditional +// nesting", e.g. how many "if" statements you have to go through to +// get to the point where a given stmt executes. Example: +// +// cond nesting level +// func foo() { +// G = 1 0 +// if x < 10 { 0 +// if y < 10 { 1 +// G = 0 2 +// } +// } +// } +// +// The intent here is to provide some sort of very abstract relative +// hotness metric, e.g. "G = 1" above is expected to be executed more +// often than "G = 0" (in the aggregate, across large numbers of +// functions). +type condLevelTracker struct { + condLevel int +} + +func (c *condLevelTracker) pre(n ir.Node) { + // Increment level of "conditional testing" if we see + // an "if" or switch statement, and decrement if in + // a loop. 
+ switch n.Op() { + case ir.OIF, ir.OSWITCH: + c.condLevel++ + case ir.OFOR, ir.ORANGE: + c.condLevel-- + } +} + +func (c *condLevelTracker) post(n ir.Node) { + switch n.Op() { + case ir.OFOR, ir.ORANGE: + c.condLevel++ + case ir.OIF: + c.condLevel-- + case ir.OSWITCH: + c.condLevel-- + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/analyze_func_returns.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/analyze_func_returns.go new file mode 100644 index 0000000000000000000000000000000000000000..2aaa68d1b7ace5b121dae16e714124b985796eec --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/analyze_func_returns.go @@ -0,0 +1,277 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inlheur + +import ( + "cmd/compile/internal/ir" + "fmt" + "go/constant" + "go/token" + "os" +) + +// resultsAnalyzer stores state information for the process of +// computing flags/properties for the return values of a specific Go +// function, as part of inline heuristics synthesis. +type resultsAnalyzer struct { + fname string + props []ResultPropBits + values []resultVal + inlineMaxBudget int + *nameFinder +} + +// resultVal captures information about a specific result returned from +// the function we're analyzing; we are interested in cases where +// the func always returns the same constant, or always returns +// the same function, etc. This container stores info on a the specific +// scenarios we're looking for. +type resultVal struct { + cval constant.Value + fn *ir.Name + fnClo bool + top bool + derived bool // see deriveReturnFlagsFromCallee below +} + +// addResultsAnalyzer creates a new resultsAnalyzer helper object for +// the function fn, appends it to the analyzers list, and returns the +// new list. 
If the function in question doesn't have any returns (or +// any interesting returns) then the analyzer list is left as is, and +// the result flags in "fp" are updated accordingly. +func addResultsAnalyzer(fn *ir.Func, analyzers []propAnalyzer, fp *FuncProps, inlineMaxBudget int, nf *nameFinder) []propAnalyzer { + ra, props := makeResultsAnalyzer(fn, inlineMaxBudget, nf) + if ra != nil { + analyzers = append(analyzers, ra) + } else { + fp.ResultFlags = props + } + return analyzers +} + +// makeResultsAnalyzer creates a new helper object to analyze results +// in function fn. If the function doesn't have any interesting +// results, a nil helper is returned along with a set of default +// result flags for the func. +func makeResultsAnalyzer(fn *ir.Func, inlineMaxBudget int, nf *nameFinder) (*resultsAnalyzer, []ResultPropBits) { + results := fn.Type().Results() + if len(results) == 0 { + return nil, nil + } + props := make([]ResultPropBits, len(results)) + if fn.Inl == nil { + return nil, props + } + vals := make([]resultVal, len(results)) + interestingToAnalyze := false + for i := range results { + rt := results[i].Type + if !rt.IsScalar() && !rt.HasNil() { + // existing properties not applicable here (for things + // like structs, arrays, slices, etc). + continue + } + // set the "top" flag (as in "top element of data flow lattice") + // meaning "we have no info yet, but we might later on". + vals[i].top = true + interestingToAnalyze = true + } + if !interestingToAnalyze { + return nil, props + } + ra := &resultsAnalyzer{ + props: props, + values: vals, + inlineMaxBudget: inlineMaxBudget, + nameFinder: nf, + } + return ra, nil +} + +// setResults transfers the calculated result properties for this +// function to 'funcProps'. 
+func (ra *resultsAnalyzer) setResults(funcProps *FuncProps) { + // Promote ResultAlwaysSameFunc to ResultAlwaysSameInlinableFunc + for i := range ra.values { + if ra.props[i] == ResultAlwaysSameFunc && !ra.values[i].derived { + f := ra.values[i].fn.Func + // HACK: in order to allow for call site score + // adjustments, we used a relaxed inline budget in + // determining inlinability. For the check below, however, + // we want to know is whether the func in question is + // likely to be inlined, as opposed to whether it might + // possibly be inlined if all the right score adjustments + // happened, so do a simple check based on the cost. + if f.Inl != nil && f.Inl.Cost <= int32(ra.inlineMaxBudget) { + ra.props[i] = ResultAlwaysSameInlinableFunc + } + } + } + funcProps.ResultFlags = ra.props +} + +func (ra *resultsAnalyzer) pessimize() { + for i := range ra.props { + ra.props[i] = ResultNoInfo + } +} + +func (ra *resultsAnalyzer) nodeVisitPre(n ir.Node) { +} + +func (ra *resultsAnalyzer) nodeVisitPost(n ir.Node) { + if len(ra.values) == 0 { + return + } + if n.Op() != ir.ORETURN { + return + } + if debugTrace&debugTraceResults != 0 { + fmt.Fprintf(os.Stderr, "=+= returns nodevis %v %s\n", + ir.Line(n), n.Op().String()) + } + + // No support currently for named results, so if we see an empty + // "return" stmt, be conservative. + rs := n.(*ir.ReturnStmt) + if len(rs.Results) != len(ra.values) { + ra.pessimize() + return + } + for i, r := range rs.Results { + ra.analyzeResult(i, r) + } +} + +// analyzeResult examines the expression 'n' being returned as the +// 'ii'th argument in some return statement to see whether has +// interesting characteristics (for example, returns a constant), then +// applies a dataflow "meet" operation to combine this result with any +// previous result (for the given return slot) that we've already +// processed. 
+func (ra *resultsAnalyzer) analyzeResult(ii int, n ir.Node) { + isAllocMem := ra.isAllocatedMem(n) + isConcConvItf := ra.isConcreteConvIface(n) + constVal := ra.constValue(n) + isConst := (constVal != nil) + isNil := ra.isNil(n) + rfunc := ra.funcName(n) + isFunc := (rfunc != nil) + isClo := (rfunc != nil && rfunc.Func.OClosure != nil) + curp := ra.props[ii] + dprops, isDerivedFromCall := ra.deriveReturnFlagsFromCallee(n) + newp := ResultNoInfo + var newcval constant.Value + var newfunc *ir.Name + + if debugTrace&debugTraceResults != 0 { + fmt.Fprintf(os.Stderr, "=-= %v: analyzeResult n=%s ismem=%v isconcconv=%v isconst=%v isnil=%v isfunc=%v isclo=%v\n", ir.Line(n), n.Op().String(), isAllocMem, isConcConvItf, isConst, isNil, isFunc, isClo) + } + + if ra.values[ii].top { + ra.values[ii].top = false + // this is the first return we've seen; record + // whatever properties it has. + switch { + case isAllocMem: + newp = ResultIsAllocatedMem + case isConcConvItf: + newp = ResultIsConcreteTypeConvertedToInterface + case isFunc: + newp = ResultAlwaysSameFunc + newfunc = rfunc + case isConst: + newp = ResultAlwaysSameConstant + newcval = constVal + case isNil: + newp = ResultAlwaysSameConstant + newcval = nil + case isDerivedFromCall: + newp = dprops + ra.values[ii].derived = true + } + } else { + if !ra.values[ii].derived { + // this is not the first return we've seen; apply + // what amounts of a "meet" operator to combine + // the properties we see here with what we saw on + // the previous returns. 
+ switch curp { + case ResultIsAllocatedMem: + if isAllocMem { + newp = ResultIsAllocatedMem + } + case ResultIsConcreteTypeConvertedToInterface: + if isConcConvItf { + newp = ResultIsConcreteTypeConvertedToInterface + } + case ResultAlwaysSameConstant: + if isNil && ra.values[ii].cval == nil { + newp = ResultAlwaysSameConstant + newcval = nil + } else if isConst && constant.Compare(constVal, token.EQL, ra.values[ii].cval) { + newp = ResultAlwaysSameConstant + newcval = constVal + } + case ResultAlwaysSameFunc: + if isFunc && isSameFuncName(rfunc, ra.values[ii].fn) { + newp = ResultAlwaysSameFunc + newfunc = rfunc + } + } + } + } + ra.values[ii].fn = newfunc + ra.values[ii].fnClo = isClo + ra.values[ii].cval = newcval + ra.props[ii] = newp + + if debugTrace&debugTraceResults != 0 { + fmt.Fprintf(os.Stderr, "=-= %v: analyzeResult newp=%s\n", + ir.Line(n), newp) + } +} + +// deriveReturnFlagsFromCallee tries to set properties for a given +// return result where we're returning call expression; return value +// is a return property value and a boolean indicating whether the +// prop is valid. Examples: +// +// func foo() int { return bar() } +// func bar() int { return 42 } +// func blix() int { return 43 } +// func two(y int) int { +// if y < 0 { return bar() } else { return blix() } +// } +// +// Since "foo" always returns the result of a call to "bar", we can +// set foo's return property to that of bar. In the case of "two", however, +// even though each return path returns a constant, we don't know +// whether the constants are identical, hence we need to be conservative. 
+func (ra *resultsAnalyzer) deriveReturnFlagsFromCallee(n ir.Node) (ResultPropBits, bool) { + if n.Op() != ir.OCALLFUNC { + return 0, false + } + ce := n.(*ir.CallExpr) + if ce.Fun.Op() != ir.ONAME { + return 0, false + } + called := ir.StaticValue(ce.Fun) + if called.Op() != ir.ONAME { + return 0, false + } + cname := ra.funcName(called) + if cname == nil { + return 0, false + } + calleeProps := propsForFunc(cname.Func) + if calleeProps == nil { + return 0, false + } + if len(calleeProps.ResultFlags) != 1 { + return 0, false + } + return calleeProps.ResultFlags[0], true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/callsite.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/callsite.go new file mode 100644 index 0000000000000000000000000000000000000000..f457dd439b241f31a0682583ca159ac23c4d1928 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/callsite.go @@ -0,0 +1,149 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inlheur + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/internal/src" + "fmt" + "io" + "path/filepath" + "sort" + "strings" +) + +// CallSite records useful information about a potentially inlinable +// (direct) function call. "Callee" is the target of the call, "Call" +// is the ir node corresponding to the call itself, "Assign" is +// the top-level assignment statement containing the call (if the call +// appears in the form of a top-level statement, e.g. "x := foo()"), +// "Flags" contains properties of the call that might be useful for +// making inlining decisions, "Score" is the final score assigned to +// the site, and "ID" is a numeric ID for the site within its +// containing function. 
+type CallSite struct { + Callee *ir.Func + Call *ir.CallExpr + parent *CallSite + Assign ir.Node + Flags CSPropBits + + ArgProps []ActualExprPropBits + Score int + ScoreMask scoreAdjustTyp + ID uint + aux uint8 +} + +// CallSiteTab is a table of call sites, keyed by call expr. +// Ideally it would be nice to key the table by src.XPos, but +// this results in collisions for calls on very long lines (the +// front end saturates column numbers at 255). We also wind up +// with many calls that share the same auto-generated pos. +type CallSiteTab map[*ir.CallExpr]*CallSite + +// ActualExprPropBits describes a property of an actual expression (value +// passed to some specific func argument at a call site). +type ActualExprPropBits uint8 + +const ( + ActualExprConstant ActualExprPropBits = 1 << iota + ActualExprIsConcreteConvIface + ActualExprIsFunc + ActualExprIsInlinableFunc +) + +type CSPropBits uint32 + +const ( + CallSiteInLoop CSPropBits = 1 << iota + CallSiteOnPanicPath + CallSiteInInitFunc +) + +type csAuxBits uint8 + +const ( + csAuxInlined = 1 << iota +) + +// encodedCallSiteTab is a table keyed by "encoded" callsite +// (stringified src.XPos plus call site ID) mapping to a value of call +// property bits and score. 
+type encodedCallSiteTab map[string]propsAndScore + +type propsAndScore struct { + props CSPropBits + score int + mask scoreAdjustTyp +} + +func (pas propsAndScore) String() string { + return fmt.Sprintf("P=%s|S=%d|M=%s", pas.props.String(), + pas.score, pas.mask.String()) +} + +func (cst CallSiteTab) merge(other CallSiteTab) error { + for k, v := range other { + if prev, ok := cst[k]; ok { + return fmt.Errorf("internal error: collision during call site table merge, fn=%s callsite=%s", prev.Callee.Sym().Name, fmtFullPos(prev.Call.Pos())) + } + cst[k] = v + } + return nil +} + +func fmtFullPos(p src.XPos) string { + var sb strings.Builder + sep := "" + base.Ctxt.AllPos(p, func(pos src.Pos) { + fmt.Fprintf(&sb, sep) + sep = "|" + file := filepath.Base(pos.Filename()) + fmt.Fprintf(&sb, "%s:%d:%d", file, pos.Line(), pos.Col()) + }) + return sb.String() +} + +func EncodeCallSiteKey(cs *CallSite) string { + var sb strings.Builder + // FIXME: maybe rewrite line offsets relative to function start? + sb.WriteString(fmtFullPos(cs.Call.Pos())) + fmt.Fprintf(&sb, "|%d", cs.ID) + return sb.String() +} + +func buildEncodedCallSiteTab(tab CallSiteTab) encodedCallSiteTab { + r := make(encodedCallSiteTab) + for _, cs := range tab { + k := EncodeCallSiteKey(cs) + r[k] = propsAndScore{ + props: cs.Flags, + score: cs.Score, + mask: cs.ScoreMask, + } + } + return r +} + +// dumpCallSiteComments emits comments into the dump file for the +// callsites in the function of interest. If "ecst" is non-nil, we use +// that, otherwise generated a fresh encodedCallSiteTab from "tab". 
+func dumpCallSiteComments(w io.Writer, tab CallSiteTab, ecst encodedCallSiteTab) { + if ecst == nil { + ecst = buildEncodedCallSiteTab(tab) + } + tags := make([]string, 0, len(ecst)) + for k := range ecst { + tags = append(tags, k) + } + sort.Strings(tags) + for _, s := range tags { + v := ecst[s] + fmt.Fprintf(w, "// callsite: %s flagstr %q flagval %d score %d mask %d maskstr %q\n", s, v.props.String(), v.props, v.score, v.mask, v.mask.String()) + } + fmt.Fprintf(w, "// %s\n", csDelimiter) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/cspropbits_string.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/cspropbits_string.go new file mode 100644 index 0000000000000000000000000000000000000000..216f510c9928dfcacb767d32328f79ee2778663c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/cspropbits_string.go @@ -0,0 +1,56 @@ +// Code generated by "stringer -bitset -type CSPropBits"; DO NOT EDIT. + +package inlheur + +import "strconv" +import "bytes" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[CallSiteInLoop-1] + _ = x[CallSiteOnPanicPath-2] + _ = x[CallSiteInInitFunc-4] +} + +var _CSPropBits_value = [...]uint64{ + 0x1, /* CallSiteInLoop */ + 0x2, /* CallSiteOnPanicPath */ + 0x4, /* CallSiteInInitFunc */ +} + +const _CSPropBits_name = "CallSiteInLoopCallSiteOnPanicPathCallSiteInInitFunc" + +var _CSPropBits_index = [...]uint8{0, 14, 33, 51} + +func (i CSPropBits) String() string { + var b bytes.Buffer + + remain := uint64(i) + seen := false + + for k, v := range _CSPropBits_value { + x := _CSPropBits_name[_CSPropBits_index[k]:_CSPropBits_index[k+1]] + if v == 0 { + if i == 0 { + b.WriteString(x) + return b.String() + } + continue + } + if (v & remain) == v { + remain &^= v + x := _CSPropBits_name[_CSPropBits_index[k]:_CSPropBits_index[k+1]] + if seen { + b.WriteString("|") + } + seen = true + b.WriteString(x) + } + } + if remain == 0 { + return b.String() + } + return "CSPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")" +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/debugflags_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/debugflags_test.go new file mode 100644 index 0000000000000000000000000000000000000000..abf491070f6944ebd3da0c984cef92fe31fdda72 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/debugflags_test.go @@ -0,0 +1,65 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package inlheur + +import ( + "testing" +) + +func TestInlScoreAdjFlagParse(t *testing.T) { + scenarios := []struct { + value string + expok bool + }{ + { + value: "returnFeedsConcreteToInterfaceCallAdj:9", + expok: true, + }, + { + value: "panicPathAdj:-1/initFuncAdj:9", + expok: true, + }, + { + value: "", + expok: false, + }, + { + value: "nonsenseAdj:10", + expok: false, + }, + { + value: "inLoopAdj:", + expok: false, + }, + { + value: "inLoopAdj:10:10", + expok: false, + }, + { + value: "inLoopAdj:blah", + expok: false, + }, + { + value: "/", + expok: false, + }, + } + + for _, scenario := range scenarios { + err := parseScoreAdj(scenario.value) + t.Logf("for value=%q err is %v\n", scenario.value, err) + if scenario.expok { + if err != nil { + t.Errorf("expected parseScoreAdj(%s) ok, got err %v", + scenario.value, err) + } + } else { + if err == nil { + t.Errorf("expected parseScoreAdj(%s) failure, got success", + scenario.value) + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/dumpscores_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/dumpscores_test.go new file mode 100644 index 0000000000000000000000000000000000000000..438b70096f2a3dbd4fe8162407980b07ddb6b910 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/dumpscores_test.go @@ -0,0 +1,109 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package inlheur + +import ( + "internal/testenv" + "os" + "path/filepath" + "strings" + "testing" +) + +func TestDumpCallSiteScoreDump(t *testing.T) { + td := t.TempDir() + testenv.MustHaveGoBuild(t) + + scenarios := []struct { + name string + promoted int + indirectlyPromoted int + demoted int + unchanged int + }{ + { + name: "dumpscores", + promoted: 1, + indirectlyPromoted: 1, + demoted: 1, + unchanged: 5, + }, + } + + for _, scen := range scenarios { + dumpfile, err := gatherInlCallSitesScoresForFile(t, scen.name, td) + if err != nil { + t.Fatalf("dumping callsite scores for %q: error %v", scen.name, err) + } + var lines []string + if content, err := os.ReadFile(dumpfile); err != nil { + t.Fatalf("reading dump %q: error %v", dumpfile, err) + } else { + lines = strings.Split(string(content), "\n") + } + prom, indprom, dem, unch := 0, 0, 0, 0 + for _, line := range lines { + switch { + case strings.TrimSpace(line) == "": + case !strings.Contains(line, "|"): + case strings.HasPrefix(line, "#"): + case strings.Contains(line, "PROMOTED"): + prom++ + case strings.Contains(line, "INDPROM"): + indprom++ + case strings.Contains(line, "DEMOTED"): + dem++ + default: + unch++ + } + } + showout := false + if prom != scen.promoted { + t.Errorf("testcase %q, got %d promoted want %d promoted", + scen.name, prom, scen.promoted) + showout = true + } + if indprom != scen.indirectlyPromoted { + t.Errorf("testcase %q, got %d indirectly promoted want %d", + scen.name, indprom, scen.indirectlyPromoted) + showout = true + } + if dem != scen.demoted { + t.Errorf("testcase %q, got %d demoted want %d demoted", + scen.name, dem, scen.demoted) + showout = true + } + if unch != scen.unchanged { + t.Errorf("testcase %q, got %d unchanged want %d unchanged", + scen.name, unch, scen.unchanged) + showout = true + } + if showout { + t.Logf(">> dump output: %s", strings.Join(lines, "\n")) + } + } +} + +// gatherInlCallSitesScoresForFile builds the specified testcase 'testcase' +// from 
testdata/props passing the "-d=dumpinlcallsitescores=1" +// compiler option, to produce a dump, then returns the path of the +// newly created file. +func gatherInlCallSitesScoresForFile(t *testing.T, testcase string, td string) (string, error) { + t.Helper() + gopath := "testdata/" + testcase + ".go" + outpath := filepath.Join(td, testcase+".a") + dumpfile := filepath.Join(td, testcase+".callsites.txt") + run := []string{testenv.GoToolPath(t), "build", + "-gcflags=-d=dumpinlcallsitescores=1", "-o", outpath, gopath} + out, err := testenv.Command(t, run[0], run[1:]...).CombinedOutput() + t.Logf("run: %+v\n", run) + if err != nil { + return "", err + } + if err := os.WriteFile(dumpfile, out, 0666); err != nil { + return "", err + } + return dumpfile, err +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/eclassify.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/eclassify.go new file mode 100644 index 0000000000000000000000000000000000000000..1e6d1b9e37864e11ecfa3100d177ba2b5792aea2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/eclassify.go @@ -0,0 +1,247 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inlheur + +import ( + "cmd/compile/internal/ir" + "fmt" + "os" +) + +// ShouldFoldIfNameConstant analyzes expression tree 'e' to see +// whether it contains only combinations of simple references to all +// of the names in 'names' with selected constants + operators. The +// intent is to identify expression that could be folded away to a +// constant if the value of 'n' were available. Return value is TRUE +// if 'e' does look foldable given the value of 'n', and given that +// 'e' actually makes reference to 'n'. Some examples where the type +// of "n" is int64, type of "s" is string, and type of "p" is *byte: +// +// Simple? 
Expr +// yes n<10 +// yes n*n-100 +// yes (n < 10 || n > 100) && (n >= 12 || n <= 99 || n != 101) +// yes s == "foo" +// yes p == nil +// no nm +// no float32(n)<1.0 +// no *p == 1 +// no 1 + 100 +// no 1 / n +// no 1 + unsafe.Sizeof(n) +// +// To avoid complexities (e.g. nan, inf) we stay way from folding and +// floating point or complex operations (integers, bools, and strings +// only). We also try to be conservative about avoiding any operation +// that might result in a panic at runtime, e.g. for "n" with type +// int64: +// +// 1<<(n-9) < 100/(n<<9999) +// +// we would return FALSE due to the negative shift count and/or +// potential divide by zero. +func ShouldFoldIfNameConstant(n ir.Node, names []*ir.Name) bool { + cl := makeExprClassifier(names) + var doNode func(ir.Node) bool + doNode = func(n ir.Node) bool { + ir.DoChildren(n, doNode) + cl.Visit(n) + return false + } + doNode(n) + if cl.getdisp(n) != exprSimple { + return false + } + for _, v := range cl.names { + if !v { + return false + } + } + return true +} + +// exprClassifier holds intermediate state about nodes within an +// expression tree being analyzed by ShouldFoldIfNameConstant. Here +// "name" is the name node passed in, and "disposition" stores the +// result of classifying a given IR node. 
+type exprClassifier struct { + names map[*ir.Name]bool + disposition map[ir.Node]disp +} + +type disp int + +const ( + // no info on this expr + exprNoInfo disp = iota + + // expr contains only literals + exprLiterals + + // expr is legal combination of literals and specified names + exprSimple +) + +func (d disp) String() string { + switch d { + case exprNoInfo: + return "noinfo" + case exprSimple: + return "simple" + case exprLiterals: + return "literals" + default: + return fmt.Sprintf("unknown<%d>", d) + } +} + +func makeExprClassifier(names []*ir.Name) *exprClassifier { + m := make(map[*ir.Name]bool, len(names)) + for _, n := range names { + m[n] = false + } + return &exprClassifier{ + names: m, + disposition: make(map[ir.Node]disp), + } +} + +// Visit sets the classification for 'n' based on the previously +// calculated classifications for n's children, as part of a bottom-up +// walk over an expression tree. +func (ec *exprClassifier) Visit(n ir.Node) { + + ndisp := exprNoInfo + + binparts := func(n ir.Node) (ir.Node, ir.Node) { + if lex, ok := n.(*ir.LogicalExpr); ok { + return lex.X, lex.Y + } else if bex, ok := n.(*ir.BinaryExpr); ok { + return bex.X, bex.Y + } else { + panic("bad") + } + } + + t := n.Type() + if t == nil { + if debugTrace&debugTraceExprClassify != 0 { + fmt.Fprintf(os.Stderr, "=-= *** untyped op=%s\n", + n.Op().String()) + } + } else if t.IsInteger() || t.IsString() || t.IsBoolean() || t.HasNil() { + switch n.Op() { + // FIXME: maybe add support for OADDSTR? 
+ case ir.ONIL: + ndisp = exprLiterals + + case ir.OLITERAL: + if _, ok := n.(*ir.BasicLit); ok { + } else { + panic("unexpected") + } + ndisp = exprLiterals + + case ir.ONAME: + nn := n.(*ir.Name) + if _, ok := ec.names[nn]; ok { + ndisp = exprSimple + ec.names[nn] = true + } else { + sv := ir.StaticValue(n) + if sv.Op() == ir.ONAME { + nn = sv.(*ir.Name) + } + if _, ok := ec.names[nn]; ok { + ndisp = exprSimple + ec.names[nn] = true + } + } + + case ir.ONOT, + ir.OPLUS, + ir.ONEG: + uex := n.(*ir.UnaryExpr) + ndisp = ec.getdisp(uex.X) + + case ir.OEQ, + ir.ONE, + ir.OLT, + ir.OGT, + ir.OGE, + ir.OLE: + // compare ops + x, y := binparts(n) + ndisp = ec.dispmeet(x, y) + if debugTrace&debugTraceExprClassify != 0 { + fmt.Fprintf(os.Stderr, "=-= meet(%s,%s) = %s for op=%s\n", + ec.getdisp(x), ec.getdisp(y), ec.dispmeet(x, y), + n.Op().String()) + } + case ir.OLSH, + ir.ORSH, + ir.ODIV, + ir.OMOD: + x, y := binparts(n) + if ec.getdisp(y) == exprLiterals { + ndisp = ec.dispmeet(x, y) + } + + case ir.OADD, + ir.OSUB, + ir.OOR, + ir.OXOR, + ir.OMUL, + ir.OAND, + ir.OANDNOT, + ir.OANDAND, + ir.OOROR: + x, y := binparts(n) + if debugTrace&debugTraceExprClassify != 0 { + fmt.Fprintf(os.Stderr, "=-= meet(%s,%s) = %s for op=%s\n", + ec.getdisp(x), ec.getdisp(y), ec.dispmeet(x, y), + n.Op().String()) + } + ndisp = ec.dispmeet(x, y) + } + } + + if debugTrace&debugTraceExprClassify != 0 { + fmt.Fprintf(os.Stderr, "=-= op=%s disp=%v\n", n.Op().String(), + ndisp.String()) + } + + ec.disposition[n] = ndisp +} + +func (ec *exprClassifier) getdisp(x ir.Node) disp { + if d, ok := ec.disposition[x]; ok { + return d + } else { + panic("missing node from disp table") + } +} + +// dispmeet performs a "meet" operation on the data flow states of +// node x and y (where the term "meet" is being drawn from traditional +// lattice-theoretical data flow analysis terminology). 
+func (ec *exprClassifier) dispmeet(x, y ir.Node) disp { + xd := ec.getdisp(x) + if xd == exprNoInfo { + return exprNoInfo + } + yd := ec.getdisp(y) + if yd == exprNoInfo { + return exprNoInfo + } + if xd == exprSimple || yd == exprSimple { + return exprSimple + } + if xd != exprLiterals || yd != exprLiterals { + panic("unexpected") + } + return exprLiterals +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/funcprop_string.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/funcprop_string.go new file mode 100644 index 0000000000000000000000000000000000000000..d16e4d3378194b01882d91340d44af24840eed42 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/funcprop_string.go @@ -0,0 +1,44 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inlheur + +import ( + "fmt" + "strings" +) + +func (fp *FuncProps) String() string { + return fp.ToString("") +} + +func (fp *FuncProps) ToString(prefix string) string { + var sb strings.Builder + if fp.Flags != 0 { + fmt.Fprintf(&sb, "%sFlags %s\n", prefix, fp.Flags) + } + flagSliceToSB[ParamPropBits](&sb, fp.ParamFlags, + prefix, "ParamFlags") + flagSliceToSB[ResultPropBits](&sb, fp.ResultFlags, + prefix, "ResultFlags") + return sb.String() +} + +func flagSliceToSB[T interface { + ~uint32 + String() string +}](sb *strings.Builder, sl []T, prefix string, tag string) { + var sb2 strings.Builder + foundnz := false + fmt.Fprintf(&sb2, "%s%s\n", prefix, tag) + for i, e := range sl { + if e != 0 { + foundnz = true + } + fmt.Fprintf(&sb2, "%s %d %s\n", prefix, i, e.String()) + } + if foundnz { + sb.WriteString(sb2.String()) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/funcpropbits_string.go 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/funcpropbits_string.go new file mode 100644 index 0000000000000000000000000000000000000000..28de4a9ced50207266c94a9875d8afbf5d987375 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/funcpropbits_string.go @@ -0,0 +1,58 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by "stringer -bitset -type FuncPropBits"; DO NOT EDIT. + +package inlheur + +import ( + "bytes" + "strconv" +) + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[FuncPropNeverReturns-1] +} + +var _FuncPropBits_value = [...]uint64{ + 0x1, /* FuncPropNeverReturns */ +} + +const _FuncPropBits_name = "FuncPropNeverReturns" + +var _FuncPropBits_index = [...]uint8{0, 20} + +func (i FuncPropBits) String() string { + var b bytes.Buffer + + remain := uint64(i) + seen := false + + for k, v := range _FuncPropBits_value { + x := _FuncPropBits_name[_FuncPropBits_index[k]:_FuncPropBits_index[k+1]] + if v == 0 { + if i == 0 { + b.WriteString(x) + return b.String() + } + continue + } + if (v & remain) == v { + remain &^= v + x := _FuncPropBits_name[_FuncPropBits_index[k]:_FuncPropBits_index[k+1]] + if seen { + b.WriteString("|") + } + seen = true + b.WriteString(x) + } + } + if remain == 0 { + return b.String() + } + return "FuncPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")" +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/funcprops_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/funcprops_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c04e604882d3bbb45a6a7c785b43d489cd7240d5 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/funcprops_test.go @@ -0,0 +1,530 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inlheur + +import ( + "bufio" + "encoding/json" + "flag" + "fmt" + "internal/testenv" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "testing" + "time" +) + +var remasterflag = flag.Bool("update-expected", false, "if true, generate updated golden results in testcases for all props tests") + +func TestFuncProperties(t *testing.T) { + td := t.TempDir() + // td = "/tmp/qqq" + // os.RemoveAll(td) + // os.Mkdir(td, 0777) + testenv.MustHaveGoBuild(t) + + // NOTE: this testpoint has the unfortunate characteristic that it + // relies on the installed compiler, meaning that if you make + // changes to the inline heuristics code in your working copy and + // then run the test, it will test the installed compiler and not + // your local modifications. TODO: decide whether to convert this + // to building a fresh compiler on the fly, or using some other + // scheme. + + testcases := []string{"funcflags", "returns", "params", + "acrosscall", "calls", "returns2"} + for _, tc := range testcases { + dumpfile, err := gatherPropsDumpForFile(t, tc, td) + if err != nil { + t.Fatalf("dumping func props for %q: error %v", tc, err) + } + // Read in the newly generated dump. + dentries, dcsites, derr := readDump(t, dumpfile) + if derr != nil { + t.Fatalf("reading func prop dump: %v", derr) + } + if *remasterflag { + updateExpected(t, tc, dentries, dcsites) + continue + } + // Generate expected dump. + epath, egerr := genExpected(td, tc) + if egerr != nil { + t.Fatalf("generating expected func prop dump: %v", egerr) + } + // Read in the expected result entries. 
+ eentries, ecsites, eerr := readDump(t, epath) + if eerr != nil { + t.Fatalf("reading expected func prop dump: %v", eerr) + } + // Compare new vs expected. + n := len(dentries) + eidx := 0 + for i := 0; i < n; i++ { + dentry := dentries[i] + dcst := dcsites[i] + if !interestingToCompare(dentry.fname) { + continue + } + if eidx >= len(eentries) { + t.Errorf("testcase %s missing expected entry for %s, skipping", tc, dentry.fname) + continue + } + eentry := eentries[eidx] + ecst := ecsites[eidx] + eidx++ + if dentry.fname != eentry.fname { + t.Errorf("got fn %q wanted %q, skipping checks", + dentry.fname, eentry.fname) + continue + } + compareEntries(t, tc, &dentry, dcst, &eentry, ecst) + } + } +} + +func propBitsToString[T interface{ String() string }](sl []T) string { + var sb strings.Builder + for i, f := range sl { + fmt.Fprintf(&sb, "%d: %s\n", i, f.String()) + } + return sb.String() +} + +func compareEntries(t *testing.T, tc string, dentry *fnInlHeur, dcsites encodedCallSiteTab, eentry *fnInlHeur, ecsites encodedCallSiteTab) { + dfp := dentry.props + efp := eentry.props + dfn := dentry.fname + + // Compare function flags. + if dfp.Flags != efp.Flags { + t.Errorf("testcase %q: Flags mismatch for %q: got %s, wanted %s", + tc, dfn, dfp.Flags.String(), efp.Flags.String()) + } + // Compare returns + rgot := propBitsToString[ResultPropBits](dfp.ResultFlags) + rwant := propBitsToString[ResultPropBits](efp.ResultFlags) + if rgot != rwant { + t.Errorf("testcase %q: Results mismatch for %q: got:\n%swant:\n%s", + tc, dfn, rgot, rwant) + } + // Compare receiver + params. + pgot := propBitsToString[ParamPropBits](dfp.ParamFlags) + pwant := propBitsToString[ParamPropBits](efp.ParamFlags) + if pgot != pwant { + t.Errorf("testcase %q: Params mismatch for %q: got:\n%swant:\n%s", + tc, dfn, pgot, pwant) + } + // Compare call sites. 
+ for k, ve := range ecsites { + if vd, ok := dcsites[k]; !ok { + t.Errorf("testcase %q missing expected callsite %q in func %q", tc, k, dfn) + continue + } else { + if vd != ve { + t.Errorf("testcase %q callsite %q in func %q: got %+v want %+v", + tc, k, dfn, vd.String(), ve.String()) + } + } + } + for k := range dcsites { + if _, ok := ecsites[k]; !ok { + t.Errorf("testcase %q unexpected extra callsite %q in func %q", tc, k, dfn) + } + } +} + +type dumpReader struct { + s *bufio.Scanner + t *testing.T + p string + ln int +} + +// readDump reads in the contents of a dump file produced +// by the "-d=dumpinlfuncprops=..." command line flag by the Go +// compiler. It breaks the dump down into separate sections +// by function, then deserializes each func section into a +// fnInlHeur object and returns a slice of those objects. +func readDump(t *testing.T, path string) ([]fnInlHeur, []encodedCallSiteTab, error) { + content, err := os.ReadFile(path) + if err != nil { + return nil, nil, err + } + dr := &dumpReader{ + s: bufio.NewScanner(strings.NewReader(string(content))), + t: t, + p: path, + ln: 1, + } + // consume header comment until preamble delimiter. 
+ found := false + for dr.scan() { + if dr.curLine() == preambleDelimiter { + found = true + break + } + } + if !found { + return nil, nil, fmt.Errorf("malformed testcase file %s, missing preamble delimiter", path) + } + res := []fnInlHeur{} + csres := []encodedCallSiteTab{} + for { + dentry, dcst, err := dr.readEntry() + if err != nil { + t.Fatalf("reading func prop dump: %v", err) + } + if dentry.fname == "" { + break + } + res = append(res, dentry) + csres = append(csres, dcst) + } + return res, csres, nil +} + +func (dr *dumpReader) scan() bool { + v := dr.s.Scan() + if v { + dr.ln++ + } + return v +} + +func (dr *dumpReader) curLine() string { + res := strings.TrimSpace(dr.s.Text()) + if !strings.HasPrefix(res, "// ") { + dr.t.Fatalf("malformed line %s:%d, no comment: %s", dr.p, dr.ln, res) + } + return res[3:] +} + +// readObjBlob reads in a series of commented lines until +// it hits a delimiter, then returns the contents of the comments. +func (dr *dumpReader) readObjBlob(delim string) (string, error) { + var sb strings.Builder + foundDelim := false + for dr.scan() { + line := dr.curLine() + if delim == line { + foundDelim = true + break + } + sb.WriteString(line + "\n") + } + if err := dr.s.Err(); err != nil { + return "", err + } + if !foundDelim { + return "", fmt.Errorf("malformed input %s, missing delimiter %q", + dr.p, delim) + } + return sb.String(), nil +} + +// readEntry reads a single function's worth of material from +// a file produced by the "-d=dumpinlfuncprops=..." command line +// flag. It deserializes the json for the func properties and +// returns the resulting properties and function name. 
EOF is +// signaled by a nil FuncProps return (with no error +func (dr *dumpReader) readEntry() (fnInlHeur, encodedCallSiteTab, error) { + var funcInlHeur fnInlHeur + var callsites encodedCallSiteTab + if !dr.scan() { + return funcInlHeur, callsites, nil + } + // first line contains info about function: file/name/line + info := dr.curLine() + chunks := strings.Fields(info) + funcInlHeur.file = chunks[0] + funcInlHeur.fname = chunks[1] + if _, err := fmt.Sscanf(chunks[2], "%d", &funcInlHeur.line); err != nil { + return funcInlHeur, callsites, fmt.Errorf("scanning line %q: %v", info, err) + } + // consume comments until and including delimiter + for { + if !dr.scan() { + break + } + if dr.curLine() == comDelimiter { + break + } + } + + // Consume JSON for encoded props. + dr.scan() + line := dr.curLine() + fp := &FuncProps{} + if err := json.Unmarshal([]byte(line), fp); err != nil { + return funcInlHeur, callsites, err + } + funcInlHeur.props = fp + + // Consume callsites. + callsites = make(encodedCallSiteTab) + for dr.scan() { + line := dr.curLine() + if line == csDelimiter { + break + } + // expected format: "// callsite: flagstr flagval score mask maskstr " + fields := strings.Fields(line) + if len(fields) != 12 { + return funcInlHeur, nil, fmt.Errorf("malformed callsite (nf=%d) %s line %d: %s", len(fields), dr.p, dr.ln, line) + } + if fields[2] != "flagstr" || fields[4] != "flagval" || fields[6] != "score" || fields[8] != "mask" || fields[10] != "maskstr" { + return funcInlHeur, nil, fmt.Errorf("malformed callsite %s line %d: %s", + dr.p, dr.ln, line) + } + tag := fields[1] + flagstr := fields[5] + flags, err := strconv.Atoi(flagstr) + if err != nil { + return funcInlHeur, nil, fmt.Errorf("bad flags val %s line %d: %q err=%v", + dr.p, dr.ln, line, err) + } + scorestr := fields[7] + score, err2 := strconv.Atoi(scorestr) + if err2 != nil { + return funcInlHeur, nil, fmt.Errorf("bad score val %s line %d: %q err=%v", + dr.p, dr.ln, line, err2) + } + maskstr := 
fields[9] + mask, err3 := strconv.Atoi(maskstr) + if err3 != nil { + return funcInlHeur, nil, fmt.Errorf("bad mask val %s line %d: %q err=%v", + dr.p, dr.ln, line, err3) + } + callsites[tag] = propsAndScore{ + props: CSPropBits(flags), + score: score, + mask: scoreAdjustTyp(mask), + } + } + + // Consume function delimiter. + dr.scan() + line = dr.curLine() + if line != fnDelimiter { + return funcInlHeur, nil, fmt.Errorf("malformed testcase file %q, missing delimiter %q", dr.p, fnDelimiter) + } + + return funcInlHeur, callsites, nil +} + +// gatherPropsDumpForFile builds the specified testcase 'testcase' from +// testdata/props passing the "-d=dumpinlfuncprops=..." compiler option, +// to produce a properties dump, then returns the path of the newly +// created file. NB: we can't use "go tool compile" here, since +// some of the test cases import stdlib packages (such as "os"). +// This means using "go build", which is problematic since the +// Go command can potentially cache the results of the compile step, +// causing the test to fail when being run interactively. E.g. +// +// $ rm -f dump.txt +// $ go build -o foo.a -gcflags=-d=dumpinlfuncprops=dump.txt foo.go +// $ rm -f dump.txt foo.a +// $ go build -o foo.a -gcflags=-d=dumpinlfuncprops=dump.txt foo.go +// $ ls foo.a dump.txt > /dev/null +// ls : cannot access 'dump.txt': No such file or directory +// $ +// +// For this reason, pick a unique filename for the dump, so as to +// defeat the caching. 
+func gatherPropsDumpForFile(t *testing.T, testcase string, td string) (string, error) { + t.Helper() + gopath := "testdata/props/" + testcase + ".go" + outpath := filepath.Join(td, testcase+".a") + salt := fmt.Sprintf(".p%dt%d", os.Getpid(), time.Now().UnixNano()) + dumpfile := filepath.Join(td, testcase+salt+".dump.txt") + run := []string{testenv.GoToolPath(t), "build", + "-gcflags=-d=dumpinlfuncprops=" + dumpfile, "-o", outpath, gopath} + out, err := testenv.Command(t, run[0], run[1:]...).CombinedOutput() + if err != nil { + t.Logf("compile command: %+v", run) + } + if strings.TrimSpace(string(out)) != "" { + t.Logf("%s", out) + } + return dumpfile, err +} + +// genExpected reads in a given Go testcase file, strips out all the +// unindented (column 0) commands, writes them out to a new file, and +// returns the path of that new file. By picking out just the comments +// from the Go file we wind up with something that resembles the +// output from a "-d=dumpinlfuncprops=..." compilation. 
+func genExpected(td string, testcase string) (string, error) { + epath := filepath.Join(td, testcase+".expected") + outf, err := os.OpenFile(epath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return "", err + } + gopath := "testdata/props/" + testcase + ".go" + content, err := os.ReadFile(gopath) + if err != nil { + return "", err + } + lines := strings.Split(string(content), "\n") + for _, line := range lines[3:] { + if !strings.HasPrefix(line, "// ") { + continue + } + fmt.Fprintf(outf, "%s\n", line) + } + if err := outf.Close(); err != nil { + return "", err + } + return epath, nil +} + +type upexState struct { + dentries []fnInlHeur + newgolines []string + atline map[uint]uint +} + +func mkUpexState(dentries []fnInlHeur) *upexState { + atline := make(map[uint]uint) + for _, e := range dentries { + atline[e.line] = atline[e.line] + 1 + } + return &upexState{ + dentries: dentries, + atline: atline, + } +} + +// updateExpected takes a given Go testcase file X.go and writes out a +// new/updated version of the file to X.go.new, where the column-0 +// "expected" comments have been updated using fresh data from +// "dentries". +// +// Writing of expected results is complicated by closures and by +// generics, where you can have multiple functions that all share the +// same starting line. Currently we combine up all the dups and +// closures into the single pre-func comment. +func updateExpected(t *testing.T, testcase string, dentries []fnInlHeur, dcsites []encodedCallSiteTab) { + nd := len(dentries) + + ues := mkUpexState(dentries) + + gopath := "testdata/props/" + testcase + ".go" + newgopath := "testdata/props/" + testcase + ".go.new" + + // Read the existing Go file. + content, err := os.ReadFile(gopath) + if err != nil { + t.Fatalf("opening %s: %v", gopath, err) + } + golines := strings.Split(string(content), "\n") + + // Preserve copyright. + ues.newgolines = append(ues.newgolines, golines[:4]...) 
+ if !strings.HasPrefix(golines[0], "// Copyright") { + t.Fatalf("missing copyright from existing testcase") + } + golines = golines[4:] + + clore := regexp.MustCompile(`.+\.func\d+[\.\d]*$`) + + emitFunc := func(e *fnInlHeur, dcsites encodedCallSiteTab, + instance, atl uint) { + var sb strings.Builder + dumpFnPreamble(&sb, e, dcsites, instance, atl) + ues.newgolines = append(ues.newgolines, + strings.Split(strings.TrimSpace(sb.String()), "\n")...) + } + + // Write file preamble with "DO NOT EDIT" message and such. + var sb strings.Builder + dumpFilePreamble(&sb) + ues.newgolines = append(ues.newgolines, + strings.Split(strings.TrimSpace(sb.String()), "\n")...) + + // Helper to add a clump of functions to the output file. + processClump := func(idx int, emit bool) int { + // Process func itself, plus anything else defined + // on the same line + atl := ues.atline[dentries[idx].line] + for k := uint(0); k < atl; k++ { + if emit { + emitFunc(&dentries[idx], dcsites[idx], k, atl) + } + idx++ + } + // now process any closures it contains + ncl := 0 + for idx < nd { + nfn := dentries[idx].fname + if !clore.MatchString(nfn) { + break + } + ncl++ + if emit { + emitFunc(&dentries[idx], dcsites[idx], 0, 1) + } + idx++ + } + return idx + } + + didx := 0 + for _, line := range golines { + if strings.HasPrefix(line, "func ") { + + // We have a function definition. + // Pick out the corresponding entry or entries in the dump + // and emit if interesting (or skip if not). + dentry := dentries[didx] + emit := interestingToCompare(dentry.fname) + didx = processClump(didx, emit) + } + + // Consume all existing comments. + if strings.HasPrefix(line, "//") { + continue + } + ues.newgolines = append(ues.newgolines, line) + } + + if didx != nd { + t.Logf("didx=%d wanted %d", didx, nd) + } + + // Open new Go file and write contents. 
+ of, err := os.OpenFile(newgopath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + t.Fatalf("opening %s: %v", newgopath, err) + } + fmt.Fprintf(of, "%s", strings.Join(ues.newgolines, "\n")) + if err := of.Close(); err != nil { + t.Fatalf("closing %s: %v", newgopath, err) + } + + t.Logf("update-expected: emitted updated file %s", newgopath) + t.Logf("please compare the two files, then overwrite %s with %s\n", + gopath, newgopath) +} + +// interestingToCompare returns TRUE if we want to compare results +// for function 'fname'. +func interestingToCompare(fname string) bool { + if strings.HasPrefix(fname, "init.") { + return true + } + if strings.HasPrefix(fname, "T_") { + return true + } + f := strings.Split(fname, ".") + if len(f) == 2 && strings.HasPrefix(f[1], "T_") { + return true + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/function_properties.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/function_properties.go new file mode 100644 index 0000000000000000000000000000000000000000..b90abf976a5cbc6aed633cd46b97d3d829267515 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/function_properties.go @@ -0,0 +1,98 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inlheur + +// This file defines a set of Go function "properties" intended to +// guide inlining heuristics; these properties may apply to the +// function as a whole, or to one or more function return values or +// parameters. +// +// IMPORTANT: function properties are produced on a "best effort" +// basis, meaning that the code that computes them doesn't verify that +// the properties are guaranteed to be true in 100% of cases. For this +// reason, properties should only be used to drive always-safe +// optimization decisions (e.g. 
"should I inline this call", or +// "should I unroll this loop") as opposed to potentially unsafe IR +// alterations that could change program semantics (e.g. "can I delete +// this variable" or "can I move this statement to a new location"). +// +//---------------------------------------------------------------- + +// FuncProps describes a set of function or method properties that may +// be useful for inlining heuristics. Here 'Flags' are properties that +// we think apply to the entire function; 'RecvrParamFlags' are +// properties of specific function params (or the receiver), and +// 'ResultFlags' are things properties we think will apply to values +// of specific results. Note that 'ParamFlags' includes and entry for +// the receiver if applicable, and does include etries for blank +// params; for a function such as "func foo(_ int, b byte, _ float32)" +// the length of ParamFlags will be 3. +type FuncProps struct { + Flags FuncPropBits + ParamFlags []ParamPropBits // slot 0 receiver if applicable + ResultFlags []ResultPropBits +} + +type FuncPropBits uint32 + +const ( + // Function always panics or invokes os.Exit() or a func that does + // likewise. + FuncPropNeverReturns FuncPropBits = 1 << iota +) + +type ParamPropBits uint32 + +const ( + // No info about this param + ParamNoInfo ParamPropBits = 0 + + // Parameter value feeds unmodified into a top-level interface + // call (this assumes the parameter is of interface type). + ParamFeedsInterfaceMethodCall ParamPropBits = 1 << iota + + // Parameter value feeds unmodified into an interface call that + // may be conditional/nested and not always executed (this assumes + // the parameter is of interface type). + ParamMayFeedInterfaceMethodCall ParamPropBits = 1 << iota + + // Parameter value feeds unmodified into a top level indirect + // function call (assumes parameter is of function type). 
+ ParamFeedsIndirectCall + + // Parameter value feeds unmodified into an indirect function call + // that is conditional/nested (not guaranteed to execute). Assumes + // parameter is of function type. + ParamMayFeedIndirectCall + + // Parameter value feeds unmodified into a top level "switch" + // statement or "if" statement simple expressions (see more on + // "simple" expression classification below). + ParamFeedsIfOrSwitch + + // Parameter value feeds unmodified into a "switch" or "if" + // statement simple expressions (see more on "simple" expression + // classification below), where the if/switch is + // conditional/nested. + ParamMayFeedIfOrSwitch +) + +type ResultPropBits uint32 + +const ( + // No info about this result + ResultNoInfo ResultPropBits = 0 + // This result always contains allocated memory. + ResultIsAllocatedMem ResultPropBits = 1 << iota + // This result is always a single concrete type that is + // implicitly converted to interface. + ResultIsConcreteTypeConvertedToInterface + // Result is always the same non-composite compile time constant. + ResultAlwaysSameConstant + // Result is always the same function or closure. + ResultAlwaysSameFunc + // Result is always the same (potentially) inlinable function or closure. + ResultAlwaysSameInlinableFunc +) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/names.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/names.go new file mode 100644 index 0000000000000000000000000000000000000000..022385087b7451fd3c99c2c738b3dcee6e9eb997 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/names.go @@ -0,0 +1,129 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package inlheur + +import ( + "cmd/compile/internal/ir" + "go/constant" +) + +// nameFinder provides a set of "isXXX" query methods for clients to +// ask whether a given AST node corresponds to a function, a constant +// value, and so on. These methods use an underlying ir.ReassignOracle +// to return more precise results in cases where an "interesting" +// value is assigned to a singly-defined local temp. Example: +// +// const q = 101 +// fq := func() int { return q } +// copyOfConstant := q +// copyOfFunc := f +// interestingCall(copyOfConstant, copyOfFunc) +// +// A name finder query method invoked on the arguments being passed to +// "interestingCall" will be able detect that 'copyOfConstant' always +// evaluates to a constant (even though it is in fact a PAUTO local +// variable). A given nameFinder can also operate without using +// ir.ReassignOracle (in cases where it is not practical to look +// at the entire function); in such cases queries will still work +// for explicit constant values and functions. +type nameFinder struct { + ro *ir.ReassignOracle +} + +// newNameFinder returns a new nameFinder object with a reassignment +// oracle initialized based on the function fn, or if fn is nil, +// without an underlying ReassignOracle. +func newNameFinder(fn *ir.Func) *nameFinder { + var ro *ir.ReassignOracle + if fn != nil { + ro = &ir.ReassignOracle{} + ro.Init(fn) + } + return &nameFinder{ro: ro} +} + +// funcName returns the *ir.Name for the func or method +// corresponding to node 'n', or nil if n can't be proven +// to contain a function value. +func (nf *nameFinder) funcName(n ir.Node) *ir.Name { + sv := n + if nf.ro != nil { + sv = nf.ro.StaticValue(n) + } + if name := ir.StaticCalleeName(sv); name != nil { + return name + } + return nil +} + +// isAllocatedMem returns true if node n corresponds to a memory +// allocation expression (make, new, or equivalent). 
+func (nf *nameFinder) isAllocatedMem(n ir.Node) bool { + sv := n + if nf.ro != nil { + sv = nf.ro.StaticValue(n) + } + switch sv.Op() { + case ir.OMAKESLICE, ir.ONEW, ir.OPTRLIT, ir.OSLICELIT: + return true + } + return false +} + +// constValue returns the underlying constant.Value for an AST node n +// if n is itself a constant value/expr, or if n is a singly assigned +// local containing constant expr/value (or nil not constant). +func (nf *nameFinder) constValue(n ir.Node) constant.Value { + sv := n + if nf.ro != nil { + sv = nf.ro.StaticValue(n) + } + if sv.Op() == ir.OLITERAL { + return sv.Val() + } + return nil +} + +// isNil returns whether n is nil (or singly +// assigned local containing nil). +func (nf *nameFinder) isNil(n ir.Node) bool { + sv := n + if nf.ro != nil { + sv = nf.ro.StaticValue(n) + } + return sv.Op() == ir.ONIL +} + +func (nf *nameFinder) staticValue(n ir.Node) ir.Node { + if nf.ro == nil { + return n + } + return nf.ro.StaticValue(n) +} + +func (nf *nameFinder) reassigned(n *ir.Name) bool { + if nf.ro == nil { + return true + } + return nf.ro.Reassigned(n) +} + +func (nf *nameFinder) isConcreteConvIface(n ir.Node) bool { + sv := n + if nf.ro != nil { + sv = nf.ro.StaticValue(n) + } + if sv.Op() != ir.OCONVIFACE { + return false + } + return !sv.(*ir.ConvExpr).X.Type().IsInterface() +} + +func isSameFuncName(v1, v2 *ir.Name) bool { + // NB: there are a few corner cases where pointer equality + // doesn't work here, but this should be good enough for + // our purposes here. 
+ return v1 == v2 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/parampropbits_string.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/parampropbits_string.go new file mode 100644 index 0000000000000000000000000000000000000000..bf4d3ca4ad24d65c53afe54171bc5978d847b287 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/parampropbits_string.go @@ -0,0 +1,70 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by "stringer -bitset -type ParamPropBits"; DO NOT EDIT. + +package inlheur + +import ( + "bytes" + "strconv" +) + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ParamNoInfo-0] + _ = x[ParamFeedsInterfaceMethodCall-2] + _ = x[ParamMayFeedInterfaceMethodCall-4] + _ = x[ParamFeedsIndirectCall-8] + _ = x[ParamMayFeedIndirectCall-16] + _ = x[ParamFeedsIfOrSwitch-32] + _ = x[ParamMayFeedIfOrSwitch-64] +} + +var _ParamPropBits_value = [...]uint64{ + 0x0, /* ParamNoInfo */ + 0x2, /* ParamFeedsInterfaceMethodCall */ + 0x4, /* ParamMayFeedInterfaceMethodCall */ + 0x8, /* ParamFeedsIndirectCall */ + 0x10, /* ParamMayFeedIndirectCall */ + 0x20, /* ParamFeedsIfOrSwitch */ + 0x40, /* ParamMayFeedIfOrSwitch */ +} + +const _ParamPropBits_name = "ParamNoInfoParamFeedsInterfaceMethodCallParamMayFeedInterfaceMethodCallParamFeedsIndirectCallParamMayFeedIndirectCallParamFeedsIfOrSwitchParamMayFeedIfOrSwitch" + +var _ParamPropBits_index = [...]uint8{0, 11, 40, 71, 93, 117, 137, 159} + +func (i ParamPropBits) String() string { + var b bytes.Buffer + + remain := uint64(i) + seen := false + + for k, v := range _ParamPropBits_value { + x := _ParamPropBits_name[_ParamPropBits_index[k]:_ParamPropBits_index[k+1]] + 
if v == 0 { + if i == 0 { + b.WriteString(x) + return b.String() + } + continue + } + if (v & remain) == v { + remain &^= v + x := _ParamPropBits_name[_ParamPropBits_index[k]:_ParamPropBits_index[k+1]] + if seen { + b.WriteString("|") + } + seen = true + b.WriteString(x) + } + } + if remain == 0 { + return b.String() + } + return "ParamPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")" +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/pstate_string.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/pstate_string.go new file mode 100644 index 0000000000000000000000000000000000000000..e6108d1318a9315b95830843aba6e8e5db02d92d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/pstate_string.go @@ -0,0 +1,30 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by "stringer -type pstate"; DO NOT EDIT. + +package inlheur + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[psNoInfo-0] + _ = x[psCallsPanic-1] + _ = x[psMayReturn-2] + _ = x[psTop-3] +} + +const _pstate_name = "psNoInfopsCallsPanicpsMayReturnpsTop" + +var _pstate_index = [...]uint8{0, 8, 20, 31, 36} + +func (i pstate) String() string { + if i < 0 || i >= pstate(len(_pstate_index)-1) { + return "pstate(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _pstate_name[_pstate_index[i]:_pstate_index[i+1]] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/resultpropbits_string.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/resultpropbits_string.go new file mode 100644 index 0000000000000000000000000000000000000000..888af98fc3a9c1a1d1ac9d34ae5367026bc295b1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/resultpropbits_string.go @@ -0,0 +1,68 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by "stringer -bitset -type ResultPropBits"; DO NOT EDIT. + +package inlheur + +import ( + "bytes" + "strconv" +) + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ResultNoInfo-0] + _ = x[ResultIsAllocatedMem-2] + _ = x[ResultIsConcreteTypeConvertedToInterface-4] + _ = x[ResultAlwaysSameConstant-8] + _ = x[ResultAlwaysSameFunc-16] + _ = x[ResultAlwaysSameInlinableFunc-32] +} + +var _ResultPropBits_value = [...]uint64{ + 0x0, /* ResultNoInfo */ + 0x2, /* ResultIsAllocatedMem */ + 0x4, /* ResultIsConcreteTypeConvertedToInterface */ + 0x8, /* ResultAlwaysSameConstant */ + 0x10, /* ResultAlwaysSameFunc */ + 0x20, /* ResultAlwaysSameInlinableFunc */ +} + +const _ResultPropBits_name = "ResultNoInfoResultIsAllocatedMemResultIsConcreteTypeConvertedToInterfaceResultAlwaysSameConstantResultAlwaysSameFuncResultAlwaysSameInlinableFunc" + +var _ResultPropBits_index = [...]uint8{0, 12, 32, 72, 96, 116, 145} + +func (i ResultPropBits) String() string { + var b bytes.Buffer + + remain := uint64(i) + seen := false + + for k, v := range _ResultPropBits_value { + x := _ResultPropBits_name[_ResultPropBits_index[k]:_ResultPropBits_index[k+1]] + if v == 0 { + if i == 0 { + b.WriteString(x) + return b.String() + } + continue + } + if (v & remain) == v { + remain &^= v + x := _ResultPropBits_name[_ResultPropBits_index[k]:_ResultPropBits_index[k+1]] + if seen { + b.WriteString("|") + } + seen = true + b.WriteString(x) + } + } + if remain == 0 { + return b.String() + } + return "ResultPropBits(0x" + strconv.FormatInt(int64(i), 16) + ")" +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/score_callresult_uses.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/score_callresult_uses.go new file mode 100644 index 0000000000000000000000000000000000000000..b95ea37d59b11e2cc086cf1068eb0aa5a22cc6ff --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/score_callresult_uses.go @@ -0,0 +1,413 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inlheur + +import ( + "cmd/compile/internal/ir" + "fmt" + "os" +) + +// This file contains code to re-score callsites based on how the +// results of the call were used. Example: +// +// func foo() { +// x, fptr := bar() +// switch x { +// case 10: fptr = baz() +// default: blix() +// } +// fptr(100) +// } +// +// The initial scoring pass will assign a score to "bar()" based on +// various criteria, however once the first pass of scoring is done, +// we look at the flags on the result from bar, and check to see +// how those results are used. If bar() always returns the same constant +// for its first result, and if the variable receiving that result +// isn't redefined, and if that variable feeds into an if/switch +// condition, then we will try to adjust the score for "bar" (on the +// theory that if we inlined, we can constant fold / deadcode). + +type resultPropAndCS struct { + defcs *CallSite + props ResultPropBits +} + +type resultUseAnalyzer struct { + resultNameTab map[*ir.Name]resultPropAndCS + fn *ir.Func + cstab CallSiteTab + *condLevelTracker +} + +// rescoreBasedOnCallResultUses examines how call results are used, +// and tries to update the scores of calls based on how their results +// are used in the function. 
+func (csa *callSiteAnalyzer) rescoreBasedOnCallResultUses(fn *ir.Func, resultNameTab map[*ir.Name]resultPropAndCS, cstab CallSiteTab) { + enableDebugTraceIfEnv() + rua := &resultUseAnalyzer{ + resultNameTab: resultNameTab, + fn: fn, + cstab: cstab, + condLevelTracker: new(condLevelTracker), + } + var doNode func(ir.Node) bool + doNode = func(n ir.Node) bool { + rua.nodeVisitPre(n) + ir.DoChildren(n, doNode) + rua.nodeVisitPost(n) + return false + } + doNode(fn) + disableDebugTrace() +} + +func (csa *callSiteAnalyzer) examineCallResults(cs *CallSite, resultNameTab map[*ir.Name]resultPropAndCS) map[*ir.Name]resultPropAndCS { + if debugTrace&debugTraceScoring != 0 { + fmt.Fprintf(os.Stderr, "=-= examining call results for %q\n", + EncodeCallSiteKey(cs)) + } + + // Invoke a helper to pick out the specific ir.Name's the results + // from this call are assigned into, e.g. "x, y := fooBar()". If + // the call is not part of an assignment statement, or if the + // variables in question are not newly defined, then we'll receive + // an empty list here. + // + names, autoTemps, props := namesDefined(cs) + if len(names) == 0 { + return resultNameTab + } + + if debugTrace&debugTraceScoring != 0 { + fmt.Fprintf(os.Stderr, "=-= %d names defined\n", len(names)) + } + + // For each returned value, if the value has interesting + // properties (ex: always returns the same constant), and the name + // in question is never redefined, then make an entry in the + // result table for it. 
+ const interesting = (ResultIsConcreteTypeConvertedToInterface | + ResultAlwaysSameConstant | ResultAlwaysSameInlinableFunc | ResultAlwaysSameFunc) + for idx, n := range names { + rprop := props.ResultFlags[idx] + + if debugTrace&debugTraceScoring != 0 { + fmt.Fprintf(os.Stderr, "=-= props for ret %d %q: %s\n", + idx, n.Sym().Name, rprop.String()) + } + + if rprop&interesting == 0 { + continue + } + if csa.nameFinder.reassigned(n) { + continue + } + if resultNameTab == nil { + resultNameTab = make(map[*ir.Name]resultPropAndCS) + } else if _, ok := resultNameTab[n]; ok { + panic("should never happen") + } + entry := resultPropAndCS{ + defcs: cs, + props: rprop, + } + resultNameTab[n] = entry + if autoTemps[idx] != nil { + resultNameTab[autoTemps[idx]] = entry + } + if debugTrace&debugTraceScoring != 0 { + fmt.Fprintf(os.Stderr, "=-= add resultNameTab table entry n=%v autotemp=%v props=%s\n", n, autoTemps[idx], rprop.String()) + } + } + return resultNameTab +} + +// namesDefined returns a list of ir.Name's corresponding to locals +// that receive the results from the call at site 'cs', plus the +// properties object for the called function. If a given result +// isn't cleanly assigned to a newly defined local, the +// slot for that result in the returned list will be nil. Example: +// +// call returned name list +// +// x := foo() [ x ] +// z, y := bar() [ nil, nil ] +// _, q := baz() [ nil, q ] +// +// In the case of a multi-return call, such as "x, y := foo()", +// the pattern we see from the front end will be a call op +// assigning to auto-temps, and then an assignment of the auto-temps +// to the user-level variables. In such cases we return +// first the user-level variable (in the first func result) +// and then the auto-temp name in the second result. +func namesDefined(cs *CallSite) ([]*ir.Name, []*ir.Name, *FuncProps) { + // If this call doesn't feed into an assignment (and of course not + // all calls do), then we don't have anything to work with here. 
+ if cs.Assign == nil { + return nil, nil, nil + } + funcInlHeur, ok := fpmap[cs.Callee] + if !ok { + // TODO: add an assert/panic here. + return nil, nil, nil + } + if len(funcInlHeur.props.ResultFlags) == 0 { + return nil, nil, nil + } + + // Single return case. + if len(funcInlHeur.props.ResultFlags) == 1 { + asgn, ok := cs.Assign.(*ir.AssignStmt) + if !ok { + return nil, nil, nil + } + // locate name being assigned + aname, ok := asgn.X.(*ir.Name) + if !ok { + return nil, nil, nil + } + return []*ir.Name{aname}, []*ir.Name{nil}, funcInlHeur.props + } + + // Multi-return case + asgn, ok := cs.Assign.(*ir.AssignListStmt) + if !ok || !asgn.Def { + return nil, nil, nil + } + userVars := make([]*ir.Name, len(funcInlHeur.props.ResultFlags)) + autoTemps := make([]*ir.Name, len(funcInlHeur.props.ResultFlags)) + for idx, x := range asgn.Lhs { + if n, ok := x.(*ir.Name); ok { + userVars[idx] = n + r := asgn.Rhs[idx] + if r.Op() == ir.OCONVNOP { + r = r.(*ir.ConvExpr).X + } + if ir.IsAutoTmp(r) { + autoTemps[idx] = r.(*ir.Name) + } + if debugTrace&debugTraceScoring != 0 { + fmt.Fprintf(os.Stderr, "=-= multi-ret namedef uv=%v at=%v\n", + x, autoTemps[idx]) + } + } else { + return nil, nil, nil + } + } + return userVars, autoTemps, funcInlHeur.props +} + +func (rua *resultUseAnalyzer) nodeVisitPost(n ir.Node) { + rua.condLevelTracker.post(n) +} + +func (rua *resultUseAnalyzer) nodeVisitPre(n ir.Node) { + rua.condLevelTracker.pre(n) + switch n.Op() { + case ir.OCALLINTER: + if debugTrace&debugTraceScoring != 0 { + fmt.Fprintf(os.Stderr, "=-= rescore examine iface call %v:\n", n) + } + rua.callTargetCheckResults(n) + case ir.OCALLFUNC: + if debugTrace&debugTraceScoring != 0 { + fmt.Fprintf(os.Stderr, "=-= rescore examine call %v:\n", n) + } + rua.callTargetCheckResults(n) + case ir.OIF: + ifst := n.(*ir.IfStmt) + rua.foldCheckResults(ifst.Cond) + case ir.OSWITCH: + swst := n.(*ir.SwitchStmt) + if swst.Tag != nil { + rua.foldCheckResults(swst.Tag) + } + + } +} + +// 
callTargetCheckResults examines a given call to see whether the +// callee expression is potentially an inlinable function returned +// from a potentially inlinable call. Examples: +// +// Scenario 1: named intermediate +// +// fn1 := foo() conc := bar() +// fn1("blah") conc.MyMethod() +// +// Scenario 2: returned func or concrete object feeds directly to call +// +// foo()("blah") bar().MyMethod() +// +// In the second case although at the source level the result of the +// direct call feeds right into the method call or indirect call, +// we're relying on the front end having inserted an auto-temp to +// capture the value. +func (rua *resultUseAnalyzer) callTargetCheckResults(call ir.Node) { + ce := call.(*ir.CallExpr) + rname := rua.getCallResultName(ce) + if rname == nil { + return + } + if debugTrace&debugTraceScoring != 0 { + fmt.Fprintf(os.Stderr, "=-= staticvalue returns %v:\n", + rname) + } + if rname.Class != ir.PAUTO { + return + } + switch call.Op() { + case ir.OCALLINTER: + if debugTrace&debugTraceScoring != 0 { + fmt.Fprintf(os.Stderr, "=-= in %s checking %v for cci prop:\n", + rua.fn.Sym().Name, rname) + } + if cs := rua.returnHasProp(rname, ResultIsConcreteTypeConvertedToInterface); cs != nil { + + adj := returnFeedsConcreteToInterfaceCallAdj + cs.Score, cs.ScoreMask = adjustScore(adj, cs.Score, cs.ScoreMask) + } + case ir.OCALLFUNC: + if debugTrace&debugTraceScoring != 0 { + fmt.Fprintf(os.Stderr, "=-= in %s checking %v for samefunc props:\n", + rua.fn.Sym().Name, rname) + v, ok := rua.resultNameTab[rname] + if !ok { + fmt.Fprintf(os.Stderr, "=-= no entry for %v in rt\n", rname) + } else { + fmt.Fprintf(os.Stderr, "=-= props for %v: %q\n", rname, v.props.String()) + } + } + if cs := rua.returnHasProp(rname, ResultAlwaysSameInlinableFunc); cs != nil { + adj := returnFeedsInlinableFuncToIndCallAdj + cs.Score, cs.ScoreMask = adjustScore(adj, cs.Score, cs.ScoreMask) + } else if cs := rua.returnHasProp(rname, ResultAlwaysSameFunc); cs != nil { + adj := 
returnFeedsFuncToIndCallAdj + cs.Score, cs.ScoreMask = adjustScore(adj, cs.Score, cs.ScoreMask) + + } + } +} + +// foldCheckResults examines the specified if/switch condition 'cond' +// to see if it refers to locals defined by a (potentially inlinable) +// function call at call site C, and if so, whether 'cond' contains +// only combinations of simple references to all of the names in +// 'names' with selected constants + operators. If these criteria are +// met, then we adjust the score for call site C to reflect the +// fact that inlining will enable deadcode and/or constant propagation. +// Note: for this heuristic to kick in, the names in question have to +// be all from the same callsite. Examples: +// +// q, r := baz() x, y := foo() +// switch q+r { a, b, c := bar() +// ... if x && y && a && b && c { +// } ... +// } +// +// For the call to "baz" above we apply a score adjustment, but not +// for the calls to "foo" or "bar". +func (rua *resultUseAnalyzer) foldCheckResults(cond ir.Node) { + namesUsed := collectNamesUsed(cond) + if len(namesUsed) == 0 { + return + } + var cs *CallSite + for _, n := range namesUsed { + rpcs, found := rua.resultNameTab[n] + if !found { + return + } + if cs != nil && rpcs.defcs != cs { + return + } + cs = rpcs.defcs + if rpcs.props&ResultAlwaysSameConstant == 0 { + return + } + } + if debugTrace&debugTraceScoring != 0 { + nls := func(nl []*ir.Name) string { + r := "" + for _, n := range nl { + r += " " + n.Sym().Name + } + return r + } + fmt.Fprintf(os.Stderr, "=-= calling ShouldFoldIfNameConstant on names={%s} cond=%v\n", nls(namesUsed), cond) + } + + if !ShouldFoldIfNameConstant(cond, namesUsed) { + return + } + adj := returnFeedsConstToIfAdj + cs.Score, cs.ScoreMask = adjustScore(adj, cs.Score, cs.ScoreMask) +} + +func collectNamesUsed(expr ir.Node) []*ir.Name { + res := []*ir.Name{} + ir.Visit(expr, func(n ir.Node) { + if n.Op() != ir.ONAME { + return + } + nn := n.(*ir.Name) + if nn.Class != ir.PAUTO { + return + } + res = 
append(res, nn) + }) + return res +} + +func (rua *resultUseAnalyzer) returnHasProp(name *ir.Name, prop ResultPropBits) *CallSite { + v, ok := rua.resultNameTab[name] + if !ok { + return nil + } + if v.props&prop == 0 { + return nil + } + return v.defcs +} + +func (rua *resultUseAnalyzer) getCallResultName(ce *ir.CallExpr) *ir.Name { + var callTarg ir.Node + if sel, ok := ce.Fun.(*ir.SelectorExpr); ok { + // method call + callTarg = sel.X + } else if ctarg, ok := ce.Fun.(*ir.Name); ok { + // regular call + callTarg = ctarg + } else { + return nil + } + r := ir.StaticValue(callTarg) + if debugTrace&debugTraceScoring != 0 { + fmt.Fprintf(os.Stderr, "=-= staticname on %v returns %v:\n", + callTarg, r) + } + if r.Op() == ir.OCALLFUNC { + // This corresponds to the "x := foo()" case; here + // ir.StaticValue has brought us all the way back to + // the call expression itself. We need to back off to + // the name defined by the call; do this by looking up + // the callsite. + ce := r.(*ir.CallExpr) + cs, ok := rua.cstab[ce] + if !ok { + return nil + } + names, _, _ := namesDefined(cs) + if len(names) == 0 { + return nil + } + return names[0] + } else if r.Op() == ir.ONAME { + return r.(*ir.Name) + } + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/scoreadjusttyp_string.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/scoreadjusttyp_string.go new file mode 100644 index 0000000000000000000000000000000000000000..f5b8bf6903275c0e59383f9fb74a5ebee2c2a845 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/scoreadjusttyp_string.go @@ -0,0 +1,80 @@ +// Code generated by "stringer -bitset -type scoreAdjustTyp"; DO NOT EDIT. + +package inlheur + +import "strconv" +import "bytes" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[panicPathAdj-1] + _ = x[initFuncAdj-2] + _ = x[inLoopAdj-4] + _ = x[passConstToIfAdj-8] + _ = x[passConstToNestedIfAdj-16] + _ = x[passConcreteToItfCallAdj-32] + _ = x[passConcreteToNestedItfCallAdj-64] + _ = x[passFuncToIndCallAdj-128] + _ = x[passFuncToNestedIndCallAdj-256] + _ = x[passInlinableFuncToIndCallAdj-512] + _ = x[passInlinableFuncToNestedIndCallAdj-1024] + _ = x[returnFeedsConstToIfAdj-2048] + _ = x[returnFeedsFuncToIndCallAdj-4096] + _ = x[returnFeedsInlinableFuncToIndCallAdj-8192] + _ = x[returnFeedsConcreteToInterfaceCallAdj-16384] +} + +var _scoreAdjustTyp_value = [...]uint64{ + 0x1, /* panicPathAdj */ + 0x2, /* initFuncAdj */ + 0x4, /* inLoopAdj */ + 0x8, /* passConstToIfAdj */ + 0x10, /* passConstToNestedIfAdj */ + 0x20, /* passConcreteToItfCallAdj */ + 0x40, /* passConcreteToNestedItfCallAdj */ + 0x80, /* passFuncToIndCallAdj */ + 0x100, /* passFuncToNestedIndCallAdj */ + 0x200, /* passInlinableFuncToIndCallAdj */ + 0x400, /* passInlinableFuncToNestedIndCallAdj */ + 0x800, /* returnFeedsConstToIfAdj */ + 0x1000, /* returnFeedsFuncToIndCallAdj */ + 0x2000, /* returnFeedsInlinableFuncToIndCallAdj */ + 0x4000, /* returnFeedsConcreteToInterfaceCallAdj */ +} + +const _scoreAdjustTyp_name = "panicPathAdjinitFuncAdjinLoopAdjpassConstToIfAdjpassConstToNestedIfAdjpassConcreteToItfCallAdjpassConcreteToNestedItfCallAdjpassFuncToIndCallAdjpassFuncToNestedIndCallAdjpassInlinableFuncToIndCallAdjpassInlinableFuncToNestedIndCallAdjreturnFeedsConstToIfAdjreturnFeedsFuncToIndCallAdjreturnFeedsInlinableFuncToIndCallAdjreturnFeedsConcreteToInterfaceCallAdj" + +var _scoreAdjustTyp_index = [...]uint16{0, 12, 23, 32, 48, 70, 94, 124, 144, 170, 199, 234, 257, 284, 320, 357} + +func (i scoreAdjustTyp) String() string { + var b bytes.Buffer + + remain := uint64(i) + seen := false + + for k, v := range _scoreAdjustTyp_value { + x := _scoreAdjustTyp_name[_scoreAdjustTyp_index[k]:_scoreAdjustTyp_index[k+1]] + if v == 0 { + if i == 0 { + 
b.WriteString(x) + return b.String() + } + continue + } + if (v & remain) == v { + remain &^= v + x := _scoreAdjustTyp_name[_scoreAdjustTyp_index[k]:_scoreAdjustTyp_index[k+1]] + if seen { + b.WriteString("|") + } + seen = true + b.WriteString(x) + } + } + if remain == 0 { + return b.String() + } + return "scoreAdjustTyp(0x" + strconv.FormatInt(int64(i), 16) + ")" +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/scoring.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/scoring.go new file mode 100644 index 0000000000000000000000000000000000000000..623ba8adf0e41aecb0b90c7fe96d87049d6eefe2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/scoring.go @@ -0,0 +1,751 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inlheur + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/pgo" + "cmd/compile/internal/types" + "fmt" + "os" + "sort" + "strconv" + "strings" +) + +// These constants enumerate the set of possible ways/scenarios +// in which we'll adjust the score of a given callsite. +type scoreAdjustTyp uint + +// These constants capture the various ways in which the inliner's +// scoring phase can adjust a callsite score based on heuristics. 
They +// fall broadly into three categories: +// +// 1) adjustments based solely on the callsite context (ex: call +// appears on panic path) +// +// 2) adjustments that take into account specific interesting values +// passed at a call site (ex: passing a constant that could result in +// cprop/deadcode in the caller) +// +// 3) adjustments that take into account values returned from the call +// at a callsite (ex: call always returns the same inlinable function, +// and return value flows unmodified into an indirect call) +// +// For categories 2 and 3 above, each adjustment can have either a +// "must" version and a "may" version (but not both). Here the idea is +// that in the "must" version the value flow is unconditional: if the +// callsite executes, then the condition we're interested in (ex: +// param feeding call) is guaranteed to happen. For the "may" version, +// there may be control flow that could cause the benefit to be +// bypassed. +const ( + // Category 1 adjustments (see above) + panicPathAdj scoreAdjustTyp = (1 << iota) + initFuncAdj + inLoopAdj + + // Category 2 adjustments (see above). + passConstToIfAdj + passConstToNestedIfAdj + passConcreteToItfCallAdj + passConcreteToNestedItfCallAdj + passFuncToIndCallAdj + passFuncToNestedIndCallAdj + passInlinableFuncToIndCallAdj + passInlinableFuncToNestedIndCallAdj + + // Category 3 adjustments. + returnFeedsConstToIfAdj + returnFeedsFuncToIndCallAdj + returnFeedsInlinableFuncToIndCallAdj + returnFeedsConcreteToInterfaceCallAdj + + sentinelScoreAdj // sentinel; not a real adjustment +) + +// This table records the specific values we use to adjust call +// site scores in a given scenario. +// NOTE: these numbers are chosen very arbitrarily; ideally +// we will go through some sort of turning process to decide +// what value for each one produces the best performance. 
+ +var adjValues = map[scoreAdjustTyp]int{ + panicPathAdj: 40, + initFuncAdj: 20, + inLoopAdj: -5, + passConstToIfAdj: -20, + passConstToNestedIfAdj: -15, + passConcreteToItfCallAdj: -30, + passConcreteToNestedItfCallAdj: -25, + passFuncToIndCallAdj: -25, + passFuncToNestedIndCallAdj: -20, + passInlinableFuncToIndCallAdj: -45, + passInlinableFuncToNestedIndCallAdj: -40, + returnFeedsConstToIfAdj: -15, + returnFeedsFuncToIndCallAdj: -25, + returnFeedsInlinableFuncToIndCallAdj: -40, + returnFeedsConcreteToInterfaceCallAdj: -25, +} + +// SetupScoreAdjustments interprets the value of the -d=inlscoreadj +// debugging option, if set. The value of this flag is expected to be +// a series of "/"-separated clauses of the form adj1:value1. Example: +// -d=inlscoreadj=inLoopAdj=0/passConstToIfAdj=-99 +func SetupScoreAdjustments() { + if base.Debug.InlScoreAdj == "" { + return + } + if err := parseScoreAdj(base.Debug.InlScoreAdj); err != nil { + base.Fatalf("malformed -d=inlscoreadj argument %q: %v", + base.Debug.InlScoreAdj, err) + } +} + +func adjStringToVal(s string) (scoreAdjustTyp, bool) { + for adj := scoreAdjustTyp(1); adj < sentinelScoreAdj; adj <<= 1 { + if adj.String() == s { + return adj, true + } + } + return 0, false +} + +func parseScoreAdj(val string) error { + clauses := strings.Split(val, "/") + if len(clauses) == 0 { + return fmt.Errorf("no clauses") + } + for _, clause := range clauses { + elems := strings.Split(clause, ":") + if len(elems) < 2 { + return fmt.Errorf("clause %q: expected colon", clause) + } + if len(elems) != 2 { + return fmt.Errorf("clause %q has %d elements, wanted 2", clause, + len(elems)) + } + adj, ok := adjStringToVal(elems[0]) + if !ok { + return fmt.Errorf("clause %q: unknown adjustment", clause) + } + val, err := strconv.Atoi(elems[1]) + if err != nil { + return fmt.Errorf("clause %q: malformed value: %v", clause, err) + } + adjValues[adj] = val + } + return nil +} + +func adjValue(x scoreAdjustTyp) int { + if val, ok := 
adjValues[x]; ok { + return val + } else { + panic("internal error unregistered adjustment type") + } +} + +var mayMustAdj = [...]struct{ may, must scoreAdjustTyp }{ + {may: passConstToNestedIfAdj, must: passConstToIfAdj}, + {may: passConcreteToNestedItfCallAdj, must: passConcreteToItfCallAdj}, + {may: passFuncToNestedIndCallAdj, must: passFuncToNestedIndCallAdj}, + {may: passInlinableFuncToNestedIndCallAdj, must: passInlinableFuncToIndCallAdj}, +} + +func isMay(x scoreAdjustTyp) bool { + return mayToMust(x) != 0 +} + +func isMust(x scoreAdjustTyp) bool { + return mustToMay(x) != 0 +} + +func mayToMust(x scoreAdjustTyp) scoreAdjustTyp { + for _, v := range mayMustAdj { + if x == v.may { + return v.must + } + } + return 0 +} + +func mustToMay(x scoreAdjustTyp) scoreAdjustTyp { + for _, v := range mayMustAdj { + if x == v.must { + return v.may + } + } + return 0 +} + +// computeCallSiteScore takes a given call site whose ir node is +// 'call' and callee function is 'callee' and with previously computed +// call site properties 'csflags', then computes a score for the +// callsite that combines the size cost of the callee with heuristics +// based on previously computed argument and function properties, +// then stores the score and the adjustment mask in the appropriate +// fields in 'cs' +func (cs *CallSite) computeCallSiteScore(csa *callSiteAnalyzer, calleeProps *FuncProps) { + callee := cs.Callee + csflags := cs.Flags + call := cs.Call + + // Start with the size-based score for the callee. + score := int(callee.Inl.Cost) + var tmask scoreAdjustTyp + + if debugTrace&debugTraceScoring != 0 { + fmt.Fprintf(os.Stderr, "=-= scoring call to %s at %s , initial=%d\n", + callee.Sym().Name, fmtFullPos(call.Pos()), score) + } + + // First some score adjustments to discourage inlining in selected cases. 
+ if csflags&CallSiteOnPanicPath != 0 { + score, tmask = adjustScore(panicPathAdj, score, tmask) + } + if csflags&CallSiteInInitFunc != 0 { + score, tmask = adjustScore(initFuncAdj, score, tmask) + } + + // Then adjustments to encourage inlining in selected cases. + if csflags&CallSiteInLoop != 0 { + score, tmask = adjustScore(inLoopAdj, score, tmask) + } + + // Stop here if no callee props. + if calleeProps == nil { + cs.Score, cs.ScoreMask = score, tmask + return + } + + // Walk through the actual expressions being passed at the call. + calleeRecvrParms := callee.Type().RecvParams() + for idx := range call.Args { + // ignore blanks + if calleeRecvrParms[idx].Sym == nil || + calleeRecvrParms[idx].Sym.IsBlank() { + continue + } + arg := call.Args[idx] + pflag := calleeProps.ParamFlags[idx] + if debugTrace&debugTraceScoring != 0 { + fmt.Fprintf(os.Stderr, "=-= arg %d of %d: val %v flags=%s\n", + idx, len(call.Args), arg, pflag.String()) + } + + if len(cs.ArgProps) == 0 { + continue + } + argProps := cs.ArgProps[idx] + + if debugTrace&debugTraceScoring != 0 { + fmt.Fprintf(os.Stderr, "=-= arg %d props %s value %v\n", + idx, argProps.String(), arg) + } + + if argProps&ActualExprConstant != 0 { + if pflag&ParamMayFeedIfOrSwitch != 0 { + score, tmask = adjustScore(passConstToNestedIfAdj, score, tmask) + } + if pflag&ParamFeedsIfOrSwitch != 0 { + score, tmask = adjustScore(passConstToIfAdj, score, tmask) + } + } + + if argProps&ActualExprIsConcreteConvIface != 0 { + // FIXME: ideally here it would be nice to make a + // distinction between the inlinable case and the + // non-inlinable case, but this is hard to do. 
Example: + // + // type I interface { Tiny() int; Giant() } + // type Conc struct { x int } + // func (c *Conc) Tiny() int { return 42 } + // func (c *Conc) Giant() { } + // + // func passConcToItf(c *Conc) { + // makesItfMethodCall(c) + // } + // + // In the code above, function properties will only tell + // us that 'makesItfMethodCall' invokes a method on its + // interface parameter, but we don't know whether it calls + // "Tiny" or "Giant". If we knew if called "Tiny", then in + // theory in addition to converting the interface call to + // a direct call, we could also inline (in which case + // we'd want to decrease the score even more). + // + // One thing we could do (not yet implemented) is iterate + // through all of the methods of "*Conc" that allow it to + // satisfy I, and if all are inlinable, then exploit that. + if pflag&ParamMayFeedInterfaceMethodCall != 0 { + score, tmask = adjustScore(passConcreteToNestedItfCallAdj, score, tmask) + } + if pflag&ParamFeedsInterfaceMethodCall != 0 { + score, tmask = adjustScore(passConcreteToItfCallAdj, score, tmask) + } + } + + if argProps&(ActualExprIsFunc|ActualExprIsInlinableFunc) != 0 { + mayadj := passFuncToNestedIndCallAdj + mustadj := passFuncToIndCallAdj + if argProps&ActualExprIsInlinableFunc != 0 { + mayadj = passInlinableFuncToNestedIndCallAdj + mustadj = passInlinableFuncToIndCallAdj + } + if pflag&ParamMayFeedIndirectCall != 0 { + score, tmask = adjustScore(mayadj, score, tmask) + } + if pflag&ParamFeedsIndirectCall != 0 { + score, tmask = adjustScore(mustadj, score, tmask) + } + } + } + + cs.Score, cs.ScoreMask = score, tmask +} + +func adjustScore(typ scoreAdjustTyp, score int, mask scoreAdjustTyp) (int, scoreAdjustTyp) { + + if isMust(typ) { + if mask&typ != 0 { + return score, mask + } + may := mustToMay(typ) + if mask&may != 0 { + // promote may to must, so undo may + score -= adjValue(may) + mask &^= may + } + } else if isMay(typ) { + must := mayToMust(typ) + if mask&(must|typ) != 0 { + return 
score, mask + } + } + if mask&typ == 0 { + if debugTrace&debugTraceScoring != 0 { + fmt.Fprintf(os.Stderr, "=-= applying adj %d for %s\n", + adjValue(typ), typ.String()) + } + score += adjValue(typ) + mask |= typ + } + return score, mask +} + +var resultFlagToPositiveAdj map[ResultPropBits]scoreAdjustTyp +var paramFlagToPositiveAdj map[ParamPropBits]scoreAdjustTyp + +func setupFlagToAdjMaps() { + resultFlagToPositiveAdj = map[ResultPropBits]scoreAdjustTyp{ + ResultIsAllocatedMem: returnFeedsConcreteToInterfaceCallAdj, + ResultAlwaysSameFunc: returnFeedsFuncToIndCallAdj, + ResultAlwaysSameConstant: returnFeedsConstToIfAdj, + } + paramFlagToPositiveAdj = map[ParamPropBits]scoreAdjustTyp{ + ParamMayFeedInterfaceMethodCall: passConcreteToNestedItfCallAdj, + ParamFeedsInterfaceMethodCall: passConcreteToItfCallAdj, + ParamMayFeedIndirectCall: passInlinableFuncToNestedIndCallAdj, + ParamFeedsIndirectCall: passInlinableFuncToIndCallAdj, + } +} + +// LargestNegativeScoreAdjustment tries to estimate the largest possible +// negative score adjustment that could be applied to a call of the +// function with the specified props. Example: +// +// func foo() { func bar(x int, p *int) int { +// ... if x < 0 { *p = x } +// } return 99 +// } +// +// Function 'foo' above on the left has no interesting properties, +// thus as a result the most we'll adjust any call to is the value for +// "call in loop". If the calculated cost of the function is 150, and +// the in-loop adjustment is 5 (for example), then there is not much +// point treating it as inlinable. On the other hand "bar" has a param +// property (parameter "x" feeds unmodified to an "if" statement") and +// a return property (always returns same constant) meaning that a +// given call _could_ be rescored down as much as -35 points-- thus if +// the size of "bar" is 100 (for example) then there is at least a +// chance that scoring will enable inlining. 
func LargestNegativeScoreAdjustment(fn *ir.Func, props *FuncProps) int {
	// Lazily build the flag-to-adjustment lookup tables on first use.
	if resultFlagToPositiveAdj == nil {
		setupFlagToAdjMaps()
	}
	var tmask scoreAdjustTyp
	score := adjValues[inLoopAdj] // any call can be in a loop
	// Fold in the best-case adjustment for each param/result property
	// the function has. Note: lookups here require an exact flag match
	// against the table key; adjustScore dedupes via tmask.
	for _, pf := range props.ParamFlags {
		if adj, ok := paramFlagToPositiveAdj[pf]; ok {
			score, tmask = adjustScore(adj, score, tmask)
		}
	}
	for _, rf := range props.ResultFlags {
		if adj, ok := resultFlagToPositiveAdj[rf]; ok {
			score, tmask = adjustScore(adj, score, tmask)
		}
	}

	if debugTrace&debugTraceScoring != 0 {
		fmt.Fprintf(os.Stderr, "=-= largestScore(%v) is %d\n",
			fn, score)
	}

	return score
}

// LargestPositiveScoreAdjustment tries to estimate the largest possible
// positive score adjustment that could be applied to a given callsite.
// At the moment we don't have very many positive score adjustments, so
// this is just hard-coded, not table-driven.
func LargestPositiveScoreAdjustment(fn *ir.Func) int {
	return adjValues[panicPathAdj] + adjValues[initFuncAdj]
}

// callSiteTab contains entries for each call in the function
// currently being processed by InlineCalls; this variable will either
// be set to 'cstabCache' below (for non-inlinable routines) or to the
// local 'cstab' entry in the fnInlHeur object for inlinable routines.
//
// NOTE: this assumes that inlining operations are happening in a serial,
// single-threaded fashion, which is true today but probably won't hold
// in the future (for example, we might want to score the callsites
// in multiple functions in parallel); if the inliner evolves in this
// direction we'll need to come up with a different approach here.
var callSiteTab CallSiteTab

// scoreCallsCache caches a call site table and call site list between
// invocations of ScoreCalls so that we can reuse previously allocated
// storage.
var scoreCallsCache scoreCallsCacheType

// scoreCallsCacheType holds the reusable storage: 'tab' is a callsite
// table recycled across non-inlinable functions (cleared, not freed,
// by ScoreCallsCleanup), and 'csl' is a scratch slice for sorting.
type scoreCallsCacheType struct {
	tab CallSiteTab
	csl []*CallSite
}

// ScoreCalls assigns numeric scores to each of the callsites in
// function 'fn'; the lower the score, the more helpful we think it
// will be to inline.
//
// Unlike a lot of the other inline heuristics machinery, callsite
// scoring can't be done as part of the CanInline call for a function,
// due to fact that we may be working on a non-trivial SCC. So for
// example with this SCC:
//
//	func foo(x int) {           func bar(x int, f func()) {
//	  if x != 0 {                  f()
//	    bar(x, func(){})           foo(x-1)
//	  }                         }
//	}
//
// We don't want to perform scoring for the 'foo' call in "bar" until
// after foo has been analyzed, but it's conceivable that CanInline
// might visit bar before foo for this SCC.
func ScoreCalls(fn *ir.Func) {
	if len(fn.Body) == 0 {
		return
	}
	enableDebugTraceIfEnv()

	nameFinder := newNameFinder(fn)

	if debugTrace&debugTraceScoring != 0 {
		fmt.Fprintf(os.Stderr, "=-= ScoreCalls(%v)\n", ir.FuncName(fn))
	}

	// If this is an inlinable function, use the precomputed
	// call site table for it. If the function wasn't an inline
	// candidate, collect a callsite table for it now.
	var cstab CallSiteTab
	if funcInlHeur, ok := fpmap[fn]; ok {
		cstab = funcInlHeur.cstab
	} else {
		// A non-empty cached table means the previous caller never
		// invoked ScoreCallsCleanup -- flag the protocol violation.
		if len(scoreCallsCache.tab) != 0 {
			panic("missing call to ScoreCallsCleanup")
		}
		if scoreCallsCache.tab == nil {
			scoreCallsCache.tab = make(CallSiteTab)
		}
		if debugTrace&debugTraceScoring != 0 {
			fmt.Fprintf(os.Stderr, "=-= building cstab for non-inl func %s\n",
				ir.FuncName(fn))
		}
		cstab = computeCallSiteTable(fn, fn.Body, scoreCallsCache.tab, nil, 0,
			nameFinder)
	}

	csa := makeCallSiteAnalyzer(fn)
	const doCallResults = true
	csa.scoreCallsRegion(fn, fn.Body, cstab, doCallResults, nil)

	disableDebugTrace()
}

// scoreCallsRegion assigns numeric scores to each of the callsites in
// region 'region' within function 'fn'. This can be called on
// an entire function, or with 'region' set to a chunk of
// code corresponding to an inlined call.
func (csa *callSiteAnalyzer) scoreCallsRegion(fn *ir.Func, region ir.Nodes, cstab CallSiteTab, doCallResults bool, ic *ir.InlinedCallExpr) {
	if debugTrace&debugTraceScoring != 0 {
		fmt.Fprintf(os.Stderr, "=-= scoreCallsRegion(%v, %s) len(cstab)=%d\n",
			ir.FuncName(fn), region[0].Op().String(), len(cstab))
	}

	// Sort callsites to avoid any surprises with non deterministic
	// map iteration order (this is probably not needed, but here just
	// in case).
	csl := scoreCallsCache.csl[:0]
	for _, cs := range cstab {
		csl = append(csl, cs)
	}
	// Stash the (possibly regrown) backing slice back in the cache,
	// length reset to zero, so the next invocation can reuse it.
	scoreCallsCache.csl = csl[:0]
	sort.Slice(csl, func(i, j int) bool {
		return csl[i].ID < csl[j].ID
	})

	// Score each call site.
	var resultNameTab map[*ir.Name]resultPropAndCS
	for _, cs := range csl {
		var cprops *FuncProps
		fihcprops := false
		desercprops := false
		if funcInlHeur, ok := fpmap[cs.Callee]; ok {
			// Callee analyzed in this compilation unit: use its props.
			cprops = funcInlHeur.props
			fihcprops = true
		} else if cs.Callee.Inl != nil {
			// Imported inlinable callee: recover props from export data.
			cprops = DeserializeFromString(cs.Callee.Inl.Properties)
			desercprops = true
		} else {
			if base.Debug.DumpInlFuncProps != "" {
				fmt.Fprintf(os.Stderr, "=-= *** unable to score call to %s from %s\n", cs.Callee.Sym().Name, fmtFullPos(cs.Call.Pos()))
				panic("should never happen")
			} else {
				continue
			}
		}
		cs.computeCallSiteScore(csa, cprops)

		if doCallResults {
			if debugTrace&debugTraceScoring != 0 {
				fmt.Fprintf(os.Stderr, "=-= examineCallResults at %s: flags=%d score=%d funcInlHeur=%v deser=%v\n", fmtFullPos(cs.Call.Pos()), cs.Flags, cs.Score, fihcprops, desercprops)
			}
			resultNameTab = csa.examineCallResults(cs, resultNameTab)
		}

		if debugTrace&debugTraceScoring != 0 {
			fmt.Fprintf(os.Stderr, "=-= scoring call at %s: flags=%d score=%d funcInlHeur=%v deser=%v\n", fmtFullPos(cs.Call.Pos()), cs.Flags, cs.Score, fihcprops, desercprops)
		}
	}

	if resultNameTab != nil {
		csa.rescoreBasedOnCallResultUses(fn, resultNameTab, cstab)
	}

	disableDebugTrace()

	if ic != nil && callSiteTab != nil {
		// Integrate the calls from this cstab into the table for the caller.
		if err := callSiteTab.merge(cstab); err != nil {
			base.FatalfAt(ic.Pos(), "%v", err)
		}
	} else {
		callSiteTab = cstab
	}
}

// ScoreCallsCleanup resets the state of the callsite cache
// once ScoreCalls is done with a function.
func ScoreCallsCleanup() {
	if base.Debug.DumpInlCallSiteScores != 0 {
		// Preserve the scored sites for the end-of-compilation dump.
		if allCallSites == nil {
			allCallSites = make(CallSiteTab)
		}
		for call, cs := range callSiteTab {
			allCallSites[call] = cs
		}
	}
	// Clear (but keep the storage of) the cached table for reuse.
	for k := range scoreCallsCache.tab {
		delete(scoreCallsCache.tab, k)
	}
}

// GetCallSiteScore returns the previously calculated score for call
// within fn.
+func GetCallSiteScore(fn *ir.Func, call *ir.CallExpr) (int, bool) { + if funcInlHeur, ok := fpmap[fn]; ok { + if cs, ok := funcInlHeur.cstab[call]; ok { + return cs.Score, true + } + } + if cs, ok := callSiteTab[call]; ok { + return cs.Score, true + } + return 0, false +} + +// BudgetExpansion returns the amount to relax/expand the base +// inlining budget when the new inliner is turned on; the inliner +// will add the returned value to the hairyness budget. +// +// Background: with the new inliner, the score for a given callsite +// can be adjusted down by some amount due to heuristics, however we +// won't know whether this is going to happen until much later after +// the CanInline call. This function returns the amount to relax the +// budget initially (to allow for a large score adjustment); later on +// in RevisitInlinability we'll look at each individual function to +// demote it if needed. +func BudgetExpansion(maxBudget int32) int32 { + if base.Debug.InlBudgetSlack != 0 { + return int32(base.Debug.InlBudgetSlack) + } + // In the default case, return maxBudget, which will effectively + // double the budget from 80 to 160; this should be good enough + // for most cases. + return maxBudget +} + +var allCallSites CallSiteTab + +// DumpInlCallSiteScores is invoked by the inliner if the debug flag +// "-d=dumpinlcallsitescores" is set; it dumps out a human-readable +// summary of all (potentially) inlinable callsites in the package, +// along with info on call site scoring and the adjustments made to a +// given score. Here profile is the PGO profile in use (may be +// nil), budgetCallback is a callback that can be invoked to find out +// the original pre-adjustment hairyness limit for the function, and +// inlineHotMaxBudget is the constant of the same name used in the +// inliner. 
Sample output lines: +// +// Score Adjustment Status Callee CallerPos ScoreFlags +// 115 40 DEMOTED cmd/compile/internal/abi.(*ABIParamAssignment).Offset expand_calls.go:1679:14|6 panicPathAdj +// 76 -5n PROMOTED runtime.persistentalloc mcheckmark.go:48:45|3 inLoopAdj +// 201 0 --- PGO unicode.DecodeRuneInString utf8.go:312:30|1 +// 7 -5 --- PGO internal/abi.Name.DataChecked type.go:625:22|0 inLoopAdj +// +// In the dump above, "Score" is the final score calculated for the +// callsite, "Adjustment" is the amount added to or subtracted from +// the original hairyness estimate to form the score. "Status" shows +// whether anything changed with the site -- did the adjustment bump +// it down just below the threshold ("PROMOTED") or instead bump it +// above the threshold ("DEMOTED"); this will be blank ("---") if no +// threshold was crossed as a result of the heuristics. Note that +// "Status" also shows whether PGO was involved. "Callee" is the name +// of the function called, "CallerPos" is the position of the +// callsite, and "ScoreFlags" is a digest of the specific properties +// we used to make adjustments to callsite score via heuristics. 
+func DumpInlCallSiteScores(profile *pgo.Profile, budgetCallback func(fn *ir.Func, profile *pgo.Profile) (int32, bool)) { + + var indirectlyDueToPromotion func(cs *CallSite) bool + indirectlyDueToPromotion = func(cs *CallSite) bool { + bud, _ := budgetCallback(cs.Callee, profile) + hairyval := cs.Callee.Inl.Cost + score := int32(cs.Score) + if hairyval > bud && score <= bud { + return true + } + if cs.parent != nil { + return indirectlyDueToPromotion(cs.parent) + } + return false + } + + genstatus := func(cs *CallSite) string { + hairyval := cs.Callee.Inl.Cost + bud, isPGO := budgetCallback(cs.Callee, profile) + score := int32(cs.Score) + st := "---" + expinl := false + switch { + case hairyval <= bud && score <= bud: + // "Normal" inlined case: hairy val sufficiently low that + // it would have been inlined anyway without heuristics. + expinl = true + case hairyval > bud && score > bud: + // "Normal" not inlined case: hairy val sufficiently high + // and scoring didn't lower it. + case hairyval > bud && score <= bud: + // Promoted: we would not have inlined it before, but + // after score adjustment we decided to inline. + st = "PROMOTED" + expinl = true + case hairyval <= bud && score > bud: + // Demoted: we would have inlined it before, but after + // score adjustment we decided not to inline. 
+ st = "DEMOTED" + } + inlined := cs.aux&csAuxInlined != 0 + indprom := false + if cs.parent != nil { + indprom = indirectlyDueToPromotion(cs.parent) + } + if inlined && indprom { + st += "|INDPROM" + } + if inlined && !expinl { + st += "|[NI?]" + } else if !inlined && expinl { + st += "|[IN?]" + } + if isPGO { + st += "|PGO" + } + return st + } + + if base.Debug.DumpInlCallSiteScores != 0 { + var sl []*CallSite + for _, cs := range allCallSites { + sl = append(sl, cs) + } + sort.Slice(sl, func(i, j int) bool { + if sl[i].Score != sl[j].Score { + return sl[i].Score < sl[j].Score + } + fni := ir.PkgFuncName(sl[i].Callee) + fnj := ir.PkgFuncName(sl[j].Callee) + if fni != fnj { + return fni < fnj + } + ecsi := EncodeCallSiteKey(sl[i]) + ecsj := EncodeCallSiteKey(sl[j]) + return ecsi < ecsj + }) + + mkname := func(fn *ir.Func) string { + var n string + if fn == nil || fn.Nname == nil { + return "" + } + if fn.Sym().Pkg == types.LocalPkg { + n = "·" + fn.Sym().Name + } else { + n = ir.PkgFuncName(fn) + } + // don't try to print super-long names + if len(n) <= 64 { + return n + } + return n[:32] + "..." 
+ n[len(n)-32:len(n)] + } + + if len(sl) != 0 { + fmt.Fprintf(os.Stdout, "# scores for package %s\n", types.LocalPkg.Path) + fmt.Fprintf(os.Stdout, "# Score Adjustment Status Callee CallerPos Flags ScoreFlags\n") + } + for _, cs := range sl { + hairyval := cs.Callee.Inl.Cost + adj := int32(cs.Score) - hairyval + nm := mkname(cs.Callee) + ecc := EncodeCallSiteKey(cs) + fmt.Fprintf(os.Stdout, "%d %d\t%s\t%s\t%s\t%s\n", + cs.Score, adj, genstatus(cs), + nm, ecc, + cs.ScoreMask.String()) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/serialize.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/serialize.go new file mode 100644 index 0000000000000000000000000000000000000000..d650626679e7874da8b2f64bfbaba46b5e01f023 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/serialize.go @@ -0,0 +1,80 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package inlheur + +import "strings" + +func (funcProps *FuncProps) SerializeToString() string { + if funcProps == nil { + return "" + } + var sb strings.Builder + writeUleb128(&sb, uint64(funcProps.Flags)) + writeUleb128(&sb, uint64(len(funcProps.ParamFlags))) + for _, pf := range funcProps.ParamFlags { + writeUleb128(&sb, uint64(pf)) + } + writeUleb128(&sb, uint64(len(funcProps.ResultFlags))) + for _, rf := range funcProps.ResultFlags { + writeUleb128(&sb, uint64(rf)) + } + return sb.String() +} + +func DeserializeFromString(s string) *FuncProps { + if len(s) == 0 { + return nil + } + var funcProps FuncProps + var v uint64 + sl := []byte(s) + v, sl = readULEB128(sl) + funcProps.Flags = FuncPropBits(v) + v, sl = readULEB128(sl) + funcProps.ParamFlags = make([]ParamPropBits, v) + for i := range funcProps.ParamFlags { + v, sl = readULEB128(sl) + funcProps.ParamFlags[i] = ParamPropBits(v) + } + v, sl = readULEB128(sl) + funcProps.ResultFlags = make([]ResultPropBits, v) + for i := range funcProps.ResultFlags { + v, sl = readULEB128(sl) + funcProps.ResultFlags[i] = ResultPropBits(v) + } + return &funcProps +} + +func readULEB128(sl []byte) (value uint64, rsl []byte) { + var shift uint + + for { + b := sl[0] + sl = sl[1:] + value |= (uint64(b&0x7F) << shift) + if b&0x80 == 0 { + break + } + shift += 7 + } + return value, sl +} + +func writeUleb128(sb *strings.Builder, v uint64) { + if v < 128 { + sb.WriteByte(uint8(v)) + return + } + more := true + for more { + c := uint8(v & 0x7f) + v >>= 7 + more = v != 0 + if more { + c |= 0x80 + } + sb.WriteByte(c) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/dumpscores.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/dumpscores.go new file mode 100644 index 0000000000000000000000000000000000000000..6f2f76002ea9d861165730fc897eaf35d38afd44 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/dumpscores.go @@ -0,0 +1,45 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dumpscores + +var G int + +func inlinable(x int, f func(int) int) int { + if x != 0 { + return 1 + } + G += noninl(x) + return f(x) +} + +func inlinable2(x int) int { + return noninl(-x) +} + +//go:noinline +func noninl(x int) int { + return x + 1 +} + +func tooLargeToInline(x int) int { + if x > 101 { + // Drive up the cost of inlining this func over the + // regular threshold. + return big(big(big(big(big(G + x))))) + } + if x < 100 { + // make sure this callsite is scored properly + G += inlinable(101, inlinable2) + if G == 101 { + return 0 + } + panic(inlinable2(3)) + } + return G +} + +func big(q int) int { + return noninl(q) + noninl(-q) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/README.txt b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/README.txt new file mode 100644 index 0000000000000000000000000000000000000000..af5ebec850f73a79bcc35325f0262a51a0933146 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/README.txt @@ -0,0 +1,77 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +Notes on the format of the testcase files in +cmd/compile/internal/inline/inlheur/testdata/props: + +- each (compilable) file contains input Go code and expected results + in the form of column-0 comments. + +- functions or methods that begin with "T_" are targeted for testing, + as well as "init" functions; all other functions are ignored. 
+ +- function header comments begin with a line containing + the file name, function name, definition line, then index + and a count of the number of funcs that share that same + definition line (needed to support generics). Example: + + // foo.go T_mumble 35 1 4 + + Here "T_mumble" is defined at line 35, and it is func 0 + out of the 4 funcs that share that same line. + +- function property expected results appear as comments in immediately + prior to the function. For example, here we have first the function + name ("T_feeds_if_simple"), then human-readable dump of the function + properties, as well as the JSON for the properties object, each + section separated by a "<>" delimiter. + + // params.go T_feeds_if_simple 35 0 1 + // RecvrParamFlags: + // 0: ParamFeedsIfOrSwitch + // + // {"Flags":0,"RecvrParamFlags":[8],"ReturnFlags":[]} + // callsite: params.go:34:10|0 "CallSiteOnPanicPath" 2 + // + // + func T_feeds_if_simple(x int) { + if x < 100 { + os.Exit(1) + } + println(x) + } + +- when the test runs, it will compile the Go source file with an + option to dump out function properties, then compare the new dump + for each function with the JSON appearing in the header comment for + the function (in the example above, the JSON appears between + "" and "". The material prior to the + dump is simply there for human consumption, so that a developer can + easily see that "RecvrParamFlags":[8] means that the first parameter + has flag ParamFeedsIfOrSwitch. + +- when making changes to the compiler (which can alter the expected + results) or edits/additions to the go code in the testcase files, + you can remaster the results by running + + go test -v -count=1 . 
+ + In the trace output of this run, you'll see messages of the form + + === RUN TestFuncProperties + funcprops_test.go:NNN: update-expected: emitted updated file + testdata/props/XYZ.go.new + funcprops_test.go:MMM: please compare the two files, then overwrite + testdata/props/XYZ.go with testdata/props/XYZ.go.new + + at which point you can compare the old and new files by hand, then + overwrite the *.go file with the *.go.new file if you are happy with + the diffs. + +- note that the remastering process will strip out any existing + column-0 (unindented) comments; if you write comments that you + want to see preserved, use "/* */" or indent them. + + + diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/acrosscall.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/acrosscall.go new file mode 100644 index 0000000000000000000000000000000000000000..a8166fddb6a5cf394fbf43b5e8c389841547ca6c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/acrosscall.go @@ -0,0 +1,214 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// DO NOT EDIT (use 'go test -v -update-expected' instead.) +// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt +// for more information on the format of this file. 
+// +package params + +// acrosscall.go T_feeds_indirect_call_via_call_toplevel 19 0 1 +// ParamFlags +// 0 ParamFeedsIndirectCall +// +// {"Flags":0,"ParamFlags":[8],"ResultFlags":null} +// callsite: acrosscall.go:20:12|0 flagstr "" flagval 0 score 60 mask 0 maskstr "" +// +// +func T_feeds_indirect_call_via_call_toplevel(f func(int)) { + callsparam(f) +} + +// acrosscall.go T_feeds_indirect_call_via_call_conditional 31 0 1 +// ParamFlags +// 0 ParamMayFeedIndirectCall +// +// {"Flags":0,"ParamFlags":[16],"ResultFlags":null} +// callsite: acrosscall.go:33:13|0 flagstr "" flagval 0 score 60 mask 0 maskstr "" +// +// +func T_feeds_indirect_call_via_call_conditional(f func(int)) { + if G != 101 { + callsparam(f) + } +} + +// acrosscall.go T_feeds_conditional_indirect_call_via_call_toplevel 45 0 1 +// ParamFlags +// 0 ParamMayFeedIndirectCall +// +// {"Flags":0,"ParamFlags":[16],"ResultFlags":null} +// callsite: acrosscall.go:46:23|0 flagstr "" flagval 0 score 64 mask 0 maskstr "" +// +// +func T_feeds_conditional_indirect_call_via_call_toplevel(f func(int)) { + callsparamconditional(f) +} + +// acrosscall.go T_feeds_if_via_call 57 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// +// {"Flags":0,"ParamFlags":[32],"ResultFlags":null} +// callsite: acrosscall.go:58:9|0 flagstr "" flagval 0 score 8 mask 0 maskstr "" +// +// +func T_feeds_if_via_call(x int) { + feedsif(x) +} + +// acrosscall.go T_feeds_if_via_call_conditional 69 0 1 +// ParamFlags +// 0 ParamMayFeedIfOrSwitch +// +// {"Flags":0,"ParamFlags":[64],"ResultFlags":null} +// callsite: acrosscall.go:71:10|0 flagstr "" flagval 0 score 8 mask 0 maskstr "" +// +// +func T_feeds_if_via_call_conditional(x int) { + if G != 101 { + feedsif(x) + } +} + +// acrosscall.go T_feeds_conditional_if_via_call 83 0 1 +// ParamFlags +// 0 ParamMayFeedIfOrSwitch +// +// {"Flags":0,"ParamFlags":[64],"ResultFlags":null} +// callsite: acrosscall.go:84:20|0 flagstr "" flagval 0 score 12 mask 0 maskstr "" +// +// +func 
T_feeds_conditional_if_via_call(x int) { + feedsifconditional(x) +} + +// acrosscall.go T_multifeeds1 97 0 1 +// ParamFlags +// 0 ParamFeedsIndirectCall|ParamMayFeedIndirectCall +// 1 ParamNoInfo +// +// {"Flags":0,"ParamFlags":[24,0],"ResultFlags":null} +// callsite: acrosscall.go:98:12|0 flagstr "" flagval 0 score 60 mask 0 maskstr "" +// callsite: acrosscall.go:99:23|1 flagstr "" flagval 0 score 64 mask 0 maskstr "" +// +// +func T_multifeeds1(f1, f2 func(int)) { + callsparam(f1) + callsparamconditional(f1) +} + +// acrosscall.go T_acrosscall_returnsconstant 110 0 1 +// ResultFlags +// 0 ResultAlwaysSameConstant +// +// {"Flags":0,"ParamFlags":null,"ResultFlags":[8]} +// callsite: acrosscall.go:111:24|0 flagstr "" flagval 0 score 2 mask 0 maskstr "" +// +// +func T_acrosscall_returnsconstant() int { + return returnsconstant() +} + +// acrosscall.go T_acrosscall_returnsmem 122 0 1 +// ResultFlags +// 0 ResultIsAllocatedMem +// +// {"Flags":0,"ParamFlags":null,"ResultFlags":[2]} +// callsite: acrosscall.go:123:19|0 flagstr "" flagval 0 score 2 mask 0 maskstr "" +// +// +func T_acrosscall_returnsmem() *int { + return returnsmem() +} + +// acrosscall.go T_acrosscall_returnscci 134 0 1 +// ResultFlags +// 0 ResultIsConcreteTypeConvertedToInterface +// +// {"Flags":0,"ParamFlags":null,"ResultFlags":[4]} +// callsite: acrosscall.go:135:19|0 flagstr "" flagval 0 score 7 mask 0 maskstr "" +// +// +func T_acrosscall_returnscci() I { + return returnscci() +} + +// acrosscall.go T_acrosscall_multiret 144 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]} +// callsite: acrosscall.go:146:25|0 flagstr "" flagval 0 score 2 mask 0 maskstr "" +// +// +func T_acrosscall_multiret(q int) int { + if q != G { + return returnsconstant() + } + return 0 +} + +// acrosscall.go T_acrosscall_multiret2 158 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]} +// callsite: acrosscall.go:160:25|0 flagstr "" flagval 0 score 2 mask 0 maskstr "" +// callsite: acrosscall.go:162:25|1 
flagstr "" flagval 0 score 2 mask 0 maskstr "" +// +// +func T_acrosscall_multiret2(q int) int { + if q == G { + return returnsconstant() + } else { + return returnsconstant() + } +} + +func callsparam(f func(int)) { + f(2) +} + +func callsparamconditional(f func(int)) { + if G != 101 { + f(2) + } +} + +func feedsif(x int) int { + if x != 101 { + return 42 + } + return 43 +} + +func feedsifconditional(x int) int { + if G != 101 { + if x != 101 { + return 42 + } + } + return 43 +} + +func returnsconstant() int { + return 42 +} + +func returnsmem() *int { + return new(int) +} + +func returnscci() I { + var q Q + return q +} + +type I interface { + Foo() +} + +type Q int + +func (q Q) Foo() { +} + +var G int diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/calls.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/calls.go new file mode 100644 index 0000000000000000000000000000000000000000..5cc217b4baac86ad448ca73c1f03cf042aa5fbfb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/calls.go @@ -0,0 +1,240 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// DO NOT EDIT (use 'go test -v -update-expected' instead.) +// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt +// for more information on the format of this file. 
+// +package calls + +import "os" + +// calls.go T_call_in_panic_arg 19 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":null} +// callsite: calls.go:21:15|0 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj" +// +// +func T_call_in_panic_arg(x int) { + if x < G { + panic(callee(x)) + } +} + +// calls.go T_calls_in_loops 32 0 1 +// +// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null} +// callsite: calls.go:34:9|0 flagstr "CallSiteInLoop" flagval 1 score -3 mask 4 maskstr "inLoopAdj" +// callsite: calls.go:37:9|1 flagstr "CallSiteInLoop" flagval 1 score -3 mask 4 maskstr "inLoopAdj" +// +// +func T_calls_in_loops(x int, q []string) { + for i := 0; i < x; i++ { + callee(i) + } + for _, s := range q { + callee(len(s)) + } +} + +// calls.go T_calls_in_pseudo_loop 48 0 1 +// +// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null} +// callsite: calls.go:50:9|0 flagstr "" flagval 0 score 2 mask 0 maskstr "" +// callsite: calls.go:54:9|1 flagstr "" flagval 0 score 2 mask 0 maskstr "" +// +// +func T_calls_in_pseudo_loop(x int, q []string) { + for i := 0; i < x; i++ { + callee(i) + return + } + for _, s := range q { + callee(len(s)) + break + } +} + +// calls.go T_calls_on_panic_paths 67 0 1 +// +// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null} +// callsite: calls.go:69:9|0 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj" +// callsite: calls.go:73:9|1 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj" +// callsite: calls.go:77:12|2 flagstr "CallSiteOnPanicPath" flagval 2 score 102 mask 1 maskstr "panicPathAdj" +// +// +func T_calls_on_panic_paths(x int, q []string) { + if x+G == 101 { + callee(x) + panic("ouch") + } + if x < G-101 { + callee(x) + if len(q) == 0 { + G++ + } + callsexit(x) + } +} + +// calls.go T_calls_not_on_panic_paths 93 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch|ParamMayFeedIfOrSwitch +// 1 ParamNoInfo +// +// {"Flags":0,"ParamFlags":[96,0],"ResultFlags":null} 
+// callsite: calls.go:103:9|0 flagstr "" flagval 0 score 2 mask 0 maskstr "" +// callsite: calls.go:112:9|1 flagstr "" flagval 0 score 2 mask 0 maskstr "" +// callsite: calls.go:115:9|2 flagstr "" flagval 0 score 2 mask 0 maskstr "" +// callsite: calls.go:119:12|3 flagstr "CallSiteOnPanicPath" flagval 2 score 102 mask 1 maskstr "panicPathAdj" +// +// +func T_calls_not_on_panic_paths(x int, q []string) { + if x != G { + panic("ouch") + /* Notes: */ + /* - we only look for post-dominating panic/exit, so */ + /* this site will on fact not have a panicpath flag */ + /* - vet will complain about this site as unreachable */ + callee(x) + } + if x != G { + callee(x) + if x < 100 { + panic("ouch") + } + } + if x+G == 101 { + if x < 100 { + panic("ouch") + } + callee(x) + } + if x < -101 { + callee(x) + if len(q) == 0 { + return + } + callsexit(x) + } +} + +// calls.go init.0 129 0 1 +// +// {"Flags":0,"ParamFlags":null,"ResultFlags":null} +// callsite: calls.go:130:16|0 flagstr "CallSiteInInitFunc" flagval 4 score 22 mask 2 maskstr "initFuncAdj" +// +// +func init() { + println(callee(5)) +} + +// calls.go T_pass_inlinable_func_to_param_feeding_indirect_call 140 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]} +// callsite: calls.go:141:19|0 flagstr "" flagval 0 score 16 mask 512 maskstr "passInlinableFuncToIndCallAdj" +// callsite: calls.go:141:19|calls.go:232:10|0 flagstr "" flagval 0 score 2 mask 0 maskstr "" +// +// +func T_pass_inlinable_func_to_param_feeding_indirect_call(x int) int { + return callsParam(x, callee) +} + +// calls.go T_pass_noninlinable_func_to_param_feeding_indirect_call 150 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]} +// callsite: calls.go:153:19|0 flagstr "" flagval 0 score 36 mask 128 maskstr "passFuncToIndCallAdj" +// +// +func T_pass_noninlinable_func_to_param_feeding_indirect_call(x int) int { + // if we inline callsParam we can convert the indirect call + // to a direct call, but we can't inline it. 
+ return callsParam(x, calleeNoInline) +} + +// calls.go T_pass_inlinable_func_to_param_feeding_nested_indirect_call 165 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// +// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0]} +// callsite: calls.go:166:25|0 flagstr "" flagval 0 score 27 mask 1024 maskstr "passInlinableFuncToNestedIndCallAdj" +// callsite: calls.go:166:25|calls.go:237:11|0 flagstr "" flagval 0 score 2 mask 0 maskstr "" +// +// +func T_pass_inlinable_func_to_param_feeding_nested_indirect_call(x int) int { + return callsParamNested(x, callee) +} + +// calls.go T_pass_noninlinable_func_to_param_feeding_nested_indirect_call 177 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// +// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0]} +// callsite: calls.go:178:25|0 flagstr "" flagval 0 score 47 mask 256 maskstr "passFuncToNestedIndCallAdj" +// +// +func T_pass_noninlinable_func_to_param_feeding_nested_indirect_call(x int) int { + return callsParamNested(x, calleeNoInline) +} + +// calls.go T_call_scoring_in_noninlinable_func 195 0 1 +// +// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[0]} +// callsite: calls.go:209:14|0 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj" +// callsite: calls.go:210:15|1 flagstr "CallSiteOnPanicPath" flagval 2 score 42 mask 1 maskstr "panicPathAdj" +// callsite: calls.go:212:19|2 flagstr "" flagval 0 score 16 mask 512 maskstr "passInlinableFuncToIndCallAdj" +// callsite: calls.go:212:19|calls.go:232:10|0 flagstr "" flagval 0 score 4 mask 0 maskstr "" +// +// +// calls.go T_call_scoring_in_noninlinable_func.func1 212 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]} +// +// +func T_call_scoring_in_noninlinable_func(x int, sl []int) int { + if x == 101 { + // Drive up the cost of inlining this funcfunc over the + // regular threshold. + for i := 0; i < 10; i++ { + for j := 0; j < i; j++ { + sl = append(sl, append(sl, append(sl, append(sl, x)...)...)...) 
+ sl = append(sl, sl[0], sl[1], sl[2]) + x += calleeNoInline(x) + } + } + } + if x < 100 { + // make sure this callsite is scored properly + G += callee(101) + panic(callee(x)) + } + return callsParam(x, func(y int) int { return y + x }) +} + +var G int + +func callee(x int) int { + return x +} + +func calleeNoInline(x int) int { + defer func() { G++ }() + return x +} + +func callsexit(x int) { + println(x) + os.Exit(x) +} + +func callsParam(x int, f func(int) int) int { + return f(x) +} + +func callsParamNested(x int, f func(int) int) int { + if x < 0 { + return f(x) + } + return 0 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/funcflags.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/funcflags.go new file mode 100644 index 0000000000000000000000000000000000000000..f3d74241b43bfaf400336d4e84e6b5a8494bce13 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/funcflags.go @@ -0,0 +1,341 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// DO NOT EDIT (use 'go test -v -update-expected' instead.) +// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt +// for more information on the format of this file. 
+// + +package funcflags + +import "os" + +// funcflags.go T_simple 20 0 1 +// Flags FuncPropNeverReturns +// +// {"Flags":1,"ParamFlags":null,"ResultFlags":null} +// +// +func T_simple() { + panic("bad") +} + +// funcflags.go T_nested 32 0 1 +// Flags FuncPropNeverReturns +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// +// {"Flags":1,"ParamFlags":[32],"ResultFlags":null} +// +// +func T_nested(x int) { + if x < 10 { + panic("bad") + } else { + panic("good") + } +} + +// funcflags.go T_block1 46 0 1 +// Flags FuncPropNeverReturns +// +// {"Flags":1,"ParamFlags":[0],"ResultFlags":null} +// +// +func T_block1(x int) { + panic("bad") + if x < 10 { + return + } +} + +// funcflags.go T_block2 60 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// +// {"Flags":0,"ParamFlags":[32],"ResultFlags":null} +// +// +func T_block2(x int) { + if x < 10 { + return + } + panic("bad") +} + +// funcflags.go T_switches1 75 0 1 +// Flags FuncPropNeverReturns +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// +// {"Flags":1,"ParamFlags":[32],"ResultFlags":null} +// +// +func T_switches1(x int) { + switch x { + case 1: + panic("one") + case 2: + panic("two") + } + panic("whatev") +} + +// funcflags.go T_switches1a 92 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// +// {"Flags":0,"ParamFlags":[32],"ResultFlags":null} +// +// +func T_switches1a(x int) { + switch x { + case 2: + panic("two") + } +} + +// funcflags.go T_switches2 106 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// +// {"Flags":0,"ParamFlags":[32],"ResultFlags":null} +// +// +func T_switches2(x int) { + switch x { + case 1: + panic("one") + case 2: + panic("two") + default: + return + } + panic("whatev") +} + +// funcflags.go T_switches3 123 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":null} +// +// +func T_switches3(x interface{}) { + switch x.(type) { + case bool: + panic("one") + case float32: + panic("two") + } +} + +// funcflags.go T_switches4 138 0 1 +// Flags FuncPropNeverReturns +// +// 
{"Flags":1,"ParamFlags":[0],"ResultFlags":null} +// +// +func T_switches4(x int) { + switch x { + case 1: + x++ + fallthrough + case 2: + panic("two") + fallthrough + default: + panic("bad") + } + panic("whatev") +} + +// funcflags.go T_recov 157 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":null} +// +// +func T_recov(x int) { + if x := recover(); x != nil { + panic(x) + } +} + +// funcflags.go T_forloops1 169 0 1 +// Flags FuncPropNeverReturns +// +// {"Flags":1,"ParamFlags":[0],"ResultFlags":null} +// +// +func T_forloops1(x int) { + for { + panic("wokketa") + } +} + +// funcflags.go T_forloops2 180 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":null} +// +// +func T_forloops2(x int) { + for { + println("blah") + if true { + break + } + panic("warg") + } +} + +// funcflags.go T_forloops3 195 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":null} +// +// +func T_forloops3(x int) { + for i := 0; i < 101; i++ { + println("blah") + if true { + continue + } + panic("plark") + } + for i := range [10]int{} { + println(i) + panic("plark") + } + panic("whatev") +} + +// funcflags.go T_hasgotos 215 0 1 +// +// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null} +// +// +func T_hasgotos(x int, y int) { + { + xx := x + panic("bad") + lab1: + goto lab2 + lab2: + if false { + goto lab1 + } else { + goto lab4 + } + lab4: + if xx < y { + lab3: + if false { + goto lab3 + } + } + println(9) + } +} + +// funcflags.go T_break_with_label 246 0 1 +// ParamFlags +// 0 ParamMayFeedIfOrSwitch +// 1 ParamNoInfo +// +// {"Flags":0,"ParamFlags":[64,0],"ResultFlags":null} +// +// +func T_break_with_label(x int, y int) { + // presence of break with label should pessimize this func + // (similar to goto). 
+ panic("bad") +lab1: + for { + println("blah") + if x < 0 { + break lab1 + } + panic("hubba") + } +} + +// funcflags.go T_callsexit 268 0 1 +// Flags FuncPropNeverReturns +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// +// {"Flags":1,"ParamFlags":[32],"ResultFlags":null} +// +// +func T_callsexit(x int) { + if x < 0 { + os.Exit(1) + } + os.Exit(2) +} + +// funcflags.go T_exitinexpr 281 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":null} +// callsite: funcflags.go:286:18|0 flagstr "CallSiteOnPanicPath" flagval 2 score 102 mask 1 maskstr "panicPathAdj" +// +// +func T_exitinexpr(x int) { + // This function does indeed unconditionally call exit, since the + // first thing it does is invoke exprcallsexit, however from the + // perspective of this function, the call is not at the statement + // level, so we'll wind up missing it. + if exprcallsexit(x) < 0 { + println("foo") + } +} + +// funcflags.go T_select_noreturn 297 0 1 +// Flags FuncPropNeverReturns +// +// {"Flags":1,"ParamFlags":[0,0,0],"ResultFlags":null} +// +// +func T_select_noreturn(chi chan int, chf chan float32, p *int) { + rv := 0 + select { + case i := <-chi: + rv = i + case f := <-chf: + rv = int(f) + } + *p = rv + panic("bad") +} + +// funcflags.go T_select_mayreturn 314 0 1 +// +// {"Flags":0,"ParamFlags":[0,0,0],"ResultFlags":[0]} +// +// +func T_select_mayreturn(chi chan int, chf chan float32, p *int) int { + rv := 0 + select { + case i := <-chi: + rv = i + return i + case f := <-chf: + rv = int(f) + } + *p = rv + panic("bad") +} + +// funcflags.go T_calls_callsexit 334 0 1 +// Flags FuncPropNeverReturns +// +// {"Flags":1,"ParamFlags":[0],"ResultFlags":null} +// callsite: funcflags.go:335:15|0 flagstr "CallSiteOnPanicPath" flagval 2 score 102 mask 1 maskstr "panicPathAdj" +// +// +func T_calls_callsexit(x int) { + exprcallsexit(x) +} + +func exprcallsexit(x int) int { + os.Exit(x) + return x +} diff --git 
a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/params.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/params.go new file mode 100644 index 0000000000000000000000000000000000000000..1a3073c25caa816bf7c2afb17647fd6432f70048 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/params.go @@ -0,0 +1,367 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// DO NOT EDIT (use 'go test -v -update-expected' instead.) +// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt +// for more information on the format of this file. +// +package params + +import "os" + +// params.go T_feeds_if_simple 20 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// +// {"Flags":0,"ParamFlags":[32],"ResultFlags":null} +// +// +func T_feeds_if_simple(x int) { + if x < 100 { + os.Exit(1) + } + println(x) +} + +// params.go T_feeds_if_nested 35 0 1 +// ParamFlags +// 0 ParamMayFeedIfOrSwitch +// 1 ParamFeedsIfOrSwitch +// +// {"Flags":0,"ParamFlags":[64,32],"ResultFlags":null} +// +// +func T_feeds_if_nested(x, y int) { + if y != 0 { + if x < 100 { + os.Exit(1) + } + } + println(x) +} + +// params.go T_feeds_if_pointer 51 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// +// {"Flags":0,"ParamFlags":[32],"ResultFlags":null} +// +// +func T_feeds_if_pointer(xp *int) { + if xp != nil { + os.Exit(1) + } + println(xp) +} + +// params.go T.T_feeds_if_simple_method 66 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// 1 ParamFeedsIfOrSwitch +// +// {"Flags":0,"ParamFlags":[32,32],"ResultFlags":null} +// +// +func (r T) T_feeds_if_simple_method(x int) { + if x < 100 { + os.Exit(1) + } + if r != 99 { + os.Exit(2) + } + println(x) +} + +// params.go T_feeds_if_blanks 86 0 1 +// ParamFlags +// 0 ParamNoInfo +// 1 ParamFeedsIfOrSwitch +// 2 ParamNoInfo 
+// 3 ParamNoInfo +// +// {"Flags":0,"ParamFlags":[0,32,0,0],"ResultFlags":null} +// +// +func T_feeds_if_blanks(_ string, x int, _ bool, _ bool) { + // blanks ignored; from a props perspective "x" is param 0 + if x < 100 { + os.Exit(1) + } + println(x) +} + +// params.go T_feeds_if_with_copy 101 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// +// {"Flags":0,"ParamFlags":[32],"ResultFlags":null} +// +// +func T_feeds_if_with_copy(x int) { + // simple copy here -- we get this case + xx := x + if xx < 100 { + os.Exit(1) + } + println(x) +} + +// params.go T_feeds_if_with_copy_expr 115 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":null} +// +// +func T_feeds_if_with_copy_expr(x int) { + // this case (copy of expression) currently not handled. + xx := x < 100 + if xx { + os.Exit(1) + } + println(x) +} + +// params.go T_feeds_switch 131 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// +// {"Flags":0,"ParamFlags":[32],"ResultFlags":null} +// +// +func T_feeds_switch(x int) { + switch x { + case 101: + println(101) + case 202: + panic("bad") + } + println(x) +} + +// params.go T_feeds_if_toocomplex 146 0 1 +// +// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null} +// +// +func T_feeds_if_toocomplex(x int, y int) { + // not handled at the moment; we only look for cases where + // an "if" or "switch" can be simplified based on a single + // constant param, not a combination of constant params. 
+ if x < y { + panic("bad") + } + println(x + y) +} + +// params.go T_feeds_if_redefined 161 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":null} +// +// +func T_feeds_if_redefined(x int) { + if x < G { + x++ + } + if x == 101 { + panic("bad") + } +} + +// params.go T_feeds_if_redefined2 175 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":null} +// +// +func T_feeds_if_redefined2(x int) { + // this currently classifies "x" as "no info", since the analysis we + // use to check for reassignments/redefinitions is not flow-sensitive, + // but we could probably catch this case with better analysis or + // high-level SSA. + if x == 101 { + panic("bad") + } + if x < G { + x++ + } +} + +// params.go T_feeds_multi_if 196 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// 1 ParamNoInfo +// +// {"Flags":0,"ParamFlags":[32,0],"ResultFlags":null} +// +// +func T_feeds_multi_if(x int, y int) { + // Here we have one "if" that is too complex (x < y) but one that is + // simple enough. Currently we enable the heuristic for this. It's + // possible to imagine this being a bad thing if the function in + // question is sufficiently large, but if it's too large we probably + // can't inline it anyhow. + if x < y { + panic("bad") + } + if x < 10 { + panic("whatev") + } + println(x + y) +} + +// params.go T_feeds_if_redefined_indirectwrite 216 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":null} +// +// +func T_feeds_if_redefined_indirectwrite(x int) { + ax := &x + if G != 2 { + *ax = G + } + if x == 101 { + panic("bad") + } +} + +// params.go T_feeds_if_redefined_indirectwrite_copy 231 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":null} +// +// +func T_feeds_if_redefined_indirectwrite_copy(x int) { + // we don't catch this case, "x" is marked as no info, + // since we're conservative about redefinitions. 
+ ax := &x + cx := x + if G != 2 { + *ax = G + } + if cx == 101 { + panic("bad") + } +} + +// params.go T_feeds_if_expr1 251 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// +// {"Flags":0,"ParamFlags":[32],"ResultFlags":null} +// +// +func T_feeds_if_expr1(x int) { + if x == 101 || x == 102 || x&0xf == 0 { + panic("bad") + } +} + +// params.go T_feeds_if_expr2 262 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":null} +// +// +func T_feeds_if_expr2(x int) { + if (x*x)-(x+x)%x == 101 || x&0xf == 0 { + panic("bad") + } +} + +// params.go T_feeds_if_expr3 273 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":null} +// +// +func T_feeds_if_expr3(x int) { + if x-(x&0x1)^378 > (1 - G) { + panic("bad") + } +} + +// params.go T_feeds_if_shift_may_panic 284 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]} +// +// +func T_feeds_if_shift_may_panic(x int) *int { + // here if "x" is a constant like 2, we could simplify the "if", + // but if we were to pass in a negative value for "x" we can't + // fold the condition due to the need to panic on negative shift. 
+ if 1< 1024 { + return nil + } + return &G +} + +// params.go T_feeds_if_maybe_divide_by_zero 299 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":null} +// +// +func T_feeds_if_maybe_divide_by_zero(x int) { + if 99/x == 3 { + return + } + println("blarg") +} + +// params.go T_feeds_indcall 313 0 1 +// ParamFlags +// 0 ParamMayFeedIndirectCall +// +// {"Flags":0,"ParamFlags":[16],"ResultFlags":null} +// +// +func T_feeds_indcall(x func()) { + if G != 20 { + x() + } +} + +// params.go T_feeds_indcall_and_if 326 0 1 +// ParamFlags +// 0 ParamMayFeedIndirectCall|ParamFeedsIfOrSwitch +// +// {"Flags":0,"ParamFlags":[48],"ResultFlags":null} +// +// +func T_feeds_indcall_and_if(x func()) { + if x != nil { + x() + } +} + +// params.go T_feeds_indcall_with_copy 339 0 1 +// ParamFlags +// 0 ParamFeedsIndirectCall +// +// {"Flags":0,"ParamFlags":[8],"ResultFlags":null} +// +// +func T_feeds_indcall_with_copy(x func()) { + xx := x + if G < 10 { + G-- + } + xx() +} + +// params.go T_feeds_interface_method_call 354 0 1 +// ParamFlags +// 0 ParamFeedsInterfaceMethodCall +// +// {"Flags":0,"ParamFlags":[2],"ResultFlags":null} +// +// +func T_feeds_interface_method_call(i I) { + i.Blarg() +} + +var G int + +type T int + +type I interface { + Blarg() +} + +func (r T) Blarg() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/returns.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/returns.go new file mode 100644 index 0000000000000000000000000000000000000000..51f2bc7cb2d222aeefa70432d278e3eac1879d34 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/returns.go @@ -0,0 +1,370 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// DO NOT EDIT (use 'go test -v -update-expected' instead.) 
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt +// for more information on the format of this file. +// + +package returns1 + +import "unsafe" + +// returns.go T_simple_allocmem 21 0 1 +// ResultFlags +// 0 ResultIsAllocatedMem +// +// {"Flags":0,"ParamFlags":null,"ResultFlags":[2]} +// +// +func T_simple_allocmem() *Bar { + return &Bar{} +} + +// returns.go T_allocmem_two_returns 34 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// ResultFlags +// 0 ResultIsAllocatedMem +// +// {"Flags":0,"ParamFlags":[32],"ResultFlags":[2]} +// +// +func T_allocmem_two_returns(x int) *Bar { + // multiple returns + if x < 0 { + return new(Bar) + } else { + return &Bar{x: 2} + } +} + +// returns.go T_allocmem_three_returns 52 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// ResultFlags +// 0 ResultIsAllocatedMem +// +// {"Flags":0,"ParamFlags":[32],"ResultFlags":[2]} +// +// +func T_allocmem_three_returns(x int) []*Bar { + // more multiple returns + switch x { + case 10, 11, 12: + return make([]*Bar, 10) + case 13: + fallthrough + case 15: + return []*Bar{&Bar{x: 15}} + } + return make([]*Bar, 0, 10) +} + +// returns.go T_return_nil 72 0 1 +// ResultFlags +// 0 ResultAlwaysSameConstant +// +// {"Flags":0,"ParamFlags":null,"ResultFlags":[8]} +// +// +func T_return_nil() *Bar { + // simple case: no alloc + return nil +} + +// returns.go T_multi_return_nil 84 0 1 +// ResultFlags +// 0 ResultAlwaysSameConstant +// +// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[8]} +// +// +func T_multi_return_nil(x, y bool) *Bar { + if x && y { + return nil + } + return nil +} + +// returns.go T_multi_return_nil_anomoly 98 0 1 +// ResultFlags +// 0 ResultIsConcreteTypeConvertedToInterface +// +// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[4]} +// +// +func T_multi_return_nil_anomoly(x, y bool) Itf { + if x && y { + var qnil *Q + return qnil + } + var barnil *Bar + return barnil +} + +// returns.go T_multi_return_some_nil 112 0 1 +// +// 
{"Flags":0,"ParamFlags":[0,0],"ResultFlags":[0]} +// +// +func T_multi_return_some_nil(x, y bool) *Bar { + if x && y { + return nil + } else { + return &GB + } +} + +// returns.go T_mixed_returns 127 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// +// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0]} +// +// +func T_mixed_returns(x int) *Bar { + // mix of alloc and non-alloc + if x < 0 { + return new(Bar) + } else { + return &GB + } +} + +// returns.go T_mixed_returns_slice 143 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// +// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0]} +// +// +func T_mixed_returns_slice(x int) []*Bar { + // mix of alloc and non-alloc + switch x { + case 10, 11, 12: + return make([]*Bar, 10) + case 13: + fallthrough + case 15: + return []*Bar{&Bar{x: 15}} + } + ba := [...]*Bar{&GB, &GB} + return ba[:] +} + +// returns.go T_maps_and_channels 167 0 1 +// ResultFlags +// 0 ResultNoInfo +// 1 ResultNoInfo +// 2 ResultNoInfo +// 3 ResultAlwaysSameConstant +// +// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[0,0,0,8]} +// +// +func T_maps_and_channels(x int, b bool) (bool, map[int]int, chan bool, unsafe.Pointer) { + // maps and channels + return b, make(map[int]int), make(chan bool), nil +} + +// returns.go T_assignment_to_named_returns 179 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// +// {"Flags":0,"ParamFlags":[32],"ResultFlags":[0,0]} +// +// +func T_assignment_to_named_returns(x int) (r1 *uint64, r2 *uint64) { + // assignments to named returns and then "return" not supported + r1 = new(uint64) + if x < 1 { + *r1 = 2 + } + r2 = new(uint64) + return +} + +// returns.go T_named_returns_but_return_explicit_values 199 0 1 +// ParamFlags +// 0 ParamFeedsIfOrSwitch +// ResultFlags +// 0 ResultIsAllocatedMem +// 1 ResultIsAllocatedMem +// +// {"Flags":0,"ParamFlags":[32],"ResultFlags":[2,2]} +// +// +func T_named_returns_but_return_explicit_values(x int) (r1 *uint64, r2 *uint64) { + // named returns ok if all returns are non-empty + rx1 := 
new(uint64) + if x < 1 { + *rx1 = 2 + } + rx2 := new(uint64) + return rx1, rx2 +} + +// returns.go T_return_concrete_type_to_itf 216 0 1 +// ResultFlags +// 0 ResultIsConcreteTypeConvertedToInterface +// +// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[4]} +// +// +func T_return_concrete_type_to_itf(x, y int) Itf { + return &Bar{} +} + +// returns.go T_return_concrete_type_to_itfwith_copy 227 0 1 +// ResultFlags +// 0 ResultIsConcreteTypeConvertedToInterface +// +// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[4]} +// +// +func T_return_concrete_type_to_itfwith_copy(x, y int) Itf { + b := &Bar{} + println("whee") + return b +} + +// returns.go T_return_concrete_type_to_itf_mixed 238 0 1 +// +// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[0]} +// +// +func T_return_concrete_type_to_itf_mixed(x, y int) Itf { + if x < y { + b := &Bar{} + return b + } + return nil +} + +// returns.go T_return_same_func 253 0 1 +// ResultFlags +// 0 ResultAlwaysSameInlinableFunc +// +// {"Flags":0,"ParamFlags":null,"ResultFlags":[32]} +// +// +func T_return_same_func() func(int) int { + if G < 10 { + return foo + } else { + return foo + } +} + +// returns.go T_return_different_funcs 266 0 1 +// +// {"Flags":0,"ParamFlags":null,"ResultFlags":[0]} +// +// +func T_return_different_funcs() func(int) int { + if G != 10 { + return foo + } else { + return bar + } +} + +// returns.go T_return_same_closure 286 0 1 +// ResultFlags +// 0 ResultAlwaysSameInlinableFunc +// +// {"Flags":0,"ParamFlags":null,"ResultFlags":[32]} +// +// +// returns.go T_return_same_closure.func1 287 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]} +// +// +func T_return_same_closure() func(int) int { + p := func(q int) int { return q } + if G < 10 { + return p + } else { + return p + } +} + +// returns.go T_return_different_closures 312 0 1 +// +// {"Flags":0,"ParamFlags":null,"ResultFlags":[0]} +// +// +// returns.go T_return_different_closures.func1 313 0 1 +// +// 
{"Flags":0,"ParamFlags":[0],"ResultFlags":[0]} +// +// +// returns.go T_return_different_closures.func2 317 0 1 +// ResultFlags +// 0 ResultAlwaysSameConstant +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":[8]} +// +// +func T_return_different_closures() func(int) int { + p := func(q int) int { return q } + if G < 10 { + return p + } else { + return func(q int) int { return 101 } + } +} + +// returns.go T_return_noninlinable 339 0 1 +// ResultFlags +// 0 ResultAlwaysSameFunc +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":[16]} +// +// +// returns.go T_return_noninlinable.func1 340 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]} +// callsite: returns.go:343:4|0 flagstr "" flagval 0 score 4 mask 0 maskstr "" +// +// +// returns.go T_return_noninlinable.func1.1 341 0 1 +// +// {"Flags":0,"ParamFlags":null,"ResultFlags":null} +// +// +func T_return_noninlinable(x int) func(int) int { + noti := func(q int) int { + defer func() { + println(q + x) + }() + return q + } + return noti +} + +type Bar struct { + x int + y string +} + +func (b *Bar) Plark() { +} + +type Q int + +func (q *Q) Plark() { +} + +func foo(x int) int { return x } +func bar(x int) int { return -x } + +var G int +var GB Bar + +type Itf interface { + Plark() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/returns2.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/returns2.go new file mode 100644 index 0000000000000000000000000000000000000000..7200926fb8a31880fcc49f858aec72c74bef49c7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/testdata/props/returns2.go @@ -0,0 +1,231 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// DO NOT EDIT (use 'go test -v -update-expected' instead.) 
+// See cmd/compile/internal/inline/inlheur/testdata/props/README.txt +// for more information on the format of this file. +// + +package returns2 + +// returns2.go T_return_feeds_iface_call 18 0 1 +// +// {"Flags":0,"ParamFlags":null,"ResultFlags":null} +// callsite: returns2.go:19:13|0 flagstr "" flagval 0 score 1 mask 16384 maskstr "returnFeedsConcreteToInterfaceCallAdj" +// +// +func T_return_feeds_iface_call() { + b := newBar(10) + b.Plark() +} + +// returns2.go T_multi_return_feeds_iface_call 29 0 1 +// +// {"Flags":0,"ParamFlags":null,"ResultFlags":null} +// callsite: returns2.go:30:20|0 flagstr "" flagval 0 score 3 mask 16384 maskstr "returnFeedsConcreteToInterfaceCallAdj" +// +// +func T_multi_return_feeds_iface_call() { + _, b, _ := newBar2(10) + b.Plark() +} + +// returns2.go T_returned_inlinable_func_feeds_indirect_call 41 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":null} +// callsite: returns2.go:42:18|0 flagstr "" flagval 0 score -51 mask 8200 maskstr "passConstToIfAdj|returnFeedsInlinableFuncToIndCallAdj" +// callsite: returns2.go:44:20|1 flagstr "" flagval 0 score -23 mask 8192 maskstr "returnFeedsInlinableFuncToIndCallAdj" +// +// +func T_returned_inlinable_func_feeds_indirect_call(q int) { + f := returnsFunc(10) + f(q) + f2 := returnsFunc2() + f2(q) +} + +// returns2.go T_returned_noninlineable_func_feeds_indirect_call 54 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":null} +// callsite: returns2.go:55:30|0 flagstr "" flagval 0 score -23 mask 4096 maskstr "returnFeedsFuncToIndCallAdj" +// +// +func T_returned_noninlineable_func_feeds_indirect_call(q int) { + f := returnsNonInlinableFunc() + f(q) +} + +// returns2.go T_multi_return_feeds_indirect_call 65 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":null} +// callsite: returns2.go:66:29|0 flagstr "" flagval 0 score -21 mask 8192 maskstr "returnFeedsInlinableFuncToIndCallAdj" +// +// +func T_multi_return_feeds_indirect_call(q int) { + _, f, _ := multiReturnsFunc() + f(q) +} 
+ +// returns2.go T_return_feeds_ifswitch 76 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]} +// callsite: returns2.go:77:14|0 flagstr "" flagval 0 score 10 mask 2048 maskstr "returnFeedsConstToIfAdj" +// +// +func T_return_feeds_ifswitch(q int) int { + x := meaning(q) + if x < 42 { + switch x { + case 42: + return 1 + } + } + return 0 +} + +// returns2.go T_multi_return_feeds_ifswitch 93 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]} +// callsite: returns2.go:94:21|0 flagstr "" flagval 0 score 9 mask 2048 maskstr "returnFeedsConstToIfAdj" +// +// +func T_multi_return_feeds_ifswitch(q int) int { + x, y, z := meanings(q) + if x < y { + switch x { + case 42: + return z + } + } + return 0 +} + +// returns2.go T_two_calls_feed_ifswitch 111 0 1 +// +// {"Flags":0,"ParamFlags":[0],"ResultFlags":[0]} +// callsite: returns2.go:115:14|0 flagstr "" flagval 0 score 25 mask 0 maskstr "" +// callsite: returns2.go:116:14|1 flagstr "" flagval 0 score 25 mask 0 maskstr "" +// +// +func T_two_calls_feed_ifswitch(q int) int { + // This case we don't handle; for the heuristic to kick in, + // all names in a given if/switch cond have to come from the + // same callsite + x := meaning(q) + y := meaning(-q) + if x < y { + switch x + y { + case 42: + return 1 + } + } + return 0 +} + +// returns2.go T_chained_indirect_call 132 0 1 +// +// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null} +// callsite: returns2.go:135:18|0 flagstr "" flagval 0 score -31 mask 8192 maskstr "returnFeedsInlinableFuncToIndCallAdj" +// +// +func T_chained_indirect_call(x, y int) { + // Here 'returnsFunc' returns an inlinable func that feeds + // directly into a call (no named intermediate). 
+ G += returnsFunc(x - y)(x + y) +} + +// returns2.go T_chained_conc_iface_call 144 0 1 +// +// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":null} +// callsite: returns2.go:148:8|0 flagstr "" flagval 0 score 1 mask 16384 maskstr "returnFeedsConcreteToInterfaceCallAdj" +// +// +func T_chained_conc_iface_call(x, y int) { + // Similar to the case above, return from call returning concrete type + // feeds directly into interface call. Note that only the first + // iface call is interesting here. + newBar(10).Plark().Plark() +} + +func returnsFunc(x int) func(int) int { + if x < 0 { + G++ + } + return adder +} + +func returnsFunc2() func(int) int { + return func(x int) int { + return adder(x) + } +} + +func returnsNonInlinableFunc() func(int) int { + return adderNoInline +} + +func multiReturnsFunc() (int, func(int) int, int) { + return 42, func(x int) int { G++; return 1 }, -42 +} + +func adder(x int) int { + G += 1 + return G +} + +func adderNoInline(x int) int { + defer func() { G += x }() + G += 1 + return G +} + +func meaning(q int) int { + r := 0 + for i := 0; i < 42; i++ { + r += q + } + G += r + return 42 +} + +func meanings(q int) (int, int, int) { + r := 0 + for i := 0; i < 42; i++ { + r += q + } + return 42, 43, r +} + +type Bar struct { + x int + y string +} + +func (b *Bar) Plark() Itf { + return b +} + +type Itf interface { + Plark() Itf +} + +func newBar(x int) Itf { + s := 0 + for i := 0; i < x; i++ { + s += i + } + return &Bar{ + x: s, + } +} + +func newBar2(x int) (int, Itf, bool) { + s := 0 + for i := 0; i < x; i++ { + s += i + } + return 0, &Bar{x: s}, false +} + +var G int diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go new file mode 100644 index 0000000000000000000000000000000000000000..587eab03fcb954a95e1c576e8280be1e6fbc409b --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/texpr_classify_test.go @@ -0,0 +1,217 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inlheur + +import ( + "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/src" + "go/constant" + "testing" +) + +var pos src.XPos +var local *types.Pkg +var f *ir.Func + +func init() { + types.PtrSize = 8 + types.RegSize = 8 + types.MaxWidth = 1 << 50 + typecheck.InitUniverse() + local = types.NewPkg("", "") + fsym := &types.Sym{ + Pkg: types.NewPkg("my/import/path", "path"), + Name: "function", + } + f = ir.NewFunc(src.NoXPos, src.NoXPos, fsym, nil) +} + +type state struct { + ntab map[string]*ir.Name +} + +func mkstate() *state { + return &state{ + ntab: make(map[string]*ir.Name), + } +} + +func bin(x ir.Node, op ir.Op, y ir.Node) ir.Node { + return ir.NewBinaryExpr(pos, op, x, y) +} + +func conv(x ir.Node, t *types.Type) ir.Node { + return ir.NewConvExpr(pos, ir.OCONV, t, x) +} + +func logical(x ir.Node, op ir.Op, y ir.Node) ir.Node { + return ir.NewLogicalExpr(pos, op, x, y) +} + +func un(op ir.Op, x ir.Node) ir.Node { + return ir.NewUnaryExpr(pos, op, x) +} + +func liti(i int64) ir.Node { + return ir.NewBasicLit(pos, types.Types[types.TINT64], constant.MakeInt64(i)) +} + +func lits(s string) ir.Node { + return ir.NewBasicLit(pos, types.Types[types.TSTRING], constant.MakeString(s)) +} + +func (s *state) nm(name string, t *types.Type) *ir.Name { + if n, ok := s.ntab[name]; ok { + if n.Type() != t { + panic("bad") + } + return n + } + sym := local.Lookup(name) + nn := ir.NewNameAt(pos, sym, t) + s.ntab[name] = nn + return nn +} + +func (s *state) nmi64(name string) *ir.Name { + return s.nm(name, types.Types[types.TINT64]) +} + +func (s *state) nms(name string) *ir.Name { + return s.nm(name, types.Types[types.TSTRING]) +} + 
+func TestClassifyIntegerCompare(t *testing.T) { + + // (n < 10 || n > 100) && (n >= 12 || n <= 99 || n != 101) + s := mkstate() + nn := s.nmi64("n") + nlt10 := bin(nn, ir.OLT, liti(10)) // n < 10 + ngt100 := bin(nn, ir.OGT, liti(100)) // n > 100 + nge12 := bin(nn, ir.OGE, liti(12)) // n >= 12 + nle99 := bin(nn, ir.OLE, liti(99)) // n < 10 + nne101 := bin(nn, ir.ONE, liti(101)) // n != 101 + noror1 := logical(nlt10, ir.OOROR, ngt100) // n < 10 || n > 100 + noror2 := logical(nge12, ir.OOROR, nle99) // n >= 12 || n <= 99 + noror3 := logical(noror2, ir.OOROR, nne101) + nandand := typecheck.Expr(logical(noror1, ir.OANDAND, noror3)) + + wantv := true + v := ShouldFoldIfNameConstant(nandand, []*ir.Name{nn}) + if v != wantv { + t.Errorf("wanted shouldfold(%v) %v, got %v", nandand, wantv, v) + } +} + +func TestClassifyStringCompare(t *testing.T) { + + // s != "foo" && s < "ooblek" && s > "plarkish" + s := mkstate() + nn := s.nms("s") + snefoo := bin(nn, ir.ONE, lits("foo")) // s != "foo" + sltoob := bin(nn, ir.OLT, lits("ooblek")) // s < "ooblek" + sgtpk := bin(nn, ir.OGT, lits("plarkish")) // s > "plarkish" + nandand := logical(snefoo, ir.OANDAND, sltoob) + top := typecheck.Expr(logical(nandand, ir.OANDAND, sgtpk)) + + wantv := true + v := ShouldFoldIfNameConstant(top, []*ir.Name{nn}) + if v != wantv { + t.Errorf("wanted shouldfold(%v) %v, got %v", top, wantv, v) + } +} + +func TestClassifyIntegerArith(t *testing.T) { + // n+1 ^ n-3 * n/2 + n<<9 + n>>2 - n&^7 + + s := mkstate() + nn := s.nmi64("n") + np1 := bin(nn, ir.OADD, liti(1)) // n+1 + nm3 := bin(nn, ir.OSUB, liti(3)) // n-3 + nd2 := bin(nn, ir.ODIV, liti(2)) // n/2 + nls9 := bin(nn, ir.OLSH, liti(9)) // n<<9 + nrs2 := bin(nn, ir.ORSH, liti(2)) // n>>2 + nan7 := bin(nn, ir.OANDNOT, liti(7)) // n&^7 + c1xor := bin(np1, ir.OXOR, nm3) + c2mul := bin(c1xor, ir.OMUL, nd2) + c3add := bin(c2mul, ir.OADD, nls9) + c4add := bin(c3add, ir.OADD, nrs2) + c5sub := bin(c4add, ir.OSUB, nan7) + top := typecheck.Expr(c5sub) + + wantv 
:= true + v := ShouldFoldIfNameConstant(top, []*ir.Name{nn}) + if v != wantv { + t.Errorf("wanted shouldfold(%v) %v, got %v", top, wantv, v) + } +} + +func TestClassifyAssortedShifts(t *testing.T) { + + s := mkstate() + nn := s.nmi64("n") + badcases := []ir.Node{ + bin(liti(3), ir.OLSH, nn), // 3<>n + } + for _, bc := range badcases { + wantv := false + v := ShouldFoldIfNameConstant(typecheck.Expr(bc), []*ir.Name{nn}) + if v != wantv { + t.Errorf("wanted shouldfold(%v) %v, got %v", bc, wantv, v) + } + } +} + +func TestClassifyFloat(t *testing.T) { + // float32(n) + float32(10) + s := mkstate() + nn := s.nm("n", types.Types[types.TUINT32]) + f1 := conv(nn, types.Types[types.TFLOAT32]) + f2 := conv(liti(10), types.Types[types.TFLOAT32]) + add := bin(f1, ir.OADD, f2) + + wantv := false + v := ShouldFoldIfNameConstant(typecheck.Expr(add), []*ir.Name{nn}) + if v != wantv { + t.Errorf("wanted shouldfold(%v) %v, got %v", add, wantv, v) + } +} + +func TestMultipleNamesAllUsed(t *testing.T) { + // n != 101 && m < 2 + s := mkstate() + nn := s.nmi64("n") + nm := s.nmi64("m") + nne101 := bin(nn, ir.ONE, liti(101)) // n != 101 + mlt2 := bin(nm, ir.OLT, liti(2)) // m < 2 + nandand := typecheck.Expr(logical(nne101, ir.OANDAND, mlt2)) + + // all names used + wantv := true + v := ShouldFoldIfNameConstant(nandand, []*ir.Name{nn, nm}) + if v != wantv { + t.Errorf("wanted shouldfold(%v) %v, got %v", nandand, wantv, v) + } + + // not all names used + wantv = false + v = ShouldFoldIfNameConstant(nne101, []*ir.Name{nn, nm}) + if v != wantv { + t.Errorf("wanted shouldfold(%v) %v, got %v", nne101, wantv, v) + } + + // other names used. 
+ np := s.nmi64("p") + pne0 := bin(np, ir.ONE, liti(101)) // p != 0 + noror := logical(nandand, ir.OOROR, pne0) + wantv = false + v = ShouldFoldIfNameConstant(noror, []*ir.Name{nn, nm}) + if v != wantv { + t.Errorf("wanted shouldfold(%v) %v, got %v", noror, wantv, v) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/trace_off.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/trace_off.go new file mode 100644 index 0000000000000000000000000000000000000000..9eea7fa3692e12af6e32feb7f003a29292d0aa2d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/trace_off.go @@ -0,0 +1,18 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !debugtrace + +package inlheur + +const debugTrace = 0 + +func enableDebugTrace(x int) { +} + +func enableDebugTraceIfEnv() { +} + +func disableDebugTrace() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/trace_on.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/trace_on.go new file mode 100644 index 0000000000000000000000000000000000000000..160842905fe45142b24848870e4698785d3aa53e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/trace_on.go @@ -0,0 +1,40 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build debugtrace + +package inlheur + +import ( + "os" + "strconv" +) + +var debugTrace = 0 + +func enableDebugTrace(x int) { + debugTrace = x +} + +func enableDebugTraceIfEnv() { + v := os.Getenv("DEBUG_TRACE_INLHEUR") + if v == "" { + return + } + if v[0] == '*' { + if !UnitTesting() { + return + } + v = v[1:] + } + i, err := strconv.Atoi(v) + if err != nil { + return + } + debugTrace = i +} + +func disableDebugTrace() { + debugTrace = 0 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/tserial_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/tserial_test.go new file mode 100644 index 0000000000000000000000000000000000000000..def12f5aafef00d714df3e2fe8cee6350bb2ae44 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/inlheur/tserial_test.go @@ -0,0 +1,65 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package inlheur + +import "testing" + +func fpeq(fp1, fp2 FuncProps) bool { + if fp1.Flags != fp2.Flags { + return false + } + if len(fp1.ParamFlags) != len(fp2.ParamFlags) { + return false + } + for i := range fp1.ParamFlags { + if fp1.ParamFlags[i] != fp2.ParamFlags[i] { + return false + } + } + if len(fp1.ResultFlags) != len(fp2.ResultFlags) { + return false + } + for i := range fp1.ResultFlags { + if fp1.ResultFlags[i] != fp2.ResultFlags[i] { + return false + } + } + return true +} + +func TestSerDeser(t *testing.T) { + testcases := []FuncProps{ + FuncProps{}, + FuncProps{ + Flags: 0xfffff, + }, + FuncProps{ + Flags: 1, + ResultFlags: []ResultPropBits{ResultAlwaysSameConstant}, + }, + FuncProps{ + Flags: 1, + ParamFlags: []ParamPropBits{0x99, 0xaa, 0xfffff}, + ResultFlags: []ResultPropBits{0xfeedface}, + }, + } + + for k, tc := range testcases { + s := tc.SerializeToString() + fp := DeserializeFromString(s) + got := fp.String() + want := tc.String() + if !fpeq(*fp, tc) { + t.Errorf("eq check failed for test %d: got:\n%s\nwant:\n%s\n", k, got, want) + } + } + + var nilt *FuncProps + ns := nilt.SerializeToString() + nfp := DeserializeFromString(ns) + if len(ns) != 0 || nfp != nil { + t.Errorf("nil serialize/deserialize failed") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/interleaved/interleaved.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/interleaved/interleaved.go new file mode 100644 index 0000000000000000000000000000000000000000..a6f19d470d9895134cd541b5b7f46c3b2c034b27 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/inline/interleaved/interleaved.go @@ -0,0 +1,132 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package interleaved implements the interleaved devirtualization and +// inlining pass. 
+package interleaved + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/devirtualize" + "cmd/compile/internal/inline" + "cmd/compile/internal/inline/inlheur" + "cmd/compile/internal/ir" + "cmd/compile/internal/pgo" + "cmd/compile/internal/typecheck" + "fmt" +) + +// DevirtualizeAndInlinePackage interleaves devirtualization and inlining on +// all functions within pkg. +func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgo.Profile) { + if profile != nil && base.Debug.PGODevirtualize > 0 { + // TODO(mdempsky): Integrate into DevirtualizeAndInlineFunc below. + ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) { + for _, fn := range list { + devirtualize.ProfileGuided(fn, profile) + } + }) + ir.CurFunc = nil + } + + if base.Flag.LowerL != 0 { + inlheur.SetupScoreAdjustments() + } + + var inlProfile *pgo.Profile // copy of profile for inlining + if base.Debug.PGOInline != 0 { + inlProfile = profile + } + if inlProfile != nil { + inline.PGOInlinePrologue(inlProfile, pkg.Funcs) + } + + ir.VisitFuncsBottomUp(pkg.Funcs, func(funcs []*ir.Func, recursive bool) { + // We visit functions within an SCC in fairly arbitrary order, + // so by computing inlinability for all functions in the SCC + // before performing any inlining, the results are less + // sensitive to the order within the SCC (see #58905 for an + // example). + + // First compute inlinability for all functions in the SCC ... + inline.CanInlineSCC(funcs, recursive, inlProfile) + + // ... then make a second pass to do devirtualization and inlining + // of calls. + for _, fn := range funcs { + DevirtualizeAndInlineFunc(fn, inlProfile) + } + }) + + if base.Flag.LowerL != 0 { + // Perform a garbage collection of hidden closures functions that + // are no longer reachable from top-level functions following + // inlining. See #59404 and #59638 for more context. 
+ inline.GarbageCollectUnreferencedHiddenClosures() + + if base.Debug.DumpInlFuncProps != "" { + inlheur.DumpFuncProps(nil, base.Debug.DumpInlFuncProps) + } + if inlheur.Enabled() { + inline.PostProcessCallSites(inlProfile) + inlheur.TearDown() + } + } +} + +// DevirtualizeAndInlineFunc interleaves devirtualization and inlining +// on a single function. +func DevirtualizeAndInlineFunc(fn *ir.Func, profile *pgo.Profile) { + ir.WithFunc(fn, func() { + if base.Flag.LowerL != 0 { + if inlheur.Enabled() && !fn.Wrapper() { + inlheur.ScoreCalls(fn) + defer inlheur.ScoreCallsCleanup() + } + if base.Debug.DumpInlFuncProps != "" && !fn.Wrapper() { + inlheur.DumpFuncProps(fn, base.Debug.DumpInlFuncProps) + } + } + + bigCaller := base.Flag.LowerL != 0 && inline.IsBigFunc(fn) + if bigCaller && base.Flag.LowerM > 1 { + fmt.Printf("%v: function %v considered 'big'; reducing max cost of inlinees\n", ir.Line(fn), fn) + } + + // Walk fn's body and apply devirtualization and inlining. + var inlCalls []*ir.InlinedCallExpr + var edit func(ir.Node) ir.Node + edit = func(n ir.Node) ir.Node { + switch n := n.(type) { + case *ir.TailCallStmt: + n.Call.NoInline = true // can't inline yet + } + + ir.EditChildren(n, edit) + + if call, ok := n.(*ir.CallExpr); ok { + devirtualize.StaticCall(call) + + if inlCall := inline.TryInlineCall(fn, call, bigCaller, profile); inlCall != nil { + inlCalls = append(inlCalls, inlCall) + n = inlCall + } + } + + return n + } + ir.EditChildren(fn, edit) + + // If we inlined any calls, we want to recursively visit their + // bodies for further devirtualization and inlining. However, we + // need to wait until *after* the original function body has been + // expanded, or else inlCallee can have false positives (e.g., + // #54632). 
+ for len(inlCalls) > 0 { + call := inlCalls[0] + inlCalls = inlCalls[1:] + ir.EditChildren(call, edit) + } + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/for_complicated_esc_address.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/for_complicated_esc_address.go new file mode 100644 index 0000000000000000000000000000000000000000..c658340fd5d7a6ef3fadf5504103639d47489791 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/for_complicated_esc_address.go @@ -0,0 +1,115 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "os" +) + +func main() { + ss, sa := shared(23) + ps, pa := private(23) + es, ea := experiment(23) + + fmt.Printf("shared s, a; private, s, a; experiment s, a = %d, %d; %d, %d; %d, %d\n", ss, sa, ps, pa, es, ea) + + if ss != ps || ss != es || ea != pa || sa == pa { + os.Exit(11) + } else { + fmt.Println("PASS") + } +} + +func experiment(x int) (int, int) { + sum := 0 + var is []*int + for i := x; i != 1; i = i / 2 { + for j := 0; j < 10; j++ { + if i == j { // 10 skips + continue + } + sum++ + } + i = i*3 + 1 + if i&1 == 0 { + is = append(is, &i) + for i&2 == 0 { + i = i >> 1 + } + } else { + i = i + i + } + } + + asum := 0 + for _, pi := range is { + asum += *pi + } + + return sum, asum +} + +func private(x int) (int, int) { + sum := 0 + var is []*int + I := x + for ; I != 1; I = I / 2 { + i := I + for j := 0; j < 10; j++ { + if i == j { // 10 skips + I = i + continue + } + sum++ + } + i = i*3 + 1 + if i&1 == 0 { + is = append(is, &i) + for i&2 == 0 { + i = i >> 1 + } + } else { + i = i + i + } + I = i + } + + asum := 0 + for _, pi := range is { + asum += *pi + } + + return sum, asum +} + +func shared(x int) (int, int) { + sum := 0 + var is []*int + i := x + for ; i != 1; i = i / 2 { + for j 
:= 0; j < 10; j++ { + if i == j { // 10 skips + continue + } + sum++ + } + i = i*3 + 1 + if i&1 == 0 { + is = append(is, &i) + for i&2 == 0 { + i = i >> 1 + } + } else { + i = i + i + } + } + + asum := 0 + for _, pi := range is { + asum += *pi + } + return sum, asum +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/for_esc_address.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/for_esc_address.go new file mode 100644 index 0000000000000000000000000000000000000000..beaefb10ab460b55510f209bced21eeef95efb92 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/for_esc_address.go @@ -0,0 +1,45 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "os" +) + +func main() { + sum := 0 + var is []*int + for i := 0; i < 10; i++ { + for j := 0; j < 10; j++ { + if i == j { // 10 skips + continue + } + sum++ + } + if i&1 == 0 { + is = append(is, &i) + } + } + + bug := false + if sum != 100-10 { + fmt.Printf("wrong sum, expected %d, saw %d\n", 90, sum) + bug = true + } + sum = 0 + for _, pi := range is { + sum += *pi + } + if sum != 2+4+6+8 { + fmt.Printf("wrong sum, expected %d, saw %d\n", 20, sum) + bug = true + } + if !bug { + fmt.Printf("PASS\n") + } else { + os.Exit(11) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/for_esc_closure.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/for_esc_closure.go new file mode 100644 index 0000000000000000000000000000000000000000..b60d0007bd4dd7dd79bf6fc5a39bd3b04bb24658 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/for_esc_closure.go @@ -0,0 +1,51 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "os" +) + +var is []func() int + +func main() { + sum := 0 + for i := 0; i < 10; i++ { + for j := 0; j < 10; j++ { + if i == j { // 10 skips + continue + } + sum++ + } + if i&1 == 0 { + is = append(is, func() int { + if i%17 == 15 { + i++ + } + return i + }) + } + } + + bug := false + if sum != 100-10 { + fmt.Printf("wrong sum, expected %d, saw %d\n", 90, sum) + bug = true + } + sum = 0 + for _, f := range is { + sum += f() + } + if sum != 2+4+6+8 { + fmt.Printf("wrong sum, expected %d, saw %d\n", 20, sum) + bug = true + } + if !bug { + fmt.Printf("PASS\n") + } else { + os.Exit(11) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/for_esc_method.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/for_esc_method.go new file mode 100644 index 0000000000000000000000000000000000000000..0e2f8017bc7b3b8a7dcf14bfeec24ebb8bccedc0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/for_esc_method.go @@ -0,0 +1,51 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" + "os" +) + +type I int + +func (x *I) method() int { + return int(*x) +} + +func main() { + sum := 0 + var is []func() int + for i := I(0); int(i) < 10; i++ { + for j := 0; j < 10; j++ { + if int(i) == j { // 10 skips + continue + } + sum++ + } + if i&1 == 0 { + is = append(is, i.method) + } + } + + bug := false + if sum != 100-10 { + fmt.Printf("wrong sum, expected %d, saw %d\n", 90, sum) + bug = true + } + sum = 0 + for _, m := range is { + sum += m() + } + if sum != 2+4+6+8 { + fmt.Printf("wrong sum, expected %d, saw %d\n", 20, sum) + bug = true + } + if !bug { + fmt.Printf("PASS\n") + } else { + os.Exit(11) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/for_esc_minimal_closure.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/for_esc_minimal_closure.go new file mode 100644 index 0000000000000000000000000000000000000000..971c91dde1f1f666a8ef48b00ed2360df89ad4d2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/for_esc_minimal_closure.go @@ -0,0 +1,48 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" + "os" +) + +var is []func() int + +func main() { + sum := 0 + for i := 0; i < 10; i++ { + for j := 0; j < 10; j++ { + if i == j { // 10 skips + continue + } + sum++ + } + if i&1 == 0 { + is = append(is, func() int { + return i + }) + } + } + + bug := false + if sum != 100-10 { + fmt.Printf("wrong sum, expected %d, saw %d\n", 90, sum) + bug = true + } + sum = 0 + for _, f := range is { + sum += f() + } + if sum != 2+4+6+8 { + fmt.Printf("wrong sum, expected %d, saw %d\n", 20, sum) + bug = true + } + if !bug { + fmt.Printf("PASS\n") + } else { + os.Exit(11) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/for_nested.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/for_nested.go new file mode 100644 index 0000000000000000000000000000000000000000..4888fabc4570c086c989e61c8f71e9a7c2dee496 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/for_nested.go @@ -0,0 +1,47 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" + "os" +) + +func main() { + x := f(60) + fmt.Println(x) + if x != 54 { + os.Exit(11) + } +} + +var escape *int + +func f(i int) int { + a := 0 +outer: + for { + switch { + case i > 55: + i-- + continue + case i == 55: + for j := i; j != 1; j = j / 2 { + a++ + if j == 4 { + escape = &j + i-- + continue outer + } + if j&1 == 1 { + j = 2 * (3*j + 1) + } + } + return a + case i < 55: + return i + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/inlines/a/a.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/inlines/a/a.go new file mode 100644 index 0000000000000000000000000000000000000000..0bae36dafff85f7e05e587427a226b0931184f01 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/inlines/a/a.go @@ -0,0 +1,20 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package a + +import "cmd/compile/internal/loopvar/testdata/inlines/b" + +func F() []*int { + var s []*int + for i := 0; i < 10; i++ { + s = append(s, &i) + } + return s +} + +func Fb() []*int { + bf, _ := b.F() + return bf +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/inlines/b/b.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/inlines/b/b.go new file mode 100644 index 0000000000000000000000000000000000000000..7b1d8cede11d147747498fedab125aa385865146 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/inlines/b/b.go @@ -0,0 +1,21 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package b + +var slice = []int{1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024} + +func F() ([]*int, []*int) { + return g() +} + +func g() ([]*int, []*int) { + var s []*int + var t []*int + for i, j := range slice { + s = append(s, &i) + t = append(t, &j) + } + return s[:len(s)-1], t +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/inlines/c/c.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/inlines/c/c.go new file mode 100644 index 0000000000000000000000000000000000000000..0405ace9feca3873c520e9ab4a737b7e67fb16d4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/inlines/c/c.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package c + +//go:noinline +func F() []*int { + var s []*int + for i := 0; i < 10; i++ { + s = append(s, &i) + } + return s +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/inlines/main.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/inlines/main.go new file mode 100644 index 0000000000000000000000000000000000000000..46fcee1a6db426f4c21bff4c6203868967ef3920 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/inlines/main.go @@ -0,0 +1,53 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "cmd/compile/internal/loopvar/testdata/inlines/a" + "cmd/compile/internal/loopvar/testdata/inlines/b" + "cmd/compile/internal/loopvar/testdata/inlines/c" + "fmt" + "os" +) + +func sum(s []*int) int { + sum := 0 + for _, pi := range s { + sum += *pi + } + return sum +} + +var t []*int + +func F() []*int { + var s []*int + for i, j := 0, 0; j < 10; i, j = i+1, j+1 { + s = append(s, &i) + t = append(s, &j) + } + return s +} + +func main() { + f := F() + af := a.F() + bf, _ := b.F() + abf := a.Fb() + cf := c.F() + + sf, saf, sbf, sabf, scf := sum(f), sum(af), sum(bf), sum(abf), sum(cf) + + fmt.Printf("f, af, bf, abf, cf sums = %d, %d, %d, %d, %d\n", sf, saf, sbf, sabf, scf) + + // Special failure just for use with hash searching, to prove it fires exactly once. + // To test: `gossahash -e loopvarhash go run .` in this directory. + // This is designed to fail in two different ways, because gossahash searches randomly + // it will find both failures over time. + if os.Getenv("GOCOMPILEDEBUG") != "" && (sabf == 45 || sf == 45) { + os.Exit(11) + } + os.Exit(0) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/opt-121.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/opt-121.go new file mode 100644 index 0000000000000000000000000000000000000000..4afb658fc8d1daf5c95c9024c0fadc55557b86c2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/opt-121.go @@ -0,0 +1,43 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.21 + +package main + +import ( + "fmt" + "os" +) + +var is []func() int + +func inline(j, k int) []*int { + var a []*int + for private := j; private < k; private++ { + a = append(a, &private) + } + return a +} + +//go:noinline +func notinline(j, k int) ([]*int, *int) { + for shared := j; shared < k; shared++ { + if shared == k/2 { + // want the call inlined, want "private" in that inline to be transformed, + // (believe it ends up on init node of the return). + // but do not want "shared" transformed, + return inline(j, k), &shared + } + } + return nil, &j +} + +func main() { + a, p := notinline(2, 9) + fmt.Printf("a[0]=%d,*p=%d\n", *a[0], *p) + if *a[0] != 2 { + os.Exit(1) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/opt-122.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/opt-122.go new file mode 100644 index 0000000000000000000000000000000000000000..9dceab91752fdbec40d9bbc7eaddbe4fbee02972 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/opt-122.go @@ -0,0 +1,43 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 + +package main + +import ( + "fmt" + "os" +) + +var is []func() int + +func inline(j, k int) []*int { + var a []*int + for private := j; private < k; private++ { + a = append(a, &private) + } + return a +} + +//go:noinline +func notinline(j, k int) ([]*int, *int) { + for shared := j; shared < k; shared++ { + if shared == k/2 { + // want the call inlined, want "private" in that inline to be transformed, + // (believe it ends up on init node of the return). 
+ // but do not want "shared" transformed, + return inline(j, k), &shared + } + } + return nil, &j +} + +func main() { + a, p := notinline(2, 9) + fmt.Printf("a[0]=%d,*p=%d\n", *a[0], *p) + if *a[0] != 2 { + os.Exit(1) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/opt.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/opt.go new file mode 100644 index 0000000000000000000000000000000000000000..82c8616bcd367628a6d72d29aa9c573ed38abb24 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/opt.go @@ -0,0 +1,41 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "os" +) + +var is []func() int + +func inline(j, k int) []*int { + var a []*int + for private := j; private < k; private++ { + a = append(a, &private) + } + return a +} + +//go:noinline +func notinline(j, k int) ([]*int, *int) { + for shared := j; shared < k; shared++ { + if shared == k/2 { + // want the call inlined, want "private" in that inline to be transformed, + // (believe it ends up on init node of the return). + // but do not want "shared" transformed, + return inline(j, k), &shared + } + } + return nil, &j +} + +func main() { + a, p := notinline(2, 9) + fmt.Printf("a[0]=%d,*p=%d\n", *a[0], *p) + if *a[0] != 2 { + os.Exit(1) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/range_esc_address.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/range_esc_address.go new file mode 100644 index 0000000000000000000000000000000000000000..79d7f04a0c012d787e965fda12b697ce985044da --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/range_esc_address.go @@ -0,0 +1,47 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "os" +) + +var ints = []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} + +func main() { + sum := 0 + var is []*int + for _, i := range ints { + for j := 0; j < 10; j++ { + if i == j { // 10 skips + continue + } + sum++ + } + if i&1 == 0 { + is = append(is, &i) + } + } + + bug := false + if sum != 100-10 { + fmt.Printf("wrong sum, expected %d, saw %d\n", 90, sum) + bug = true + } + sum = 0 + for _, pi := range is { + sum += *pi + } + if sum != 2+4+6+8 { + fmt.Printf("wrong sum, expected %d, saw %d\n", 20, sum) + bug = true + } + if !bug { + fmt.Printf("PASS\n") + } else { + os.Exit(11) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/range_esc_closure.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/range_esc_closure.go new file mode 100644 index 0000000000000000000000000000000000000000..9bcb5efb09ad272cbb094ab2f54f868e5b9b3dfe --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/range_esc_closure.go @@ -0,0 +1,53 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" + "os" +) + +var is []func() int + +var ints = []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} + +func main() { + sum := 0 + for _, i := range ints { + for j := 0; j < 10; j++ { + if i == j { // 10 skips + continue + } + sum++ + } + if i&1 == 0 { + is = append(is, func() int { + if i%17 == 15 { + i++ + } + return i + }) + } + } + + bug := false + if sum != 100-10 { + fmt.Printf("wrong sum, expected %d, saw %d\n", 90, sum) + bug = true + } + sum = 0 + for _, f := range is { + sum += f() + } + if sum != 2+4+6+8 { + fmt.Printf("wrong sum, expected %d, saw %d\n", 20, sum) + bug = true + } + if !bug { + fmt.Printf("PASS\n") + } else { + os.Exit(11) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/range_esc_method.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/range_esc_method.go new file mode 100644 index 0000000000000000000000000000000000000000..9a85ab02d36375fc75276f93275c129a381b52cd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/range_esc_method.go @@ -0,0 +1,53 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" + "os" +) + +type I int + +func (x *I) method() int { + return int(*x) +} + +var ints = []I{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} + +func main() { + sum := 0 + var is []func() int + for _, i := range ints { + for j := 0; j < 10; j++ { + if int(i) == j { // 10 skips + continue + } + sum++ + } + if i&1 == 0 { + is = append(is, i.method) + } + } + + bug := false + if sum != 100-10 { + fmt.Printf("wrong sum, expected %d, saw %d\n", 90, sum) + bug = true + } + sum = 0 + for _, m := range is { + sum += m() + } + if sum != 2+4+6+8 { + fmt.Printf("wrong sum, expected %d, saw %d\n", 20, sum) + bug = true + } + if !bug { + fmt.Printf("PASS\n") + } else { + os.Exit(11) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/range_esc_minimal_closure.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/range_esc_minimal_closure.go new file mode 100644 index 0000000000000000000000000000000000000000..8804d8b78946ce002b6d1bf51cbb85127984ecf5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/loopvar/testdata/range_esc_minimal_closure.go @@ -0,0 +1,50 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" + "os" +) + +var is []func() int + +var ints = []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} + +func main() { + sum := 0 + for _, i := range ints { + for j := 0; j < 10; j++ { + if i == j { // 10 skips + continue + } + sum++ + } + if i&1 == 0 { + is = append(is, func() int { + return i + }) + } + } + + bug := false + if sum != 100-10 { + fmt.Printf("wrong sum, expected %d, saw %d\n", 90, sum) + bug = true + } + sum = 0 + for _, f := range is { + sum += f() + } + if sum != 2+4+6+8 { + fmt.Printf("wrong sum, expected %d, saw %d\n", 20, sum) + bug = true + } + if !bug { + fmt.Printf("PASS\n") + } else { + os.Exit(11) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/pgo/internal/graph/graph.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/pgo/internal/graph/graph.go new file mode 100644 index 0000000000000000000000000000000000000000..4d89b1ba63af809c1e1171602822299dca351d46 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/pgo/internal/graph/graph.go @@ -0,0 +1,520 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package graph represents a pprof profile as a directed graph. +// +// This package is a simplified fork of github.com/google/pprof/internal/graph. 
+package graph + +import ( + "fmt" + "internal/profile" + "sort" + "strings" +) + +// Options encodes the options for constructing a graph +type Options struct { + SampleValue func(s []int64) int64 // Function to compute the value of a sample + SampleMeanDivisor func(s []int64) int64 // Function to compute the divisor for mean graphs, or nil + + DropNegative bool // Drop nodes with overall negative values + + KeptNodes NodeSet // If non-nil, only use nodes in this set +} + +// Nodes is an ordered collection of graph nodes. +type Nodes []*Node + +// Node is an entry on a profiling report. It represents a unique +// program location. +type Node struct { + // Info describes the source location associated to this node. + Info NodeInfo + + // Function represents the function that this node belongs to. On + // graphs with sub-function resolution (eg line number or + // addresses), two nodes in a NodeMap that are part of the same + // function have the same value of Node.Function. If the Node + // represents the whole function, it points back to itself. + Function *Node + + // Values associated to this node. Flat is exclusive to this node, + // Cum includes all descendents. + Flat, FlatDiv, Cum, CumDiv int64 + + // In and out Contains the nodes immediately reaching or reached by + // this node. + In, Out EdgeMap +} + +// Graph summarizes a performance profile into a format that is +// suitable for visualization. +type Graph struct { + Nodes Nodes +} + +// FlatValue returns the exclusive value for this node, computing the +// mean if a divisor is available. +func (n *Node) FlatValue() int64 { + if n.FlatDiv == 0 { + return n.Flat + } + return n.Flat / n.FlatDiv +} + +// CumValue returns the inclusive value for this node, computing the +// mean if a divisor is available. +func (n *Node) CumValue() int64 { + if n.CumDiv == 0 { + return n.Cum + } + return n.Cum / n.CumDiv +} + +// AddToEdge increases the weight of an edge between two nodes. 
If +// there isn't such an edge one is created. +func (n *Node) AddToEdge(to *Node, v int64, residual, inline bool) { + n.AddToEdgeDiv(to, 0, v, residual, inline) +} + +// AddToEdgeDiv increases the weight of an edge between two nodes. If +// there isn't such an edge one is created. +func (n *Node) AddToEdgeDiv(to *Node, dv, v int64, residual, inline bool) { + if e := n.Out.FindTo(to); e != nil { + e.WeightDiv += dv + e.Weight += v + if residual { + e.Residual = true + } + if !inline { + e.Inline = false + } + return + } + + info := &Edge{Src: n, Dest: to, WeightDiv: dv, Weight: v, Residual: residual, Inline: inline} + n.Out.Add(info) + to.In.Add(info) +} + +// NodeInfo contains the attributes for a node. +type NodeInfo struct { + Name string + Address uint64 + StartLine, Lineno int +} + +// PrintableName calls the Node's Formatter function with a single space separator. +func (i *NodeInfo) PrintableName() string { + return strings.Join(i.NameComponents(), " ") +} + +// NameComponents returns the components of the printable name to be used for a node. +func (i *NodeInfo) NameComponents() []string { + var name []string + if i.Address != 0 { + name = append(name, fmt.Sprintf("%016x", i.Address)) + } + if fun := i.Name; fun != "" { + name = append(name, fun) + } + + switch { + case i.Lineno != 0: + // User requested line numbers, provide what we have. + name = append(name, fmt.Sprintf(":%d", i.Lineno)) + case i.Name != "": + // User requested function name. It was already included. + default: + // Do not leave it empty if there is no information at all. + name = append(name, "") + } + return name +} + +// NodeMap maps from a node info struct to a node. It is used to merge +// report entries with the same info. +type NodeMap map[NodeInfo]*Node + +// NodeSet is a collection of node info structs. +type NodeSet map[NodeInfo]bool + +// NodePtrSet is a collection of nodes. Trimming a graph or tree requires a set +// of objects which uniquely identify the nodes to keep. 
In a graph, NodeInfo +// works as a unique identifier; however, in a tree multiple nodes may share +// identical NodeInfos. A *Node does uniquely identify a node so we can use that +// instead. Though a *Node also uniquely identifies a node in a graph, +// currently, during trimming, graphs are rebuilt from scratch using only the +// NodeSet, so there would not be the required context of the initial graph to +// allow for the use of *Node. +type NodePtrSet map[*Node]bool + +// FindOrInsertNode takes the info for a node and either returns a matching node +// from the node map if one exists, or adds one to the map if one does not. +// If kept is non-nil, nodes are only added if they can be located on it. +func (nm NodeMap) FindOrInsertNode(info NodeInfo, kept NodeSet) *Node { + if kept != nil { + if _, ok := kept[info]; !ok { + return nil + } + } + + if n, ok := nm[info]; ok { + return n + } + + n := &Node{ + Info: info, + } + nm[info] = n + if info.Address == 0 && info.Lineno == 0 { + // This node represents the whole function, so point Function + // back to itself. + n.Function = n + return n + } + // Find a node that represents the whole function. + info.Address = 0 + info.Lineno = 0 + n.Function = nm.FindOrInsertNode(info, nil) + return n +} + +// EdgeMap is used to represent the incoming/outgoing edges from a node. +type EdgeMap []*Edge + +func (em EdgeMap) FindTo(n *Node) *Edge { + for _, e := range em { + if e.Dest == n { + return e + } + } + return nil +} + +func (em *EdgeMap) Add(e *Edge) { + *em = append(*em, e) +} + +func (em *EdgeMap) Delete(e *Edge) { + for i, edge := range *em { + if edge == e { + (*em)[i] = (*em)[len(*em)-1] + *em = (*em)[:len(*em)-1] + return + } + } +} + +// Edge contains any attributes to be represented about edges in a graph. 
+type Edge struct { + Src, Dest *Node + // The summary weight of the edge + Weight, WeightDiv int64 + + // residual edges connect nodes that were connected through a + // separate node, which has been removed from the report. + Residual bool + // An inline edge represents a call that was inlined into the caller. + Inline bool +} + +// WeightValue returns the weight value for this edge, normalizing if a +// divisor is available. +func (e *Edge) WeightValue() int64 { + if e.WeightDiv == 0 { + return e.Weight + } + return e.Weight / e.WeightDiv +} + +// NewGraph computes a graph from a profile. +func NewGraph(prof *profile.Profile, o *Options) *Graph { + nodes, locationMap := CreateNodes(prof, o) + seenNode := make(map[*Node]bool) + seenEdge := make(map[nodePair]bool) + for _, sample := range prof.Sample { + var w, dw int64 + w = o.SampleValue(sample.Value) + if o.SampleMeanDivisor != nil { + dw = o.SampleMeanDivisor(sample.Value) + } + if dw == 0 && w == 0 { + continue + } + for k := range seenNode { + delete(seenNode, k) + } + for k := range seenEdge { + delete(seenEdge, k) + } + var parent *Node + // A residual edge goes over one or more nodes that were not kept. + residual := false + + // Group the sample frames, based on a global map. + // Count only the last two frames as a call edge. Frames higher up + // the stack are unlikely to be repeated calls (e.g. runtime.main + // calling main.main). So adding weights to call edges higher up + // the stack may be not reflecting the actual call edge weights + // in the program. Without a branch profile this is just an + // approximation. + i := 1 + if last := len(sample.Location) - 1; last < i { + i = last + } + for ; i >= 0; i-- { + l := sample.Location[i] + locNodes := locationMap.get(l.ID) + for ni := len(locNodes) - 1; ni >= 0; ni-- { + n := locNodes[ni] + if n == nil { + residual = true + continue + } + // Add cum weight to all nodes in stack, avoiding double counting. 
+ _, sawNode := seenNode[n] + if !sawNode { + seenNode[n] = true + n.addSample(dw, w, false) + } + // Update edge weights for all edges in stack, avoiding double counting. + if (!sawNode || !seenEdge[nodePair{n, parent}]) && parent != nil && n != parent { + seenEdge[nodePair{n, parent}] = true + parent.AddToEdgeDiv(n, dw, w, residual, ni != len(locNodes)-1) + } + + parent = n + residual = false + } + } + if parent != nil && !residual { + // Add flat weight to leaf node. + parent.addSample(dw, w, true) + } + } + + return selectNodesForGraph(nodes, o.DropNegative) +} + +func selectNodesForGraph(nodes Nodes, dropNegative bool) *Graph { + // Collect nodes into a graph. + gNodes := make(Nodes, 0, len(nodes)) + for _, n := range nodes { + if n == nil { + continue + } + if n.Cum == 0 && n.Flat == 0 { + continue + } + if dropNegative && isNegative(n) { + continue + } + gNodes = append(gNodes, n) + } + return &Graph{gNodes} +} + +type nodePair struct { + src, dest *Node +} + +// isNegative returns true if the node is considered as "negative" for the +// purposes of drop_negative. +func isNegative(n *Node) bool { + switch { + case n.Flat < 0: + return true + case n.Flat == 0 && n.Cum < 0: + return true + default: + return false + } +} + +type locationMap struct { + s []Nodes // a slice for small sequential IDs + m map[uint64]Nodes // fallback for large IDs (unlikely) +} + +func (l *locationMap) add(id uint64, n Nodes) { + if id < uint64(len(l.s)) { + l.s[id] = n + } else { + l.m[id] = n + } +} + +func (l locationMap) get(id uint64) Nodes { + if id < uint64(len(l.s)) { + return l.s[id] + } else { + return l.m[id] + } +} + +// CreateNodes creates graph nodes for all locations in a profile. It +// returns set of all nodes, plus a mapping of each location to the +// set of corresponding nodes (one per location.Line). 
+func CreateNodes(prof *profile.Profile, o *Options) (Nodes, locationMap) { + locations := locationMap{make([]Nodes, len(prof.Location)+1), make(map[uint64]Nodes)} + nm := make(NodeMap, len(prof.Location)) + for _, l := range prof.Location { + lines := l.Line + if len(lines) == 0 { + lines = []profile.Line{{}} // Create empty line to include location info. + } + nodes := make(Nodes, len(lines)) + for ln := range lines { + nodes[ln] = nm.findOrInsertLine(l, lines[ln], o) + } + locations.add(l.ID, nodes) + } + return nm.nodes(), locations +} + +func (nm NodeMap) nodes() Nodes { + nodes := make(Nodes, 0, len(nm)) + for _, n := range nm { + nodes = append(nodes, n) + } + return nodes +} + +func (nm NodeMap) findOrInsertLine(l *profile.Location, li profile.Line, o *Options) *Node { + var objfile string + if m := l.Mapping; m != nil && m.File != "" { + objfile = m.File + } + + if ni := nodeInfo(l, li, objfile, o); ni != nil { + return nm.FindOrInsertNode(*ni, o.KeptNodes) + } + return nil +} + +func nodeInfo(l *profile.Location, line profile.Line, objfile string, o *Options) *NodeInfo { + if line.Function == nil { + return &NodeInfo{Address: l.Address} + } + ni := &NodeInfo{ + Address: l.Address, + Lineno: int(line.Line), + Name: line.Function.Name, + } + ni.StartLine = int(line.Function.StartLine) + return ni +} + +// Sum adds the flat and cum values of a set of nodes. +func (ns Nodes) Sum() (flat int64, cum int64) { + for _, n := range ns { + flat += n.Flat + cum += n.Cum + } + return +} + +func (n *Node) addSample(dw, w int64, flat bool) { + // Update sample value + if flat { + n.FlatDiv += dw + n.Flat += w + } else { + n.CumDiv += dw + n.Cum += w + } +} + +// String returns a text representation of a graph, for debugging purposes. 
+func (g *Graph) String() string { + var s []string + + nodeIndex := make(map[*Node]int, len(g.Nodes)) + + for i, n := range g.Nodes { + nodeIndex[n] = i + 1 + } + + for i, n := range g.Nodes { + name := n.Info.PrintableName() + var in, out []int + + for _, from := range n.In { + in = append(in, nodeIndex[from.Src]) + } + for _, to := range n.Out { + out = append(out, nodeIndex[to.Dest]) + } + s = append(s, fmt.Sprintf("%d: %s[flat=%d cum=%d] %x -> %v ", i+1, name, n.Flat, n.Cum, in, out)) + } + return strings.Join(s, "\n") +} + +// Sort returns a slice of the edges in the map, in a consistent +// order. The sort order is first based on the edge weight +// (higher-to-lower) and then by the node names to avoid flakiness. +func (em EdgeMap) Sort() []*Edge { + el := make(edgeList, 0, len(em)) + for _, w := range em { + el = append(el, w) + } + + sort.Sort(el) + return el +} + +// Sum returns the total weight for a set of nodes. +func (em EdgeMap) Sum() int64 { + var ret int64 + for _, edge := range em { + ret += edge.Weight + } + return ret +} + +type edgeList []*Edge + +func (el edgeList) Len() int { + return len(el) +} + +func (el edgeList) Less(i, j int) bool { + if el[i].Weight != el[j].Weight { + return abs64(el[i].Weight) > abs64(el[j].Weight) + } + + from1 := el[i].Src.Info.PrintableName() + from2 := el[j].Src.Info.PrintableName() + if from1 != from2 { + return from1 < from2 + } + + to1 := el[i].Dest.Info.PrintableName() + to2 := el[j].Dest.Info.PrintableName() + + return to1 < to2 +} + +func (el edgeList) Swap(i, j int) { + el[i], el[j] = el[j], el[i] +} + +func abs64(i int64) int64 { + if i < 0 { + return -i + } + return i +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/386.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/386.rules new file mode 100644 index 0000000000000000000000000000000000000000..d92dddd377af337bbc81e714c3164eda6159297e --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/386.rules @@ -0,0 +1,941 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Lowering arithmetic +(Add(Ptr|32|16|8) ...) => (ADDL ...) +(Add(32|64)F ...) => (ADDS(S|D) ...) +(Add32carry ...) => (ADDLcarry ...) +(Add32withcarry ...) => (ADCL ...) + +(Sub(Ptr|32|16|8) ...) => (SUBL ...) +(Sub(32|64)F ...) => (SUBS(S|D) ...) +(Sub32carry ...) => (SUBLcarry ...) +(Sub32withcarry ...) => (SBBL ...) + +(Mul(32|16|8) ...) => (MULL ...) +(Mul(32|64)F ...) => (MULS(S|D) ...) +(Mul32uhilo ...) => (MULLQU ...) + +(Select0 (Mul32uover x y)) => (Select0 (MULLU x y)) +(Select1 (Mul32uover x y)) => (SETO (Select1 (MULLU x y))) + +(Avg32u ...) => (AVGLU ...) + +(Div(32|64)F ...) => (DIVS(S|D) ...) +(Div(32|32u|16|16u) ...) => (DIV(L|LU|W|WU) ...) +(Div8 x y) => (DIVW (SignExt8to16 x) (SignExt8to16 y)) +(Div8u x y) => (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)) + +(Hmul(32|32u) ...) => (HMUL(L|LU) ...) + +(Mod(32|32u|16|16u) ...) => (MOD(L|LU|W|WU) ...) +(Mod8 x y) => (MODW (SignExt8to16 x) (SignExt8to16 y)) +(Mod8u x y) => (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y)) + +(And(32|16|8) ...) => (ANDL ...) +(Or(32|16|8) ...) => (ORL ...) +(Xor(32|16|8) ...) => (XORL ...) + +(Neg(32|16|8) ...) => (NEGL ...) +(Neg32F x) => (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) +(Neg64F x) => (PXOR x (MOVSDconst [math.Copysign(0, -1)])) + +(Com(32|16|8) ...) => (NOTL ...) + +// Lowering boolean ops +(AndB ...) => (ANDL ...) +(OrB ...) => (ORL ...) +(Not x) => (XORLconst [1] x) + +// Lowering pointer arithmetic +(OffPtr [off] ptr) => (ADDLconst [int32(off)] ptr) + +(Bswap32 ...) => (BSWAPL ...) +(Bswap16 x) => (ROLWconst [8] x) + +(Sqrt ...) => (SQRTSD ...) +(Sqrt32 ...) => (SQRTSS ...) + +(Ctz8 x) => (BSFL (ORLconst [0x100] x)) +(Ctz8NonZero ...) => (BSFL ...) 
+(Ctz16 x) => (BSFL (ORLconst [0x10000] x)) +(Ctz16NonZero ...) => (BSFL ...) +(Ctz32 ...) => (LoweredCtz32 ...) +(Ctz32NonZero ...) => (BSFL ...) + +// Lowering extension +(SignExt8to16 ...) => (MOVBLSX ...) +(SignExt8to32 ...) => (MOVBLSX ...) +(SignExt16to32 ...) => (MOVWLSX ...) + +(ZeroExt8to16 ...) => (MOVBLZX ...) +(ZeroExt8to32 ...) => (MOVBLZX ...) +(ZeroExt16to32 ...) => (MOVWLZX ...) + +(Signmask x) => (SARLconst x [31]) +(Zeromask x) => (XORLconst [-1] (SBBLcarrymask (CMPLconst x [1]))) +(Slicemask x) => (SARLconst (NEGL x) [31]) + +// Lowering truncation +// Because we ignore high parts of registers, truncates are just copies. +(Trunc16to8 ...) => (Copy ...) +(Trunc32to8 ...) => (Copy ...) +(Trunc32to16 ...) => (Copy ...) + +// Lowering float-int conversions +(Cvt32to32F ...) => (CVTSL2SS ...) +(Cvt32to64F ...) => (CVTSL2SD ...) + +(Cvt32Fto32 ...) => (CVTTSS2SL ...) +(Cvt64Fto32 ...) => (CVTTSD2SL ...) + +(Cvt32Fto64F ...) => (CVTSS2SD ...) +(Cvt64Fto32F ...) => (CVTSD2SS ...) + +(Round32F ...) => (Copy ...) +(Round64F ...) => (Copy ...) + +(CvtBoolToUint8 ...) => (Copy ...) + +// Lowering shifts +// Unsigned shifts need to return 0 if shift amount is >= width of shifted value. +// result = (arg << shift) & (shift >= argbits ? 
0 : 0xffffffffffffffff) +(Lsh32x(32|16|8) x y) && !shiftIsBounded(v) => (ANDL (SHLL x y) (SBBLcarrymask (CMP(L|W|B)const y [32]))) +(Lsh16x(32|16|8) x y) && !shiftIsBounded(v) => (ANDL (SHLL x y) (SBBLcarrymask (CMP(L|W|B)const y [32]))) +(Lsh8x(32|16|8) x y) && !shiftIsBounded(v) => (ANDL (SHLL x y) (SBBLcarrymask (CMP(L|W|B)const y [32]))) + +(Lsh32x(32|16|8) x y) && shiftIsBounded(v) => (SHLL x y) +(Lsh16x(32|16|8) x y) && shiftIsBounded(v) => (SHLL x y) +(Lsh8x(32|16|8) x y) && shiftIsBounded(v) => (SHLL x y) + +(Rsh32Ux(32|16|8) x y) && !shiftIsBounded(v) => (ANDL (SHRL x y) (SBBLcarrymask (CMP(L|W|B)const y [32]))) +(Rsh16Ux(32|16|8) x y) && !shiftIsBounded(v) => (ANDL (SHRW x y) (SBBLcarrymask (CMP(L|W|B)const y [16]))) +(Rsh8Ux(32|16|8) x y) && !shiftIsBounded(v) => (ANDL (SHRB x y) (SBBLcarrymask (CMP(L|W|B)const y [8]))) + +(Rsh32Ux(32|16|8) x y) && shiftIsBounded(v) => (SHRL x y) +(Rsh16Ux(32|16|8) x y) && shiftIsBounded(v) => (SHRW x y) +(Rsh8Ux(32|16|8) x y) && shiftIsBounded(v) => (SHRB x y) + +// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value. +// We implement this by setting the shift value to -1 (all ones) if the shift value is >= width. 
+ +(Rsh32x(32|16|8) x y) && !shiftIsBounded(v) => (SARL x (ORL y (NOTL (SBBLcarrymask (CMP(L|W|B)const y [32]))))) +(Rsh16x(32|16|8) x y) && !shiftIsBounded(v) => (SARW x (ORL y (NOTL (SBBLcarrymask (CMP(L|W|B)const y [16]))))) +(Rsh8x(32|16|8) x y) && !shiftIsBounded(v) => (SARB x (ORL y (NOTL (SBBLcarrymask (CMP(L|W|B)const y [8]))))) + +(Rsh32x(32|16|8) x y) && shiftIsBounded(v) => (SARL x y) +(Rsh16x(32|16|8) x y) && shiftIsBounded(v) => (SARW x y) +(Rsh8x(32|16|8) x y) && shiftIsBounded(v) => (SARB x y) + +// constant shifts +// generic opt rewrites all constant shifts to shift by Const64 +(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 => (SHLLconst x [int32(c)]) +(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 => (SARLconst x [int32(c)]) +(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 => (SHRLconst x [int32(c)]) +(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 => (SHLLconst x [int32(c)]) +(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 => (SARWconst x [int16(c)]) +(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 => (SHRWconst x [int16(c)]) +(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 => (SHLLconst x [int32(c)]) +(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 => (SARBconst x [int8(c)]) +(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 => (SHRBconst x [int8(c)]) + +// large constant shifts +(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0]) +(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0]) +(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0]) +(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0]) +(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0]) +(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0]) + +// large constant signed right shift, we leave the sign bit +(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 => (SARLconst x [31]) +(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 => (SARWconst x [15]) +(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 => (SARBconst x [7]) + +// rotates +(RotateLeft32 ...) => (ROLL ...) 
+(RotateLeft16 ...) => (ROLW ...) +(RotateLeft8 ...) => (ROLB ...) +// constant rotates +(ROLL x (MOVLconst [c])) => (ROLLconst [c&31] x) +(ROLW x (MOVLconst [c])) => (ROLWconst [int16(c&15)] x) +(ROLB x (MOVLconst [c])) => (ROLBconst [int8(c&7)] x) + +// Lowering comparisons +(Less32 x y) => (SETL (CMPL x y)) +(Less16 x y) => (SETL (CMPW x y)) +(Less8 x y) => (SETL (CMPB x y)) +(Less32U x y) => (SETB (CMPL x y)) +(Less16U x y) => (SETB (CMPW x y)) +(Less8U x y) => (SETB (CMPB x y)) +// Use SETGF with reversed operands to dodge NaN case +(Less64F x y) => (SETGF (UCOMISD y x)) +(Less32F x y) => (SETGF (UCOMISS y x)) + +(Leq32 x y) => (SETLE (CMPL x y)) +(Leq16 x y) => (SETLE (CMPW x y)) +(Leq8 x y) => (SETLE (CMPB x y)) +(Leq32U x y) => (SETBE (CMPL x y)) +(Leq16U x y) => (SETBE (CMPW x y)) +(Leq8U x y) => (SETBE (CMPB x y)) +// Use SETGEF with reversed operands to dodge NaN case +(Leq64F x y) => (SETGEF (UCOMISD y x)) +(Leq32F x y) => (SETGEF (UCOMISS y x)) + +(Eq32 x y) => (SETEQ (CMPL x y)) +(Eq16 x y) => (SETEQ (CMPW x y)) +(Eq8 x y) => (SETEQ (CMPB x y)) +(EqB x y) => (SETEQ (CMPB x y)) +(EqPtr x y) => (SETEQ (CMPL x y)) +(Eq64F x y) => (SETEQF (UCOMISD x y)) +(Eq32F x y) => (SETEQF (UCOMISS x y)) + +(Neq32 x y) => (SETNE (CMPL x y)) +(Neq16 x y) => (SETNE (CMPW x y)) +(Neq8 x y) => (SETNE (CMPB x y)) +(NeqB x y) => (SETNE (CMPB x y)) +(NeqPtr x y) => (SETNE (CMPL x y)) +(Neq64F x y) => (SETNEF (UCOMISD x y)) +(Neq32F x y) => (SETNEF (UCOMISS x y)) + +// Lowering loads +(Load ptr mem) && (is32BitInt(t) || isPtr(t)) => (MOVLload ptr mem) +(Load ptr mem) && is16BitInt(t) => (MOVWload ptr mem) +(Load ptr mem) && (t.IsBoolean() || is8BitInt(t)) => (MOVBload ptr mem) +(Load ptr mem) && is32BitFloat(t) => (MOVSSload ptr mem) +(Load ptr mem) && is64BitFloat(t) => (MOVSDload ptr mem) + +// Lowering stores +(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVSDstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVSSstore ptr val 
mem) +(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVLstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 2 => (MOVWstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem) + +// Lowering moves +(Move [0] _ _ mem) => mem +(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem) +(Move [2] dst src mem) => (MOVWstore dst (MOVWload src mem) mem) +(Move [4] dst src mem) => (MOVLstore dst (MOVLload src mem) mem) +(Move [3] dst src mem) => + (MOVBstore [2] dst (MOVBload [2] src mem) + (MOVWstore dst (MOVWload src mem) mem)) +(Move [5] dst src mem) => + (MOVBstore [4] dst (MOVBload [4] src mem) + (MOVLstore dst (MOVLload src mem) mem)) +(Move [6] dst src mem) => + (MOVWstore [4] dst (MOVWload [4] src mem) + (MOVLstore dst (MOVLload src mem) mem)) +(Move [7] dst src mem) => + (MOVLstore [3] dst (MOVLload [3] src mem) + (MOVLstore dst (MOVLload src mem) mem)) +(Move [8] dst src mem) => + (MOVLstore [4] dst (MOVLload [4] src mem) + (MOVLstore dst (MOVLload src mem) mem)) + +// Adjust moves to be a multiple of 4 bytes. +(Move [s] dst src mem) + && s > 8 && s%4 != 0 => + (Move [s-s%4] + (ADDLconst dst [int32(s%4)]) + (ADDLconst src [int32(s%4)]) + (MOVLstore dst (MOVLload src mem) mem)) + +// Medium copying uses a duff device. +(Move [s] dst src mem) + && s > 8 && s <= 4*128 && s%4 == 0 + && !config.noDuffDevice && logLargeCopy(v, s) => + (DUFFCOPY [10*(128-s/4)] dst src mem) +// 10 and 128 are magic constants. 10 is the number of bytes to encode: +// MOVL (SI), CX +// ADDL $4, SI +// MOVL CX, (DI) +// ADDL $4, DI +// and 128 is the number of such blocks. See src/runtime/duff_386.s:duffcopy. + +// Large copying uses REP MOVSL. 
+(Move [s] dst src mem) && (s > 4*128 || config.noDuffDevice) && s%4 == 0 && logLargeCopy(v, s) => + (REPMOVSL dst src (MOVLconst [int32(s/4)]) mem) + +// Lowering Zero instructions +(Zero [0] _ mem) => mem +(Zero [1] destptr mem) => (MOVBstoreconst [0] destptr mem) +(Zero [2] destptr mem) => (MOVWstoreconst [0] destptr mem) +(Zero [4] destptr mem) => (MOVLstoreconst [0] destptr mem) + +(Zero [3] destptr mem) => + (MOVBstoreconst [makeValAndOff(0,2)] destptr + (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)) +(Zero [5] destptr mem) => + (MOVBstoreconst [makeValAndOff(0,4)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) +(Zero [6] destptr mem) => + (MOVWstoreconst [makeValAndOff(0,4)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) +(Zero [7] destptr mem) => + (MOVLstoreconst [makeValAndOff(0,3)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) + +// Strip off any fractional word zeroing. +(Zero [s] destptr mem) && s%4 != 0 && s > 4 => + (Zero [s-s%4] (ADDLconst destptr [int32(s%4)]) + (MOVLstoreconst [0] destptr mem)) + +// Zero small numbers of words directly. +(Zero [8] destptr mem) => + (MOVLstoreconst [makeValAndOff(0,4)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) +(Zero [12] destptr mem) => + (MOVLstoreconst [makeValAndOff(0,8)] destptr + (MOVLstoreconst [makeValAndOff(0,4)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))) +(Zero [16] destptr mem) => + (MOVLstoreconst [makeValAndOff(0,12)] destptr + (MOVLstoreconst [makeValAndOff(0,8)] destptr + (MOVLstoreconst [makeValAndOff(0,4)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)))) + +// Medium zeroing uses a duff device. +(Zero [s] destptr mem) + && s > 16 && s <= 4*128 && s%4 == 0 + && !config.noDuffDevice => + (DUFFZERO [1*(128-s/4)] destptr (MOVLconst [0]) mem) +// 1 and 128 are magic constants. 1 is the number of bytes to encode STOSL. +// 128 is the number of STOSL instructions in duffzero. 
+// See src/runtime/duff_386.s:duffzero. + +// Large zeroing uses REP STOSQ. +(Zero [s] destptr mem) + && (s > 4*128 || (config.noDuffDevice && s > 16)) + && s%4 == 0 => + (REPSTOSL destptr (MOVLconst [int32(s/4)]) (MOVLconst [0]) mem) + + +// Lowering constants +(Const8 [c]) => (MOVLconst [int32(c)]) +(Const16 [c]) => (MOVLconst [int32(c)]) +(Const32 ...) => (MOVLconst ...) +(Const(32|64)F ...) => (MOVS(S|D)const ...) +(ConstNil) => (MOVLconst [0]) +(ConstBool [c]) => (MOVLconst [b2i32(c)]) + +// Lowering calls +(StaticCall ...) => (CALLstatic ...) +(ClosureCall ...) => (CALLclosure ...) +(InterCall ...) => (CALLinter ...) +(TailCall ...) => (CALLtail ...) + +// Miscellaneous +(IsNonNil p) => (SETNE (TESTL p p)) +(IsInBounds idx len) => (SETB (CMPL idx len)) +(IsSliceInBounds idx len) => (SETBE (CMPL idx len)) +(NilCheck ...) => (LoweredNilCheck ...) +(GetG ...) => (LoweredGetG ...) +(GetClosurePtr ...) => (LoweredGetClosurePtr ...) +(GetCallerPC ...) => (LoweredGetCallerPC ...) +(GetCallerSP ...) => (LoweredGetCallerSP ...) 
+(Addr {sym} base) => (LEAL {sym} base) +(LocalAddr {sym} base mem) && t.Elem().HasPointers() => (LEAL {sym} (SPanchored base mem)) +(LocalAddr {sym} base _) && !t.Elem().HasPointers() => (LEAL {sym} base) + +// block rewrites +(If (SETL cmp) yes no) => (LT cmp yes no) +(If (SETLE cmp) yes no) => (LE cmp yes no) +(If (SETG cmp) yes no) => (GT cmp yes no) +(If (SETGE cmp) yes no) => (GE cmp yes no) +(If (SETEQ cmp) yes no) => (EQ cmp yes no) +(If (SETNE cmp) yes no) => (NE cmp yes no) +(If (SETB cmp) yes no) => (ULT cmp yes no) +(If (SETBE cmp) yes no) => (ULE cmp yes no) +(If (SETA cmp) yes no) => (UGT cmp yes no) +(If (SETAE cmp) yes no) => (UGE cmp yes no) +(If (SETO cmp) yes no) => (OS cmp yes no) + +// Special case for floating point - LF/LEF not generated +(If (SETGF cmp) yes no) => (UGT cmp yes no) +(If (SETGEF cmp) yes no) => (UGE cmp yes no) +(If (SETEQF cmp) yes no) => (EQF cmp yes no) +(If (SETNEF cmp) yes no) => (NEF cmp yes no) + +(If cond yes no) => (NE (TESTB cond cond) yes no) + +// Write barrier. +(WB ...) => (LoweredWB ...) + +(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem) + +(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 => (LoweredPanicExtendA [kind] hi lo y mem) +(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 => (LoweredPanicExtendB [kind] hi lo y mem) +(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 => (LoweredPanicExtendC [kind] hi lo y mem) + +// *************************** +// Above: lowering rules +// Below: optimizations +// *************************** +// TODO: Should the optimizations be a separate pass? 
+ +// Fold boolean tests into blocks +(NE (TESTB (SETL cmp) (SETL cmp)) yes no) => (LT cmp yes no) +(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) => (LE cmp yes no) +(NE (TESTB (SETG cmp) (SETG cmp)) yes no) => (GT cmp yes no) +(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) => (GE cmp yes no) +(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) => (EQ cmp yes no) +(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) => (NE cmp yes no) +(NE (TESTB (SETB cmp) (SETB cmp)) yes no) => (ULT cmp yes no) +(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) => (ULE cmp yes no) +(NE (TESTB (SETA cmp) (SETA cmp)) yes no) => (UGT cmp yes no) +(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) => (UGE cmp yes no) +(NE (TESTB (SETO cmp) (SETO cmp)) yes no) => (OS cmp yes no) + +// Special case for floating point - LF/LEF not generated +(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) => (UGT cmp yes no) +(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) => (UGE cmp yes no) +(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) => (EQF cmp yes no) +(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) => (NEF cmp yes no) + +// fold constants into instructions +(ADDL x (MOVLconst [c])) && !t.IsPtr() => (ADDLconst [c] x) +(ADDLcarry x (MOVLconst [c])) => (ADDLconstcarry [c] x) +(ADCL x (MOVLconst [c]) f) => (ADCLconst [c] x f) + +(SUBL x (MOVLconst [c])) => (SUBLconst x [c]) +(SUBL (MOVLconst [c]) x) => (NEGL (SUBLconst x [c])) +(SUBLcarry x (MOVLconst [c])) => (SUBLconstcarry [c] x) +(SBBL x (MOVLconst [c]) f) => (SBBLconst [c] x f) + +(MULL x (MOVLconst [c])) => (MULLconst [c] x) +(ANDL x (MOVLconst [c])) => (ANDLconst [c] x) + +(ANDLconst [c] (ANDLconst [d] x)) => (ANDLconst [c & d] x) +(XORLconst [c] (XORLconst [d] x)) => (XORLconst [c ^ d] x) +(MULLconst [c] (MULLconst [d] x)) => (MULLconst [c * d] x) + +(ORL x (MOVLconst [c])) => (ORLconst [c] x) +(XORL x (MOVLconst [c])) => (XORLconst [c] x) + +(SHLL x (MOVLconst [c])) => (SHLLconst [c&31] x) +(SHRL x (MOVLconst [c])) => (SHRLconst [c&31] x) +(SHRW x (MOVLconst [c])) && c&31 < 16 => 
(SHRWconst [int16(c&31)] x) +(SHRW _ (MOVLconst [c])) && c&31 >= 16 => (MOVLconst [0]) +(SHRB x (MOVLconst [c])) && c&31 < 8 => (SHRBconst [int8(c&31)] x) +(SHRB _ (MOVLconst [c])) && c&31 >= 8 => (MOVLconst [0]) + +(SARL x (MOVLconst [c])) => (SARLconst [c&31] x) +(SARW x (MOVLconst [c])) => (SARWconst [int16(min(int64(c&31),15))] x) +(SARB x (MOVLconst [c])) => (SARBconst [int8(min(int64(c&31),7))] x) + +(SARL x (ANDLconst [31] y)) => (SARL x y) +(SHLL x (ANDLconst [31] y)) => (SHLL x y) +(SHRL x (ANDLconst [31] y)) => (SHRL x y) + +// Constant shift simplifications + +(SHLLconst x [0]) => x +(SHRLconst x [0]) => x +(SARLconst x [0]) => x + +(SHRWconst x [0]) => x +(SARWconst x [0]) => x + +(SHRBconst x [0]) => x +(SARBconst x [0]) => x + +(ROLLconst [0] x) => x +(ROLWconst [0] x) => x +(ROLBconst [0] x) => x + +// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits) +// because the x86 instructions are defined to use all 5 bits of the shift even +// for the small shifts. I don't think we'll ever generate a weird shift (e.g. +// (SHRW x (MOVLconst [24])), but just in case. + +(CMPL x (MOVLconst [c])) => (CMPLconst x [c]) +(CMPL (MOVLconst [c]) x) => (InvertFlags (CMPLconst x [c])) +(CMPW x (MOVLconst [c])) => (CMPWconst x [int16(c)]) +(CMPW (MOVLconst [c]) x) => (InvertFlags (CMPWconst x [int16(c)])) +(CMPB x (MOVLconst [c])) => (CMPBconst x [int8(c)]) +(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)])) + +// Canonicalize the order of arguments to comparisons - helps with CSE. +(CMP(L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(L|W|B) y x)) + +// strength reduction +// Assumes that the following costs from https://gmplib.org/~tege/x86-timing.pdf: +// 1 - addl, shll, leal, negl, subl +// 3 - imull +// This limits the rewrites to two instructions. +// Note that negl always operates in-place, +// which can require a register-register move +// to preserve the original value, +// so it must be used with care. 
+(MULLconst [-9] x) => (NEGL (LEAL8 x x)) +(MULLconst [-5] x) => (NEGL (LEAL4 x x)) +(MULLconst [-3] x) => (NEGL (LEAL2 x x)) +(MULLconst [-1] x) => (NEGL x) +(MULLconst [0] _) => (MOVLconst [0]) +(MULLconst [1] x) => x +(MULLconst [3] x) => (LEAL2 x x) +(MULLconst [5] x) => (LEAL4 x x) +(MULLconst [7] x) => (LEAL2 x (LEAL2 x x)) +(MULLconst [9] x) => (LEAL8 x x) +(MULLconst [11] x) => (LEAL2 x (LEAL4 x x)) +(MULLconst [13] x) => (LEAL4 x (LEAL2 x x)) +(MULLconst [19] x) => (LEAL2 x (LEAL8 x x)) +(MULLconst [21] x) => (LEAL4 x (LEAL4 x x)) +(MULLconst [25] x) => (LEAL8 x (LEAL2 x x)) +(MULLconst [27] x) => (LEAL8 (LEAL2 x x) (LEAL2 x x)) +(MULLconst [37] x) => (LEAL4 x (LEAL8 x x)) +(MULLconst [41] x) => (LEAL8 x (LEAL4 x x)) +(MULLconst [45] x) => (LEAL8 (LEAL4 x x) (LEAL4 x x)) +(MULLconst [73] x) => (LEAL8 x (LEAL8 x x)) +(MULLconst [81] x) => (LEAL8 (LEAL8 x x) (LEAL8 x x)) + +(MULLconst [c] x) && isPowerOfTwo32(c+1) && c >= 15 => (SUBL (SHLLconst [int32(log32(c+1))] x) x) +(MULLconst [c] x) && isPowerOfTwo32(c-1) && c >= 17 => (LEAL1 (SHLLconst [int32(log32(c-1))] x) x) +(MULLconst [c] x) && isPowerOfTwo32(c-2) && c >= 34 => (LEAL2 (SHLLconst [int32(log32(c-2))] x) x) +(MULLconst [c] x) && isPowerOfTwo32(c-4) && c >= 68 => (LEAL4 (SHLLconst [int32(log32(c-4))] x) x) +(MULLconst [c] x) && isPowerOfTwo32(c-8) && c >= 136 => (LEAL8 (SHLLconst [int32(log32(c-8))] x) x) +(MULLconst [c] x) && c%3 == 0 && isPowerOfTwo32(c/3) => (SHLLconst [int32(log32(c/3))] (LEAL2 x x)) +(MULLconst [c] x) && c%5 == 0 && isPowerOfTwo32(c/5) => (SHLLconst [int32(log32(c/5))] (LEAL4 x x)) +(MULLconst [c] x) && c%9 == 0 && isPowerOfTwo32(c/9) => (SHLLconst [int32(log32(c/9))] (LEAL8 x x)) + +// combine add/shift into LEAL +(ADDL x (SHLLconst [3] y)) => (LEAL8 x y) +(ADDL x (SHLLconst [2] y)) => (LEAL4 x y) +(ADDL x (SHLLconst [1] y)) => (LEAL2 x y) +(ADDL x (ADDL y y)) => (LEAL2 x y) +(ADDL x (ADDL x y)) => (LEAL2 y x) + +// combine ADDL/ADDLconst into LEAL1 +(ADDLconst [c] (ADDL x y)) 
=> (LEAL1 [c] x y) +(ADDL (ADDLconst [c] x) y) => (LEAL1 [c] x y) + +// fold ADDL into LEAL +(ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(int64(c)+int64(d)) => (LEAL [c+d] {s} x) +(LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(int64(c)+int64(d)) => (LEAL [c+d] {s} x) +(ADDLconst [c] x:(SP)) => (LEAL [c] x) // so it is rematerializeable +(LEAL [c] {s} (ADDL x y)) && x.Op != OpSB && y.Op != OpSB => (LEAL1 [c] {s} x y) +(ADDL x (LEAL [c] {s} y)) && x.Op != OpSB && y.Op != OpSB => (LEAL1 [c] {s} x y) + +// fold ADDLconst into LEALx +(ADDLconst [c] (LEAL1 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL1 [c+d] {s} x y) +(ADDLconst [c] (LEAL2 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL2 [c+d] {s} x y) +(ADDLconst [c] (LEAL4 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL4 [c+d] {s} x y) +(ADDLconst [c] (LEAL8 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL8 [c+d] {s} x y) +(LEAL1 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL1 [c+d] {s} x y) +(LEAL2 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL2 [c+d] {s} x y) +(LEAL2 [c] {s} x (ADDLconst [d] y)) && is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB => (LEAL2 [c+2*d] {s} x y) +(LEAL4 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL4 [c+d] {s} x y) +(LEAL4 [c] {s} x (ADDLconst [d] y)) && is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB => (LEAL4 [c+4*d] {s} x y) +(LEAL8 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL8 [c+d] {s} x y) +(LEAL8 [c] {s} x (ADDLconst [d] y)) && is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB => (LEAL8 [c+8*d] {s} x y) + +// fold shifts into LEALx +(LEAL1 [c] {s} x (SHLLconst [1] y)) => (LEAL2 [c] {s} x y) +(LEAL1 [c] {s} x (SHLLconst [2] y)) => (LEAL4 [c] {s} x y) +(LEAL1 [c] {s} x (SHLLconst [3] y)) => (LEAL8 [c] {s} x y) +(LEAL2 [c] {s} x (SHLLconst [1] y)) => (LEAL4 [c] {s} x y) +(LEAL2 [c] {s} x (SHLLconst [2] y)) => (LEAL8 [c] {s} x y) 
+(LEAL4 [c] {s} x (SHLLconst [1] y)) => (LEAL8 [c] {s} x y) + +// reverse ordering of compare instruction +(SETL (InvertFlags x)) => (SETG x) +(SETG (InvertFlags x)) => (SETL x) +(SETB (InvertFlags x)) => (SETA x) +(SETA (InvertFlags x)) => (SETB x) +(SETLE (InvertFlags x)) => (SETGE x) +(SETGE (InvertFlags x)) => (SETLE x) +(SETBE (InvertFlags x)) => (SETAE x) +(SETAE (InvertFlags x)) => (SETBE x) +(SETEQ (InvertFlags x)) => (SETEQ x) +(SETNE (InvertFlags x)) => (SETNE x) + +// sign extended loads +// Note: The combined instruction must end up in the same block +// as the original load. If not, we end up making a value with +// memory type live in two different blocks, which can lead to +// multiple memory values alive simultaneously. +// Make sure we don't combine these ops if the load has another use. +// This prevents a single load from being split into multiple loads +// which then might return different values. See test/atomicload.go. +(MOVBLSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBLSXload [off] {sym} ptr mem) +(MOVBLZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload [off] {sym} ptr mem) +(MOVWLSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWLSXload [off] {sym} ptr mem) +(MOVWLZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload [off] {sym} ptr mem) + +// replace load from same location as preceding store with zero/sign extension (or copy in case of full width) +(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBLZX x) +(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWLZX x) +(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x +(MOVBLSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym 
== sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBLSX x) +(MOVWLSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWLSX x) + +// Fold extensions and ANDs together. +(MOVBLZX (ANDLconst [c] x)) => (ANDLconst [c & 0xff] x) +(MOVWLZX (ANDLconst [c] x)) => (ANDLconst [c & 0xffff] x) +(MOVBLSX (ANDLconst [c] x)) && c & 0x80 == 0 => (ANDLconst [c & 0x7f] x) +(MOVWLSX (ANDLconst [c] x)) && c & 0x8000 == 0 => (ANDLconst [c & 0x7fff] x) + +// Don't extend before storing +(MOVWstore [off] {sym} ptr (MOVWL(S|Z)X x) mem) => (MOVWstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBL(S|Z)X x) mem) => (MOVBstore [off] {sym} ptr x mem) + +// fold constants into memory operations +// Note that this is not always a good idea because if not all the uses of +// the ADDLconst get eliminated, we still have to compute the ADDLconst and we now +// have potentially two live values (ptr and (ADDLconst [off] ptr)) instead of one. +// Nevertheless, let's do it! 
+(MOV(L|W|B|SS|SD)load [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => + (MOV(L|W|B|SS|SD)load [off1+off2] {sym} ptr mem) +(MOV(L|W|B|SS|SD)store [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) => + (MOV(L|W|B|SS|SD)store [off1+off2] {sym} ptr val mem) + +((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) => + ((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {sym} val base mem) +((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) => + ((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem) +((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) => + ((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem) +((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDLconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) => + ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {sym} base val mem) +((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) && valoff1.canAdd32(off2) => + ((ADD|AND|OR|XOR)Lconstmodify [valoff1.addOffset32(off2)] {sym} base mem) + +// Fold constants into stores. +(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) => + (MOVLstoreconst [makeValAndOff(c,off)] {sym} ptr mem) +(MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) => + (MOVWstoreconst [makeValAndOff(c,off)] {sym} ptr mem) +(MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) => + (MOVBstoreconst [makeValAndOff(c,off)] {sym} ptr mem) + +// Fold address offsets into constant stores. +(MOV(L|W|B)storeconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) => + (MOV(L|W|B)storeconst [sc.addOffset32(off)] {s} ptr mem) + +// We need to fold LEAL into the MOVx ops so that the live variable analysis knows +// what variables are being read/written by the ops. 
+// Note: we turn off this merging for operations on globals when building +// position-independent code (when Flag_shared is set). +// PIC needs a spare register to load the PC into. Having the LEAL be +// a separate instruction gives us that register. Having the LEAL be +// a separate instruction also allows it to be CSEd (which is good because +// it compiles to a thunk call). +(MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + && (base.Op != OpSB || !config.ctxt.Flag_shared) => + (MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem) + +(MOV(L|W|B|SS|SD)store [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) + && (base.Op != OpSB || !config.ctxt.Flag_shared) => + (MOV(L|W|B|SS|SD)store [off1+off2] {mergeSym(sym1,sym2)} base val mem) + +(MOV(L|W|B)storeconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOV(L|W|B)storeconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) + +((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) => + ((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem) +((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) => + ((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem) +((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) => + ((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) 
+((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) => + ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) +((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem) + && valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) => + ((ADD|AND|OR|XOR)Lconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) + +// Merge load/store to op +((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|AND|OR|XOR|SUB|MUL)Lload x [off] {sym} ptr mem) +((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem) +((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem) +(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem) +(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) => + ((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem) +(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr mem)) mem) + && y.Uses==1 && l.Uses==1 && clobber(y, l) => + ((ADD|AND|OR|XOR)Lconstmodify [makeValAndOff(c,off)] {sym} ptr mem) + +// fold LEALs together +(LEAL [off1] {sym1} (LEAL [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAL [off1+off2] {mergeSym(sym1,sym2)} x) + +// LEAL into LEAL1 +(LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB => + (LEAL1 [off1+off2] 
{mergeSym(sym1,sym2)} x y) + +// LEAL1 into LEAL +(LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y) + +// LEAL into LEAL[248] +(LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB => + (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y) +(LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB => + (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y) +(LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB => + (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y) + +// LEAL[248] into LEAL +(LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y) +(LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y) +(LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y) + +// LEAL[1248] into LEAL[1248]. Only some such merges are possible. +(LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} x y) +(LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} y x) +(LEAL2 [off1] {sym} x (LEAL1 [off2] {nil} y y)) && is32Bit(int64(off1)+2*int64(off2)) => + (LEAL4 [off1+2*off2] {sym} x y) +(LEAL4 [off1] {sym} x (LEAL1 [off2] {nil} y y)) && is32Bit(int64(off1)+4*int64(off2)) => + (LEAL8 [off1+4*off2] {sym} x y) + +// Absorb InvertFlags into branches. 
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no) +(GT (InvertFlags cmp) yes no) => (LT cmp yes no) +(LE (InvertFlags cmp) yes no) => (GE cmp yes no) +(GE (InvertFlags cmp) yes no) => (LE cmp yes no) +(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no) +(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no) +(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no) +(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no) +(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no) +(NE (InvertFlags cmp) yes no) => (NE cmp yes no) + +// Constant comparisons. +(CMPLconst (MOVLconst [x]) [y]) && x==y => (FlagEQ) +(CMPLconst (MOVLconst [x]) [y]) && x (FlagLT_ULT) +(CMPLconst (MOVLconst [x]) [y]) && xuint32(y) => (FlagLT_UGT) +(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x) (FlagGT_ULT) +(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)>uint32(y) => (FlagGT_UGT) + +(CMPWconst (MOVLconst [x]) [y]) && int16(x)==y => (FlagEQ) +(CMPWconst (MOVLconst [x]) [y]) && int16(x) (FlagLT_ULT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)uint16(y) => (FlagLT_UGT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x) (FlagGT_ULT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)>uint16(y) => (FlagGT_UGT) + +(CMPBconst (MOVLconst [x]) [y]) && int8(x)==y => (FlagEQ) +(CMPBconst (MOVLconst [x]) [y]) && int8(x) (FlagLT_ULT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)uint8(y) => (FlagLT_UGT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x) (FlagGT_ULT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)>uint8(y) => (FlagGT_UGT) + +// Other known comparisons. +(CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1< (FlagLT_ULT) +(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT) +(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < n => (FlagLT_ULT) +(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < n => (FlagLT_ULT) +// TODO: DIVxU also. + +// Absorb flag constants into SBB ops. 
+(SBBLcarrymask (FlagEQ)) => (MOVLconst [0]) +(SBBLcarrymask (FlagLT_ULT)) => (MOVLconst [-1]) +(SBBLcarrymask (FlagLT_UGT)) => (MOVLconst [0]) +(SBBLcarrymask (FlagGT_ULT)) => (MOVLconst [-1]) +(SBBLcarrymask (FlagGT_UGT)) => (MOVLconst [0]) + +// Absorb flag constants into branches. +(EQ (FlagEQ) yes no) => (First yes no) +(EQ (FlagLT_ULT) yes no) => (First no yes) +(EQ (FlagLT_UGT) yes no) => (First no yes) +(EQ (FlagGT_ULT) yes no) => (First no yes) +(EQ (FlagGT_UGT) yes no) => (First no yes) + +(NE (FlagEQ) yes no) => (First no yes) +(NE (FlagLT_ULT) yes no) => (First yes no) +(NE (FlagLT_UGT) yes no) => (First yes no) +(NE (FlagGT_ULT) yes no) => (First yes no) +(NE (FlagGT_UGT) yes no) => (First yes no) + +(LT (FlagEQ) yes no) => (First no yes) +(LT (FlagLT_ULT) yes no) => (First yes no) +(LT (FlagLT_UGT) yes no) => (First yes no) +(LT (FlagGT_ULT) yes no) => (First no yes) +(LT (FlagGT_UGT) yes no) => (First no yes) + +(LE (FlagEQ) yes no) => (First yes no) +(LE (FlagLT_ULT) yes no) => (First yes no) +(LE (FlagLT_UGT) yes no) => (First yes no) +(LE (FlagGT_ULT) yes no) => (First no yes) +(LE (FlagGT_UGT) yes no) => (First no yes) + +(GT (FlagEQ) yes no) => (First no yes) +(GT (FlagLT_ULT) yes no) => (First no yes) +(GT (FlagLT_UGT) yes no) => (First no yes) +(GT (FlagGT_ULT) yes no) => (First yes no) +(GT (FlagGT_UGT) yes no) => (First yes no) + +(GE (FlagEQ) yes no) => (First yes no) +(GE (FlagLT_ULT) yes no) => (First no yes) +(GE (FlagLT_UGT) yes no) => (First no yes) +(GE (FlagGT_ULT) yes no) => (First yes no) +(GE (FlagGT_UGT) yes no) => (First yes no) + +(ULT (FlagEQ) yes no) => (First no yes) +(ULT (FlagLT_ULT) yes no) => (First yes no) +(ULT (FlagLT_UGT) yes no) => (First no yes) +(ULT (FlagGT_ULT) yes no) => (First yes no) +(ULT (FlagGT_UGT) yes no) => (First no yes) + +(ULE (FlagEQ) yes no) => (First yes no) +(ULE (FlagLT_ULT) yes no) => (First yes no) +(ULE (FlagLT_UGT) yes no) => (First no yes) +(ULE (FlagGT_ULT) yes no) => (First yes no) +(ULE 
(FlagGT_UGT) yes no) => (First no yes) + +(UGT (FlagEQ) yes no) => (First no yes) +(UGT (FlagLT_ULT) yes no) => (First no yes) +(UGT (FlagLT_UGT) yes no) => (First yes no) +(UGT (FlagGT_ULT) yes no) => (First no yes) +(UGT (FlagGT_UGT) yes no) => (First yes no) + +(UGE (FlagEQ) yes no) => (First yes no) +(UGE (FlagLT_ULT) yes no) => (First no yes) +(UGE (FlagLT_UGT) yes no) => (First yes no) +(UGE (FlagGT_ULT) yes no) => (First no yes) +(UGE (FlagGT_UGT) yes no) => (First yes no) + +// Absorb flag constants into SETxx ops. +(SETEQ (FlagEQ)) => (MOVLconst [1]) +(SETEQ (FlagLT_ULT)) => (MOVLconst [0]) +(SETEQ (FlagLT_UGT)) => (MOVLconst [0]) +(SETEQ (FlagGT_ULT)) => (MOVLconst [0]) +(SETEQ (FlagGT_UGT)) => (MOVLconst [0]) + +(SETNE (FlagEQ)) => (MOVLconst [0]) +(SETNE (FlagLT_ULT)) => (MOVLconst [1]) +(SETNE (FlagLT_UGT)) => (MOVLconst [1]) +(SETNE (FlagGT_ULT)) => (MOVLconst [1]) +(SETNE (FlagGT_UGT)) => (MOVLconst [1]) + +(SETL (FlagEQ)) => (MOVLconst [0]) +(SETL (FlagLT_ULT)) => (MOVLconst [1]) +(SETL (FlagLT_UGT)) => (MOVLconst [1]) +(SETL (FlagGT_ULT)) => (MOVLconst [0]) +(SETL (FlagGT_UGT)) => (MOVLconst [0]) + +(SETLE (FlagEQ)) => (MOVLconst [1]) +(SETLE (FlagLT_ULT)) => (MOVLconst [1]) +(SETLE (FlagLT_UGT)) => (MOVLconst [1]) +(SETLE (FlagGT_ULT)) => (MOVLconst [0]) +(SETLE (FlagGT_UGT)) => (MOVLconst [0]) + +(SETG (FlagEQ)) => (MOVLconst [0]) +(SETG (FlagLT_ULT)) => (MOVLconst [0]) +(SETG (FlagLT_UGT)) => (MOVLconst [0]) +(SETG (FlagGT_ULT)) => (MOVLconst [1]) +(SETG (FlagGT_UGT)) => (MOVLconst [1]) + +(SETGE (FlagEQ)) => (MOVLconst [1]) +(SETGE (FlagLT_ULT)) => (MOVLconst [0]) +(SETGE (FlagLT_UGT)) => (MOVLconst [0]) +(SETGE (FlagGT_ULT)) => (MOVLconst [1]) +(SETGE (FlagGT_UGT)) => (MOVLconst [1]) + +(SETB (FlagEQ)) => (MOVLconst [0]) +(SETB (FlagLT_ULT)) => (MOVLconst [1]) +(SETB (FlagLT_UGT)) => (MOVLconst [0]) +(SETB (FlagGT_ULT)) => (MOVLconst [1]) +(SETB (FlagGT_UGT)) => (MOVLconst [0]) + +(SETBE (FlagEQ)) => (MOVLconst [1]) +(SETBE (FlagLT_ULT)) => 
(MOVLconst [1]) +(SETBE (FlagLT_UGT)) => (MOVLconst [0]) +(SETBE (FlagGT_ULT)) => (MOVLconst [1]) +(SETBE (FlagGT_UGT)) => (MOVLconst [0]) + +(SETA (FlagEQ)) => (MOVLconst [0]) +(SETA (FlagLT_ULT)) => (MOVLconst [0]) +(SETA (FlagLT_UGT)) => (MOVLconst [1]) +(SETA (FlagGT_ULT)) => (MOVLconst [0]) +(SETA (FlagGT_UGT)) => (MOVLconst [1]) + +(SETAE (FlagEQ)) => (MOVLconst [1]) +(SETAE (FlagLT_ULT)) => (MOVLconst [0]) +(SETAE (FlagLT_UGT)) => (MOVLconst [1]) +(SETAE (FlagGT_ULT)) => (MOVLconst [0]) +(SETAE (FlagGT_UGT)) => (MOVLconst [1]) + +// Remove redundant *const ops +(ADDLconst [c] x) && c==0 => x +(SUBLconst [c] x) && c==0 => x +(ANDLconst [c] _) && c==0 => (MOVLconst [0]) +(ANDLconst [c] x) && c==-1 => x +(ORLconst [c] x) && c==0 => x +(ORLconst [c] _) && c==-1 => (MOVLconst [-1]) +(XORLconst [c] x) && c==0 => x +// TODO: since we got rid of the W/B versions, we might miss +// things like (ANDLconst [0x100] x) which were formerly +// (ANDBconst [0] x). Probably doesn't happen very often. 
+// If we cared, we might do: +// (ANDLconst [c] x) && t.Size()==1 && int8(x)==0 => (MOVLconst [0]) + +// Convert constant subtracts to constant adds +(SUBLconst [c] x) => (ADDLconst [-c] x) + +// generic constant folding +// TODO: more of this +(ADDLconst [c] (MOVLconst [d])) => (MOVLconst [c+d]) +(ADDLconst [c] (ADDLconst [d] x)) => (ADDLconst [c+d] x) +(SARLconst [c] (MOVLconst [d])) => (MOVLconst [d>>uint64(c)]) +(SARWconst [c] (MOVLconst [d])) => (MOVLconst [d>>uint64(c)]) +(SARBconst [c] (MOVLconst [d])) => (MOVLconst [d>>uint64(c)]) +(NEGL (MOVLconst [c])) => (MOVLconst [-c]) +(MULLconst [c] (MOVLconst [d])) => (MOVLconst [c*d]) +(ANDLconst [c] (MOVLconst [d])) => (MOVLconst [c&d]) +(ORLconst [c] (MOVLconst [d])) => (MOVLconst [c|d]) +(XORLconst [c] (MOVLconst [d])) => (MOVLconst [c^d]) +(NOTL (MOVLconst [c])) => (MOVLconst [^c]) + +// generic simplifications +// TODO: more of this +(ADDL x (NEGL y)) => (SUBL x y) +(SUBL x x) => (MOVLconst [0]) +(ANDL x x) => x +(ORL x x) => x +(XORL x x) => (MOVLconst [0]) + +// checking AND against 0. +(CMP(L|W|B)const l:(ANDL x y) [0]) && l.Uses==1 => (TEST(L|W|B) x y) +(CMPLconst l:(ANDLconst [c] x) [0]) && l.Uses==1 => (TESTLconst [c] x) +(CMPWconst l:(ANDLconst [c] x) [0]) && l.Uses==1 => (TESTWconst [int16(c)] x) +(CMPBconst l:(ANDLconst [c] x) [0]) && l.Uses==1 => (TESTBconst [int8(c)] x) + +// TEST %reg,%reg is shorter than CMP +(CMP(L|W|B)const x [0]) => (TEST(L|W|B) x x) + +// Convert LEAL1 back to ADDL if we can +(LEAL1 [0] {nil} x y) => (ADDL x y) + +// For PIC, break floating-point constant loading into two instructions so we have +// a register to use for holding the address of the constant pool entry. 
+(MOVSSconst [c]) && config.ctxt.Flag_shared => (MOVSSconst2 (MOVSSconst1 [c])) +(MOVSDconst [c]) && config.ctxt.Flag_shared => (MOVSDconst2 (MOVSDconst1 [c])) + +(CMP(L|W|B) l:(MOV(L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (CMP(L|W|B)load {sym} [off] ptr x mem) +(CMP(L|W|B) x l:(MOV(L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (InvertFlags (CMP(L|W|B)load {sym} [off] ptr x mem)) + +(CMP(L|W|B)const l:(MOV(L|W|B)load {sym} [off] ptr mem) [c]) + && l.Uses == 1 + && clobber(l) => + @l.Block (CMP(L|W|B)constload {sym} [makeValAndOff(int32(c),off)] ptr mem) + +(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem) +(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem) +(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) + +(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))]) +(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) +(MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/386Ops.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/386Ops.go new file mode 100644 index 0000000000000000000000000000000000000000..7401ac871c3ffed2cc72fa520eb01ed01b4f150e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/386Ops.go @@ -0,0 +1,590 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "strings" + +// Notes: +// - Integer types live in the low portion of registers. Upper portions are junk. 
+// - Boolean types use the low-order byte of a register. 0=false, 1=true. +// Upper bytes are junk. +// - Floating-point types live in the low natural slot of an sse2 register. +// Unused portions are junk. +// - We do not use AH,BH,CH,DH registers. +// - When doing sub-register operations, we try to write the whole +// destination register to avoid a partial-register write. +// - Unused portions of AuxInt (or the Val portion of ValAndOff) are +// filled by sign-extending the used portion. Users of AuxInt which interpret +// AuxInt as unsigned (e.g. shifts) must be careful. + +// Suffixes encode the bit width of various instructions. +// L (long word) = 32 bit +// W (word) = 16 bit +// B (byte) = 8 bit + +// copied from ../../x86/reg.go +var regNames386 = []string{ + "AX", + "CX", + "DX", + "BX", + "SP", + "BP", + "SI", + "DI", + "X0", + "X1", + "X2", + "X3", + "X4", + "X5", + "X6", + "X7", + + // If you add registers, update asyncPreempt in runtime + + // pseudo-registers + "SB", +} + +func init() { + // Make map from reg names to reg integers. 
+ if len(regNames386) > 64 { + panic("too many registers") + } + num := map[string]int{} + for i, name := range regNames386 { + num[name] = i + } + buildReg := func(s string) regMask { + m := regMask(0) + for _, r := range strings.Split(s, " ") { + if n, ok := num[r]; ok { + m |= regMask(1) << uint(n) + continue + } + panic("register " + r + " not found") + } + return m + } + + // Common individual register masks + var ( + ax = buildReg("AX") + cx = buildReg("CX") + dx = buildReg("DX") + bx = buildReg("BX") + si = buildReg("SI") + gp = buildReg("AX CX DX BX BP SI DI") + fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7") + gpsp = gp | buildReg("SP") + gpspsb = gpsp | buildReg("SB") + callerSave = gp | fp + ) + // Common slices of register masks + var ( + gponly = []regMask{gp} + fponly = []regMask{fp} + ) + + // Common regInfo + var ( + gp01 = regInfo{inputs: nil, outputs: gponly} + gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly} + gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly} + gp11sb = regInfo{inputs: []regMask{gpspsb}, outputs: gponly} + gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly} + gp11carry = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp, 0}} + gp21carry = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}} + gp1carry1 = regInfo{inputs: []regMask{gp}, outputs: gponly} + gp2carry1 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly} + gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly} + gp21sb = regInfo{inputs: []regMask{gpspsb, gpsp}, outputs: gponly} + gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}} + gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax}, clobbers: dx} + gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax} + gp11mod = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{dx}, clobbers: ax} + gp21mul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}} + + gp2flags = regInfo{inputs: 
[]regMask{gpsp, gpsp}} + gp1flags = regInfo{inputs: []regMask{gpsp}} + gp0flagsLoad = regInfo{inputs: []regMask{gpspsb, 0}} + gp1flagsLoad = regInfo{inputs: []regMask{gpspsb, gpsp, 0}} + flagsgp = regInfo{inputs: nil, outputs: gponly} + + readflags = regInfo{inputs: nil, outputs: gponly} + flagsgpax = regInfo{inputs: nil, clobbers: ax, outputs: []regMask{gp &^ ax}} + + gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: gponly} + gp21load = regInfo{inputs: []regMask{gp, gpspsb, 0}, outputs: gponly} + gploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: gponly} + gp21loadidx = regInfo{inputs: []regMask{gp, gpspsb, gpsp, 0}, outputs: gponly} + + gpstore = regInfo{inputs: []regMask{gpspsb, gpsp, 0}} + gpstoreconst = regInfo{inputs: []regMask{gpspsb, 0}} + gpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, gpsp, 0}} + gpstoreconstidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}} + + fp01 = regInfo{inputs: nil, outputs: fponly} + fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly} + fp21load = regInfo{inputs: []regMask{fp, gpspsb, 0}, outputs: fponly} + fpgp = regInfo{inputs: fponly, outputs: gponly} + gpfp = regInfo{inputs: gponly, outputs: fponly} + fp11 = regInfo{inputs: fponly, outputs: fponly} + fp2flags = regInfo{inputs: []regMask{fp, fp}} + + fpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: fponly} + fploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: fponly} + + fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}} + fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}} + ) + + var _386ops = []opData{ + // fp ops + {name: "ADDSS", argLength: 2, reg: fp21, asm: "ADDSS", commutative: true, resultInArg0: true}, // fp32 add + {name: "ADDSD", argLength: 2, reg: fp21, asm: "ADDSD", commutative: true, resultInArg0: true}, // fp64 add + {name: "SUBSS", argLength: 2, reg: fp21, asm: "SUBSS", resultInArg0: true}, // fp32 sub + {name: "SUBSD", argLength: 2, reg: fp21, asm: "SUBSD", resultInArg0: true}, // fp64 sub + {name: 
"MULSS", argLength: 2, reg: fp21, asm: "MULSS", commutative: true, resultInArg0: true}, // fp32 mul + {name: "MULSD", argLength: 2, reg: fp21, asm: "MULSD", commutative: true, resultInArg0: true}, // fp64 mul + {name: "DIVSS", argLength: 2, reg: fp21, asm: "DIVSS", resultInArg0: true}, // fp32 div + {name: "DIVSD", argLength: 2, reg: fp21, asm: "DIVSD", resultInArg0: true}, // fp64 div + + {name: "MOVSSload", argLength: 2, reg: fpload, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp32 load + {name: "MOVSDload", argLength: 2, reg: fpload, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp64 load + {name: "MOVSSconst", reg: fp01, asm: "MOVSS", aux: "Float32", rematerializeable: true}, // fp32 constant + {name: "MOVSDconst", reg: fp01, asm: "MOVSD", aux: "Float64", rematerializeable: true}, // fp64 constant + {name: "MOVSSloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSS", aux: "SymOff", symEffect: "Read"}, // fp32 load indexed by i + {name: "MOVSSloadidx4", argLength: 3, reg: fploadidx, asm: "MOVSS", aux: "SymOff", symEffect: "Read"}, // fp32 load indexed by 4*i + {name: "MOVSDloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSD", aux: "SymOff", symEffect: "Read"}, // fp64 load indexed by i + {name: "MOVSDloadidx8", argLength: 3, reg: fploadidx, asm: "MOVSD", aux: "SymOff", symEffect: "Read"}, // fp64 load indexed by 8*i + + {name: "MOVSSstore", argLength: 3, reg: fpstore, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp32 store + {name: "MOVSDstore", argLength: 3, reg: fpstore, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp64 store + {name: "MOVSSstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSS", aux: "SymOff", symEffect: "Write"}, // fp32 indexed by i store + {name: "MOVSSstoreidx4", argLength: 4, reg: fpstoreidx, asm: "MOVSS", aux: "SymOff", symEffect: "Write"}, // fp32 indexed by 4i store + {name: "MOVSDstoreidx1", argLength: 4, reg: 
fpstoreidx, asm: "MOVSD", aux: "SymOff", symEffect: "Write"}, // fp64 indexed by i store + {name: "MOVSDstoreidx8", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff", symEffect: "Write"}, // fp64 indexed by 8i store + + {name: "ADDSSload", argLength: 3, reg: fp21load, asm: "ADDSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem + {name: "ADDSDload", argLength: 3, reg: fp21load, asm: "ADDSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem + {name: "SUBSSload", argLength: 3, reg: fp21load, asm: "SUBSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem + {name: "SUBSDload", argLength: 3, reg: fp21load, asm: "SUBSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem + {name: "MULSSload", argLength: 3, reg: fp21load, asm: "MULSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem + {name: "MULSDload", argLength: 3, reg: fp21load, asm: "MULSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem + {name: "DIVSSload", argLength: 3, reg: fp21load, asm: "DIVSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 / tmp, tmp loaded from arg1+auxint+aux, arg2 = mem + {name: "DIVSDload", argLength: 3, reg: fp21load, asm: "DIVSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 / tmp, tmp loaded from arg1+auxint+aux, arg2 = mem + + // binary ops + {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + 
arg1 + {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", typ: "UInt32", clobberFlags: true}, // arg0 + auxint + + {name: "ADDLcarry", argLength: 2, reg: gp21carry, asm: "ADDL", commutative: true, resultInArg0: true}, // arg0 + arg1, generates pair + {name: "ADDLconstcarry", argLength: 1, reg: gp11carry, asm: "ADDL", aux: "Int32", resultInArg0: true}, // arg0 + auxint, generates pair + {name: "ADCL", argLength: 3, reg: gp2carry1, asm: "ADCL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0+arg1+carry(arg2), where arg2 is flags + {name: "ADCLconst", argLength: 2, reg: gp1carry1, asm: "ADCL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0+auxint+carry(arg1), where arg1 is flags + + {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, // arg0 - arg1 + {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint + + {name: "SUBLcarry", argLength: 2, reg: gp21carry, asm: "SUBL", resultInArg0: true}, // arg0-arg1, generates pair + {name: "SUBLconstcarry", argLength: 1, reg: gp11carry, asm: "SUBL", aux: "Int32", resultInArg0: true}, // arg0-auxint, generates pair + {name: "SBBL", argLength: 3, reg: gp2carry1, asm: "SBBL", resultInArg0: true, clobberFlags: true}, // arg0-arg1-borrow(arg2), where arg2 is flags + {name: "SBBLconst", argLength: 2, reg: gp1carry1, asm: "SBBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0-auxint-borrow(arg1), where arg1 is flags + + {name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1 + {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMUL3L", aux: "Int32", clobberFlags: true}, // arg0 * auxint + + {name: "MULLU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt32,Flags)", asm: "MULL", commutative: true, clobberFlags: true}, 
// Let x = arg0*arg1 (full 32x32->64 unsigned multiply). Returns uint32(x), and flags set to overflow if uint32(x) != x. + + {name: "HMULL", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width + {name: "HMULLU", argLength: 2, reg: gp21hmul, commutative: true, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width + + {name: "MULLQU", argLength: 2, reg: gp21mul, commutative: true, asm: "MULL", clobberFlags: true}, // arg0 * arg1, high 32 in result[0], low 32 in result[1] + + {name: "AVGLU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 + arg1) / 2 as unsigned, all 32 result bits + + // For DIVL, DIVW, MODL and MODW, AuxInt non-zero means that the divisor has been proved to be not -1. + {name: "DIVL", argLength: 2, reg: gp11div, asm: "IDIVL", aux: "Bool", clobberFlags: true}, // arg0 / arg1 + {name: "DIVW", argLength: 2, reg: gp11div, asm: "IDIVW", aux: "Bool", clobberFlags: true}, // arg0 / arg1 + {name: "DIVLU", argLength: 2, reg: gp11div, asm: "DIVL", clobberFlags: true}, // arg0 / arg1 + {name: "DIVWU", argLength: 2, reg: gp11div, asm: "DIVW", clobberFlags: true}, // arg0 / arg1 + + {name: "MODL", argLength: 2, reg: gp11mod, asm: "IDIVL", aux: "Bool", clobberFlags: true}, // arg0 % arg1 + {name: "MODW", argLength: 2, reg: gp11mod, asm: "IDIVW", aux: "Bool", clobberFlags: true}, // arg0 % arg1 + {name: "MODLU", argLength: 2, reg: gp11mod, asm: "DIVL", clobberFlags: true}, // arg0 % arg1 + {name: "MODWU", argLength: 2, reg: gp11mod, asm: "DIVW", clobberFlags: true}, // arg0 % arg1 + + {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1 + {name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint + + {name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true, clobberFlags: true}, // 
arg0 | arg1 + {name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint + + {name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1 + {name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint + + {name: "CMPL", argLength: 2, reg: gp2flags, asm: "CMPL", typ: "Flags"}, // arg0 compare to arg1 + {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1 + {name: "CMPB", argLength: 2, reg: gp2flags, asm: "CMPB", typ: "Flags"}, // arg0 compare to arg1 + {name: "CMPLconst", argLength: 1, reg: gp1flags, asm: "CMPL", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint + {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int16"}, // arg0 compare to auxint + {name: "CMPBconst", argLength: 1, reg: gp1flags, asm: "CMPB", typ: "Flags", aux: "Int8"}, // arg0 compare to auxint + + // compare *(arg0+auxint+aux) to arg1 (in that order). arg2=mem. + {name: "CMPLload", argLength: 3, reg: gp1flagsLoad, asm: "CMPL", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true}, + {name: "CMPWload", argLength: 3, reg: gp1flagsLoad, asm: "CMPW", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true}, + {name: "CMPBload", argLength: 3, reg: gp1flagsLoad, asm: "CMPB", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true}, + + // compare *(arg0+ValAndOff(AuxInt).Off()+aux) to ValAndOff(AuxInt).Val() (in that order). arg1=mem. 
+ {name: "CMPLconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPL", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true}, + {name: "CMPWconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPW", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true}, + {name: "CMPBconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPB", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true}, + + {name: "UCOMISS", argLength: 2, reg: fp2flags, asm: "UCOMISS", typ: "Flags"}, // arg0 compare to arg1, f32 + {name: "UCOMISD", argLength: 2, reg: fp2flags, asm: "UCOMISD", typ: "Flags"}, // arg0 compare to arg1, f64 + + {name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0 + {name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0 + {name: "TESTB", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0 + {name: "TESTLconst", argLength: 1, reg: gp1flags, asm: "TESTL", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0 + {name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"}, // (arg0 & auxint) compare to 0 + {name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"}, // (arg0 & auxint) compare to 0 + + {name: "SHLL", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 32 + {name: "SHLLconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-31 + // Note: x86 is weird, the 16 and 8 byte shifts still use all 5 bits of shift amount! 
+ + {name: "SHRL", argLength: 2, reg: gp21shift, asm: "SHRL", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32 + {name: "SHRW", argLength: 2, reg: gp21shift, asm: "SHRW", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32 + {name: "SHRB", argLength: 2, reg: gp21shift, asm: "SHRB", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32 + {name: "SHRLconst", argLength: 1, reg: gp11, asm: "SHRL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-31 + {name: "SHRWconst", argLength: 1, reg: gp11, asm: "SHRW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-15 + {name: "SHRBconst", argLength: 1, reg: gp11, asm: "SHRB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-7 + + {name: "SARL", argLength: 2, reg: gp21shift, asm: "SARL", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32 + {name: "SARW", argLength: 2, reg: gp21shift, asm: "SARW", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32 + {name: "SARB", argLength: 2, reg: gp21shift, asm: "SARB", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32 + {name: "SARLconst", argLength: 1, reg: gp11, asm: "SARL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31 + {name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-15 + {name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-7 + + {name: "ROLL", argLength: 2, reg: gp21shift, asm: "ROLL", resultInArg0: true, clobberFlags: true}, // 32 bits of arg0 rotate left by arg1 + 
{name: "ROLW", argLength: 2, reg: gp21shift, asm: "ROLW", resultInArg0: true, clobberFlags: true}, // low 16 bits of arg0 rotate left by arg1 + {name: "ROLB", argLength: 2, reg: gp21shift, asm: "ROLB", resultInArg0: true, clobberFlags: true}, // low 8 bits of arg0 rotate left by arg1 + {name: "ROLLconst", argLength: 1, reg: gp11, asm: "ROLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-31 + {name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15 + {name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7 + + // binary-op with a memory source operand + {name: "ADDLload", argLength: 3, reg: gp21load, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem + {name: "SUBLload", argLength: 3, reg: gp21load, asm: "SUBL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem + {name: "MULLload", argLength: 3, reg: gp21load, asm: "IMULL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem + {name: "ANDLload", argLength: 3, reg: gp21load, asm: "ANDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+auxint+aux, arg2 = mem + {name: "ORLload", argLength: 3, reg: gp21load, asm: "ORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+auxint+aux, arg2 = mem + {name: "XORLload", argLength: 3, reg: gp21load, asm: "XORL", aux: "SymOff", 
resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+auxint+aux, arg2 = mem + + // binary-op with an indexed memory source operand + {name: "ADDLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem + {name: "SUBLloadidx4", argLength: 4, reg: gp21loadidx, asm: "SUBL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem + {name: "MULLloadidx4", argLength: 4, reg: gp21loadidx, asm: "IMULL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 * tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem + {name: "ANDLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ANDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem + {name: "ORLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem + {name: "XORLloadidx4", argLength: 4, reg: gp21loadidx, asm: "XORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem + + // unary ops + {name: "NEGL", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true, clobberFlags: true}, // -arg0 + + {name: "NOTL", argLength: 1, reg: gp11, asm: "NOTL", resultInArg0: true}, // ^arg0 + + {name: "BSFL", argLength: 1, reg: gp11, asm: "BSFL", clobberFlags: true}, // arg0 # of low-order zeroes ; undef if zero + {name: "BSFW", argLength: 1, reg: gp11, asm: "BSFW", clobberFlags: true}, // arg0 # of low-order zeroes ; undef if zero + {name: "LoweredCtz32", argLength: 1, reg: gp11, clobberFlags: true}, // arg0 # of 
low-order zeroes + + {name: "BSRL", argLength: 1, reg: gp11, asm: "BSRL", clobberFlags: true}, // arg0 # of high-order zeroes ; undef if zero + {name: "BSRW", argLength: 1, reg: gp11, asm: "BSRW", clobberFlags: true}, // arg0 # of high-order zeroes ; undef if zero + + {name: "BSWAPL", argLength: 1, reg: gp11, asm: "BSWAPL", resultInArg0: true}, // arg0 swap bytes + + {name: "SQRTSD", argLength: 1, reg: fp11, asm: "SQRTSD"}, // sqrt(arg0) + {name: "SQRTSS", argLength: 1, reg: fp11, asm: "SQRTSS"}, // sqrt(arg0), float32 + + {name: "SBBLcarrymask", argLength: 1, reg: flagsgp, asm: "SBBL"}, // (int32)(-1) if carry is set, 0 if carry is clear. + // Note: SBBW and SBBB are subsumed by SBBL + + {name: "SETEQ", argLength: 1, reg: readflags, asm: "SETEQ"}, // extract == condition from arg0 + {name: "SETNE", argLength: 1, reg: readflags, asm: "SETNE"}, // extract != condition from arg0 + {name: "SETL", argLength: 1, reg: readflags, asm: "SETLT"}, // extract signed < condition from arg0 + {name: "SETLE", argLength: 1, reg: readflags, asm: "SETLE"}, // extract signed <= condition from arg0 + {name: "SETG", argLength: 1, reg: readflags, asm: "SETGT"}, // extract signed > condition from arg0 + {name: "SETGE", argLength: 1, reg: readflags, asm: "SETGE"}, // extract signed >= condition from arg0 + {name: "SETB", argLength: 1, reg: readflags, asm: "SETCS"}, // extract unsigned < condition from arg0 + {name: "SETBE", argLength: 1, reg: readflags, asm: "SETLS"}, // extract unsigned <= condition from arg0 + {name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0 + {name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0 + {name: "SETO", argLength: 1, reg: readflags, asm: "SETOS"}, // extract if overflow flag is set from arg0 + // Need different opcodes for floating point conditions because + // any comparison involving a NaN is always FALSE and thus + // the patterns for inverting conditions 
cannot be used. + {name: "SETEQF", argLength: 1, reg: flagsgpax, asm: "SETEQ", clobberFlags: true}, // extract == condition from arg0 + {name: "SETNEF", argLength: 1, reg: flagsgpax, asm: "SETNE", clobberFlags: true}, // extract != condition from arg0 + {name: "SETORD", argLength: 1, reg: flagsgp, asm: "SETPC"}, // extract "ordered" (No Nan present) condition from arg0 + {name: "SETNAN", argLength: 1, reg: flagsgp, asm: "SETPS"}, // extract "unordered" (Nan present) condition from arg0 + + {name: "SETGF", argLength: 1, reg: flagsgp, asm: "SETHI"}, // extract floating > condition from arg0 + {name: "SETGEF", argLength: 1, reg: flagsgp, asm: "SETCC"}, // extract floating >= condition from arg0 + + {name: "MOVBLSX", argLength: 1, reg: gp11, asm: "MOVBLSX"}, // sign extend arg0 from int8 to int32 + {name: "MOVBLZX", argLength: 1, reg: gp11, asm: "MOVBLZX"}, // zero extend arg0 from int8 to int32 + {name: "MOVWLSX", argLength: 1, reg: gp11, asm: "MOVWLSX"}, // sign extend arg0 from int16 to int32 + {name: "MOVWLZX", argLength: 1, reg: gp11, asm: "MOVWLZX"}, // zero extend arg0 from int16 to int32 + + {name: "MOVLconst", reg: gp01, asm: "MOVL", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint + + {name: "CVTTSD2SL", argLength: 1, reg: fpgp, asm: "CVTTSD2SL"}, // convert float64 to int32 + {name: "CVTTSS2SL", argLength: 1, reg: fpgp, asm: "CVTTSS2SL"}, // convert float32 to int32 + {name: "CVTSL2SS", argLength: 1, reg: gpfp, asm: "CVTSL2SS"}, // convert int32 to float32 + {name: "CVTSL2SD", argLength: 1, reg: gpfp, asm: "CVTSL2SD"}, // convert int32 to float64 + {name: "CVTSD2SS", argLength: 1, reg: fp11, asm: "CVTSD2SS"}, // convert float64 to float32 + {name: "CVTSS2SD", argLength: 1, reg: fp11, asm: "CVTSS2SD"}, // convert float32 to float64 + + {name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs for float negation. 
+ + {name: "LEAL", argLength: 1, reg: gp11sb, aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux + {name: "LEAL1", argLength: 2, reg: gp21sb, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux + {name: "LEAL2", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux + {name: "LEAL4", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux + {name: "LEAL8", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux + // Note: LEAL{1,2,4,8} must not have OpSB as either argument. + + // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address + {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend. + {name: "MOVBLSXload", argLength: 2, reg: gpload, asm: "MOVBLSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int32 + {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Zero extend. + {name: "MOVWLSXload", argLength: 2, reg: gpload, asm: "MOVWLSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int32 + {name: "MOVLload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend. + {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte in arg1 to arg0+auxint+aux. 
arg2=mem + {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem + {name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem + + // direct binary-op on memory (read-modify-write) + {name: "ADDLmodify", argLength: 3, reg: gpstore, asm: "ADDL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) += arg1, arg2=mem + {name: "SUBLmodify", argLength: 3, reg: gpstore, asm: "SUBL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) -= arg1, arg2=mem + {name: "ANDLmodify", argLength: 3, reg: gpstore, asm: "ANDL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) &= arg1, arg2=mem + {name: "ORLmodify", argLength: 3, reg: gpstore, asm: "ORL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) |= arg1, arg2=mem + {name: "XORLmodify", argLength: 3, reg: gpstore, asm: "XORL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) ^= arg1, arg2=mem + + // direct binary-op on indexed memory (read-modify-write) + {name: "ADDLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ADDL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) += arg2, arg3=mem + {name: "SUBLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "SUBL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) -= arg2, arg3=mem + {name: "ANDLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ANDL", aux: "SymOff", typ: "Mem", clobberFlags: true, 
symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) &= arg2, arg3=mem + {name: "ORLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ORL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) |= arg2, arg3=mem + {name: "XORLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "XORL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) ^= arg2, arg3=mem + + // direct binary-op on memory with a constant (read-modify-write) + {name: "ADDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem + {name: "ANDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ANDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem + {name: "ORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem + {name: "XORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "XORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem + + // direct binary-op on indexed memory with a constant (read-modify-write) + {name: "ADDLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ADDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem + {name: "ANDLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ANDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: 
"Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem + {name: "ORLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem + {name: "XORLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "XORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem + + // indexed loads/stores + {name: "MOVBloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBLZX", aux: "SymOff", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVWloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+2*arg1+auxint+aux. arg2=mem + {name: "MOVLloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem + // TODO: sign-extending indexed loads + {name: "MOVBstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVB", aux: "SymOff", symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVWstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. 
arg3=mem + {name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem + {name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem + // TODO: add size-mismatched indexed loads, like MOVBstoreidx4. + + // For storeconst ops, the AuxInt field encodes both + // the value to store and an address offset of the store. + // Cast AuxInt to a ValAndOff to extract Val and Off fields. + {name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux. arg1=mem + {name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 2 bytes of ... + {name: "MOVLstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVL", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 4 bytes of ... + + {name: "MOVBstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+1*arg1+ValAndOff(AuxInt).Off()+aux. arg2=mem + {name: "MOVWstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 2 bytes of ... arg1 ... + {name: "MOVWstoreconstidx2", argLength: 3, reg: gpstoreconstidx, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 2 bytes of ... 2*arg1 ... 
+ {name: "MOVLstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVL", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 4 bytes of ... arg1 ... + {name: "MOVLstoreconstidx4", argLength: 3, reg: gpstoreconstidx, asm: "MOVL", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 4 bytes of ... 4*arg1 ... + + // arg0 = pointer to start of memory to zero + // arg1 = value to store (will always be zero) + // arg2 = mem + // auxint = offset into duffzero code to start executing + // returns mem + { + name: "DUFFZERO", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{buildReg("DI"), buildReg("AX")}, + clobbers: buildReg("DI CX"), + // Note: CX is only clobbered when dynamic linking. + }, + faultOnNilArg0: true, + }, + + // arg0 = address of memory to zero + // arg1 = # of 4-byte words to zero + // arg2 = value to store (will always be zero) + // arg3 = mem + // returns mem + { + name: "REPSTOSL", + argLength: 4, + reg: regInfo{ + inputs: []regMask{buildReg("DI"), buildReg("CX"), buildReg("AX")}, + clobbers: buildReg("DI CX"), + }, + faultOnNilArg0: true, + }, + + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. 
arg0=codeptr, arg1=mem, auxint=argsize, returns mem + + // arg0 = destination pointer + // arg1 = source pointer + // arg2 = mem + // auxint = offset from duffcopy symbol to call + // returns memory + { + name: "DUFFCOPY", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{buildReg("DI"), buildReg("SI")}, + clobbers: buildReg("DI SI CX"), // uses CX as a temporary + }, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + }, + + // arg0 = destination pointer + // arg1 = source pointer + // arg2 = # of 8-byte words to copy + // arg3 = mem + // returns memory + { + name: "REPMOVSL", + argLength: 4, + reg: regInfo{ + inputs: []regMask{buildReg("DI"), buildReg("SI"), buildReg("CX")}, + clobbers: buildReg("DI SI CX"), + }, + faultOnNilArg0: true, + faultOnNilArg1: true, + }, + + // (InvertFlags (CMPL a b)) == (CMPL b a) + // So if we want (SETL (CMPL a b)) but we can't do that because a is a constant, + // then we do (SETL (InvertFlags (CMPL b a))) instead. + // Rewrites will convert this to (SETG (CMPL b a)). + // InvertFlags is a pseudo-op which can't appear in assembly output. + {name: "InvertFlags", argLength: 1}, // reverse direction of arg0 + + // Pseudo-ops + {name: "LoweredGetG", argLength: 1, reg: gp01}, // arg0=mem + // Scheduler ensures LoweredGetClosurePtr occurs only in entry block, + // and sorts it to the very beginning of the block to prevent other + // use of DX (the closure pointer) + {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("DX")}}, zeroWidth: true}, + // LoweredGetCallerPC evaluates to the PC to which its "caller" will return. + // I.e., if f calls g "calls" getcallerpc, + // the result should be the PC within f that g will return to. + // See runtime/stubs.go for a more detailed discussion. + {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, + // LoweredGetCallerSP returns the SP of the caller of the current function. 
arg0=mem + {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true}, + //arg0=ptr,arg1=mem, returns void. Faults if ptr is nil. + {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true}, + + // LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, auxint=# of write barrier slots + // It saves all GP registers if necessary, but may clobber others. + // Returns a pointer to a write barrier buffer in DI. + {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: callerSave &^ gp, outputs: []regMask{buildReg("DI")}}, clobberFlags: true, aux: "Int64"}, + + // There are three of these functions so that they can have three different register inputs. + // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the + // default registers to match so we don't need to copy registers around unnecessarily. + {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{dx, bx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{cx, dx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{ax, cx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + // Extend ops are the same as Bounds ops except the indexes are 64-bit. + {name: "LoweredPanicExtendA", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{si, dx, bx}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go). 
+ {name: "LoweredPanicExtendB", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{si, cx, dx}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go). + {name: "LoweredPanicExtendC", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{si, ax, cx}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go). + + // Constant flag values. For any comparison, there are 5 possible + // outcomes: the three from the signed total order (<,==,>) and the + // three from the unsigned total order. The == cases overlap. + // Note: there's a sixth "unordered" outcome for floating-point + // comparisons, but we don't use such a beast yet. + // These ops are for temporary use by rewrite rules. They + // cannot appear in the generated assembly. + {name: "FlagEQ"}, // equal + {name: "FlagLT_ULT"}, // signed < and unsigned < + {name: "FlagLT_UGT"}, // signed < and unsigned > + {name: "FlagGT_UGT"}, // signed > and unsigned < + {name: "FlagGT_ULT"}, // signed > and unsigned > + + // Special ops for PIC floating-point constants. + // MOVSXconst1 loads the address of the constant-pool entry into a register. + // MOVSXconst2 loads the constant from that address. + // MOVSXconst1 returns a pointer, but we type it as uint32 because it can never point to the Go heap. 
+ {name: "MOVSSconst1", reg: gp01, typ: "UInt32", aux: "Float32"}, + {name: "MOVSDconst1", reg: gp01, typ: "UInt32", aux: "Float64"}, + {name: "MOVSSconst2", argLength: 1, reg: gpfp, asm: "MOVSS"}, + {name: "MOVSDconst2", argLength: 1, reg: gpfp, asm: "MOVSD"}, + } + + var _386blocks = []blockData{ + {name: "EQ", controls: 1}, + {name: "NE", controls: 1}, + {name: "LT", controls: 1}, + {name: "LE", controls: 1}, + {name: "GT", controls: 1}, + {name: "GE", controls: 1}, + {name: "OS", controls: 1}, + {name: "OC", controls: 1}, + {name: "ULT", controls: 1}, + {name: "ULE", controls: 1}, + {name: "UGT", controls: 1}, + {name: "UGE", controls: 1}, + {name: "EQF", controls: 1}, + {name: "NEF", controls: 1}, + {name: "ORD", controls: 1}, // FP, ordered comparison (parity zero) + {name: "NAN", controls: 1}, // FP, unordered comparison (parity one) + } + + archs = append(archs, arch{ + name: "386", + pkg: "cmd/internal/obj/x86", + genfile: "../../x86/ssa.go", + ops: _386ops, + blocks: _386blocks, + regnames: regNames386, + gpregmask: gp, + fpregmask: fp, + framepointerreg: int8(num["BP"]), + linkreg: -1, // not used + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/386splitload.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/386splitload.rules new file mode 100644 index 0000000000000000000000000000000000000000..29d4f8c227f550e89d1b53cefed08ad10c32cb9b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/386splitload.rules @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// See the top of AMD64splitload.rules for discussion of these rules. 
+ +(CMP(L|W|B)load {sym} [off] ptr x mem) => (CMP(L|W|B) (MOV(L|W|B)load {sym} [off] ptr mem) x) + +(CMPLconstload {sym} [vo] ptr mem) => (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()]) +(CMPWconstload {sym} [vo] ptr mem) => (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()]) +(CMPBconstload {sym} [vo] ptr mem) => (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()]) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/AMD64.rules new file mode 100644 index 0000000000000000000000000000000000000000..2a4c59ebfc7176028ed9bd5eefa83f11b1a81a23 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -0,0 +1,1696 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Lowering arithmetic +(Add(64|32|16|8) ...) => (ADD(Q|L|L|L) ...) +(AddPtr ...) => (ADDQ ...) +(Add(32|64)F ...) => (ADDS(S|D) ...) + +(Sub(64|32|16|8) ...) => (SUB(Q|L|L|L) ...) +(SubPtr ...) => (SUBQ ...) +(Sub(32|64)F ...) => (SUBS(S|D) ...) + +(Mul(64|32|16|8) ...) => (MUL(Q|L|L|L) ...) +(Mul(32|64)F ...) => (MULS(S|D) ...) + +(Select0 (Mul64uover x y)) => (Select0 (MULQU x y)) +(Select0 (Mul32uover x y)) => (Select0 (MULLU x y)) +(Select1 (Mul(64|32)uover x y)) => (SETO (Select1 (MUL(Q|L)U x y))) + +(Hmul(64|32) ...) => (HMUL(Q|L) ...) +(Hmul(64|32)u ...) => (HMUL(Q|L)U ...) + +(Div(64|32|16) [a] x y) => (Select0 (DIV(Q|L|W) [a] x y)) +(Div8 x y) => (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y))) +(Div(64|32|16)u x y) => (Select0 (DIV(Q|L|W)U x y)) +(Div8u x y) => (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) +(Div(32|64)F ...) => (DIVS(S|D) ...) 
+ +(Select0 (Add64carry x y c)) => + (Select0 (ADCQ x y (Select1 (NEGLflags c)))) +(Select1 (Add64carry x y c)) => + (NEGQ (SBBQcarrymask (Select1 (ADCQ x y (Select1 (NEGLflags c)))))) +(Select0 (Sub64borrow x y c)) => + (Select0 (SBBQ x y (Select1 (NEGLflags c)))) +(Select1 (Sub64borrow x y c)) => + (NEGQ (SBBQcarrymask (Select1 (SBBQ x y (Select1 (NEGLflags c)))))) + +// Optimize ADCQ and friends +(ADCQ x (MOVQconst [c]) carry) && is32Bit(c) => (ADCQconst x [int32(c)] carry) +(ADCQ x y (FlagEQ)) => (ADDQcarry x y) +(ADCQconst x [c] (FlagEQ)) => (ADDQconstcarry x [c]) +(ADDQcarry x (MOVQconst [c])) && is32Bit(c) => (ADDQconstcarry x [int32(c)]) +(SBBQ x (MOVQconst [c]) borrow) && is32Bit(c) => (SBBQconst x [int32(c)] borrow) +(SBBQ x y (FlagEQ)) => (SUBQborrow x y) +(SBBQconst x [c] (FlagEQ)) => (SUBQconstborrow x [c]) +(SUBQborrow x (MOVQconst [c])) && is32Bit(c) => (SUBQconstborrow x [int32(c)]) +(Select1 (NEGLflags (MOVQconst [0]))) => (FlagEQ) +(Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) => x + + +(Mul64uhilo ...) => (MULQU2 ...) +(Div128u ...) => (DIVQU2 ...) + +(Avg64u ...) => (AVGQU ...) + +(Mod(64|32|16) [a] x y) => (Select1 (DIV(Q|L|W) [a] x y)) +(Mod8 x y) => (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y))) +(Mod(64|32|16)u x y) => (Select1 (DIV(Q|L|W)U x y)) +(Mod8u x y) => (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))) + +(And(64|32|16|8) ...) => (AND(Q|L|L|L) ...) +(Or(64|32|16|8) ...) => (OR(Q|L|L|L) ...) +(Xor(64|32|16|8) ...) => (XOR(Q|L|L|L) ...) +(Com(64|32|16|8) ...) => (NOT(Q|L|L|L) ...) + +(Neg(64|32|16|8) ...) => (NEG(Q|L|L|L) ...) +(Neg32F x) => (PXOR x (MOVSSconst [float32(math.Copysign(0, -1))])) +(Neg64F x) => (PXOR x (MOVSDconst [math.Copysign(0, -1)])) + +// Lowering boolean ops +(AndB ...) => (ANDL ...) +(OrB ...) => (ORL ...) 
+(Not x) => (XORLconst [1] x) + +// Lowering pointer arithmetic +(OffPtr [off] ptr) && is32Bit(off) => (ADDQconst [int32(off)] ptr) +(OffPtr [off] ptr) => (ADDQ (MOVQconst [off]) ptr) + +// Lowering other arithmetic +(Ctz64 x) && buildcfg.GOAMD64 >= 3 => (TZCNTQ x) +(Ctz32 x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x) +(Ctz64 x) && buildcfg.GOAMD64 < 3 => (CMOVQEQ (Select0 (BSFQ x)) (MOVQconst [64]) (Select1 (BSFQ x))) +(Ctz32 x) && buildcfg.GOAMD64 < 3 => (Select0 (BSFQ (BTSQconst [32] x))) +(Ctz16 x) => (BSFL (ORLconst [1<<16] x)) +(Ctz8 x) => (BSFL (ORLconst [1<<8 ] x)) + +(Ctz64NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTQ x) +(Ctz32NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x) +(Ctz16NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x) +(Ctz8NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x) +(Ctz64NonZero x) && buildcfg.GOAMD64 < 3 => (Select0 (BSFQ x)) +(Ctz32NonZero x) && buildcfg.GOAMD64 < 3 => (BSFL x) +(Ctz16NonZero x) && buildcfg.GOAMD64 < 3 => (BSFL x) +(Ctz8NonZero x) && buildcfg.GOAMD64 < 3 => (BSFL x) + +// BitLen64 of a 64 bit value x requires checking whether x == 0, since BSRQ is undefined when x == 0. +// However, for zero-extended values, we can cheat a bit, and calculate +// BSR(x<<1 + 1), which is guaranteed to be non-zero, and which conveniently +// places the index of the highest set bit where we want it. +// For GOAMD64>=3, BitLen can be calculated by OperandSize - LZCNT(x). +(BitLen64 x) && buildcfg.GOAMD64 < 3 => (ADDQconst [1] (CMOVQEQ (Select0 (BSRQ x)) (MOVQconst [-1]) (Select1 (BSRQ x)))) +(BitLen32 x) && buildcfg.GOAMD64 < 3 => (Select0 (BSRQ (LEAQ1 [1] (MOVLQZX x) (MOVLQZX x)))) +(BitLen16 x) && buildcfg.GOAMD64 < 3 => (BSRL (LEAL1 [1] (MOVWQZX x) (MOVWQZX x))) +(BitLen8 x) && buildcfg.GOAMD64 < 3 => (BSRL (LEAL1 [1] (MOVBQZX x) (MOVBQZX x))) +(BitLen64 x) && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst [-64] (LZCNTQ x))) +// Use 64-bit version to allow const-fold remove unnecessary arithmetic. 
+(BitLen32 x) && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst [-32] (LZCNTL x))) +(BitLen16 x) && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst [-32] (LZCNTL (MOVWQZX x)))) +(BitLen8 x) && buildcfg.GOAMD64 >= 3 => (NEGQ (ADDQconst [-32] (LZCNTL (MOVBQZX x)))) + +(Bswap(64|32) ...) => (BSWAP(Q|L) ...) +(Bswap16 x) => (ROLWconst [8] x) + +(PopCount(64|32) ...) => (POPCNT(Q|L) ...) +(PopCount16 x) => (POPCNTL (MOVWQZX x)) +(PopCount8 x) => (POPCNTL (MOVBQZX x)) + +(Sqrt ...) => (SQRTSD ...) +(Sqrt32 ...) => (SQRTSS ...) + +(RoundToEven x) => (ROUNDSD [0] x) +(Floor x) => (ROUNDSD [1] x) +(Ceil x) => (ROUNDSD [2] x) +(Trunc x) => (ROUNDSD [3] x) + +(FMA x y z) => (VFMADD231SD z x y) + +// Lowering extension +// Note: we always extend to 64 bits even though some ops don't need that many result bits. +(SignExt8to16 ...) => (MOVBQSX ...) +(SignExt8to32 ...) => (MOVBQSX ...) +(SignExt8to64 ...) => (MOVBQSX ...) +(SignExt16to32 ...) => (MOVWQSX ...) +(SignExt16to64 ...) => (MOVWQSX ...) +(SignExt32to64 ...) => (MOVLQSX ...) + +(ZeroExt8to16 ...) => (MOVBQZX ...) +(ZeroExt8to32 ...) => (MOVBQZX ...) +(ZeroExt8to64 ...) => (MOVBQZX ...) +(ZeroExt16to32 ...) => (MOVWQZX ...) +(ZeroExt16to64 ...) => (MOVWQZX ...) +(ZeroExt32to64 ...) => (MOVLQZX ...) + +(Slicemask x) => (SARQconst (NEGQ x) [63]) + +(SpectreIndex x y) => (CMOVQCC x (MOVQconst [0]) (CMPQ x y)) +(SpectreSliceIndex x y) => (CMOVQHI x (MOVQconst [0]) (CMPQ x y)) + +// Lowering truncation +// Because we ignore high parts of registers, truncates are just copies. +(Trunc16to8 ...) => (Copy ...) +(Trunc32to8 ...) => (Copy ...) +(Trunc32to16 ...) => (Copy ...) +(Trunc64to8 ...) => (Copy ...) +(Trunc64to16 ...) => (Copy ...) +(Trunc64to32 ...) => (Copy ...) + +// Lowering float <-> int +(Cvt32to32F ...) => (CVTSL2SS ...) +(Cvt32to64F ...) => (CVTSL2SD ...) +(Cvt64to32F ...) => (CVTSQ2SS ...) +(Cvt64to64F ...) => (CVTSQ2SD ...) + +(Cvt32Fto32 ...) => (CVTTSS2SL ...) +(Cvt32Fto64 ...) => (CVTTSS2SQ ...) +(Cvt64Fto32 ...) 
=> (CVTTSD2SL ...) +(Cvt64Fto64 ...) => (CVTTSD2SQ ...) + +(Cvt32Fto64F ...) => (CVTSS2SD ...) +(Cvt64Fto32F ...) => (CVTSD2SS ...) + +(Round(32|64)F ...) => (Copy ...) + +// Floating-point min is tricky, as the hardware op isn't right for various special +// cases (-0 and NaN). We use two hardware ops organized just right to make the +// result come out how we want it. See https://github.com/golang/go/issues/59488#issuecomment-1553493207 +// (although that comment isn't exactly right, as the value overwritten is not simulated correctly). +// t1 = MINSD x, y => incorrect if x==NaN or x==-0,y==+0 +// t2 = MINSD t1, x => fixes x==NaN case +// res = POR t1, t2 => fixes x==-0,y==+0 case +// Note that this trick depends on the special property that (NaN OR x) produces a NaN (although +// it might not produce the same NaN as the input). +(Min(64|32)F x y) => (POR (MINS(D|S) (MINS(D|S) x y) x) (MINS(D|S) x y)) +// Floating-point max is even trickier. Punt to using min instead. +// max(x,y) == -min(-x,-y) +(Max(64|32)F x y) => (Neg(64|32)F (Min(64|32)F (Neg(64|32)F x) (Neg(64|32)F y))) + +(CvtBoolToUint8 ...) => (Copy ...) + +// Lowering shifts +// Unsigned shifts need to return 0 if shift amount is >= width of shifted value. +// result = (arg << shift) & (shift >= argbits ? 
0 : 0xffffffffffffffff) +(Lsh64x(64|32|16|8) x y) && !shiftIsBounded(v) => (ANDQ (SHLQ x y) (SBBQcarrymask (CMP(Q|L|W|B)const y [64]))) +(Lsh32x(64|32|16|8) x y) && !shiftIsBounded(v) => (ANDL (SHLL x y) (SBBLcarrymask (CMP(Q|L|W|B)const y [32]))) +(Lsh16x(64|32|16|8) x y) && !shiftIsBounded(v) => (ANDL (SHLL x y) (SBBLcarrymask (CMP(Q|L|W|B)const y [32]))) +(Lsh8x(64|32|16|8) x y) && !shiftIsBounded(v) => (ANDL (SHLL x y) (SBBLcarrymask (CMP(Q|L|W|B)const y [32]))) + +(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLQ x y) +(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y) +(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y) +(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y) + +(Rsh64Ux(64|32|16|8) x y) && !shiftIsBounded(v) => (ANDQ (SHRQ x y) (SBBQcarrymask (CMP(Q|L|W|B)const y [64]))) +(Rsh32Ux(64|32|16|8) x y) && !shiftIsBounded(v) => (ANDL (SHRL x y) (SBBLcarrymask (CMP(Q|L|W|B)const y [32]))) +(Rsh16Ux(64|32|16|8) x y) && !shiftIsBounded(v) => (ANDL (SHRW x y) (SBBLcarrymask (CMP(Q|L|W|B)const y [16]))) +(Rsh8Ux(64|32|16|8) x y) && !shiftIsBounded(v) => (ANDL (SHRB x y) (SBBLcarrymask (CMP(Q|L|W|B)const y [8]))) + +(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRQ x y) +(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRL x y) +(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRW x y) +(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRB x y) + +// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value. +// We implement this by setting the shift value to -1 (all ones) if the shift value is >= width. 
+(Rsh64x(64|32|16|8) x y) && !shiftIsBounded(v) => (SARQ x (OR(Q|L|L|L) y (NOT(Q|L|L|L) (SBB(Q|L|L|L)carrymask (CMP(Q|L|W|B)const y [64]))))) +(Rsh32x(64|32|16|8) x y) && !shiftIsBounded(v) => (SARL x (OR(Q|L|L|L) y (NOT(Q|L|L|L) (SBB(Q|L|L|L)carrymask (CMP(Q|L|W|B)const y [32]))))) +(Rsh16x(64|32|16|8) x y) && !shiftIsBounded(v) => (SARW x (OR(Q|L|L|L) y (NOT(Q|L|L|L) (SBB(Q|L|L|L)carrymask (CMP(Q|L|W|B)const y [16]))))) +(Rsh8x(64|32|16|8) x y) && !shiftIsBounded(v) => (SARB x (OR(Q|L|L|L) y (NOT(Q|L|L|L) (SBB(Q|L|L|L)carrymask (CMP(Q|L|W|B)const y [8]))))) + +(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SARQ x y) +(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SARL x y) +(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SARW x y) +(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SARB x y) + +// Lowering integer comparisons +(Less(64|32|16|8) x y) => (SETL (CMP(Q|L|W|B) x y)) +(Less(64|32|16|8)U x y) => (SETB (CMP(Q|L|W|B) x y)) +(Leq(64|32|16|8) x y) => (SETLE (CMP(Q|L|W|B) x y)) +(Leq(64|32|16|8)U x y) => (SETBE (CMP(Q|L|W|B) x y)) +(Eq(Ptr|64|32|16|8|B) x y) => (SETEQ (CMP(Q|Q|L|W|B|B) x y)) +(Neq(Ptr|64|32|16|8|B) x y) => (SETNE (CMP(Q|Q|L|W|B|B) x y)) + +// Lowering floating point comparisons +// Note Go assembler gets UCOMISx operand order wrong, but it is right here +// and the operands are reversed when generating assembly language. +(Eq(32|64)F x y) => (SETEQF (UCOMIS(S|D) x y)) +(Neq(32|64)F x y) => (SETNEF (UCOMIS(S|D) x y)) +// Use SETGF/SETGEF with reversed operands to dodge NaN case. 
+(Less(32|64)F x y) => (SETGF (UCOMIS(S|D) y x)) +(Leq(32|64)F x y) => (SETGEF (UCOMIS(S|D) y x)) + +// Lowering loads +(Load ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVQload ptr mem) +(Load ptr mem) && is32BitInt(t) => (MOVLload ptr mem) +(Load ptr mem) && is16BitInt(t) => (MOVWload ptr mem) +(Load ptr mem) && (t.IsBoolean() || is8BitInt(t)) => (MOVBload ptr mem) +(Load ptr mem) && is32BitFloat(t) => (MOVSSload ptr mem) +(Load ptr mem) && is64BitFloat(t) => (MOVSDload ptr mem) + +// Lowering stores +(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVSDstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVSSstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVQstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVLstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 2 => (MOVWstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem) + +// Lowering moves +(Move [0] _ _ mem) => mem +(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem) +(Move [2] dst src mem) => (MOVWstore dst (MOVWload src mem) mem) +(Move [4] dst src mem) => (MOVLstore dst (MOVLload src mem) mem) +(Move [8] dst src mem) => (MOVQstore dst (MOVQload src mem) mem) +(Move [16] dst src mem) && config.useSSE => (MOVOstore dst (MOVOload src mem) mem) +(Move [16] dst src mem) && !config.useSSE => + (MOVQstore [8] dst (MOVQload [8] src mem) + (MOVQstore dst (MOVQload src mem) mem)) + +(Move [32] dst src mem) => + (Move [16] + (OffPtr dst [16]) + (OffPtr src [16]) + (Move [16] dst src mem)) + +(Move [48] dst src mem) && config.useSSE => + (Move [32] + (OffPtr dst [16]) + (OffPtr src [16]) + (Move [16] dst src mem)) + +(Move [64] dst src mem) && config.useSSE => + (Move [32] + (OffPtr dst [32]) + (OffPtr src [32]) + (Move [32] dst src mem)) + +(Move [3] dst src mem) => + (MOVBstore [2] dst (MOVBload [2] src mem) + (MOVWstore dst (MOVWload src mem) mem)) 
+(Move [5] dst src mem) => + (MOVBstore [4] dst (MOVBload [4] src mem) + (MOVLstore dst (MOVLload src mem) mem)) +(Move [6] dst src mem) => + (MOVWstore [4] dst (MOVWload [4] src mem) + (MOVLstore dst (MOVLload src mem) mem)) +(Move [7] dst src mem) => + (MOVLstore [3] dst (MOVLload [3] src mem) + (MOVLstore dst (MOVLload src mem) mem)) +(Move [9] dst src mem) => + (MOVBstore [8] dst (MOVBload [8] src mem) + (MOVQstore dst (MOVQload src mem) mem)) +(Move [10] dst src mem) => + (MOVWstore [8] dst (MOVWload [8] src mem) + (MOVQstore dst (MOVQload src mem) mem)) +(Move [11] dst src mem) => + (MOVLstore [7] dst (MOVLload [7] src mem) + (MOVQstore dst (MOVQload src mem) mem)) +(Move [12] dst src mem) => + (MOVLstore [8] dst (MOVLload [8] src mem) + (MOVQstore dst (MOVQload src mem) mem)) +(Move [s] dst src mem) && s >= 13 && s <= 15 => + (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) + (MOVQstore dst (MOVQload src mem) mem)) + +// Adjust moves to be a multiple of 16 bytes. +(Move [s] dst src mem) + && s > 16 && s%16 != 0 && s%16 <= 8 => + (Move [s-s%16] + (OffPtr dst [s%16]) + (OffPtr src [s%16]) + (MOVQstore dst (MOVQload src mem) mem)) +(Move [s] dst src mem) + && s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE => + (Move [s-s%16] + (OffPtr dst [s%16]) + (OffPtr src [s%16]) + (MOVOstore dst (MOVOload src mem) mem)) +(Move [s] dst src mem) + && s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE => + (Move [s-s%16] + (OffPtr dst [s%16]) + (OffPtr src [s%16]) + (MOVQstore [8] dst (MOVQload [8] src mem) + (MOVQstore dst (MOVQload src mem) mem))) + +// Medium copying uses a duff device. +(Move [s] dst src mem) + && s > 64 && s <= 16*64 && s%16 == 0 + && !config.noDuffDevice && logLargeCopy(v, s) => + (DUFFCOPY [s] dst src mem) + +// Large copying uses REP MOVSQ. 
+(Move [s] dst src mem) && (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s) => + (REPMOVSQ dst src (MOVQconst [s/8]) mem) + +// Lowering Zero instructions +(Zero [0] _ mem) => mem +(Zero [1] destptr mem) => (MOVBstoreconst [makeValAndOff(0,0)] destptr mem) +(Zero [2] destptr mem) => (MOVWstoreconst [makeValAndOff(0,0)] destptr mem) +(Zero [4] destptr mem) => (MOVLstoreconst [makeValAndOff(0,0)] destptr mem) +(Zero [8] destptr mem) => (MOVQstoreconst [makeValAndOff(0,0)] destptr mem) + +(Zero [3] destptr mem) => + (MOVBstoreconst [makeValAndOff(0,2)] destptr + (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)) +(Zero [5] destptr mem) => + (MOVBstoreconst [makeValAndOff(0,4)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) +(Zero [6] destptr mem) => + (MOVWstoreconst [makeValAndOff(0,4)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) +(Zero [7] destptr mem) => + (MOVLstoreconst [makeValAndOff(0,3)] destptr + (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)) + +// Strip off any fractional word zeroing. +(Zero [s] destptr mem) && s%8 != 0 && s > 8 && !config.useSSE => + (Zero [s-s%8] (OffPtr destptr [s%8]) + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) + +// Zero small numbers of words directly. 
+(Zero [16] destptr mem) && !config.useSSE => + (MOVQstoreconst [makeValAndOff(0,8)] destptr + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) +(Zero [24] destptr mem) && !config.useSSE => + (MOVQstoreconst [makeValAndOff(0,16)] destptr + (MOVQstoreconst [makeValAndOff(0,8)] destptr + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))) +(Zero [32] destptr mem) && !config.useSSE => + (MOVQstoreconst [makeValAndOff(0,24)] destptr + (MOVQstoreconst [makeValAndOff(0,16)] destptr + (MOVQstoreconst [makeValAndOff(0,8)] destptr + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)))) + +(Zero [9] destptr mem) && config.useSSE => + (MOVBstoreconst [makeValAndOff(0,8)] destptr + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) + +(Zero [10] destptr mem) && config.useSSE => + (MOVWstoreconst [makeValAndOff(0,8)] destptr + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) + +(Zero [11] destptr mem) && config.useSSE => + (MOVLstoreconst [makeValAndOff(0,7)] destptr + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) + +(Zero [12] destptr mem) && config.useSSE => + (MOVLstoreconst [makeValAndOff(0,8)] destptr + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) + +(Zero [s] destptr mem) && s > 12 && s < 16 && config.useSSE => + (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr + (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)) + +// Adjust zeros to be a multiple of 16 bytes. 
+(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE => + (Zero [s-s%16] (OffPtr destptr [s%16]) + (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)) + +(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE => + (Zero [s-s%16] (OffPtr destptr [s%16]) + (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)) + +(Zero [16] destptr mem) && config.useSSE => + (MOVOstoreconst [makeValAndOff(0,0)] destptr mem) +(Zero [32] destptr mem) && config.useSSE => + (MOVOstoreconst [makeValAndOff(0,16)] destptr + (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)) +(Zero [48] destptr mem) && config.useSSE => + (MOVOstoreconst [makeValAndOff(0,32)] destptr + (MOVOstoreconst [makeValAndOff(0,16)] destptr + (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))) +(Zero [64] destptr mem) && config.useSSE => + (MOVOstoreconst [makeValAndOff(0,48)] destptr + (MOVOstoreconst [makeValAndOff(0,32)] destptr + (MOVOstoreconst [makeValAndOff(0,16)] destptr + (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)))) + +// Medium zeroing uses a duff device. +(Zero [s] destptr mem) + && s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice => + (DUFFZERO [s] destptr mem) + +// Large zeroing uses REP STOSQ. +(Zero [s] destptr mem) + && (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) + && s%8 == 0 => + (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem) + +// Lowering constants +(Const8 [c]) => (MOVLconst [int32(c)]) +(Const16 [c]) => (MOVLconst [int32(c)]) +(Const32 ...) => (MOVLconst ...) +(Const64 ...) => (MOVQconst ...) +(Const32F ...) => (MOVSSconst ...) +(Const64F ...) => (MOVSDconst ...) +(ConstNil ) => (MOVQconst [0]) +(ConstBool [c]) => (MOVLconst [b2i32(c)]) + +// Lowering calls +(StaticCall ...) => (CALLstatic ...) +(ClosureCall ...) => (CALLclosure ...) +(InterCall ...) => (CALLinter ...) +(TailCall ...) => (CALLtail ...) 
+ +// Lowering conditional moves +// If the condition is a SETxx, we can just run a CMOV from the comparison that was +// setting the flags. +// Legend: HI=unsigned ABOVE, CS=unsigned BELOW, CC=unsigned ABOVE EQUAL, LS=unsigned BELOW EQUAL +(CondSelect x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && (is64BitInt(t) || isPtr(t)) + => (CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond) +(CondSelect x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is32BitInt(t) + => (CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond) +(CondSelect x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is16BitInt(t) + => (CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond) + +// If the condition does not set the flags, we need to generate a comparison. +(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 1 + => (CondSelect x y (MOVBQZX check)) +(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 2 + => (CondSelect x y (MOVWQZX check)) +(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 4 + => (CondSelect x y (MOVLQZX check)) + +(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t)) + => (CMOVQNE y x (CMPQconst [0] check)) +(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t) + => (CMOVLNE y x (CMPQconst [0] check)) +(CondSelect x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t) + => (CMOVWNE y x (CMPQconst [0] check)) + +// Absorb InvertFlags +(CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond)) + => (CMOVQ(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond) +(CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond)) + => (CMOVL(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond) +(CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond)) + => (CMOVW(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond) + +// Absorb constants generated during lower 
+(CMOV(QEQ|QLE|QGE|QCC|QLS|LEQ|LLE|LGE|LCC|LLS|WEQ|WLE|WGE|WCC|WLS) _ x (FlagEQ)) => x +(CMOV(QNE|QLT|QGT|QCS|QHI|LNE|LLT|LGT|LCS|LHI|WNE|WLT|WGT|WCS|WHI) y _ (FlagEQ)) => y +(CMOV(QNE|QGT|QGE|QHI|QCC|LNE|LGT|LGE|LHI|LCC|WNE|WGT|WGE|WHI|WCC) _ x (FlagGT_UGT)) => x +(CMOV(QEQ|QLE|QLT|QLS|QCS|LEQ|LLE|LLT|LLS|LCS|WEQ|WLE|WLT|WLS|WCS) y _ (FlagGT_UGT)) => y +(CMOV(QNE|QGT|QGE|QLS|QCS|LNE|LGT|LGE|LLS|LCS|WNE|WGT|WGE|WLS|WCS) _ x (FlagGT_ULT)) => x +(CMOV(QEQ|QLE|QLT|QHI|QCC|LEQ|LLE|LLT|LHI|LCC|WEQ|WLE|WLT|WHI|WCC) y _ (FlagGT_ULT)) => y +(CMOV(QNE|QLT|QLE|QCS|QLS|LNE|LLT|LLE|LCS|LLS|WNE|WLT|WLE|WCS|WLS) _ x (FlagLT_ULT)) => x +(CMOV(QEQ|QGT|QGE|QHI|QCC|LEQ|LGT|LGE|LHI|LCC|WEQ|WGT|WGE|WHI|WCC) y _ (FlagLT_ULT)) => y +(CMOV(QNE|QLT|QLE|QHI|QCC|LNE|LLT|LLE|LHI|LCC|WNE|WLT|WLE|WHI|WCC) _ x (FlagLT_UGT)) => x +(CMOV(QEQ|QGT|QGE|QCS|QLS|LEQ|LGT|LGE|LCS|LLS|WEQ|WGT|WGE|WCS|WLS) y _ (FlagLT_UGT)) => y + +// Miscellaneous +(IsNonNil p) => (SETNE (TESTQ p p)) +(IsInBounds idx len) => (SETB (CMPQ idx len)) +(IsSliceInBounds idx len) => (SETBE (CMPQ idx len)) +(NilCheck ...) => (LoweredNilCheck ...) +(GetG mem) && v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal => (LoweredGetG mem) // only lower in old ABI. in new ABI we have a G register. +(GetClosurePtr ...) => (LoweredGetClosurePtr ...) +(GetCallerPC ...) => (LoweredGetCallerPC ...) +(GetCallerSP ...) => (LoweredGetCallerSP ...) 
+ +(HasCPUFeature {s}) => (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s}))) +(Addr {sym} base) => (LEAQ {sym} base) +(LocalAddr {sym} base mem) && t.Elem().HasPointers() => (LEAQ {sym} (SPanchored base mem)) +(LocalAddr {sym} base _) && !t.Elem().HasPointers() => (LEAQ {sym} base) + +(MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 => (SETLstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 => (SETLEstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETG x) mem) && y.Uses == 1 => (SETGstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETGE x) mem) && y.Uses == 1 => (SETGEstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1 => (SETEQstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETNE x) mem) && y.Uses == 1 => (SETNEstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETB x) mem) && y.Uses == 1 => (SETBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETBE x) mem) && y.Uses == 1 => (SETBEstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETA x) mem) && y.Uses == 1 => (SETAstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr y:(SETAE x) mem) && y.Uses == 1 => (SETAEstore [off] {sym} ptr x mem) + +// block rewrites +(If (SETL cmp) yes no) => (LT cmp yes no) +(If (SETLE cmp) yes no) => (LE cmp yes no) +(If (SETG cmp) yes no) => (GT cmp yes no) +(If (SETGE cmp) yes no) => (GE cmp yes no) +(If (SETEQ cmp) yes no) => (EQ cmp yes no) +(If (SETNE cmp) yes no) => (NE cmp yes no) +(If (SETB cmp) yes no) => (ULT cmp yes no) +(If (SETBE cmp) yes no) => (ULE cmp yes no) +(If (SETA cmp) yes no) => (UGT cmp yes no) +(If (SETAE cmp) yes no) => (UGE cmp yes no) +(If (SETO cmp) yes no) => (OS cmp yes no) + +// Special case for floating point - LF/LEF not generated +(If (SETGF cmp) yes no) => (UGT cmp yes no) +(If (SETGEF cmp) yes no) => (UGE cmp yes no) +(If (SETEQF cmp) yes no) => (EQF cmp yes no) +(If (SETNEF cmp) yes no) => (NEF cmp 
yes no) + +(If cond yes no) => (NE (TESTB cond cond) yes no) + +(JumpTable idx) => (JUMPTABLE {makeJumpTableSym(b)} idx (LEAQ {makeJumpTableSym(b)} (SB))) + +// Atomic loads. Other than preserving their ordering with respect to other loads, nothing special here. +(AtomicLoad8 ptr mem) => (MOVBatomicload ptr mem) +(AtomicLoad32 ptr mem) => (MOVLatomicload ptr mem) +(AtomicLoad64 ptr mem) => (MOVQatomicload ptr mem) +(AtomicLoadPtr ptr mem) => (MOVQatomicload ptr mem) + +// Atomic stores. We use XCHG to prevent the hardware reordering a subsequent load. +// TODO: most runtime uses of atomic stores don't need that property. Use normal stores for those? +(AtomicStore8 ptr val mem) => (Select1 (XCHGB val ptr mem)) +(AtomicStore32 ptr val mem) => (Select1 (XCHGL val ptr mem)) +(AtomicStore64 ptr val mem) => (Select1 (XCHGQ val ptr mem)) +(AtomicStorePtrNoWB ptr val mem) => (Select1 (XCHGQ val ptr mem)) + +// Atomic exchanges. +(AtomicExchange32 ptr val mem) => (XCHGL val ptr mem) +(AtomicExchange64 ptr val mem) => (XCHGQ val ptr mem) + +// Atomic adds. +(AtomicAdd32 ptr val mem) => (AddTupleFirst32 val (XADDLlock val ptr mem)) +(AtomicAdd64 ptr val mem) => (AddTupleFirst64 val (XADDQlock val ptr mem)) +(Select0 (AddTupleFirst32 val tuple)) => (ADDL val (Select0 tuple)) +(Select1 (AddTupleFirst32 _ tuple)) => (Select1 tuple) +(Select0 (AddTupleFirst64 val tuple)) => (ADDQ val (Select0 tuple)) +(Select1 (AddTupleFirst64 _ tuple)) => (Select1 tuple) + +// Atomic compare and swap. +(AtomicCompareAndSwap32 ptr old new_ mem) => (CMPXCHGLlock ptr old new_ mem) +(AtomicCompareAndSwap64 ptr old new_ mem) => (CMPXCHGQlock ptr old new_ mem) + +// Atomic memory updates. +(AtomicAnd8 ptr val mem) => (ANDBlock ptr val mem) +(AtomicAnd32 ptr val mem) => (ANDLlock ptr val mem) +(AtomicOr8 ptr val mem) => (ORBlock ptr val mem) +(AtomicOr32 ptr val mem) => (ORLlock ptr val mem) + +// Write barrier. +(WB ...) => (LoweredWB ...) 
+ +(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem) + +// lowering rotates +(RotateLeft8 ...) => (ROLB ...) +(RotateLeft16 ...) => (ROLW ...) +(RotateLeft32 ...) => (ROLL ...) +(RotateLeft64 ...) => (ROLQ ...) + +// *************************** +// Above: lowering rules +// Below: optimizations +// *************************** +// TODO: Should the optimizations be a separate pass? + +// Fold boolean tests into blocks +(NE (TESTB (SETL cmp) (SETL cmp)) yes no) => (LT cmp yes no) +(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) => (LE cmp yes no) +(NE (TESTB (SETG cmp) (SETG cmp)) yes no) => (GT cmp yes no) +(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) => (GE cmp yes no) +(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) => (EQ cmp yes no) +(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) => (NE cmp yes no) +(NE (TESTB (SETB cmp) (SETB cmp)) yes no) => (ULT cmp yes no) +(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) => (ULE cmp yes no) +(NE (TESTB (SETA cmp) (SETA cmp)) yes no) => (UGT cmp yes no) +(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) => (UGE cmp yes no) +(NE (TESTB (SETO cmp) (SETO cmp)) yes no) => (OS cmp yes no) + +// Unsigned comparisons to 0/1 +(ULT (TEST(Q|L|W|B) x x) yes no) => (First no yes) +(UGE (TEST(Q|L|W|B) x x) yes no) => (First yes no) +(SETB (TEST(Q|L|W|B) x x)) => (ConstBool [false]) +(SETAE (TEST(Q|L|W|B) x x)) => (ConstBool [true]) + +// x & 1 != 0 -> x & 1 +(SETNE (TEST(B|W)const [1] x)) => (AND(L|L)const [1] x) +(SETB (BT(L|Q)const [0] x)) => (AND(L|Q)const [1] x) + +// Recognize bit tests: a&(1<<b) != 0 for b suitably bounded +((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => ((ULT|UGE) (BTL x y)) +((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => ((ULT|UGE) (BTQ x y)) +((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c)) + => ((ULT|UGE) (BTLconst [int8(log32(c))] x)) +((NE|EQ) (TESTQconst [c] x)) && 
isUint64PowerOfTwo(int64(c)) + => ((ULT|UGE) (BTQconst [int8(log32(c))] x)) +((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c) + => ((ULT|UGE) (BTQconst [int8(log64(c))] x)) +(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => (SET(B|AE) (BTL x y)) +(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => (SET(B|AE) (BTQ x y)) +(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c)) + => (SET(B|AE) (BTLconst [int8(log32(c))] x)) +(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c)) + => (SET(B|AE) (BTQconst [int8(log32(c))] x)) +(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c) + => (SET(B|AE) (BTQconst [int8(log64(c))] x)) +// SET..store variant +(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) + => (SET(B|AE)store [off] {sym} ptr (BTL x y) mem) +(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) + => (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem) +(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(int64(c)) + => (SET(B|AE)store [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem) +(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(int64(c)) + => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem) +(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c) + => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem) + +// Handle bit-testing in the form (a>>b)&1 != 0 by building the above rules +// and further combining shifts. +(BT(Q|L)const [c] (SHRQconst [d] x)) && (c+d)<64 => (BTQconst [c+d] x) +(BT(Q|L)const [c] (SHLQconst [d] x)) && c>d => (BT(Q|L)const [c-d] x) +(BT(Q|L)const [0] s:(SHRQ x y)) => (BTQ y x) +(BTLconst [c] (SHRLconst [d] x)) && (c+d)<32 => (BTLconst [c+d] x) +(BTLconst [c] (SHLLconst [d] x)) && c>d => (BTLconst [c-d] x) +(BTLconst [0] s:(SHR(L|XL) x y)) => (BTL y x) + +// Rewrite a & 1 != 1 into a & 1 == 0. 
+// Among other things, this lets us turn (a>>b)&1 != 1 into a bit test. +(SET(NE|EQ) (CMPLconst [1] s:(ANDLconst [1] _))) => (SET(EQ|NE) (CMPLconst [0] s)) +(SET(NE|EQ)store [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPLconst [0] s) mem) +(SET(NE|EQ) (CMPQconst [1] s:(ANDQconst [1] _))) => (SET(EQ|NE) (CMPQconst [0] s)) +(SET(NE|EQ)store [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPQconst [0] s) mem) + +// Recognize bit setting (a |= 1< (BTS(Q|L) x y) +(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y) +// Note: only convert OR/XOR to BTS/BTC if the constant wouldn't fit in +// the constant field of the OR/XOR instruction. See issue 61694. +((OR|XOR)Q (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 1<<31 => (BT(S|C)Qconst [int8(log64(c))] x) + +// Recognize bit clearing: a &^= 1< (BTR(Q|L) x y) +(ANDN(Q|L) x (SHL(Q|L) (MOV(Q|L)const [1]) y)) => (BTR(Q|L) x y) +// Note: only convert AND to BTR if the constant wouldn't fit in +// the constant field of the AND instruction. See issue 61694. +(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31 => (BTRQconst [int8(log64(^c))] x) + +// Special-case bit patterns on first/last bit. +// generic.rules changes ANDs of high-part/low-part masks into a couple of shifts, +// for instance: +// x & 0xFFFF0000 -> (x >> 16) << 16 +// x & 0x80000000 -> (x >> 31) << 31 +// +// In case the mask is just one bit (like second example above), it conflicts +// with the above rules to detect bit-testing / bit-clearing of first/last bit. +// We thus special-case them, by detecting the shift patterns. 
+ +// Special case resetting first/last bit +(SHL(L|Q)const [1] (SHR(L|Q)const [1] x)) + => (AND(L|Q)const [-2] x) +(SHRLconst [1] (SHLLconst [1] x)) + => (ANDLconst [0x7fffffff] x) +(SHRQconst [1] (SHLQconst [1] x)) + => (BTRQconst [63] x) + +// Special case testing first/last bit (with double-shift generated by generic.rules) +((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2 + => ((SETB|SETAE|ULT|UGE) (BTQconst [63] x)) +((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2 + => ((SETB|SETAE|ULT|UGE) (BTQconst [31] x)) +(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2 + => (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem) +(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2 + => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem) + +((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2 + => ((SETB|SETAE|ULT|UGE) (BTQconst [0] x)) +((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2 + => ((SETB|SETAE|ULT|UGE) (BTLconst [0] x)) +(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2 + => (SET(B|AE)store [off] {sym} ptr (BTQconst [0] x) mem) +(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2 + => (SET(B|AE)store [off] {sym} ptr (BTLconst [0] x) mem) + +// Special-case manually testing last bit with "a>>63 != 0" (without "&1") +((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2 + => ((SETB|SETAE|ULT|UGE) (BTQconst [63] x)) +((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2 + => ((SETB|SETAE|ULT|UGE) (BTLconst [31] x)) +(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2 + => (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem) +(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && 
z1==z2 + => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem) + +// Fold combinations of bit ops on same bit. An example is math.Copysign(c,-1) +(BTSQconst [c] (BTRQconst [c] x)) => (BTSQconst [c] x) +(BTSQconst [c] (BTCQconst [c] x)) => (BTSQconst [c] x) +(BTRQconst [c] (BTSQconst [c] x)) => (BTRQconst [c] x) +(BTRQconst [c] (BTCQconst [c] x)) => (BTRQconst [c] x) + +// Fold boolean negation into SETcc. +(XORLconst [1] (SETNE x)) => (SETEQ x) +(XORLconst [1] (SETEQ x)) => (SETNE x) +(XORLconst [1] (SETL x)) => (SETGE x) +(XORLconst [1] (SETGE x)) => (SETL x) +(XORLconst [1] (SETLE x)) => (SETG x) +(XORLconst [1] (SETG x)) => (SETLE x) +(XORLconst [1] (SETB x)) => (SETAE x) +(XORLconst [1] (SETAE x)) => (SETB x) +(XORLconst [1] (SETBE x)) => (SETA x) +(XORLconst [1] (SETA x)) => (SETBE x) + +// Special case for floating point - LF/LEF not generated +(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) => (UGT cmp yes no) +(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) => (UGE cmp yes no) +(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) => (EQF cmp yes no) +(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) => (NEF cmp yes no) + +// Disabled because it interferes with the pattern match above and makes worse code. 
+// (SETNEF x) => (ORQ (SETNE x) (SETNAN x)) +// (SETEQF x) => (ANDQ (SETEQ x) (SETORD x)) + +// fold constants into instructions +(ADDQ x (MOVQconst [c])) && is32Bit(c) && !t.IsPtr() => (ADDQconst [int32(c)] x) +(ADDQ x (MOVLconst [c])) => (ADDQconst [c] x) +(ADDL x (MOVLconst [c])) => (ADDLconst [c] x) + +(SUBQ x (MOVQconst [c])) && is32Bit(c) => (SUBQconst x [int32(c)]) +(SUBQ (MOVQconst [c]) x) && is32Bit(c) => (NEGQ (SUBQconst x [int32(c)])) +(SUBL x (MOVLconst [c])) => (SUBLconst x [c]) +(SUBL (MOVLconst [c]) x) => (NEGL (SUBLconst x [c])) + +(MULQ x (MOVQconst [c])) && is32Bit(c) => (MULQconst [int32(c)] x) +(MULL x (MOVLconst [c])) => (MULLconst [c] x) + +(ANDQ x (MOVQconst [c])) && is32Bit(c) => (ANDQconst [int32(c)] x) +(ANDL x (MOVLconst [c])) => (ANDLconst [c] x) + +(AND(L|Q)const [c] (AND(L|Q)const [d] x)) => (AND(L|Q)const [c & d] x) +(XOR(L|Q)const [c] (XOR(L|Q)const [d] x)) => (XOR(L|Q)const [c ^ d] x) +(OR(L|Q)const [c] (OR(L|Q)const [d] x)) => (OR(L|Q)const [c | d] x) + +(MULLconst [c] (MULLconst [d] x)) => (MULLconst [c * d] x) +(MULQconst [c] (MULQconst [d] x)) && is32Bit(int64(c)*int64(d)) => (MULQconst [c * d] x) + +(ORQ x (MOVQconst [c])) && is32Bit(c) => (ORQconst [int32(c)] x) +(ORQ x (MOVLconst [c])) => (ORQconst [c] x) +(ORL x (MOVLconst [c])) => (ORLconst [c] x) + +(XORQ x (MOVQconst [c])) && is32Bit(c) => (XORQconst [int32(c)] x) +(XORL x (MOVLconst [c])) => (XORLconst [c] x) + +(SHLQ x (MOV(Q|L)const [c])) => (SHLQconst [int8(c&63)] x) +(SHLL x (MOV(Q|L)const [c])) => (SHLLconst [int8(c&31)] x) + +(SHRQ x (MOV(Q|L)const [c])) => (SHRQconst [int8(c&63)] x) +(SHRL x (MOV(Q|L)const [c])) => (SHRLconst [int8(c&31)] x) +(SHRW x (MOV(Q|L)const [c])) && c&31 < 16 => (SHRWconst [int8(c&31)] x) +(SHRW _ (MOV(Q|L)const [c])) && c&31 >= 16 => (MOVLconst [0]) +(SHRB x (MOV(Q|L)const [c])) && c&31 < 8 => (SHRBconst [int8(c&31)] x) +(SHRB _ (MOV(Q|L)const [c])) && c&31 >= 8 => (MOVLconst [0]) + +(SARQ x (MOV(Q|L)const [c])) => (SARQconst 
[int8(c&63)] x) +(SARL x (MOV(Q|L)const [c])) => (SARLconst [int8(c&31)] x) +(SARW x (MOV(Q|L)const [c])) => (SARWconst [int8(min(int64(c)&31,15))] x) +(SARB x (MOV(Q|L)const [c])) => (SARBconst [int8(min(int64(c)&31,7))] x) + +// Operations which don't affect the low 6/5 bits of the shift amount are NOPs. +((SHLQ|SHRQ|SARQ) x (ADDQconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x y) +((SHLQ|SHRQ|SARQ) x (NEGQ (ADDQconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGQ y)) +((SHLQ|SHRQ|SARQ) x (ANDQconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y) +((SHLQ|SHRQ|SARQ) x (NEGQ (ANDQconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGQ y)) + +((SHLL|SHRL|SARL) x (ADDQconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y) +((SHLL|SHRL|SARL) x (NEGQ (ADDQconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL) x (NEGQ y)) +((SHLL|SHRL|SARL) x (ANDQconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y) +((SHLL|SHRL|SARL) x (NEGQ (ANDQconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGQ y)) + +((SHLQ|SHRQ|SARQ) x (ADDLconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x y) +((SHLQ|SHRQ|SARQ) x (NEGL (ADDLconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGL y)) +((SHLQ|SHRQ|SARQ) x (ANDLconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y) +((SHLQ|SHRQ|SARQ) x (NEGL (ANDLconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGL y)) + +((SHLL|SHRL|SARL) x (ADDLconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y) +((SHLL|SHRL|SARL) x (NEGL (ADDLconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL) x (NEGL y)) +((SHLL|SHRL|SARL) x (ANDLconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y) +((SHLL|SHRL|SARL) x (NEGL (ANDLconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGL y)) + +// rotate left negative = rotate right +(ROLQ x (NEG(Q|L) y)) => (RORQ x y) +(ROLL x (NEG(Q|L) y)) => (RORL x y) +(ROLW x (NEG(Q|L) y)) => (RORW x y) +(ROLB x (NEG(Q|L) y)) => (RORB x y) + +// rotate right negative = rotate left +(RORQ x (NEG(Q|L) y)) => (ROLQ x y) 
+(RORL x (NEG(Q|L) y)) => (ROLL x y) +(RORW x (NEG(Q|L) y)) => (ROLW x y) +(RORB x (NEG(Q|L) y)) => (ROLB x y) + +// rotate by constants +(ROLQ x (MOV(Q|L)const [c])) => (ROLQconst [int8(c&63)] x) +(ROLL x (MOV(Q|L)const [c])) => (ROLLconst [int8(c&31)] x) +(ROLW x (MOV(Q|L)const [c])) => (ROLWconst [int8(c&15)] x) +(ROLB x (MOV(Q|L)const [c])) => (ROLBconst [int8(c&7) ] x) + +(RORQ x (MOV(Q|L)const [c])) => (ROLQconst [int8((-c)&63)] x) +(RORL x (MOV(Q|L)const [c])) => (ROLLconst [int8((-c)&31)] x) +(RORW x (MOV(Q|L)const [c])) => (ROLWconst [int8((-c)&15)] x) +(RORB x (MOV(Q|L)const [c])) => (ROLBconst [int8((-c)&7) ] x) + +// Constant shift simplifications +((SHLQ|SHRQ|SARQ)const x [0]) => x +((SHLL|SHRL|SARL)const x [0]) => x +((SHRW|SARW)const x [0]) => x +((SHRB|SARB)const x [0]) => x +((ROLQ|ROLL|ROLW|ROLB)const x [0]) => x + +// Multi-register shifts +(ORQ (SH(R|L)Q lo bits) (SH(L|R)Q hi (NEGQ bits))) => (SH(R|L)DQ lo hi bits) +(ORQ (SH(R|L)XQ lo bits) (SH(L|R)XQ hi (NEGQ bits))) => (SH(R|L)DQ lo hi bits) + +// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits) +// because the x86 instructions are defined to use all 5 bits of the shift even +// for the small shifts. I don't think we'll ever generate a weird shift (e.g. +// (SHRW x (MOVLconst [24])), but just in case. + +(CMPQ x (MOVQconst [c])) && is32Bit(c) => (CMPQconst x [int32(c)]) +(CMPQ (MOVQconst [c]) x) && is32Bit(c) => (InvertFlags (CMPQconst x [int32(c)])) +(CMPL x (MOVLconst [c])) => (CMPLconst x [c]) +(CMPL (MOVLconst [c]) x) => (InvertFlags (CMPLconst x [c])) +(CMPW x (MOVLconst [c])) => (CMPWconst x [int16(c)]) +(CMPW (MOVLconst [c]) x) => (InvertFlags (CMPWconst x [int16(c)])) +(CMPB x (MOVLconst [c])) => (CMPBconst x [int8(c)]) +(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)])) + +// Canonicalize the order of arguments to comparisons - helps with CSE. 
+(CMP(Q|L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(Q|L|W|B) y x)) + +// Using MOVZX instead of AND is cheaper. +(AND(Q|L)const [ 0xFF] x) => (MOVBQZX x) +(AND(Q|L)const [0xFFFF] x) => (MOVWQZX x) +// This rule is currently invalid because 0xFFFFFFFF is not representable by a signed int32. +// Commenting out for now, because it also can't trigger because of the is32bit guard on the +// ANDQconst lowering-rule, above, prevents 0xFFFFFFFF from matching (for the same reason) +// Using an alternate form of this rule segfaults some binaries because of +// adverse interactions with other passes. +// (ANDQconst [0xFFFFFFFF] x) => (MOVLQZX x) + +// strength reduction +// Assumes that the following costs from https://gmplib.org/~tege/x86-timing.pdf: +// 1 - addq, shlq, leaq, negq, subq +// 3 - imulq +// This limits the rewrites to two instructions. +// Note that negq always operates in-place, +// which can require a register-register move +// to preserve the original value, +// so it must be used with care. 
+(MUL(Q|L)const [-9] x) => (NEG(Q|L) (LEA(Q|L)8 x x)) +(MUL(Q|L)const [-5] x) => (NEG(Q|L) (LEA(Q|L)4 x x)) +(MUL(Q|L)const [-3] x) => (NEG(Q|L) (LEA(Q|L)2 x x)) +(MUL(Q|L)const [-1] x) => (NEG(Q|L) x) +(MUL(Q|L)const [ 0] _) => (MOV(Q|L)const [0]) +(MUL(Q|L)const [ 1] x) => x +(MUL(Q|L)const [ 3] x) => (LEA(Q|L)2 x x) +(MUL(Q|L)const [ 5] x) => (LEA(Q|L)4 x x) +(MUL(Q|L)const [ 7] x) => (LEA(Q|L)2 x (LEA(Q|L)2 x x)) +(MUL(Q|L)const [ 9] x) => (LEA(Q|L)8 x x) +(MUL(Q|L)const [11] x) => (LEA(Q|L)2 x (LEA(Q|L)4 x x)) +(MUL(Q|L)const [13] x) => (LEA(Q|L)4 x (LEA(Q|L)2 x x)) +(MUL(Q|L)const [19] x) => (LEA(Q|L)2 x (LEA(Q|L)8 x x)) +(MUL(Q|L)const [21] x) => (LEA(Q|L)4 x (LEA(Q|L)4 x x)) +(MUL(Q|L)const [25] x) => (LEA(Q|L)8 x (LEA(Q|L)2 x x)) +(MUL(Q|L)const [27] x) => (LEA(Q|L)8 (LEA(Q|L)2 x x) (LEA(Q|L)2 x x)) +(MUL(Q|L)const [37] x) => (LEA(Q|L)4 x (LEA(Q|L)8 x x)) +(MUL(Q|L)const [41] x) => (LEA(Q|L)8 x (LEA(Q|L)4 x x)) +(MUL(Q|L)const [45] x) => (LEA(Q|L)8 (LEA(Q|L)4 x x) (LEA(Q|L)4 x x)) +(MUL(Q|L)const [73] x) => (LEA(Q|L)8 x (LEA(Q|L)8 x x)) +(MUL(Q|L)const [81] x) => (LEA(Q|L)8 (LEA(Q|L)8 x x) (LEA(Q|L)8 x x)) + +(MUL(Q|L)const [c] x) && isPowerOfTwo64(int64(c)+1) && c >= 15 => (SUB(Q|L) (SHL(Q|L)const [int8(log64(int64(c)+1))] x) x) +(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-1) && c >= 17 => (LEA(Q|L)1 (SHL(Q|L)const [int8(log32(c-1))] x) x) +(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-2) && c >= 34 => (LEA(Q|L)2 (SHL(Q|L)const [int8(log32(c-2))] x) x) +(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-4) && c >= 68 => (LEA(Q|L)4 (SHL(Q|L)const [int8(log32(c-4))] x) x) +(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-8) && c >= 136 => (LEA(Q|L)8 (SHL(Q|L)const [int8(log32(c-8))] x) x) +(MUL(Q|L)const [c] x) && c%3 == 0 && isPowerOfTwo32(c/3) => (SHL(Q|L)const [int8(log32(c/3))] (LEA(Q|L)2 x x)) +(MUL(Q|L)const [c] x) && c%5 == 0 && isPowerOfTwo32(c/5) => (SHL(Q|L)const [int8(log32(c/5))] (LEA(Q|L)4 x x)) +(MUL(Q|L)const [c] x) && c%9 == 0 && isPowerOfTwo32(c/9) => 
(SHL(Q|L)const [int8(log32(c/9))] (LEA(Q|L)8 x x)) + +// combine add/shift into LEAQ/LEAL +(ADD(L|Q) x (SHL(L|Q)const [3] y)) => (LEA(L|Q)8 x y) +(ADD(L|Q) x (SHL(L|Q)const [2] y)) => (LEA(L|Q)4 x y) +(ADD(L|Q) x (SHL(L|Q)const [1] y)) => (LEA(L|Q)2 x y) +(ADD(L|Q) x (ADD(L|Q) y y)) => (LEA(L|Q)2 x y) +(ADD(L|Q) x (ADD(L|Q) x y)) => (LEA(L|Q)2 y x) + +// combine ADDQ/ADDQconst into LEAQ1/LEAL1 +(ADD(Q|L)const [c] (ADD(Q|L) x y)) => (LEA(Q|L)1 [c] x y) +(ADD(Q|L) (ADD(Q|L)const [c] x) y) => (LEA(Q|L)1 [c] x y) +(ADD(Q|L)const [c] (SHL(Q|L)const [1] x)) => (LEA(Q|L)1 [c] x x) + +// fold ADDQ/ADDL into LEAQ/LEAL +(ADD(Q|L)const [c] (LEA(Q|L) [d] {s} x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x) +(LEA(Q|L) [c] {s} (ADD(Q|L)const [d] x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x) +(LEA(Q|L) [c] {s} (ADD(Q|L) x y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y) +(ADD(Q|L) x (LEA(Q|L) [c] {s} y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y) + +// fold ADDQconst/ADDLconst into LEAQx/LEALx +(ADD(Q|L)const [c] (LEA(Q|L)1 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)1 [c+d] {s} x y) +(ADD(Q|L)const [c] (LEA(Q|L)2 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)2 [c+d] {s} x y) +(ADD(Q|L)const [c] (LEA(Q|L)4 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)4 [c+d] {s} x y) +(ADD(Q|L)const [c] (LEA(Q|L)8 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)8 [c+d] {s} x y) +(LEA(Q|L)1 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)1 [c+d] {s} x y) +(LEA(Q|L)2 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)2 [c+d] {s} x y) +(LEA(Q|L)2 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB => (LEA(Q|L)2 [c+2*d] {s} x y) +(LEA(Q|L)4 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)4 [c+d] {s} x y) +(LEA(Q|L)4 [c] {s} x (ADD(Q|L)const [d] y)) && 
is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB => (LEA(Q|L)4 [c+4*d] {s} x y) +(LEA(Q|L)8 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)8 [c+d] {s} x y) +(LEA(Q|L)8 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB => (LEA(Q|L)8 [c+8*d] {s} x y) + +// fold shifts into LEAQx/LEALx +(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)2 [c] {s} x y) +(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)4 [c] {s} x y) +(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [3] y)) => (LEA(Q|L)8 [c] {s} x y) +(LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)4 [c] {s} x y) +(LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)8 [c] {s} x y) +(LEA(Q|L)4 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)8 [c] {s} x y) + +// reverse ordering of compare instruction +(SETL (InvertFlags x)) => (SETG x) +(SETG (InvertFlags x)) => (SETL x) +(SETB (InvertFlags x)) => (SETA x) +(SETA (InvertFlags x)) => (SETB x) +(SETLE (InvertFlags x)) => (SETGE x) +(SETGE (InvertFlags x)) => (SETLE x) +(SETBE (InvertFlags x)) => (SETAE x) +(SETAE (InvertFlags x)) => (SETBE x) +(SETEQ (InvertFlags x)) => (SETEQ x) +(SETNE (InvertFlags x)) => (SETNE x) + +(SETLstore [off] {sym} ptr (InvertFlags x) mem) => (SETGstore [off] {sym} ptr x mem) +(SETGstore [off] {sym} ptr (InvertFlags x) mem) => (SETLstore [off] {sym} ptr x mem) +(SETBstore [off] {sym} ptr (InvertFlags x) mem) => (SETAstore [off] {sym} ptr x mem) +(SETAstore [off] {sym} ptr (InvertFlags x) mem) => (SETBstore [off] {sym} ptr x mem) +(SETLEstore [off] {sym} ptr (InvertFlags x) mem) => (SETGEstore [off] {sym} ptr x mem) +(SETGEstore [off] {sym} ptr (InvertFlags x) mem) => (SETLEstore [off] {sym} ptr x mem) +(SETBEstore [off] {sym} ptr (InvertFlags x) mem) => (SETAEstore [off] {sym} ptr x mem) +(SETAEstore [off] {sym} ptr (InvertFlags x) mem) => (SETBEstore [off] {sym} ptr x mem) +(SETEQstore [off] {sym} ptr (InvertFlags x) mem) => (SETEQstore [off] {sym} ptr x mem) 
+(SETNEstore [off] {sym} ptr (InvertFlags x) mem) => (SETNEstore [off] {sym} ptr x mem) + +// sign extended loads +// Note: The combined instruction must end up in the same block +// as the original load. If not, we end up making a value with +// memory type live in two different blocks, which can lead to +// multiple memory values alive simultaneously. +// Make sure we don't combine these ops if the load has another use. +// This prevents a single load from being split into multiple loads +// which then might return different values. See test/atomicload.go. +(MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload [off] {sym} ptr mem) +(MOVBQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload [off] {sym} ptr mem) +(MOVBQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload [off] {sym} ptr mem) +(MOVBQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload [off] {sym} ptr mem) +(MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload [off] {sym} ptr mem) +(MOVBQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload [off] {sym} ptr mem) +(MOVBQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload [off] {sym} ptr mem) +(MOVBQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload [off] {sym} ptr mem) +(MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload [off] {sym} ptr mem) +(MOVWQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload [off] {sym} ptr mem) +(MOVWQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload [off] {sym} ptr mem) +(MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload [off] {sym} ptr mem) +(MOVWQZX x:(MOVLload 
[off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload [off] {sym} ptr mem) +(MOVWQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload [off] {sym} ptr mem) +(MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload [off] {sym} ptr mem) +(MOVLQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload [off] {sym} ptr mem) +(MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload [off] {sym} ptr mem) +(MOVLQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload [off] {sym} ptr mem) + +// replace load from same location as preceding store with zero/sign extension (or copy in case of full width) +(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQZX x) +(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQZX x) +(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQZX x) +(MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x +(MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQSX x) +(MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQSX x) +(MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQSX x) + +// Fold extensions and ANDs together. 
+(MOVBQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xff] x) +(MOVWQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xffff] x) +(MOVLQZX (ANDLconst [c] x)) => (ANDLconst [c] x) +(MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 => (ANDLconst [c & 0x7f] x) +(MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 => (ANDLconst [c & 0x7fff] x) +(MOVLQSX (ANDLconst [c] x)) && uint32(c) & 0x80000000 == 0 => (ANDLconst [c & 0x7fffffff] x) + +// Don't extend before storing +(MOVLstore [off] {sym} ptr (MOVLQSX x) mem) => (MOVLstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWQSX x) mem) => (MOVWstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBQSX x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVLstore [off] {sym} ptr (MOVLQZX x) mem) => (MOVLstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWQZX x) mem) => (MOVWstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBQZX x) mem) => (MOVBstore [off] {sym} ptr x mem) + +// fold constants into memory operations +// Note that this is not always a good idea because if not all the uses of +// the ADDQconst get eliminated, we still have to compute the ADDQconst and we now +// have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one. +// Nevertheless, let's do it! 
+(MOV(Q|L|W|B|SS|SD|O)load [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => + (MOV(Q|L|W|B|SS|SD|O)load [off1+off2] {sym} ptr mem) +(MOV(Q|L|W|B|SS|SD|O)store [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) => + (MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {sym} ptr val mem) +(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) => + (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {sym} base val mem) +((ADD|SUB|AND|OR|XOR)Qload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) => + ((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {sym} val base mem) +((ADD|SUB|AND|OR|XOR)Lload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) => + ((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {sym} val base mem) +(CMP(Q|L|W|B)load [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) => + (CMP(Q|L|W|B)load [off1+off2] {sym} base val mem) +(CMP(Q|L|W|B)constload [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) => + (CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) + +((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) => + ((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem) +((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) => + ((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem) +((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) => + ((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) +((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) => + ((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem) +((ADD|SUB|AND|OR|XOR)Qmodify 
[off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) => + ((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {sym} base val mem) +((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) => + ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {sym} base val mem) + +// Fold constants into stores. +(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validVal(c) => + (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) +(MOVLstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) => + (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) +(MOVWstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) => + (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem) +(MOVBstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) => + (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem) + +// Fold address offsets into constant stores. +(MOV(Q|L|W|B|O)storeconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd32(off) => + (MOV(Q|L|W|B|O)storeconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem) + +// We need to fold LEAQ into the MOVx ops so that the live variable analysis knows +// what variables are being read/written by the ops. 
+(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1] {sym1} (LEAQ [off2] {sym2} base) mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOV(Q|L|W|B|SS|SD|O)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(MOV(Q|L|W|B|O)storeconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) => + (MOV(Q|L|W|B|O)storeconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) +(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {mergeSym(sym1,sym2)} base val mem) +((ADD|SUB|AND|OR|XOR)Qload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + ((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {mergeSym(sym1,sym2)} val base mem) +((ADD|SUB|AND|OR|XOR)Lload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + ((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem) +(CMP(Q|L|W|B)load [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (CMP(Q|L|W|B)load [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(CMP(Q|L|W|B)constload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) + && ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) => + (CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) + +((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + ((ADD|SUB|MUL|DIV)SSload [off1+off2] 
{mergeSym(sym1,sym2)} val base mem) +((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + ((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem) +((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) + && ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) => + ((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) +((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem) + && ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) => + ((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem) +((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + ((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) +((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem) + && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem) + +// fold LEAQs together +(LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x) + +// LEAQ into LEAQ1 +(LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB => + (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) + +// LEAQ1 into LEAQ +(LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y) + +// LEAQ into LEAQ[248] +(LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB => + (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) +(LEAQ4 [off1] {sym1} 
(LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB => + (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) +(LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB => + (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) + +// LEAQ[248] into LEAQ +(LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y) +(LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y) +(LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y) + +// LEAQ[1248] into LEAQ[1248]. Only some such merges are possible. +(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y) +(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x) +(LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil => + (LEAQ4 [off1+2*off2] {sym1} x y) +(LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil => + (LEAQ8 [off1+4*off2] {sym1} x y) +// TODO: more? 
+ +// Lower LEAQ2/4/8 when the offset is a constant +(LEAQ2 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*2) => + (LEAQ [off+int32(scale)*2] {sym} x) +(LEAQ4 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*4) => + (LEAQ [off+int32(scale)*4] {sym} x) +(LEAQ8 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*8) => + (LEAQ [off+int32(scale)*8] {sym} x) + +// Absorb InvertFlags into branches. +(LT (InvertFlags cmp) yes no) => (GT cmp yes no) +(GT (InvertFlags cmp) yes no) => (LT cmp yes no) +(LE (InvertFlags cmp) yes no) => (GE cmp yes no) +(GE (InvertFlags cmp) yes no) => (LE cmp yes no) +(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no) +(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no) +(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no) +(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no) +(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no) +(NE (InvertFlags cmp) yes no) => (NE cmp yes no) + +// Constant comparisons. 
+(CMPQconst (MOVQconst [x]) [y]) && x==int64(y) => (FlagEQ) +(CMPQconst (MOVQconst [x]) [y]) && x (FlagLT_ULT) +(CMPQconst (MOVQconst [x]) [y]) && xuint64(int64(y)) => (FlagLT_UGT) +(CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x) (FlagGT_ULT) +(CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)>uint64(int64(y)) => (FlagGT_UGT) +(CMPLconst (MOVLconst [x]) [y]) && x==y => (FlagEQ) +(CMPLconst (MOVLconst [x]) [y]) && x (FlagLT_ULT) +(CMPLconst (MOVLconst [x]) [y]) && xuint32(y) => (FlagLT_UGT) +(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x) (FlagGT_ULT) +(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)>uint32(y) => (FlagGT_UGT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)==y => (FlagEQ) +(CMPWconst (MOVLconst [x]) [y]) && int16(x) (FlagLT_ULT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)uint16(y) => (FlagLT_UGT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x) (FlagGT_ULT) +(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)>uint16(y) => (FlagGT_UGT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)==y => (FlagEQ) +(CMPBconst (MOVLconst [x]) [y]) && int8(x) (FlagLT_ULT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)uint8(y) => (FlagLT_UGT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x) (FlagGT_ULT) +(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)>uint8(y) => (FlagGT_UGT) + +// CMPQconst requires a 32 bit const, but we can still constant-fold 64 bit consts. +// In theory this applies to any of the simplifications above, +// but CMPQ is the only one I've actually seen occur. +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x==y => (FlagEQ) +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x (FlagLT_ULT) +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && xuint64(y) => (FlagLT_UGT) +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x) (FlagGT_ULT) +(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)>uint64(y) => (FlagGT_UGT) + +// Other known comparisons. 
+(CMPQconst (MOVBQZX _) [c]) && 0xFF < c => (FlagLT_ULT) +(CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c => (FlagLT_ULT) +(CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1< (FlagLT_ULT) +(CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1< (FlagLT_ULT) +(CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT) +(CMPQconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT) +(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT) +(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < n => (FlagLT_ULT) +(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < n => (FlagLT_ULT) + +// TESTQ c c sets flags like CMPQ c 0. +(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c == 0 => (FlagEQ) +(TESTLconst [c] (MOVLconst [c])) && c == 0 => (FlagEQ) +(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c < 0 => (FlagLT_UGT) +(TESTLconst [c] (MOVLconst [c])) && c < 0 => (FlagLT_UGT) +(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c > 0 => (FlagGT_UGT) +(TESTLconst [c] (MOVLconst [c])) && c > 0 => (FlagGT_UGT) + +// TODO: DIVxU also. + +// Absorb flag constants into SBB ops. +(SBBQcarrymask (FlagEQ)) => (MOVQconst [0]) +(SBBQcarrymask (FlagLT_ULT)) => (MOVQconst [-1]) +(SBBQcarrymask (FlagLT_UGT)) => (MOVQconst [0]) +(SBBQcarrymask (FlagGT_ULT)) => (MOVQconst [-1]) +(SBBQcarrymask (FlagGT_UGT)) => (MOVQconst [0]) +(SBBLcarrymask (FlagEQ)) => (MOVLconst [0]) +(SBBLcarrymask (FlagLT_ULT)) => (MOVLconst [-1]) +(SBBLcarrymask (FlagLT_UGT)) => (MOVLconst [0]) +(SBBLcarrymask (FlagGT_ULT)) => (MOVLconst [-1]) +(SBBLcarrymask (FlagGT_UGT)) => (MOVLconst [0]) + +// Absorb flag constants into branches. 
+((EQ|LE|GE|ULE|UGE) (FlagEQ) yes no) => (First yes no) +((NE|LT|GT|ULT|UGT) (FlagEQ) yes no) => (First no yes) +((NE|LT|LE|ULT|ULE) (FlagLT_ULT) yes no) => (First yes no) +((EQ|GT|GE|UGT|UGE) (FlagLT_ULT) yes no) => (First no yes) +((NE|LT|LE|UGT|UGE) (FlagLT_UGT) yes no) => (First yes no) +((EQ|GT|GE|ULT|ULE) (FlagLT_UGT) yes no) => (First no yes) +((NE|GT|GE|ULT|ULE) (FlagGT_ULT) yes no) => (First yes no) +((EQ|LT|LE|UGT|UGE) (FlagGT_ULT) yes no) => (First no yes) +((NE|GT|GE|UGT|UGE) (FlagGT_UGT) yes no) => (First yes no) +((EQ|LT|LE|ULT|ULE) (FlagGT_UGT) yes no) => (First no yes) + +// Absorb flag constants into SETxx ops. +((SETEQ|SETLE|SETGE|SETBE|SETAE) (FlagEQ)) => (MOVLconst [1]) +((SETNE|SETL|SETG|SETB|SETA) (FlagEQ)) => (MOVLconst [0]) +((SETNE|SETL|SETLE|SETB|SETBE) (FlagLT_ULT)) => (MOVLconst [1]) +((SETEQ|SETG|SETGE|SETA|SETAE) (FlagLT_ULT)) => (MOVLconst [0]) +((SETNE|SETL|SETLE|SETA|SETAE) (FlagLT_UGT)) => (MOVLconst [1]) +((SETEQ|SETG|SETGE|SETB|SETBE) (FlagLT_UGT)) => (MOVLconst [0]) +((SETNE|SETG|SETGE|SETB|SETBE) (FlagGT_ULT)) => (MOVLconst [1]) +((SETEQ|SETL|SETLE|SETA|SETAE) (FlagGT_ULT)) => (MOVLconst [0]) +((SETNE|SETG|SETGE|SETA|SETAE) (FlagGT_UGT)) => (MOVLconst [1]) +((SETEQ|SETL|SETLE|SETB|SETBE) (FlagGT_UGT)) => (MOVLconst [0]) + +(SETEQstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETEQstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETEQstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETEQstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETEQstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + +(SETNEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETNEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETNEstore [off] {sym} ptr 
(FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETNEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + +(SETLstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETLstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETLstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETLstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETLstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + +(SETLEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETLEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETLEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETLEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETLEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + +(SETGstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETGstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETGstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETGstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETGstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + +(SETGEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETGEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETGEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr 
(MOVLconst [0]) mem) +(SETGEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETGEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + +(SETBstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETBstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETBstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETBstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETBstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + +(SETBEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETBEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETBEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETBEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETBEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) + +(SETAstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETAstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETAstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETAstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETAstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + +(SETAEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETAEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETAEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) +(SETAEstore [off] {sym} ptr 
(FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [0]) mem) +(SETAEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst [1]) mem) + +// Remove redundant *const ops +(ADDQconst [0] x) => x +(ADDLconst [c] x) && c==0 => x +(SUBQconst [0] x) => x +(SUBLconst [c] x) && c==0 => x +(ANDQconst [0] _) => (MOVQconst [0]) +(ANDLconst [c] _) && c==0 => (MOVLconst [0]) +(ANDQconst [-1] x) => x +(ANDLconst [c] x) && c==-1 => x +(ORQconst [0] x) => x +(ORLconst [c] x) && c==0 => x +(ORQconst [-1] _) => (MOVQconst [-1]) +(ORLconst [c] _) && c==-1 => (MOVLconst [-1]) +(XORQconst [0] x) => x +(XORLconst [c] x) && c==0 => x +// TODO: since we got rid of the W/B versions, we might miss +// things like (ANDLconst [0x100] x) which were formerly +// (ANDBconst [0] x). Probably doesn't happen very often. +// If we cared, we might do: +// (ANDLconst [c] x) && t.Size()==1 && int8(x)==0 -> (MOVLconst [0]) + +// Remove redundant ops +// Not in generic rules, because they may appear after lowering e. g. 
Slicemask +(NEG(Q|L) (NEG(Q|L) x)) => x +(NEG(Q|L) s:(SUB(Q|L) x y)) && s.Uses == 1 => (SUB(Q|L) y x) + +// Convert constant subtracts to constant adds +(SUBQconst [c] x) && c != -(1<<31) => (ADDQconst [-c] x) +(SUBLconst [c] x) => (ADDLconst [-c] x) + +// generic constant folding +// TODO: more of this +(ADDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)+d]) +(ADDLconst [c] (MOVLconst [d])) => (MOVLconst [c+d]) +(ADDQconst [c] (ADDQconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDQconst [c+d] x) +(ADDLconst [c] (ADDLconst [d] x)) => (ADDLconst [c+d] x) +(SUBQconst (MOVQconst [d]) [c]) => (MOVQconst [d-int64(c)]) +(SUBQconst (SUBQconst x [d]) [c]) && is32Bit(int64(-c)-int64(d)) => (ADDQconst [-c-d] x) +(SARQconst [c] (MOVQconst [d])) => (MOVQconst [d>>uint64(c)]) +(SARLconst [c] (MOVQconst [d])) => (MOVQconst [int64(int32(d))>>uint64(c)]) +(SARWconst [c] (MOVQconst [d])) => (MOVQconst [int64(int16(d))>>uint64(c)]) +(SARBconst [c] (MOVQconst [d])) => (MOVQconst [int64(int8(d))>>uint64(c)]) +(NEGQ (MOVQconst [c])) => (MOVQconst [-c]) +(NEGL (MOVLconst [c])) => (MOVLconst [-c]) +(MULQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)*d]) +(MULLconst [c] (MOVLconst [d])) => (MOVLconst [c*d]) +(ANDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)&d]) +(ANDLconst [c] (MOVLconst [d])) => (MOVLconst [c&d]) +(ORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)|d]) +(ORLconst [c] (MOVLconst [d])) => (MOVLconst [c|d]) +(XORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)^d]) +(XORLconst [c] (MOVLconst [d])) => (MOVLconst [c^d]) +(NOTQ (MOVQconst [c])) => (MOVQconst [^c]) +(NOTL (MOVLconst [c])) => (MOVLconst [^c]) +(BTSQconst [c] (MOVQconst [d])) => (MOVQconst [d|(1< (MOVQconst [d&^(1< (MOVQconst [d^(1< (MOVQconst [c|d]) + +// generic simplifications +// TODO: more of this +(ADDQ x (NEGQ y)) => (SUBQ x y) +(ADDL x (NEGL y)) => (SUBL x y) +(SUBQ x x) => (MOVQconst [0]) +(SUBL x x) => (MOVLconst [0]) +(ANDQ x x) => x +(ANDL x x) => x +(ORQ x x) => x +(ORL x x) => x 
+(XORQ x x) => (MOVQconst [0]) +(XORL x x) => (MOVLconst [0]) + +(SHLLconst [d] (MOVLconst [c])) => (MOVLconst [c << uint64(d)]) +(SHLQconst [d] (MOVQconst [c])) => (MOVQconst [c << uint64(d)]) +(SHLQconst [d] (MOVLconst [c])) => (MOVQconst [int64(c) << uint64(d)]) + +// Fold NEG into ADDconst/MULconst. Take care to keep c in 32 bit range. +(NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) => (ADDQconst [-c] x) +(MULQconst [c] (NEGQ x)) && c != -(1<<31) => (MULQconst [-c] x) + +// checking AND against 0. +(CMPQconst a:(ANDQ x y) [0]) && a.Uses == 1 => (TESTQ x y) +(CMPLconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTL x y) +(CMPWconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTW x y) +(CMPBconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTB x y) +(CMPQconst a:(ANDQconst [c] x) [0]) && a.Uses == 1 => (TESTQconst [c] x) +(CMPLconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTLconst [c] x) +(CMPWconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTWconst [int16(c)] x) +(CMPBconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTBconst [int8(c)] x) + +// Convert TESTx to TESTxconst if possible. 
+(TESTQ (MOVQconst [c]) x) && is32Bit(c) => (TESTQconst [int32(c)] x) +(TESTL (MOVLconst [c]) x) => (TESTLconst [c] x) +(TESTW (MOVLconst [c]) x) => (TESTWconst [int16(c)] x) +(TESTB (MOVLconst [c]) x) => (TESTBconst [int8(c)] x) + +// TEST %reg,%reg is shorter than CMP +(CMPQconst x [0]) => (TESTQ x x) +(CMPLconst x [0]) => (TESTL x x) +(CMPWconst x [0]) => (TESTW x x) +(CMPBconst x [0]) => (TESTB x x) +(TESTQconst [-1] x) && x.Op != OpAMD64MOVQconst => (TESTQ x x) +(TESTLconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTL x x) +(TESTWconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTW x x) +(TESTBconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTB x x) + +// Convert LEAQ1 back to ADDQ if we can +(LEAQ1 [0] x y) && v.Aux == nil => (ADDQ x y) + +(MOVQstoreconst [c] {s} p1 x:(MOVQstoreconst [a] {s} p0 mem)) + && config.useSSE + && x.Uses == 1 + && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) + && a.Val() == 0 + && c.Val() == 0 + && setPos(v, x.Pos) + && clobber(x) + => (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem) +(MOVQstoreconst [a] {s} p0 x:(MOVQstoreconst [c] {s} p1 mem)) + && config.useSSE + && x.Uses == 1 + && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) + && a.Val() == 0 + && c.Val() == 0 + && setPos(v, x.Pos) + && clobber(x) + => (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem) + +// Merge load and op +// TODO: add indexed variants? 
+((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Qload x [off] {sym} ptr mem) +((ADD|SUB|AND|OR|XOR)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Lload x [off] {sym} ptr mem) +((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem) +((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem) +(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem) +(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) => + ((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem) +(MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem) +(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) => + ((ADD|SUB|AND|OR|XOR)Qmodify [off] {sym} ptr x mem) +(MOVQstore {sym} [off] ptr x:(BT(S|R|C)Qconst [c] l:(MOVQload {sym} [off] ptr mem)) mem) && x.Uses == 1 && l.Uses == 1 && clobber(x, l) => + (BT(S|R|C)Qconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) + +// Merge ADDQconst and LEAQ into atomic loads. 
+(MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => + (MOV(Q|L|B)atomicload [off1+off2] {sym} ptr mem) +(MOV(Q|L|B)atomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOV(Q|L|B)atomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem) + +// Merge ADDQconst and LEAQ into atomic stores. +(XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => + (XCHGQ [off1+off2] {sym} val ptr mem) +(XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB => + (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) +(XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => + (XCHGL [off1+off2] {sym} val ptr mem) +(XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB => + (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem) + +// Merge ADDQconst into atomic adds. +// TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions. +(XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => + (XADDQlock [off1+off2] {sym} val ptr mem) +(XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) => + (XADDLlock [off1+off2] {sym} val ptr mem) + +// Merge ADDQconst into atomic compare and swaps. +// TODO: merging LEAQ doesn't work, assembler doesn't like the resulting instructions. 
+(CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) => + (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem) +(CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) => + (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem) + +// We don't need the conditional move if we know the arg of BSF is not zero. +(CMOVQEQ x _ (Select1 (BS(F|R)Q (ORQconst [c] _)))) && c != 0 => x +// Extension is unnecessary for trailing zeros. +(BSFQ (ORQconst [1<<8] (MOVBQZX x))) => (BSFQ (ORQconst [1<<8] x)) +(BSFQ (ORQconst [1<<16] (MOVWQZX x))) => (BSFQ (ORQconst [1<<16] x)) + +// Redundant sign/zero extensions +// Note: see issue 21963. We have to make sure we use the right type on +// the resulting extension (the outer type, not the inner type). +(MOVLQSX (MOVLQSX x)) => (MOVLQSX x) +(MOVLQSX (MOVWQSX x)) => (MOVWQSX x) +(MOVLQSX (MOVBQSX x)) => (MOVBQSX x) +(MOVWQSX (MOVWQSX x)) => (MOVWQSX x) +(MOVWQSX (MOVBQSX x)) => (MOVBQSX x) +(MOVBQSX (MOVBQSX x)) => (MOVBQSX x) +(MOVLQZX (MOVLQZX x)) => (MOVLQZX x) +(MOVLQZX (MOVWQZX x)) => (MOVWQZX x) +(MOVLQZX (MOVBQZX x)) => (MOVBQZX x) +(MOVWQZX (MOVWQZX x)) => (MOVWQZX x) +(MOVWQZX (MOVBQZX x)) => (MOVBQZX x) +(MOVBQZX (MOVBQZX x)) => (MOVBQZX x) + +(MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem) + && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) => + ((ADD|AND|OR|XOR)Qconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) +(MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem) + && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) => + ((ADD|AND|OR|XOR)Lconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem) + +// float <-> int register moves, with no conversion. +// These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}. 
+(MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) => (MOVQf2i val) +(MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) => (MOVLf2i val) +(MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) => (MOVQi2f val) +(MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) => (MOVLi2f val) + +// Other load-like ops. +(ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ADDQ x (MOVQf2i y)) +(ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ADDL x (MOVLf2i y)) +(SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (SUBQ x (MOVQf2i y)) +(SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (SUBL x (MOVLf2i y)) +(ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ANDQ x (MOVQf2i y)) +(ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ANDL x (MOVLf2i y)) +( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => ( ORQ x (MOVQf2i y)) +( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => ( ORL x (MOVLf2i y)) +(XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (XORQ x (MOVQf2i y)) +(XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (XORL x (MOVLf2i y)) + +(ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (ADDSD x (MOVQi2f y)) +(ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (ADDSS x (MOVLi2f y)) +(SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (SUBSD x (MOVQi2f y)) +(SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (SUBSS x (MOVLi2f y)) +(MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (MULSD x (MOVQi2f y)) +(MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (MULSS x (MOVLi2f y)) + +// Redirect stores to use the other register set. 
+(MOVQstore [off] {sym} ptr (MOVQf2i val) mem) => (MOVSDstore [off] {sym} ptr val mem) +(MOVLstore [off] {sym} ptr (MOVLf2i val) mem) => (MOVSSstore [off] {sym} ptr val mem) +(MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) => (MOVQstore [off] {sym} ptr val mem) +(MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) => (MOVLstore [off] {sym} ptr val mem) + +// Load args directly into the register class where it will be used. +// We do this by just modifying the type of the Arg. +(MOVQf2i (Arg [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg [off] {sym}) +(MOVLf2i (Arg [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg [off] {sym}) +(MOVQi2f (Arg [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg [off] {sym}) +(MOVLi2f (Arg [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg [off] {sym}) + +// LEAQ is rematerializeable, so this helps to avoid register spill. +// See issue 22947 for details +(ADD(Q|L)const [off] x:(SP)) => (LEA(Q|L) [off] x) + +// HMULx is commutative, but its first argument must go in AX. +// If possible, put a rematerializeable value in the first argument slot, +// to reduce the odds that another value will be have to spilled +// specifically to free up AX. +(HMUL(Q|L) x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L) y x) +(HMUL(Q|L)U x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L)U y x) + +// Fold loads into compares +// Note: these may be undone by the flagalloc pass. 
+(CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (CMP(Q|L|W|B)load {sym} [off] ptr x mem) +(CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (InvertFlags (CMP(Q|L|W|B)load {sym} [off] ptr x mem)) + +(CMP(Q|L)const l:(MOV(Q|L)load {sym} [off] ptr mem) [c]) + && l.Uses == 1 + && clobber(l) => +@l.Block (CMP(Q|L)constload {sym} [makeValAndOff(c,off)] ptr mem) +(CMP(W|B)const l:(MOV(W|B)load {sym} [off] ptr mem) [c]) + && l.Uses == 1 + && clobber(l) => +@l.Block (CMP(W|B)constload {sym} [makeValAndOff(int32(c),off)] ptr mem) + +(CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validVal(c) => (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem) +(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem) +(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem) +(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem) + +(TEST(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) l2) + && l == l2 + && l.Uses == 2 + && clobber(l) => + @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff(0, off)] ptr mem) + +// Convert ANDload to MOVload when we can do the AND in a containing TEST op. +// Only do when it's within the same block, so we don't have flags live across basic block boundaries. +// See issue 44228. 
+(TEST(Q|L) a:(AND(Q|L)load [off] {sym} x ptr mem) a) && a.Uses == 2 && a.Block == v.Block && clobber(a) => (TEST(Q|L) (MOV(Q|L)load [off] {sym} ptr mem) x) + +(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))]) +(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) +(MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) +(MOVQload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))]) +(MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) && symIsRO(srcSym) => + (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) + (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem)) + +// Arch-specific inlining for small or disjoint runtime.memmove +// Match post-lowering calls, memory version. +(SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem))))) + && sc.Val64() >= 0 + && isSameCall(sym, "runtime.memmove") + && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 + && isInlinableMemmove(dst, src, sc.Val64(), config) + && clobber(s1, s2, s3, call) + => (Move [sc.Val64()] dst src mem) + +// Match post-lowering calls, register version. +(SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem)) + && sz >= 0 + && isSameCall(sym, "runtime.memmove") + && call.Uses == 1 + && isInlinableMemmove(dst, src, sz, config) + && clobber(call) + => (Move [sz] dst src mem) + +// Prefetch instructions +(PrefetchCache ...) => (PrefetchT0 ...) +(PrefetchCacheStreamed ...) => (PrefetchNTA ...) + +// CPUID feature: BMI1. 
+(AND(Q|L) x (NOT(Q|L) y)) && buildcfg.GOAMD64 >= 3 => (ANDN(Q|L) x y) +(AND(Q|L) x (NEG(Q|L) x)) && buildcfg.GOAMD64 >= 3 => (BLSI(Q|L) x) +(XOR(Q|L) x (ADD(Q|L)const [-1] x)) && buildcfg.GOAMD64 >= 3 => (BLSMSK(Q|L) x) +(AND(Q|L) x (ADD(Q|L)const [-1] x)) && buildcfg.GOAMD64 >= 3 => (Select0 (BLSR(Q|L) x)) +// eliminate TEST instruction in classical "isPowerOfTwo" check +(SETEQ (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s)) => (SETEQ (Select1 blsr)) +(CMOVQEQ x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s)) => (CMOVQEQ x y (Select1 blsr)) +(CMOVLEQ x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s)) => (CMOVLEQ x y (Select1 blsr)) +(EQ (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s) yes no) => (EQ (Select1 blsr) yes no) +(SETNE (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s)) => (SETNE (Select1 blsr)) +(CMOVQNE x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s)) => (CMOVQNE x y (Select1 blsr)) +(CMOVLNE x y (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s)) => (CMOVLNE x y (Select1 blsr)) +(NE (TEST(Q|L) s:(Select0 blsr:(BLSR(Q|L) _)) s) yes no) => (NE (Select1 blsr) yes no) + +(BSWAP(Q|L) (BSWAP(Q|L) p)) => p + +// CPUID feature: MOVBE. 
+(MOV(Q|L)store [i] {s} p x:(BSWAP(Q|L) w) mem) && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => (MOVBE(Q|L)store [i] {s} p w mem) +(MOVBE(Q|L)store [i] {s} p x:(BSWAP(Q|L) w) mem) && x.Uses == 1 => (MOV(Q|L)store [i] {s} p w mem) +(BSWAP(Q|L) x:(MOV(Q|L)load [i] {s} p mem)) && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => @x.Block (MOVBE(Q|L)load [i] {s} p mem) +(BSWAP(Q|L) x:(MOVBE(Q|L)load [i] {s} p mem)) && x.Uses == 1 => @x.Block (MOV(Q|L)load [i] {s} p mem) +(MOVWstore [i] {s} p x:(ROLWconst [8] w) mem) && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => (MOVBEWstore [i] {s} p w mem) +(MOVBEWstore [i] {s} p x:(ROLWconst [8] w) mem) && x.Uses == 1 => (MOVWstore [i] {s} p w mem) + +(SAR(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SARX(Q|L)load [off] {sym} ptr x mem) +(SHL(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SHLX(Q|L)load [off] {sym} ptr x mem) +(SHR(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l) => (SHRX(Q|L)load [off] {sym} ptr x mem) + +((SHL|SHR|SAR)XQload [off] {sym} ptr (MOVQconst [c]) mem) => ((SHL|SHR|SAR)Qconst [int8(c&63)] (MOVQload [off] {sym} ptr mem)) +((SHL|SHR|SAR)XQload [off] {sym} ptr (MOVLconst [c]) mem) => ((SHL|SHR|SAR)Qconst [int8(c&63)] (MOVQload [off] {sym} ptr mem)) +((SHL|SHR|SAR)XLload [off] {sym} ptr (MOVLconst [c]) mem) => ((SHL|SHR|SAR)Lconst [int8(c&31)] (MOVLload [off] {sym} ptr mem)) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go new file mode 100644 index 0000000000000000000000000000000000000000..606171947bbd75b188bfff8c0ee9bc65f26c59d6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/AMD64Ops.go @@ -0,0 +1,1167 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "strings" + +// Notes: +// - Integer types live in the low portion of registers. Upper portions are junk. +// - Boolean types use the low-order byte of a register. 0=false, 1=true. +// Upper bytes are junk. +// - Floating-point types live in the low natural slot of an sse2 register. +// Unused portions are junk. +// - We do not use AH,BH,CH,DH registers. +// - When doing sub-register operations, we try to write the whole +// destination register to avoid a partial-register write. +// - Unused portions of AuxInt (or the Val portion of ValAndOff) are +// filled by sign-extending the used portion. Users of AuxInt which interpret +// AuxInt as unsigned (e.g. shifts) must be careful. +// - All SymOff opcodes require their offset to fit in an int32. + +// Suffixes encode the bit width of various instructions. +// Q (quad word) = 64 bit +// L (long word) = 32 bit +// W (word) = 16 bit +// B (byte) = 8 bit +// D (double) = 64 bit float +// S (single) = 32 bit float + +// copied from ../../amd64/reg.go +var regNamesAMD64 = []string{ + "AX", + "CX", + "DX", + "BX", + "SP", + "BP", + "SI", + "DI", + "R8", + "R9", + "R10", + "R11", + "R12", + "R13", + "g", // a.k.a. R14 + "R15", + "X0", + "X1", + "X2", + "X3", + "X4", + "X5", + "X6", + "X7", + "X8", + "X9", + "X10", + "X11", + "X12", + "X13", + "X14", + "X15", // constant 0 in ABIInternal + + // If you add registers, update asyncPreempt in runtime + + // pseudo-registers + "SB", +} + +func init() { + // Make map from reg names to reg integers. 
+ if len(regNamesAMD64) > 64 { + panic("too many registers") + } + num := map[string]int{} + for i, name := range regNamesAMD64 { + num[name] = i + } + buildReg := func(s string) regMask { + m := regMask(0) + for _, r := range strings.Split(s, " ") { + if n, ok := num[r]; ok { + m |= regMask(1) << uint(n) + continue + } + panic("register " + r + " not found") + } + return m + } + + // Common individual register masks + var ( + ax = buildReg("AX") + cx = buildReg("CX") + dx = buildReg("DX") + bx = buildReg("BX") + gp = buildReg("AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15") + g = buildReg("g") + fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14") + x15 = buildReg("X15") + gpsp = gp | buildReg("SP") + gpspsb = gpsp | buildReg("SB") + gpspsbg = gpspsb | g + callerSave = gp | fp | g // runtime.setg (and anything calling it) may clobber g + ) + // Common slices of register masks + var ( + gponly = []regMask{gp} + fponly = []regMask{fp} + ) + + // Common regInfo + var ( + gp01 = regInfo{inputs: nil, outputs: gponly} + gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly} + gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly} + gp11sb = regInfo{inputs: []regMask{gpspsbg}, outputs: gponly} + gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly} + gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly} + gp21sb = regInfo{inputs: []regMask{gpspsbg, gpsp}, outputs: gponly} + gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}} + gp31shift = regInfo{inputs: []regMask{gp, gp, cx}, outputs: []regMask{gp}} + gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax, dx}} + gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax} + gp21flags = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}} + gp2flags1flags = regInfo{inputs: []regMask{gp, gp, 0}, outputs: []regMask{gp, 0}} + + gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}} + gp1flags = regInfo{inputs: 
[]regMask{gpsp}} + gp0flagsLoad = regInfo{inputs: []regMask{gpspsbg, 0}} + gp1flagsLoad = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}} + gp2flagsLoad = regInfo{inputs: []regMask{gpspsbg, gpsp, gpsp, 0}} + flagsgp = regInfo{inputs: nil, outputs: gponly} + + gp11flags = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp, 0}} + gp1flags1flags = regInfo{inputs: []regMask{gp, 0}, outputs: []regMask{gp, 0}} + + readflags = regInfo{inputs: nil, outputs: gponly} + + gpload = regInfo{inputs: []regMask{gpspsbg, 0}, outputs: gponly} + gp21load = regInfo{inputs: []regMask{gp, gpspsbg, 0}, outputs: gponly} + gploadidx = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}, outputs: gponly} + gp21loadidx = regInfo{inputs: []regMask{gp, gpspsbg, gpsp, 0}, outputs: gponly} + gp21shxload = regInfo{inputs: []regMask{gpspsbg, gp, 0}, outputs: gponly} + gp21shxloadidx = regInfo{inputs: []regMask{gpspsbg, gpsp, gp, 0}, outputs: gponly} + + gpstore = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}} + gpstoreconst = regInfo{inputs: []regMask{gpspsbg, 0}} + gpstoreidx = regInfo{inputs: []regMask{gpspsbg, gpsp, gpsp, 0}} + gpstoreconstidx = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}} + gpstorexchg = regInfo{inputs: []regMask{gp, gpspsbg, 0}, outputs: []regMask{gp}} + cmpxchg = regInfo{inputs: []regMask{gp, ax, gp, 0}, outputs: []regMask{gp, 0}, clobbers: ax} + + fp01 = regInfo{inputs: nil, outputs: fponly} + fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly} + fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} + fp21load = regInfo{inputs: []regMask{fp, gpspsbg, 0}, outputs: fponly} + fp21loadidx = regInfo{inputs: []regMask{fp, gpspsbg, gpspsb, 0}, outputs: fponly} + fpgp = regInfo{inputs: fponly, outputs: gponly} + gpfp = regInfo{inputs: gponly, outputs: fponly} + fp11 = regInfo{inputs: fponly, outputs: fponly} + fp2flags = regInfo{inputs: []regMask{fp, fp}} + + fpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: fponly} + fploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 
0}, outputs: fponly} + + fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}} + fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}} + + prefreg = regInfo{inputs: []regMask{gpspsbg}} + ) + + var AMD64ops = []opData{ + // {ADD,SUB,MUL,DIV}Sx: floating-point arithmetic + // x==S for float32, x==D for float64 + // computes arg0 OP arg1 + {name: "ADDSS", argLength: 2, reg: fp21, asm: "ADDSS", commutative: true, resultInArg0: true}, + {name: "ADDSD", argLength: 2, reg: fp21, asm: "ADDSD", commutative: true, resultInArg0: true}, + {name: "SUBSS", argLength: 2, reg: fp21, asm: "SUBSS", resultInArg0: true}, + {name: "SUBSD", argLength: 2, reg: fp21, asm: "SUBSD", resultInArg0: true}, + {name: "MULSS", argLength: 2, reg: fp21, asm: "MULSS", commutative: true, resultInArg0: true}, + {name: "MULSD", argLength: 2, reg: fp21, asm: "MULSD", commutative: true, resultInArg0: true}, + {name: "DIVSS", argLength: 2, reg: fp21, asm: "DIVSS", resultInArg0: true}, + {name: "DIVSD", argLength: 2, reg: fp21, asm: "DIVSD", resultInArg0: true}, + + // MOVSxload: floating-point loads + // x==S for float32, x==D for float64 + // load from arg0+auxint+aux, arg1 = mem + {name: "MOVSSload", argLength: 2, reg: fpload, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, + {name: "MOVSDload", argLength: 2, reg: fpload, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, + + // MOVSxconst: floating-point constants + // x==S for float32, x==D for float64 + {name: "MOVSSconst", reg: fp01, asm: "MOVSS", aux: "Float32", rematerializeable: true}, + {name: "MOVSDconst", reg: fp01, asm: "MOVSD", aux: "Float64", rematerializeable: true}, + + // MOVSxloadidx: floating-point indexed loads + // x==S for float32, x==D for float64 + // load from arg0 + scale*arg1+auxint+aux, arg2 = mem + {name: "MOVSSloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSS", scale: 1, aux: "SymOff", symEffect: "Read"}, + {name: "MOVSSloadidx4", argLength: 3, reg: fploadidx, asm: "MOVSS", 
scale: 4, aux: "SymOff", symEffect: "Read"}, + {name: "MOVSDloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSD", scale: 1, aux: "SymOff", symEffect: "Read"}, + {name: "MOVSDloadidx8", argLength: 3, reg: fploadidx, asm: "MOVSD", scale: 8, aux: "SymOff", symEffect: "Read"}, + + // MOVSxstore: floating-point stores + // x==S for float32, x==D for float64 + // does *(arg0+auxint+aux) = arg1, arg2 = mem + {name: "MOVSSstore", argLength: 3, reg: fpstore, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, + {name: "MOVSDstore", argLength: 3, reg: fpstore, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, + + // MOVSxstoreidx: floating-point indexed stores + // x==S for float32, x==D for float64 + // does *(arg0+scale*arg1+auxint+aux) = arg2, arg3 = mem + {name: "MOVSSstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSS", scale: 1, aux: "SymOff", symEffect: "Write"}, + {name: "MOVSSstoreidx4", argLength: 4, reg: fpstoreidx, asm: "MOVSS", scale: 4, aux: "SymOff", symEffect: "Write"}, + {name: "MOVSDstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSD", scale: 1, aux: "SymOff", symEffect: "Write"}, + {name: "MOVSDstoreidx8", argLength: 4, reg: fpstoreidx, asm: "MOVSD", scale: 8, aux: "SymOff", symEffect: "Write"}, + + // {ADD,SUB,MUL,DIV}Sxload: floating-point load / op combo + // x==S for float32, x==D for float64 + // computes arg0 OP *(arg1+auxint+aux), arg2=mem + {name: "ADDSSload", argLength: 3, reg: fp21load, asm: "ADDSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, + {name: "ADDSDload", argLength: 3, reg: fp21load, asm: "ADDSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, + {name: "SUBSSload", argLength: 3, reg: fp21load, asm: "SUBSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, + {name: "SUBSDload", argLength: 3, reg: fp21load, asm: "SUBSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, + 
{name: "MULSSload", argLength: 3, reg: fp21load, asm: "MULSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, + {name: "MULSDload", argLength: 3, reg: fp21load, asm: "MULSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, + {name: "DIVSSload", argLength: 3, reg: fp21load, asm: "DIVSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, + {name: "DIVSDload", argLength: 3, reg: fp21load, asm: "DIVSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, + + // {ADD,SUB,MUL,DIV}Sxloadidx: floating-point indexed load / op combo + // x==S for float32, x==D for float64 + // computes arg0 OP *(arg1+scale*arg2+auxint+aux), arg3=mem + {name: "ADDSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "ADDSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, + {name: "ADDSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "ADDSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, + {name: "ADDSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "ADDSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, + {name: "ADDSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "ADDSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, + {name: "SUBSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "SUBSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, + {name: "SUBSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "SUBSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, + {name: "SUBSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "SUBSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, + {name: "SUBSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "SUBSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, + {name: "MULSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "MULSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, + {name: 
"MULSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "MULSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, + {name: "MULSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "MULSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, + {name: "MULSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "MULSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, + {name: "DIVSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "DIVSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, + {name: "DIVSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "DIVSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, + {name: "DIVSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "DIVSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, + {name: "DIVSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "DIVSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, + + // {ADD,SUB,MUL,DIV,AND,OR,XOR}x: binary integer ops + // unadorned versions compute arg0 OP arg1 + // const versions compute arg0 OP auxint (auxint is a sign-extended 32-bit value) + // constmodify versions compute *(arg0+ValAndOff(AuxInt).Off().aux) OP= ValAndOff(AuxInt).Val(), arg1 = mem + // x==L operations zero the upper 4 bytes of the destination register (not meaningful for constmodify versions). 
+ {name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true, clobberFlags: true}, + {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, + {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int32", typ: "UInt64", clobberFlags: true}, + {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", clobberFlags: true}, + {name: "ADDQconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, + {name: "ADDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, + + {name: "SUBQ", argLength: 2, reg: gp21, asm: "SUBQ", resultInArg0: true, clobberFlags: true}, + {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, + {name: "SUBQconst", argLength: 1, reg: gp11, asm: "SUBQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, + {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, + + {name: "MULQ", argLength: 2, reg: gp21, asm: "IMULQ", commutative: true, resultInArg0: true, clobberFlags: true}, + {name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, + {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMUL3Q", aux: "Int32", clobberFlags: true}, + {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMUL3L", aux: "Int32", clobberFlags: true}, + + // Let x = arg0*arg1 (full 32x32->64 unsigned multiply). Returns uint32(x), and flags set to overflow if uint32(x) != x. + {name: "MULLU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt32,Flags)", asm: "MULL", commutative: true, clobberFlags: true}, + // Let x = arg0*arg1 (full 64x64->128 unsigned multiply). 
Returns uint64(x), and flags set to overflow if uint64(x) != x. + {name: "MULQU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt64,Flags)", asm: "MULQ", commutative: true, clobberFlags: true}, + + // HMULx[U]: computes the high bits of an integer multiply. + // computes arg0 * arg1 >> (x==L?32:64) + // The multiply is unsigned for the U versions, signed for the non-U versions. + // HMULx[U] are intentionally not marked as commutative, even though they are. + // This is because they have asymmetric register requirements. + // There are rewrite rules to try to place arguments in preferable slots. + {name: "HMULQ", argLength: 2, reg: gp21hmul, asm: "IMULQ", clobberFlags: true}, + {name: "HMULL", argLength: 2, reg: gp21hmul, asm: "IMULL", clobberFlags: true}, + {name: "HMULQU", argLength: 2, reg: gp21hmul, asm: "MULQ", clobberFlags: true}, + {name: "HMULLU", argLength: 2, reg: gp21hmul, asm: "MULL", clobberFlags: true}, + + // (arg0 + arg1) / 2 as unsigned, all 64 result bits + {name: "AVGQU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, + + // DIVx[U] computes [arg0 / arg1, arg0 % arg1] + // For signed versions, AuxInt non-zero means that the divisor has been proved to be not -1. 
+ {name: "DIVQ", argLength: 2, reg: gp11div, typ: "(Int64,Int64)", asm: "IDIVQ", aux: "Bool", clobberFlags: true}, + {name: "DIVL", argLength: 2, reg: gp11div, typ: "(Int32,Int32)", asm: "IDIVL", aux: "Bool", clobberFlags: true}, + {name: "DIVW", argLength: 2, reg: gp11div, typ: "(Int16,Int16)", asm: "IDIVW", aux: "Bool", clobberFlags: true}, + {name: "DIVQU", argLength: 2, reg: gp11div, typ: "(UInt64,UInt64)", asm: "DIVQ", clobberFlags: true}, + {name: "DIVLU", argLength: 2, reg: gp11div, typ: "(UInt32,UInt32)", asm: "DIVL", clobberFlags: true}, + {name: "DIVWU", argLength: 2, reg: gp11div, typ: "(UInt16,UInt16)", asm: "DIVW", clobberFlags: true}, + + // computes -arg0, flags set for 0-arg0. + {name: "NEGLflags", argLength: 1, reg: gp11flags, typ: "(UInt32,Flags)", asm: "NEGL", resultInArg0: true}, + + // The following 4 add opcodes return the low 64 bits of the sum in the first result and + // the carry (the 65th bit) in the carry flag. + {name: "ADDQcarry", argLength: 2, reg: gp21flags, typ: "(UInt64,Flags)", asm: "ADDQ", commutative: true, resultInArg0: true}, // r = arg0+arg1 + {name: "ADCQ", argLength: 3, reg: gp2flags1flags, typ: "(UInt64,Flags)", asm: "ADCQ", commutative: true, resultInArg0: true}, // r = arg0+arg1+carry(arg2) + {name: "ADDQconstcarry", argLength: 1, reg: gp11flags, typ: "(UInt64,Flags)", asm: "ADDQ", aux: "Int32", resultInArg0: true}, // r = arg0+auxint + {name: "ADCQconst", argLength: 2, reg: gp1flags1flags, typ: "(UInt64,Flags)", asm: "ADCQ", aux: "Int32", resultInArg0: true}, // r = arg0+auxint+carry(arg1) + + // The following 4 add opcodes return the low 64 bits of the difference in the first result and + // the borrow (if the result is negative) in the carry flag. 
+ {name: "SUBQborrow", argLength: 2, reg: gp21flags, typ: "(UInt64,Flags)", asm: "SUBQ", resultInArg0: true}, // r = arg0-arg1 + {name: "SBBQ", argLength: 3, reg: gp2flags1flags, typ: "(UInt64,Flags)", asm: "SBBQ", resultInArg0: true}, // r = arg0-(arg1+carry(arg2)) + {name: "SUBQconstborrow", argLength: 1, reg: gp11flags, typ: "(UInt64,Flags)", asm: "SUBQ", aux: "Int32", resultInArg0: true}, // r = arg0-auxint + {name: "SBBQconst", argLength: 2, reg: gp1flags1flags, typ: "(UInt64,Flags)", asm: "SBBQ", aux: "Int32", resultInArg0: true}, // r = arg0-(auxint+carry(arg1)) + + {name: "MULQU2", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}}, commutative: true, asm: "MULQ", clobberFlags: true}, // arg0 * arg1, returns (hi, lo) + {name: "DIVQU2", argLength: 3, reg: regInfo{inputs: []regMask{dx, ax, gpsp}, outputs: []regMask{ax, dx}}, asm: "DIVQ", clobberFlags: true}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r) + + {name: "ANDQ", argLength: 2, reg: gp21, asm: "ANDQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1 + {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1 + {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint + {name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint + {name: "ANDQconstmodify", argLength: 2, reg: gpstoreconst, asm: "ANDQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem + {name: "ANDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ANDL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem + + {name: "ORQ", 
argLength: 2, reg: gp21, asm: "ORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1 + {name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1 + {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint + {name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint + {name: "ORQconstmodify", argLength: 2, reg: gpstoreconst, asm: "ORQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem + {name: "ORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ORL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem + + {name: "XORQ", argLength: 2, reg: gp21, asm: "XORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1 + {name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1 + {name: "XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint + {name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint + {name: "XORQconstmodify", argLength: 2, reg: gpstoreconst, asm: "XORQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem + {name: "XORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "XORL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem + + // CMPx: 
compare arg0 to arg1. + {name: "CMPQ", argLength: 2, reg: gp2flags, asm: "CMPQ", typ: "Flags"}, + {name: "CMPL", argLength: 2, reg: gp2flags, asm: "CMPL", typ: "Flags"}, + {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, + {name: "CMPB", argLength: 2, reg: gp2flags, asm: "CMPB", typ: "Flags"}, + + // CMPxconst: compare arg0 to auxint. + {name: "CMPQconst", argLength: 1, reg: gp1flags, asm: "CMPQ", typ: "Flags", aux: "Int32"}, + {name: "CMPLconst", argLength: 1, reg: gp1flags, asm: "CMPL", typ: "Flags", aux: "Int32"}, + {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int16"}, + {name: "CMPBconst", argLength: 1, reg: gp1flags, asm: "CMPB", typ: "Flags", aux: "Int8"}, + + // CMPxload: compare *(arg0+auxint+aux) to arg1 (in that order). arg2=mem. + {name: "CMPQload", argLength: 3, reg: gp1flagsLoad, asm: "CMPQ", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true}, + {name: "CMPLload", argLength: 3, reg: gp1flagsLoad, asm: "CMPL", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true}, + {name: "CMPWload", argLength: 3, reg: gp1flagsLoad, asm: "CMPW", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true}, + {name: "CMPBload", argLength: 3, reg: gp1flagsLoad, asm: "CMPB", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true}, + + // CMPxconstload: compare *(arg0+ValAndOff(AuxInt).Off()+aux) to ValAndOff(AuxInt).Val() (in that order). arg1=mem. 
+ {name: "CMPQconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPQ", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true}, + {name: "CMPLconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPL", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true}, + {name: "CMPWconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPW", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true}, + {name: "CMPBconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPB", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true}, + + // CMPxloadidx: compare *(arg0+N*arg1+auxint+aux) to arg2 (in that order). arg3=mem. + {name: "CMPQloadidx8", argLength: 4, reg: gp2flagsLoad, asm: "CMPQ", scale: 8, aux: "SymOff", typ: "Flags", symEffect: "Read"}, + {name: "CMPQloadidx1", argLength: 4, reg: gp2flagsLoad, asm: "CMPQ", scale: 1, commutative: true, aux: "SymOff", typ: "Flags", symEffect: "Read"}, + {name: "CMPLloadidx4", argLength: 4, reg: gp2flagsLoad, asm: "CMPL", scale: 4, aux: "SymOff", typ: "Flags", symEffect: "Read"}, + {name: "CMPLloadidx1", argLength: 4, reg: gp2flagsLoad, asm: "CMPL", scale: 1, commutative: true, aux: "SymOff", typ: "Flags", symEffect: "Read"}, + {name: "CMPWloadidx2", argLength: 4, reg: gp2flagsLoad, asm: "CMPW", scale: 2, aux: "SymOff", typ: "Flags", symEffect: "Read"}, + {name: "CMPWloadidx1", argLength: 4, reg: gp2flagsLoad, asm: "CMPW", scale: 1, commutative: true, aux: "SymOff", typ: "Flags", symEffect: "Read"}, + {name: "CMPBloadidx1", argLength: 4, reg: gp2flagsLoad, asm: "CMPB", scale: 1, commutative: true, aux: "SymOff", typ: "Flags", symEffect: "Read"}, + + // CMPxconstloadidx: compare *(arg0+N*arg1+ValAndOff(AuxInt).Off()+aux) to ValAndOff(AuxInt).Val() (in that order). arg2=mem. 
+ {name: "CMPQconstloadidx8", argLength: 3, reg: gp1flagsLoad, asm: "CMPQ", scale: 8, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"}, + {name: "CMPQconstloadidx1", argLength: 3, reg: gp1flagsLoad, asm: "CMPQ", scale: 1, commutative: true, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"}, + {name: "CMPLconstloadidx4", argLength: 3, reg: gp1flagsLoad, asm: "CMPL", scale: 4, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"}, + {name: "CMPLconstloadidx1", argLength: 3, reg: gp1flagsLoad, asm: "CMPL", scale: 1, commutative: true, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"}, + {name: "CMPWconstloadidx2", argLength: 3, reg: gp1flagsLoad, asm: "CMPW", scale: 2, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"}, + {name: "CMPWconstloadidx1", argLength: 3, reg: gp1flagsLoad, asm: "CMPW", scale: 1, commutative: true, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"}, + {name: "CMPBconstloadidx1", argLength: 3, reg: gp1flagsLoad, asm: "CMPB", scale: 1, commutative: true, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"}, + + // UCOMISx: floating-point compare arg0 to arg1 + // x==S for float32, x==D for float64 + {name: "UCOMISS", argLength: 2, reg: fp2flags, asm: "UCOMISS", typ: "Flags"}, + {name: "UCOMISD", argLength: 2, reg: fp2flags, asm: "UCOMISD", typ: "Flags"}, + + // bit test/set/clear operations + {name: "BTL", argLength: 2, reg: gp2flags, asm: "BTL", typ: "Flags"}, // test whether bit arg0%32 in arg1 is set + {name: "BTQ", argLength: 2, reg: gp2flags, asm: "BTQ", typ: "Flags"}, // test whether bit arg0%64 in arg1 is set + {name: "BTCL", argLength: 2, reg: gp21, asm: "BTCL", resultInArg0: true, clobberFlags: true}, // complement bit arg1%32 in arg0 + {name: "BTCQ", argLength: 2, reg: gp21, asm: "BTCQ", resultInArg0: true, clobberFlags: true}, // complement bit arg1%64 in arg0 + {name: "BTRL", argLength: 2, reg: gp21, asm: "BTRL", resultInArg0: true, clobberFlags: true}, // reset bit arg1%32 in arg0 + {name: "BTRQ", argLength: 2, 
reg: gp21, asm: "BTRQ", resultInArg0: true, clobberFlags: true}, // reset bit arg1%64 in arg0 + {name: "BTSL", argLength: 2, reg: gp21, asm: "BTSL", resultInArg0: true, clobberFlags: true}, // set bit arg1%32 in arg0 + {name: "BTSQ", argLength: 2, reg: gp21, asm: "BTSQ", resultInArg0: true, clobberFlags: true}, // set bit arg1%64 in arg0 + {name: "BTLconst", argLength: 1, reg: gp1flags, asm: "BTL", typ: "Flags", aux: "Int8"}, // test whether bit auxint in arg0 is set, 0 <= auxint < 32 + {name: "BTQconst", argLength: 1, reg: gp1flags, asm: "BTQ", typ: "Flags", aux: "Int8"}, // test whether bit auxint in arg0 is set, 0 <= auxint < 64 + {name: "BTCQconst", argLength: 1, reg: gp11, asm: "BTCQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // complement bit auxint in arg0, 31 <= auxint < 64 + {name: "BTRQconst", argLength: 1, reg: gp11, asm: "BTRQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // reset bit auxint in arg0, 31 <= auxint < 64 + {name: "BTSQconst", argLength: 1, reg: gp11, asm: "BTSQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 31 <= auxint < 64 + + // BT[SRC]Qconstmodify + // + // S: set bit + // R: reset (clear) bit + // C: complement bit + // + // Apply operation to bit ValAndOff(AuxInt).Val() in the 64 bits at + // memory address arg0+ValAndOff(AuxInt).Off()+aux + // Bit index must be in range (31-63). + // (We use OR/AND/XOR for thinner targets and lower bit indexes.) + // arg1=mem, returns mem + // + // Note that there aren't non-const versions of these instructions. + // Well, there are such instructions, but they are slow and weird so we don't use them. 
+ {name: "BTSQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTSQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, + {name: "BTRQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTRQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, + {name: "BTCQconstmodify", argLength: 2, reg: gpstoreconst, asm: "BTCQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, + + // TESTx: compare (arg0 & arg1) to 0 + {name: "TESTQ", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTQ", typ: "Flags"}, + {name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"}, + {name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, + {name: "TESTB", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTB", typ: "Flags"}, + + // TESTxconst: compare (arg0 & auxint) to 0 + {name: "TESTQconst", argLength: 1, reg: gp1flags, asm: "TESTQ", typ: "Flags", aux: "Int32"}, + {name: "TESTLconst", argLength: 1, reg: gp1flags, asm: "TESTL", typ: "Flags", aux: "Int32"}, + {name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"}, + {name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"}, + + // S{HL, HR, AR}x: shift operations + // SHL: shift left + // SHR: shift right logical (0s are shifted in from beyond the word size) + // SAR: shift right arithmetic (sign bit is shifted in from beyond the word size) + // arg0 is the value being shifted + // arg1 is the amount to shift, interpreted mod (Q=64,L=32,W=32,B=32) + // (Note: x86 is weird, the 16 and 8 byte shifts still use all 5 bits of shift amount!) + // For *const versions, use auxint instead of arg1 as the shift amount. auxint must be in the range 0 to (Q=63,L=31,W=15,B=7) inclusive. 
+ {name: "SHLQ", argLength: 2, reg: gp21shift, asm: "SHLQ", resultInArg0: true, clobberFlags: true}, + {name: "SHLL", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true, clobberFlags: true}, + {name: "SHLQconst", argLength: 1, reg: gp11, asm: "SHLQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, + {name: "SHLLconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int8", resultInArg0: true, clobberFlags: true}, + + {name: "SHRQ", argLength: 2, reg: gp21shift, asm: "SHRQ", resultInArg0: true, clobberFlags: true}, + {name: "SHRL", argLength: 2, reg: gp21shift, asm: "SHRL", resultInArg0: true, clobberFlags: true}, + {name: "SHRW", argLength: 2, reg: gp21shift, asm: "SHRW", resultInArg0: true, clobberFlags: true}, + {name: "SHRB", argLength: 2, reg: gp21shift, asm: "SHRB", resultInArg0: true, clobberFlags: true}, + {name: "SHRQconst", argLength: 1, reg: gp11, asm: "SHRQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, + {name: "SHRLconst", argLength: 1, reg: gp11, asm: "SHRL", aux: "Int8", resultInArg0: true, clobberFlags: true}, + {name: "SHRWconst", argLength: 1, reg: gp11, asm: "SHRW", aux: "Int8", resultInArg0: true, clobberFlags: true}, + {name: "SHRBconst", argLength: 1, reg: gp11, asm: "SHRB", aux: "Int8", resultInArg0: true, clobberFlags: true}, + + {name: "SARQ", argLength: 2, reg: gp21shift, asm: "SARQ", resultInArg0: true, clobberFlags: true}, + {name: "SARL", argLength: 2, reg: gp21shift, asm: "SARL", resultInArg0: true, clobberFlags: true}, + {name: "SARW", argLength: 2, reg: gp21shift, asm: "SARW", resultInArg0: true, clobberFlags: true}, + {name: "SARB", argLength: 2, reg: gp21shift, asm: "SARB", resultInArg0: true, clobberFlags: true}, + {name: "SARQconst", argLength: 1, reg: gp11, asm: "SARQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, + {name: "SARLconst", argLength: 1, reg: gp11, asm: "SARL", aux: "Int8", resultInArg0: true, clobberFlags: true}, + {name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int8", 
resultInArg0: true, clobberFlags: true}, + {name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true, clobberFlags: true}, + + // unsigned arg0 >> arg2, shifting in bits from arg1 (==(arg1<<64+arg0)>>arg2, keeping low 64 bits), shift amount is mod 64 + {name: "SHRDQ", argLength: 3, reg: gp31shift, asm: "SHRQ", resultInArg0: true, clobberFlags: true}, + // unsigned arg0 << arg2, shifting in bits from arg1 (==(arg0<<64+arg1)< hgfedcba + // L: abcdefgh -> 0000hgfe (L zeros the upper 4 bytes) + {name: "BSWAPQ", argLength: 1, reg: gp11, asm: "BSWAPQ", resultInArg0: true}, + {name: "BSWAPL", argLength: 1, reg: gp11, asm: "BSWAPL", resultInArg0: true}, + + // POPCNTx counts the number of set bits in the low-order (L=32,Q=64) bits of arg0. + // POPCNTx instructions are only guaranteed to be available if GOAMD64>=v2. + // For GOAMD64=v2. + // For GOAMD64 condition from arg0 + {name: "SETGE", argLength: 1, reg: readflags, asm: "SETGE"}, // extract signed >= condition from arg0 + {name: "SETB", argLength: 1, reg: readflags, asm: "SETCS"}, // extract unsigned < condition from arg0 + {name: "SETBE", argLength: 1, reg: readflags, asm: "SETLS"}, // extract unsigned <= condition from arg0 + {name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0 + {name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0 + {name: "SETO", argLength: 1, reg: readflags, asm: "SETOS"}, // extract if overflow flag is set from arg0 + // Variants that store result to memory + {name: "SETEQstore", argLength: 3, reg: gpstoreconst, asm: "SETEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract == condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETNEstore", argLength: 3, reg: gpstoreconst, asm: "SETNE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract != condition from arg1 to arg0+auxint+aux, arg2=mem + {name: 
"SETLstore", argLength: 3, reg: gpstoreconst, asm: "SETLT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed < condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETLEstore", argLength: 3, reg: gpstoreconst, asm: "SETLE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed <= condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETGstore", argLength: 3, reg: gpstoreconst, asm: "SETGT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed > condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETGEstore", argLength: 3, reg: gpstoreconst, asm: "SETGE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed >= condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETBstore", argLength: 3, reg: gpstoreconst, asm: "SETCS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned < condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETBEstore", argLength: 3, reg: gpstoreconst, asm: "SETLS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned <= condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETAstore", argLength: 3, reg: gpstoreconst, asm: "SETHI", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned > condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETAEstore", argLength: 3, reg: gpstoreconst, asm: "SETCC", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned >= condition from arg1 to arg0+auxint+aux, arg2=mem + {name: "SETEQstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETEQ", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract == condition from arg2 to arg0+arg1+auxint+aux, arg3=mem + {name: "SETNEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETNE", aux: "SymOff", typ: "Mem", 
scale: 1, commutative: true, symEffect: "Write"}, // extract != condition from arg2 to arg0+arg1+auxint+aux, arg3=mem + {name: "SETLstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETLT", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract signed < condition from arg2 to arg0+arg1+auxint+aux, arg3=mem + {name: "SETLEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETLE", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract signed <= condition from arg2 to arg0+arg1+auxint+aux, arg3=mem + {name: "SETGstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETGT", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract signed > condition from arg2 to arg0+arg1+auxint+aux, arg3=mem + {name: "SETGEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETGE", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract signed >= condition from arg2 to arg0+arg1+auxint+aux, arg3=mem + {name: "SETBstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETCS", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract unsigned < condition from arg2 to arg0+arg1+auxint+aux, arg3=mem + {name: "SETBEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETLS", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract unsigned <= condition from arg2 to arg0+arg1+auxint+aux, arg3=mem + {name: "SETAstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETHI", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract unsigned > condition from arg2 to arg0+arg1+auxint+aux, arg3=mem + {name: "SETAEstoreidx1", argLength: 4, reg: gpstoreconstidx, asm: "SETCC", aux: "SymOff", typ: "Mem", scale: 1, commutative: true, symEffect: "Write"}, // extract unsigned >= condition from arg2 to arg0+arg1+auxint+aux, arg3=mem + + // Need different opcodes for floating point 
conditions because + // any comparison involving a NaN is always FALSE and thus + // the patterns for inverting conditions cannot be used. + {name: "SETEQF", argLength: 1, reg: flagsgp, asm: "SETEQ", clobberFlags: true, needIntTemp: true}, // extract == condition from arg0 + {name: "SETNEF", argLength: 1, reg: flagsgp, asm: "SETNE", clobberFlags: true, needIntTemp: true}, // extract != condition from arg0 + {name: "SETORD", argLength: 1, reg: flagsgp, asm: "SETPC"}, // extract "ordered" (No Nan present) condition from arg0 + {name: "SETNAN", argLength: 1, reg: flagsgp, asm: "SETPS"}, // extract "unordered" (Nan present) condition from arg0 + + {name: "SETGF", argLength: 1, reg: flagsgp, asm: "SETHI"}, // extract floating > condition from arg0 + {name: "SETGEF", argLength: 1, reg: flagsgp, asm: "SETCC"}, // extract floating >= condition from arg0 + + {name: "MOVBQSX", argLength: 1, reg: gp11, asm: "MOVBQSX"}, // sign extend arg0 from int8 to int64 + {name: "MOVBQZX", argLength: 1, reg: gp11, asm: "MOVBLZX"}, // zero extend arg0 from int8 to int64 + {name: "MOVWQSX", argLength: 1, reg: gp11, asm: "MOVWQSX"}, // sign extend arg0 from int16 to int64 + {name: "MOVWQZX", argLength: 1, reg: gp11, asm: "MOVWLZX"}, // zero extend arg0 from int16 to int64 + {name: "MOVLQSX", argLength: 1, reg: gp11, asm: "MOVLQSX"}, // sign extend arg0 from int32 to int64 + {name: "MOVLQZX", argLength: 1, reg: gp11, asm: "MOVL"}, // zero extend arg0 from int32 to int64 + + {name: "MOVLconst", reg: gp01, asm: "MOVL", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint + {name: "MOVQconst", reg: gp01, asm: "MOVQ", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint + + {name: "CVTTSD2SL", argLength: 1, reg: fpgp, asm: "CVTTSD2SL"}, // convert float64 to int32 + {name: "CVTTSD2SQ", argLength: 1, reg: fpgp, asm: "CVTTSD2SQ"}, // convert float64 to int64 + {name: "CVTTSS2SL", argLength: 1, reg: fpgp, asm: "CVTTSS2SL"}, // convert float32 to int32 + {name: 
"CVTTSS2SQ", argLength: 1, reg: fpgp, asm: "CVTTSS2SQ"}, // convert float32 to int64 + {name: "CVTSL2SS", argLength: 1, reg: gpfp, asm: "CVTSL2SS"}, // convert int32 to float32 + {name: "CVTSL2SD", argLength: 1, reg: gpfp, asm: "CVTSL2SD"}, // convert int32 to float64 + {name: "CVTSQ2SS", argLength: 1, reg: gpfp, asm: "CVTSQ2SS"}, // convert int64 to float32 + {name: "CVTSQ2SD", argLength: 1, reg: gpfp, asm: "CVTSQ2SD"}, // convert int64 to float64 + {name: "CVTSD2SS", argLength: 1, reg: fp11, asm: "CVTSD2SS"}, // convert float64 to float32 + {name: "CVTSS2SD", argLength: 1, reg: fp11, asm: "CVTSS2SD"}, // convert float32 to float64 + + // Move values between int and float registers, with no conversion. + // TODO: should we have generic versions of these? + {name: "MOVQi2f", argLength: 1, reg: gpfp, typ: "Float64"}, // move 64 bits from int to float reg + {name: "MOVQf2i", argLength: 1, reg: fpgp, typ: "UInt64"}, // move 64 bits from float to int reg + {name: "MOVLi2f", argLength: 1, reg: gpfp, typ: "Float32"}, // move 32 bits from int to float reg + {name: "MOVLf2i", argLength: 1, reg: fpgp, typ: "UInt32"}, // move 32 bits from float to int reg, zero extend + + {name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs (for float negation). + {name: "POR", argLength: 2, reg: fp21, asm: "POR", commutative: true, resultInArg0: true}, // inclusive or, applied to X regs (for float min/max). 
+ + {name: "LEAQ", argLength: 1, reg: gp11sb, asm: "LEAQ", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux + {name: "LEAL", argLength: 1, reg: gp11sb, asm: "LEAL", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux + {name: "LEAW", argLength: 1, reg: gp11sb, asm: "LEAW", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux + + // LEAxn computes arg0 + n*arg1 + auxint + aux + // x==L zeroes the upper 4 bytes. + {name: "LEAQ1", argLength: 2, reg: gp21sb, asm: "LEAQ", scale: 1, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux + {name: "LEAL1", argLength: 2, reg: gp21sb, asm: "LEAL", scale: 1, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux + {name: "LEAW1", argLength: 2, reg: gp21sb, asm: "LEAW", scale: 1, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux + {name: "LEAQ2", argLength: 2, reg: gp21sb, asm: "LEAQ", scale: 2, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux + {name: "LEAL2", argLength: 2, reg: gp21sb, asm: "LEAL", scale: 2, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux + {name: "LEAW2", argLength: 2, reg: gp21sb, asm: "LEAW", scale: 2, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux + {name: "LEAQ4", argLength: 2, reg: gp21sb, asm: "LEAQ", scale: 4, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux + {name: "LEAL4", argLength: 2, reg: gp21sb, asm: "LEAL", scale: 4, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux + {name: "LEAW4", argLength: 2, reg: gp21sb, asm: "LEAW", scale: 4, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux + {name: "LEAQ8", argLength: 2, reg: gp21sb, asm: "LEAQ", scale: 8, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux + {name: "LEAL8", argLength: 2, 
reg: gp21sb, asm: "LEAL", scale: 8, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux + {name: "LEAW8", argLength: 2, reg: gp21sb, asm: "LEAW", scale: 8, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux + // Note: LEAx{1,2,4,8} must not have OpSB as either argument. + + // MOVxload: loads + // Load (Q=8,L=4,W=2,B=1) bytes from (arg0+auxint+aux), arg1=mem. + // "+auxint+aux" == add auxint and the offset of the symbol in aux (if any) to the effective address + // Standard versions zero extend the result. SX versions sign extend the result. + {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, + {name: "MOVBQSXload", argLength: 2, reg: gpload, asm: "MOVBQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, + {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, + {name: "MOVWQSXload", argLength: 2, reg: gpload, asm: "MOVWQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, + {name: "MOVLload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, + {name: "MOVLQSXload", argLength: 2, reg: gpload, asm: "MOVLQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, + {name: "MOVQload", argLength: 2, reg: gpload, asm: "MOVQ", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, + + // MOVxstore: stores + // Store (Q=8,L=4,W=2,B=1) low bytes of arg1. + // Does *(arg0+auxint+aux) = arg1, arg2=mem. 
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, + {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, + {name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, + {name: "MOVQstore", argLength: 3, reg: gpstore, asm: "MOVQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, + + // MOVOload/store: 16 byte load/store + // These operations are only used to move data around: there is no *O arithmetic, for example. + {name: "MOVOload", argLength: 2, reg: fpload, asm: "MOVUPS", aux: "SymOff", typ: "Int128", faultOnNilArg0: true, symEffect: "Read"}, // load 16 bytes from arg0+auxint+aux. arg1=mem + {name: "MOVOstore", argLength: 3, reg: fpstore, asm: "MOVUPS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes in arg1 to arg0+auxint+aux. arg2=mem + + // MOVxloadidx: indexed loads + // load (Q=8,L=4,W=2,B=1) bytes from (arg0+scale*arg1+auxint+aux), arg2=mem. + // Results are zero-extended. 
(TODO: sign-extending indexed loads) + {name: "MOVBloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBLZX", scale: 1, aux: "SymOff", typ: "UInt8", symEffect: "Read"}, + {name: "MOVWloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWLZX", scale: 1, aux: "SymOff", typ: "UInt16", symEffect: "Read"}, + {name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", scale: 2, aux: "SymOff", typ: "UInt16", symEffect: "Read"}, + {name: "MOVLloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVL", scale: 1, aux: "SymOff", typ: "UInt32", symEffect: "Read"}, + {name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", scale: 4, aux: "SymOff", typ: "UInt32", symEffect: "Read"}, + {name: "MOVLloadidx8", argLength: 3, reg: gploadidx, asm: "MOVL", scale: 8, aux: "SymOff", typ: "UInt32", symEffect: "Read"}, + {name: "MOVQloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVQ", scale: 1, aux: "SymOff", typ: "UInt64", symEffect: "Read"}, + {name: "MOVQloadidx8", argLength: 3, reg: gploadidx, asm: "MOVQ", scale: 8, aux: "SymOff", typ: "UInt64", symEffect: "Read"}, + + // MOVxstoreidx: indexed stores + // Store (Q=8,L=4,W=2,B=1) low bytes of arg2. + // Does *(arg0+scale*arg1+auxint+aux) = arg2, arg3=mem. 
+ {name: "MOVBstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVB", scale: 1, aux: "SymOff", symEffect: "Write"}, + {name: "MOVWstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVW", scale: 1, aux: "SymOff", symEffect: "Write"}, + {name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", scale: 2, aux: "SymOff", symEffect: "Write"}, + {name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVL", scale: 1, aux: "SymOff", symEffect: "Write"}, + {name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", scale: 4, aux: "SymOff", symEffect: "Write"}, + {name: "MOVLstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVL", scale: 8, aux: "SymOff", symEffect: "Write"}, + {name: "MOVQstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVQ", scale: 1, aux: "SymOff", symEffect: "Write"}, + {name: "MOVQstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVQ", scale: 8, aux: "SymOff", symEffect: "Write"}, + + // TODO: add size-mismatched indexed loads/stores, like MOVBstoreidx4? + + // MOVxstoreconst: constant stores + // Store (O=16,Q=8,L=4,W=2,B=1) constant bytes. + // Does *(arg0+ValAndOff(AuxInt).Off()+aux) = ValAndOff(AuxInt).Val(), arg1=mem. + // O version can only store the constant 0. 
+ {name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, + {name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, + {name: "MOVLstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVL", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, + {name: "MOVQstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVQ", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, + {name: "MOVOstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVUPS", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, + + // MOVxstoreconstidx: constant indexed stores + // Store (Q=8,L=4,W=2,B=1) constant bytes. + // Does *(arg0+scale*arg1+ValAndOff(AuxInt).Off()+aux) = ValAndOff(AuxInt).Val(), arg2=mem. + {name: "MOVBstoreconstidx1", argLength: 3, reg: gpstoreconstidx, commutative: true, asm: "MOVB", scale: 1, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, + {name: "MOVWstoreconstidx1", argLength: 3, reg: gpstoreconstidx, commutative: true, asm: "MOVW", scale: 1, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, + {name: "MOVWstoreconstidx2", argLength: 3, reg: gpstoreconstidx, asm: "MOVW", scale: 2, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, + {name: "MOVLstoreconstidx1", argLength: 3, reg: gpstoreconstidx, commutative: true, asm: "MOVL", scale: 1, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, + {name: "MOVLstoreconstidx4", argLength: 3, reg: gpstoreconstidx, asm: "MOVL", scale: 4, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, + {name: "MOVQstoreconstidx1", argLength: 3, reg: gpstoreconstidx, commutative: true, asm: "MOVQ", scale: 1, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, + {name: "MOVQstoreconstidx8", argLength: 3, reg: gpstoreconstidx, asm: "MOVQ", scale: 8, aux: "SymValAndOff", typ: "Mem", 
symEffect: "Write"}, + + // arg0 = pointer to start of memory to zero + // arg1 = mem + // auxint = # of bytes to zero + // returns mem + { + name: "DUFFZERO", + aux: "Int64", + argLength: 2, + reg: regInfo{ + inputs: []regMask{buildReg("DI")}, + clobbers: buildReg("DI"), + }, + faultOnNilArg0: true, + unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts + }, + + // arg0 = address of memory to zero + // arg1 = # of 8-byte words to zero + // arg2 = value to store (will always be zero) + // arg3 = mem + // returns mem + { + name: "REPSTOSQ", + argLength: 4, + reg: regInfo{ + inputs: []regMask{buildReg("DI"), buildReg("CX"), buildReg("AX")}, + clobbers: buildReg("DI CX"), + }, + faultOnNilArg0: true, + }, + + // With a register ABI, the actual register info for these instructions (i.e., what is used in regalloc) is augmented with per-call-site bindings of additional arguments to specific in and out registers. + {name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem + {name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. 
arg0=codeptr, last arg=mem, auxint=argsize, returns mem + + // arg0 = destination pointer + // arg1 = source pointer + // arg2 = mem + // auxint = # of bytes to copy, must be multiple of 16 + // returns memory + { + name: "DUFFCOPY", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{buildReg("DI"), buildReg("SI")}, + clobbers: buildReg("DI SI X0"), // uses X0 as a temporary + }, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts + }, + + // arg0 = destination pointer + // arg1 = source pointer + // arg2 = # of 8-byte words to copy + // arg3 = mem + // returns memory + { + name: "REPMOVSQ", + argLength: 4, + reg: regInfo{ + inputs: []regMask{buildReg("DI"), buildReg("SI"), buildReg("CX")}, + clobbers: buildReg("DI SI CX"), + }, + faultOnNilArg0: true, + faultOnNilArg1: true, + }, + + // (InvertFlags (CMPQ a b)) == (CMPQ b a) + // So if we want (SETL (CMPQ a b)) but we can't do that because a is a constant, + // then we do (SETL (InvertFlags (CMPQ b a))) instead. + // Rewrites will convert this to (SETG (CMPQ b a)). + // InvertFlags is a pseudo-op which can't appear in assembly output. + {name: "InvertFlags", argLength: 1}, // reverse direction of arg0 + + // Pseudo-ops + {name: "LoweredGetG", argLength: 1, reg: gp01}, // arg0=mem + // Scheduler ensures LoweredGetClosurePtr occurs only in entry block, + // and sorts it to the very beginning of the block to prevent other + // use of DX (the closure pointer) + {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("DX")}}, zeroWidth: true}, + // LoweredGetCallerPC evaluates to the PC to which its "caller" will return. + // I.e., if f calls g "calls" getcallerpc, + // the result should be the PC within f that g will return to. + // See runtime/stubs.go for a more detailed discussion. 
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, + // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem + {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true}, + //arg0=ptr,arg1=mem, returns void. Faults if ptr is nil. + {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true}, + // LoweredWB invokes runtime.gcWriteBarrier{auxint}. arg0=mem, auxint=# of buffer entries needed. + // It saves all GP registers if necessary, but may clobber others. + // Returns a pointer to a write barrier buffer in R11. + {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: callerSave &^ (gp | g), outputs: []regMask{buildReg("R11")}}, clobberFlags: true, aux: "Int64"}, + + {name: "LoweredHasCPUFeature", argLength: 0, reg: gp01, rematerializeable: true, typ: "UInt64", aux: "Sym", symEffect: "None"}, + + // There are three of these functions so that they can have three different register inputs. + // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the + // default registers to match so we don't need to copy registers around unnecessarily. + {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{dx, bx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go). + {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{cx, dx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go). + {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{ax, cx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go). + + // Constant flag values. 
For any comparison, there are 5 possible + // outcomes: the three from the signed total order (<,==,>) and the + // three from the unsigned total order. The == cases overlap. + // Note: there's a sixth "unordered" outcome for floating-point + // comparisons, but we don't use such a beast yet. + // These ops are for temporary use by rewrite rules. They + // cannot appear in the generated assembly. + {name: "FlagEQ"}, // equal + {name: "FlagLT_ULT"}, // signed < and unsigned < + {name: "FlagLT_UGT"}, // signed < and unsigned > + {name: "FlagGT_UGT"}, // signed > and unsigned > + {name: "FlagGT_ULT"}, // signed > and unsigned < + + // Atomic loads. These are just normal loads but return tuples + // so they can be properly ordered with other loads. + // load from arg0+auxint+aux. arg1=mem. + {name: "MOVBatomicload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, + {name: "MOVLatomicload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, + {name: "MOVQatomicload", argLength: 2, reg: gpload, asm: "MOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, + + // Atomic stores and exchanges. Stores use XCHG to get the right memory ordering semantics. + // store arg0 to arg1+auxint+aux, arg2=mem. + // These ops return a tuple of . + // Note: arg0 and arg1 are backwards compared to MOVLstore (to facilitate resultInArg0)! + {name: "XCHGB", argLength: 3, reg: gpstorexchg, asm: "XCHGB", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"}, + {name: "XCHGL", argLength: 3, reg: gpstorexchg, asm: "XCHGL", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"}, + {name: "XCHGQ", argLength: 3, reg: gpstorexchg, asm: "XCHGQ", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"}, + + // Atomic adds. + // *(arg1+auxint+aux) += arg0. arg2=mem. 
+ // Returns a tuple of . + // Note: arg0 and arg1 are backwards compared to MOVLstore (to facilitate resultInArg0)! + {name: "XADDLlock", argLength: 3, reg: gpstorexchg, asm: "XADDL", typ: "(UInt32,Mem)", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"}, + {name: "XADDQlock", argLength: 3, reg: gpstorexchg, asm: "XADDQ", typ: "(UInt64,Mem)", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"}, + {name: "AddTupleFirst32", argLength: 2}, // arg1=tuple . Returns . + {name: "AddTupleFirst64", argLength: 2}, // arg1=tuple . Returns . + + // Compare and swap. + // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. + // if *(arg0+auxint+aux) == arg1 { + // *(arg0+auxint+aux) = arg2 + // return (true, memory) + // } else { + // return (false, memory) + // } + // Note that these instructions also return the old value in AX, but we ignore it. + // TODO: have these return flags instead of bool. The current system generates: + // CMPXCHGQ ... + // SETEQ AX + // CMPB AX, $0 + // JNE ... + // instead of just + // CMPXCHGQ ... + // JEQ ... + // but we can't do that because memory-using ops can't generate flags yet + // (flagalloc wants to move flag-generating instructions around). + {name: "CMPXCHGLlock", argLength: 4, reg: cmpxchg, asm: "CMPXCHGL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, + {name: "CMPXCHGQlock", argLength: 4, reg: cmpxchg, asm: "CMPXCHGQ", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, + + // Atomic memory updates. 
+ {name: "ANDBlock", argLength: 3, reg: gpstore, asm: "ANDB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) &= arg1 + {name: "ANDLlock", argLength: 3, reg: gpstore, asm: "ANDL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) &= arg1 + {name: "ORBlock", argLength: 3, reg: gpstore, asm: "ORB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) |= arg1 + {name: "ORLlock", argLength: 3, reg: gpstore, asm: "ORL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) |= arg1 + + // Prefetch instructions + // Do prefetch arg0 address. arg0=addr, arg1=memory. Instruction variant selects locality hint + {name: "PrefetchT0", argLength: 2, reg: prefreg, asm: "PREFETCHT0", hasSideEffects: true}, + {name: "PrefetchNTA", argLength: 2, reg: prefreg, asm: "PREFETCHNTA", hasSideEffects: true}, + + // CPUID feature: BMI1. 
+ {name: "ANDNQ", argLength: 2, reg: gp21, asm: "ANDNQ", clobberFlags: true}, // arg0 &^ arg1 + {name: "ANDNL", argLength: 2, reg: gp21, asm: "ANDNL", clobberFlags: true}, // arg0 &^ arg1 + {name: "BLSIQ", argLength: 1, reg: gp11, asm: "BLSIQ", clobberFlags: true}, // arg0 & -arg0 + {name: "BLSIL", argLength: 1, reg: gp11, asm: "BLSIL", clobberFlags: true}, // arg0 & -arg0 + {name: "BLSMSKQ", argLength: 1, reg: gp11, asm: "BLSMSKQ", clobberFlags: true}, // arg0 ^ (arg0 - 1) + {name: "BLSMSKL", argLength: 1, reg: gp11, asm: "BLSMSKL", clobberFlags: true}, // arg0 ^ (arg0 - 1) + {name: "BLSRQ", argLength: 1, reg: gp11flags, asm: "BLSRQ", typ: "(UInt64,Flags)"}, // arg0 & (arg0 - 1) + {name: "BLSRL", argLength: 1, reg: gp11flags, asm: "BLSRL", typ: "(UInt32,Flags)"}, // arg0 & (arg0 - 1) + // count the number of trailing zero bits, prefer TZCNTQ over BSFQ, as TZCNTQ(0)==64 + // and BSFQ(0) is undefined. Same for TZCNTL(0)==32 + {name: "TZCNTQ", argLength: 1, reg: gp11, asm: "TZCNTQ", clobberFlags: true}, + {name: "TZCNTL", argLength: 1, reg: gp11, asm: "TZCNTL", clobberFlags: true}, + + // CPUID feature: LZCNT. + // count the number of leading zero bits. + {name: "LZCNTQ", argLength: 1, reg: gp11, asm: "LZCNTQ", typ: "UInt64", clobberFlags: true}, + {name: "LZCNTL", argLength: 1, reg: gp11, asm: "LZCNTL", typ: "UInt32", clobberFlags: true}, + + // CPUID feature: MOVBE + // MOVBEWload does not satisfy zero extended, so only use MOVBEWstore + {name: "MOVBEWstore", argLength: 3, reg: gpstore, asm: "MOVBEW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // swap and store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem + {name: "MOVBELload", argLength: 2, reg: gpload, asm: "MOVBEL", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load and swap 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend. 
+ {name: "MOVBELstore", argLength: 3, reg: gpstore, asm: "MOVBEL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // swap and store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem + {name: "MOVBEQload", argLength: 2, reg: gpload, asm: "MOVBEQ", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load and swap 8 bytes from arg0+auxint+aux. arg1=mem + {name: "MOVBEQstore", argLength: 3, reg: gpstore, asm: "MOVBEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // swap and store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem + // indexed MOVBE loads + {name: "MOVBELloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBEL", scale: 1, aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load and swap 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Zero extend. + {name: "MOVBELloadidx4", argLength: 3, reg: gploadidx, asm: "MOVBEL", scale: 4, aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load and swap 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem. Zero extend. + {name: "MOVBELloadidx8", argLength: 3, reg: gploadidx, asm: "MOVBEL", scale: 8, aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load and swap 4 bytes from arg0+8*arg1+auxint+aux. arg2=mem. Zero extend. + {name: "MOVBEQloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBEQ", scale: 1, aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load and swap 8 bytes from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVBEQloadidx8", argLength: 3, reg: gploadidx, asm: "MOVBEQ", scale: 8, aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load and swap 8 bytes from arg0+8*arg1+auxint+aux. arg2=mem + // indexed MOVBE stores + {name: "MOVBEWstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVBEW", scale: 1, aux: "SymOff", symEffect: "Write"}, // swap and store 2 bytes in arg2 to arg0+arg1+auxint+aux. 
arg3=mem + {name: "MOVBEWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVBEW", scale: 2, aux: "SymOff", symEffect: "Write"}, // swap and store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem + {name: "MOVBELstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVBEL", scale: 1, aux: "SymOff", symEffect: "Write"}, // swap and store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVBELstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVBEL", scale: 4, aux: "SymOff", symEffect: "Write"}, // swap and store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem + {name: "MOVBELstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVBEL", scale: 8, aux: "SymOff", symEffect: "Write"}, // swap and store 4 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem + {name: "MOVBEQstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVBEQ", scale: 1, aux: "SymOff", symEffect: "Write"}, // swap and store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVBEQstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVBEQ", scale: 8, aux: "SymOff", symEffect: "Write"}, // swap and store 8 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem + + // CPUID feature: BMI2. 
+ {name: "SARXQ", argLength: 2, reg: gp21, asm: "SARXQ"}, // signed arg0 >> arg1, shift amount is mod 64 + {name: "SARXL", argLength: 2, reg: gp21, asm: "SARXL"}, // signed int32(arg0) >> arg1, shift amount is mod 32 + {name: "SHLXQ", argLength: 2, reg: gp21, asm: "SHLXQ"}, // arg0 << arg1, shift amount is mod 64 + {name: "SHLXL", argLength: 2, reg: gp21, asm: "SHLXL"}, // arg0 << arg1, shift amount is mod 32 + {name: "SHRXQ", argLength: 2, reg: gp21, asm: "SHRXQ"}, // unsigned arg0 >> arg1, shift amount is mod 64 + {name: "SHRXL", argLength: 2, reg: gp21, asm: "SHRXL"}, // unsigned uint32(arg0) >> arg1, shift amount is mod 32 + + {name: "SARXLload", argLength: 3, reg: gp21shxload, asm: "SARXL", aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+auxint+aux) >> arg1, arg2=mem, shift amount is mod 32 + {name: "SARXQload", argLength: 3, reg: gp21shxload, asm: "SARXQ", aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+auxint+aux) >> arg1, arg2=mem, shift amount is mod 64 + {name: "SHLXLload", argLength: 3, reg: gp21shxload, asm: "SHLXL", aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+auxint+aux) << arg1, arg2=mem, shift amount is mod 32 + {name: "SHLXQload", argLength: 3, reg: gp21shxload, asm: "SHLXQ", aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+auxint+aux) << arg1, arg2=mem, shift amount is mod 64 + {name: "SHRXLload", argLength: 3, reg: gp21shxload, asm: "SHRXL", aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // unsigned *(arg0+auxint+aux) >> arg1, arg2=mem, shift amount is mod 32 + {name: "SHRXQload", argLength: 3, reg: gp21shxload, asm: "SHRXQ", aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // unsigned *(arg0+auxint+aux) >> arg1, arg2=mem, shift amount is mod 64 + + {name: "SARXLloadidx1", argLength: 4, reg: gp21shxloadidx, asm: "SARXL", scale: 1, aux: "SymOff", typ: 
"Uint32", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+1*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 32 + {name: "SARXLloadidx4", argLength: 4, reg: gp21shxloadidx, asm: "SARXL", scale: 4, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+4*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 32 + {name: "SARXLloadidx8", argLength: 4, reg: gp21shxloadidx, asm: "SARXL", scale: 8, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+8*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 32 + {name: "SARXQloadidx1", argLength: 4, reg: gp21shxloadidx, asm: "SARXQ", scale: 1, aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+1*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 64 + {name: "SARXQloadidx8", argLength: 4, reg: gp21shxloadidx, asm: "SARXQ", scale: 8, aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // signed *(arg0+8*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 64 + {name: "SHLXLloadidx1", argLength: 4, reg: gp21shxloadidx, asm: "SHLXL", scale: 1, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+1*arg1+auxint+aux) << arg2, arg3=mem, shift amount is mod 32 + {name: "SHLXLloadidx4", argLength: 4, reg: gp21shxloadidx, asm: "SHLXL", scale: 4, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+4*arg1+auxint+aux) << arg2, arg3=mem, shift amount is mod 32 + {name: "SHLXLloadidx8", argLength: 4, reg: gp21shxloadidx, asm: "SHLXL", scale: 8, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+8*arg1+auxint+aux) << arg2, arg3=mem, shift amount is mod 32 + {name: "SHLXQloadidx1", argLength: 4, reg: gp21shxloadidx, asm: "SHLXQ", scale: 1, aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+1*arg1+auxint+aux) << arg2, arg3=mem, shift amount is mod 64 + {name: 
"SHLXQloadidx8", argLength: 4, reg: gp21shxloadidx, asm: "SHLXQ", scale: 8, aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // *(arg0+8*arg1+auxint+aux) << arg2, arg3=mem, shift amount is mod 64 + {name: "SHRXLloadidx1", argLength: 4, reg: gp21shxloadidx, asm: "SHRXL", scale: 1, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // unsigned *(arg0+1*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 32 + {name: "SHRXLloadidx4", argLength: 4, reg: gp21shxloadidx, asm: "SHRXL", scale: 4, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // unsigned *(arg0+4*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 32 + {name: "SHRXLloadidx8", argLength: 4, reg: gp21shxloadidx, asm: "SHRXL", scale: 8, aux: "SymOff", typ: "Uint32", faultOnNilArg0: true, symEffect: "Read"}, // unsigned *(arg0+8*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 32 + {name: "SHRXQloadidx1", argLength: 4, reg: gp21shxloadidx, asm: "SHRXQ", scale: 1, aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // unsigned *(arg0+1*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 64 + {name: "SHRXQloadidx8", argLength: 4, reg: gp21shxloadidx, asm: "SHRXQ", scale: 8, aux: "SymOff", typ: "Uint64", faultOnNilArg0: true, symEffect: "Read"}, // unsigned *(arg0+8*arg1+auxint+aux) >> arg2, arg3=mem, shift amount is mod 64 + } + + var AMD64blocks = []blockData{ + {name: "EQ", controls: 1}, + {name: "NE", controls: 1}, + {name: "LT", controls: 1}, + {name: "LE", controls: 1}, + {name: "GT", controls: 1}, + {name: "GE", controls: 1}, + {name: "OS", controls: 1}, + {name: "OC", controls: 1}, + {name: "ULT", controls: 1}, + {name: "ULE", controls: 1}, + {name: "UGT", controls: 1}, + {name: "UGE", controls: 1}, + {name: "EQF", controls: 1}, + {name: "NEF", controls: 1}, + {name: "ORD", controls: 1}, // FP, ordered comparison (parity zero) + {name: "NAN", controls: 1}, // FP, unordered comparison (parity one) + + 
// JUMPTABLE implements jump tables. + // Aux is the symbol (an *obj.LSym) for the jump table. + // control[0] is the index into the jump table. + // control[1] is the address of the jump table (the address of the symbol stored in Aux). + {name: "JUMPTABLE", controls: 2, aux: "Sym"}, + } + + archs = append(archs, arch{ + name: "AMD64", + pkg: "cmd/internal/obj/x86", + genfile: "../../amd64/ssa.go", + ops: AMD64ops, + blocks: AMD64blocks, + regnames: regNamesAMD64, + ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11", + ParamFloatRegNames: "X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14", + gpregmask: gp, + fpregmask: fp, + specialregmask: x15, + framepointerreg: int8(num["BP"]), + linkreg: -1, // not used + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/AMD64latelower.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/AMD64latelower.rules new file mode 100644 index 0000000000000000000000000000000000000000..1dd804577aeafacb704c3ee28e134f6105cc8a3e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/AMD64latelower.rules @@ -0,0 +1,13 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Prefer SARX/SHLX/SHRX instruction because it has less register restriction on the shift input. +(SAR(Q|L) x y) && buildcfg.GOAMD64 >= 3 => (SARX(Q|L) x y) +(SHL(Q|L) x y) && buildcfg.GOAMD64 >= 3 => (SHLX(Q|L) x y) +(SHR(Q|L) x y) && buildcfg.GOAMD64 >= 3 => (SHRX(Q|L) x y) + +// See comments in ARM64latelower.rules for why these are here. 
+(MOVLQZX x) && zeroUpper32Bits(x,3) => x +(MOVWQZX x) && zeroUpper48Bits(x,3) => x +(MOVBQZX x) && zeroUpper56Bits(x,3) => x diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/AMD64splitload.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/AMD64splitload.rules new file mode 100644 index 0000000000000000000000000000000000000000..dd8f8ac4a164fd99a42285e3f62751e0d7762e54 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/AMD64splitload.rules @@ -0,0 +1,45 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains rules used by flagalloc and addressingmodes to +// split a flag-generating merged load op into separate load and op. +// Unlike with the other rules files, not all of these +// rules will be applied to all values. +// Rather, flagalloc will request for rules to be applied +// to a particular problematic value. +// These are often the exact inverse of rules in AMD64.rules, +// only with the conditions removed. +// +// For addressingmodes, certain single instructions are slower than the two instruction +// split generated here (which is different from the inputs to addressingmodes). 
+// For example: +// (CMPBconstload c (ADDQ x y)) -> (CMPBconstloadidx1 c x y) -> (CMPB c (MOVBloadidx1 x y)) + +(CMP(Q|L|W|B)load {sym} [off] ptr x mem) => (CMP(Q|L|W|B) (MOV(Q|L|W|B)load {sym} [off] ptr mem) x) + +(CMP(Q|L|W|B)constload {sym} [vo] ptr mem) && vo.Val() == 0 => (TEST(Q|L|W|B) x:(MOV(Q|L|W|B)load {sym} [vo.Off()] ptr mem) x) + +(CMPQconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPQconst (MOVQload {sym} [vo.Off()] ptr mem) [vo.Val()]) +(CMPLconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()]) +(CMPWconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()]) +(CMPBconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()]) + +(CMP(Q|L|W|B)loadidx1 {sym} [off] ptr idx x mem) => (CMP(Q|L|W|B) (MOV(Q|L|W|B)loadidx1 {sym} [off] ptr idx mem) x) +(CMPQloadidx8 {sym} [off] ptr idx x mem) => (CMPQ (MOVQloadidx8 {sym} [off] ptr idx mem) x) +(CMPLloadidx4 {sym} [off] ptr idx x mem) => (CMPL (MOVLloadidx4 {sym} [off] ptr idx mem) x) +(CMPWloadidx2 {sym} [off] ptr idx x mem) => (CMPW (MOVWloadidx2 {sym} [off] ptr idx mem) x) + +(CMP(Q|L|W|B)constloadidx1 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TEST(Q|L|W|B) x:(MOV(Q|L|W|B)loadidx1 {sym} [vo.Off()] ptr idx mem) x) +(CMPQconstloadidx8 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTQ x:(MOVQloadidx8 {sym} [vo.Off()] ptr idx mem) x) +(CMPLconstloadidx4 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTL x:(MOVLloadidx4 {sym} [vo.Off()] ptr idx mem) x) +(CMPWconstloadidx2 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTW x:(MOVWloadidx2 {sym} [vo.Off()] ptr idx mem) x) + +(CMPQconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPQconst (MOVQloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val()]) +(CMPLconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPLconst (MOVLloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val()]) +(CMPWconstloadidx1 
{sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPWconst (MOVWloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val16()]) +(CMPBconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPBconst (MOVBloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val8()]) + +(CMPQconstloadidx8 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPQconst (MOVQloadidx8 {sym} [vo.Off()] ptr idx mem) [vo.Val()]) +(CMPLconstloadidx4 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPLconst (MOVLloadidx4 {sym} [vo.Off()] ptr idx mem) [vo.Val()]) +(CMPWconstloadidx2 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPWconst (MOVWloadidx2 {sym} [vo.Off()] ptr idx mem) [vo.Val16()]) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/ARM.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/ARM.rules new file mode 100644 index 0000000000000000000000000000000000000000..ed0ed80afa729feb0e8c3c8f9318dd6a0259f975 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/ARM.rules @@ -0,0 +1,1475 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +(Add(Ptr|32|16|8) ...) => (ADD ...) +(Add(32|64)F ...) => (ADD(F|D) ...) +(Add32carry ...) => (ADDS ...) +(Add32withcarry ...) => (ADC ...) + +(Sub(Ptr|32|16|8) ...) => (SUB ...) +(Sub(32|64)F ...) => (SUB(F|D) ...) +(Sub32carry ...) => (SUBS ...) +(Sub32withcarry ...) => (SBC ...) + +(Mul(32|16|8) ...) => (MUL ...) +(Mul(32|64)F ...) => (MUL(F|D) ...) +(Hmul(32|32u) ...) => (HMU(L|LU) ...) +(Mul32uhilo ...) => (MULLU ...) 
+ +(Div32 x y) => + (SUB (XOR // negate the result if one operand is negative + (Select0 (CALLudiv + (SUB (XOR x (Signmask x)) (Signmask x)) // negate x if negative + (SUB (XOR y (Signmask y)) (Signmask y)))) // negate y if negative + (Signmask (XOR x y))) (Signmask (XOR x y))) +(Div32u x y) => (Select0 (CALLudiv x y)) +(Div16 x y) => (Div32 (SignExt16to32 x) (SignExt16to32 y)) +(Div16u x y) => (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y)) +(Div8 x y) => (Div32 (SignExt8to32 x) (SignExt8to32 y)) +(Div8u x y) => (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y)) +(Div(32|64)F ...) => (DIV(F|D) ...) + +(Mod32 x y) => + (SUB (XOR // negate the result if x is negative + (Select1 (CALLudiv + (SUB (XOR x (Signmask x)) (Signmask x)) // negate x if negative + (SUB (XOR y (Signmask y)) (Signmask y)))) // negate y if negative + (Signmask x)) (Signmask x)) +(Mod32u x y) => (Select1 (CALLudiv x y)) +(Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y)) +(Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y)) +(Mod8 x y) => (Mod32 (SignExt8to32 x) (SignExt8to32 y)) +(Mod8u x y) => (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y)) + +// (x + y) / 2 with x>=y -> (x - y) / 2 + y +(Avg32u x y) => (ADD (SRLconst (SUB x y) [1]) y) + +(And(32|16|8) ...) => (AND ...) +(Or(32|16|8) ...) => (OR ...) +(Xor(32|16|8) ...) => (XOR ...) + +// unary ops +(Neg(32|16|8) x) => (RSBconst [0] x) +(Neg(32|64)F ...) => (NEG(F|D) ...) + +(Com(32|16|8) ...) => (MVN ...) + +(Sqrt ...) => (SQRTD ...) +(Sqrt32 ...) => (SQRTF ...) +(Abs ...) => (ABSD ...) + +// TODO: optimize this for ARMv5 and ARMv6 +(Ctz32NonZero ...) => (Ctz32 ...) +(Ctz16NonZero ...) => (Ctz32 ...) +(Ctz8NonZero ...) => (Ctz32 ...) 
+ +// count trailing zero for ARMv5 and ARMv6 +// 32 - CLZ(x&-x - 1) +(Ctz32 x) && buildcfg.GOARM.Version<=6 => + (RSBconst [32] (CLZ (SUBconst (AND x (RSBconst [0] x)) [1]))) +(Ctz16 x) && buildcfg.GOARM.Version<=6 => + (RSBconst [32] (CLZ (SUBconst (AND (ORconst [0x10000] x) (RSBconst [0] (ORconst [0x10000] x))) [1]))) +(Ctz8 x) && buildcfg.GOARM.Version<=6 => + (RSBconst [32] (CLZ (SUBconst (AND (ORconst [0x100] x) (RSBconst [0] (ORconst [0x100] x))) [1]))) + +// count trailing zero for ARMv7 +(Ctz32 x) && buildcfg.GOARM.Version==7 => (CLZ (RBIT x)) +(Ctz16 x) && buildcfg.GOARM.Version==7 => (CLZ (RBIT (ORconst [0x10000] x))) +(Ctz8 x) && buildcfg.GOARM.Version==7 => (CLZ (RBIT (ORconst [0x100] x))) + +// bit length +(BitLen32 x) => (RSBconst [32] (CLZ x)) + +// byte swap for ARMv5 +// let (a, b, c, d) be the bytes of x from high to low +// t1 = x right rotate 16 bits -- (c, d, a, b ) +// t2 = x ^ t1 -- (a^c, b^d, a^c, b^d) +// t3 = t2 &^ 0xff0000 -- (a^c, 0, a^c, b^d) +// t4 = t3 >> 8 -- (0, a^c, 0, a^c) +// t5 = x right rotate 8 bits -- (d, a, b, c ) +// result = t4 ^ t5 -- (d, c, b, a ) +// using shifted ops this can be done in 4 instructions. +(Bswap32 x) && buildcfg.GOARM.Version==5 => + (XOR + (SRLconst (BICconst (XOR x (SRRconst [16] x)) [0xff0000]) [8]) + (SRRconst x [8])) + +// byte swap for ARMv6 and above +(Bswap32 x) && buildcfg.GOARM.Version>=6 => (REV x) + +// boolean ops -- booleans are represented with 0=false, 1=true +(AndB ...) => (AND ...) +(OrB ...) => (OR ...) +(EqB x y) => (XORconst [1] (XOR x y)) +(NeqB ...) => (XOR ...) 
+(Not x) => (XORconst [1] x) + +// shifts +// hardware instruction uses only the low byte of the shift +// we compare to 256 to ensure Go semantics for large shifts +(Lsh32x32 x y) => (CMOVWHSconst (SLL x y) (CMPconst [256] y) [0]) +(Lsh32x16 x y) => (CMOVWHSconst (SLL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) +(Lsh32x8 x y) => (SLL x (ZeroExt8to32 y)) + +(Lsh16x32 x y) => (CMOVWHSconst (SLL x y) (CMPconst [256] y) [0]) +(Lsh16x16 x y) => (CMOVWHSconst (SLL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) +(Lsh16x8 x y) => (SLL x (ZeroExt8to32 y)) + +(Lsh8x32 x y) => (CMOVWHSconst (SLL x y) (CMPconst [256] y) [0]) +(Lsh8x16 x y) => (CMOVWHSconst (SLL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) +(Lsh8x8 x y) => (SLL x (ZeroExt8to32 y)) + +(Rsh32Ux32 x y) => (CMOVWHSconst (SRL x y) (CMPconst [256] y) [0]) +(Rsh32Ux16 x y) => (CMOVWHSconst (SRL x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) +(Rsh32Ux8 x y) => (SRL x (ZeroExt8to32 y)) + +(Rsh16Ux32 x y) => (CMOVWHSconst (SRL (ZeroExt16to32 x) y) (CMPconst [256] y) [0]) +(Rsh16Ux16 x y) => (CMOVWHSconst (SRL (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) +(Rsh16Ux8 x y) => (SRL (ZeroExt16to32 x) (ZeroExt8to32 y)) + +(Rsh8Ux32 x y) => (CMOVWHSconst (SRL (ZeroExt8to32 x) y) (CMPconst [256] y) [0]) +(Rsh8Ux16 x y) => (CMOVWHSconst (SRL (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0]) +(Rsh8Ux8 x y) => (SRL (ZeroExt8to32 x) (ZeroExt8to32 y)) + +(Rsh32x32 x y) => (SRAcond x y (CMPconst [256] y)) +(Rsh32x16 x y) => (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) +(Rsh32x8 x y) => (SRA x (ZeroExt8to32 y)) + +(Rsh16x32 x y) => (SRAcond (SignExt16to32 x) y (CMPconst [256] y)) +(Rsh16x16 x y) => (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) +(Rsh16x8 x y) => (SRA (SignExt16to32 x) (ZeroExt8to32 y)) + +(Rsh8x32 x y) => (SRAcond (SignExt8to32 x) y (CMPconst [256] 
y)) +(Rsh8x16 x y) => (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y))) +(Rsh8x8 x y) => (SRA (SignExt8to32 x) (ZeroExt8to32 y)) + +// constant shifts +// generic opt rewrites all constant shifts to shift by Const64 +(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 => (SLLconst x [int32(c)]) +(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 => (SRAconst x [int32(c)]) +(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 => (SRLconst x [int32(c)]) +(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 => (SLLconst x [int32(c)]) +(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 => (SRAconst (SLLconst x [16]) [int32(c+16)]) +(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 => (SRLconst (SLLconst x [16]) [int32(c+16)]) +(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 => (SLLconst x [int32(c)]) +(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 => (SRAconst (SLLconst x [24]) [int32(c+24)]) +(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 => (SRLconst (SLLconst x [24]) [int32(c+24)]) + +// large constant shifts +(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0]) +(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0]) +(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0]) +(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0]) +(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0]) +(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0]) + +// large constant signed right shift, we leave the sign bit +(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 => (SRAconst x [31]) +(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 => (SRAconst (SLLconst x [16]) [31]) +(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 => (SRAconst (SLLconst x [24]) [31]) + +// constants +(Const(8|16|32) [val]) => (MOVWconst [int32(val)]) +(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)]) +(ConstNil) => (MOVWconst [0]) +(ConstBool [t]) => (MOVWconst [b2i32(t)]) + +// truncations +// Because we ignore high parts of registers, truncates are just copies. +(Trunc16to8 ...) 
=> (Copy ...) +(Trunc32to8 ...) => (Copy ...) +(Trunc32to16 ...) => (Copy ...) + +// Zero-/Sign-extensions +(ZeroExt8to16 ...) => (MOVBUreg ...) +(ZeroExt8to32 ...) => (MOVBUreg ...) +(ZeroExt16to32 ...) => (MOVHUreg ...) + +(SignExt8to16 ...) => (MOVBreg ...) +(SignExt8to32 ...) => (MOVBreg ...) +(SignExt16to32 ...) => (MOVHreg ...) + +(Signmask x) => (SRAconst x [31]) +(Zeromask x) => (SRAconst (RSBshiftRL x x [1]) [31]) // sign bit of uint32(x)>>1 - x +(Slicemask x) => (SRAconst (RSBconst [0] x) [31]) + +// float <-> int conversion +(Cvt32to32F ...) => (MOVWF ...) +(Cvt32to64F ...) => (MOVWD ...) +(Cvt32Uto32F ...) => (MOVWUF ...) +(Cvt32Uto64F ...) => (MOVWUD ...) +(Cvt32Fto32 ...) => (MOVFW ...) +(Cvt64Fto32 ...) => (MOVDW ...) +(Cvt32Fto32U ...) => (MOVFWU ...) +(Cvt64Fto32U ...) => (MOVDWU ...) +(Cvt32Fto64F ...) => (MOVFD ...) +(Cvt64Fto32F ...) => (MOVDF ...) + +(Round(32|64)F ...) => (Copy ...) + +(CvtBoolToUint8 ...) => (Copy ...) + +// fused-multiply-add +(FMA x y z) => (FMULAD z x y) + +// comparisons +(Eq8 x y) => (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) +(Eq16 x y) => (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) +(Eq32 x y) => (Equal (CMP x y)) +(EqPtr x y) => (Equal (CMP x y)) +(Eq(32|64)F x y) => (Equal (CMP(F|D) x y)) + +(Neq8 x y) => (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) +(Neq16 x y) => (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) +(Neq32 x y) => (NotEqual (CMP x y)) +(NeqPtr x y) => (NotEqual (CMP x y)) +(Neq(32|64)F x y) => (NotEqual (CMP(F|D) x y)) + +(Less8 x y) => (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y))) +(Less16 x y) => (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y))) +(Less32 x y) => (LessThan (CMP x y)) +(Less(32|64)F x y) => (GreaterThan (CMP(F|D) y x)) // reverse operands to work around NaN + +(Less8U x y) => (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) +(Less16U x y) => (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) +(Less32U x y) => (LessThanU (CMP x y)) + +(Leq8 x y) => 
(LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y))) +(Leq16 x y) => (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y))) +(Leq32 x y) => (LessEqual (CMP x y)) +(Leq(32|64)F x y) => (GreaterEqual (CMP(F|D) y x)) // reverse operands to work around NaN + +(Leq8U x y) => (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) +(Leq16U x y) => (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) +(Leq32U x y) => (LessEqualU (CMP x y)) + +(OffPtr [off] ptr:(SP)) => (MOVWaddr [int32(off)] ptr) +(OffPtr [off] ptr) => (ADDconst [int32(off)] ptr) + +(Addr {sym} base) => (MOVWaddr {sym} base) +(LocalAddr {sym} base mem) && t.Elem().HasPointers() => (MOVWaddr {sym} (SPanchored base mem)) +(LocalAddr {sym} base _) && !t.Elem().HasPointers() => (MOVWaddr {sym} base) + +// loads +(Load ptr mem) && t.IsBoolean() => (MOVBUload ptr mem) +(Load ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem) +(Load ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem) +(Load ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem) +(Load ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem) +(Load ptr mem) && (is32BitInt(t) || isPtr(t)) => (MOVWload ptr mem) +(Load ptr mem) && is32BitFloat(t) => (MOVFload ptr mem) +(Load ptr mem) && is64BitFloat(t) => (MOVDload ptr mem) + +// stores +(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVFstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVDstore ptr val mem) + +// zero instructions +(Zero [0] _ mem) => mem +(Zero [1] ptr mem) => (MOVBstore ptr (MOVWconst [0]) mem) +(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore ptr (MOVWconst [0]) mem) +(Zero [2] ptr mem) => + (MOVBstore [1] ptr (MOVWconst [0]) + (MOVBstore [0] ptr 
(MOVWconst [0]) mem)) +(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 => + (MOVWstore ptr (MOVWconst [0]) mem) +(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore [2] ptr (MOVWconst [0]) + (MOVHstore [0] ptr (MOVWconst [0]) mem)) +(Zero [4] ptr mem) => + (MOVBstore [3] ptr (MOVWconst [0]) + (MOVBstore [2] ptr (MOVWconst [0]) + (MOVBstore [1] ptr (MOVWconst [0]) + (MOVBstore [0] ptr (MOVWconst [0]) mem)))) + +(Zero [3] ptr mem) => + (MOVBstore [2] ptr (MOVWconst [0]) + (MOVBstore [1] ptr (MOVWconst [0]) + (MOVBstore [0] ptr (MOVWconst [0]) mem))) + +// Medium zeroing uses a duff device +// 4 and 128 are magic constants, see runtime/mkduff.go +(Zero [s] {t} ptr mem) + && s%4 == 0 && s > 4 && s <= 512 + && t.Alignment()%4 == 0 && !config.noDuffDevice => + (DUFFZERO [4 * (128 - s/4)] ptr (MOVWconst [0]) mem) + +// Large zeroing uses a loop +(Zero [s] {t} ptr mem) + && (s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0 => + (LoweredZero [t.Alignment()] + ptr + (ADDconst ptr [int32(s-moveSize(t.Alignment(), config))]) + (MOVWconst [0]) + mem) + +// moves +(Move [0] _ _ mem) => mem +(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem) +(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore dst (MOVHUload src mem) mem) +(Move [2] dst src mem) => + (MOVBstore [1] dst (MOVBUload [1] src mem) + (MOVBstore dst (MOVBUload src mem) mem)) +(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 => + (MOVWstore dst (MOVWload src mem) mem) +(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore [2] dst (MOVHUload [2] src mem) + (MOVHstore dst (MOVHUload src mem) mem)) +(Move [4] dst src mem) => + (MOVBstore [3] dst (MOVBUload [3] src mem) + (MOVBstore [2] dst (MOVBUload [2] src mem) + (MOVBstore [1] dst (MOVBUload [1] src mem) + (MOVBstore dst (MOVBUload src mem) mem)))) + +(Move [3] dst src mem) => + (MOVBstore [2] dst (MOVBUload [2] src mem) + (MOVBstore [1] dst (MOVBUload [1] src mem) + (MOVBstore dst (MOVBUload src mem) mem))) + 
+// Medium move uses a duff device +// 8 and 128 are magic constants, see runtime/mkduff.go +(Move [s] {t} dst src mem) + && s%4 == 0 && s > 4 && s <= 512 + && t.Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s) => + (DUFFCOPY [8 * (128 - s/4)] dst src mem) + +// Large move uses a loop +(Move [s] {t} dst src mem) + && ((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) && logLargeCopy(v, s) => + (LoweredMove [t.Alignment()] + dst + src + (ADDconst src [int32(s-moveSize(t.Alignment(), config))]) + mem) + +// calls +(StaticCall ...) => (CALLstatic ...) +(ClosureCall ...) => (CALLclosure ...) +(InterCall ...) => (CALLinter ...) +(TailCall ...) => (CALLtail ...) + +// checks +(NilCheck ...) => (LoweredNilCheck ...) +(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr)) +(IsInBounds idx len) => (LessThanU (CMP idx len)) +(IsSliceInBounds idx len) => (LessEqualU (CMP idx len)) + +// pseudo-ops +(GetClosurePtr ...) => (LoweredGetClosurePtr ...) +(GetCallerSP ...) => (LoweredGetCallerSP ...) +(GetCallerPC ...) => (LoweredGetCallerPC ...) + +// Absorb pseudo-ops into blocks. 
+(If (Equal cc) yes no) => (EQ cc yes no) +(If (NotEqual cc) yes no) => (NE cc yes no) +(If (LessThan cc) yes no) => (LT cc yes no) +(If (LessThanU cc) yes no) => (ULT cc yes no) +(If (LessEqual cc) yes no) => (LE cc yes no) +(If (LessEqualU cc) yes no) => (ULE cc yes no) +(If (GreaterThan cc) yes no) => (GT cc yes no) +(If (GreaterThanU cc) yes no) => (UGT cc yes no) +(If (GreaterEqual cc) yes no) => (GE cc yes no) +(If (GreaterEqualU cc) yes no) => (UGE cc yes no) + +(If cond yes no) => (NE (CMPconst [0] cond) yes no) + +// Absorb boolean tests into block +(NE (CMPconst [0] (Equal cc)) yes no) => (EQ cc yes no) +(NE (CMPconst [0] (NotEqual cc)) yes no) => (NE cc yes no) +(NE (CMPconst [0] (LessThan cc)) yes no) => (LT cc yes no) +(NE (CMPconst [0] (LessThanU cc)) yes no) => (ULT cc yes no) +(NE (CMPconst [0] (LessEqual cc)) yes no) => (LE cc yes no) +(NE (CMPconst [0] (LessEqualU cc)) yes no) => (ULE cc yes no) +(NE (CMPconst [0] (GreaterThan cc)) yes no) => (GT cc yes no) +(NE (CMPconst [0] (GreaterThanU cc)) yes no) => (UGT cc yes no) +(NE (CMPconst [0] (GreaterEqual cc)) yes no) => (GE cc yes no) +(NE (CMPconst [0] (GreaterEqualU cc)) yes no) => (UGE cc yes no) + +// Write barrier. +(WB ...) => (LoweredWB ...) 
+ +(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem) + +(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 => (LoweredPanicExtendA [kind] hi lo y mem) +(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 => (LoweredPanicExtendB [kind] hi lo y mem) +(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 => (LoweredPanicExtendC [kind] hi lo y mem) + +// Optimizations + +// fold offset into address +(ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) => (MOVWaddr [off1+off2] {sym} ptr) +(SUBconst [off1] (MOVWaddr [off2] {sym} ptr)) => (MOVWaddr [off2-off1] {sym} ptr) + +// fold address into load/store +(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVBload [off1+off2] {sym} ptr mem) +(MOVBload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVBload [off1-off2] {sym} ptr mem) +(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVBUload [off1+off2] {sym} ptr mem) +(MOVBUload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVBUload [off1-off2] {sym} ptr mem) +(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVHload [off1+off2] {sym} ptr mem) +(MOVHload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVHload [off1-off2] {sym} ptr mem) +(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVHUload [off1+off2] {sym} ptr mem) +(MOVHUload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVHUload [off1-off2] {sym} ptr mem) +(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVWload [off1+off2] {sym} ptr mem) +(MOVWload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVWload [off1-off2] {sym} ptr mem) +(MOVFload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVFload [off1+off2] {sym} ptr mem) +(MOVFload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVFload [off1-off2] {sym} ptr mem) +(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) => 
(MOVDload [off1+off2] {sym} ptr mem) +(MOVDload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVDload [off1-off2] {sym} ptr mem) + +(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVBstore [off1+off2] {sym} ptr val mem) +(MOVBstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVBstore [off1-off2] {sym} ptr val mem) +(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVHstore [off1+off2] {sym} ptr val mem) +(MOVHstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVHstore [off1-off2] {sym} ptr val mem) +(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVWstore [off1+off2] {sym} ptr val mem) +(MOVWstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVWstore [off1-off2] {sym} ptr val mem) +(MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVFstore [off1+off2] {sym} ptr val mem) +(MOVFstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVFstore [off1-off2] {sym} ptr val mem) +(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVDstore [off1+off2] {sym} ptr val mem) +(MOVDstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVDstore [off1-off2] {sym} ptr val mem) + +(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr 
mem) +(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + +(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) => + (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) +(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) => + (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) +(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) => + (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) +(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) => + (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) +(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) => + (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + +// replace load from same location as preceding store with zero/sign extension (or copy in case of full width) +(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBreg x) +(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBUreg x) +(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHreg x) +(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHUreg x) +(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x + +(MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x +(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x + +(MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _)) && isSamePtr(ptr, 
ptr2) => x +(MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) => x +(MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) => x +(MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) => x +(MOVBUloadidx ptr idx (MOVBstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => (MOVBUreg x) +(MOVBloadidx ptr idx (MOVBstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => (MOVBreg x) +(MOVHUloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => (MOVHUreg x) +(MOVHloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => (MOVHreg x) + +// fold constant into arithmetic ops +(ADD x (MOVWconst [c])) && !t.IsPtr() => (ADDconst [c] x) +(SUB (MOVWconst [c]) x) => (RSBconst [c] x) +(SUB x (MOVWconst [c])) => (SUBconst [c] x) +(RSB (MOVWconst [c]) x) => (SUBconst [c] x) +(RSB x (MOVWconst [c])) => (RSBconst [c] x) + +(ADDS x (MOVWconst [c])) => (ADDSconst [c] x) +(SUBS x (MOVWconst [c])) => (SUBSconst [c] x) + +(ADC (MOVWconst [c]) x flags) => (ADCconst [c] x flags) +(SBC (MOVWconst [c]) x flags) => (RSCconst [c] x flags) +(SBC x (MOVWconst [c]) flags) => (SBCconst [c] x flags) + +(AND x (MOVWconst [c])) => (ANDconst [c] x) +(OR x (MOVWconst [c])) => (ORconst [c] x) +(XOR x (MOVWconst [c])) => (XORconst [c] x) +(BIC x (MOVWconst [c])) => (BICconst [c] x) + +(SLL x (MOVWconst [c])) && 0 <= c && c < 32 => (SLLconst x [c]) +(SRL x (MOVWconst [c])) && 0 <= c && c < 32 => (SRLconst x [c]) +(SRA x (MOVWconst [c])) && 0 <= c && c < 32 => (SRAconst x [c]) + +(CMP x (MOVWconst [c])) => (CMPconst [c] x) +(CMP (MOVWconst [c]) x) => (InvertFlags (CMPconst [c] x)) +(CMN x (MOVWconst [c])) => (CMNconst [c] x) +(TST x (MOVWconst [c])) => (TSTconst [c] x) +(TEQ x (MOVWconst [c])) => (TEQconst [c] x) + +(SRR x (MOVWconst [c])) => (SRRconst x [c&31]) + +// Canonicalize the order of arguments to comparisons - helps with CSE. 
+(CMP x y) && canonLessThan(x,y) => (InvertFlags (CMP y x)) + +// don't extend after proper load +// MOVWreg instruction is not emitted if src and dst registers are same, but it ensures the type. +(MOVBreg x:(MOVBload _ _)) => (MOVWreg x) +(MOVBUreg x:(MOVBUload _ _)) => (MOVWreg x) +(MOVHreg x:(MOVBload _ _)) => (MOVWreg x) +(MOVHreg x:(MOVBUload _ _)) => (MOVWreg x) +(MOVHreg x:(MOVHload _ _)) => (MOVWreg x) +(MOVHUreg x:(MOVBUload _ _)) => (MOVWreg x) +(MOVHUreg x:(MOVHUload _ _)) => (MOVWreg x) + +// fold extensions and ANDs together +(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&0xff] x) +(MOVHUreg (ANDconst [c] x)) => (ANDconst [c&0xffff] x) +(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 => (ANDconst [c&0x7f] x) +(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 => (ANDconst [c&0x7fff] x) + +// fold double extensions +(MOVBreg x:(MOVBreg _)) => (MOVWreg x) +(MOVBUreg x:(MOVBUreg _)) => (MOVWreg x) +(MOVHreg x:(MOVBreg _)) => (MOVWreg x) +(MOVHreg x:(MOVBUreg _)) => (MOVWreg x) +(MOVHreg x:(MOVHreg _)) => (MOVWreg x) +(MOVHUreg x:(MOVBUreg _)) => (MOVWreg x) +(MOVHUreg x:(MOVHUreg _)) => (MOVWreg x) + +// don't extend before store +(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem) + +// if a register move has only 1 use, just use the same register without emitting instruction +// MOVWnop doesn't emit instruction, only for ensuring the type. +(MOVWreg x) && x.Uses == 1 => (MOVWnop x) + +// TODO: we should be able to get rid of MOVWnop all together. +// But for now, this is enough to get rid of lots of them. 
+(MOVWnop (MOVWconst [c])) => (MOVWconst [c]) + +// mul by constant +(MUL x (MOVWconst [c])) && int32(c) == -1 => (RSBconst [0] x) +(MUL _ (MOVWconst [0])) => (MOVWconst [0]) +(MUL x (MOVWconst [1])) => x +(MUL x (MOVWconst [c])) && isPowerOfTwo32(c) => (SLLconst [int32(log32(c))] x) +(MUL x (MOVWconst [c])) && isPowerOfTwo32(c-1) && c >= 3 => (ADDshiftLL x x [int32(log32(c-1))]) +(MUL x (MOVWconst [c])) && isPowerOfTwo32(c+1) && c >= 7 => (RSBshiftLL x x [int32(log32(c+1))]) +(MUL x (MOVWconst [c])) && c%3 == 0 && isPowerOfTwo32(c/3) => (SLLconst [int32(log32(c/3))] (ADDshiftLL x x [1])) +(MUL x (MOVWconst [c])) && c%5 == 0 && isPowerOfTwo32(c/5) => (SLLconst [int32(log32(c/5))] (ADDshiftLL x x [2])) +(MUL x (MOVWconst [c])) && c%7 == 0 && isPowerOfTwo32(c/7) => (SLLconst [int32(log32(c/7))] (RSBshiftLL x x [3])) +(MUL x (MOVWconst [c])) && c%9 == 0 && isPowerOfTwo32(c/9) => (SLLconst [int32(log32(c/9))] (ADDshiftLL x x [3])) + +(MULA x (MOVWconst [c]) a) && c == -1 => (SUB a x) +(MULA _ (MOVWconst [0]) a) => a +(MULA x (MOVWconst [1]) a) => (ADD x a) +(MULA x (MOVWconst [c]) a) && isPowerOfTwo32(c) => (ADD (SLLconst [int32(log32(c))] x) a) +(MULA x (MOVWconst [c]) a) && isPowerOfTwo32(c-1) && c >= 3 => (ADD (ADDshiftLL x x [int32(log32(c-1))]) a) +(MULA x (MOVWconst [c]) a) && isPowerOfTwo32(c+1) && c >= 7 => (ADD (RSBshiftLL x x [int32(log32(c+1))]) a) +(MULA x (MOVWconst [c]) a) && c%3 == 0 && isPowerOfTwo32(c/3) => (ADD (SLLconst [int32(log32(c/3))] (ADDshiftLL x x [1])) a) +(MULA x (MOVWconst [c]) a) && c%5 == 0 && isPowerOfTwo32(c/5) => (ADD (SLLconst [int32(log32(c/5))] (ADDshiftLL x x [2])) a) +(MULA x (MOVWconst [c]) a) && c%7 == 0 && isPowerOfTwo32(c/7) => (ADD (SLLconst [int32(log32(c/7))] (RSBshiftLL x x [3])) a) +(MULA x (MOVWconst [c]) a) && c%9 == 0 && isPowerOfTwo32(c/9) => (ADD (SLLconst [int32(log32(c/9))] (ADDshiftLL x x [3])) a) + +(MULA (MOVWconst [c]) x a) && c == -1 => (SUB a x) +(MULA (MOVWconst [0]) _ a) => a +(MULA (MOVWconst [1]) x a) => 
(ADD x a) +(MULA (MOVWconst [c]) x a) && isPowerOfTwo32(c) => (ADD (SLLconst [int32(log32(c))] x) a) +(MULA (MOVWconst [c]) x a) && isPowerOfTwo32(c-1) && c >= 3 => (ADD (ADDshiftLL x x [int32(log32(c-1))]) a) +(MULA (MOVWconst [c]) x a) && isPowerOfTwo32(c+1) && c >= 7 => (ADD (RSBshiftLL x x [int32(log32(c+1))]) a) +(MULA (MOVWconst [c]) x a) && c%3 == 0 && isPowerOfTwo32(c/3) => (ADD (SLLconst [int32(log32(c/3))] (ADDshiftLL x x [1])) a) +(MULA (MOVWconst [c]) x a) && c%5 == 0 && isPowerOfTwo32(c/5) => (ADD (SLLconst [int32(log32(c/5))] (ADDshiftLL x x [2])) a) +(MULA (MOVWconst [c]) x a) && c%7 == 0 && isPowerOfTwo32(c/7) => (ADD (SLLconst [int32(log32(c/7))] (RSBshiftLL x x [3])) a) +(MULA (MOVWconst [c]) x a) && c%9 == 0 && isPowerOfTwo32(c/9) => (ADD (SLLconst [int32(log32(c/9))] (ADDshiftLL x x [3])) a) + +(MULS x (MOVWconst [c]) a) && c == -1 => (ADD a x) +(MULS _ (MOVWconst [0]) a) => a +(MULS x (MOVWconst [1]) a) => (RSB x a) +(MULS x (MOVWconst [c]) a) && isPowerOfTwo32(c) => (RSB (SLLconst [int32(log32(c))] x) a) +(MULS x (MOVWconst [c]) a) && isPowerOfTwo32(c-1) && c >= 3 => (RSB (ADDshiftLL x x [int32(log32(c-1))]) a) +(MULS x (MOVWconst [c]) a) && isPowerOfTwo32(c+1) && c >= 7 => (RSB (RSBshiftLL x x [int32(log32(c+1))]) a) +(MULS x (MOVWconst [c]) a) && c%3 == 0 && isPowerOfTwo32(c/3) => (RSB (SLLconst [int32(log32(c/3))] (ADDshiftLL x x [1])) a) +(MULS x (MOVWconst [c]) a) && c%5 == 0 && isPowerOfTwo32(c/5) => (RSB (SLLconst [int32(log32(c/5))] (ADDshiftLL x x [2])) a) +(MULS x (MOVWconst [c]) a) && c%7 == 0 && isPowerOfTwo32(c/7) => (RSB (SLLconst [int32(log32(c/7))] (RSBshiftLL x x [3])) a) +(MULS x (MOVWconst [c]) a) && c%9 == 0 && isPowerOfTwo32(c/9) => (RSB (SLLconst [int32(log32(c/9))] (ADDshiftLL x x [3])) a) + +(MULS (MOVWconst [c]) x a) && c == -1 => (ADD a x) +(MULS (MOVWconst [0]) _ a) => a +(MULS (MOVWconst [1]) x a) => (RSB x a) +(MULS (MOVWconst [c]) x a) && isPowerOfTwo32(c) => (RSB (SLLconst [int32(log32(c))] x) a) +(MULS 
(MOVWconst [c]) x a) && isPowerOfTwo32(c-1) && c >= 3 => (RSB (ADDshiftLL x x [int32(log32(c-1))]) a) +(MULS (MOVWconst [c]) x a) && isPowerOfTwo32(c+1) && c >= 7 => (RSB (RSBshiftLL x x [int32(log32(c+1))]) a) +(MULS (MOVWconst [c]) x a) && c%3 == 0 && isPowerOfTwo32(c/3) => (RSB (SLLconst [int32(log32(c/3))] (ADDshiftLL x x [1])) a) +(MULS (MOVWconst [c]) x a) && c%5 == 0 && isPowerOfTwo32(c/5) => (RSB (SLLconst [int32(log32(c/5))] (ADDshiftLL x x [2])) a) +(MULS (MOVWconst [c]) x a) && c%7 == 0 && isPowerOfTwo32(c/7) => (RSB (SLLconst [int32(log32(c/7))] (RSBshiftLL x x [3])) a) +(MULS (MOVWconst [c]) x a) && c%9 == 0 && isPowerOfTwo32(c/9) => (RSB (SLLconst [int32(log32(c/9))] (ADDshiftLL x x [3])) a) + +// div by constant +(Select0 (CALLudiv x (MOVWconst [1]))) => x +(Select1 (CALLudiv _ (MOVWconst [1]))) => (MOVWconst [0]) +(Select0 (CALLudiv x (MOVWconst [c]))) && isPowerOfTwo32(c) => (SRLconst [int32(log32(c))] x) +(Select1 (CALLudiv x (MOVWconst [c]))) && isPowerOfTwo32(c) => (ANDconst [c-1] x) + +// constant comparisons +(CMPconst (MOVWconst [x]) [y]) => (FlagConstant [subFlags32(x,y)]) +(CMNconst (MOVWconst [x]) [y]) => (FlagConstant [addFlags32(x,y)]) +(TSTconst (MOVWconst [x]) [y]) => (FlagConstant [logicFlags32(x&y)]) +(TEQconst (MOVWconst [x]) [y]) => (FlagConstant [logicFlags32(x^y)]) + +// other known comparisons +(CMPconst (MOVBUreg _) [c]) && 0xff < c => (FlagConstant [subFlags32(0, 1)]) +(CMPconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags32(0, 1)]) +(CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n => (FlagConstant [subFlags32(0, 1)]) +(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1< (FlagConstant [subFlags32(0, 1)]) + +// absorb flag constants into branches +(EQ (FlagConstant [fc]) yes no) && fc.eq() => (First yes no) +(EQ (FlagConstant [fc]) yes no) && !fc.eq() => (First no yes) + +(NE (FlagConstant [fc]) yes no) && fc.ne() => (First yes no) +(NE (FlagConstant [fc]) yes no) && !fc.ne() => (First no yes) + 
+(LT (FlagConstant [fc]) yes no) && fc.lt() => (First yes no) +(LT (FlagConstant [fc]) yes no) && !fc.lt() => (First no yes) + +(LE (FlagConstant [fc]) yes no) && fc.le() => (First yes no) +(LE (FlagConstant [fc]) yes no) && !fc.le() => (First no yes) + +(GT (FlagConstant [fc]) yes no) && fc.gt() => (First yes no) +(GT (FlagConstant [fc]) yes no) && !fc.gt() => (First no yes) + +(GE (FlagConstant [fc]) yes no) && fc.ge() => (First yes no) +(GE (FlagConstant [fc]) yes no) && !fc.ge() => (First no yes) + +(ULT (FlagConstant [fc]) yes no) && fc.ult() => (First yes no) +(ULT (FlagConstant [fc]) yes no) && !fc.ult() => (First no yes) + +(ULE (FlagConstant [fc]) yes no) && fc.ule() => (First yes no) +(ULE (FlagConstant [fc]) yes no) && !fc.ule() => (First no yes) + +(UGT (FlagConstant [fc]) yes no) && fc.ugt() => (First yes no) +(UGT (FlagConstant [fc]) yes no) && !fc.ugt() => (First no yes) + +(UGE (FlagConstant [fc]) yes no) && fc.uge() => (First yes no) +(UGE (FlagConstant [fc]) yes no) && !fc.uge() => (First no yes) + +(LTnoov (FlagConstant [fc]) yes no) && fc.ltNoov() => (First yes no) +(LTnoov (FlagConstant [fc]) yes no) && !fc.ltNoov() => (First no yes) + +(LEnoov (FlagConstant [fc]) yes no) && fc.leNoov() => (First yes no) +(LEnoov (FlagConstant [fc]) yes no) && !fc.leNoov() => (First no yes) + +(GTnoov (FlagConstant [fc]) yes no) && fc.gtNoov() => (First yes no) +(GTnoov (FlagConstant [fc]) yes no) && !fc.gtNoov() => (First no yes) + +(GEnoov (FlagConstant [fc]) yes no) && fc.geNoov() => (First yes no) +(GEnoov (FlagConstant [fc]) yes no) && !fc.geNoov() => (First no yes) + +// absorb InvertFlags into branches +(LT (InvertFlags cmp) yes no) => (GT cmp yes no) +(GT (InvertFlags cmp) yes no) => (LT cmp yes no) +(LE (InvertFlags cmp) yes no) => (GE cmp yes no) +(GE (InvertFlags cmp) yes no) => (LE cmp yes no) +(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no) +(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no) +(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no) 
+(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no) +(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no) +(NE (InvertFlags cmp) yes no) => (NE cmp yes no) +(LTnoov (InvertFlags cmp) yes no) => (GTnoov cmp yes no) +(GEnoov (InvertFlags cmp) yes no) => (LEnoov cmp yes no) +(LEnoov (InvertFlags cmp) yes no) => (GEnoov cmp yes no) +(GTnoov (InvertFlags cmp) yes no) => (LTnoov cmp yes no) + +// absorb flag constants into boolean values +(Equal (FlagConstant [fc])) => (MOVWconst [b2i32(fc.eq())]) +(NotEqual (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ne())]) +(LessThan (FlagConstant [fc])) => (MOVWconst [b2i32(fc.lt())]) +(LessThanU (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ult())]) +(LessEqual (FlagConstant [fc])) => (MOVWconst [b2i32(fc.le())]) +(LessEqualU (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ule())]) +(GreaterThan (FlagConstant [fc])) => (MOVWconst [b2i32(fc.gt())]) +(GreaterThanU (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ugt())]) +(GreaterEqual (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ge())]) +(GreaterEqualU (FlagConstant [fc])) => (MOVWconst [b2i32(fc.uge())]) + +// absorb InvertFlags into boolean values +(Equal (InvertFlags x)) => (Equal x) +(NotEqual (InvertFlags x)) => (NotEqual x) +(LessThan (InvertFlags x)) => (GreaterThan x) +(LessThanU (InvertFlags x)) => (GreaterThanU x) +(GreaterThan (InvertFlags x)) => (LessThan x) +(GreaterThanU (InvertFlags x)) => (LessThanU x) +(LessEqual (InvertFlags x)) => (GreaterEqual x) +(LessEqualU (InvertFlags x)) => (GreaterEqualU x) +(GreaterEqual (InvertFlags x)) => (LessEqual x) +(GreaterEqualU (InvertFlags x)) => (LessEqualU x) + +// absorb flag constants into conditional instructions +(CMOVWLSconst _ (FlagConstant [fc]) [c]) && fc.ule() => (MOVWconst [c]) +(CMOVWLSconst x (FlagConstant [fc]) [c]) && fc.ugt() => x + +(CMOVWHSconst _ (FlagConstant [fc]) [c]) && fc.uge() => (MOVWconst [c]) +(CMOVWHSconst x (FlagConstant [fc]) [c]) && fc.ult() => x + +(CMOVWLSconst x (InvertFlags flags) [c]) => (CMOVWHSconst x 
flags [c]) +(CMOVWHSconst x (InvertFlags flags) [c]) => (CMOVWLSconst x flags [c]) + +(SRAcond x _ (FlagConstant [fc])) && fc.uge() => (SRAconst x [31]) +(SRAcond x y (FlagConstant [fc])) && fc.ult() => (SRA x y) + +// remove redundant *const ops +(ADDconst [0] x) => x +(SUBconst [0] x) => x +(ANDconst [0] _) => (MOVWconst [0]) +(ANDconst [c] x) && int32(c)==-1 => x +(ORconst [0] x) => x +(ORconst [c] _) && int32(c)==-1 => (MOVWconst [-1]) +(XORconst [0] x) => x +(BICconst [0] x) => x +(BICconst [c] _) && int32(c)==-1 => (MOVWconst [0]) + +// generic constant folding +(ADDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) => (SUBconst [-c] x) +(SUBconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) => (ADDconst [-c] x) +(ANDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (BICconst [int32(^uint32(c))] x) +(BICconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (ANDconst [int32(^uint32(c))] x) +(ADDconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (SUBconst [-c] x) +(SUBconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (ADDconst [-c] x) +(ANDconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (BICconst [int32(^uint32(c))] x) +(BICconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (ANDconst [int32(^uint32(c))] x) +(ADDconst [c] (MOVWconst [d])) => (MOVWconst [c+d]) +(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x) +(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x) +(ADDconst [c] (RSBconst [d] x)) => (RSBconst [c+d] x) +(ADCconst [c] (ADDconst [d] x) flags) => (ADCconst [c+d] x flags) +(ADCconst [c] (SUBconst [d] x) flags) => (ADCconst [c-d] x flags) +(SUBconst [c] (MOVWconst [d])) => (MOVWconst [d-c]) +(SUBconst [c] (SUBconst [d] x)) => 
(ADDconst [-c-d] x) +(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x) +(SUBconst [c] (RSBconst [d] x)) => (RSBconst [-c+d] x) +(SBCconst [c] (ADDconst [d] x) flags) => (SBCconst [c-d] x flags) +(SBCconst [c] (SUBconst [d] x) flags) => (SBCconst [c+d] x flags) +(RSBconst [c] (MOVWconst [d])) => (MOVWconst [c-d]) +(RSBconst [c] (RSBconst [d] x)) => (ADDconst [c-d] x) +(RSBconst [c] (ADDconst [d] x)) => (RSBconst [c-d] x) +(RSBconst [c] (SUBconst [d] x)) => (RSBconst [c+d] x) +(RSCconst [c] (ADDconst [d] x) flags) => (RSCconst [c-d] x flags) +(RSCconst [c] (SUBconst [d] x) flags) => (RSCconst [c+d] x flags) +(SLLconst [c] (MOVWconst [d])) => (MOVWconst [d< (MOVWconst [int32(uint32(d)>>uint64(c))]) +(SRAconst [c] (MOVWconst [d])) => (MOVWconst [d>>uint64(c)]) +(MUL (MOVWconst [c]) (MOVWconst [d])) => (MOVWconst [c*d]) +(MULA (MOVWconst [c]) (MOVWconst [d]) a) => (ADDconst [c*d] a) +(MULS (MOVWconst [c]) (MOVWconst [d]) a) => (SUBconst [c*d] a) +(Select0 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [int32(uint32(c)/uint32(d))]) +(Select1 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [int32(uint32(c)%uint32(d))]) +(ANDconst [c] (MOVWconst [d])) => (MOVWconst [c&d]) +(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x) +(ORconst [c] (MOVWconst [d])) => (MOVWconst [c|d]) +(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x) +(XORconst [c] (MOVWconst [d])) => (MOVWconst [c^d]) +(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x) +(BICconst [c] (MOVWconst [d])) => (MOVWconst [d&^c]) +(BICconst [c] (BICconst [d] x)) => (BICconst [c|d] x) +(MVN (MOVWconst [c])) => (MOVWconst [^c]) +(MOVBreg (MOVWconst [c])) => (MOVWconst [int32(int8(c))]) +(MOVBUreg (MOVWconst [c])) => (MOVWconst [int32(uint8(c))]) +(MOVHreg (MOVWconst [c])) => (MOVWconst [int32(int16(c))]) +(MOVHUreg (MOVWconst [c])) => (MOVWconst [int32(uint16(c))]) +(MOVWreg (MOVWconst [c])) => (MOVWconst [c]) +// BFX: Width = c >> 8, LSB = c & 0xff, result = d << (32 - 
Width - LSB) >> (32 - Width) +(BFX [c] (MOVWconst [d])) => (MOVWconst [d<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8))]) +(BFXU [c] (MOVWconst [d])) => (MOVWconst [int32(uint32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8)))]) + +// absorb shifts into ops +(ADD x (SLLconst [c] y)) => (ADDshiftLL x y [c]) +(ADD x (SRLconst [c] y)) => (ADDshiftRL x y [c]) +(ADD x (SRAconst [c] y)) => (ADDshiftRA x y [c]) +(ADD x (SLL y z)) => (ADDshiftLLreg x y z) +(ADD x (SRL y z)) => (ADDshiftRLreg x y z) +(ADD x (SRA y z)) => (ADDshiftRAreg x y z) +(ADC x (SLLconst [c] y) flags) => (ADCshiftLL x y [c] flags) +(ADC x (SRLconst [c] y) flags) => (ADCshiftRL x y [c] flags) +(ADC x (SRAconst [c] y) flags) => (ADCshiftRA x y [c] flags) +(ADC x (SLL y z) flags) => (ADCshiftLLreg x y z flags) +(ADC x (SRL y z) flags) => (ADCshiftRLreg x y z flags) +(ADC x (SRA y z) flags) => (ADCshiftRAreg x y z flags) +(ADDS x (SLLconst [c] y)) => (ADDSshiftLL x y [c]) +(ADDS x (SRLconst [c] y)) => (ADDSshiftRL x y [c]) +(ADDS x (SRAconst [c] y)) => (ADDSshiftRA x y [c]) +(ADDS x (SLL y z)) => (ADDSshiftLLreg x y z) +(ADDS x (SRL y z)) => (ADDSshiftRLreg x y z) +(ADDS x (SRA y z)) => (ADDSshiftRAreg x y z) +(SUB x (SLLconst [c] y)) => (SUBshiftLL x y [c]) +(SUB (SLLconst [c] y) x) => (RSBshiftLL x y [c]) +(SUB x (SRLconst [c] y)) => (SUBshiftRL x y [c]) +(SUB (SRLconst [c] y) x) => (RSBshiftRL x y [c]) +(SUB x (SRAconst [c] y)) => (SUBshiftRA x y [c]) +(SUB (SRAconst [c] y) x) => (RSBshiftRA x y [c]) +(SUB x (SLL y z)) => (SUBshiftLLreg x y z) +(SUB (SLL y z) x) => (RSBshiftLLreg x y z) +(SUB x (SRL y z)) => (SUBshiftRLreg x y z) +(SUB (SRL y z) x) => (RSBshiftRLreg x y z) +(SUB x (SRA y z)) => (SUBshiftRAreg x y z) +(SUB (SRA y z) x) => (RSBshiftRAreg x y z) +(SBC x (SLLconst [c] y) flags) => (SBCshiftLL x y [c] flags) +(SBC (SLLconst [c] y) x flags) => (RSCshiftLL x y [c] flags) +(SBC x (SRLconst [c] y) flags) => (SBCshiftRL x y [c] flags) +(SBC (SRLconst [c] y) x flags) => (RSCshiftRL 
x y [c] flags) +(SBC x (SRAconst [c] y) flags) => (SBCshiftRA x y [c] flags) +(SBC (SRAconst [c] y) x flags) => (RSCshiftRA x y [c] flags) +(SBC x (SLL y z) flags) => (SBCshiftLLreg x y z flags) +(SBC (SLL y z) x flags) => (RSCshiftLLreg x y z flags) +(SBC x (SRL y z) flags) => (SBCshiftRLreg x y z flags) +(SBC (SRL y z) x flags) => (RSCshiftRLreg x y z flags) +(SBC x (SRA y z) flags) => (SBCshiftRAreg x y z flags) +(SBC (SRA y z) x flags) => (RSCshiftRAreg x y z flags) +(SUBS x (SLLconst [c] y)) => (SUBSshiftLL x y [c]) +(SUBS (SLLconst [c] y) x) => (RSBSshiftLL x y [c]) +(SUBS x (SRLconst [c] y)) => (SUBSshiftRL x y [c]) +(SUBS (SRLconst [c] y) x) => (RSBSshiftRL x y [c]) +(SUBS x (SRAconst [c] y)) => (SUBSshiftRA x y [c]) +(SUBS (SRAconst [c] y) x) => (RSBSshiftRA x y [c]) +(SUBS x (SLL y z)) => (SUBSshiftLLreg x y z) +(SUBS (SLL y z) x) => (RSBSshiftLLreg x y z) +(SUBS x (SRL y z)) => (SUBSshiftRLreg x y z) +(SUBS (SRL y z) x) => (RSBSshiftRLreg x y z) +(SUBS x (SRA y z)) => (SUBSshiftRAreg x y z) +(SUBS (SRA y z) x) => (RSBSshiftRAreg x y z) +(RSB x (SLLconst [c] y)) => (RSBshiftLL x y [c]) +(RSB (SLLconst [c] y) x) => (SUBshiftLL x y [c]) +(RSB x (SRLconst [c] y)) => (RSBshiftRL x y [c]) +(RSB (SRLconst [c] y) x) => (SUBshiftRL x y [c]) +(RSB x (SRAconst [c] y)) => (RSBshiftRA x y [c]) +(RSB (SRAconst [c] y) x) => (SUBshiftRA x y [c]) +(RSB x (SLL y z)) => (RSBshiftLLreg x y z) +(RSB (SLL y z) x) => (SUBshiftLLreg x y z) +(RSB x (SRL y z)) => (RSBshiftRLreg x y z) +(RSB (SRL y z) x) => (SUBshiftRLreg x y z) +(RSB x (SRA y z)) => (RSBshiftRAreg x y z) +(RSB (SRA y z) x) => (SUBshiftRAreg x y z) +(AND x (SLLconst [c] y)) => (ANDshiftLL x y [c]) +(AND x (SRLconst [c] y)) => (ANDshiftRL x y [c]) +(AND x (SRAconst [c] y)) => (ANDshiftRA x y [c]) +(AND x (SLL y z)) => (ANDshiftLLreg x y z) +(AND x (SRL y z)) => (ANDshiftRLreg x y z) +(AND x (SRA y z)) => (ANDshiftRAreg x y z) +(OR x (SLLconst [c] y)) => (ORshiftLL x y [c]) +(OR x (SRLconst [c] y)) => (ORshiftRL x y 
[c]) +(OR x (SRAconst [c] y)) => (ORshiftRA x y [c]) +(OR x (SLL y z)) => (ORshiftLLreg x y z) +(OR x (SRL y z)) => (ORshiftRLreg x y z) +(OR x (SRA y z)) => (ORshiftRAreg x y z) +(XOR x (SLLconst [c] y)) => (XORshiftLL x y [c]) +(XOR x (SRLconst [c] y)) => (XORshiftRL x y [c]) +(XOR x (SRAconst [c] y)) => (XORshiftRA x y [c]) +(XOR x (SRRconst [c] y)) => (XORshiftRR x y [c]) +(XOR x (SLL y z)) => (XORshiftLLreg x y z) +(XOR x (SRL y z)) => (XORshiftRLreg x y z) +(XOR x (SRA y z)) => (XORshiftRAreg x y z) +(BIC x (SLLconst [c] y)) => (BICshiftLL x y [c]) +(BIC x (SRLconst [c] y)) => (BICshiftRL x y [c]) +(BIC x (SRAconst [c] y)) => (BICshiftRA x y [c]) +(BIC x (SLL y z)) => (BICshiftLLreg x y z) +(BIC x (SRL y z)) => (BICshiftRLreg x y z) +(BIC x (SRA y z)) => (BICshiftRAreg x y z) +(MVN (SLLconst [c] x)) => (MVNshiftLL x [c]) +(MVN (SRLconst [c] x)) => (MVNshiftRL x [c]) +(MVN (SRAconst [c] x)) => (MVNshiftRA x [c]) +(MVN (SLL x y)) => (MVNshiftLLreg x y) +(MVN (SRL x y)) => (MVNshiftRLreg x y) +(MVN (SRA x y)) => (MVNshiftRAreg x y) + +(CMP x (SLLconst [c] y)) => (CMPshiftLL x y [c]) +(CMP (SLLconst [c] y) x) => (InvertFlags (CMPshiftLL x y [c])) +(CMP x (SRLconst [c] y)) => (CMPshiftRL x y [c]) +(CMP (SRLconst [c] y) x) => (InvertFlags (CMPshiftRL x y [c])) +(CMP x (SRAconst [c] y)) => (CMPshiftRA x y [c]) +(CMP (SRAconst [c] y) x) => (InvertFlags (CMPshiftRA x y [c])) +(CMP x (SLL y z)) => (CMPshiftLLreg x y z) +(CMP (SLL y z) x) => (InvertFlags (CMPshiftLLreg x y z)) +(CMP x (SRL y z)) => (CMPshiftRLreg x y z) +(CMP (SRL y z) x) => (InvertFlags (CMPshiftRLreg x y z)) +(CMP x (SRA y z)) => (CMPshiftRAreg x y z) +(CMP (SRA y z) x) => (InvertFlags (CMPshiftRAreg x y z)) +(TST x (SLLconst [c] y)) => (TSTshiftLL x y [c]) +(TST x (SRLconst [c] y)) => (TSTshiftRL x y [c]) +(TST x (SRAconst [c] y)) => (TSTshiftRA x y [c]) +(TST x (SLL y z)) => (TSTshiftLLreg x y z) +(TST x (SRL y z)) => (TSTshiftRLreg x y z) +(TST x (SRA y z)) => (TSTshiftRAreg x y z) +(TEQ x 
(SLLconst [c] y)) => (TEQshiftLL x y [c]) +(TEQ x (SRLconst [c] y)) => (TEQshiftRL x y [c]) +(TEQ x (SRAconst [c] y)) => (TEQshiftRA x y [c]) +(TEQ x (SLL y z)) => (TEQshiftLLreg x y z) +(TEQ x (SRL y z)) => (TEQshiftRLreg x y z) +(TEQ x (SRA y z)) => (TEQshiftRAreg x y z) +(CMN x (SLLconst [c] y)) => (CMNshiftLL x y [c]) +(CMN x (SRLconst [c] y)) => (CMNshiftRL x y [c]) +(CMN x (SRAconst [c] y)) => (CMNshiftRA x y [c]) +(CMN x (SLL y z)) => (CMNshiftLLreg x y z) +(CMN x (SRL y z)) => (CMNshiftRLreg x y z) +(CMN x (SRA y z)) => (CMNshiftRAreg x y z) + +// prefer *const ops to *shift ops +(ADDshiftLL (MOVWconst [c]) x [d]) => (ADDconst [c] (SLLconst x [d])) +(ADDshiftRL (MOVWconst [c]) x [d]) => (ADDconst [c] (SRLconst x [d])) +(ADDshiftRA (MOVWconst [c]) x [d]) => (ADDconst [c] (SRAconst x [d])) +(ADCshiftLL (MOVWconst [c]) x [d] flags) => (ADCconst [c] (SLLconst x [d]) flags) +(ADCshiftRL (MOVWconst [c]) x [d] flags) => (ADCconst [c] (SRLconst x [d]) flags) +(ADCshiftRA (MOVWconst [c]) x [d] flags) => (ADCconst [c] (SRAconst x [d]) flags) +(ADDSshiftLL (MOVWconst [c]) x [d]) => (ADDSconst [c] (SLLconst x [d])) +(ADDSshiftRL (MOVWconst [c]) x [d]) => (ADDSconst [c] (SRLconst x [d])) +(ADDSshiftRA (MOVWconst [c]) x [d]) => (ADDSconst [c] (SRAconst x [d])) +(SUBshiftLL (MOVWconst [c]) x [d]) => (RSBconst [c] (SLLconst x [d])) +(SUBshiftRL (MOVWconst [c]) x [d]) => (RSBconst [c] (SRLconst x [d])) +(SUBshiftRA (MOVWconst [c]) x [d]) => (RSBconst [c] (SRAconst x [d])) +(SBCshiftLL (MOVWconst [c]) x [d] flags) => (RSCconst [c] (SLLconst x [d]) flags) +(SBCshiftRL (MOVWconst [c]) x [d] flags) => (RSCconst [c] (SRLconst x [d]) flags) +(SBCshiftRA (MOVWconst [c]) x [d] flags) => (RSCconst [c] (SRAconst x [d]) flags) +(SUBSshiftLL (MOVWconst [c]) x [d]) => (RSBSconst [c] (SLLconst x [d])) +(SUBSshiftRL (MOVWconst [c]) x [d]) => (RSBSconst [c] (SRLconst x [d])) +(SUBSshiftRA (MOVWconst [c]) x [d]) => (RSBSconst [c] (SRAconst x [d])) +(RSBshiftLL (MOVWconst [c]) x [d]) => 
(SUBconst [c] (SLLconst x [d])) +(RSBshiftRL (MOVWconst [c]) x [d]) => (SUBconst [c] (SRLconst x [d])) +(RSBshiftRA (MOVWconst [c]) x [d]) => (SUBconst [c] (SRAconst x [d])) +(RSCshiftLL (MOVWconst [c]) x [d] flags) => (SBCconst [c] (SLLconst x [d]) flags) +(RSCshiftRL (MOVWconst [c]) x [d] flags) => (SBCconst [c] (SRLconst x [d]) flags) +(RSCshiftRA (MOVWconst [c]) x [d] flags) => (SBCconst [c] (SRAconst x [d]) flags) +(RSBSshiftLL (MOVWconst [c]) x [d]) => (SUBSconst [c] (SLLconst x [d])) +(RSBSshiftRL (MOVWconst [c]) x [d]) => (SUBSconst [c] (SRLconst x [d])) +(RSBSshiftRA (MOVWconst [c]) x [d]) => (SUBSconst [c] (SRAconst x [d])) +(ANDshiftLL (MOVWconst [c]) x [d]) => (ANDconst [c] (SLLconst x [d])) +(ANDshiftRL (MOVWconst [c]) x [d]) => (ANDconst [c] (SRLconst x [d])) +(ANDshiftRA (MOVWconst [c]) x [d]) => (ANDconst [c] (SRAconst x [d])) +(ORshiftLL (MOVWconst [c]) x [d]) => (ORconst [c] (SLLconst x [d])) +(ORshiftRL (MOVWconst [c]) x [d]) => (ORconst [c] (SRLconst x [d])) +(ORshiftRA (MOVWconst [c]) x [d]) => (ORconst [c] (SRAconst x [d])) +(XORshiftLL (MOVWconst [c]) x [d]) => (XORconst [c] (SLLconst x [d])) +(XORshiftRL (MOVWconst [c]) x [d]) => (XORconst [c] (SRLconst x [d])) +(XORshiftRA (MOVWconst [c]) x [d]) => (XORconst [c] (SRAconst x [d])) +(XORshiftRR (MOVWconst [c]) x [d]) => (XORconst [c] (SRRconst x [d])) +(CMPshiftLL (MOVWconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SLLconst x [d]))) +(CMPshiftRL (MOVWconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRLconst x [d]))) +(CMPshiftRA (MOVWconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRAconst x [d]))) +(TSTshiftLL (MOVWconst [c]) x [d]) => (TSTconst [c] (SLLconst x [d])) +(TSTshiftRL (MOVWconst [c]) x [d]) => (TSTconst [c] (SRLconst x [d])) +(TSTshiftRA (MOVWconst [c]) x [d]) => (TSTconst [c] (SRAconst x [d])) +(TEQshiftLL (MOVWconst [c]) x [d]) => (TEQconst [c] (SLLconst x [d])) +(TEQshiftRL (MOVWconst [c]) x [d]) => (TEQconst [c] (SRLconst x [d])) +(TEQshiftRA (MOVWconst [c]) x [d]) => 
(TEQconst [c] (SRAconst x [d])) +(CMNshiftLL (MOVWconst [c]) x [d]) => (CMNconst [c] (SLLconst x [d])) +(CMNshiftRL (MOVWconst [c]) x [d]) => (CMNconst [c] (SRLconst x [d])) +(CMNshiftRA (MOVWconst [c]) x [d]) => (CMNconst [c] (SRAconst x [d])) + +(ADDshiftLLreg (MOVWconst [c]) x y) => (ADDconst [c] (SLL x y)) +(ADDshiftRLreg (MOVWconst [c]) x y) => (ADDconst [c] (SRL x y)) +(ADDshiftRAreg (MOVWconst [c]) x y) => (ADDconst [c] (SRA x y)) +(ADCshiftLLreg (MOVWconst [c]) x y flags) => (ADCconst [c] (SLL x y) flags) +(ADCshiftRLreg (MOVWconst [c]) x y flags) => (ADCconst [c] (SRL x y) flags) +(ADCshiftRAreg (MOVWconst [c]) x y flags) => (ADCconst [c] (SRA x y) flags) +(ADDSshiftLLreg (MOVWconst [c]) x y) => (ADDSconst [c] (SLL x y)) +(ADDSshiftRLreg (MOVWconst [c]) x y) => (ADDSconst [c] (SRL x y)) +(ADDSshiftRAreg (MOVWconst [c]) x y) => (ADDSconst [c] (SRA x y)) +(SUBshiftLLreg (MOVWconst [c]) x y) => (RSBconst [c] (SLL x y)) +(SUBshiftRLreg (MOVWconst [c]) x y) => (RSBconst [c] (SRL x y)) +(SUBshiftRAreg (MOVWconst [c]) x y) => (RSBconst [c] (SRA x y)) +(SBCshiftLLreg (MOVWconst [c]) x y flags) => (RSCconst [c] (SLL x y) flags) +(SBCshiftRLreg (MOVWconst [c]) x y flags) => (RSCconst [c] (SRL x y) flags) +(SBCshiftRAreg (MOVWconst [c]) x y flags) => (RSCconst [c] (SRA x y) flags) +(SUBSshiftLLreg (MOVWconst [c]) x y) => (RSBSconst [c] (SLL x y)) +(SUBSshiftRLreg (MOVWconst [c]) x y) => (RSBSconst [c] (SRL x y)) +(SUBSshiftRAreg (MOVWconst [c]) x y) => (RSBSconst [c] (SRA x y)) +(RSBshiftLLreg (MOVWconst [c]) x y) => (SUBconst [c] (SLL x y)) +(RSBshiftRLreg (MOVWconst [c]) x y) => (SUBconst [c] (SRL x y)) +(RSBshiftRAreg (MOVWconst [c]) x y) => (SUBconst [c] (SRA x y)) +(RSCshiftLLreg (MOVWconst [c]) x y flags) => (SBCconst [c] (SLL x y) flags) +(RSCshiftRLreg (MOVWconst [c]) x y flags) => (SBCconst [c] (SRL x y) flags) +(RSCshiftRAreg (MOVWconst [c]) x y flags) => (SBCconst [c] (SRA x y) flags) +(RSBSshiftLLreg (MOVWconst [c]) x y) => (SUBSconst [c] (SLL x y)) 
+(RSBSshiftRLreg (MOVWconst [c]) x y) => (SUBSconst [c] (SRL x y)) +(RSBSshiftRAreg (MOVWconst [c]) x y) => (SUBSconst [c] (SRA x y)) +(ANDshiftLLreg (MOVWconst [c]) x y) => (ANDconst [c] (SLL x y)) +(ANDshiftRLreg (MOVWconst [c]) x y) => (ANDconst [c] (SRL x y)) +(ANDshiftRAreg (MOVWconst [c]) x y) => (ANDconst [c] (SRA x y)) +(ORshiftLLreg (MOVWconst [c]) x y) => (ORconst [c] (SLL x y)) +(ORshiftRLreg (MOVWconst [c]) x y) => (ORconst [c] (SRL x y)) +(ORshiftRAreg (MOVWconst [c]) x y) => (ORconst [c] (SRA x y)) +(XORshiftLLreg (MOVWconst [c]) x y) => (XORconst [c] (SLL x y)) +(XORshiftRLreg (MOVWconst [c]) x y) => (XORconst [c] (SRL x y)) +(XORshiftRAreg (MOVWconst [c]) x y) => (XORconst [c] (SRA x y)) +(CMPshiftLLreg (MOVWconst [c]) x y) => (InvertFlags (CMPconst [c] (SLL x y))) +(CMPshiftRLreg (MOVWconst [c]) x y) => (InvertFlags (CMPconst [c] (SRL x y))) +(CMPshiftRAreg (MOVWconst [c]) x y) => (InvertFlags (CMPconst [c] (SRA x y))) +(TSTshiftLLreg (MOVWconst [c]) x y) => (TSTconst [c] (SLL x y)) +(TSTshiftRLreg (MOVWconst [c]) x y) => (TSTconst [c] (SRL x y)) +(TSTshiftRAreg (MOVWconst [c]) x y) => (TSTconst [c] (SRA x y)) +(TEQshiftLLreg (MOVWconst [c]) x y) => (TEQconst [c] (SLL x y)) +(TEQshiftRLreg (MOVWconst [c]) x y) => (TEQconst [c] (SRL x y)) +(TEQshiftRAreg (MOVWconst [c]) x y) => (TEQconst [c] (SRA x y)) +(CMNshiftLLreg (MOVWconst [c]) x y) => (CMNconst [c] (SLL x y)) +(CMNshiftRLreg (MOVWconst [c]) x y) => (CMNconst [c] (SRL x y)) +(CMNshiftRAreg (MOVWconst [c]) x y) => (CMNconst [c] (SRA x y)) + +// constant folding in *shift ops +(ADDshiftLL x (MOVWconst [c]) [d]) => (ADDconst x [c< (ADDconst x [int32(uint32(c)>>uint64(d))]) +(ADDshiftRA x (MOVWconst [c]) [d]) => (ADDconst x [c>>uint64(d)]) +(ADCshiftLL x (MOVWconst [c]) [d] flags) => (ADCconst x [c< (ADCconst x [int32(uint32(c)>>uint64(d))] flags) +(ADCshiftRA x (MOVWconst [c]) [d] flags) => (ADCconst x [c>>uint64(d)] flags) +(ADDSshiftLL x (MOVWconst [c]) [d]) => (ADDSconst x [c< (ADDSconst x 
[int32(uint32(c)>>uint64(d))]) +(ADDSshiftRA x (MOVWconst [c]) [d]) => (ADDSconst x [c>>uint64(d)]) +(SUBshiftLL x (MOVWconst [c]) [d]) => (SUBconst x [c< (SUBconst x [int32(uint32(c)>>uint64(d))]) +(SUBshiftRA x (MOVWconst [c]) [d]) => (SUBconst x [c>>uint64(d)]) +(SBCshiftLL x (MOVWconst [c]) [d] flags) => (SBCconst x [c< (SBCconst x [int32(uint32(c)>>uint64(d))] flags) +(SBCshiftRA x (MOVWconst [c]) [d] flags) => (SBCconst x [c>>uint64(d)] flags) +(SUBSshiftLL x (MOVWconst [c]) [d]) => (SUBSconst x [c< (SUBSconst x [int32(uint32(c)>>uint64(d))]) +(SUBSshiftRA x (MOVWconst [c]) [d]) => (SUBSconst x [c>>uint64(d)]) +(RSBshiftLL x (MOVWconst [c]) [d]) => (RSBconst x [c< (RSBconst x [int32(uint32(c)>>uint64(d))]) +(RSBshiftRA x (MOVWconst [c]) [d]) => (RSBconst x [c>>uint64(d)]) +(RSCshiftLL x (MOVWconst [c]) [d] flags) => (RSCconst x [c< (RSCconst x [int32(uint32(c)>>uint64(d))] flags) +(RSCshiftRA x (MOVWconst [c]) [d] flags) => (RSCconst x [c>>uint64(d)] flags) +(RSBSshiftLL x (MOVWconst [c]) [d]) => (RSBSconst x [c< (RSBSconst x [int32(uint32(c)>>uint64(d))]) +(RSBSshiftRA x (MOVWconst [c]) [d]) => (RSBSconst x [c>>uint64(d)]) +(ANDshiftLL x (MOVWconst [c]) [d]) => (ANDconst x [c< (ANDconst x [int32(uint32(c)>>uint64(d))]) +(ANDshiftRA x (MOVWconst [c]) [d]) => (ANDconst x [c>>uint64(d)]) +(ORshiftLL x (MOVWconst [c]) [d]) => (ORconst x [c< (ORconst x [int32(uint32(c)>>uint64(d))]) +(ORshiftRA x (MOVWconst [c]) [d]) => (ORconst x [c>>uint64(d)]) +(XORshiftLL x (MOVWconst [c]) [d]) => (XORconst x [c< (XORconst x [int32(uint32(c)>>uint64(d))]) +(XORshiftRA x (MOVWconst [c]) [d]) => (XORconst x [c>>uint64(d)]) +(XORshiftRR x (MOVWconst [c]) [d]) => (XORconst x [int32(uint32(c)>>uint64(d)|uint32(c)< (BICconst x [c< (BICconst x [int32(uint32(c)>>uint64(d))]) +(BICshiftRA x (MOVWconst [c]) [d]) => (BICconst x [c>>uint64(d)]) +(MVNshiftLL (MOVWconst [c]) [d]) => (MOVWconst [^(c< (MOVWconst [^int32(uint32(c)>>uint64(d))]) +(MVNshiftRA (MOVWconst [c]) [d]) => (MOVWconst 
[int32(c)>>uint64(d)]) +(CMPshiftLL x (MOVWconst [c]) [d]) => (CMPconst x [c< (CMPconst x [int32(uint32(c)>>uint64(d))]) +(CMPshiftRA x (MOVWconst [c]) [d]) => (CMPconst x [c>>uint64(d)]) +(TSTshiftLL x (MOVWconst [c]) [d]) => (TSTconst x [c< (TSTconst x [int32(uint32(c)>>uint64(d))]) +(TSTshiftRA x (MOVWconst [c]) [d]) => (TSTconst x [c>>uint64(d)]) +(TEQshiftLL x (MOVWconst [c]) [d]) => (TEQconst x [c< (TEQconst x [int32(uint32(c)>>uint64(d))]) +(TEQshiftRA x (MOVWconst [c]) [d]) => (TEQconst x [c>>uint64(d)]) +(CMNshiftLL x (MOVWconst [c]) [d]) => (CMNconst x [c< (CMNconst x [int32(uint32(c)>>uint64(d))]) +(CMNshiftRA x (MOVWconst [c]) [d]) => (CMNconst x [c>>uint64(d)]) + +(ADDshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDshiftLL x y [c]) +(ADDshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDshiftRL x y [c]) +(ADDshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDshiftRA x y [c]) +(ADCshiftLLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (ADCshiftLL x y [c] flags) +(ADCshiftRLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (ADCshiftRL x y [c] flags) +(ADCshiftRAreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (ADCshiftRA x y [c] flags) +(ADDSshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDSshiftLL x y [c]) +(ADDSshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDSshiftRL x y [c]) +(ADDSshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDSshiftRA x y [c]) +(SUBshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBshiftLL x y [c]) +(SUBshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBshiftRL x y [c]) +(SUBshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBshiftRA x y [c]) +(SBCshiftLLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (SBCshiftLL x y [c] flags) +(SBCshiftRLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (SBCshiftRL x y [c] flags) +(SBCshiftRAreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (SBCshiftRA x y [c] flags) +(SUBSshiftLLreg x y 
(MOVWconst [c])) && 0 <= c && c < 32 => (SUBSshiftLL x y [c]) +(SUBSshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBSshiftRL x y [c]) +(SUBSshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBSshiftRA x y [c]) +(RSBshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBshiftLL x y [c]) +(RSBshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBshiftRL x y [c]) +(RSBshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBshiftRA x y [c]) +(RSCshiftLLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (RSCshiftLL x y [c] flags) +(RSCshiftRLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (RSCshiftRL x y [c] flags) +(RSCshiftRAreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (RSCshiftRA x y [c] flags) +(RSBSshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBSshiftLL x y [c]) +(RSBSshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBSshiftRL x y [c]) +(RSBSshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBSshiftRA x y [c]) +(ANDshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ANDshiftLL x y [c]) +(ANDshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ANDshiftRL x y [c]) +(ANDshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ANDshiftRA x y [c]) +(ORshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ORshiftLL x y [c]) +(ORshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ORshiftRL x y [c]) +(ORshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ORshiftRA x y [c]) +(XORshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (XORshiftLL x y [c]) +(XORshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (XORshiftRL x y [c]) +(XORshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (XORshiftRA x y [c]) +(BICshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (BICshiftLL x y [c]) +(BICshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (BICshiftRL x y [c]) +(BICshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (BICshiftRA x y [c]) +(MVNshiftLLreg x (MOVWconst [c])) 
&& 0 <= c && c < 32 => (MVNshiftLL x [c]) +(MVNshiftRLreg x (MOVWconst [c])) && 0 <= c && c < 32 => (MVNshiftRL x [c]) +(MVNshiftRAreg x (MOVWconst [c])) && 0 <= c && c < 32 => (MVNshiftRA x [c]) +(CMPshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMPshiftLL x y [c]) +(CMPshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMPshiftRL x y [c]) +(CMPshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMPshiftRA x y [c]) +(TSTshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TSTshiftLL x y [c]) +(TSTshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TSTshiftRL x y [c]) +(TSTshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TSTshiftRA x y [c]) +(TEQshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TEQshiftLL x y [c]) +(TEQshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TEQshiftRL x y [c]) +(TEQshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TEQshiftRA x y [c]) +(CMNshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftLL x y [c]) +(CMNshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftRL x y [c]) +(CMNshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftRA x y [c]) + +(RotateLeft16 x (MOVWconst [c])) => (Or16 (Lsh16x32 x (MOVWconst [c&15])) (Rsh16Ux32 x (MOVWconst [-c&15]))) +(RotateLeft8 x (MOVWconst [c])) => (Or8 (Lsh8x32 x (MOVWconst [c&7])) (Rsh8Ux32 x (MOVWconst [-c&7]))) +(RotateLeft32 x y) => (SRR x (RSBconst [0] y)) + +// ((x>>8) | (x<<8)) -> (REV16 x), the type of x is uint16, "|" can also be "^" or "+". +// UBFX instruction is supported by ARMv6T2, ARMv7 and above versions, REV16 is supported by +// ARMv6 and above versions. So for ARMv6, we need to match SLLconst, SRLconst and ORshiftLL. 
+((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (BFXU [int32(armBFAuxInt(8, 8))] x) x) => (REV16 x) +((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [24] (SLLconst [16] x)) x) && buildcfg.GOARM.Version>=6 => (REV16 x) + +// use indexed loads and stores +(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVWloadidx ptr idx mem) +(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil => (MOVWstoreidx ptr idx val mem) +(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil => (MOVWloadshiftLL ptr idx [c] mem) +(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil => (MOVWloadshiftRL ptr idx [c] mem) +(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil => (MOVWloadshiftRA ptr idx [c] mem) +(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil => (MOVWstoreshiftLL ptr idx [c] val mem) +(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil => (MOVWstoreshiftRL ptr idx [c] val mem) +(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil => (MOVWstoreshiftRA ptr idx [c] val mem) +(MOVBUload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVBUloadidx ptr idx mem) +(MOVBload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVBloadidx ptr idx mem) +(MOVBstore [0] {sym} (ADD ptr idx) val mem) && sym == nil => (MOVBstoreidx ptr idx val mem) +(MOVHUload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVHUloadidx ptr idx mem) +(MOVHload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVHloadidx ptr idx mem) +(MOVHstore [0] {sym} (ADD ptr idx) val mem) && sym == nil => (MOVHstoreidx ptr idx val mem) + +// constant folding in indexed loads and stores +(MOVWloadidx ptr (MOVWconst [c]) mem) => (MOVWload [c] ptr mem) +(MOVWloadidx (MOVWconst [c]) ptr mem) => (MOVWload [c] ptr mem) +(MOVBloadidx ptr (MOVWconst [c]) mem) => (MOVBload [c] ptr mem) +(MOVBloadidx (MOVWconst [c]) ptr mem) => (MOVBload [c] ptr mem) +(MOVBUloadidx ptr (MOVWconst [c]) mem) => (MOVBUload [c] ptr mem) +(MOVBUloadidx (MOVWconst [c]) ptr 
 mem) => (MOVBUload [c] ptr mem) +(MOVHUloadidx ptr (MOVWconst [c]) mem) => (MOVHUload [c] ptr mem) +(MOVHUloadidx (MOVWconst [c]) ptr mem) => (MOVHUload [c] ptr mem) +(MOVHloadidx ptr (MOVWconst [c]) mem) => (MOVHload [c] ptr mem) +(MOVHloadidx (MOVWconst [c]) ptr mem) => (MOVHload [c] ptr mem) + +(MOVWstoreidx ptr (MOVWconst [c]) val mem) => (MOVWstore [c] ptr val mem) +(MOVWstoreidx (MOVWconst [c]) ptr val mem) => (MOVWstore [c] ptr val mem) +(MOVBstoreidx ptr (MOVWconst [c]) val mem) => (MOVBstore [c] ptr val mem) +(MOVBstoreidx (MOVWconst [c]) ptr val mem) => (MOVBstore [c] ptr val mem) +(MOVHstoreidx ptr (MOVWconst [c]) val mem) => (MOVHstore [c] ptr val mem) +(MOVHstoreidx (MOVWconst [c]) ptr val mem) => (MOVHstore [c] ptr val mem) + +(MOVWloadidx ptr (SLLconst idx [c]) mem) => (MOVWloadshiftLL ptr idx [c] mem) +(MOVWloadidx (SLLconst idx [c]) ptr mem) => (MOVWloadshiftLL ptr idx [c] mem) +(MOVWloadidx ptr (SRLconst idx [c]) mem) => (MOVWloadshiftRL ptr idx [c] mem) +(MOVWloadidx (SRLconst idx [c]) ptr mem) => (MOVWloadshiftRL ptr idx [c] mem) +(MOVWloadidx ptr (SRAconst idx [c]) mem) => (MOVWloadshiftRA ptr idx [c] mem) +(MOVWloadidx (SRAconst idx [c]) ptr mem) => (MOVWloadshiftRA ptr idx [c] mem) + +(MOVWstoreidx ptr (SLLconst idx [c]) val mem) => (MOVWstoreshiftLL ptr idx [c] val mem) +(MOVWstoreidx (SLLconst idx [c]) ptr val mem) => (MOVWstoreshiftLL ptr idx [c] val mem) +(MOVWstoreidx ptr (SRLconst idx [c]) val mem) => (MOVWstoreshiftRL ptr idx [c] val mem) +(MOVWstoreidx (SRLconst idx [c]) ptr val mem) => (MOVWstoreshiftRL ptr idx [c] val mem) +(MOVWstoreidx ptr (SRAconst idx [c]) val mem) => (MOVWstoreshiftRA ptr idx [c] val mem) +(MOVWstoreidx (SRAconst idx [c]) ptr val mem) => (MOVWstoreshiftRA ptr idx [c] val mem) + +(MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem) => (MOVWload [int32(uint32(c)<<uint64(d))] ptr mem) +(MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem) => (MOVWload [int32(uint32(c)>>uint64(d))] ptr mem) +(MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem) => (MOVWload [c>>uint64(d)] ptr mem) + +(MOVWstoreshiftLL ptr
(MOVWconst [c]) [d] val mem) => (MOVWstore [int32(uint32(c)<<uint64(d))] ptr val mem) +(MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem) => (MOVWstore [int32(uint32(c)>>uint64(d))] ptr val mem) +(MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem) => (MOVWstore [c>>uint64(d)] ptr val mem) + +// generic simplifications +(ADD x (RSBconst [0] y)) => (SUB x y) +(ADD (RSBconst [c] x) (RSBconst [d] y)) => (RSBconst [c+d] (ADD x y)) +(SUB x x) => (MOVWconst [0]) +(RSB x x) => (MOVWconst [0]) +(AND x x) => x +(OR x x) => x +(XOR x x) => (MOVWconst [0]) +(BIC x x) => (MOVWconst [0]) + +(ADD (MUL x y) a) => (MULA x y a) +(SUB a (MUL x y)) && buildcfg.GOARM.Version == 7 => (MULS x y a) +(RSB (MUL x y) a) && buildcfg.GOARM.Version == 7 => (MULS x y a) + +(NEGF (MULF x y)) && buildcfg.GOARM.Version >= 6 => (NMULF x y) +(NEGD (MULD x y)) && buildcfg.GOARM.Version >= 6 => (NMULD x y) +(MULF (NEGF x) y) && buildcfg.GOARM.Version >= 6 => (NMULF x y) +(MULD (NEGD x) y) && buildcfg.GOARM.Version >= 6 => (NMULD x y) +(NMULF (NEGF x) y) => (MULF x y) +(NMULD (NEGD x) y) => (MULD x y) + +// the result will overwrite the addend, since they are in the same register +(ADDF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAF a x y) +(ADDF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSF a x y) +(ADDD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAD a x y) +(ADDD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSD a x y) +(SUBF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSF a x y) +(SUBF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAF a x y) +(SUBD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSD a x y) +(SUBD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAD a x y) + +(AND x (MVN y)) => (BIC x y) + +// simplification with *shift ops +(SUBshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0]) +(SUBshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0]) +(SUBshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0])
+(RSBshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0]) +(RSBshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0]) +(RSBshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0]) +(ANDshiftLL y:(SLLconst x [c]) x [c]) => y +(ANDshiftRL y:(SRLconst x [c]) x [c]) => y +(ANDshiftRA y:(SRAconst x [c]) x [c]) => y +(ORshiftLL y:(SLLconst x [c]) x [c]) => y +(ORshiftRL y:(SRLconst x [c]) x [c]) => y +(ORshiftRA y:(SRAconst x [c]) x [c]) => y +(XORshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0]) +(XORshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0]) +(XORshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0]) +(BICshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0]) +(BICshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0]) +(BICshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0]) +(AND x (MVNshiftLL y [c])) => (BICshiftLL x y [c]) +(AND x (MVNshiftRL y [c])) => (BICshiftRL x y [c]) +(AND x (MVNshiftRA y [c])) => (BICshiftRA x y [c]) + +// floating point optimizations +(CMPF x (MOVFconst [0])) => (CMPF0 x) +(CMPD x (MOVDconst [0])) => (CMPD0 x) + +// bit extraction +(SRAconst (SLLconst x [c]) [d]) && buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFX [(d-c)|(32-d)<<8] x) +(SRLconst (SLLconst x [c]) [d]) && buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x) + +// comparison simplification +((EQ|NE) (CMP x (RSBconst [0] y))) => ((EQ|NE) (CMN x y)) // sense of carry bit not preserved; see also #50854 +((EQ|NE) (CMN x (RSBconst [0] y))) => ((EQ|NE) (CMP x y)) // sense of carry bit not preserved; see also #50864 +(EQ (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (EQ (CMP x y) yes no) +(EQ (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (EQ (CMP a (MUL x y)) yes no) +(EQ (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (EQ (CMPconst [c] x) yes no) +(EQ (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (EQ (CMPshiftLL x y [c]) yes no) +(EQ (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 
=> (EQ (CMPshiftRL x y [c]) yes no) +(EQ (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (EQ (CMPshiftRA x y [c]) yes no) +(EQ (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (EQ (CMPshiftLLreg x y z) yes no) +(EQ (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (EQ (CMPshiftRLreg x y z) yes no) +(EQ (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (EQ (CMPshiftRAreg x y z) yes no) +(NE (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (NE (CMP x y) yes no) +(NE (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (NE (CMP a (MUL x y)) yes no) +(NE (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (NE (CMPconst [c] x) yes no) +(NE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (NE (CMPshiftLL x y [c]) yes no) +(NE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (NE (CMPshiftRL x y [c]) yes no) +(NE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (NE (CMPshiftRA x y [c]) yes no) +(NE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (NE (CMPshiftLLreg x y z) yes no) +(NE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (NE (CMPshiftRLreg x y z) yes no) +(NE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (NE (CMPshiftRAreg x y z) yes no) +(EQ (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (EQ (CMN x y) yes no) +(EQ (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (EQ (CMN a (MUL x y)) yes no) +(EQ (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (EQ (CMNconst [c] x) yes no) +(EQ (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (EQ (CMNshiftLL x y [c]) yes no) +(EQ (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (EQ (CMNshiftRL x y [c]) yes no) +(EQ (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (EQ (CMNshiftRA x y [c]) yes no) +(EQ (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (EQ (CMNshiftLLreg x y z) yes no) +(EQ (CMPconst [0] 
l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (EQ (CMNshiftRLreg x y z) yes no) +(EQ (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (EQ (CMNshiftRAreg x y z) yes no) +(NE (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (NE (CMN x y) yes no) +(NE (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (NE (CMN a (MUL x y)) yes no) +(NE (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (NE (CMNconst [c] x) yes no) +(NE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (NE (CMNshiftLL x y [c]) yes no) +(NE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (NE (CMNshiftRL x y [c]) yes no) +(NE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (NE (CMNshiftRA x y [c]) yes no) +(NE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (NE (CMNshiftLLreg x y z) yes no) +(NE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (NE (CMNshiftRLreg x y z) yes no) +(NE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (NE (CMNshiftRAreg x y z) yes no) +(EQ (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (EQ (TST x y) yes no) +(EQ (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (EQ (TSTconst [c] x) yes no) +(EQ (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (EQ (TSTshiftLL x y [c]) yes no) +(EQ (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (EQ (TSTshiftRL x y [c]) yes no) +(EQ (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (EQ (TSTshiftRA x y [c]) yes no) +(EQ (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (EQ (TSTshiftLLreg x y z) yes no) +(EQ (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (EQ (TSTshiftRLreg x y z) yes no) +(EQ (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (EQ (TSTshiftRAreg x y z) yes no) +(NE (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (NE (TST x y) yes no) +(NE (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (NE (TSTconst [c] x) yes no) 
+(NE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (NE (TSTshiftLL x y [c]) yes no) +(NE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (NE (TSTshiftRL x y [c]) yes no) +(NE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (NE (TSTshiftRA x y [c]) yes no) +(NE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (NE (TSTshiftLLreg x y z) yes no) +(NE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (NE (TSTshiftRLreg x y z) yes no) +(NE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (NE (TSTshiftRAreg x y z) yes no) +(EQ (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (EQ (TEQ x y) yes no) +(EQ (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (EQ (TEQconst [c] x) yes no) +(EQ (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (EQ (TEQshiftLL x y [c]) yes no) +(EQ (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (EQ (TEQshiftRL x y [c]) yes no) +(EQ (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (EQ (TEQshiftRA x y [c]) yes no) +(EQ (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (EQ (TEQshiftLLreg x y z) yes no) +(EQ (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (EQ (TEQshiftRLreg x y z) yes no) +(EQ (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (EQ (TEQshiftRAreg x y z) yes no) +(NE (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (NE (TEQ x y) yes no) +(NE (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (NE (TEQconst [c] x) yes no) +(NE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (NE (TEQshiftLL x y [c]) yes no) +(NE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (NE (TEQshiftRL x y [c]) yes no) +(NE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (NE (TEQshiftRA x y [c]) yes no) +(NE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (NE (TEQshiftLLreg x y z) yes no) +(NE (CMPconst [0] l:(XORshiftRLreg x y z)) 
yes no) && l.Uses==1 => (NE (TEQshiftRLreg x y z) yes no) +(NE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (NE (TEQshiftRAreg x y z) yes no) +(LT (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (LTnoov (CMP x y) yes no) +(LT (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (LTnoov (CMP a (MUL x y)) yes no) +(LT (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (LTnoov (CMPconst [c] x) yes no) +(LT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (LTnoov (CMPshiftLL x y [c]) yes no) +(LT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (LTnoov (CMPshiftRL x y [c]) yes no) +(LT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (LTnoov (CMPshiftRA x y [c]) yes no) +(LT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMPshiftLLreg x y z) yes no) +(LT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMPshiftRLreg x y z) yes no) +(LT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMPshiftRAreg x y z) yes no) +(LE (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (LEnoov (CMP x y) yes no) +(LE (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (LEnoov (CMP a (MUL x y)) yes no) +(LE (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (LEnoov (CMPconst [c] x) yes no) +(LE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (LEnoov (CMPshiftLL x y [c]) yes no) +(LE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (LEnoov (CMPshiftRL x y [c]) yes no) +(LE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (LEnoov (CMPshiftRA x y [c]) yes no) +(LE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMPshiftLLreg x y z) yes no) +(LE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMPshiftRLreg x y z) yes no) +(LE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMPshiftRAreg x y z) yes no) +(LT (CMPconst [0] l:(ADD x y)) yes 
no) && l.Uses==1 => (LTnoov (CMN x y) yes no) +(LT (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (LTnoov (CMN a (MUL x y)) yes no) +(LT (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (LTnoov (CMNconst [c] x) yes no) +(LT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (LTnoov (CMNshiftLL x y [c]) yes no) +(LT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (LTnoov (CMNshiftRL x y [c]) yes no) +(LT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (LTnoov (CMNshiftRA x y [c]) yes no) +(LT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMNshiftLLreg x y z) yes no) +(LT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMNshiftRLreg x y z) yes no) +(LT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMNshiftRAreg x y z) yes no) +(LE (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (LEnoov (CMN x y) yes no) +(LE (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (LEnoov (CMN a (MUL x y)) yes no) +(LE (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (LEnoov (CMNconst [c] x) yes no) +(LE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (LEnoov (CMNshiftLL x y [c]) yes no) +(LE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (LEnoov (CMNshiftRL x y [c]) yes no) +(LE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (LEnoov (CMNshiftRA x y [c]) yes no) +(LE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMNshiftLLreg x y z) yes no) +(LE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMNshiftRLreg x y z) yes no) +(LE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMNshiftRAreg x y z) yes no) +(LT (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (LTnoov (TST x y) yes no) +(LT (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (LTnoov (TSTconst [c] x) yes no) +(LT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && 
l.Uses==1 => (LTnoov (TSTshiftLL x y [c]) yes no) +(LT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (LTnoov (TSTshiftRL x y [c]) yes no) +(LT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (LTnoov (TSTshiftRA x y [c]) yes no) +(LT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (LTnoov (TSTshiftLLreg x y z) yes no) +(LT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (LTnoov (TSTshiftRLreg x y z) yes no) +(LT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (LTnoov (TSTshiftRAreg x y z) yes no) +(LE (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (LEnoov (TST x y) yes no) +(LE (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (LEnoov (TSTconst [c] x) yes no) +(LE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (LEnoov (TSTshiftLL x y [c]) yes no) +(LE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (LEnoov (TSTshiftRL x y [c]) yes no) +(LE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (LEnoov (TSTshiftRA x y [c]) yes no) +(LE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (LEnoov (TSTshiftLLreg x y z) yes no) +(LE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (LEnoov (TSTshiftRLreg x y z) yes no) +(LE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (LEnoov (TSTshiftRAreg x y z) yes no) +(LT (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (LTnoov (TEQ x y) yes no) +(LT (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (LTnoov (TEQconst [c] x) yes no) +(LT (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (LTnoov (TEQshiftLL x y [c]) yes no) +(LT (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (LTnoov (TEQshiftRL x y [c]) yes no) +(LT (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (LTnoov (TEQshiftRA x y [c]) yes no) +(LT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (LTnoov (TEQshiftLLreg x y z) yes no) +(LT (CMPconst [0] 
l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (LTnoov (TEQshiftRLreg x y z) yes no) +(LT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (LTnoov (TEQshiftRAreg x y z) yes no) +(LE (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (LEnoov (TEQ x y) yes no) +(LE (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (LEnoov (TEQconst [c] x) yes no) +(LE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (LEnoov (TEQshiftLL x y [c]) yes no) +(LE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (LEnoov (TEQshiftRL x y [c]) yes no) +(LE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (LEnoov (TEQshiftRA x y [c]) yes no) +(LE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (LEnoov (TEQshiftLLreg x y z) yes no) +(LE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (LEnoov (TEQshiftRLreg x y z) yes no) +(LE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (LEnoov (TEQshiftRAreg x y z) yes no) +(GT (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (GTnoov (CMP x y) yes no) +(GT (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (GTnoov (CMP a (MUL x y)) yes no) +(GT (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (GTnoov (CMPconst [c] x) yes no) +(GT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (GTnoov (CMPshiftLL x y [c]) yes no) +(GT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (GTnoov (CMPshiftRL x y [c]) yes no) +(GT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (GTnoov (CMPshiftRA x y [c]) yes no) +(GT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMPshiftLLreg x y z) yes no) +(GT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMPshiftRLreg x y z) yes no) +(GT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMPshiftRAreg x y z) yes no) +(GE (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (GEnoov (CMP x y) yes no) +(GE (CMPconst 
[0] l:(MULS x y a)) yes no) && l.Uses==1 => (GEnoov (CMP a (MUL x y)) yes no) +(GE (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (GEnoov (CMPconst [c] x) yes no) +(GE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (GEnoov (CMPshiftLL x y [c]) yes no) +(GE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (GEnoov (CMPshiftRL x y [c]) yes no) +(GE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (GEnoov (CMPshiftRA x y [c]) yes no) +(GE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMPshiftLLreg x y z) yes no) +(GE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMPshiftRLreg x y z) yes no) +(GE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMPshiftRAreg x y z) yes no) +(GT (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (GTnoov (CMN x y) yes no) +(GT (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (GTnoov (CMNconst [c] x) yes no) +(GT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (GTnoov (CMNshiftLL x y [c]) yes no) +(GT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (GTnoov (CMNshiftRL x y [c]) yes no) +(GT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (GTnoov (CMNshiftRA x y [c]) yes no) +(GT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMNshiftLLreg x y z) yes no) +(GT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMNshiftRLreg x y z) yes no) +(GT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMNshiftRAreg x y z) yes no) +(GE (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (GEnoov (CMN x y) yes no) +(GE (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (GEnoov (CMN a (MUL x y)) yes no) +(GE (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (GEnoov (CMNconst [c] x) yes no) +(GE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (GEnoov (CMNshiftLL x y [c]) yes no) +(GE 
(CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (GEnoov (CMNshiftRL x y [c]) yes no) +(GE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (GEnoov (CMNshiftRA x y [c]) yes no) +(GE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMNshiftLLreg x y z) yes no) +(GE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMNshiftRLreg x y z) yes no) +(GE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMNshiftRAreg x y z) yes no) +(GT (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (GTnoov (CMN a (MUL x y)) yes no) +(GT (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (GTnoov (TST x y) yes no) +(GT (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (GTnoov (TSTconst [c] x) yes no) +(GT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (GTnoov (TSTshiftLL x y [c]) yes no) +(GT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (GTnoov (TSTshiftRL x y [c]) yes no) +(GT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (GTnoov (TSTshiftRA x y [c]) yes no) +(GT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (GTnoov (TSTshiftLLreg x y z) yes no) +(GT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (GTnoov (TSTshiftRLreg x y z) yes no) +(GT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (GTnoov (TSTshiftRAreg x y z) yes no) +(GE (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (GEnoov (TST x y) yes no) +(GE (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (GEnoov (TSTconst [c] x) yes no) +(GE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (GEnoov (TSTshiftLL x y [c]) yes no) +(GE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (GEnoov (TSTshiftRL x y [c]) yes no) +(GE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (GEnoov (TSTshiftRA x y [c]) yes no) +(GE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (GEnoov 
(TSTshiftLLreg x y z) yes no) +(GE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (GEnoov (TSTshiftRLreg x y z) yes no) +(GE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (GEnoov (TSTshiftRAreg x y z) yes no) +(GT (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (GTnoov (TEQ x y) yes no) +(GT (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (GTnoov (TEQconst [c] x) yes no) +(GT (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (GTnoov (TEQshiftLL x y [c]) yes no) +(GT (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (GTnoov (TEQshiftRL x y [c]) yes no) +(GT (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (GTnoov (TEQshiftRA x y [c]) yes no) +(GT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (GTnoov (TEQshiftLLreg x y z) yes no) +(GT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (GTnoov (TEQshiftRLreg x y z) yes no) +(GT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (GTnoov (TEQshiftRAreg x y z) yes no) +(GE (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (GEnoov (TEQ x y) yes no) +(GE (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (GEnoov (TEQconst [c] x) yes no) +(GE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (GEnoov (TEQshiftLL x y [c]) yes no) +(GE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (GEnoov (TEQshiftRL x y [c]) yes no) +(GE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (GEnoov (TEQshiftRA x y [c]) yes no) +(GE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (GEnoov (TEQshiftLLreg x y z) yes no) +(GE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (GEnoov (TEQshiftRLreg x y z) yes no) +(GE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (GEnoov (TEQshiftRAreg x y z) yes no) + +(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read8(sym, int64(off)))]) +(MOVHUload [off] {sym} (SB) _) && 
symIsRO(sym) => (MOVWconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) +(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/ARM64.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/ARM64.rules new file mode 100644 index 0000000000000000000000000000000000000000..18a6586fb0bf01621489ef7733dd14ecd49d4b29 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/ARM64.rules @@ -0,0 +1,1937 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +(Add(Ptr|64|32|16|8) ...) => (ADD ...) +(Add(32|64)F ...) => (FADD(S|D) ...) + +(Sub(Ptr|64|32|16|8) ...) => (SUB ...) +(Sub(32|64)F ...) => (FSUB(S|D) ...) + +(Mul64 ...) => (MUL ...) +(Mul(32|16|8) ...) => (MULW ...) +(Mul(32|64)F ...) => (FMUL(S|D) ...) + +(Hmul64 ...) => (MULH ...) +(Hmul64u ...) => (UMULH ...) +(Hmul32 x y) => (SRAconst (MULL x y) [32]) +(Hmul32u x y) => (SRAconst (UMULL x y) [32]) +(Select0 (Mul64uhilo x y)) => (UMULH x y) +(Select1 (Mul64uhilo x y)) => (MUL x y) + +(Div64 [false] x y) => (DIV x y) +(Div32 [false] x y) => (DIVW x y) +(Div16 [false] x y) => (DIVW (SignExt16to32 x) (SignExt16to32 y)) +(Div16u x y) => (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y)) +(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y)) +(Div8u x y) => (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y)) +(Div64u ...) => (UDIV ...) +(Div32u ...) => (UDIVW ...) +(Div32F ...) => (FDIVS ...) +(Div64F ...) => (FDIVD ...) + +(Mod64 x y) => (MOD x y) +(Mod32 x y) => (MODW x y) +(Mod64u ...) => (UMOD ...) +(Mod32u ...) => (UMODW ...) 
+(Mod(16|8) x y) => (MODW (SignExt(16|8)to32 x) (SignExt(16|8)to32 y)) +(Mod(16|8)u x y) => (UMODW (ZeroExt(16|8)to32 x) (ZeroExt(16|8)to32 y)) + +// (x + y) / 2 with x>=y => (x - y) / 2 + y +(Avg64u x y) => (ADD (SRLconst (SUB x y) [1]) y) + +(And(64|32|16|8) ...) => (AND ...) +(Or(64|32|16|8) ...) => (OR ...) +(Xor(64|32|16|8) ...) => (XOR ...) + +// unary ops +(Neg(64|32|16|8) ...) => (NEG ...) +(Neg(32|64)F ...) => (FNEG(S|D) ...) +(Com(64|32|16|8) ...) => (MVN ...) + +// math package intrinsics +(Abs ...) => (FABSD ...) +(Sqrt ...) => (FSQRTD ...) +(Ceil ...) => (FRINTPD ...) +(Floor ...) => (FRINTMD ...) +(Round ...) => (FRINTAD ...) +(RoundToEven ...) => (FRINTND ...) +(Trunc ...) => (FRINTZD ...) +(FMA x y z) => (FMADDD z x y) + +(Sqrt32 ...) => (FSQRTS ...) + +(Min(64|32)F ...) => (FMIN(D|S) ...) +(Max(64|32)F ...) => (FMAX(D|S) ...) + +// lowering rotates +// we do rotate detection in generic rules, if the following rules need to be changed, check generic rules first. +(RotateLeft8 x (MOVDconst [c])) => (Or8 (Lsh8x64 x (MOVDconst [c&7])) (Rsh8Ux64 x (MOVDconst [-c&7]))) +(RotateLeft8 x y) => (OR (SLL x (ANDconst [7] y)) (SRL (ZeroExt8to64 x) (ANDconst [7] (NEG y)))) +(RotateLeft16 x (MOVDconst [c])) => (Or16 (Lsh16x64 x (MOVDconst [c&15])) (Rsh16Ux64 x (MOVDconst [-c&15]))) +(RotateLeft16 x y) => (RORW (ORshiftLL (ZeroExt16to32 x) (ZeroExt16to32 x) [16]) (NEG y)) +(RotateLeft32 x y) => (RORW x (NEG y)) +(RotateLeft64 x y) => (ROR x (NEG y)) + +(Ctz(64|32|16|8)NonZero ...) => (Ctz(64|32|32|32) ...) + +(Ctz64 x) => (CLZ (RBIT x)) +(Ctz32 x) => (CLZW (RBITW x)) +(Ctz16 x) => (CLZW (RBITW (ORconst [0x10000] x))) +(Ctz8 x) => (CLZW (RBITW (ORconst [0x100] x))) + +(PopCount64 x) => (FMOVDfpgp (VUADDLV (VCNT (FMOVDgpfp x)))) +(PopCount32 x) => (FMOVDfpgp (VUADDLV (VCNT (FMOVDgpfp (ZeroExt32to64 x))))) +(PopCount16 x) => (FMOVDfpgp (VUADDLV (VCNT (FMOVDgpfp (ZeroExt16to64 x))))) + +// Load args directly into the register class where it will be used. 
+(FMOVDgpfp (Arg [off] {sym})) => @b.Func.Entry (Arg [off] {sym}) +(FMOVDfpgp (Arg [off] {sym})) => @b.Func.Entry (Arg [off] {sym}) + +// Similarly for stores, if we see a store after FPR <=> GPR move, then redirect store to use the other register set. +(MOVDstore [off] {sym} ptr (FMOVDfpgp val) mem) => (FMOVDstore [off] {sym} ptr val mem) +(FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem) => (MOVDstore [off] {sym} ptr val mem) +(MOVWstore [off] {sym} ptr (FMOVSfpgp val) mem) => (FMOVSstore [off] {sym} ptr val mem) +(FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem) + +// float <=> int register moves, with no conversion. +// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}. +(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _)) => (FMOVDfpgp val) +(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) => (FMOVDgpfp val) +(MOVWUload [off] {sym} ptr (FMOVSstore [off] {sym} ptr val _)) => (FMOVSfpgp val) +(FMOVSload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (FMOVSgpfp val) + +(BitLen64 x) => (SUB (MOVDconst [64]) (CLZ x)) +(BitLen32 x) => (SUB (MOVDconst [32]) (CLZW x)) + +(Bswap64 ...) => (REV ...) +(Bswap32 ...) => (REVW ...) +(Bswap16 ...) => (REV16W ...) + +(BitRev64 ...) => (RBIT ...) +(BitRev32 ...) => (RBITW ...) +(BitRev16 x) => (SRLconst [48] (RBIT x)) +(BitRev8 x) => (SRLconst [56] (RBIT x)) + +// In fact, UMOD will be translated into UREM instruction, and UREM is originally translated into +// UDIV and MSUB instructions. But if there is already an identical UDIV instruction just before or +// after UREM (case like quo, rem := z/y, z%y), then the second UDIV instruction becomes redundant. +// The purpose of this rule is to have this extra UDIV instruction removed in CSE pass. +(UMOD x y) => (MSUB x y (UDIV x y)) +(UMODW x y) => (MSUBW x y (UDIVW x y)) + +// 64-bit addition with carry. 
+(Select0 (Add64carry x y c)) => (Select0 (ADCSflags x y (Select1 (ADDSconstflags [-1] c)))) +(Select1 (Add64carry x y c)) => (ADCzerocarry (Select1 (ADCSflags x y (Select1 (ADDSconstflags [-1] c))))) + +// 64-bit subtraction with borrowing. +(Select0 (Sub64borrow x y bo)) => (Select0 (SBCSflags x y (Select1 (NEGSflags bo)))) +(Select1 (Sub64borrow x y bo)) => (NEG (NGCzerocarry (Select1 (SBCSflags x y (Select1 (NEGSflags bo)))))) + +// boolean ops -- booleans are represented with 0=false, 1=true +(AndB ...) => (AND ...) +(OrB ...) => (OR ...) +(EqB x y) => (XOR (MOVDconst [1]) (XOR x y)) +(NeqB ...) => (XOR ...) +(Not x) => (XOR (MOVDconst [1]) x) + +// shifts +// hardware instruction uses only the low 6 bits of the shift +// we compare to 64 to ensure Go semantics for large shifts +// Rules about rotates with non-const shift are based on the following rules, +// if the following rules change, please also modify the rules based on them. + +// check shiftIsBounded first, if shift value is proved to be valid then we +// can do the shift directly. 
+// left shift +(Lsh(64|32|16|8)x64 x y) && shiftIsBounded(v) => (SLL x y) +(Lsh(64|32|16|8)x32 x y) && shiftIsBounded(v) => (SLL x y) +(Lsh(64|32|16|8)x16 x y) && shiftIsBounded(v) => (SLL x y) +(Lsh(64|32|16|8)x8 x y) && shiftIsBounded(v) => (SLL x y) + +// signed right shift +(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y) +(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt32to64 x) y) +(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt16to64 x) y) +(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt8to64 x) y) + +// unsigned right shift +(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y) +(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt32to64 x) y) +(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt16to64 x) y) +(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt8to64 x) y) + +// shift value may be out of range, use CMP + CSEL instead +(Lsh64x64 x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] y)) +(Lsh64x(32|16|8) x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))) + +(Lsh32x64 x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] y)) +(Lsh32x(32|16|8) x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))) + +(Lsh16x64 x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] y)) +(Lsh16x(32|16|8) x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))) + +(Lsh8x64 x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] y)) +(Lsh8x(32|16|8) x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL x y) (Const64 [0]) (CMPconst [64] 
((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))) + +(Rsh64Ux64 x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL x y) (Const64 [0]) (CMPconst [64] y)) +(Rsh64Ux(32|16|8) x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL x y) (Const64 [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))) + +(Rsh32Ux64 x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL (ZeroExt32to64 x) y) (Const64 [0]) (CMPconst [64] y)) +(Rsh32Ux(32|16|8) x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL (ZeroExt32to64 x) y) (Const64 [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))) + +(Rsh16Ux64 x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL (ZeroExt16to64 x) y) (Const64 [0]) (CMPconst [64] y)) +(Rsh16Ux(32|16|8) x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL (ZeroExt16to64 x) y) (Const64 [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))) + +(Rsh8Ux64 x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL (ZeroExt8to64 x) y) (Const64 [0]) (CMPconst [64] y)) +(Rsh8Ux(32|16|8) x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL (ZeroExt8to64 x) y) (Const64 [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))) + +(Rsh64x64 x y) && !shiftIsBounded(v) => (SRA x (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] y))) +(Rsh64x(32|16|8) x y) && !shiftIsBounded(v) => (SRA x (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))) + +(Rsh32x64 x y) && !shiftIsBounded(v) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] y))) +(Rsh32x(32|16|8) x y) && !shiftIsBounded(v) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))) + +(Rsh16x64 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] y))) +(Rsh16x(32|16|8) x y) && !shiftIsBounded(v) => (SRA 
(SignExt16to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))) + +(Rsh8x64 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] y))) +(Rsh8x(32|16|8) x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] y (Const64 [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))) + +// constants +(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)]) +(Const(32|64)F [val]) => (FMOV(S|D)const [float64(val)]) +(ConstNil) => (MOVDconst [0]) +(ConstBool [t]) => (MOVDconst [b2i(t)]) + +(Slicemask x) => (SRAconst (NEG x) [63]) + +// truncations +// Because we ignore high parts of registers, truncates are just copies. +(Trunc16to8 ...) => (Copy ...) +(Trunc32to8 ...) => (Copy ...) +(Trunc32to16 ...) => (Copy ...) +(Trunc64to8 ...) => (Copy ...) +(Trunc64to16 ...) => (Copy ...) +(Trunc64to32 ...) => (Copy ...) + +// Zero-/Sign-extensions +(ZeroExt8to16 ...) => (MOVBUreg ...) +(ZeroExt8to32 ...) => (MOVBUreg ...) +(ZeroExt16to32 ...) => (MOVHUreg ...) +(ZeroExt8to64 ...) => (MOVBUreg ...) +(ZeroExt16to64 ...) => (MOVHUreg ...) +(ZeroExt32to64 ...) => (MOVWUreg ...) + +(SignExt8to16 ...) => (MOVBreg ...) +(SignExt8to32 ...) => (MOVBreg ...) +(SignExt16to32 ...) => (MOVHreg ...) +(SignExt8to64 ...) => (MOVBreg ...) +(SignExt16to64 ...) => (MOVHreg ...) +(SignExt32to64 ...) => (MOVWreg ...) + +// float <=> int conversion +(Cvt32to32F ...) => (SCVTFWS ...) +(Cvt32to64F ...) => (SCVTFWD ...) +(Cvt64to32F ...) => (SCVTFS ...) +(Cvt64to64F ...) => (SCVTFD ...) +(Cvt32Uto32F ...) => (UCVTFWS ...) +(Cvt32Uto64F ...) => (UCVTFWD ...) +(Cvt64Uto32F ...) => (UCVTFS ...) +(Cvt64Uto64F ...) => (UCVTFD ...) +(Cvt32Fto32 ...) => (FCVTZSSW ...) +(Cvt64Fto32 ...) => (FCVTZSDW ...) +(Cvt32Fto64 ...) => (FCVTZSS ...) +(Cvt64Fto64 ...) => (FCVTZSD ...) +(Cvt32Fto32U ...) => (FCVTZUSW ...) +(Cvt64Fto32U ...) => (FCVTZUDW ...) +(Cvt32Fto64U ...) 
=> (FCVTZUS ...) +(Cvt64Fto64U ...) => (FCVTZUD ...) +(Cvt32Fto64F ...) => (FCVTSD ...) +(Cvt64Fto32F ...) => (FCVTDS ...) + +(CvtBoolToUint8 ...) => (Copy ...) + +(Round32F ...) => (LoweredRound32F ...) +(Round64F ...) => (LoweredRound64F ...) + +// comparisons +(Eq8 x y) => (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) +(Eq16 x y) => (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) +(Eq32 x y) => (Equal (CMPW x y)) +(Eq64 x y) => (Equal (CMP x y)) +(EqPtr x y) => (Equal (CMP x y)) +(Eq32F x y) => (Equal (FCMPS x y)) +(Eq64F x y) => (Equal (FCMPD x y)) + +(Neq8 x y) => (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) +(Neq16 x y) => (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) +(Neq32 x y) => (NotEqual (CMPW x y)) +(Neq64 x y) => (NotEqual (CMP x y)) +(NeqPtr x y) => (NotEqual (CMP x y)) +(Neq(32|64)F x y) => (NotEqual (FCMP(S|D) x y)) + +(Less(8|16) x y) => (LessThan (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y))) +(Less32 x y) => (LessThan (CMPW x y)) +(Less64 x y) => (LessThan (CMP x y)) + +// Set condition flags for floating-point comparisons "x < y" +// and "x <= y". Because if either or both of the operands are +// NaNs, all three of (x < y), (x == y) and (x > y) are false, +// and ARM Manual says FCMP instruction sets PSTATE. +// of this case to (0, 0, 1, 1). 
+(Less32F x y) => (LessThanF (FCMPS x y)) +(Less64F x y) => (LessThanF (FCMPD x y)) + +// For an unsigned integer x, the following rules are useful when combining branch +// 0 < x => x != 0 +// x <= 0 => x == 0 +// x < 1 => x == 0 +// 1 <= x => x != 0 +(Less(8U|16U|32U|64U) zero:(MOVDconst [0]) x) => (Neq(8|16|32|64) zero x) +(Leq(8U|16U|32U|64U) x zero:(MOVDconst [0])) => (Eq(8|16|32|64) x zero) +(Less(8U|16U|32U|64U) x (MOVDconst [1])) => (Eq(8|16|32|64) x (MOVDconst [0])) +(Leq(8U|16U|32U|64U) (MOVDconst [1]) x) => (Neq(8|16|32|64) (MOVDconst [0]) x) + +(Less8U x y) => (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) +(Less16U x y) => (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) +(Less32U x y) => (LessThanU (CMPW x y)) +(Less64U x y) => (LessThanU (CMP x y)) + +(Leq8 x y) => (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y))) +(Leq16 x y) => (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y))) +(Leq32 x y) => (LessEqual (CMPW x y)) +(Leq64 x y) => (LessEqual (CMP x y)) + +// Refer to the comments for op Less64F above. +(Leq32F x y) => (LessEqualF (FCMPS x y)) +(Leq64F x y) => (LessEqualF (FCMPD x y)) + +(Leq8U x y) => (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y))) +(Leq16U x y) => (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y))) +(Leq32U x y) => (LessEqualU (CMPW x y)) +(Leq64U x y) => (LessEqualU (CMP x y)) + +// Optimize comparison between a floating-point value and 0.0 with "FCMP $(0.0), Fn" +(FCMPS x (FMOVSconst [0])) => (FCMPS0 x) +(FCMPS (FMOVSconst [0]) x) => (InvertFlags (FCMPS0 x)) +(FCMPD x (FMOVDconst [0])) => (FCMPD0 x) +(FCMPD (FMOVDconst [0]) x) => (InvertFlags (FCMPD0 x)) + +// CSEL needs a flag-generating argument. Synthesize a TSTW if necessary. 
+(CondSelect x y boolval) && flagArg(boolval) != nil => (CSEL [boolval.Op] x y flagArg(boolval)) +(CondSelect x y boolval) && flagArg(boolval) == nil => (CSEL [OpARM64NotEqual] x y (TSTWconst [1] boolval)) + +(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVDaddr [int32(off)] ptr) +(OffPtr [off] ptr) => (ADDconst [off] ptr) + +(Addr {sym} base) => (MOVDaddr {sym} base) +(LocalAddr {sym} base mem) && t.Elem().HasPointers() => (MOVDaddr {sym} (SPanchored base mem)) +(LocalAddr {sym} base _) && !t.Elem().HasPointers() => (MOVDaddr {sym} base) + +// loads +(Load ptr mem) && t.IsBoolean() => (MOVBUload ptr mem) +(Load ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem) +(Load ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem) +(Load ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem) +(Load ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem) +(Load ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem) +(Load ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem) +(Load ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem) +(Load ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem) +(Load ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem) + +// stores +(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (FMOVSstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (FMOVDstore ptr val mem) + +// zeroing +(Zero [0] _ mem) => mem +(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem) +(Zero [2] ptr mem) => (MOVHstore ptr (MOVDconst [0]) mem) +(Zero [4] ptr mem) => (MOVWstore ptr (MOVDconst [0]) mem) +(Zero [3] ptr mem) => + (MOVBstore [2] ptr 
(MOVDconst [0]) + (MOVHstore ptr (MOVDconst [0]) mem)) +(Zero [5] ptr mem) => + (MOVBstore [4] ptr (MOVDconst [0]) + (MOVWstore ptr (MOVDconst [0]) mem)) +(Zero [6] ptr mem) => + (MOVHstore [4] ptr (MOVDconst [0]) + (MOVWstore ptr (MOVDconst [0]) mem)) +(Zero [7] ptr mem) => + (MOVWstore [3] ptr (MOVDconst [0]) + (MOVWstore ptr (MOVDconst [0]) mem)) +(Zero [8] ptr mem) => (MOVDstore ptr (MOVDconst [0]) mem) +(Zero [9] ptr mem) => + (MOVBstore [8] ptr (MOVDconst [0]) + (MOVDstore ptr (MOVDconst [0]) mem)) +(Zero [10] ptr mem) => + (MOVHstore [8] ptr (MOVDconst [0]) + (MOVDstore ptr (MOVDconst [0]) mem)) +(Zero [11] ptr mem) => + (MOVDstore [3] ptr (MOVDconst [0]) + (MOVDstore ptr (MOVDconst [0]) mem)) +(Zero [12] ptr mem) => + (MOVWstore [8] ptr (MOVDconst [0]) + (MOVDstore ptr (MOVDconst [0]) mem)) +(Zero [13] ptr mem) => + (MOVDstore [5] ptr (MOVDconst [0]) + (MOVDstore ptr (MOVDconst [0]) mem)) +(Zero [14] ptr mem) => + (MOVDstore [6] ptr (MOVDconst [0]) + (MOVDstore ptr (MOVDconst [0]) mem)) +(Zero [15] ptr mem) => + (MOVDstore [7] ptr (MOVDconst [0]) + (MOVDstore ptr (MOVDconst [0]) mem)) +(Zero [16] ptr mem) => + (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem) + +(Zero [32] ptr mem) => + (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) + (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)) + +(Zero [48] ptr mem) => + (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) + (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) + (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))) + +(Zero [64] ptr mem) => + (STP [48] ptr (MOVDconst [0]) (MOVDconst [0]) + (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) + (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) + (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))) + +// strip off fractional word zeroing +(Zero [s] ptr mem) && s%16 != 0 && s%16 <= 8 && s > 16 => + (Zero [8] + (OffPtr ptr [s-8]) + (Zero [s-s%16] ptr mem)) +(Zero [s] ptr mem) && s%16 != 0 && s%16 > 8 && s > 16 => + (Zero [16] + (OffPtr ptr [s-16]) + (Zero [s-s%16] ptr mem)) + +// 
medium zeroing uses a duff device +// 4, 16, and 64 are magic constants, see runtime/mkduff.go +(Zero [s] ptr mem) + && s%16 == 0 && s > 64 && s <= 16*64 + && !config.noDuffDevice => + (DUFFZERO [4 * (64 - s/16)] ptr mem) + +// large zeroing uses a loop +(Zero [s] ptr mem) + && s%16 == 0 && (s > 16*64 || config.noDuffDevice) => + (LoweredZero + ptr + (ADDconst [s-16] ptr) + mem) + +// moves +(Move [0] _ _ mem) => mem +(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem) +(Move [2] dst src mem) => (MOVHstore dst (MOVHUload src mem) mem) +(Move [3] dst src mem) => + (MOVBstore [2] dst (MOVBUload [2] src mem) + (MOVHstore dst (MOVHUload src mem) mem)) +(Move [4] dst src mem) => (MOVWstore dst (MOVWUload src mem) mem) +(Move [5] dst src mem) => + (MOVBstore [4] dst (MOVBUload [4] src mem) + (MOVWstore dst (MOVWUload src mem) mem)) +(Move [6] dst src mem) => + (MOVHstore [4] dst (MOVHUload [4] src mem) + (MOVWstore dst (MOVWUload src mem) mem)) +(Move [7] dst src mem) => + (MOVWstore [3] dst (MOVWUload [3] src mem) + (MOVWstore dst (MOVWUload src mem) mem)) +(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem) +(Move [9] dst src mem) => + (MOVBstore [8] dst (MOVBUload [8] src mem) + (MOVDstore dst (MOVDload src mem) mem)) +(Move [10] dst src mem) => + (MOVHstore [8] dst (MOVHUload [8] src mem) + (MOVDstore dst (MOVDload src mem) mem)) +(Move [11] dst src mem) => + (MOVDstore [3] dst (MOVDload [3] src mem) + (MOVDstore dst (MOVDload src mem) mem)) +(Move [12] dst src mem) => + (MOVWstore [8] dst (MOVWUload [8] src mem) + (MOVDstore dst (MOVDload src mem) mem)) +(Move [13] dst src mem) => + (MOVDstore [5] dst (MOVDload [5] src mem) + (MOVDstore dst (MOVDload src mem) mem)) +(Move [14] dst src mem) => + (MOVDstore [6] dst (MOVDload [6] src mem) + (MOVDstore dst (MOVDload src mem) mem)) +(Move [15] dst src mem) => + (MOVDstore [7] dst (MOVDload [7] src mem) + (MOVDstore dst (MOVDload src mem) mem)) +(Move [16] dst src mem) => + (STP dst (Select0 
(LDP src mem)) (Select1 (LDP src mem)) mem) +(Move [32] dst src mem) => + (STP [16] dst (Select0 (LDP [16] src mem)) (Select1 (LDP [16] src mem)) + (STP dst (Select0 (LDP src mem)) (Select1 (LDP src mem)) mem)) +(Move [48] dst src mem) => + (STP [32] dst (Select0 (LDP [32] src mem)) (Select1 (LDP [32] src mem)) + (STP [16] dst (Select0 (LDP [16] src mem)) (Select1 (LDP [16] src mem)) + (STP dst (Select0 (LDP src mem)) (Select1 (LDP src mem)) mem))) +(Move [64] dst src mem) => + (STP [48] dst (Select0 (LDP [48] src mem)) (Select1 (LDP [48] src mem)) + (STP [32] dst (Select0 (LDP [32] src mem)) (Select1 (LDP [32] src mem)) + (STP [16] dst (Select0 (LDP [16] src mem)) (Select1 (LDP [16] src mem)) + (STP dst (Select0 (LDP src mem)) (Select1 (LDP src mem)) mem)))) + +(MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i+8] ptr mem)) && x.Uses == 1 && setPos(v, x.Pos) && clobber(x) => (MOVQstorezero {s} [i] ptr mem) +(MOVDstorezero {s} [i] ptr x:(MOVDstorezero {s} [i-8] ptr mem)) && x.Uses == 1 && setPos(v, x.Pos) && clobber(x) => (MOVQstorezero {s} [i-8] ptr mem) + +// strip off fractional word move +(Move [s] dst src mem) && s%16 != 0 && s%16 <= 8 && s > 16 => + (Move [8] + (OffPtr dst [s-8]) + (OffPtr src [s-8]) + (Move [s-s%16] dst src mem)) +(Move [s] dst src mem) && s%16 != 0 && s%16 > 8 && s > 16 => + (Move [16] + (OffPtr dst [s-16]) + (OffPtr src [s-16]) + (Move [s-s%16] dst src mem)) + +// medium move uses a duff device +(Move [s] dst src mem) + && s > 64 && s <= 16*64 && s%16 == 0 + && !config.noDuffDevice && logLargeCopy(v, s) => + (DUFFCOPY [8 * (64 - s/16)] dst src mem) +// 8 is the number of bytes to encode: +// +// LDP.P 16(R16), (R26, R27) +// STP.P (R26, R27), 16(R17) +// +// 64 is number of these blocks. See runtime/duff_arm64.s:duffcopy + +// large move uses a loop +(Move [s] dst src mem) + && s%16 == 0 && (s > 16*64 || config.noDuffDevice) + && logLargeCopy(v, s) => + (LoweredMove + dst + src + (ADDconst src [s-16]) + mem) + +// calls +(StaticCall ...) 
=> (CALLstatic ...) +(ClosureCall ...) => (CALLclosure ...) +(InterCall ...) => (CALLinter ...) +(TailCall ...) => (CALLtail ...) + +// checks +(NilCheck ...) => (LoweredNilCheck ...) +(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr)) +(IsInBounds idx len) => (LessThanU (CMP idx len)) +(IsSliceInBounds idx len) => (LessEqualU (CMP idx len)) + +// pseudo-ops +(GetClosurePtr ...) => (LoweredGetClosurePtr ...) +(GetCallerSP ...) => (LoweredGetCallerSP ...) +(GetCallerPC ...) => (LoweredGetCallerPC ...) + +// Absorb pseudo-ops into blocks. +(If (Equal cc) yes no) => (EQ cc yes no) +(If (NotEqual cc) yes no) => (NE cc yes no) +(If (LessThan cc) yes no) => (LT cc yes no) +(If (LessThanU cc) yes no) => (ULT cc yes no) +(If (LessEqual cc) yes no) => (LE cc yes no) +(If (LessEqualU cc) yes no) => (ULE cc yes no) +(If (GreaterThan cc) yes no) => (GT cc yes no) +(If (GreaterThanU cc) yes no) => (UGT cc yes no) +(If (GreaterEqual cc) yes no) => (GE cc yes no) +(If (GreaterEqualU cc) yes no) => (UGE cc yes no) +(If (LessThanF cc) yes no) => (FLT cc yes no) +(If (LessEqualF cc) yes no) => (FLE cc yes no) +(If (GreaterThanF cc) yes no) => (FGT cc yes no) +(If (GreaterEqualF cc) yes no) => (FGE cc yes no) + +(If cond yes no) => (TBNZ [0] cond yes no) + +(JumpTable idx) => (JUMPTABLE {makeJumpTableSym(b)} idx (MOVDaddr {makeJumpTableSym(b)} (SB))) + +// atomic intrinsics +// Note: these ops do not accept offset. +(AtomicLoad8 ...) => (LDARB ...) +(AtomicLoad32 ...) => (LDARW ...) +(AtomicLoad64 ...) => (LDAR ...) +(AtomicLoadPtr ...) => (LDAR ...) + +(AtomicStore8 ...) => (STLRB ...) +(AtomicStore32 ...) => (STLRW ...) +(AtomicStore64 ...) => (STLR ...) +(AtomicStorePtrNoWB ...) => (STLR ...) + +(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...) +(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...) +(AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64) ...) + +(AtomicAdd(32|64)Variant ...) => (LoweredAtomicAdd(32|64)Variant ...) 
+(AtomicExchange(32|64)Variant ...) => (LoweredAtomicExchange(32|64)Variant ...) +(AtomicCompareAndSwap(32|64)Variant ...) => (LoweredAtomicCas(32|64)Variant ...) + +// Currently the updated value is not used, but we need a register to temporarily hold it. +(AtomicAnd(8|32) ptr val mem) => (Select1 (LoweredAtomicAnd(8|32) ptr val mem)) +(AtomicOr(8|32) ptr val mem) => (Select1 (LoweredAtomicOr(8|32) ptr val mem)) +(AtomicAnd(8|32)Variant ptr val mem) => (Select1 (LoweredAtomicAnd(8|32)Variant ptr val mem)) +(AtomicOr(8|32)Variant ptr val mem) => (Select1 (LoweredAtomicOr(8|32)Variant ptr val mem)) + +// Write barrier. +(WB ...) => (LoweredWB ...) + +// Publication barrier (0xe is ST option) +(PubBarrier mem) => (DMB [0xe] mem) + +(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem) + +// Optimizations + +// Absorb boolean tests into block +(NZ (Equal cc) yes no) => (EQ cc yes no) +(NZ (NotEqual cc) yes no) => (NE cc yes no) +(NZ (LessThan cc) yes no) => (LT cc yes no) +(NZ (LessThanU cc) yes no) => (ULT cc yes no) +(NZ (LessEqual cc) yes no) => (LE cc yes no) +(NZ (LessEqualU cc) yes no) => (ULE cc yes no) +(NZ (GreaterThan cc) yes no) => (GT cc yes no) +(NZ (GreaterThanU cc) yes no) => (UGT cc yes no) +(NZ (GreaterEqual cc) yes no) => (GE cc yes no) +(NZ (GreaterEqualU cc) yes no) => (UGE cc yes no) +(NZ (LessThanF cc) yes no) => (FLT cc yes no) +(NZ (LessEqualF cc) yes no) => (FLE cc yes no) +(NZ (GreaterThanF cc) yes no) => (FGT cc yes no) +(NZ (GreaterEqualF cc) yes no) => (FGE cc yes no) + +(TBNZ [0] (Equal cc) yes no) => (EQ cc yes no) +(TBNZ [0] (NotEqual cc) yes no) => (NE cc yes no) +(TBNZ [0] (LessThan cc) yes no) => (LT cc yes no) +(TBNZ [0] (LessThanU cc) yes no) => (ULT cc yes no) +(TBNZ [0] (LessEqual cc) yes no) => (LE cc yes 
no) +(TBNZ [0] (LessEqualU cc) yes no) => (ULE cc yes no) +(TBNZ [0] (GreaterThan cc) yes no) => (GT cc yes no) +(TBNZ [0] (GreaterThanU cc) yes no) => (UGT cc yes no) +(TBNZ [0] (GreaterEqual cc) yes no) => (GE cc yes no) +(TBNZ [0] (GreaterEqualU cc) yes no) => (UGE cc yes no) +(TBNZ [0] (LessThanF cc) yes no) => (FLT cc yes no) +(TBNZ [0] (LessEqualF cc) yes no) => (FLE cc yes no) +(TBNZ [0] (GreaterThanF cc) yes no) => (FGT cc yes no) +(TBNZ [0] (GreaterEqualF cc) yes no) => (FGE cc yes no) + +((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TST x y) yes no) +((EQ|NE|LT|LE|GT|GE) (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTconst [c] y) yes no) +((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTW x y) yes no) +((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTWconst [int32(c)] y) yes no) + +// For conditional instructions such as CSET, CSEL. 
+((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPconst [0] z:(AND x y))) && z.Uses == 1 => + ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TST x y)) +((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPWconst [0] x:(ANDconst [c] y))) && x.Uses == 1 => + ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TSTWconst [int32(c)] y)) +((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPWconst [0] z:(AND x y))) && z.Uses == 1 => + ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TSTW x y)) +((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPconst [0] x:(ANDconst [c] y))) && x.Uses == 1 => + ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TSTconst [c] y)) + +((EQ|NE|LT|LE|GT|GE) (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNconst [c] y) yes no) +((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNWconst [int32(c)] y) yes no) +((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMN x y) yes no) +((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNW x y) yes no) + +// CMP(x,-y) -> CMN(x,y) is only valid for unordered comparison, if y can be -1<<63 +((EQ|NE) (CMP x z:(NEG y)) yes no) && z.Uses == 1 => ((EQ|NE) (CMN x y) yes no) +((Equal|NotEqual) (CMP x z:(NEG y))) && z.Uses == 1 => ((Equal|NotEqual) (CMN x y)) + +// CMPW(x,-y) -> CMNW(x,y) is only valid for unordered comparison, if y can be -1<<31 +((EQ|NE) (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => ((EQ|NE) (CMNW x y) yes no) +((Equal|NotEqual) (CMPW x z:(NEG y))) && z.Uses == 1 => ((Equal|NotEqual) (CMNW x y)) + +// For conditional instructions such as CSET, CSEL. +// TODO: add support for LE, GT, overflow needs to be considered. 
+((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst [0] x:(ADDconst [c] y))) && x.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNconst [c] y)) +((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] x:(ADDconst [c] y))) && x.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNWconst [int32(c)] y)) +((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst [0] z:(ADD x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMN x y)) +((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] z:(ADD x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNW x y)) +((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst [0] z:(MADD a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMN a (MUL x y))) +((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst [0] z:(MSUB a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMP a (MUL x y))) +((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] z:(MADDW a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNW a (MULW x y))) +((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] z:(MSUBW a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMPW a (MULW x y))) + +((CMPconst|CMNconst) [c] y) && c < 0 && c != -1<<63 => ((CMNconst|CMPconst) [-c] y) +((CMPWconst|CMNWconst) [c] y) && c < 0 && c != -1<<31 => ((CMNWconst|CMPWconst) [-c] y) + +((EQ|NE) (CMPconst [0] x) yes no) => ((Z|NZ) x yes no) +((EQ|NE) (CMPWconst [0] x) yes no) => ((ZW|NZW) x yes no) + +((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMN a (MUL x y)) yes no) +((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMP a (MUL x y)) yes no) +((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNW a (MULW x y)) yes no) +((EQ|NE|LT|LE|GT|GE) 
(CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMPW a (MULW x y)) yes no) + +// Absorb bit-tests into block +(Z (ANDconst [c] x) yes no) && oneBit(c) => (TBZ [int64(ntz64(c))] x yes no) +(NZ (ANDconst [c] x) yes no) && oneBit(c) => (TBNZ [int64(ntz64(c))] x yes no) +(ZW (ANDconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBZ [int64(ntz64(int64(uint32(c))))] x yes no) +(NZW (ANDconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no) +(EQ (TSTconst [c] x) yes no) && oneBit(c) => (TBZ [int64(ntz64(c))] x yes no) +(NE (TSTconst [c] x) yes no) && oneBit(c) => (TBNZ [int64(ntz64(c))] x yes no) +(EQ (TSTWconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBZ [int64(ntz64(int64(uint32(c))))] x yes no) +(NE (TSTWconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no) + +// Test sign-bit for signed comparisons against zero +(GE (CMPWconst [0] x) yes no) => (TBZ [31] x yes no) +(GE (CMPconst [0] x) yes no) => (TBZ [63] x yes no) +(LT (CMPWconst [0] x) yes no) => (TBNZ [31] x yes no) +(LT (CMPconst [0] x) yes no) => (TBNZ [63] x yes no) + +// fold offset into address +(ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => + (MOVDaddr [int32(off1)+off2] {sym} ptr) + +// fold address into load/store. +// Do not fold global variable access in -dynlink mode, where it will +// be rewritten to use the GOT via REGTMP, which currently cannot handle +// large offset. 
+(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVBload [off1+int32(off2)] {sym} ptr mem) +(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVBUload [off1+int32(off2)] {sym} ptr mem) +(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVHload [off1+int32(off2)] {sym} ptr mem) +(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVHUload [off1+int32(off2)] {sym} ptr mem) +(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVWload [off1+int32(off2)] {sym} ptr mem) +(MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVWUload [off1+int32(off2)] {sym} ptr mem) +(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVDload [off1+int32(off2)] {sym} ptr mem) +(LDP [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (LDP [off1+int32(off2)] {sym} ptr mem) +(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (FMOVSload [off1+int32(off2)] {sym} ptr mem) +(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (FMOVDload [off1+int32(off2)] {sym} ptr mem) + +// register indexed load +(MOVDload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx ptr idx mem) +(MOVWUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => 
(MOVWUloadidx ptr idx mem) +(MOVWload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx ptr idx mem) +(MOVHUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr idx mem) +(MOVHload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx ptr idx mem) +(MOVBUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr idx mem) +(MOVBload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx ptr idx mem) +(FMOVSload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (FMOVSloadidx ptr idx mem) +(FMOVDload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (FMOVDloadidx ptr idx mem) + +(MOVDloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem) +(MOVDloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem) +(MOVWUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem) +(MOVWUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem) +(MOVWloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem) +(MOVWloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem) +(MOVHUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem) +(MOVHUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem) +(MOVHloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem) +(MOVHloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem) +(MOVBUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem) +(MOVBUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem) +(MOVBloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem) +(MOVBloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem) +(FMOVSloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (FMOVSload 
[int32(c)] ptr mem) +(FMOVSloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (FMOVSload [int32(c)] ptr mem) +(FMOVDloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (FMOVDload [int32(c)] ptr mem) +(FMOVDloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (FMOVDload [int32(c)] ptr mem) + +// shifted register indexed load +(MOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx8 ptr idx mem) +(MOVWUload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx4 ptr idx mem) +(MOVWload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx4 ptr idx mem) +(MOVHUload [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx2 ptr idx mem) +(MOVHload [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx2 ptr idx mem) +(MOVDloadidx ptr (SLLconst [3] idx) mem) => (MOVDloadidx8 ptr idx mem) +(MOVWloadidx ptr (SLLconst [2] idx) mem) => (MOVWloadidx4 ptr idx mem) +(MOVWUloadidx ptr (SLLconst [2] idx) mem) => (MOVWUloadidx4 ptr idx mem) +(MOVHloadidx ptr (SLLconst [1] idx) mem) => (MOVHloadidx2 ptr idx mem) +(MOVHUloadidx ptr (SLLconst [1] idx) mem) => (MOVHUloadidx2 ptr idx mem) +(MOVHloadidx ptr (ADD idx idx) mem) => (MOVHloadidx2 ptr idx mem) +(MOVHUloadidx ptr (ADD idx idx) mem) => (MOVHUloadidx2 ptr idx mem) +(MOVDloadidx (SLLconst [3] idx) ptr mem) => (MOVDloadidx8 ptr idx mem) +(MOVWloadidx (SLLconst [2] idx) ptr mem) => (MOVWloadidx4 ptr idx mem) +(MOVWUloadidx (SLLconst [2] idx) ptr mem) => (MOVWUloadidx4 ptr idx mem) +(MOVHloadidx (ADD idx idx) ptr mem) => (MOVHloadidx2 ptr idx mem) +(MOVHUloadidx (ADD idx idx) ptr mem) => (MOVHUloadidx2 ptr idx mem) +(MOVDloadidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (MOVDload [int32(c)<<3] ptr mem) +(MOVWUloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWUload [int32(c)<<2] ptr mem) +(MOVWloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWload [int32(c)<<2] ptr 
mem) +(MOVHUloadidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHUload [int32(c)<<1] ptr mem) +(MOVHloadidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHload [int32(c)<<1] ptr mem) + +(FMOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (FMOVDloadidx8 ptr idx mem) +(FMOVSload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (FMOVSloadidx4 ptr idx mem) +(FMOVDloadidx ptr (SLLconst [3] idx) mem) => (FMOVDloadidx8 ptr idx mem) +(FMOVSloadidx ptr (SLLconst [2] idx) mem) => (FMOVSloadidx4 ptr idx mem) +(FMOVDloadidx (SLLconst [3] idx) ptr mem) => (FMOVDloadidx8 ptr idx mem) +(FMOVSloadidx (SLLconst [2] idx) ptr mem) => (FMOVSloadidx4 ptr idx mem) +(FMOVDloadidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (FMOVDload ptr [int32(c)<<3] mem) +(FMOVSloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (FMOVSload ptr [int32(c)<<2] mem) + +(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVBstore [off1+int32(off2)] {sym} ptr val mem) +(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVHstore [off1+int32(off2)] {sym} ptr val mem) +(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVWstore [off1+int32(off2)] {sym} ptr val mem) +(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVDstore [off1+int32(off2)] {sym} ptr val mem) +(STP [off1] {sym} (ADDconst [off2] ptr) val1 val2 mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (STP [off1+int32(off2)] {sym} ptr val1 val2 mem) +(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + 
(FMOVSstore [off1+int32(off2)] {sym} ptr val mem) +(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (FMOVDstore [off1+int32(off2)] {sym} ptr val mem) +(MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) +(MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) +(MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) +(MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVDstorezero [off1+int32(off2)] {sym} ptr mem) +(MOVQstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVQstorezero [off1+int32(off2)] {sym} ptr mem) + +// register indexed store +(MOVDstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr idx val mem) +(MOVWstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr idx val mem) +(MOVHstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr idx val mem) +(MOVBstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr idx val mem) +(FMOVDstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (FMOVDstoreidx ptr idx val mem) +(FMOVSstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (FMOVSstoreidx ptr idx val mem) +(MOVDstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVDstore [int32(c)] ptr val mem) +(MOVDstoreidx (MOVDconst [c]) idx 
val mem) && is32Bit(c) => (MOVDstore [int32(c)] idx val mem) +(MOVWstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVWstore [int32(c)] ptr val mem) +(MOVWstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVWstore [int32(c)] idx val mem) +(MOVHstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVHstore [int32(c)] ptr val mem) +(MOVHstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVHstore [int32(c)] idx val mem) +(MOVBstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVBstore [int32(c)] ptr val mem) +(MOVBstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVBstore [int32(c)] idx val mem) +(FMOVDstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (FMOVDstore [int32(c)] ptr val mem) +(FMOVDstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (FMOVDstore [int32(c)] idx val mem) +(FMOVSstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (FMOVSstore [int32(c)] ptr val mem) +(FMOVSstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (FMOVSstore [int32(c)] idx val mem) + +// shifted register indexed store +(MOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx8 ptr idx val mem) +(MOVWstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx4 ptr idx val mem) +(MOVHstore [off] {sym} (ADDshiftLL [1] ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx2 ptr idx val mem) +(MOVDstoreidx ptr (SLLconst [3] idx) val mem) => (MOVDstoreidx8 ptr idx val mem) +(MOVWstoreidx ptr (SLLconst [2] idx) val mem) => (MOVWstoreidx4 ptr idx val mem) +(MOVHstoreidx ptr (SLLconst [1] idx) val mem) => (MOVHstoreidx2 ptr idx val mem) +(MOVHstoreidx ptr (ADD idx idx) val mem) => (MOVHstoreidx2 ptr idx val mem) +(MOVDstoreidx (SLLconst [3] idx) ptr val mem) => (MOVDstoreidx8 ptr idx val mem) +(MOVWstoreidx (SLLconst [2] idx) ptr val mem) => (MOVWstoreidx4 ptr idx val mem) +(MOVHstoreidx (SLLconst [1] idx) ptr val mem) => (MOVHstoreidx2 ptr idx val mem) 
+(MOVHstoreidx (ADD idx idx) ptr val mem) => (MOVHstoreidx2 ptr idx val mem) +(MOVDstoreidx8 ptr (MOVDconst [c]) val mem) && is32Bit(c<<3) => (MOVDstore [int32(c)<<3] ptr val mem) +(MOVWstoreidx4 ptr (MOVDconst [c]) val mem) && is32Bit(c<<2) => (MOVWstore [int32(c)<<2] ptr val mem) +(MOVHstoreidx2 ptr (MOVDconst [c]) val mem) && is32Bit(c<<1) => (MOVHstore [int32(c)<<1] ptr val mem) + +(FMOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) && off == 0 && sym == nil => (FMOVDstoreidx8 ptr idx val mem) +(FMOVSstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) && off == 0 && sym == nil => (FMOVSstoreidx4 ptr idx val mem) +(FMOVDstoreidx ptr (SLLconst [3] idx) val mem) => (FMOVDstoreidx8 ptr idx val mem) +(FMOVSstoreidx ptr (SLLconst [2] idx) val mem) => (FMOVSstoreidx4 ptr idx val mem) +(FMOVDstoreidx (SLLconst [3] idx) ptr val mem) => (FMOVDstoreidx8 ptr idx val mem) +(FMOVSstoreidx (SLLconst [2] idx) ptr val mem) => (FMOVSstoreidx4 ptr idx val mem) +(FMOVDstoreidx8 ptr (MOVDconst [c]) val mem) && is32Bit(c<<3) => (FMOVDstore [int32(c)<<3] ptr val mem) +(FMOVSstoreidx4 ptr (MOVDconst [c]) val mem) && is32Bit(c<<2) => (FMOVSstore [int32(c)<<2] ptr val mem) + +(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) 
+ && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(LDP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (LDP [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + +(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) +(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) +(MOVWstore 
[off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) +(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) +(STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem) +(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) +(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) +(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + && 
canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVQstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOVQstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + +// store zero +(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem) +(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem) +(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem) +(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem) +(STP [off] {sym} ptr (MOVDconst [0]) (MOVDconst [0]) mem) => (MOVQstorezero [off] {sym} ptr mem) + +// register indexed store zero +(MOVDstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVDstorezeroidx ptr idx mem) +(MOVWstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWstorezeroidx ptr idx mem) +(MOVHstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHstorezeroidx ptr idx mem) +(MOVBstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBstorezeroidx ptr idx mem) +(MOVDstoreidx ptr idx (MOVDconst [0]) mem) => (MOVDstorezeroidx ptr idx mem) +(MOVWstoreidx ptr idx (MOVDconst [0]) mem) => (MOVWstorezeroidx ptr idx mem) +(MOVHstoreidx ptr idx (MOVDconst [0]) mem) => (MOVHstorezeroidx ptr idx mem) +(MOVBstoreidx ptr idx (MOVDconst [0]) mem) => (MOVBstorezeroidx ptr idx mem) +(MOVDstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVDstorezero [int32(c)] ptr mem) +(MOVDstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVDstorezero [int32(c)] idx mem) +(MOVWstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWstorezero [int32(c)] ptr mem) +(MOVWstorezeroidx (MOVDconst [c]) idx mem) && 
is32Bit(c) => (MOVWstorezero [int32(c)] idx mem) +(MOVHstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHstorezero [int32(c)] ptr mem) +(MOVHstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVHstorezero [int32(c)] idx mem) +(MOVBstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBstorezero [int32(c)] ptr mem) +(MOVBstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVBstorezero [int32(c)] idx mem) + +// shifted register indexed store zero +(MOVDstorezero [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (MOVDstorezeroidx8 ptr idx mem) +(MOVWstorezero [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWstorezeroidx4 ptr idx mem) +(MOVHstorezero [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHstorezeroidx2 ptr idx mem) +(MOVDstorezeroidx ptr (SLLconst [3] idx) mem) => (MOVDstorezeroidx8 ptr idx mem) +(MOVWstorezeroidx ptr (SLLconst [2] idx) mem) => (MOVWstorezeroidx4 ptr idx mem) +(MOVHstorezeroidx ptr (SLLconst [1] idx) mem) => (MOVHstorezeroidx2 ptr idx mem) +(MOVHstorezeroidx ptr (ADD idx idx) mem) => (MOVHstorezeroidx2 ptr idx mem) +(MOVDstorezeroidx (SLLconst [3] idx) ptr mem) => (MOVDstorezeroidx8 ptr idx mem) +(MOVWstorezeroidx (SLLconst [2] idx) ptr mem) => (MOVWstorezeroidx4 ptr idx mem) +(MOVHstorezeroidx (SLLconst [1] idx) ptr mem) => (MOVHstorezeroidx2 ptr idx mem) +(MOVHstorezeroidx (ADD idx idx) ptr mem) => (MOVHstorezeroidx2 ptr idx mem) +(MOVDstoreidx8 ptr idx (MOVDconst [0]) mem) => (MOVDstorezeroidx8 ptr idx mem) +(MOVWstoreidx4 ptr idx (MOVDconst [0]) mem) => (MOVWstorezeroidx4 ptr idx mem) +(MOVHstoreidx2 ptr idx (MOVDconst [0]) mem) => (MOVHstorezeroidx2 ptr idx mem) +(MOVDstorezeroidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (MOVDstorezero [int32(c<<3)] ptr mem) +(MOVWstorezeroidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWstorezero [int32(c<<2)] ptr mem) +(MOVHstorezeroidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => 
(MOVHstorezero [int32(c<<1)] ptr mem) + +// replace load from same location as preceding store with zero/sign extension (or copy in case of full width) +// these seem to have bad interaction with other rules, resulting in slower code +//(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBreg x) +//(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBUreg x) +//(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHreg x) +//(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHUreg x) +//(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWreg x) +//(MOVWUload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWUreg x) +//(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x +//(FMOVSload [off] {sym} ptr (FMOVSstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x +//(FMOVDload [off] {sym} ptr (FMOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x +//(LDP [off] {sym} ptr (STP [off2] {sym2} ptr2 x y _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x y + +(MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0]) +(MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0]) +(MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0]) +(MOVHUload [off] {sym} ptr (MOVHstorezero 
[off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0]) +(MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0]) +(MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0]) +(MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0]) + +(MOVBloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _)) + && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0]) +(MOVBUloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _)) + && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0]) +(MOVHloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _)) + && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0]) +(MOVHUloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _)) + && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0]) +(MOVWloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _)) + && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0]) +(MOVWUloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _)) + && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0]) +(MOVDloadidx ptr idx (MOVDstorezeroidx ptr2 idx2 _)) + && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0]) + +(MOVHloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0]) +(MOVHUloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0]) +(MOVWloadidx4 ptr idx 
(MOVWstorezeroidx4 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0]) +(MOVWUloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0]) +(MOVDloadidx8 ptr idx (MOVDstorezeroidx8 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0]) + +// don't extend before store +(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem) +(MOVBstoreidx ptr idx (MOVBreg x) mem) => (MOVBstoreidx ptr idx x mem) +(MOVBstoreidx ptr idx (MOVBUreg x) mem) => (MOVBstoreidx ptr idx x mem) +(MOVBstoreidx ptr idx (MOVHreg x) mem) => (MOVBstoreidx ptr idx x mem) +(MOVBstoreidx ptr idx (MOVHUreg x) mem) => (MOVBstoreidx ptr idx x mem) +(MOVBstoreidx ptr idx (MOVWreg x) mem) => (MOVBstoreidx ptr idx x mem) +(MOVBstoreidx ptr idx (MOVWUreg x) mem) => (MOVBstoreidx ptr idx x mem) +(MOVHstoreidx ptr idx (MOVHreg x) mem) => (MOVHstoreidx ptr idx x mem) +(MOVHstoreidx ptr idx (MOVHUreg x) mem) => (MOVHstoreidx ptr idx x mem) +(MOVHstoreidx ptr idx (MOVWreg x) mem) => 
(MOVHstoreidx ptr idx x mem) +(MOVHstoreidx ptr idx (MOVWUreg x) mem) => (MOVHstoreidx ptr idx x mem) +(MOVWstoreidx ptr idx (MOVWreg x) mem) => (MOVWstoreidx ptr idx x mem) +(MOVWstoreidx ptr idx (MOVWUreg x) mem) => (MOVWstoreidx ptr idx x mem) +(MOVHstoreidx2 ptr idx (MOVHreg x) mem) => (MOVHstoreidx2 ptr idx x mem) +(MOVHstoreidx2 ptr idx (MOVHUreg x) mem) => (MOVHstoreidx2 ptr idx x mem) +(MOVHstoreidx2 ptr idx (MOVWreg x) mem) => (MOVHstoreidx2 ptr idx x mem) +(MOVHstoreidx2 ptr idx (MOVWUreg x) mem) => (MOVHstoreidx2 ptr idx x mem) +(MOVWstoreidx4 ptr idx (MOVWreg x) mem) => (MOVWstoreidx4 ptr idx x mem) +(MOVWstoreidx4 ptr idx (MOVWUreg x) mem) => (MOVWstoreidx4 ptr idx x mem) + +// if a register move has only 1 use, just use the same register without emitting instruction +// MOVDnop doesn't emit instruction, only for ensuring the type. +(MOVDreg x) && x.Uses == 1 => (MOVDnop x) + +// TODO: we should be able to get rid of MOVDnop all together. +// But for now, this is enough to get rid of lots of them. 
+(MOVDnop (MOVDconst [c])) => (MOVDconst [c]) + +// fold constant into arithmetic ops +(ADD x (MOVDconst [c])) && !t.IsPtr() => (ADDconst [c] x) +(SUB x (MOVDconst [c])) => (SUBconst [c] x) +(AND x (MOVDconst [c])) => (ANDconst [c] x) +(OR x (MOVDconst [c])) => (ORconst [c] x) +(XOR x (MOVDconst [c])) => (XORconst [c] x) +(TST x (MOVDconst [c])) => (TSTconst [c] x) +(TSTW x (MOVDconst [c])) => (TSTWconst [int32(c)] x) +(CMN x (MOVDconst [c])) => (CMNconst [c] x) +(CMNW x (MOVDconst [c])) => (CMNWconst [int32(c)] x) +(BIC x (MOVDconst [c])) => (ANDconst [^c] x) +(EON x (MOVDconst [c])) => (XORconst [^c] x) +(ORN x (MOVDconst [c])) => (ORconst [^c] x) + +(SLL x (MOVDconst [c])) => (SLLconst x [c&63]) +(SRL x (MOVDconst [c])) => (SRLconst x [c&63]) +(SRA x (MOVDconst [c])) => (SRAconst x [c&63]) +(SLL x (ANDconst [63] y)) => (SLL x y) +(SRL x (ANDconst [63] y)) => (SRL x y) +(SRA x (ANDconst [63] y)) => (SRA x y) + +(CMP x (MOVDconst [c])) => (CMPconst [c] x) +(CMP (MOVDconst [c]) x) => (InvertFlags (CMPconst [c] x)) +(CMPW x (MOVDconst [c])) => (CMPWconst [int32(c)] x) +(CMPW (MOVDconst [c]) x) => (InvertFlags (CMPWconst [int32(c)] x)) + +(ROR x (MOVDconst [c])) => (RORconst x [c&63]) +(RORW x (MOVDconst [c])) => (RORWconst x [c&31]) + +(ADDSflags x (MOVDconst [c])) => (ADDSconstflags [c] x) + +(ADDconst [c] y) && c < 0 => (SUBconst [-c] y) + +// Canonicalize the order of arguments to comparisons - helps with CSE. 
+((CMP|CMPW) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW) y x)) + +// mul-neg => mneg +(NEG (MUL x y)) => (MNEG x y) +(NEG (MULW x y)) && v.Type.Size() <= 4 => (MNEGW x y) +(MUL (NEG x) y) => (MNEG x y) +(MULW (NEG x) y) => (MNEGW x y) + +// madd/msub +(ADD a l:(MUL x y)) && l.Uses==1 && clobber(l) => (MADD a x y) +(SUB a l:(MUL x y)) && l.Uses==1 && clobber(l) => (MSUB a x y) +(ADD a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MSUB a x y) +(SUB a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MADD a x y) + +(ADD a l:(MULW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MADDW a x y) +(SUB a l:(MULW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MSUBW a x y) +(ADD a l:(MNEGW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MSUBW a x y) +(SUB a l:(MNEGW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MADDW a x y) + +// optimize ADCSflags, SBCSflags and friends +(ADCSflags x y (Select1 (ADDSconstflags [-1] (ADCzerocarry c)))) => (ADCSflags x y c) +(ADCSflags x y (Select1 (ADDSconstflags [-1] (MOVDconst [0])))) => (ADDSflags x y) +(SBCSflags x y (Select1 (NEGSflags (NEG (NGCzerocarry bo))))) => (SBCSflags x y bo) +(SBCSflags x y (Select1 (NEGSflags (MOVDconst [0])))) => (SUBSflags x y) + +// mul by constant +(MUL x (MOVDconst [-1])) => (NEG x) +(MUL _ (MOVDconst [0])) => (MOVDconst [0]) +(MUL x (MOVDconst [1])) => x +(MUL x (MOVDconst [c])) && isPowerOfTwo64(c) => (SLLconst [log64(c)] x) +(MUL x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c >= 3 => (ADDshiftLL x x [log64(c-1)]) +(MUL x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c >= 7 => (ADDshiftLL (NEG x) x [log64(c+1)]) +(MUL x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SLLconst [log64(c/3)] (ADDshiftLL x x [1])) +(MUL x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (SLLconst [log64(c/5)] (ADDshiftLL x x [2])) +(MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SLLconst [log64(c/7)] (ADDshiftLL (NEG x) x [3])) +(MUL x (MOVDconst 
[c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (SLLconst [log64(c/9)] (ADDshiftLL x x [3])) + +(MULW x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (NEG x)) +(MULW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0]) +(MULW x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg x) +(MULW x (MOVDconst [c])) && isPowerOfTwo64(c) => (MOVWUreg (SLLconst [log64(c)] x)) +(MULW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (MOVWUreg (ADDshiftLL x x [log64(c-1)])) +(MULW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (MOVWUreg (ADDshiftLL (NEG x) x [log64(c+1)])) +(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (SLLconst [log64(c/3)] (ADDshiftLL x x [1]))) +(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (SLLconst [log64(c/5)] (ADDshiftLL x x [2]))) +(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (SLLconst [log64(c/7)] (ADDshiftLL (NEG x) x [3]))) +(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (SLLconst [log64(c/9)] (ADDshiftLL x x [3]))) + +// mneg by constant +(MNEG x (MOVDconst [-1])) => x +(MNEG _ (MOVDconst [0])) => (MOVDconst [0]) +(MNEG x (MOVDconst [1])) => (NEG x) +(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c) => (NEG (SLLconst [log64(c)] x)) +(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c >= 3 => (NEG (ADDshiftLL x x [log64(c-1)])) +(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c >= 7 => (NEG (ADDshiftLL (NEG x) x [log64(c+1)])) +(MNEG x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SLLconst [log64(c/3)] (SUBshiftLL x x [2])) +(MNEG x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (NEG (SLLconst [log64(c/5)] (ADDshiftLL x x [2]))) +(MNEG x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SLLconst [log64(c/7)] (SUBshiftLL x x [3])) +(MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (NEG (SLLconst [log64(c/9)] (ADDshiftLL x x [3]))) + + 
+(MNEGW x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg x) +(MNEGW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0]) +(MNEGW x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (NEG x)) +(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c) => (NEG (SLLconst [log64(c)] x)) +(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (MOVWUreg (NEG (ADDshiftLL x x [log64(c-1)]))) +(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (MOVWUreg (NEG (ADDshiftLL (NEG x) x [log64(c+1)]))) +(MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (SLLconst [log64(c/3)] (SUBshiftLL x x [2]))) +(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (NEG (SLLconst [log64(c/5)] (ADDshiftLL x x [2])))) +(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (SLLconst [log64(c/7)] (SUBshiftLL x x [3]))) +(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (NEG (SLLconst [log64(c/9)] (ADDshiftLL x x [3])))) + + +(MADD a x (MOVDconst [-1])) => (SUB a x) +(MADD a _ (MOVDconst [0])) => a +(MADD a x (MOVDconst [1])) => (ADD a x) +(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)]) +(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c>=3 => (ADD a (ADDshiftLL x x [log64(c-1)])) +(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c>=7 => (SUB a (SUBshiftLL x x [log64(c+1)])) +(MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SUBshiftLL a (SUBshiftLL x x [2]) [log64(c/3)]) +(MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (ADDshiftLL a (ADDshiftLL x x [2]) [log64(c/5)]) +(MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SUBshiftLL a (SUBshiftLL x x [3]) [log64(c/7)]) +(MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (ADDshiftLL a (ADDshiftLL x x [3]) [log64(c/9)]) + +(MADD a (MOVDconst [-1]) x) => (SUB a x) +(MADD a (MOVDconst [0]) _) => a 
+(MADD a (MOVDconst [1]) x) => (ADD a x) +(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)]) +(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && c>=3 => (ADD a (ADDshiftLL x x [log64(c-1)])) +(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && c>=7 => (SUB a (SUBshiftLL x x [log64(c+1)])) +(MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) => (SUBshiftLL a (SUBshiftLL x x [2]) [log64(c/3)]) +(MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) => (ADDshiftLL a (ADDshiftLL x x [2]) [log64(c/5)]) +(MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (SUBshiftLL a (SUBshiftLL x x [3]) [log64(c/7)]) +(MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (ADDshiftLL a (ADDshiftLL x x [3]) [log64(c/9)]) + +(MADDW a x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (SUB a x)) +(MADDW a _ (MOVDconst [c])) && int32(c)==0 => (MOVWUreg a) +(MADDW a x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (ADD a x)) +(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (MOVWUreg (ADDshiftLL a x [log64(c)])) +(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (MOVWUreg (ADD a (ADDshiftLL x x [log64(c-1)]))) +(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (MOVWUreg (SUB a (SUBshiftLL x x [log64(c+1)]))) +(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (SUBshiftLL a (SUBshiftLL x x [2]) [log64(c/3)])) +(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (ADDshiftLL a (ADDshiftLL x x [2]) [log64(c/5)])) +(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (SUBshiftLL a (SUBshiftLL x x [3]) [log64(c/7)])) +(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (ADDshiftLL a (ADDshiftLL x x [3]) [log64(c/9)])) + +(MADDW a (MOVDconst [c]) x) && int32(c)==-1 => (MOVWUreg (SUB a x)) +(MADDW a (MOVDconst [c]) _) && int32(c)==0 => 
(MOVWUreg a) +(MADDW a (MOVDconst [c]) x) && int32(c)==1 => (MOVWUreg (ADD a x)) +(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (MOVWUreg (ADDshiftLL a x [log64(c)])) +(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (MOVWUreg (ADD a (ADDshiftLL x x [log64(c-1)]))) +(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (MOVWUreg (SUB a (SUBshiftLL x x [log64(c+1)]))) +(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (SUBshiftLL a (SUBshiftLL x x [2]) [log64(c/3)])) +(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (ADDshiftLL a (ADDshiftLL x x [2]) [log64(c/5)])) +(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (SUBshiftLL a (SUBshiftLL x x [3]) [log64(c/7)])) +(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (ADDshiftLL a (ADDshiftLL x x [3]) [log64(c/9)])) + +(MSUB a x (MOVDconst [-1])) => (ADD a x) +(MSUB a _ (MOVDconst [0])) => a +(MSUB a x (MOVDconst [1])) => (SUB a x) +(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)]) +(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c>=3 => (SUB a (ADDshiftLL x x [log64(c-1)])) +(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c>=7 => (ADD a (SUBshiftLL x x [log64(c+1)])) +(MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (ADDshiftLL a (SUBshiftLL x x [2]) [log64(c/3)]) +(MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (SUBshiftLL a (ADDshiftLL x x [2]) [log64(c/5)]) +(MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (ADDshiftLL a (SUBshiftLL x x [3]) [log64(c/7)]) +(MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (SUBshiftLL a (ADDshiftLL x x [3]) [log64(c/9)]) + +(MSUB a (MOVDconst [-1]) x) => (ADD a x) +(MSUB a (MOVDconst [0]) _) => a +(MSUB a (MOVDconst [1]) x) => (SUB a x) +(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c) 
=> (SUBshiftLL a x [log64(c)]) +(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && c>=3 => (SUB a (ADDshiftLL x x [log64(c-1)])) +(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && c>=7 => (ADD a (SUBshiftLL x x [log64(c+1)])) +(MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) => (ADDshiftLL a (SUBshiftLL x x [2]) [log64(c/3)]) +(MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) => (SUBshiftLL a (ADDshiftLL x x [2]) [log64(c/5)]) +(MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (ADDshiftLL a (SUBshiftLL x x [3]) [log64(c/7)]) +(MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (SUBshiftLL a (ADDshiftLL x x [3]) [log64(c/9)]) + +(MSUBW a x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (ADD a x)) +(MSUBW a _ (MOVDconst [c])) && int32(c)==0 => (MOVWUreg a) +(MSUBW a x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (SUB a x)) +(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (MOVWUreg (SUBshiftLL a x [log64(c)])) +(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (MOVWUreg (SUB a (ADDshiftLL x x [log64(c-1)]))) +(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (MOVWUreg (ADD a (SUBshiftLL x x [log64(c+1)]))) +(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (ADDshiftLL a (SUBshiftLL x x [2]) [log64(c/3)])) +(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (SUBshiftLL a (ADDshiftLL x x [2]) [log64(c/5)])) +(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (ADDshiftLL a (SUBshiftLL x x [3]) [log64(c/7)])) +(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (SUBshiftLL a (ADDshiftLL x x [3]) [log64(c/9)])) + +(MSUBW a (MOVDconst [c]) x) && int32(c)==-1 => (MOVWUreg (ADD a x)) +(MSUBW a (MOVDconst [c]) _) && int32(c)==0 => (MOVWUreg a) +(MSUBW a (MOVDconst [c]) x) && int32(c)==1 => (MOVWUreg (SUB a x)) +(MSUBW a 
(MOVDconst [c]) x) && isPowerOfTwo64(c) => (MOVWUreg (SUBshiftLL a x [log64(c)])) +(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (MOVWUreg (SUB a (ADDshiftLL x x [log64(c-1)]))) +(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (MOVWUreg (ADD a (SUBshiftLL x x [log64(c+1)]))) +(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (MOVWUreg (ADDshiftLL a (SUBshiftLL x x [2]) [log64(c/3)])) +(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (MOVWUreg (SUBshiftLL a (ADDshiftLL x x [2]) [log64(c/5)])) +(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (MOVWUreg (ADDshiftLL a (SUBshiftLL x x [3]) [log64(c/7)])) +(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (MOVWUreg (SUBshiftLL a (ADDshiftLL x x [3]) [log64(c/9)])) + +// div by constant +(UDIV x (MOVDconst [1])) => x +(UDIV x (MOVDconst [c])) && isPowerOfTwo64(c) => (SRLconst [log64(c)] x) +(UDIVW x (MOVDconst [c])) && uint32(c)==1 => (MOVWUreg x) +(UDIVW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (SRLconst [log64(c)] (MOVWUreg x)) +(UMOD _ (MOVDconst [1])) => (MOVDconst [0]) +(UMOD x (MOVDconst [c])) && isPowerOfTwo64(c) => (ANDconst [c-1] x) +(UMODW _ (MOVDconst [c])) && uint32(c)==1 => (MOVDconst [0]) +(UMODW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (ANDconst [c-1] x) + +// generic simplifications +(ADD x (NEG y)) => (SUB x y) +(SUB x x) => (MOVDconst [0]) +(AND x x) => x +(OR x x) => x +(XOR x x) => (MOVDconst [0]) +(BIC x x) => (MOVDconst [0]) +(EON x x) => (MOVDconst [-1]) +(ORN x x) => (MOVDconst [-1]) +(AND x (MVN y)) => (BIC x y) +(XOR x (MVN y)) => (EON x y) +(OR x (MVN y)) => (ORN x y) +(MVN (XOR x y)) => (EON x y) +(NEG (NEG x)) => x + +(CSEL [cc] (MOVDconst [-1]) (MOVDconst [0]) flag) => (CSETM [cc] flag) +(CSEL [cc] (MOVDconst [0]) (MOVDconst [-1]) flag) => (CSETM [arm64Negate(cc)] flag) +(CSEL [cc] x (MOVDconst 
[0]) flag) => (CSEL0 [cc] x flag) +(CSEL [cc] (MOVDconst [0]) y flag) => (CSEL0 [arm64Negate(cc)] y flag) +(CSEL [cc] x (ADDconst [1] a) flag) => (CSINC [cc] x a flag) +(CSEL [cc] (ADDconst [1] a) x flag) => (CSINC [arm64Negate(cc)] x a flag) +(CSEL [cc] x (MVN a) flag) => (CSINV [cc] x a flag) +(CSEL [cc] (MVN a) x flag) => (CSINV [arm64Negate(cc)] x a flag) +(CSEL [cc] x (NEG a) flag) => (CSNEG [cc] x a flag) +(CSEL [cc] (NEG a) x flag) => (CSNEG [arm64Negate(cc)] x a flag) + +(SUB x (SUB y z)) => (SUB (ADD x z) y) +(SUB (SUB x y) z) => (SUB x (ADD y z)) + +// remove redundant *const ops +(ADDconst [0] x) => x +(SUBconst [0] x) => x +(ANDconst [0] _) => (MOVDconst [0]) +(ANDconst [-1] x) => x +(ORconst [0] x) => x +(ORconst [-1] _) => (MOVDconst [-1]) +(XORconst [0] x) => x +(XORconst [-1] x) => (MVN x) + +// generic constant folding +(ADDconst [c] (MOVDconst [d])) => (MOVDconst [c+d]) +(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x) +(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x) +(SUBconst [c] (MOVDconst [d])) => (MOVDconst [d-c]) +(SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x) +(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x) +(SLLconst [c] (MOVDconst [d])) => (MOVDconst [d< (MOVDconst [int64(uint64(d)>>uint64(c))]) +(SRAconst [c] (MOVDconst [d])) => (MOVDconst [d>>uint64(c)]) +(MUL (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c*d]) +(MNEG (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [-c*d]) +(MULW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint32(c*d))]) +(MNEGW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint32(-c*d))]) +(MADD (MOVDconst [c]) x y) => (ADDconst [c] (MUL x y)) +(MSUB (MOVDconst [c]) x y) => (ADDconst [c] (MNEG x y)) +(MADD a (MOVDconst [c]) (MOVDconst [d])) => (ADDconst [c*d] a) +(MSUB a (MOVDconst [c]) (MOVDconst [d])) => (SUBconst [c*d] a) +(MADDW (MOVDconst [c]) x y) => (MOVWUreg (ADDconst [c] (MULW x y))) +(MSUBW (MOVDconst [c]) x y) => (MOVWUreg (ADDconst [c] (MNEGW x y))) +(MADDW a 
(MOVDconst [c]) (MOVDconst [d])) => (MOVWUreg (ADDconst [c*d] a)) +(MSUBW a (MOVDconst [c]) (MOVDconst [d])) => (MOVWUreg (SUBconst [c*d] a)) +(DIV (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c/d]) +(UDIV (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)/uint64(d))]) +(DIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(int32(c)/int32(d)))]) +(UDIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)/uint32(d))]) +(MOD (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c%d]) +(UMOD (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)%uint64(d))]) +(MODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(int32(c)%int32(d)))]) +(UMODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)%uint32(d))]) +(ANDconst [c] (MOVDconst [d])) => (MOVDconst [c&d]) +(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x) +(ANDconst [c] (MOVWUreg x)) => (ANDconst [c&(1<<32-1)] x) +(ANDconst [c] (MOVHUreg x)) => (ANDconst [c&(1<<16-1)] x) +(ANDconst [c] (MOVBUreg x)) => (ANDconst [c&(1<<8-1)] x) +(MOVWUreg (ANDconst [c] x)) => (ANDconst [c&(1<<32-1)] x) +(MOVHUreg (ANDconst [c] x)) => (ANDconst [c&(1<<16-1)] x) +(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&(1<<8-1)] x) +(ORconst [c] (MOVDconst [d])) => (MOVDconst [c|d]) +(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x) +(XORconst [c] (MOVDconst [d])) => (MOVDconst [c^d]) +(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x) +(MVN (MOVDconst [c])) => (MOVDconst [^c]) +(NEG (MOVDconst [c])) => (MOVDconst [-c]) +(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))]) +(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))]) +(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))]) +(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))]) +(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))]) +(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))]) +(MOVDreg 
(MOVDconst [c])) => (MOVDconst [c]) + +// constant comparisons +(CMPconst (MOVDconst [x]) [y]) => (FlagConstant [subFlags64(x,y)]) +(CMPWconst (MOVDconst [x]) [y]) => (FlagConstant [subFlags32(int32(x),y)]) +(TSTconst (MOVDconst [x]) [y]) => (FlagConstant [logicFlags64(x&y)]) +(TSTWconst (MOVDconst [x]) [y]) => (FlagConstant [logicFlags32(int32(x)&y)]) +(CMNconst (MOVDconst [x]) [y]) => (FlagConstant [addFlags64(x,y)]) +(CMNWconst (MOVDconst [x]) [y]) => (FlagConstant [addFlags32(int32(x),y)]) + +// other known comparisons +(CMPconst (MOVBUreg _) [c]) && 0xff < c => (FlagConstant [subFlags64(0,1)]) +(CMPconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags64(0,1)]) +(CMPconst (MOVWUreg _) [c]) && 0xffffffff < c => (FlagConstant [subFlags64(0,1)]) +(CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n => (FlagConstant [subFlags64(0,1)]) +(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 63 && (1< (FlagConstant [subFlags64(0,1)]) +(CMPWconst (MOVBUreg _) [c]) && 0xff < c => (FlagConstant [subFlags64(0,1)]) +(CMPWconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags64(0,1)]) + +// absorb flag constants into branches +(EQ (FlagConstant [fc]) yes no) && fc.eq() => (First yes no) +(EQ (FlagConstant [fc]) yes no) && !fc.eq() => (First no yes) + +(NE (FlagConstant [fc]) yes no) && fc.ne() => (First yes no) +(NE (FlagConstant [fc]) yes no) && !fc.ne() => (First no yes) + +(LT (FlagConstant [fc]) yes no) && fc.lt() => (First yes no) +(LT (FlagConstant [fc]) yes no) && !fc.lt() => (First no yes) + +(LE (FlagConstant [fc]) yes no) && fc.le() => (First yes no) +(LE (FlagConstant [fc]) yes no) && !fc.le() => (First no yes) + +(GT (FlagConstant [fc]) yes no) && fc.gt() => (First yes no) +(GT (FlagConstant [fc]) yes no) && !fc.gt() => (First no yes) + +(GE (FlagConstant [fc]) yes no) && fc.ge() => (First yes no) +(GE (FlagConstant [fc]) yes no) && !fc.ge() => (First no yes) + +(ULT (FlagConstant [fc]) yes no) && fc.ult() => (First yes no) +(ULT 
(FlagConstant [fc]) yes no) && !fc.ult() => (First no yes) + +(ULE (FlagConstant [fc]) yes no) && fc.ule() => (First yes no) +(ULE (FlagConstant [fc]) yes no) && !fc.ule() => (First no yes) + +(UGT (FlagConstant [fc]) yes no) && fc.ugt() => (First yes no) +(UGT (FlagConstant [fc]) yes no) && !fc.ugt() => (First no yes) + +(UGE (FlagConstant [fc]) yes no) && fc.uge() => (First yes no) +(UGE (FlagConstant [fc]) yes no) && !fc.uge() => (First no yes) + +(LTnoov (FlagConstant [fc]) yes no) && fc.ltNoov() => (First yes no) +(LTnoov (FlagConstant [fc]) yes no) && !fc.ltNoov() => (First no yes) + +(LEnoov (FlagConstant [fc]) yes no) && fc.leNoov() => (First yes no) +(LEnoov (FlagConstant [fc]) yes no) && !fc.leNoov() => (First no yes) + +(GTnoov (FlagConstant [fc]) yes no) && fc.gtNoov() => (First yes no) +(GTnoov (FlagConstant [fc]) yes no) && !fc.gtNoov() => (First no yes) + +(GEnoov (FlagConstant [fc]) yes no) && fc.geNoov() => (First yes no) +(GEnoov (FlagConstant [fc]) yes no) && !fc.geNoov() => (First no yes) + +(Z (MOVDconst [0]) yes no) => (First yes no) +(Z (MOVDconst [c]) yes no) && c != 0 => (First no yes) +(NZ (MOVDconst [0]) yes no) => (First no yes) +(NZ (MOVDconst [c]) yes no) && c != 0 => (First yes no) +(ZW (MOVDconst [c]) yes no) && int32(c) == 0 => (First yes no) +(ZW (MOVDconst [c]) yes no) && int32(c) != 0 => (First no yes) +(NZW (MOVDconst [c]) yes no) && int32(c) == 0 => (First no yes) +(NZW (MOVDconst [c]) yes no) && int32(c) != 0 => (First yes no) + +// absorb InvertFlags into branches +(LT (InvertFlags cmp) yes no) => (GT cmp yes no) +(GT (InvertFlags cmp) yes no) => (LT cmp yes no) +(LE (InvertFlags cmp) yes no) => (GE cmp yes no) +(GE (InvertFlags cmp) yes no) => (LE cmp yes no) +(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no) +(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no) +(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no) +(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no) +(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no) +(NE 
(InvertFlags cmp) yes no) => (NE cmp yes no) +(FLT (InvertFlags cmp) yes no) => (FGT cmp yes no) +(FGT (InvertFlags cmp) yes no) => (FLT cmp yes no) +(FLE (InvertFlags cmp) yes no) => (FGE cmp yes no) +(FGE (InvertFlags cmp) yes no) => (FLE cmp yes no) +(LTnoov (InvertFlags cmp) yes no) => (GTnoov cmp yes no) +(GEnoov (InvertFlags cmp) yes no) => (LEnoov cmp yes no) +(LEnoov (InvertFlags cmp) yes no) => (GEnoov cmp yes no) +(GTnoov (InvertFlags cmp) yes no) => (LTnoov cmp yes no) + +// absorb InvertFlags into conditional instructions +(CSEL [cc] x y (InvertFlags cmp)) => (CSEL [arm64Invert(cc)] x y cmp) +(CSEL0 [cc] x (InvertFlags cmp)) => (CSEL0 [arm64Invert(cc)] x cmp) +(CSETM [cc] (InvertFlags cmp)) => (CSETM [arm64Invert(cc)] cmp) +(CSINC [cc] x y (InvertFlags cmp)) => (CSINC [arm64Invert(cc)] x y cmp) +(CSINV [cc] x y (InvertFlags cmp)) => (CSINV [arm64Invert(cc)] x y cmp) +(CSNEG [cc] x y (InvertFlags cmp)) => (CSNEG [arm64Invert(cc)] x y cmp) + +// absorb flag constants into boolean values +(Equal (FlagConstant [fc])) => (MOVDconst [b2i(fc.eq())]) +(NotEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.ne())]) +(LessThan (FlagConstant [fc])) => (MOVDconst [b2i(fc.lt())]) +(LessThanU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ult())]) +(LessEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.le())]) +(LessEqualU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ule())]) +(GreaterThan (FlagConstant [fc])) => (MOVDconst [b2i(fc.gt())]) +(GreaterThanU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ugt())]) +(GreaterEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.ge())]) +(GreaterEqualU (FlagConstant [fc])) => (MOVDconst [b2i(fc.uge())]) + +// absorb InvertFlags into boolean values +(Equal (InvertFlags x)) => (Equal x) +(NotEqual (InvertFlags x)) => (NotEqual x) +(LessThan (InvertFlags x)) => (GreaterThan x) +(LessThanU (InvertFlags x)) => (GreaterThanU x) +(GreaterThan (InvertFlags x)) => (LessThan x) +(GreaterThanU (InvertFlags x)) => (LessThanU x) +(LessEqual (InvertFlags 
x)) => (GreaterEqual x) +(LessEqualU (InvertFlags x)) => (GreaterEqualU x) +(GreaterEqual (InvertFlags x)) => (LessEqual x) +(GreaterEqualU (InvertFlags x)) => (LessEqualU x) +(LessThanF (InvertFlags x)) => (GreaterThanF x) +(LessEqualF (InvertFlags x)) => (GreaterEqualF x) +(GreaterThanF (InvertFlags x)) => (LessThanF x) +(GreaterEqualF (InvertFlags x)) => (LessEqualF x) +(LessThanNoov (InvertFlags x)) => (CSEL0 [OpARM64NotEqual] (GreaterEqualNoov x) x) +(GreaterEqualNoov (InvertFlags x)) => (CSINC [OpARM64NotEqual] (LessThanNoov x) (MOVDconst [0]) x) + +// Don't bother extending if we're not using the higher bits. +(MOV(B|BU)reg x) && v.Type.Size() <= 1 => x +(MOV(H|HU)reg x) && v.Type.Size() <= 2 => x +(MOV(W|WU)reg x) && v.Type.Size() <= 4 => x + +// omit sign extension +(MOVWreg (ANDconst x [c])) && uint64(c) & uint64(0xffffffff80000000) == 0 => (ANDconst x [c]) +(MOVHreg (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffff8000) == 0 => (ANDconst x [c]) +(MOVBreg (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffffff80) == 0 => (ANDconst x [c]) + +// absorb flag constants into conditional instructions +(CSEL [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x +(CSEL [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => y +(CSEL0 [cc] x flag) && ccARM64Eval(cc, flag) > 0 => x +(CSEL0 [cc] _ flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0]) +(CSNEG [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x +(CSNEG [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (NEG y) +(CSINV [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x +(CSINV [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (Not y) +(CSINC [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x +(CSINC [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (ADDconst [1] y) +(CSETM [cc] flag) && ccARM64Eval(cc, flag) > 0 => (MOVDconst [-1]) +(CSETM [cc] flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0]) + +// absorb flags back into boolean CSEL +(CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != 
nil => + (CSEL [boolval.Op] x y flagArg(boolval)) +(CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil => + (CSEL [arm64Negate(boolval.Op)] x y flagArg(boolval)) +(CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil => + (CSEL0 [boolval.Op] x flagArg(boolval)) +(CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil => + (CSEL0 [arm64Negate(boolval.Op)] x flagArg(boolval)) + +// absorb shifts into ops +(NEG x:(SLLconst [c] y)) && clobberIfDead(x) => (NEGshiftLL [c] y) +(NEG x:(SRLconst [c] y)) && clobberIfDead(x) => (NEGshiftRL [c] y) +(NEG x:(SRAconst [c] y)) && clobberIfDead(x) => (NEGshiftRA [c] y) +(MVN x:(SLLconst [c] y)) && clobberIfDead(x) => (MVNshiftLL [c] y) +(MVN x:(SRLconst [c] y)) && clobberIfDead(x) => (MVNshiftRL [c] y) +(MVN x:(SRAconst [c] y)) && clobberIfDead(x) => (MVNshiftRA [c] y) +(MVN x:(RORconst [c] y)) && clobberIfDead(x) => (MVNshiftRO [c] y) +(ADD x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ADDshiftLL x0 y [c]) +(ADD x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ADDshiftRL x0 y [c]) +(ADD x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ADDshiftRA x0 y [c]) +(SUB x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (SUBshiftLL x0 y [c]) +(SUB x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (SUBshiftRL x0 y [c]) +(SUB x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (SUBshiftRA x0 y [c]) +(AND x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ANDshiftLL x0 y [c]) +(AND x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ANDshiftRL x0 y [c]) +(AND x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ANDshiftRA x0 y [c]) +(AND x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ANDshiftRO x0 y [c]) +(OR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORshiftLL x0 y [c]) // useful for combined load +(OR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORshiftRL x0 y [c]) +(OR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORshiftRA x0 y [c]) 
+(OR x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ORshiftRO x0 y [c]) +(XOR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (XORshiftLL x0 y [c]) +(XOR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (XORshiftRL x0 y [c]) +(XOR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (XORshiftRA x0 y [c]) +(XOR x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (XORshiftRO x0 y [c]) +(BIC x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (BICshiftLL x0 y [c]) +(BIC x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (BICshiftRL x0 y [c]) +(BIC x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (BICshiftRA x0 y [c]) +(BIC x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (BICshiftRO x0 y [c]) +(ORN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORNshiftLL x0 y [c]) +(ORN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORNshiftRL x0 y [c]) +(ORN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORNshiftRA x0 y [c]) +(ORN x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ORNshiftRO x0 y [c]) +(EON x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (EONshiftLL x0 y [c]) +(EON x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (EONshiftRL x0 y [c]) +(EON x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (EONshiftRA x0 y [c]) +(EON x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (EONshiftRO x0 y [c]) +(CMP x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMPshiftLL x0 y [c]) +(CMP x0:(SLLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftLL x1 y [c])) +(CMP x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMPshiftRL x0 y [c]) +(CMP x0:(SRLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRL x1 y [c])) +(CMP x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMPshiftRA x0 y [c]) +(CMP x0:(SRAconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRA x1 y [c])) +(CMN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMNshiftLL x0 y [c]) +(CMN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMNshiftRL x0 y [c]) +(CMN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => 
(CMNshiftRA x0 y [c]) +(TST x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (TSTshiftLL x0 y [c]) +(TST x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (TSTshiftRL x0 y [c]) +(TST x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (TSTshiftRA x0 y [c]) +(TST x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (TSTshiftRO x0 y [c]) + +// prefer *const ops to *shift ops +(ADDshiftLL (MOVDconst [c]) x [d]) => (ADDconst [c] (SLLconst x [d])) +(ADDshiftRL (MOVDconst [c]) x [d]) => (ADDconst [c] (SRLconst x [d])) +(ADDshiftRA (MOVDconst [c]) x [d]) => (ADDconst [c] (SRAconst x [d])) +(ANDshiftLL (MOVDconst [c]) x [d]) => (ANDconst [c] (SLLconst x [d])) +(ANDshiftRL (MOVDconst [c]) x [d]) => (ANDconst [c] (SRLconst x [d])) +(ANDshiftRA (MOVDconst [c]) x [d]) => (ANDconst [c] (SRAconst x [d])) +(ANDshiftRO (MOVDconst [c]) x [d]) => (ANDconst [c] (RORconst x [d])) +(ORshiftLL (MOVDconst [c]) x [d]) => (ORconst [c] (SLLconst x [d])) +(ORshiftRL (MOVDconst [c]) x [d]) => (ORconst [c] (SRLconst x [d])) +(ORshiftRA (MOVDconst [c]) x [d]) => (ORconst [c] (SRAconst x [d])) +(ORshiftRO (MOVDconst [c]) x [d]) => (ORconst [c] (RORconst x [d])) +(XORshiftLL (MOVDconst [c]) x [d]) => (XORconst [c] (SLLconst x [d])) +(XORshiftRL (MOVDconst [c]) x [d]) => (XORconst [c] (SRLconst x [d])) +(XORshiftRA (MOVDconst [c]) x [d]) => (XORconst [c] (SRAconst x [d])) +(XORshiftRO (MOVDconst [c]) x [d]) => (XORconst [c] (RORconst x [d])) +(CMPshiftLL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SLLconst x [d]))) +(CMPshiftRL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRLconst x [d]))) +(CMPshiftRA (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRAconst x [d]))) +(CMNshiftLL (MOVDconst [c]) x [d]) => (CMNconst [c] (SLLconst x [d])) +(CMNshiftRL (MOVDconst [c]) x [d]) => (CMNconst [c] (SRLconst x [d])) +(CMNshiftRA (MOVDconst [c]) x [d]) => (CMNconst [c] (SRAconst x [d])) +(TSTshiftLL (MOVDconst [c]) x [d]) => (TSTconst [c] (SLLconst x [d])) +(TSTshiftRL (MOVDconst [c]) x [d]) => 
(TSTconst [c] (SRLconst x [d])) +(TSTshiftRA (MOVDconst [c]) x [d]) => (TSTconst [c] (SRAconst x [d])) +(TSTshiftRO (MOVDconst [c]) x [d]) => (TSTconst [c] (RORconst x [d])) + +// constant folding in *shift ops +(MVNshiftLL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)< (MOVDconst [^int64(uint64(c)>>uint64(d))]) +(MVNshiftRA (MOVDconst [c]) [d]) => (MOVDconst [^(c>>uint64(d))]) +(MVNshiftRO (MOVDconst [c]) [d]) => (MOVDconst [^rotateRight64(c, d)]) +(NEGshiftLL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)< (MOVDconst [-int64(uint64(c)>>uint64(d))]) +(NEGshiftRA (MOVDconst [c]) [d]) => (MOVDconst [-(c>>uint64(d))]) +(ADDshiftLL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)< (ADDconst x [int64(uint64(c)>>uint64(d))]) +(ADDshiftRA x (MOVDconst [c]) [d]) => (ADDconst x [c>>uint64(d)]) +(SUBshiftLL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)< (SUBconst x [int64(uint64(c)>>uint64(d))]) +(SUBshiftRA x (MOVDconst [c]) [d]) => (SUBconst x [c>>uint64(d)]) +(ANDshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)< (ANDconst x [int64(uint64(c)>>uint64(d))]) +(ANDshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [c>>uint64(d)]) +(ANDshiftRO x (MOVDconst [c]) [d]) => (ANDconst x [rotateRight64(c, d)]) +(ORshiftLL x (MOVDconst [c]) [d]) => (ORconst x [int64(uint64(c)< (ORconst x [int64(uint64(c)>>uint64(d))]) +(ORshiftRA x (MOVDconst [c]) [d]) => (ORconst x [c>>uint64(d)]) +(ORshiftRO x (MOVDconst [c]) [d]) => (ORconst x [rotateRight64(c, d)]) +(XORshiftLL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)< (XORconst x [int64(uint64(c)>>uint64(d))]) +(XORshiftRA x (MOVDconst [c]) [d]) => (XORconst x [c>>uint64(d)]) +(XORshiftRO x (MOVDconst [c]) [d]) => (XORconst x [rotateRight64(c, d)]) +(BICshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)< (ANDconst x [^int64(uint64(c)>>uint64(d))]) +(BICshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [^(c>>uint64(d))]) +(BICshiftRO x (MOVDconst [c]) [d]) => (ANDconst x 
[^rotateRight64(c, d)]) +(ORNshiftLL x (MOVDconst [c]) [d]) => (ORconst x [^int64(uint64(c)< (ORconst x [^int64(uint64(c)>>uint64(d))]) +(ORNshiftRA x (MOVDconst [c]) [d]) => (ORconst x [^(c>>uint64(d))]) +(ORNshiftRO x (MOVDconst [c]) [d]) => (ORconst x [^rotateRight64(c, d)]) +(EONshiftLL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)< (XORconst x [^int64(uint64(c)>>uint64(d))]) +(EONshiftRA x (MOVDconst [c]) [d]) => (XORconst x [^(c>>uint64(d))]) +(EONshiftRO x (MOVDconst [c]) [d]) => (XORconst x [^rotateRight64(c, d)]) +(CMPshiftLL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)< (CMPconst x [int64(uint64(c)>>uint64(d))]) +(CMPshiftRA x (MOVDconst [c]) [d]) => (CMPconst x [c>>uint64(d)]) +(CMNshiftLL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)< (CMNconst x [int64(uint64(c)>>uint64(d))]) +(CMNshiftRA x (MOVDconst [c]) [d]) => (CMNconst x [c>>uint64(d)]) +(TSTshiftLL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)< (TSTconst x [int64(uint64(c)>>uint64(d))]) +(TSTshiftRA x (MOVDconst [c]) [d]) => (TSTconst x [c>>uint64(d)]) +(TSTshiftRO x (MOVDconst [c]) [d]) => (TSTconst x [rotateRight64(c, d)]) + +// simplification with *shift ops +(SUBshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0]) +(SUBshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0]) +(SUBshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0]) +(ANDshiftLL y:(SLLconst x [c]) x [c]) => y +(ANDshiftRL y:(SRLconst x [c]) x [c]) => y +(ANDshiftRA y:(SRAconst x [c]) x [c]) => y +(ANDshiftRO y:(RORconst x [c]) x [c]) => y +(ORshiftLL y:(SLLconst x [c]) x [c]) => y +(ORshiftRL y:(SRLconst x [c]) x [c]) => y +(ORshiftRA y:(SRAconst x [c]) x [c]) => y +(ORshiftRO y:(RORconst x [c]) x [c]) => y +(XORshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0]) +(XORshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0]) +(XORshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0]) +(XORshiftRO (RORconst x [c]) x [c]) => (MOVDconst [0]) +(BICshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0]) +(BICshiftRL 
(SRLconst x [c]) x [c]) => (MOVDconst [0]) +(BICshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0]) +(BICshiftRO (RORconst x [c]) x [c]) => (MOVDconst [0]) +(EONshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1]) +(EONshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1]) +(EONshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1]) +(EONshiftRO (RORconst x [c]) x [c]) => (MOVDconst [-1]) +(ORNshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1]) +(ORNshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1]) +(ORNshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1]) +(ORNshiftRO (RORconst x [c]) x [c]) => (MOVDconst [-1]) + +// rev16w | rev16 +// ((x>>8) | (x<<8)) => (REV16W x), the type of x is uint16, "|" can also be "^" or "+". +((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (UBFX [armBFAuxInt(8, 8)] x) x) => (REV16W x) + +// ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8), "|" can also be "^" or "+". +((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x)) + && uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff + => (REV16W x) + +// ((x & 0xff00ff00ff00ff00)>>8) | ((x & 0x00ff00ff00ff00ff)<<8), "|" can also be "^" or "+". +((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x)) + && (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) + => (REV16 x) + +// ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8), "|" can also be "^" or "+". 
+((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x)) + && (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) + => (REV16 (ANDconst [0xffffffff] x)) + +// Extract from reg pair +(ADDshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x) +( ORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x) +(XORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x) + +(ADDshiftLL [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) + => (EXTRWconst [32-c] x2 x) +( ORshiftLL [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) + => (EXTRWconst [32-c] x2 x) +(XORshiftLL [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c) + => (EXTRWconst [32-c] x2 x) + +// Rewrite special pairs of shifts to AND. +// On ARM64 the bitmask can fit into an instruction. +(SRLconst [c] (SLLconst [c] x)) && 0 < c && c < 64 => (ANDconst [1< (ANDconst [^(1< (ORconst [c1] x) + +// If the shift amount is larger than the datasize(32, 16, 8), we can optimize to constant 0. +(MOVWUreg (SLLconst [lc] x)) && lc >= 32 => (MOVDconst [0]) +(MOVHUreg (SLLconst [lc] x)) && lc >= 16 => (MOVDconst [0]) +(MOVBUreg (SLLconst [lc] x)) && lc >= 8 => (MOVDconst [0]) + +// After zero extension, the upper (64-datasize(32|16|8)) bits are zero, we can optimiza to constant 0. 
+(SRLconst [rc] (MOVWUreg x)) && rc >= 32 => (MOVDconst [0]) +(SRLconst [rc] (MOVHUreg x)) && rc >= 16 => (MOVDconst [0]) +(SRLconst [rc] (MOVBUreg x)) && rc >= 8 => (MOVDconst [0]) + +// bitfield ops + +// sbfiz +// (x << lc) >> rc +(SRAconst [rc] (SLLconst [lc] x)) && lc > rc => (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x) +// int64(x << lc) +(MOVWreg (SLLconst [lc] x)) && lc < 32 => (SBFIZ [armBFAuxInt(lc, 32-lc)] x) +(MOVHreg (SLLconst [lc] x)) && lc < 16 => (SBFIZ [armBFAuxInt(lc, 16-lc)] x) +(MOVBreg (SLLconst [lc] x)) && lc < 8 => (SBFIZ [armBFAuxInt(lc, 8-lc)] x) +// int64(x) << lc +(SLLconst [lc] (MOVWreg x)) => (SBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x) +(SLLconst [lc] (MOVHreg x)) => (SBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x) +(SLLconst [lc] (MOVBreg x)) => (SBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x) + +// sbfx +// (x << lc) >> rc +(SRAconst [rc] (SLLconst [lc] x)) && lc <= rc => (SBFX [armBFAuxInt(rc-lc, 64-rc)] x) +// int64(x) >> rc +(SRAconst [rc] (MOVWreg x)) && rc < 32 => (SBFX [armBFAuxInt(rc, 32-rc)] x) +(SRAconst [rc] (MOVHreg x)) && rc < 16 => (SBFX [armBFAuxInt(rc, 16-rc)] x) +(SRAconst [rc] (MOVBreg x)) && rc < 8 => (SBFX [armBFAuxInt(rc, 8-rc)] x) +// merge sbfx and sign-extension into sbfx +(MOVWreg (SBFX [bfc] x)) && bfc.getARM64BFwidth() <= 32 => (SBFX [bfc] x) +(MOVHreg (SBFX [bfc] x)) && bfc.getARM64BFwidth() <= 16 => (SBFX [bfc] x) +(MOVBreg (SBFX [bfc] x)) && bfc.getARM64BFwidth() <= 8 => (SBFX [bfc] x) + +// sbfiz/sbfx combinations: merge shifts into bitfield ops +(SRAconst [sc] (SBFIZ [bfc] x)) && sc < bfc.getARM64BFlsb() + => (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x) +(SRAconst [sc] (SBFIZ [bfc] x)) && sc >= bfc.getARM64BFlsb() + && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth() + => (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x) + +// ubfiz +// (x << lc) >> rc +(SRLconst [rc] (SLLconst [lc] x)) && lc > rc => (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x) +// uint64(x) << 
lc +(SLLconst [lc] (MOVWUreg x)) => (UBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x) +(SLLconst [lc] (MOVHUreg x)) => (UBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x) +(SLLconst [lc] (MOVBUreg x)) => (UBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x) +// uint64(x << lc) +(MOVWUreg (SLLconst [lc] x)) && lc < 32 => (UBFIZ [armBFAuxInt(lc, 32-lc)] x) +(MOVHUreg (SLLconst [lc] x)) && lc < 16 => (UBFIZ [armBFAuxInt(lc, 16-lc)] x) +(MOVBUreg (SLLconst [lc] x)) && lc < 8 => (UBFIZ [armBFAuxInt(lc, 8-lc)] x) + +// merge ANDconst into ubfiz +// (x & ac) << sc +(SLLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, 0) + => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x) +// (x << sc) & ac +(ANDconst [ac] (SLLconst [sc] x)) && isARM64BFMask(sc, ac, sc) + => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x) + +// ubfx +// (x << lc) >> rc +(SRLconst [rc] (SLLconst [lc] x)) && lc < rc => (UBFX [armBFAuxInt(rc-lc, 64-rc)] x) +// uint64(x) >> rc +(SRLconst [rc] (MOVWUreg x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32-rc)] x) +(SRLconst [rc] (MOVHUreg x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16-rc)] x) +(SRLconst [rc] (MOVBUreg x)) && rc < 8 => (UBFX [armBFAuxInt(rc, 8-rc)] x) +// uint64(x >> rc) +(MOVWUreg (SRLconst [rc] x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32)] x) +(MOVHUreg (SRLconst [rc] x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16)] x) +(MOVBUreg (SRLconst [rc] x)) && rc < 8 => (UBFX [armBFAuxInt(rc, 8)] x) +// merge ANDconst into ubfx +// (x >> sc) & ac +(ANDconst [ac] (SRLconst [sc] x)) && isARM64BFMask(sc, ac, 0) + => (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x) +// (x & ac) >> sc +(SRLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, sc) + => (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x) +// merge ANDconst and ubfx into ubfx +(ANDconst [c] (UBFX [bfc] x)) && isARM64BFMask(0, c, 0) => + (UBFX [armBFAuxInt(bfc.getARM64BFlsb(), min(bfc.getARM64BFwidth(), arm64BFWidth(c, 0)))] x) +(UBFX [bfc] (ANDconst [c] x)) && isARM64BFMask(0, c, 0) && bfc.getARM64BFlsb() + 
bfc.getARM64BFwidth() <= arm64BFWidth(c, 0) => + (UBFX [bfc] x) +// merge ubfx and zerso-extension into ubfx +(MOVWUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 32 => (UBFX [bfc] x) +(MOVHUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 16 => (UBFX [bfc] x) +(MOVBUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 8 => (UBFX [bfc] x) + +// ubfiz/ubfx combinations: merge shifts into bitfield ops +(SRLconst [sc] (UBFX [bfc] x)) && sc < bfc.getARM64BFwidth() + => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x) +(UBFX [bfc] (SRLconst [sc] x)) && sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64 + => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x) +(SLLconst [sc] (UBFIZ [bfc] x)) && sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64 + => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x) +(UBFIZ [bfc] (SLLconst [sc] x)) && sc < bfc.getARM64BFwidth() + => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x) +// ((x << c1) >> c2) >> c3 +(SRLconst [sc] (UBFIZ [bfc] x)) && sc == bfc.getARM64BFlsb() + => (ANDconst [1< (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x) +(SRLconst [sc] (UBFIZ [bfc] x)) && sc > bfc.getARM64BFlsb() + && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth() + => (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x) +// ((x << c1) << c2) >> c3 +(UBFX [bfc] (SLLconst [sc] x)) && sc == bfc.getARM64BFlsb() + => (ANDconst [1< (UBFX [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x) +(UBFX [bfc] (SLLconst [sc] x)) && sc > bfc.getARM64BFlsb() + && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth() + => (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x) + +// bfi +(OR (UBFIZ [bfc] x) (ANDconst [ac] y)) + && ac == ^((1< (BFI [bfc] y x) +(ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y)) + && lc > rc && ac == ^((1< (BFI [armBFAuxInt(lc-rc, 64-lc)] x y) +// bfxil +(OR 
(UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1< (BFXIL [bfc] y x) +(ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) && sc == bfc.getARM64BFwidth() + => (BFXIL [bfc] y x) +(ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x)) && lc < rc && ac == ^((1< (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x) + +// FP simplification +(FNEGS (FMULS x y)) => (FNMULS x y) +(FNEGD (FMULD x y)) => (FNMULD x y) +(FMULS (FNEGS x) y) => (FNMULS x y) +(FMULD (FNEGD x) y) => (FNMULD x y) +(FNEGS (FNMULS x y)) => (FMULS x y) +(FNEGD (FNMULD x y)) => (FMULD x y) +(FNMULS (FNEGS x) y) => (FMULS x y) +(FNMULD (FNEGD x) y) => (FMULD x y) + +(FADDS a (FMULS x y)) && a.Block.Func.useFMA(v) => (FMADDS a x y) +(FADDD a (FMULD x y)) && a.Block.Func.useFMA(v) => (FMADDD a x y) +(FSUBS a (FMULS x y)) && a.Block.Func.useFMA(v) => (FMSUBS a x y) +(FSUBD a (FMULD x y)) && a.Block.Func.useFMA(v) => (FMSUBD a x y) +(FSUBS (FMULS x y) a) && a.Block.Func.useFMA(v) => (FNMSUBS a x y) +(FSUBD (FMULD x y) a) && a.Block.Func.useFMA(v) => (FNMSUBD a x y) +(FADDS a (FNMULS x y)) && a.Block.Func.useFMA(v) => (FMSUBS a x y) +(FADDD a (FNMULD x y)) && a.Block.Func.useFMA(v) => (FMSUBD a x y) +(FSUBS a (FNMULS x y)) && a.Block.Func.useFMA(v) => (FMADDS a x y) +(FSUBD a (FNMULD x y)) && a.Block.Func.useFMA(v) => (FMADDD a x y) +(FSUBS (FNMULS x y) a) && a.Block.Func.useFMA(v) => (FNMADDS a x y) +(FSUBD (FNMULD x y) a) && a.Block.Func.useFMA(v) => (FNMADDD a x y) + +(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read8(sym, int64(off)))]) +(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) +(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) +(MOVDload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))]) + +// Prefetch instructions (aux is option: 0 - PLDL1KEEP; 1 - PLDL1STRM) +(PrefetchCache addr mem) => 
(PRFM [0] addr mem) +(PrefetchCacheStreamed addr mem) => (PRFM [1] addr mem) + +// Arch-specific inlining for small or disjoint runtime.memmove +(SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore _ src s3:(MOVDstore {t} _ dst mem))))) + && sz >= 0 + && isSameCall(sym, "runtime.memmove") + && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 + && isInlinableMemmove(dst, src, sz, config) + && clobber(s1, s2, s3, call) + => (Move [sz] dst src mem) + +// Match post-lowering calls, register version. +(SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem)) + && sz >= 0 + && isSameCall(sym, "runtime.memmove") + && call.Uses == 1 + && isInlinableMemmove(dst, src, sz, config) + && clobber(call) + => (Move [sz] dst src mem) + +((REV|REVW) ((REV|REVW) p)) => p + +// runtime/internal/math.MulUintptr intrinsics + +(Select0 (Mul64uover x y)) => (MUL x y) +(Select1 (Mul64uover x y)) => (NotEqual (CMPconst (UMULH x y) [0])) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go new file mode 100644 index 0000000000000000000000000000000000000000..5a98aa0c5424ad2650b75e60eb40e29cb565d4f8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/ARM64Ops.go @@ -0,0 +1,803 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "strings" + +// Notes: +// - Integer types live in the low portion of registers. Upper portions are junk. +// - Boolean types use the low-order byte of a register. 0=false, 1=true. +// Upper bytes are junk. +// - *const instructions may use a constant larger than the instruction can encode. +// In this case the assembler expands to multiple instructions and uses tmp +// register (R27). 
+// - All 32-bit Ops will zero the upper 32 bits of the destination register. + +// Suffixes encode the bit width of various instructions. +// D (double word) = 64 bit +// W (word) = 32 bit +// H (half word) = 16 bit +// HU = 16 bit unsigned +// B (byte) = 8 bit +// BU = 8 bit unsigned +// S (single) = 32 bit float +// D (double) = 64 bit float + +// Note: registers not used in regalloc are not included in this list, +// so that regmask stays within int64 +// Be careful when hand coding regmasks. +var regNamesARM64 = []string{ + "R0", + "R1", + "R2", + "R3", + "R4", + "R5", + "R6", + "R7", + "R8", + "R9", + "R10", + "R11", + "R12", + "R13", + "R14", + "R15", + "R16", + "R17", + "R18", // platform register, not used + "R19", + "R20", + "R21", + "R22", + "R23", + "R24", + "R25", + "R26", + // R27 = REGTMP not used in regalloc + "g", // aka R28 + "R29", // frame pointer, not used + "R30", // aka REGLINK + "SP", // aka R31 + + "F0", + "F1", + "F2", + "F3", + "F4", + "F5", + "F6", + "F7", + "F8", + "F9", + "F10", + "F11", + "F12", + "F13", + "F14", + "F15", + "F16", + "F17", + "F18", + "F19", + "F20", + "F21", + "F22", + "F23", + "F24", + "F25", + "F26", + "F27", + "F28", + "F29", + "F30", + "F31", + + // If you add registers, update asyncPreempt in runtime. + + // pseudo-registers + "SB", +} + +func init() { + // Make map from reg names to reg integers. 
+ if len(regNamesARM64) > 64 { + panic("too many registers") + } + num := map[string]int{} + for i, name := range regNamesARM64 { + num[name] = i + } + buildReg := func(s string) regMask { + m := regMask(0) + for _, r := range strings.Split(s, " ") { + if n, ok := num[r]; ok { + m |= regMask(1) << uint(n) + continue + } + panic("register " + r + " not found") + } + return m + } + + // Common individual register masks + var ( + gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30") + gpg = gp | buildReg("g") + gpsp = gp | buildReg("SP") + gpspg = gpg | buildReg("SP") + gpspsbg = gpspg | buildReg("SB") + fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31") + callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g + r0 = buildReg("R0") + r1 = buildReg("R1") + r2 = buildReg("R2") + r3 = buildReg("R3") + ) + // Common regInfo + var ( + gp01 = regInfo{inputs: nil, outputs: []regMask{gp}} + gp0flags1 = regInfo{inputs: []regMask{0}, outputs: []regMask{gp}} + gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}} + gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}} + gp1flags = regInfo{inputs: []regMask{gpg}} + gp1flags1 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}} + gp11flags = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp, 0}} + gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}} + gp21nog = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}} + gp21flags = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}} + gp2flags = regInfo{inputs: []regMask{gpg, gpg}} + gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}} + gp2flags1flags = regInfo{inputs: []regMask{gp, gp, 0}, outputs: []regMask{gp, 0}} + gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}} + gp31 = regInfo{inputs: 
[]regMask{gpg, gpg, gpg}, outputs: []regMask{gp}} + gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}} + gpload2 = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gpg, gpg}} + gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}} + gpstore0 = regInfo{inputs: []regMask{gpspsbg}} + gpstore2 = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}} + gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}} + gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}} + fp01 = regInfo{inputs: nil, outputs: []regMask{fp}} + fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}} + fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}} + gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}} + fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}} + fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}} + fp2flags = regInfo{inputs: []regMask{fp, fp}} + fp1flags = regInfo{inputs: []regMask{fp}} + fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}} + fp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{fp}} + fpstore = regInfo{inputs: []regMask{gpspsbg, fp}} + fpstore2 = regInfo{inputs: []regMask{gpspsbg, gpg, fp}} + readflags = regInfo{inputs: nil, outputs: []regMask{gp}} + prefreg = regInfo{inputs: []regMask{gpspsbg}} + ) + ops := []opData{ + // binary ops + {name: "ADCSflags", argLength: 3, reg: gp2flags1flags, typ: "(UInt64,Flags)", asm: "ADCS", commutative: true}, // arg0+arg1+carry, set flags. + {name: "ADCzerocarry", argLength: 1, reg: gp0flags1, typ: "UInt64", asm: "ADC"}, // ZR+ZR+carry + {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1 + {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int64"}, // arg0 + auxInt + {name: "ADDSconstflags", argLength: 1, reg: gp11flags, typ: "(UInt64,Flags)", asm: "ADDS", aux: "Int64"}, // arg0+auxint, set flags. 
+ {name: "ADDSflags", argLength: 2, reg: gp21flags, typ: "(UInt64,Flags)", asm: "ADDS", commutative: true}, // arg0+arg1, set flags. + {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0 - arg1 + {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int64"}, // arg0 - auxInt + {name: "SBCSflags", argLength: 3, reg: gp2flags1flags, typ: "(UInt64,Flags)", asm: "SBCS"}, // arg0-(arg1+borrowing), set flags. + {name: "SUBSflags", argLength: 2, reg: gp21flags, typ: "(UInt64,Flags)", asm: "SUBS"}, // arg0 - arg1, set flags. + {name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true}, // arg0 * arg1 + {name: "MULW", argLength: 2, reg: gp21, asm: "MULW", commutative: true}, // arg0 * arg1, 32-bit + {name: "MNEG", argLength: 2, reg: gp21, asm: "MNEG", commutative: true}, // -arg0 * arg1 + {name: "MNEGW", argLength: 2, reg: gp21, asm: "MNEGW", commutative: true}, // -arg0 * arg1, 32-bit + {name: "MULH", argLength: 2, reg: gp21, asm: "SMULH", commutative: true}, // (arg0 * arg1) >> 64, signed + {name: "UMULH", argLength: 2, reg: gp21, asm: "UMULH", commutative: true}, // (arg0 * arg1) >> 64, unsigned + {name: "MULL", argLength: 2, reg: gp21, asm: "SMULL", commutative: true}, // arg0 * arg1, signed, 32-bit mult results in 64-bit + {name: "UMULL", argLength: 2, reg: gp21, asm: "UMULL", commutative: true}, // arg0 * arg1, unsigned, 32-bit mult results in 64-bit + {name: "DIV", argLength: 2, reg: gp21, asm: "SDIV"}, // arg0 / arg1, signed + {name: "UDIV", argLength: 2, reg: gp21, asm: "UDIV"}, // arg0 / arg1, unsigned + {name: "DIVW", argLength: 2, reg: gp21, asm: "SDIVW"}, // arg0 / arg1, signed, 32 bit + {name: "UDIVW", argLength: 2, reg: gp21, asm: "UDIVW"}, // arg0 / arg1, unsigned, 32 bit + {name: "MOD", argLength: 2, reg: gp21, asm: "REM"}, // arg0 % arg1, signed + {name: "UMOD", argLength: 2, reg: gp21, asm: "UREM"}, // arg0 % arg1, unsigned + {name: "MODW", argLength: 2, reg: gp21, asm: "REMW"}, // arg0 % arg1, signed, 32 bit + {name: "UMODW", 
argLength: 2, reg: gp21, asm: "UREMW"}, // arg0 % arg1, unsigned, 32 bit + + {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0 + arg1 + {name: "FADDD", argLength: 2, reg: fp21, asm: "FADDD", commutative: true}, // arg0 + arg1 + {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0 - arg1 + {name: "FSUBD", argLength: 2, reg: fp21, asm: "FSUBD"}, // arg0 - arg1 + {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true}, // arg0 * arg1 + {name: "FMULD", argLength: 2, reg: fp21, asm: "FMULD", commutative: true}, // arg0 * arg1 + {name: "FNMULS", argLength: 2, reg: fp21, asm: "FNMULS", commutative: true}, // -(arg0 * arg1) + {name: "FNMULD", argLength: 2, reg: fp21, asm: "FNMULD", commutative: true}, // -(arg0 * arg1) + {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS"}, // arg0 / arg1 + {name: "FDIVD", argLength: 2, reg: fp21, asm: "FDIVD"}, // arg0 / arg1 + + {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1 + {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64"}, // arg0 & auxInt + {name: "OR", argLength: 2, reg: gp21, asm: "ORR", commutative: true}, // arg0 | arg1 + {name: "ORconst", argLength: 1, reg: gp11, asm: "ORR", aux: "Int64"}, // arg0 | auxInt + {name: "XOR", argLength: 2, reg: gp21, asm: "EOR", commutative: true}, // arg0 ^ arg1 + {name: "XORconst", argLength: 1, reg: gp11, asm: "EOR", aux: "Int64"}, // arg0 ^ auxInt + {name: "BIC", argLength: 2, reg: gp21, asm: "BIC"}, // arg0 &^ arg1 + {name: "EON", argLength: 2, reg: gp21, asm: "EON"}, // arg0 ^ ^arg1 + {name: "ORN", argLength: 2, reg: gp21, asm: "ORN"}, // arg0 | ^arg1 + + // unary ops + {name: "MVN", argLength: 1, reg: gp11, asm: "MVN"}, // ^arg0 + {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0 + {name: "NEGSflags", argLength: 1, reg: gp11flags, typ: "(UInt64,Flags)", asm: "NEGS"}, // -arg0, set flags. 
+ {name: "NGCzerocarry", argLength: 1, reg: gp0flags1, typ: "UInt64", asm: "NGC"}, // -1 if borrowing, 0 otherwise. + {name: "FABSD", argLength: 1, reg: fp11, asm: "FABSD"}, // abs(arg0), float64 + {name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS"}, // -arg0, float32 + {name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD"}, // -arg0, float64 + {name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD"}, // sqrt(arg0), float64 + {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0), float32 + {name: "FMIND", argLength: 2, reg: fp21, asm: "FMIND"}, // min(arg0, arg1) + {name: "FMINS", argLength: 2, reg: fp21, asm: "FMINS"}, // min(arg0, arg1) + {name: "FMAXD", argLength: 2, reg: fp21, asm: "FMAXD"}, // max(arg0, arg1) + {name: "FMAXS", argLength: 2, reg: fp21, asm: "FMAXS"}, // max(arg0, arg1) + {name: "REV", argLength: 1, reg: gp11, asm: "REV"}, // byte reverse, 64-bit + {name: "REVW", argLength: 1, reg: gp11, asm: "REVW"}, // byte reverse, 32-bit + {name: "REV16", argLength: 1, reg: gp11, asm: "REV16"}, // byte reverse in each 16-bit halfword, 64-bit + {name: "REV16W", argLength: 1, reg: gp11, asm: "REV16W"}, // byte reverse in each 16-bit halfword, 32-bit + {name: "RBIT", argLength: 1, reg: gp11, asm: "RBIT"}, // bit reverse, 64-bit + {name: "RBITW", argLength: 1, reg: gp11, asm: "RBITW"}, // bit reverse, 32-bit + {name: "CLZ", argLength: 1, reg: gp11, asm: "CLZ"}, // count leading zero, 64-bit + {name: "CLZW", argLength: 1, reg: gp11, asm: "CLZW"}, // count leading zero, 32-bit + {name: "VCNT", argLength: 1, reg: fp11, asm: "VCNT"}, // count set bits for each 8-bit unit and store the result in each 8-bit unit + {name: "VUADDLV", argLength: 1, reg: fp11, asm: "VUADDLV"}, // unsigned sum of eight bytes in a 64-bit value, zero extended to 64-bit. 
+ {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true}, + {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true}, + + // 3-operand, the addend comes first + {name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS"}, // +arg0 + (arg1 * arg2) + {name: "FMADDD", argLength: 3, reg: fp31, asm: "FMADDD"}, // +arg0 + (arg1 * arg2) + {name: "FNMADDS", argLength: 3, reg: fp31, asm: "FNMADDS"}, // -arg0 - (arg1 * arg2) + {name: "FNMADDD", argLength: 3, reg: fp31, asm: "FNMADDD"}, // -arg0 - (arg1 * arg2) + {name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS"}, // +arg0 - (arg1 * arg2) + {name: "FMSUBD", argLength: 3, reg: fp31, asm: "FMSUBD"}, // +arg0 - (arg1 * arg2) + {name: "FNMSUBS", argLength: 3, reg: fp31, asm: "FNMSUBS"}, // -arg0 + (arg1 * arg2) + {name: "FNMSUBD", argLength: 3, reg: fp31, asm: "FNMSUBD"}, // -arg0 + (arg1 * arg2) + {name: "MADD", argLength: 3, reg: gp31, asm: "MADD"}, // +arg0 + (arg1 * arg2) + {name: "MADDW", argLength: 3, reg: gp31, asm: "MADDW"}, // +arg0 + (arg1 * arg2), 32-bit + {name: "MSUB", argLength: 3, reg: gp31, asm: "MSUB"}, // +arg0 - (arg1 * arg2) + {name: "MSUBW", argLength: 3, reg: gp31, asm: "MSUBW"}, // +arg0 - (arg1 * arg2), 32-bit + + // shifts + {name: "SLL", argLength: 2, reg: gp21, asm: "LSL"}, // arg0 << arg1, shift amount is mod 64 + {name: "SLLconst", argLength: 1, reg: gp11, asm: "LSL", aux: "Int64"}, // arg0 << auxInt, auxInt should be in the range 0 to 63. + {name: "SRL", argLength: 2, reg: gp21, asm: "LSR"}, // arg0 >> arg1, unsigned, shift amount is mod 64 + {name: "SRLconst", argLength: 1, reg: gp11, asm: "LSR", aux: "Int64"}, // arg0 >> auxInt, unsigned, auxInt should be in the range 0 to 63. + {name: "SRA", argLength: 2, reg: gp21, asm: "ASR"}, // arg0 >> arg1, signed, shift amount is mod 64 + {name: "SRAconst", argLength: 1, reg: gp11, asm: "ASR", aux: "Int64"}, // arg0 >> auxInt, signed, auxInt should be in the range 0 to 63. 
+ {name: "ROR", argLength: 2, reg: gp21, asm: "ROR"}, // arg0 right rotate by (arg1 mod 64) bits + {name: "RORW", argLength: 2, reg: gp21, asm: "RORW"}, // arg0 right rotate by (arg1 mod 32) bits + {name: "RORconst", argLength: 1, reg: gp11, asm: "ROR", aux: "Int64"}, // arg0 right rotate by auxInt bits, auxInt should be in the range 0 to 63. + {name: "RORWconst", argLength: 1, reg: gp11, asm: "RORW", aux: "Int64"}, // uint32(arg0) right rotate by auxInt bits, auxInt should be in the range 0 to 31. + {name: "EXTRconst", argLength: 2, reg: gp21, asm: "EXTR", aux: "Int64"}, // extract 64 bits from arg0:arg1 starting at lsb auxInt, auxInt should be in the range 0 to 63. + {name: "EXTRWconst", argLength: 2, reg: gp21, asm: "EXTRW", aux: "Int64"}, // extract 32 bits from arg0[31:0]:arg1[31:0] starting at lsb auxInt and zero top 32 bits, auxInt should be in the range 0 to 31. + + // comparisons + {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1 + {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to auxInt + {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1, 32 bit + {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", aux: "Int32", typ: "Flags"}, // arg0 compare to auxInt, 32 bit + {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags", commutative: true}, // arg0 compare to -arg1, provided arg1 is not 1<<63 + {name: "CMNconst", argLength: 1, reg: gp1flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // arg0 compare to -auxInt + {name: "CMNW", argLength: 2, reg: gp2flags, asm: "CMNW", typ: "Flags", commutative: true}, // arg0 compare to -arg1, 32 bit, provided arg1 is not 1<<31 + {name: "CMNWconst", argLength: 1, reg: gp1flags, asm: "CMNW", aux: "Int32", typ: "Flags"}, // arg0 compare to -auxInt, 32 bit + {name: "TST", argLength: 2, reg: gp2flags, asm: "TST", typ: "Flags", commutative: true}, // arg0 & arg1 
compare to 0 + {name: "TSTconst", argLength: 1, reg: gp1flags, asm: "TST", aux: "Int64", typ: "Flags"}, // arg0 & auxInt compare to 0 + {name: "TSTW", argLength: 2, reg: gp2flags, asm: "TSTW", typ: "Flags", commutative: true}, // arg0 & arg1 compare to 0, 32 bit + {name: "TSTWconst", argLength: 1, reg: gp1flags, asm: "TSTW", aux: "Int32", typ: "Flags"}, // arg0 & auxInt compare to 0, 32 bit + {name: "FCMPS", argLength: 2, reg: fp2flags, asm: "FCMPS", typ: "Flags"}, // arg0 compare to arg1, float32 + {name: "FCMPD", argLength: 2, reg: fp2flags, asm: "FCMPD", typ: "Flags"}, // arg0 compare to arg1, float64 + {name: "FCMPS0", argLength: 1, reg: fp1flags, asm: "FCMPS", typ: "Flags"}, // arg0 compare to 0, float32 + {name: "FCMPD0", argLength: 1, reg: fp1flags, asm: "FCMPD", typ: "Flags"}, // arg0 compare to 0, float64 + + // shifted ops + {name: "MVNshiftLL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0<>auxInt), unsigned shift, auxInt should be in the range 0 to 63. + {name: "MVNshiftRA", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0>>auxInt), signed shift, auxInt should be in the range 0 to 63. + {name: "MVNshiftRO", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0 ROR auxInt), signed shift, auxInt should be in the range 0 to 63. + {name: "NEGshiftLL", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0<>auxInt), unsigned shift, auxInt should be in the range 0 to 63. + {name: "NEGshiftRA", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0>>auxInt), signed shift, auxInt should be in the range 0 to 63. + {name: "ADDshiftLL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1<>auxInt, unsigned shift, auxInt should be in the range 0 to 63. + {name: "ADDshiftRA", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63. 
+ {name: "SUBshiftLL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int64"}, // arg0 - arg1<>auxInt, unsigned shift, auxInt should be in the range 0 to 63. + {name: "SUBshiftRA", argLength: 2, reg: gp21, asm: "SUB", aux: "Int64"}, // arg0 - arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63. + {name: "ANDshiftLL", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1<>auxInt), unsigned shift, auxInt should be in the range 0 to 63. + {name: "ANDshiftRA", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63. + {name: "ANDshiftRO", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1 ROR auxInt), signed shift, auxInt should be in the range 0 to 63. + {name: "ORshiftLL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1<>auxInt, unsigned shift, auxInt should be in the range 0 to 63. + {name: "ORshiftRA", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63. + {name: "ORshiftRO", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1 ROR auxInt, signed shift, auxInt should be in the range 0 to 63. + {name: "XORshiftLL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1<>auxInt, unsigned shift, auxInt should be in the range 0 to 63. + {name: "XORshiftRA", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63. + {name: "XORshiftRO", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1 ROR auxInt, signed shift, auxInt should be in the range 0 to 63. + {name: "BICshiftLL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1<>auxInt), unsigned shift, auxInt should be in the range 0 to 63. 
+ {name: "BICshiftRA", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63. + {name: "BICshiftRO", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1 ROR auxInt), signed shift, auxInt should be in the range 0 to 63. + {name: "EONshiftLL", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1<>auxInt), unsigned shift, auxInt should be in the range 0 to 63. + {name: "EONshiftRA", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63. + {name: "EONshiftRO", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1 ROR auxInt), signed shift, auxInt should be in the range 0 to 63. + {name: "ORNshiftLL", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1<>auxInt), unsigned shift, auxInt should be in the range 0 to 63. + {name: "ORNshiftRA", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63. + {name: "ORNshiftRO", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1 ROR auxInt), signed shift, auxInt should be in the range 0 to 63. + {name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1<>auxInt, unsigned shift, auxInt should be in the range 0 to 63. + {name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63. + {name: "CMNshiftLL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1<>auxInt) compare to 0, unsigned shift, auxInt should be in the range 0 to 63. + {name: "CMNshiftRA", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1>>auxInt) compare to 0, signed shift, auxInt should be in the range 0 to 63. 
+ {name: "TSTshiftLL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1<>auxInt) compare to 0, unsigned shift, auxInt should be in the range 0 to 63. + {name: "TSTshiftRA", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1>>auxInt) compare to 0, signed shift, auxInt should be in the range 0 to 63. + {name: "TSTshiftRO", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1 ROR auxInt) compare to 0, signed shift, auxInt should be in the range 0 to 63. + + // bitfield ops + // for all bitfield ops lsb is auxInt>>8, width is auxInt&0xff + // insert low width bits of arg1 into the result starting at bit lsb, copy other bits from arg0 + {name: "BFI", argLength: 2, reg: gp21nog, asm: "BFI", aux: "ARM64BitField", resultInArg0: true}, + // extract width bits of arg1 starting at bit lsb and insert at low end of result, copy other bits from arg0 + {name: "BFXIL", argLength: 2, reg: gp21nog, asm: "BFXIL", aux: "ARM64BitField", resultInArg0: true}, + // insert low width bits of arg0 into the result starting at bit lsb, bits to the left of the inserted bit field are set to the high/sign bit of the inserted bit field, bits to the right are zeroed + {name: "SBFIZ", argLength: 1, reg: gp11, asm: "SBFIZ", aux: "ARM64BitField"}, + // extract width bits of arg0 starting at bit lsb and insert at low end of result, remaining high bits are set to the high/sign bit of the extracted bitfield + {name: "SBFX", argLength: 1, reg: gp11, asm: "SBFX", aux: "ARM64BitField"}, + // insert low width bits of arg0 into the result starting at bit lsb, bits to the left and right of the inserted bit field are zeroed + {name: "UBFIZ", argLength: 1, reg: gp11, asm: "UBFIZ", aux: "ARM64BitField"}, + // extract width bits of arg0 starting at bit lsb and insert at low end of result, remaining high bits are zeroed + {name: "UBFX", argLength: 1, reg: gp11, asm: "UBFX", aux: "ARM64BitField"}, + + // moves + 
{name: "MOVDconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVD", typ: "UInt64", rematerializeable: true}, // 64 bits from auxint + {name: "FMOVSconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVS", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float + {name: "FMOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float + + {name: "MOVDaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVD", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB + + {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVWUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVWU", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVDload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVD", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. 
+ {name: "LDP", argLength: 2, reg: gpload2, aux: "SymOff", asm: "LDP", typ: "(UInt64,UInt64)", faultOnNilArg0: true, symEffect: "Read"}, // load from ptr = arg0 + auxInt + aux, returns the tuple <*(*uint64)ptr, *(*uint64)(ptr+8)>. arg1=mem. + {name: "FMOVSload", argLength: 2, reg: fpload, aux: "SymOff", asm: "FMOVS", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "FMOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "FMOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + + // register indexed load + {name: "MOVDloadidx", argLength: 3, reg: gp2load, asm: "MOVD", typ: "UInt64"}, // load 64-bit dword from arg0 + arg1, arg2 = mem. + {name: "MOVWloadidx", argLength: 3, reg: gp2load, asm: "MOVW", typ: "Int32"}, // load 32-bit word from arg0 + arg1, sign-extended to 64-bit, arg2=mem. + {name: "MOVWUloadidx", argLength: 3, reg: gp2load, asm: "MOVWU", typ: "UInt32"}, // load 32-bit word from arg0 + arg1, zero-extended to 64-bit, arg2=mem. + {name: "MOVHloadidx", argLength: 3, reg: gp2load, asm: "MOVH", typ: "Int16"}, // load 16-bit word from arg0 + arg1, sign-extended to 64-bit, arg2=mem. + {name: "MOVHUloadidx", argLength: 3, reg: gp2load, asm: "MOVHU", typ: "UInt16"}, // load 16-bit word from arg0 + arg1, zero-extended to 64-bit, arg2=mem. + {name: "MOVBloadidx", argLength: 3, reg: gp2load, asm: "MOVB", typ: "Int8"}, // load 8-bit word from arg0 + arg1, sign-extended to 64-bit, arg2=mem. + {name: "MOVBUloadidx", argLength: 3, reg: gp2load, asm: "MOVBU", typ: "UInt8"}, // load 8-bit word from arg0 + arg1, zero-extended to 64-bit, arg2=mem. + {name: "FMOVSloadidx", argLength: 3, reg: fp2load, asm: "FMOVS", typ: "Float32"}, // load 32-bit float from arg0 + arg1, arg2=mem. + {name: "FMOVDloadidx", argLength: 3, reg: fp2load, asm: "FMOVD", typ: "Float64"}, // load 64-bit float from arg0 + arg1, arg2=mem. 
+ + // shifted register indexed load + {name: "MOVHloadidx2", argLength: 3, reg: gp2load, asm: "MOVH", typ: "Int16"}, // load 16-bit half-word from arg0 + arg1*2, sign-extended to 64-bit, arg2=mem. + {name: "MOVHUloadidx2", argLength: 3, reg: gp2load, asm: "MOVHU", typ: "UInt16"}, // load 16-bit half-word from arg0 + arg1*2, zero-extended to 64-bit, arg2=mem. + {name: "MOVWloadidx4", argLength: 3, reg: gp2load, asm: "MOVW", typ: "Int32"}, // load 32-bit word from arg0 + arg1*4, sign-extended to 64-bit, arg2=mem. + {name: "MOVWUloadidx4", argLength: 3, reg: gp2load, asm: "MOVWU", typ: "UInt32"}, // load 32-bit word from arg0 + arg1*4, zero-extended to 64-bit, arg2=mem. + {name: "MOVDloadidx8", argLength: 3, reg: gp2load, asm: "MOVD", typ: "UInt64"}, // load 64-bit double-word from arg0 + arg1*8, arg2 = mem. + {name: "FMOVSloadidx4", argLength: 3, reg: fp2load, asm: "FMOVS", typ: "Float32"}, // load 32-bit float from arg0 + arg1*4, arg2 = mem. + {name: "FMOVDloadidx8", argLength: 3, reg: fp2load, asm: "FMOVD", typ: "Float64"}, // load 64-bit float from arg0 + arg1*8, arg2 = mem. + + {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVDstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. 
+ {name: "STP", argLength: 4, reg: gpstore2, aux: "SymOff", asm: "STP", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes of arg1 and arg2 to arg0 + auxInt + aux. arg3=mem. + {name: "FMOVSstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "FMOVS", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "FMOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "FMOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + + // register indexed store + {name: "MOVBstoreidx", argLength: 4, reg: gpstore2, asm: "MOVB", typ: "Mem"}, // store 1 byte of arg2 to arg0 + arg1, arg3 = mem. + {name: "MOVHstoreidx", argLength: 4, reg: gpstore2, asm: "MOVH", typ: "Mem"}, // store 2 bytes of arg2 to arg0 + arg1, arg3 = mem. + {name: "MOVWstoreidx", argLength: 4, reg: gpstore2, asm: "MOVW", typ: "Mem"}, // store 4 bytes of arg2 to arg0 + arg1, arg3 = mem. + {name: "MOVDstoreidx", argLength: 4, reg: gpstore2, asm: "MOVD", typ: "Mem"}, // store 8 bytes of arg2 to arg0 + arg1, arg3 = mem. + {name: "FMOVSstoreidx", argLength: 4, reg: fpstore2, asm: "FMOVS", typ: "Mem"}, // store 32-bit float of arg2 to arg0 + arg1, arg3=mem. + {name: "FMOVDstoreidx", argLength: 4, reg: fpstore2, asm: "FMOVD", typ: "Mem"}, // store 64-bit float of arg2 to arg0 + arg1, arg3=mem. + + // shifted register indexed store + {name: "MOVHstoreidx2", argLength: 4, reg: gpstore2, asm: "MOVH", typ: "Mem"}, // store 2 bytes of arg2 to arg0 + arg1*2, arg3 = mem. + {name: "MOVWstoreidx4", argLength: 4, reg: gpstore2, asm: "MOVW", typ: "Mem"}, // store 4 bytes of arg2 to arg0 + arg1*4, arg3 = mem. + {name: "MOVDstoreidx8", argLength: 4, reg: gpstore2, asm: "MOVD", typ: "Mem"}, // store 8 bytes of arg2 to arg0 + arg1*8, arg3 = mem. 
+ {name: "FMOVSstoreidx4", argLength: 4, reg: fpstore2, asm: "FMOVS", typ: "Mem"}, // store 32-bit float of arg2 to arg0 + arg1*4, arg3=mem. + {name: "FMOVDstoreidx8", argLength: 4, reg: fpstore2, asm: "FMOVD", typ: "Mem"}, // store 64-bit float of arg2 to arg0 + arg1*8, arg3=mem. + + {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem. + {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem. + {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem. + {name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of zero to arg0 + auxInt + aux. arg1=mem. + {name: "MOVQstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "STP", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes of zero to arg0 + auxInt + aux. arg1=mem. + + // register indexed store zero + {name: "MOVBstorezeroidx", argLength: 3, reg: gpstore, asm: "MOVB", typ: "Mem"}, // store 1 byte of zero to arg0 + arg1, arg2 = mem. + {name: "MOVHstorezeroidx", argLength: 3, reg: gpstore, asm: "MOVH", typ: "Mem"}, // store 2 bytes of zero to arg0 + arg1, arg2 = mem. + {name: "MOVWstorezeroidx", argLength: 3, reg: gpstore, asm: "MOVW", typ: "Mem"}, // store 4 bytes of zero to arg0 + arg1, arg2 = mem. + {name: "MOVDstorezeroidx", argLength: 3, reg: gpstore, asm: "MOVD", typ: "Mem"}, // store 8 bytes of zero to arg0 + arg1, arg2 = mem. 
+ + // shifted register indexed store zero + {name: "MOVHstorezeroidx2", argLength: 3, reg: gpstore, asm: "MOVH", typ: "Mem"}, // store 2 bytes of zero to arg0 + arg1*2, arg2 = mem. + {name: "MOVWstorezeroidx4", argLength: 3, reg: gpstore, asm: "MOVW", typ: "Mem"}, // store 4 bytes of zero to arg0 + arg1*4, arg2 = mem. + {name: "MOVDstorezeroidx8", argLength: 3, reg: gpstore, asm: "MOVD", typ: "Mem"}, // store 8 bytes of zero to arg0 + arg1*8, arg2 = mem. + + {name: "FMOVDgpfp", argLength: 1, reg: gpfp, asm: "FMOVD"}, // move int64 to float64 (no conversion) + {name: "FMOVDfpgp", argLength: 1, reg: fpgp, asm: "FMOVD"}, // move float64 to int64 (no conversion) + {name: "FMOVSgpfp", argLength: 1, reg: gpfp, asm: "FMOVS"}, // move 32bits from int to float reg (no conversion) + {name: "FMOVSfpgp", argLength: 1, reg: fpgp, asm: "FMOVS"}, // move 32bits from float to int reg, zero extend (no conversion) + + // conversions + {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte + {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte + {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half + {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half + {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word + {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word + {name: "MOVDreg", argLength: 1, reg: gp11, asm: "MOVD"}, // move from arg0 + + {name: "MOVDnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register + + {name: "SCVTFWS", argLength: 1, reg: gpfp, asm: "SCVTFWS"}, // int32 -> float32 + {name: "SCVTFWD", argLength: 1, reg: gpfp, asm: "SCVTFWD"}, // int32 -> float64 + {name: "UCVTFWS", argLength: 1, reg: gpfp, 
asm: "UCVTFWS"}, // uint32 -> float32 + {name: "UCVTFWD", argLength: 1, reg: gpfp, asm: "UCVTFWD"}, // uint32 -> float64 + {name: "SCVTFS", argLength: 1, reg: gpfp, asm: "SCVTFS"}, // int64 -> float32 + {name: "SCVTFD", argLength: 1, reg: gpfp, asm: "SCVTFD"}, // int64 -> float64 + {name: "UCVTFS", argLength: 1, reg: gpfp, asm: "UCVTFS"}, // uint64 -> float32 + {name: "UCVTFD", argLength: 1, reg: gpfp, asm: "UCVTFD"}, // uint64 -> float64 + {name: "FCVTZSSW", argLength: 1, reg: fpgp, asm: "FCVTZSSW"}, // float32 -> int32 + {name: "FCVTZSDW", argLength: 1, reg: fpgp, asm: "FCVTZSDW"}, // float64 -> int32 + {name: "FCVTZUSW", argLength: 1, reg: fpgp, asm: "FCVTZUSW"}, // float32 -> uint32 + {name: "FCVTZUDW", argLength: 1, reg: fpgp, asm: "FCVTZUDW"}, // float64 -> uint32 + {name: "FCVTZSS", argLength: 1, reg: fpgp, asm: "FCVTZSS"}, // float32 -> int64 + {name: "FCVTZSD", argLength: 1, reg: fpgp, asm: "FCVTZSD"}, // float64 -> int64 + {name: "FCVTZUS", argLength: 1, reg: fpgp, asm: "FCVTZUS"}, // float32 -> uint64 + {name: "FCVTZUD", argLength: 1, reg: fpgp, asm: "FCVTZUD"}, // float64 -> uint64 + {name: "FCVTSD", argLength: 1, reg: fp11, asm: "FCVTSD"}, // float32 -> float64 + {name: "FCVTDS", argLength: 1, reg: fp11, asm: "FCVTDS"}, // float64 -> float32 + + // floating-point round to integral + {name: "FRINTAD", argLength: 1, reg: fp11, asm: "FRINTAD"}, + {name: "FRINTMD", argLength: 1, reg: fp11, asm: "FRINTMD"}, + {name: "FRINTND", argLength: 1, reg: fp11, asm: "FRINTND"}, + {name: "FRINTPD", argLength: 1, reg: fp11, asm: "FRINTPD"}, + {name: "FRINTZD", argLength: 1, reg: fp11, asm: "FRINTZD"}, + + // conditional instructions; auxint is + // one of the arm64 comparison pseudo-ops (LessThan, LessThanU, etc.) + {name: "CSEL", argLength: 3, reg: gp2flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? arg0 : arg1 + {name: "CSEL0", argLength: 2, reg: gp1flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? 
arg0 : 0 + {name: "CSINC", argLength: 3, reg: gp2flags1, asm: "CSINC", aux: "CCop"}, // auxint(flags) ? arg0 : arg1 + 1 + {name: "CSINV", argLength: 3, reg: gp2flags1, asm: "CSINV", aux: "CCop"}, // auxint(flags) ? arg0 : ^arg1 + {name: "CSNEG", argLength: 3, reg: gp2flags1, asm: "CSNEG", aux: "CCop"}, // auxint(flags) ? arg0 : -arg1 + {name: "CSETM", argLength: 1, reg: readflags, asm: "CSETM", aux: "CCop"}, // auxint(flags) ? -1 : 0 + + // function calls + {name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem + {name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem + + // pseudo-ops + {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem. + + {name: "Equal", argLength: 1, reg: readflags}, // bool, true flags encode x==y false otherwise. + {name: "NotEqual", argLength: 1, reg: readflags}, // bool, true flags encode x!=y false otherwise. + {name: "LessThan", argLength: 1, reg: readflags}, // bool, true flags encode signed xy false otherwise. + {name: "GreaterEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x>=y false otherwise. 
+ {name: "LessThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned xy false otherwise. + {name: "GreaterEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>=y false otherwise. + {name: "LessThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point xy false otherwise. + {name: "GreaterEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>=y false otherwise. + {name: "NotLessThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>=y || x is unordered with y, false otherwise. + {name: "NotLessEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>y || x is unordered with y, false otherwise. + {name: "NotGreaterThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<=y || x is unordered with y, false otherwise. + {name: "NotGreaterEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x=y but without honoring overflow, false otherwise. + + // duffzero + // arg0 = address of memory to zero + // arg1 = mem + // auxint = offset into duffzero code to start executing + // returns mem + // R20 changed as side effect + // R16 and R17 may be clobbered by linker trampoline. + { + name: "DUFFZERO", + aux: "Int64", + argLength: 2, + reg: regInfo{ + inputs: []regMask{buildReg("R20")}, + clobbers: buildReg("R16 R17 R20 R30"), + }, + faultOnNilArg0: true, + unsafePoint: true, // FP maintenance around DUFFZERO can be clobbered by interrupts + }, + + // large zeroing + // arg0 = address of memory to zero (in R16 aka arm64.REGRT1, changed as side effect) + // arg1 = address of the last 16-byte unit to zero + // arg2 = mem + // returns mem + // STP.P (ZR,ZR), 16(R16) + // CMP Rarg1, R16 + // BLE -2(PC) + // Note: the-end-of-the-memory may be not a valid pointer. it's a problem if it is spilled. + // the-end-of-the-memory - 16 is with the area to zero, ok to spill. 
+ { + name: "LoweredZero", + argLength: 3, + reg: regInfo{ + inputs: []regMask{buildReg("R16"), gp}, + clobbers: buildReg("R16"), + }, + clobberFlags: true, + faultOnNilArg0: true, + }, + + // duffcopy + // arg0 = address of dst memory (in R21, changed as side effect) + // arg1 = address of src memory (in R20, changed as side effect) + // arg2 = mem + // auxint = offset into duffcopy code to start executing + // returns mem + // R20, R21 changed as side effect + // R16 and R17 may be clobbered by linker trampoline. + { + name: "DUFFCOPY", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{buildReg("R21"), buildReg("R20")}, + clobbers: buildReg("R16 R17 R20 R21 R26 R30"), + }, + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts + }, + + // large move + // arg0 = address of dst memory (in R17 aka arm64.REGRT2, changed as side effect) + // arg1 = address of src memory (in R16 aka arm64.REGRT1, changed as side effect) + // arg2 = address of the last element of src + // arg3 = mem + // returns mem + // LDP.P 16(R16), (R25, Rtmp) + // STP.P (R25, Rtmp), 16(R17) + // CMP Rarg2, R16 + // BLE -3(PC) + // Note: the-end-of-src may be not a valid pointer. it's a problem if it is spilled. + // the-end-of-src - 16 is within the area to copy, ok to spill. + { + name: "LoweredMove", + argLength: 4, + reg: regInfo{ + inputs: []regMask{buildReg("R17"), buildReg("R16"), gp &^ buildReg("R25")}, + clobbers: buildReg("R16 R17 R25"), + }, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + }, + + // Scheduler ensures LoweredGetClosurePtr occurs only in entry block, + // and sorts it to the very beginning of the block to prevent other + // use of R26 (arm64.REGCTXT, the closure pointer) + {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R26")}}, zeroWidth: true}, + + // LoweredGetCallerSP returns the SP of the caller of the current function. 
arg0=mem + {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true}, + + // LoweredGetCallerPC evaluates to the PC to which its "caller" will return. + // I.e., if f calls g "calls" getcallerpc, + // the result should be the PC within f that g will return to. + // See runtime/stubs.go for a more detailed discussion. + {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, + + // Constant flag value. + // Note: there's an "unordered" outcome for floating-point + // comparisons, but we don't use such a beast yet. + // This op is for temporary use by rewrite rules. It + // cannot appear in the generated assembly. + {name: "FlagConstant", aux: "FlagConstant"}, + + // (InvertFlags (CMP a b)) == (CMP b a) + // InvertFlags is a pseudo-op which can't appear in assembly output. + {name: "InvertFlags", argLength: 1}, // reverse direction of arg0 + + // atomic loads. + // load from arg0. arg1=mem. auxint must be zero. + // returns so they can be properly ordered with other loads. + {name: "LDAR", argLength: 2, reg: gpload, asm: "LDAR", faultOnNilArg0: true}, + {name: "LDARB", argLength: 2, reg: gpload, asm: "LDARB", faultOnNilArg0: true}, + {name: "LDARW", argLength: 2, reg: gpload, asm: "LDARW", faultOnNilArg0: true}, + + // atomic stores. + // store arg1 to arg0. arg2=mem. returns memory. auxint must be zero. + {name: "STLRB", argLength: 3, reg: gpstore, asm: "STLRB", faultOnNilArg0: true, hasSideEffects: true}, + {name: "STLR", argLength: 3, reg: gpstore, asm: "STLR", faultOnNilArg0: true, hasSideEffects: true}, + {name: "STLRW", argLength: 3, reg: gpstore, asm: "STLRW", faultOnNilArg0: true, hasSideEffects: true}, + + // atomic exchange. + // store arg1 to arg0. arg2=mem. returns . auxint must be zero. 
+ // LDAXR (Rarg0), Rout + // STLXR Rarg1, (Rarg0), Rtmp + // CBNZ Rtmp, -2(PC) + {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // atomic exchange variant. + // store arg1 to arg0. arg2=mem. returns . auxint must be zero. + // SWPALD Rarg1, (Rarg0), Rout + {name: "LoweredAtomicExchange64Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicExchange32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true}, + + // atomic add. + // *arg0 += arg1. arg2=mem. returns . auxint must be zero. + // LDAXR (Rarg0), Rout + // ADD Rarg1, Rout + // STLXR Rout, (Rarg0), Rtmp + // CBNZ Rtmp, -3(PC) + {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // atomic add variant. + // *arg0 += arg1. arg2=mem. returns . auxint must be zero. + // LDADDAL (Rarg0), Rarg1, Rout + // ADD Rarg1, Rout + {name: "LoweredAtomicAdd64Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicAdd32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true}, + + // atomic compare and swap. + // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. auxint must be zero. 
+ // if *arg0 == arg1 { + // *arg0 = arg2 + // return (true, memory) + // } else { + // return (false, memory) + // } + // LDAXR (Rarg0), Rtmp + // CMP Rarg1, Rtmp + // BNE 3(PC) + // STLXR Rarg2, (Rarg0), Rtmp + // CBNZ Rtmp, -4(PC) + // CSET EQ, Rout + {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // atomic compare and swap variant. + // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. auxint must be zero. + // if *arg0 == arg1 { + // *arg0 = arg2 + // return (true, memory) + // } else { + // return (false, memory) + // } + // MOV Rarg1, Rtmp + // CASAL Rtmp, (Rarg0), Rarg2 + // CMP Rarg1, Rtmp + // CSET EQ, Rout + {name: "LoweredAtomicCas64Variant", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicCas32Variant", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // atomic and/or. + // *arg0 &= (|=) arg1. arg2=mem. returns . auxint must be zero. 
+ // LDAXR (Rarg0), Rout + // AND/OR Rarg1, Rout + // STLXR Rout, (Rarg0), Rtmp + // CBNZ Rtmp, -3(PC) + {name: "LoweredAtomicAnd8", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "AND", typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicAnd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "AND", typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicOr8", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "ORR", typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicOr32", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "ORR", typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // atomic and/or variant. + // *arg0 &= (|=) arg1. arg2=mem. returns . auxint must be zero. + // AND: + // MNV Rarg1, Rtemp + // LDANDALB Rtemp, (Rarg0), Rout + // AND Rarg1, Rout + // OR: + // LDORALB Rarg1, (Rarg0), Rout + // ORR Rarg1, Rout + {name: "LoweredAtomicAnd8Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicAnd32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicOr8Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicOr32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true}, + + // LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, auxint=# of buffer entries needed + // It saves all GP registers if necessary, + // but clobbers R30 (LR) because it's a call. + // R16 and R17 may be clobbered by linker trampoline. 
+ // Returns a pointer to a write barrier buffer in R25. + {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R16 R17 R30"), outputs: []regMask{buildReg("R25")}}, clobberFlags: true, aux: "Int64"}, + + // There are three of these functions so that they can have three different register inputs. + // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the + // default registers to match so we don't need to copy registers around unnecessarily. + {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go). + {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go). + {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r0, r1}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go). + + // Prefetch instruction + // Do prefetch arg0 address with option aux. arg0=addr, arg1=memory, aux=option. + {name: "PRFM", argLength: 2, aux: "Int64", reg: prefreg, asm: "PRFM", hasSideEffects: true}, + + // Publication barrier + {name: "DMB", argLength: 1, aux: "Int64", asm: "DMB", hasSideEffects: true}, // Do data barrier. arg0=memory, aux=option. 
+ } + + blocks := []blockData{ + {name: "EQ", controls: 1}, + {name: "NE", controls: 1}, + {name: "LT", controls: 1}, + {name: "LE", controls: 1}, + {name: "GT", controls: 1}, + {name: "GE", controls: 1}, + {name: "ULT", controls: 1}, + {name: "ULE", controls: 1}, + {name: "UGT", controls: 1}, + {name: "UGE", controls: 1}, + {name: "Z", controls: 1}, // Control == 0 (take a register instead of flags) + {name: "NZ", controls: 1}, // Control != 0 + {name: "ZW", controls: 1}, // Control == 0, 32-bit + {name: "NZW", controls: 1}, // Control != 0, 32-bit + {name: "TBZ", controls: 1, aux: "Int64"}, // Control & (1 << AuxInt) == 0 + {name: "TBNZ", controls: 1, aux: "Int64"}, // Control & (1 << AuxInt) != 0 + {name: "FLT", controls: 1}, + {name: "FLE", controls: 1}, + {name: "FGT", controls: 1}, + {name: "FGE", controls: 1}, + {name: "LTnoov", controls: 1}, // 'LT' but without honoring overflow + {name: "LEnoov", controls: 1}, // 'LE' but without honoring overflow + {name: "GTnoov", controls: 1}, // 'GT' but without honoring overflow + {name: "GEnoov", controls: 1}, // 'GE' but without honoring overflow + + // JUMPTABLE implements jump tables. + // Aux is the symbol (an *obj.LSym) for the jump table. + // control[0] is the index into the jump table. + // control[1] is the address of the jump table (the address of the symbol stored in Aux). 
+ {name: "JUMPTABLE", controls: 2, aux: "Sym"}, + } + + archs = append(archs, arch{ + name: "ARM64", + pkg: "cmd/internal/obj/arm64", + genfile: "../../arm64/ssa.go", + ops: ops, + blocks: blocks, + regnames: regNamesARM64, + ParamIntRegNames: "R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15", + ParamFloatRegNames: "F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15", + gpregmask: gp, + fpregmask: fp, + framepointerreg: -1, // not used + linkreg: int8(num["R30"]), + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/ARM64latelower.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/ARM64latelower.rules new file mode 100644 index 0000000000000000000000000000000000000000..e50d985aa0c8f71d8d49114f48677e66ff451b64 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/ARM64latelower.rules @@ -0,0 +1,87 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains rules used by the laterLower pass. +// These are often the exact inverse of rules in ARM64.rules. 
+ +(ADDconst [c] x) && !isARM64addcon(c) => (ADD x (MOVDconst [c])) +(SUBconst [c] x) && !isARM64addcon(c) => (SUB x (MOVDconst [c])) +(ANDconst [c] x) && !isARM64bitcon(uint64(c)) => (AND x (MOVDconst [c])) +(ORconst [c] x) && !isARM64bitcon(uint64(c)) => (OR x (MOVDconst [c])) +(XORconst [c] x) && !isARM64bitcon(uint64(c)) => (XOR x (MOVDconst [c])) +(TSTconst [c] x) && !isARM64bitcon(uint64(c)) => (TST x (MOVDconst [c])) +(TSTWconst [c] x) && !isARM64bitcon(uint64(c)|uint64(c)<<32) => (TSTW x (MOVDconst [int64(c)])) + +(CMPconst [c] x) && !isARM64addcon(c) => (CMP x (MOVDconst [c])) +(CMPWconst [c] x) && !isARM64addcon(int64(c)) => (CMPW x (MOVDconst [int64(c)])) +(CMNconst [c] x) && !isARM64addcon(c) => (CMN x (MOVDconst [c])) +(CMNWconst [c] x) && !isARM64addcon(int64(c)) => (CMNW x (MOVDconst [int64(c)])) + +(ADDSconstflags [c] x) && !isARM64addcon(c) => (ADDSflags x (MOVDconst [c])) + +// These rules remove unneeded sign/zero extensions. +// They occur in late lower because they rely on the fact +// that their arguments don't get rewritten to a non-extended opcode instead. 
+ +// Boolean-generating instructions (NOTE: NOT all boolean Values) always +// zero upper bit of the register; no need to zero-extend +(MOVBUreg x:((Equal|NotEqual|LessThan|LessThanU|LessThanF|LessEqual|LessEqualU|LessEqualF|GreaterThan|GreaterThanU|GreaterThanF|GreaterEqual|GreaterEqualU|GreaterEqualF) _)) => x + +// omit unsigned extension +(MOVWUreg x) && zeroUpper32Bits(x, 3) => x + +// don't extend after proper load +(MOVBreg x:(MOVBload _ _)) => (MOVDreg x) +(MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x) +(MOVHreg x:(MOVBload _ _)) => (MOVDreg x) +(MOVHreg x:(MOVBUload _ _)) => (MOVDreg x) +(MOVHreg x:(MOVHload _ _)) => (MOVDreg x) +(MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x) +(MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x) +(MOVWreg x:(MOVBload _ _)) => (MOVDreg x) +(MOVWreg x:(MOVBUload _ _)) => (MOVDreg x) +(MOVWreg x:(MOVHload _ _)) => (MOVDreg x) +(MOVWreg x:(MOVHUload _ _)) => (MOVDreg x) +(MOVWreg x:(MOVWload _ _)) => (MOVDreg x) +(MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x) +(MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x) +(MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x) +(MOVBreg x:(MOVBloadidx _ _ _)) => (MOVDreg x) +(MOVBUreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x) +(MOVHreg x:(MOVBloadidx _ _ _)) => (MOVDreg x) +(MOVHreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x) +(MOVHreg x:(MOVHloadidx _ _ _)) => (MOVDreg x) +(MOVHUreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x) +(MOVHUreg x:(MOVHUloadidx _ _ _)) => (MOVDreg x) +(MOVWreg x:(MOVBloadidx _ _ _)) => (MOVDreg x) +(MOVWreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x) +(MOVWreg x:(MOVHloadidx _ _ _)) => (MOVDreg x) +(MOVWreg x:(MOVHUloadidx _ _ _)) => (MOVDreg x) +(MOVWreg x:(MOVWloadidx _ _ _)) => (MOVDreg x) +(MOVWUreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x) +(MOVWUreg x:(MOVHUloadidx _ _ _)) => (MOVDreg x) +(MOVWUreg x:(MOVWUloadidx _ _ _)) => (MOVDreg x) +(MOVHreg x:(MOVHloadidx2 _ _ _)) => (MOVDreg x) +(MOVHUreg x:(MOVHUloadidx2 _ _ _)) => (MOVDreg x) +(MOVWreg x:(MOVHloadidx2 _ _ _)) => (MOVDreg x) +(MOVWreg x:(MOVHUloadidx2 _ 
_ _)) => (MOVDreg x) +(MOVWreg x:(MOVWloadidx4 _ _ _)) => (MOVDreg x) +(MOVWUreg x:(MOVHUloadidx2 _ _ _)) => (MOVDreg x) +(MOVWUreg x:(MOVWUloadidx4 _ _ _)) => (MOVDreg x) + +// fold double extensions +(MOVBreg x:(MOVBreg _)) => (MOVDreg x) +(MOVBUreg x:(MOVBUreg _)) => (MOVDreg x) +(MOVHreg x:(MOVBreg _)) => (MOVDreg x) +(MOVHreg x:(MOVBUreg _)) => (MOVDreg x) +(MOVHreg x:(MOVHreg _)) => (MOVDreg x) +(MOVHUreg x:(MOVBUreg _)) => (MOVDreg x) +(MOVHUreg x:(MOVHUreg _)) => (MOVDreg x) +(MOVWreg x:(MOVBreg _)) => (MOVDreg x) +(MOVWreg x:(MOVBUreg _)) => (MOVDreg x) +(MOVWreg x:(MOVHreg _)) => (MOVDreg x) +(MOVWreg x:(MOVWreg _)) => (MOVDreg x) +(MOVWUreg x:(MOVBUreg _)) => (MOVDreg x) +(MOVWUreg x:(MOVHUreg _)) => (MOVDreg x) +(MOVWUreg x:(MOVWUreg _)) => (MOVDreg x) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/ARMOps.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/ARMOps.go new file mode 100644 index 0000000000000000000000000000000000000000..39d24694e78fd58b06523be892cb494c4f200ed6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/ARMOps.go @@ -0,0 +1,600 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "strings" + +// Notes: +// - Integer types live in the low portion of registers. Upper portions are junk. +// - Boolean types use the low-order byte of a register. 0=false, 1=true. +// Upper bytes are junk. +// - *const instructions may use a constant larger than the instruction can encode. +// In this case the assembler expands to multiple instructions and uses tmp +// register (R11). + +// Suffixes encode the bit width of various instructions. 
+// W (word) = 32 bit +// H (half word) = 16 bit +// HU = 16 bit unsigned +// B (byte) = 8 bit +// BU = 8 bit unsigned +// F (float) = 32 bit float +// D (double) = 64 bit float + +var regNamesARM = []string{ + "R0", + "R1", + "R2", + "R3", + "R4", + "R5", + "R6", + "R7", + "R8", + "R9", + "g", // aka R10 + "R11", // tmp + "R12", + "SP", // aka R13 + "R14", // link + "R15", // pc + + "F0", + "F1", + "F2", + "F3", + "F4", + "F5", + "F6", + "F7", + "F8", + "F9", + "F10", + "F11", + "F12", + "F13", + "F14", + "F15", // tmp + + // If you add registers, update asyncPreempt in runtime. + + // pseudo-registers + "SB", +} + +func init() { + // Make map from reg names to reg integers. + if len(regNamesARM) > 64 { + panic("too many registers") + } + num := map[string]int{} + for i, name := range regNamesARM { + num[name] = i + } + buildReg := func(s string) regMask { + m := regMask(0) + for _, r := range strings.Split(s, " ") { + if n, ok := num[r]; ok { + m |= regMask(1) << uint(n) + continue + } + panic("register " + r + " not found") + } + return m + } + + // Common individual register masks + var ( + gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14") + gpg = gp | buildReg("g") + gpsp = gp | buildReg("SP") + gpspg = gpg | buildReg("SP") + gpspsbg = gpspg | buildReg("SB") + fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15") + callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g + r0 = buildReg("R0") + r1 = buildReg("R1") + r2 = buildReg("R2") + r3 = buildReg("R3") + r4 = buildReg("R4") + ) + // Common regInfo + var ( + gp01 = regInfo{inputs: nil, outputs: []regMask{gp}} + gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}} + gp11carry = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp, 0}} + gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}} + gp1flags = regInfo{inputs: []regMask{gpg}} + gp1flags1 = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}} + gp21 = regInfo{inputs: 
[]regMask{gpg, gpg}, outputs: []regMask{gp}} + gp21carry = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, 0}} + gp2flags = regInfo{inputs: []regMask{gpg, gpg}} + gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}} + gp22 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, gp}} + gp31 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}} + gp31carry = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp, 0}} + gp3flags = regInfo{inputs: []regMask{gp, gp, gp}} + gp3flags1 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}} + gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}} + gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}} + gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}} + gp2store = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}} + fp01 = regInfo{inputs: nil, outputs: []regMask{fp}} + fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}} + fp1flags = regInfo{inputs: []regMask{fp}} + fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}, clobbers: buildReg("F15")} // int-float conversion uses F15 as tmp + gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}, clobbers: buildReg("F15")} + fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}} + fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}} + fp2flags = regInfo{inputs: []regMask{fp, fp}} + fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}} + fpstore = regInfo{inputs: []regMask{gpspsbg, fp}} + readflags = regInfo{inputs: nil, outputs: []regMask{gp}} + ) + ops := []opData{ + // binary ops + {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1 + {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int32"}, // arg0 + auxInt + {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0 - arg1 + {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int32"}, // arg0 - auxInt + 
{name: "RSB", argLength: 2, reg: gp21, asm: "RSB"}, // arg1 - arg0 + {name: "RSBconst", argLength: 1, reg: gp11, asm: "RSB", aux: "Int32"}, // auxInt - arg0 + {name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true}, // arg0 * arg1 + {name: "HMUL", argLength: 2, reg: gp21, asm: "MULL", commutative: true}, // (arg0 * arg1) >> 32, signed + {name: "HMULU", argLength: 2, reg: gp21, asm: "MULLU", commutative: true}, // (arg0 * arg1) >> 32, unsigned + + // udiv runtime call for soft division + // output0 = arg0/arg1, output1 = arg0%arg1 + // see ../../../../../runtime/vlop_arm.s + { + name: "CALLudiv", + argLength: 2, + reg: regInfo{ + inputs: []regMask{buildReg("R1"), buildReg("R0")}, + outputs: []regMask{buildReg("R0"), buildReg("R1")}, + clobbers: buildReg("R2 R3 R12 R14"), // R14 is LR, R12 is linker trampoline scratch register + }, + clobberFlags: true, + typ: "(UInt32,UInt32)", + call: false, // TODO(mdempsky): Should this be true? + }, + + {name: "ADDS", argLength: 2, reg: gp21carry, asm: "ADD", commutative: true}, // arg0 + arg1, set carry flag + {name: "ADDSconst", argLength: 1, reg: gp11carry, asm: "ADD", aux: "Int32"}, // arg0 + auxInt, set carry flag + {name: "ADC", argLength: 3, reg: gp2flags1, asm: "ADC", commutative: true}, // arg0 + arg1 + carry, arg2=flags + {name: "ADCconst", argLength: 2, reg: gp1flags1, asm: "ADC", aux: "Int32"}, // arg0 + auxInt + carry, arg1=flags + {name: "SUBS", argLength: 2, reg: gp21carry, asm: "SUB"}, // arg0 - arg1, set carry flag + {name: "SUBSconst", argLength: 1, reg: gp11carry, asm: "SUB", aux: "Int32"}, // arg0 - auxInt, set carry flag + {name: "RSBSconst", argLength: 1, reg: gp11carry, asm: "RSB", aux: "Int32"}, // auxInt - arg0, set carry flag + {name: "SBC", argLength: 3, reg: gp2flags1, asm: "SBC"}, // arg0 - arg1 - carry, arg2=flags + {name: "SBCconst", argLength: 2, reg: gp1flags1, asm: "SBC", aux: "Int32"}, // arg0 - auxInt - carry, arg1=flags + {name: "RSCconst", argLength: 2, reg: gp1flags1, asm: 
"RSC", aux: "Int32"}, // auxInt - arg0 - carry, arg1=flags + + {name: "MULLU", argLength: 2, reg: gp22, asm: "MULLU", commutative: true}, // arg0 * arg1, high 32 bits in out0, low 32 bits in out1 + {name: "MULA", argLength: 3, reg: gp31, asm: "MULA"}, // arg0 * arg1 + arg2 + {name: "MULS", argLength: 3, reg: gp31, asm: "MULS"}, // arg2 - arg0 * arg1 + + {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1 + {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1 + {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1 + {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1 + {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1 + {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1 + {name: "NMULF", argLength: 2, reg: fp21, asm: "NMULF", commutative: true}, // -(arg0 * arg1) + {name: "NMULD", argLength: 2, reg: fp21, asm: "NMULD", commutative: true}, // -(arg0 * arg1) + {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1 + {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1 + + {name: "MULAF", argLength: 3, reg: fp31, asm: "MULAF", resultInArg0: true}, // arg0 + (arg1 * arg2) + {name: "MULAD", argLength: 3, reg: fp31, asm: "MULAD", resultInArg0: true}, // arg0 + (arg1 * arg2) + {name: "MULSF", argLength: 3, reg: fp31, asm: "MULSF", resultInArg0: true}, // arg0 - (arg1 * arg2) + {name: "MULSD", argLength: 3, reg: fp31, asm: "MULSD", resultInArg0: true}, // arg0 - (arg1 * arg2) + + // FMULAD only exists on platforms with the VFPv4 instruction set. + // Any use must be preceded by a successful check of runtime.arm_support_vfpv4. 
+ {name: "FMULAD", argLength: 3, reg: fp31, asm: "FMULAD", resultInArg0: true}, // arg0 + (arg1 * arg2) + + {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1 + {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int32"}, // arg0 & auxInt + {name: "OR", argLength: 2, reg: gp21, asm: "ORR", commutative: true}, // arg0 | arg1 + {name: "ORconst", argLength: 1, reg: gp11, asm: "ORR", aux: "Int32"}, // arg0 | auxInt + {name: "XOR", argLength: 2, reg: gp21, asm: "EOR", commutative: true}, // arg0 ^ arg1 + {name: "XORconst", argLength: 1, reg: gp11, asm: "EOR", aux: "Int32"}, // arg0 ^ auxInt + {name: "BIC", argLength: 2, reg: gp21, asm: "BIC"}, // arg0 &^ arg1 + {name: "BICconst", argLength: 1, reg: gp11, asm: "BIC", aux: "Int32"}, // arg0 &^ auxInt + + // bit extraction, AuxInt = Width<<8 | LSB + {name: "BFX", argLength: 1, reg: gp11, asm: "BFX", aux: "Int32"}, // extract W bits from bit L in arg0, then signed extend + {name: "BFXU", argLength: 1, reg: gp11, asm: "BFXU", aux: "Int32"}, // extract W bits from bit L in arg0, then unsigned extend + + // unary ops + {name: "MVN", argLength: 1, reg: gp11, asm: "MVN"}, // ^arg0 + + {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32 + {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64 + {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64 + {name: "SQRTF", argLength: 1, reg: fp11, asm: "SQRTF"}, // sqrt(arg0), float32 + {name: "ABSD", argLength: 1, reg: fp11, asm: "ABSD"}, // abs(arg0), float64 + + {name: "CLZ", argLength: 1, reg: gp11, asm: "CLZ"}, // count leading zero + {name: "REV", argLength: 1, reg: gp11, asm: "REV"}, // reverse byte order + {name: "REV16", argLength: 1, reg: gp11, asm: "REV16"}, // reverse byte order in 16-bit halfwords + {name: "RBIT", argLength: 1, reg: gp11, asm: "RBIT"}, // reverse bit order + + // shifts + {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << arg1, shift amount 
is mod 256 + {name: "SLLconst", argLength: 1, reg: gp11, asm: "SLL", aux: "Int32"}, // arg0 << auxInt, 0 <= auxInt < 32 + {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> arg1, unsigned, shift amount is mod 256 + {name: "SRLconst", argLength: 1, reg: gp11, asm: "SRL", aux: "Int32"}, // arg0 >> auxInt, unsigned, 0 <= auxInt < 32 + {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> arg1, signed, shift amount is mod 256 + {name: "SRAconst", argLength: 1, reg: gp11, asm: "SRA", aux: "Int32"}, // arg0 >> auxInt, signed, 0 <= auxInt < 32 + {name: "SRR", argLength: 2, reg: gp21}, // arg0 right rotate by arg1 bits + {name: "SRRconst", argLength: 1, reg: gp11, aux: "Int32"}, // arg0 right rotate by auxInt bits, 0 <= auxInt < 32 + + // auxInt for all of these satisfy 0 <= auxInt < 32 + {name: "ADDshiftLL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1<>auxInt, unsigned shift + {name: "ADDshiftRA", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, signed shift + {name: "SUBshiftLL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1<>auxInt, unsigned shift + {name: "SUBshiftRA", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, signed shift + {name: "RSBshiftLL", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1<>auxInt - arg0, unsigned shift + {name: "RSBshiftRA", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, signed shift + {name: "ANDshiftLL", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1<>auxInt), unsigned shift + {name: "ANDshiftRA", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1>>auxInt), signed shift + {name: "ORshiftLL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"}, // arg0 | arg1<>auxInt, unsigned shift + {name: "ORshiftRA", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"}, // arg0 | arg1>>auxInt, signed shift + {name: "XORshiftLL", argLength: 2, reg: gp21, asm: 
"EOR", aux: "Int32"}, // arg0 ^ arg1<>auxInt, unsigned shift + {name: "XORshiftRA", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1>>auxInt, signed shift + {name: "XORshiftRR", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ (arg1 right rotate by auxInt) + {name: "BICshiftLL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1<>auxInt), unsigned shift + {name: "BICshiftRA", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1>>auxInt), signed shift + {name: "MVNshiftLL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0<>auxInt), unsigned shift + {name: "MVNshiftRA", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0>>auxInt), signed shift + + {name: "ADCshiftLL", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1<>auxInt + carry, unsigned shift, arg2=flags + {name: "ADCshiftRA", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1>>auxInt + carry, signed shift, arg2=flags + {name: "SBCshiftLL", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1<>auxInt - carry, unsigned shift, arg2=flags + {name: "SBCshiftRA", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1>>auxInt - carry, signed shift, arg2=flags + {name: "RSCshiftLL", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1<>auxInt - arg0 - carry, unsigned shift, arg2=flags + {name: "RSCshiftRA", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1>>auxInt - arg0 - carry, signed shift, arg2=flags + + {name: "ADDSshiftLL", argLength: 2, reg: gp21carry, asm: "ADD", aux: "Int32"}, // arg0 + arg1<>auxInt, unsigned shift, set carry flag + {name: "ADDSshiftRA", argLength: 2, reg: gp21carry, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, signed shift, set carry flag + {name: "SUBSshiftLL", argLength: 2, reg: gp21carry, asm: "SUB", aux: "Int32"}, // arg0 - arg1<>auxInt, unsigned shift, set carry flag + {name: "SUBSshiftRA", 
argLength: 2, reg: gp21carry, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, signed shift, set carry flag + {name: "RSBSshiftLL", argLength: 2, reg: gp21carry, asm: "RSB", aux: "Int32"}, // arg1<>auxInt - arg0, unsigned shift, set carry flag + {name: "RSBSshiftRA", argLength: 2, reg: gp21carry, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, signed shift, set carry flag + + {name: "ADDshiftLLreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1<>arg2, unsigned shift + {name: "ADDshiftRAreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1>>arg2, signed shift + {name: "SUBshiftLLreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1<>arg2, unsigned shift + {name: "SUBshiftRAreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1>>arg2, signed shift + {name: "RSBshiftLLreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1<>arg2 - arg0, unsigned shift + {name: "RSBshiftRAreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1>>arg2 - arg0, signed shift + {name: "ANDshiftLLreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1<>arg2), unsigned shift + {name: "ANDshiftRAreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1>>arg2), signed shift + {name: "ORshiftLLreg", argLength: 3, reg: gp31, asm: "ORR"}, // arg0 | arg1<>arg2, unsigned shift + {name: "ORshiftRAreg", argLength: 3, reg: gp31, asm: "ORR"}, // arg0 | arg1>>arg2, signed shift + {name: "XORshiftLLreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1<>arg2, unsigned shift + {name: "XORshiftRAreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1>>arg2, signed shift + {name: "BICshiftLLreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1<>arg2), unsigned shift + {name: "BICshiftRAreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1>>arg2), signed shift + {name: "MVNshiftLLreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0<>arg1), unsigned shift + {name: "MVNshiftRAreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0>>arg1), signed shift + + {name: 
"ADCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1<>arg2 + carry, unsigned shift, arg3=flags + {name: "ADCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1>>arg2 + carry, signed shift, arg3=flags + {name: "SBCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1<>arg2 - carry, unsigned shift, arg3=flags + {name: "SBCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1>>arg2 - carry, signed shift, arg3=flags + {name: "RSCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1<>arg2 - arg0 - carry, unsigned shift, arg3=flags + {name: "RSCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1>>arg2 - arg0 - carry, signed shift, arg3=flags + + {name: "ADDSshiftLLreg", argLength: 3, reg: gp31carry, asm: "ADD"}, // arg0 + arg1<>arg2, unsigned shift, set carry flag + {name: "ADDSshiftRAreg", argLength: 3, reg: gp31carry, asm: "ADD"}, // arg0 + arg1>>arg2, signed shift, set carry flag + {name: "SUBSshiftLLreg", argLength: 3, reg: gp31carry, asm: "SUB"}, // arg0 - arg1<>arg2, unsigned shift, set carry flag + {name: "SUBSshiftRAreg", argLength: 3, reg: gp31carry, asm: "SUB"}, // arg0 - arg1>>arg2, signed shift, set carry flag + {name: "RSBSshiftLLreg", argLength: 3, reg: gp31carry, asm: "RSB"}, // arg1<>arg2 - arg0, unsigned shift, set carry flag + {name: "RSBSshiftRAreg", argLength: 3, reg: gp31carry, asm: "RSB"}, // arg1>>arg2 - arg0, signed shift, set carry flag + + // comparisons + {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1 + {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to auxInt + {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags", commutative: true}, // arg0 compare to -arg1, provided arg1 is not 1<<63 + {name: "CMNconst", argLength: 1, reg: gp1flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -auxInt + {name: "TST", argLength: 2, 
reg: gp2flags, asm: "TST", typ: "Flags", commutative: true}, // arg0 & arg1 compare to 0 + {name: "TSTconst", argLength: 1, reg: gp1flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & auxInt compare to 0 + {name: "TEQ", argLength: 2, reg: gp2flags, asm: "TEQ", typ: "Flags", commutative: true}, // arg0 ^ arg1 compare to 0 + {name: "TEQconst", argLength: 1, reg: gp1flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ auxInt compare to 0 + {name: "CMPF", argLength: 2, reg: fp2flags, asm: "CMPF", typ: "Flags"}, // arg0 compare to arg1, float32 + {name: "CMPD", argLength: 2, reg: fp2flags, asm: "CMPD", typ: "Flags"}, // arg0 compare to arg1, float64 + + {name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1<>auxInt, unsigned shift + {name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift + {name: "CMNshiftLL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -(arg1<>auxInt), unsigned shift + {name: "CMNshiftRA", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -(arg1>>auxInt), signed shift + {name: "TSTshiftLL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & (arg1<>auxInt) compare to 0, unsigned shift + {name: "TSTshiftRA", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & (arg1>>auxInt) compare to 0, signed shift + {name: "TEQshiftLL", argLength: 2, reg: gp2flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ (arg1<>auxInt) compare to 0, unsigned shift + {name: "TEQshiftRA", argLength: 2, reg: gp2flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ (arg1>>auxInt) compare to 0, signed shift + + {name: "CMPshiftLLreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1<>arg2, unsigned shift + {name: "CMPshiftRAreg", argLength: 3, reg: 
gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1>>arg2, signed shift + {name: "CMNshiftLLreg", argLength: 3, reg: gp3flags, asm: "CMN", typ: "Flags"}, // arg0 + (arg1<>arg2) compare to 0, unsigned shift + {name: "CMNshiftRAreg", argLength: 3, reg: gp3flags, asm: "CMN", typ: "Flags"}, // arg0 + (arg1>>arg2) compare to 0, signed shift + {name: "TSTshiftLLreg", argLength: 3, reg: gp3flags, asm: "TST", typ: "Flags"}, // arg0 & (arg1<>arg2) compare to 0, unsigned shift + {name: "TSTshiftRAreg", argLength: 3, reg: gp3flags, asm: "TST", typ: "Flags"}, // arg0 & (arg1>>arg2) compare to 0, signed shift + {name: "TEQshiftLLreg", argLength: 3, reg: gp3flags, asm: "TEQ", typ: "Flags"}, // arg0 ^ (arg1<>arg2) compare to 0, unsigned shift + {name: "TEQshiftRAreg", argLength: 3, reg: gp3flags, asm: "TEQ", typ: "Flags"}, // arg0 ^ (arg1>>arg2) compare to 0, signed shift + + {name: "CMPF0", argLength: 1, reg: fp1flags, asm: "CMPF", typ: "Flags"}, // arg0 compare to 0, float32 + {name: "CMPD0", argLength: 1, reg: fp1flags, asm: "CMPD", typ: "Flags"}, // arg0 compare to 0, float64 + + // moves + {name: "MOVWconst", argLength: 0, reg: gp01, aux: "Int32", asm: "MOVW", typ: "UInt32", rematerializeable: true}, // 32 low bits of auxint + {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float + {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float + + {name: "MOVWaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVW", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB + + {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. 
+ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + + {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. 
+ {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + + {name: "MOVWloadidx", argLength: 3, reg: gp2load, asm: "MOVW", typ: "UInt32"}, // load from arg0 + arg1. arg2=mem + {name: "MOVWloadshiftLL", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32", typ: "UInt32"}, // load from arg0 + arg1<>auxInt, unsigned shift. arg2=mem + {name: "MOVWloadshiftRA", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32", typ: "UInt32"}, // load from arg0 + arg1>>auxInt, signed shift. arg2=mem + {name: "MOVBUloadidx", argLength: 3, reg: gp2load, asm: "MOVBU", typ: "UInt8"}, // load from arg0 + arg1. arg2=mem + {name: "MOVBloadidx", argLength: 3, reg: gp2load, asm: "MOVB", typ: "Int8"}, // load from arg0 + arg1. arg2=mem + {name: "MOVHUloadidx", argLength: 3, reg: gp2load, asm: "MOVHU", typ: "UInt16"}, // load from arg0 + arg1. arg2=mem + {name: "MOVHloadidx", argLength: 3, reg: gp2load, asm: "MOVH", typ: "Int16"}, // load from arg0 + arg1. arg2=mem + + {name: "MOVWstoreidx", argLength: 4, reg: gp2store, asm: "MOVW", typ: "Mem"}, // store arg2 to arg0 + arg1. arg3=mem + {name: "MOVWstoreshiftLL", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32", typ: "Mem"}, // store arg2 to arg0 + arg1<>auxInt, unsigned shift. arg3=mem + {name: "MOVWstoreshiftRA", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32", typ: "Mem"}, // store arg2 to arg0 + arg1>>auxInt, signed shift. arg3=mem + {name: "MOVBstoreidx", argLength: 4, reg: gp2store, asm: "MOVB", typ: "Mem"}, // store arg2 to arg0 + arg1. arg3=mem + {name: "MOVHstoreidx", argLength: 4, reg: gp2store, asm: "MOVH", typ: "Mem"}, // store arg2 to arg0 + arg1. 
arg3=mem + + {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVBS"}, // move from arg0, sign-extended from byte + {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte + {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVHS"}, // move from arg0, sign-extended from half + {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half + {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0 + + {name: "MOVWnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register + + {name: "MOVWF", argLength: 1, reg: gpfp, asm: "MOVWF"}, // int32 -> float32 + {name: "MOVWD", argLength: 1, reg: gpfp, asm: "MOVWD"}, // int32 -> float64 + {name: "MOVWUF", argLength: 1, reg: gpfp, asm: "MOVWF"}, // uint32 -> float32, set U bit in the instruction + {name: "MOVWUD", argLength: 1, reg: gpfp, asm: "MOVWD"}, // uint32 -> float64, set U bit in the instruction + {name: "MOVFW", argLength: 1, reg: fpgp, asm: "MOVFW"}, // float32 -> int32 + {name: "MOVDW", argLength: 1, reg: fpgp, asm: "MOVDW"}, // float64 -> int32 + {name: "MOVFWU", argLength: 1, reg: fpgp, asm: "MOVFW"}, // float32 -> uint32, set U bit in the instruction + {name: "MOVDWU", argLength: 1, reg: fpgp, asm: "MOVDW"}, // float64 -> uint32, set U bit in the instruction + {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64 + {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32 + + // conditional instructions, for lowering shifts + {name: "CMOVWHSconst", argLength: 2, reg: gp1flags1, asm: "MOVW", aux: "Int32", resultInArg0: true}, // replace arg0 w/ const if flags indicates HS, arg1=flags + {name: "CMOVWLSconst", argLength: 2, reg: gp1flags1, asm: "MOVW", aux: "Int32", resultInArg0: true}, // replace arg0 w/ const if flags indicates LS, arg1=flags + {name: "SRAcond", argLength: 3, reg: 
gp2flags1, asm: "SRA"}, // arg0 >> 31 if flags indicates HS, arg0 >> arg1 otherwise, signed shift, arg2=flags + + // function calls + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R7"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + + // pseudo-ops + {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem. + + {name: "Equal", argLength: 1, reg: readflags}, // bool, true flags encode x==y false otherwise. + {name: "NotEqual", argLength: 1, reg: readflags}, // bool, true flags encode x!=y false otherwise. + {name: "LessThan", argLength: 1, reg: readflags}, // bool, true flags encode signed xy false otherwise. + {name: "GreaterEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x>=y false otherwise. + {name: "LessThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned xy false otherwise. + {name: "GreaterEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>=y false otherwise. 
+ + // duffzero (must be 4-byte aligned) + // arg0 = address of memory to zero (in R1, changed as side effect) + // arg1 = value to store (always zero) + // arg2 = mem + // auxint = offset into duffzero code to start executing + // returns mem + { + name: "DUFFZERO", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{buildReg("R1"), buildReg("R0")}, + clobbers: buildReg("R1 R12 R14"), // R14 is LR, R12 is linker trampoline scratch register + }, + faultOnNilArg0: true, + }, + + // duffcopy (must be 4-byte aligned) + // arg0 = address of dst memory (in R2, changed as side effect) + // arg1 = address of src memory (in R1, changed as side effect) + // arg2 = mem + // auxint = offset into duffcopy code to start executing + // returns mem + { + name: "DUFFCOPY", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{buildReg("R2"), buildReg("R1")}, + clobbers: buildReg("R0 R1 R2 R12 R14"), // R14 is LR, R12 is linker trampoline scratch register + }, + faultOnNilArg0: true, + faultOnNilArg1: true, + }, + + // large or unaligned zeroing + // arg0 = address of memory to zero (in R1, changed as side effect) + // arg1 = address of the last element to zero + // arg2 = value to store (always zero) + // arg3 = mem + // returns mem + // MOVW.P Rarg2, 4(R1) + // CMP R1, Rarg1 + // BLE -2(PC) + { + name: "LoweredZero", + aux: "Int64", + argLength: 4, + reg: regInfo{ + inputs: []regMask{buildReg("R1"), gp, gp}, + clobbers: buildReg("R1"), + }, + clobberFlags: true, + faultOnNilArg0: true, + }, + + // large or unaligned move + // arg0 = address of dst memory (in R2, changed as side effect) + // arg1 = address of src memory (in R1, changed as side effect) + // arg2 = address of the last element of src + // arg3 = mem + // returns mem + // MOVW.P 4(R1), Rtmp + // MOVW.P Rtmp, 4(R2) + // CMP R1, Rarg2 + // BLE -3(PC) + { + name: "LoweredMove", + aux: "Int64", + argLength: 4, + reg: regInfo{ + inputs: []regMask{buildReg("R2"), buildReg("R1"), gp}, + 
clobbers: buildReg("R1 R2"), + }, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + }, + + // Scheduler ensures LoweredGetClosurePtr occurs only in entry block, + // and sorts it to the very beginning of the block to prevent other + // use of R7 (arm.REGCTXT, the closure pointer) + {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R7")}}, zeroWidth: true}, + + // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem. + {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true}, + + // LoweredGetCallerPC evaluates to the PC to which its "caller" will return. + // I.e., if f calls g "calls" getcallerpc, + // the result should be the PC within f that g will return to. + // See runtime/stubs.go for a more detailed discussion. + {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, + + // There are three of these functions so that they can have three different register inputs. + // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the + // default registers to match so we don't need to copy registers around unnecessarily. + {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r0, r1}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + // Extend ops are the same as Bounds ops except the indexes are 64-bit. 
+ {name: "LoweredPanicExtendA", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r2, r3}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go). + {name: "LoweredPanicExtendB", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r1, r2}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go). + {name: "LoweredPanicExtendC", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r0, r1}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go). + + // Constant flag value. + // Note: there's an "unordered" outcome for floating-point + // comparisons, but we don't use such a beast yet. + // This op is for temporary use by rewrite rules. It + // cannot appear in the generated assembly. + {name: "FlagConstant", aux: "FlagConstant"}, + + // (InvertFlags (CMP a b)) == (CMP b a) + // InvertFlags is a pseudo-op which can't appear in assembly output. + {name: "InvertFlags", argLength: 1}, // reverse direction of arg0 + + // LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, auxint=# of buffer entries needed + // It saves all GP registers if necessary, + // but clobbers R14 (LR) because it's a call, and R12 which is linker trampoline scratch register. + // Returns a pointer to a write barrier buffer in R8. 
+ {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R12 R14"), outputs: []regMask{buildReg("R8")}}, clobberFlags: true, aux: "Int64"}} + + blocks := []blockData{ + {name: "EQ", controls: 1}, + {name: "NE", controls: 1}, + {name: "LT", controls: 1}, + {name: "LE", controls: 1}, + {name: "GT", controls: 1}, + {name: "GE", controls: 1}, + {name: "ULT", controls: 1}, + {name: "ULE", controls: 1}, + {name: "UGT", controls: 1}, + {name: "UGE", controls: 1}, + {name: "LTnoov", controls: 1}, // 'LT' but without honoring overflow + {name: "LEnoov", controls: 1}, // 'LE' but without honoring overflow + {name: "GTnoov", controls: 1}, // 'GT' but without honoring overflow + {name: "GEnoov", controls: 1}, // 'GE' but without honoring overflow + } + + archs = append(archs, arch{ + name: "ARM", + pkg: "cmd/internal/obj/arm", + genfile: "../../arm/ssa.go", + ops: ops, + blocks: blocks, + regnames: regNamesARM, + gpregmask: gp, + fpregmask: fp, + framepointerreg: -1, // not used + linkreg: int8(num["R14"]), + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/LOONG64.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/LOONG64.rules new file mode 100644 index 0000000000000000000000000000000000000000..2af95191137e6411c8c47e69c5ac8b5e7694d5bc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/LOONG64.rules @@ -0,0 +1,664 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +(Add(Ptr|64|32|16|8) ...) => (ADDV ...) +(Add(32|64)F ...) => (ADD(F|D) ...) + +(Sub(Ptr|64|32|16|8) ...) => (SUBV ...) +(Sub(32|64)F ...) => (SUB(F|D) ...) + +(Mul(64|32|16|8) ...) => (MULV ...) +(Mul(32|64)F ...) => (MUL(F|D) ...) 
+(Select0 (Mul64uhilo x y)) => (MULHVU x y) +(Select1 (Mul64uhilo x y)) => (MULV x y) +(Select0 (Mul64uover x y)) => (MULV x y) +(Select1 (Mul64uover x y)) => (SGTU (MULHVU x y) (MOVVconst [0])) + +(Hmul64 ...) => (MULHV ...) +(Hmul64u ...) => (MULHVU ...) +(Hmul32 x y) => (SRAVconst (MULV (SignExt32to64 x) (SignExt32to64 y)) [32]) +(Hmul32u x y) => (SRLVconst (MULV (ZeroExt32to64 x) (ZeroExt32to64 y)) [32]) + +(Div64 x y) => (DIVV x y) +(Div64u ...) => (DIVVU ...) +(Div32 x y) => (DIVV (SignExt32to64 x) (SignExt32to64 y)) +(Div32u x y) => (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)) +(Div16 x y) => (DIVV (SignExt16to64 x) (SignExt16to64 y)) +(Div16u x y) => (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)) +(Div8 x y) => (DIVV (SignExt8to64 x) (SignExt8to64 y)) +(Div8u x y) => (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)) +(Div(32|64)F ...) => (DIV(F|D) ...) + +(Mod64 x y) => (REMV x y) +(Mod64u ...) => (REMVU ...) +(Mod32 x y) => (REMV (SignExt32to64 x) (SignExt32to64 y)) +(Mod32u x y) => (REMVU (ZeroExt32to64 x) (ZeroExt32to64 y)) +(Mod16 x y) => (REMV (SignExt16to64 x) (SignExt16to64 y)) +(Mod16u x y) => (REMVU (ZeroExt16to64 x) (ZeroExt16to64 y)) +(Mod8 x y) => (REMV (SignExt8to64 x) (SignExt8to64 y)) +(Mod8u x y) => (REMVU (ZeroExt8to64 x) (ZeroExt8to64 y)) + +(Select0 (Add64carry x y c)) => (ADDV (ADDV x y) c) +(Select1 (Add64carry x y c)) => + (OR (SGTU x s:(ADDV x y)) (SGTU s (ADDV s c))) + +(Select0 (Sub64borrow x y c)) => (SUBV (SUBV x y) c) +(Select1 (Sub64borrow x y c)) => + (OR (SGTU s:(SUBV x y) x) (SGTU (SUBV s c) s)) + +// (x + y) / 2 with x>=y => (x - y) / 2 + y +(Avg64u x y) => (ADDV (SRLVconst (SUBV x y) [1]) y) + +(And(64|32|16|8) ...) => (AND ...) +(Or(64|32|16|8) ...) => (OR ...) +(Xor(64|32|16|8) ...) => (XOR ...) 
+ +// shifts +// hardware instruction uses only the low 6 bits of the shift +// we compare to 64 to ensure Go semantics for large shifts +(Lsh64x64 x y) => (MASKEQZ (SLLV x y) (SGTU (MOVVconst [64]) y)) +(Lsh64x32 x y) => (MASKEQZ (SLLV x (ZeroExt32to64 y)) (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) +(Lsh64x16 x y) => (MASKEQZ (SLLV x (ZeroExt16to64 y)) (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) +(Lsh64x8 x y) => (MASKEQZ (SLLV x (ZeroExt8to64 y)) (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) + +(Lsh32x64 x y) => (MASKEQZ (SLLV x y) (SGTU (MOVVconst [64]) y)) +(Lsh32x32 x y) => (MASKEQZ (SLLV x (ZeroExt32to64 y)) (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) +(Lsh32x16 x y) => (MASKEQZ (SLLV x (ZeroExt16to64 y)) (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) +(Lsh32x8 x y) => (MASKEQZ (SLLV x (ZeroExt8to64 y)) (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) + +(Lsh16x64 x y) => (MASKEQZ (SLLV x y) (SGTU (MOVVconst [64]) y)) +(Lsh16x32 x y) => (MASKEQZ (SLLV x (ZeroExt32to64 y)) (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) +(Lsh16x16 x y) => (MASKEQZ (SLLV x (ZeroExt16to64 y)) (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) +(Lsh16x8 x y) => (MASKEQZ (SLLV x (ZeroExt8to64 y)) (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) + +(Lsh8x64 x y) => (MASKEQZ (SLLV x y) (SGTU (MOVVconst [64]) y)) +(Lsh8x32 x y) => (MASKEQZ (SLLV x (ZeroExt32to64 y)) (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) +(Lsh8x16 x y) => (MASKEQZ (SLLV x (ZeroExt16to64 y)) (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) +(Lsh8x8 x y) => (MASKEQZ (SLLV x (ZeroExt8to64 y)) (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) + +(Rsh64Ux64 x y) => (MASKEQZ (SRLV x y) (SGTU (MOVVconst [64]) y)) +(Rsh64Ux32 x y) => (MASKEQZ (SRLV x (ZeroExt32to64 y)) (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) +(Rsh64Ux16 x y) => (MASKEQZ (SRLV x (ZeroExt16to64 y)) (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) +(Rsh64Ux8 x y) => (MASKEQZ (SRLV x (ZeroExt8to64 y)) (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) + +(Rsh32Ux64 x y) => (MASKEQZ (SRLV (ZeroExt32to64 x) y) (SGTU 
(MOVVconst [64]) y)) +(Rsh32Ux32 x y) => (MASKEQZ (SRLV (ZeroExt32to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) +(Rsh32Ux16 x y) => (MASKEQZ (SRLV (ZeroExt32to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) +(Rsh32Ux8 x y) => (MASKEQZ (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) + +(Rsh16Ux64 x y) => (MASKEQZ (SRLV (ZeroExt16to64 x) y) (SGTU (MOVVconst [64]) y)) +(Rsh16Ux32 x y) => (MASKEQZ (SRLV (ZeroExt16to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) +(Rsh16Ux16 x y) => (MASKEQZ (SRLV (ZeroExt16to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) +(Rsh16Ux8 x y) => (MASKEQZ (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) + +(Rsh8Ux64 x y) => (MASKEQZ (SRLV (ZeroExt8to64 x) y) (SGTU (MOVVconst [64]) y)) +(Rsh8Ux32 x y) => (MASKEQZ (SRLV (ZeroExt8to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) +(Rsh8Ux16 x y) => (MASKEQZ (SRLV (ZeroExt8to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) +(Rsh8Ux8 x y) => (MASKEQZ (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) + +(Rsh64x64 x y) => (SRAV x (OR (NEGV (SGTU y (MOVVconst [63]))) y)) +(Rsh64x32 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh64x16 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) +(Rsh64x8 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + +(Rsh32x64 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) +(Rsh32x32 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh32x16 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) +(Rsh32x8 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) 
(ZeroExt8to64 y))) + +(Rsh16x64 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) +(Rsh16x32 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh16x16 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) +(Rsh16x8 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + +(Rsh8x64 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) +(Rsh8x32 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh8x16 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) +(Rsh8x8 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + +// rotates +(RotateLeft8 x (MOVVconst [c])) => (Or8 (Lsh8x64 x (MOVVconst [c&7])) (Rsh8Ux64 x (MOVVconst [-c&7]))) +(RotateLeft16 x (MOVVconst [c])) => (Or16 (Lsh16x64 x (MOVVconst [c&15])) (Rsh16Ux64 x (MOVVconst [-c&15]))) +(RotateLeft32 x y) => (ROTR x (NEGV y)) +(RotateLeft64 x y) => (ROTRV x (NEGV y)) + +// unary ops +(Neg(64|32|16|8) ...) => (NEGV ...) +(Neg(32|64)F ...) => (NEG(F|D) ...) + +(Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x) + +(Sqrt ...) => (SQRTD ...) +(Sqrt32 ...) => (SQRTF ...) + +// boolean ops -- booleans are represented with 0=false, 1=true +(AndB ...) => (AND ...) +(OrB ...) => (OR ...) +(EqB x y) => (XOR (MOVVconst [1]) (XOR x y)) +(NeqB ...) => (XOR ...) +(Not x) => (XORconst [1] x) + +// constants +(Const(64|32|16|8) [val]) => (MOVVconst [int64(val)]) +(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)]) +(ConstNil) => (MOVVconst [0]) +(ConstBool [t]) => (MOVVconst [int64(b2i(t))]) + +(Slicemask x) => (SRAVconst (NEGV x) [63]) + +// truncations +// Because we ignore high parts of registers, truncates are just copies. +(Trunc16to8 ...) => (Copy ...) +(Trunc32to8 ...) 
=> (Copy ...) +(Trunc32to16 ...) => (Copy ...) +(Trunc64to8 ...) => (Copy ...) +(Trunc64to16 ...) => (Copy ...) +(Trunc64to32 ...) => (Copy ...) + +// Zero-/Sign-extensions +(ZeroExt8to16 ...) => (MOVBUreg ...) +(ZeroExt8to32 ...) => (MOVBUreg ...) +(ZeroExt16to32 ...) => (MOVHUreg ...) +(ZeroExt8to64 ...) => (MOVBUreg ...) +(ZeroExt16to64 ...) => (MOVHUreg ...) +(ZeroExt32to64 ...) => (MOVWUreg ...) + +(SignExt8to16 ...) => (MOVBreg ...) +(SignExt8to32 ...) => (MOVBreg ...) +(SignExt16to32 ...) => (MOVHreg ...) +(SignExt8to64 ...) => (MOVBreg ...) +(SignExt16to64 ...) => (MOVHreg ...) +(SignExt32to64 ...) => (MOVWreg ...) + +// float <=> int conversion +(Cvt32to32F ...) => (MOVWF ...) +(Cvt32to64F ...) => (MOVWD ...) +(Cvt64to32F ...) => (MOVVF ...) +(Cvt64to64F ...) => (MOVVD ...) +(Cvt32Fto32 ...) => (TRUNCFW ...) +(Cvt64Fto32 ...) => (TRUNCDW ...) +(Cvt32Fto64 ...) => (TRUNCFV ...) +(Cvt64Fto64 ...) => (TRUNCDV ...) +(Cvt32Fto64F ...) => (MOVFD ...) +(Cvt64Fto32F ...) => (MOVDF ...) + +(CvtBoolToUint8 ...) => (Copy ...) + +(Round(32|64)F ...) => (Copy ...) 
+ +// comparisons +(Eq8 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y))) +(Eq16 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y))) +(Eq32 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y))) +(Eq64 x y) => (SGTU (MOVVconst [1]) (XOR x y)) +(EqPtr x y) => (SGTU (MOVVconst [1]) (XOR x y)) +(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y)) + +(Neq8 x y) => (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0])) +(Neq16 x y) => (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0])) +(Neq32 x y) => (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0])) +(Neq64 x y) => (SGTU (XOR x y) (MOVVconst [0])) +(NeqPtr x y) => (SGTU (XOR x y) (MOVVconst [0])) +(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y)) + +(Less8 x y) => (SGT (SignExt8to64 y) (SignExt8to64 x)) +(Less16 x y) => (SGT (SignExt16to64 y) (SignExt16to64 x)) +(Less32 x y) => (SGT (SignExt32to64 y) (SignExt32to64 x)) +(Less64 x y) => (SGT y x) +(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN + +(Less8U x y) => (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x)) +(Less16U x y) => (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x)) +(Less32U x y) => (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x)) +(Less64U x y) => (SGTU y x) + +(Leq8 x y) => (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y))) +(Leq16 x y) => (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y))) +(Leq32 x y) => (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y))) +(Leq64 x y) => (XOR (MOVVconst [1]) (SGT x y)) +(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN + +(Leq8U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y))) +(Leq16U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y))) +(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))) +(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y)) + +(OffPtr [off] 
ptr:(SP)) => (MOVVaddr [int32(off)] ptr) +(OffPtr [off] ptr) => (ADDVconst [off] ptr) + +(Addr {sym} base) => (MOVVaddr {sym} base) +(LocalAddr {sym} base mem) && t.Elem().HasPointers() => (MOVVaddr {sym} (SPanchored base mem)) +(LocalAddr {sym} base _) && !t.Elem().HasPointers() => (MOVVaddr {sym} base) + +// loads +(Load ptr mem) && t.IsBoolean() => (MOVBUload ptr mem) +(Load ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem) +(Load ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem) +(Load ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem) +(Load ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem) +(Load ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem) +(Load ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem) +(Load ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVVload ptr mem) +(Load ptr mem) && is32BitFloat(t) => (MOVFload ptr mem) +(Load ptr mem) && is64BitFloat(t) => (MOVDload ptr mem) + +// stores +(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVVstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVFstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVDstore ptr val mem) + +// zeroing +(Zero [0] _ mem) => mem +(Zero [1] ptr mem) => (MOVBstore ptr (MOVVconst [0]) mem) +(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore ptr (MOVVconst [0]) mem) +(Zero [2] ptr mem) => + (MOVBstore [1] ptr (MOVVconst [0]) + (MOVBstore [0] ptr (MOVVconst [0]) mem)) +(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 => + (MOVWstore ptr (MOVVconst [0]) mem) +(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore [2] ptr (MOVVconst [0]) + (MOVHstore [0] ptr (MOVVconst [0]) mem)) 
+(Zero [4] ptr mem) => + (MOVBstore [3] ptr (MOVVconst [0]) + (MOVBstore [2] ptr (MOVVconst [0]) + (MOVBstore [1] ptr (MOVVconst [0]) + (MOVBstore [0] ptr (MOVVconst [0]) mem)))) +(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 => + (MOVVstore ptr (MOVVconst [0]) mem) +(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 => + (MOVWstore [4] ptr (MOVVconst [0]) + (MOVWstore [0] ptr (MOVVconst [0]) mem)) +(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore [6] ptr (MOVVconst [0]) + (MOVHstore [4] ptr (MOVVconst [0]) + (MOVHstore [2] ptr (MOVVconst [0]) + (MOVHstore [0] ptr (MOVVconst [0]) mem)))) + +(Zero [3] ptr mem) => + (MOVBstore [2] ptr (MOVVconst [0]) + (MOVBstore [1] ptr (MOVVconst [0]) + (MOVBstore [0] ptr (MOVVconst [0]) mem))) +(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore [4] ptr (MOVVconst [0]) + (MOVHstore [2] ptr (MOVVconst [0]) + (MOVHstore [0] ptr (MOVVconst [0]) mem))) +(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 => + (MOVWstore [8] ptr (MOVVconst [0]) + (MOVWstore [4] ptr (MOVVconst [0]) + (MOVWstore [0] ptr (MOVVconst [0]) mem))) +(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 => + (MOVVstore [8] ptr (MOVVconst [0]) + (MOVVstore [0] ptr (MOVVconst [0]) mem)) +(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 => + (MOVVstore [16] ptr (MOVVconst [0]) + (MOVVstore [8] ptr (MOVVconst [0]) + (MOVVstore [0] ptr (MOVVconst [0]) mem))) + +// medium zeroing uses a duff device +// 8, and 128 are magic constants, see runtime/mkduff.go +(Zero [s] {t} ptr mem) + && s%8 == 0 && s > 24 && s <= 8*128 + && t.Alignment()%8 == 0 && !config.noDuffDevice => + (DUFFZERO [8 * (128 - s/8)] ptr mem) + +// large or unaligned zeroing uses a loop +(Zero [s] {t} ptr mem) + && (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 => + (LoweredZero [t.Alignment()] + ptr + (ADDVconst ptr [s-moveSize(t.Alignment(), config)]) + mem) + +// moves +(Move [0] _ _ mem) => mem +(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem) +(Move [2] {t} 
dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore dst (MOVHload src mem) mem) +(Move [2] dst src mem) => + (MOVBstore [1] dst (MOVBload [1] src mem) + (MOVBstore dst (MOVBload src mem) mem)) +(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 => + (MOVWstore dst (MOVWload src mem) mem) +(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore [2] dst (MOVHload [2] src mem) + (MOVHstore dst (MOVHload src mem) mem)) +(Move [4] dst src mem) => + (MOVBstore [3] dst (MOVBload [3] src mem) + (MOVBstore [2] dst (MOVBload [2] src mem) + (MOVBstore [1] dst (MOVBload [1] src mem) + (MOVBstore dst (MOVBload src mem) mem)))) +(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 => + (MOVVstore dst (MOVVload src mem) mem) +(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 => + (MOVWstore [4] dst (MOVWload [4] src mem) + (MOVWstore dst (MOVWload src mem) mem)) +(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore [6] dst (MOVHload [6] src mem) + (MOVHstore [4] dst (MOVHload [4] src mem) + (MOVHstore [2] dst (MOVHload [2] src mem) + (MOVHstore dst (MOVHload src mem) mem)))) + +(Move [3] dst src mem) => + (MOVBstore [2] dst (MOVBload [2] src mem) + (MOVBstore [1] dst (MOVBload [1] src mem) + (MOVBstore dst (MOVBload src mem) mem))) +(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore [4] dst (MOVHload [4] src mem) + (MOVHstore [2] dst (MOVHload [2] src mem) + (MOVHstore dst (MOVHload src mem) mem))) +(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 => + (MOVWstore [8] dst (MOVWload [8] src mem) + (MOVWstore [4] dst (MOVWload [4] src mem) + (MOVWstore dst (MOVWload src mem) mem))) +(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 => + (MOVVstore [8] dst (MOVVload [8] src mem) + (MOVVstore dst (MOVVload src mem) mem)) +(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 => + (MOVVstore [16] dst (MOVVload [16] src mem) + (MOVVstore [8] dst (MOVVload [8] src mem) + (MOVVstore dst (MOVVload src mem) mem))) + +// medium move uses a duff 
device +(Move [s] {t} dst src mem) + && s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 + && !config.noDuffDevice && logLargeCopy(v, s) => + (DUFFCOPY [16 * (128 - s/8)] dst src mem) +// 16 and 128 are magic constants. 16 is the number of bytes to encode: +// MOVV (R1), R23 +// ADDV $8, R1 +// MOVV R23, (R2) +// ADDV $8, R2 +// and 128 is the number of such blocks. See runtime/duff_mips64.s:duffcopy. + +// large or unaligned move uses a loop +(Move [s] {t} dst src mem) + && s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 => + (LoweredMove [t.Alignment()] + dst + src + (ADDVconst src [s-moveSize(t.Alignment(), config)]) + mem) + +// calls +(StaticCall ...) => (CALLstatic ...) +(ClosureCall ...) => (CALLclosure ...) +(InterCall ...) => (CALLinter ...) +(TailCall ...) => (CALLtail ...) + +// atomic intrinsics +(AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...) +(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...) + +(AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64) ...) +(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...) + +(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...) + +(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...) + +(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem) +(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...) + +// checks +(NilCheck ...) => (LoweredNilCheck ...) +(IsNonNil ptr) => (SGTU ptr (MOVVconst [0])) +(IsInBounds idx len) => (SGTU len idx) +(IsSliceInBounds idx len) => (XOR (MOVVconst [1]) (SGTU idx len)) + +// pseudo-ops +(GetClosurePtr ...) => (LoweredGetClosurePtr ...) +(GetCallerSP ...) => (LoweredGetCallerSP ...) +(GetCallerPC ...) => (LoweredGetCallerPC ...) + +(If cond yes no) => (NE (MOVBUreg cond) yes no) + +// Write barrier. +(WB ...) => (LoweredWB ...) 
+ +(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem) + +(CondSelect x y cond) => (OR (MASKEQZ x cond) (MASKNEZ y cond)) + +// Optimizations + +// Absorb boolean tests into block +(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no) +(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no) +(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no) +(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no) +(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no) +(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no) +(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no) +(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no) +(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no) +(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no) +(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no) +(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no) +(NE (SGTUconst [1] x) yes no) => (EQ x yes no) +(EQ (SGTUconst [1] x) yes no) => (NE x yes no) +(NE (SGTU x (MOVVconst [0])) yes no) => (NE x yes no) +(EQ (SGTU x (MOVVconst [0])) yes no) => (EQ x yes no) +(NE (SGTconst [0] x) yes no) => (LTZ x yes no) +(EQ (SGTconst [0] x) yes no) => (GEZ x yes no) +(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no) +(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no) +(MOVBUreg x:((SGT|SGTU) _ _)) => x + +// fold offset into address +(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr) + +// fold address into load/store +// Do not fold global variable access in -dynlink mode, where it will be rewritten +// to use the GOT via REGTMP, which currently cannot handle large offset. 
+(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {sym} ptr mem) + +(MOV(B|H|W|V|F|D)store [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOV(B|H|W|V|F|D)store [off1+int32(off2)] {sym} ptr val mem) + +(MOV(B|H|W|V)storezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOV(B|H|W|V)storezero [off1+int32(off2)] {sym} ptr mem) + +(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) + && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + +(MOV(B|H|W|V|F|D)store [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) + && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOV(B|H|W|V|F|D)store [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + +(MOV(B|H|W|V)storezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) + && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOV(B|H|W|V)storezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + +(LoweredAtomicStore(32|64) ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero(32|64) ptr mem) +(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [int32(c)] ptr mem) +(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst64 [c] ptr mem) + +// don't extend after proper load +(MOVBreg x:(MOVBload _ _)) => (MOVVreg x) +(MOVBUreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVHreg x:(MOVBload _ _)) => (MOVVreg x) +(MOVHreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVHreg 
x:(MOVHload _ _)) => (MOVVreg x) +(MOVHUreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVHUreg x:(MOVHUload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVBload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVHload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVHUload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVWload _ _)) => (MOVVreg x) +(MOVWUreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVWUreg x:(MOVHUload _ _)) => (MOVVreg x) +(MOVWUreg x:(MOVWUload _ _)) => (MOVVreg x) + +// fold double extensions +(MOVBreg x:(MOVBreg _)) => (MOVVreg x) +(MOVBUreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVHreg x:(MOVBreg _)) => (MOVVreg x) +(MOVHreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVHreg x:(MOVHreg _)) => (MOVVreg x) +(MOVHUreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVHUreg x:(MOVHUreg _)) => (MOVVreg x) +(MOVWreg x:(MOVBreg _)) => (MOVVreg x) +(MOVWreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVWreg x:(MOVHreg _)) => (MOVVreg x) +(MOVWreg x:(MOVWreg _)) => (MOVVreg x) +(MOVWUreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVWUreg x:(MOVHUreg _)) => (MOVVreg x) +(MOVWUreg x:(MOVWUreg _)) => (MOVVreg x) + +// don't extend before store +(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] 
{sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem) + +// if a register move has only 1 use, just use the same register without emitting instruction +// MOVVnop doesn't emit instruction, only for ensuring the type. +(MOVVreg x) && x.Uses == 1 => (MOVVnop x) + +// fold constant into arithmetic ops +(ADDV x (MOVVconst [c])) && is32Bit(c) && !t.IsPtr() => (ADDVconst [c] x) +(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x) +(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x) +(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x) +(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x) +(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x) + +(SLLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0]) +(SRLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0]) +(SRAV x (MOVVconst [c])) && uint64(c)>=64 => (SRAVconst x [63]) +(SLLV x (MOVVconst [c])) => (SLLVconst x [c]) +(SRLV x (MOVVconst [c])) => (SRLVconst x [c]) +(SRAV x (MOVVconst [c])) => (SRAVconst x [c]) +(ROTR x (MOVVconst [c])) => (ROTRconst x [c&31]) +(ROTRV x (MOVVconst [c])) => (ROTRVconst x [c&63]) + +(SGT (MOVVconst [c]) x) && is32Bit(c) => (SGTconst [c] x) +(SGTU (MOVVconst [c]) x) && is32Bit(c) => (SGTUconst [c] x) + +// mul by constant +(MULV x (MOVVconst [-1])) => (NEGV x) +(MULV _ (MOVVconst [0])) => (MOVVconst [0]) +(MULV x (MOVVconst [1])) => x +(MULV x (MOVVconst [c])) && isPowerOfTwo64(c) => (SLLVconst [log64(c)] x) + +// div by constant +(DIVVU x (MOVVconst [1])) => x +(DIVVU x (MOVVconst [c])) && isPowerOfTwo64(c) => (SRLVconst [log64(c)] x) +(REMVU _ (MOVVconst [1])) => (MOVVconst [0]) // mod +(REMVU x (MOVVconst [c])) && isPowerOfTwo64(c) => (ANDconst [c-1] x) // mod + +// generic simplifications +(ADDV x (NEGV y)) => (SUBV x y) +(SUBV x x) => (MOVVconst [0]) +(SUBV (MOVVconst [0]) x) => (NEGV x) +(AND x x) => x +(OR x x) => x +(XOR x x) => (MOVVconst [0]) + +// remove redundant *const ops +(ADDVconst [0] x) => x 
+(SUBVconst [0] x) => x +(ANDconst [0] _) => (MOVVconst [0]) +(ANDconst [-1] x) => x +(ORconst [0] x) => x +(ORconst [-1] _) => (MOVVconst [-1]) +(XORconst [0] x) => x +(XORconst [-1] x) => (NORconst [0] x) +(MASKEQZ (MOVVconst [0]) cond) => (MOVVconst [0]) +(MASKNEZ (MOVVconst [0]) cond) => (MOVVconst [0]) +(MASKEQZ x (MOVVconst [c])) && c == 0 => (MOVVconst [0]) +(MASKEQZ x (MOVVconst [c])) && c != 0 => x + +// generic constant folding +(ADDVconst [c] (MOVVconst [d])) => (MOVVconst [c+d]) +(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) => (ADDVconst [c+d] x) +(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) => (ADDVconst [c-d] x) +(SUBVconst [c] (MOVVconst [d])) => (MOVVconst [d-c]) +(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x) +(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x) +(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d<<uint64(c)]) +(SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))]) +(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)]) +(MULV (MOVVconst [c]) (MOVVconst [d])) => (MOVVconst [c*d]) +(DIVV (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [c/d]) +(DIVVU (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [int64(uint64(c)/uint64(d))]) +(REMV (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [c%d]) // mod +(REMVU (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod +(ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d]) +(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x) +(ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d]) +(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x) +(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d]) +(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x) +(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)]) +(NEGV (MOVVconst [c])) => (MOVVconst [-c]) +(MOVBreg (MOVVconst [c])) => (MOVVconst [int64(int8(c))]) +(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))]) +(MOVHreg 
(MOVVconst [c])) => (MOVVconst [int64(int16(c))]) +(MOVHUreg (MOVVconst [c])) => (MOVVconst [int64(uint16(c))]) +(MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))]) +(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))]) +(MOVVreg (MOVVconst [c])) => (MOVVconst [c]) + +// constant comparisons +(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1]) +(SGTconst [c] (MOVVconst [d])) && c<=d => (MOVVconst [0]) +(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) => (MOVVconst [1]) +(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) => (MOVVconst [0]) + +// other known comparisons +(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVVconst [1]) +(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVVconst [0]) +(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVVconst [1]) +(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVVconst [0]) +(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) => (MOVVconst [1]) +(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVVconst [1]) +(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVVconst [0]) +(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVVconst [1]) +(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVVconst [0]) +(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) => (MOVVconst [1]) +(SGTconst [c] (MOVWUreg _)) && c < 0 => (MOVVconst [0]) +(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVVconst [1]) +(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) => (MOVVconst [1]) +(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1]) +(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1]) + +// absorb constants into branches +(EQ (MOVVconst [0]) yes no) => (First yes no) +(EQ (MOVVconst [c]) yes no) && c != 0 => (First no yes) +(NE (MOVVconst [0]) yes no) => (First no yes) +(NE (MOVVconst [c]) yes no) && c != 0 => (First yes no) +(LTZ (MOVVconst [c]) yes no) && c < 0 => (First yes no) +(LTZ (MOVVconst 
[c]) yes no) && c >= 0 => (First no yes) +(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no) +(LEZ (MOVVconst [c]) yes no) && c > 0 => (First no yes) +(GTZ (MOVVconst [c]) yes no) && c > 0 => (First yes no) +(GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes) +(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no) +(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes) + +// SGT/SGTU with known outcomes. +(SGT x x) => (MOVVconst [0]) +(SGTU x x) => (MOVVconst [0]) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go new file mode 100644 index 0000000000000000000000000000000000000000..3fbf5be499ec1126e35a2b589f414dd9d257d5b0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go @@ -0,0 +1,486 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "strings" + +// Notes: +// - Integer types live in the low portion of registers. Upper portions are junk. +// - Boolean types use the low-order byte of a register. 0=false, 1=true. +// Upper bytes are junk. +// - *const instructions may use a constant larger than the instruction can encode. +// In this case the assembler expands to multiple instructions and uses tmp +// register (R23). + +// Suffixes encode the bit width of various instructions. +// V (vlong) = 64 bit +// WU (word) = 32 bit unsigned +// W (word) = 32 bit +// H (half word) = 16 bit +// HU = 16 bit unsigned +// B (byte) = 8 bit +// BU = 8 bit unsigned +// F (float) = 32 bit float +// D (double) = 64 bit float + +// Note: registers not used in regalloc are not included in this list, +// so that regmask stays within int64 +// Be careful when hand coding regmasks. 
+var regNamesLOONG64 = []string{ + "R0", // constant 0 + "R1", + "SP", // aka R3 + "R4", + "R5", + "R6", + "R7", + "R8", + "R9", + "R10", + "R11", + "R12", + "R13", + "R14", + "R15", + "R16", + "R17", + "R18", + "R19", + "R20", + "R21", + "g", // aka R22 + "R23", + "R24", + "R25", + "R26", + "R27", + "R28", + "R29", + // R30 is REGTMP not used in regalloc + "R31", + + "F0", + "F1", + "F2", + "F3", + "F4", + "F5", + "F6", + "F7", + "F8", + "F9", + "F10", + "F11", + "F12", + "F13", + "F14", + "F15", + "F16", + "F17", + "F18", + "F19", + "F20", + "F21", + "F22", + "F23", + "F24", + "F25", + "F26", + "F27", + "F28", + "F29", + "F30", + "F31", + + // If you add registers, update asyncPreempt in runtime. + + // pseudo-registers + "SB", +} + +func init() { + // Make map from reg names to reg integers. + if len(regNamesLOONG64) > 64 { + panic("too many registers") + } + num := map[string]int{} + for i, name := range regNamesLOONG64 { + num[name] = i + } + buildReg := func(s string) regMask { + m := regMask(0) + for _, r := range strings.Split(s, " ") { + if n, ok := num[r]; ok { + m |= regMask(1) << uint(n) + continue + } + panic("register " + r + " not found") + } + return m + } + + // Common individual register masks + var ( + gp = buildReg("R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31") // R1 is LR, R2 is thread pointer, R3 is stack pointer, R22 is g, R30 is REGTMP + gpg = gp | buildReg("g") + gpsp = gp | buildReg("SP") + gpspg = gpg | buildReg("SP") + gpspsbg = gpspg | buildReg("SB") + fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31") + callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g + r1 = buildReg("R20") + r2 = buildReg("R21") + r3 = buildReg("R23") + r4 = buildReg("R24") + ) + // Common regInfo + var ( + gp01 = regInfo{inputs: nil, outputs: []regMask{gp}} + gp11 = regInfo{inputs: 
[]regMask{gpg}, outputs: []regMask{gp}} + gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}} + gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}} + gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}} + gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}} + gpstore0 = regInfo{inputs: []regMask{gpspsbg}} + gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}} + gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}} + fp01 = regInfo{inputs: nil, outputs: []regMask{fp}} + fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}} + fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}} + fp2flags = regInfo{inputs: []regMask{fp, fp}} + fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}} + fpstore = regInfo{inputs: []regMask{gpspsbg, fp}} + readflags = regInfo{inputs: nil, outputs: []regMask{gp}} + ) + ops := []opData{ + // binary ops + {name: "ADDV", argLength: 2, reg: gp21, asm: "ADDVU", commutative: true}, // arg0 + arg1 + {name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"}, // arg0 + auxInt. auxInt is 32-bit, also in other *const ops. 
+ {name: "SUBV", argLength: 2, reg: gp21, asm: "SUBVU"}, // arg0 - arg1 + {name: "SUBVconst", argLength: 1, reg: gp11, asm: "SUBVU", aux: "Int64"}, // arg0 - auxInt + + {name: "MULV", argLength: 2, reg: gp21, asm: "MULV", commutative: true, typ: "Int64"}, // arg0 * arg1 + {name: "MULHV", argLength: 2, reg: gp21, asm: "MULHV", commutative: true, typ: "Int64"}, // (arg0 * arg1) >> 64, signed + {name: "MULHVU", argLength: 2, reg: gp21, asm: "MULHVU", commutative: true, typ: "UInt64"}, // (arg0 * arg1) >> 64, unsigned + {name: "DIVV", argLength: 2, reg: gp21, asm: "DIVV", typ: "Int64"}, // arg0 / arg1, signed + {name: "DIVVU", argLength: 2, reg: gp21, asm: "DIVVU", typ: "UInt64"}, // arg0 / arg1, unsigned + {name: "REMV", argLength: 2, reg: gp21, asm: "REMV", typ: "Int64"}, // arg0 % arg1, signed + {name: "REMVU", argLength: 2, reg: gp21, asm: "REMVU", typ: "UInt64"}, // arg0 % arg1, unsigned + + {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1 + {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1 + {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1 + {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1 + {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1 + {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1 + {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1 + {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1 + + {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1 + {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64"}, // arg0 & auxInt + {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1 + {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0 | auxInt + {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, typ: "UInt64"}, // arg0 ^ 
arg1 + {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64", typ: "UInt64"}, // arg0 ^ auxInt + {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1) + {name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int64"}, // ^(arg0 | auxInt) + + {name: "NEGV", argLength: 1, reg: gp11}, // -arg0 + {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32 + {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64 + {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64 + {name: "SQRTF", argLength: 1, reg: fp11, asm: "SQRTF"}, // sqrt(arg0), float32 + + {name: "MASKEQZ", argLength: 2, reg: gp21, asm: "MASKEQZ"}, // returns 0 if arg1 == 0, otherwise returns arg0 + {name: "MASKNEZ", argLength: 2, reg: gp21, asm: "MASKNEZ"}, // returns 0 if arg1 != 0, otherwise returns arg0 + + // shifts + {name: "SLLV", argLength: 2, reg: gp21, asm: "SLLV"}, // arg0 << arg1, shift amount is mod 64 + {name: "SLLVconst", argLength: 1, reg: gp11, asm: "SLLV", aux: "Int64"}, // arg0 << auxInt + {name: "SRLV", argLength: 2, reg: gp21, asm: "SRLV"}, // arg0 >> arg1, unsigned, shift amount is mod 64 + {name: "SRLVconst", argLength: 1, reg: gp11, asm: "SRLV", aux: "Int64"}, // arg0 >> auxInt, unsigned + {name: "SRAV", argLength: 2, reg: gp21, asm: "SRAV"}, // arg0 >> arg1, signed, shift amount is mod 64 + {name: "SRAVconst", argLength: 1, reg: gp11, asm: "SRAV", aux: "Int64"}, // arg0 >> auxInt, signed + {name: "ROTR", argLength: 2, reg: gp21, asm: "ROTR"}, // arg0 right rotate by (arg1 mod 32) bits + {name: "ROTRV", argLength: 2, reg: gp21, asm: "ROTRV"}, // arg0 right rotate by (arg1 mod 64) bits + {name: "ROTRconst", argLength: 1, reg: gp11, asm: "ROTR", aux: "Int64"}, // uint32(arg0) right rotate by auxInt bits, auxInt should be in the range 0 to 31. 
+ {name: "ROTRVconst", argLength: 1, reg: gp11, asm: "ROTRV", aux: "Int64"}, // arg0 right rotate by auxInt bits, auxInt should be in the range 0 to 63. + + // comparisons + {name: "SGT", argLength: 2, reg: gp21, asm: "SGT", typ: "Bool"}, // 1 if arg0 > arg1 (signed), 0 otherwise + {name: "SGTconst", argLength: 1, reg: gp11, asm: "SGT", aux: "Int64", typ: "Bool"}, // 1 if auxInt > arg0 (signed), 0 otherwise + {name: "SGTU", argLength: 2, reg: gp21, asm: "SGTU", typ: "Bool"}, // 1 if arg0 > arg1 (unsigned), 0 otherwise + {name: "SGTUconst", argLength: 1, reg: gp11, asm: "SGTU", aux: "Int64", typ: "Bool"}, // 1 if auxInt > arg0 (unsigned), 0 otherwise + + {name: "CMPEQF", argLength: 2, reg: fp2flags, asm: "CMPEQF", typ: "Flags"}, // flags=true if arg0 = arg1, float32 + {name: "CMPEQD", argLength: 2, reg: fp2flags, asm: "CMPEQD", typ: "Flags"}, // flags=true if arg0 = arg1, float64 + {name: "CMPGEF", argLength: 2, reg: fp2flags, asm: "CMPGEF", typ: "Flags"}, // flags=true if arg0 >= arg1, float32 + {name: "CMPGED", argLength: 2, reg: fp2flags, asm: "CMPGED", typ: "Flags"}, // flags=true if arg0 >= arg1, float64 + {name: "CMPGTF", argLength: 2, reg: fp2flags, asm: "CMPGTF", typ: "Flags"}, // flags=true if arg0 > arg1, float32 + {name: "CMPGTD", argLength: 2, reg: fp2flags, asm: "CMPGTD", typ: "Flags"}, // flags=true if arg0 > arg1, float64 + + // moves + {name: "MOVVconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVV", typ: "UInt64", rematerializeable: true}, // auxint + {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float + {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float + + {name: "MOVVaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVV", rematerializeable: true, symEffect: 
"Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB + + {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVWUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVWU", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVVload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVV", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + + {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem. 
+ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVVstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + + {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem. + {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem. + {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem. + {name: "MOVVstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of zero to arg0 + auxInt + aux. arg1=mem. 
+ + // conversions + {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte + {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte + {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half + {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half + {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word + {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word + {name: "MOVVreg", argLength: 1, reg: gp11, asm: "MOVV"}, // move from arg0 + + {name: "MOVVnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register + + {name: "MOVWF", argLength: 1, reg: fp11, asm: "MOVWF"}, // int32 -> float32 + {name: "MOVWD", argLength: 1, reg: fp11, asm: "MOVWD"}, // int32 -> float64 + {name: "MOVVF", argLength: 1, reg: fp11, asm: "MOVVF"}, // int64 -> float32 + {name: "MOVVD", argLength: 1, reg: fp11, asm: "MOVVD"}, // int64 -> float64 + {name: "TRUNCFW", argLength: 1, reg: fp11, asm: "TRUNCFW"}, // float32 -> int32 + {name: "TRUNCDW", argLength: 1, reg: fp11, asm: "TRUNCDW"}, // float64 -> int32 + {name: "TRUNCFV", argLength: 1, reg: fp11, asm: "TRUNCFV"}, // float32 -> int64 + {name: "TRUNCDV", argLength: 1, reg: fp11, asm: "TRUNCDV"}, // float64 -> int64 + {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64 + {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32 + + // function calls + {name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). 
last arg=mem, auxint=argsize, returns mem + {name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("R29"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem + + // duffzero + // arg0 = address of memory to zero + // arg1 = mem + // auxint = offset into duffzero code to start executing + // returns mem + // R20 aka loong64.REGRT1 changed as side effect + { + name: "DUFFZERO", + aux: "Int64", + argLength: 2, + reg: regInfo{ + inputs: []regMask{buildReg("R20")}, + clobbers: buildReg("R20 R1"), + }, + typ: "Mem", + faultOnNilArg0: true, + }, + + // duffcopy + // arg0 = address of dst memory (in R21, changed as side effect) + // arg1 = address of src memory (in R20, changed as side effect) + // arg2 = mem + // auxint = offset into duffcopy code to start executing + // returns mem + { + name: "DUFFCOPY", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{buildReg("R21"), buildReg("R20")}, + clobbers: buildReg("R20 R21 R1"), + }, + typ: "Mem", + faultOnNilArg0: true, + faultOnNilArg1: true, + }, + + // large or unaligned zeroing + // arg0 = address of memory to zero (in R20, changed as side effect) + // arg1 = address of the last element to zero + // arg2 = mem + // auxint = alignment + // returns mem + // MOVx R0, (R20) + // ADDV $sz, R20 + // BGEU Rarg1, R20, -2(PC) + { + name: "LoweredZero", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: 
[]regMask{buildReg("R20"), gp}, + clobbers: buildReg("R20"), + }, + typ: "Mem", + faultOnNilArg0: true, + }, + + // large or unaligned move + // arg0 = address of dst memory (in R21, changed as side effect) + // arg1 = address of src memory (in R20, changed as side effect) + // arg2 = address of the last element of src + // arg3 = mem + // auxint = alignment + // returns mem + // MOVx (R20), Rtmp + // MOVx Rtmp, (R21) + // ADDV $sz, R20 + // ADDV $sz, R21 + // BGEU Rarg2, R20, -4(PC) + { + name: "LoweredMove", + aux: "Int64", + argLength: 4, + reg: regInfo{ + inputs: []regMask{buildReg("R21"), buildReg("R20"), gp}, + clobbers: buildReg("R20 R21"), + }, + typ: "Mem", + faultOnNilArg0: true, + faultOnNilArg1: true, + }, + + // atomic loads. + // load from arg0. arg1=mem. + // returns so they can be properly ordered with other loads. + {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true}, + {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true}, + {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true}, + + // atomic stores. + // store arg1 to arg0. arg2=mem. returns memory. + {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, + // store zero to arg0. arg1=mem. returns memory. + {name: "LoweredAtomicStorezero32", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicStorezero64", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true}, + + // atomic exchange. + // store arg1 to arg0. arg2=mem. returns . 
+ // DBAR + // LL (Rarg0), Rout + // MOVV Rarg1, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // DBAR + {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // atomic add. + // *arg0 += arg1. arg2=mem. returns . + // DBAR + // LL (Rarg0), Rout + // ADDV Rarg1, Rout, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // DBAR + // ADDV Rarg1, Rout + {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + // *arg0 += auxint. arg1=mem. returns . auxint is 32-bit. + {name: "LoweredAtomicAddconst32", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int32", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicAddconst64", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int64", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // atomic compare and swap. + // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. 
+ // if *arg0 == arg1 { + // *arg0 = arg2 + // return (true, memory) + // } else { + // return (false, memory) + // } + // DBAR + // MOVV $0, Rout + // LL (Rarg0), Rtmp + // BNE Rtmp, Rarg1, 4(PC) + // MOVV Rarg2, Rout + // SC Rout, (Rarg0) + // BEQ Rout, -4(PC) + // DBAR + {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // pseudo-ops + {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem. + + {name: "FPFlagTrue", argLength: 1, reg: readflags}, // bool, true if FP flag is true + {name: "FPFlagFalse", argLength: 1, reg: readflags}, // bool, true if FP flag is false + + // Scheduler ensures LoweredGetClosurePtr occurs only in entry block, + // and sorts it to the very beginning of the block to prevent other + // use of R22 (loong64.REGCTXT, the closure pointer) + {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R29")}}, zeroWidth: true}, + + // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem. + {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true}, + + // LoweredGetCallerPC evaluates to the PC to which its "caller" will return. + // I.e., if f calls g "calls" getcallerpc, + // the result should be the PC within f that g will return to. + // See runtime/stubs.go for a more detailed discussion. + {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, + + // LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, auxint=# of buffer entries needed + // It saves all GP registers if necessary, + // but clobbers R1 (LR) because it's a call + // and R30 (REGTMP). + // Returns a pointer to a write barrier buffer in R29. 
+ {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R1"), outputs: []regMask{buildReg("R29")}}, clobberFlags: true, aux: "Int64"}, + + // There are three of these functions so that they can have three different register inputs. + // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the + // default registers to match so we don't need to copy registers around unnecessarily. + {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). 
+ } + + blocks := []blockData{ + {name: "EQ", controls: 1}, + {name: "NE", controls: 1}, + {name: "LTZ", controls: 1}, // < 0 + {name: "LEZ", controls: 1}, // <= 0 + {name: "GTZ", controls: 1}, // > 0 + {name: "GEZ", controls: 1}, // >= 0 + {name: "FPT", controls: 1}, // FP flag is true + {name: "FPF", controls: 1}, // FP flag is false + } + + archs = append(archs, arch{ + name: "LOONG64", + pkg: "cmd/internal/obj/loong64", + genfile: "../../loong64/ssa.go", + ops: ops, + blocks: blocks, + regnames: regNamesLOONG64, + // TODO: support register ABI on loong64 + ParamIntRegNames: "R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19", + ParamFloatRegNames: "F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15", + gpregmask: gp, + fpregmask: fp, + framepointerreg: -1, // not used + linkreg: int8(num["R1"]), + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/MIPS.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/MIPS.rules new file mode 100644 index 0000000000000000000000000000000000000000..d6ae0101cbd4b013b395df043145f8bc6c6706c6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/MIPS.rules @@ -0,0 +1,716 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +(Add(Ptr|32|16|8) ...) => (ADD ...) +(Add(32|64)F ...) => (ADD(F|D) ...) + +(Select0 (Add32carry x y)) => (ADD x y) +(Select1 (Add32carry x y)) => (SGTU x (ADD x y)) +(Add32withcarry x y c) => (ADD c (ADD x y)) + +(Sub(Ptr|32|16|8) ...) => (SUB ...) +(Sub(32|64)F ...) => (SUB(F|D) ...) + +(Select0 (Sub32carry x y)) => (SUB x y) +(Select1 (Sub32carry x y)) => (SGTU (SUB x y) x) +(Sub32withcarry x y c) => (SUB (SUB x y) c) + +(Mul(32|16|8) ...) => (MUL ...) +(Mul(32|64)F ...) => (MUL(F|D) ...) + +(Hmul(32|32u) x y) => (Select0 (MUL(T|TU) x y)) +(Mul32uhilo ...) => (MULTU ...) 
+ +(Div32 x y) => (Select1 (DIV x y)) +(Div32u x y) => (Select1 (DIVU x y)) +(Div16 x y) => (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y))) +(Div16u x y) => (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y))) +(Div8 x y) => (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y))) +(Div8u x y) => (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y))) +(Div(32|64)F ...) => (DIV(F|D) ...) + +(Mod32 x y) => (Select0 (DIV x y)) +(Mod32u x y) => (Select0 (DIVU x y)) +(Mod16 x y) => (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y))) +(Mod16u x y) => (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y))) +(Mod8 x y) => (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y))) +(Mod8u x y) => (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y))) + +// math package intrinsics +(Abs ...) => (ABSD ...) + +// (x + y) / 2 with x>=y becomes (x - y) / 2 + y +(Avg32u x y) => (ADD (SRLconst (SUB x y) [1]) y) + +(And(32|16|8) ...) => (AND ...) +(Or(32|16|8) ...) => (OR ...) +(Xor(32|16|8) ...) => (XOR ...) + +// constant shifts +// generic opt rewrites all constant shifts to shift by Const64 +(Lsh32x64 x (Const64 [c])) && uint32(c) < 32 => (SLLconst x [int32(c)]) +(Rsh32x64 x (Const64 [c])) && uint32(c) < 32 => (SRAconst x [int32(c)]) +(Rsh32Ux64 x (Const64 [c])) && uint32(c) < 32 => (SRLconst x [int32(c)]) +(Lsh16x64 x (Const64 [c])) && uint32(c) < 16 => (SLLconst x [int32(c)]) +(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 => (SRAconst (SLLconst x [16]) [int32(c+16)]) +(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 => (SRLconst (SLLconst x [16]) [int32(c+16)]) +(Lsh8x64 x (Const64 [c])) && uint32(c) < 8 => (SLLconst x [int32(c)]) +(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 => (SRAconst (SLLconst x [24]) [int32(c+24)]) +(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 => (SRLconst (SLLconst x [24]) [int32(c+24)]) + +// large constant shifts +(Lsh32x64 _ (Const64 [c])) && uint32(c) >= 32 => (MOVWconst [0]) +(Rsh32Ux64 _ (Const64 [c])) && uint32(c) >= 32 => (MOVWconst [0]) +(Lsh16x64 _ (Const64 [c])) && 
uint32(c) >= 16 => (MOVWconst [0]) +(Rsh16Ux64 _ (Const64 [c])) && uint32(c) >= 16 => (MOVWconst [0]) +(Lsh8x64 _ (Const64 [c])) && uint32(c) >= 8 => (MOVWconst [0]) +(Rsh8Ux64 _ (Const64 [c])) && uint32(c) >= 8 => (MOVWconst [0]) + +// large constant signed right shift, we leave the sign bit +(Rsh32x64 x (Const64 [c])) && uint32(c) >= 32 => (SRAconst x [31]) +(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 => (SRAconst (SLLconst x [16]) [31]) +(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 => (SRAconst (SLLconst x [24]) [31]) + +// shifts +// hardware instruction uses only the low 5 bits of the shift +// we compare to 32 to ensure Go semantics for large shifts +(Lsh32x32 x y) => (CMOVZ (SLL x y) (MOVWconst [0]) (SGTUconst [32] y)) +(Lsh32x16 x y) => (CMOVZ (SLL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y))) +(Lsh32x8 x y) => (CMOVZ (SLL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y))) + +(Lsh16x32 x y) => (CMOVZ (SLL x y) (MOVWconst [0]) (SGTUconst [32] y)) +(Lsh16x16 x y) => (CMOVZ (SLL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y))) +(Lsh16x8 x y) => (CMOVZ (SLL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y))) + +(Lsh8x32 x y) => (CMOVZ (SLL x y) (MOVWconst [0]) (SGTUconst [32] y)) +(Lsh8x16 x y) => (CMOVZ (SLL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y))) +(Lsh8x8 x y) => (CMOVZ (SLL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y))) + +(Rsh32Ux32 x y) => (CMOVZ (SRL x y) (MOVWconst [0]) (SGTUconst [32] y)) +(Rsh32Ux16 x y) => (CMOVZ (SRL x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y))) +(Rsh32Ux8 x y) => (CMOVZ (SRL x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y))) + +(Rsh16Ux32 x y) => (CMOVZ (SRL (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y)) +(Rsh16Ux16 x y) => (CMOVZ (SRL (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y))) 
+(Rsh16Ux8 x y) => (CMOVZ (SRL (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y))) + +(Rsh8Ux32 x y) => (CMOVZ (SRL (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y)) +(Rsh8Ux16 x y) => (CMOVZ (SRL (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y))) +(Rsh8Ux8 x y) => (CMOVZ (SRL (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y))) + +(Rsh32x32 x y) => (SRA x ( CMOVZ y (MOVWconst [31]) (SGTUconst [32] y))) +(Rsh32x16 x y) => (SRA x ( CMOVZ (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y)))) +(Rsh32x8 x y) => (SRA x ( CMOVZ (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y)))) + +(Rsh16x32 x y) => (SRA (SignExt16to32 x) ( CMOVZ y (MOVWconst [31]) (SGTUconst [32] y))) +(Rsh16x16 x y) => (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y)))) +(Rsh16x8 x y) => (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y)))) + +(Rsh8x32 x y) => (SRA (SignExt16to32 x) ( CMOVZ y (MOVWconst [31]) (SGTUconst [32] y))) +(Rsh8x16 x y) => (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y)))) +(Rsh8x8 x y) => (SRA (SignExt16to32 x) ( CMOVZ (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y)))) + +// rotates +(RotateLeft8 x (MOVWconst [c])) => (Or8 (Lsh8x32 x (MOVWconst [c&7])) (Rsh8Ux32 x (MOVWconst [-c&7]))) +(RotateLeft16 x (MOVWconst [c])) => (Or16 (Lsh16x32 x (MOVWconst [c&15])) (Rsh16Ux32 x (MOVWconst [-c&15]))) +(RotateLeft32 x (MOVWconst [c])) => (Or32 (Lsh32x32 x (MOVWconst [c&31])) (Rsh32Ux32 x (MOVWconst [-c&31]))) +(RotateLeft64 x (MOVWconst [c])) => (Or64 (Lsh64x32 x (MOVWconst [c&63])) (Rsh64Ux32 x (MOVWconst [-c&63]))) + +// unary ops +(Neg(32|16|8) ...) => (NEG ...) +(Neg(32|64)F ...) => (NEG(F|D) ...) + +(Com(32|16|8) x) => (NORconst [0] x) + +(Sqrt ...) => (SQRTD ...) 
+(Sqrt32 ...) => (SQRTF ...) + +// TODO: optimize this case? +(Ctz32NonZero ...) => (Ctz32 ...) + +// count trailing zero +// 32 - CLZ(x&-x - 1) +(Ctz32 x) => (SUB (MOVWconst [32]) (CLZ (SUBconst [1] (AND x (NEG x))))) + +// bit length +(BitLen32 x) => (SUB (MOVWconst [32]) (CLZ x)) + +// boolean ops -- booleans are represented with 0=false, 1=true +(AndB ...) => (AND ...) +(OrB ...) => (OR ...) +(EqB x y) => (XORconst [1] (XOR x y)) +(NeqB ...) => (XOR ...) +(Not x) => (XORconst [1] x) + +// constants +(Const(32|16|8) [val]) => (MOVWconst [int32(val)]) +(Const(32|64)F ...) => (MOV(F|D)const ...) +(ConstNil) => (MOVWconst [0]) +(ConstBool [t]) => (MOVWconst [b2i32(t)]) + +// truncations +// Because we ignore high parts of registers, truncates are just copies. +(Trunc16to8 ...) => (Copy ...) +(Trunc32to8 ...) => (Copy ...) +(Trunc32to16 ...) => (Copy ...) + +// Zero-/Sign-extensions +(ZeroExt8to16 ...) => (MOVBUreg ...) +(ZeroExt8to32 ...) => (MOVBUreg ...) +(ZeroExt16to32 ...) => (MOVHUreg ...) + +(SignExt8to16 ...) => (MOVBreg ...) +(SignExt8to32 ...) => (MOVBreg ...) +(SignExt16to32 ...) => (MOVHreg ...) + +(Signmask x) => (SRAconst x [31]) +(Zeromask x) => (NEG (SGTU x (MOVWconst [0]))) +(Slicemask x) => (SRAconst (NEG x) [31]) + +// float-int conversion +(Cvt32to(32|64)F ...) => (MOVW(F|D) ...) +(Cvt(32|64)Fto32 ...) => (TRUNC(F|D)W ...) +(Cvt32Fto64F ...) => (MOVFD ...) +(Cvt64Fto32F ...) => (MOVDF ...) + +(CvtBoolToUint8 ...) => (Copy ...) + +(Round(32|64)F ...) => (Copy ...) 
+ +// comparisons +(Eq8 x y) => (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y))) +(Eq16 x y) => (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y))) +(Eq32 x y) => (SGTUconst [1] (XOR x y)) +(EqPtr x y) => (SGTUconst [1] (XOR x y)) +(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y)) + +(Neq8 x y) => (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0])) +(Neq16 x y) => (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0])) +(Neq32 x y) => (SGTU (XOR x y) (MOVWconst [0])) +(NeqPtr x y) => (SGTU (XOR x y) (MOVWconst [0])) +(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y)) + +(Less8 x y) => (SGT (SignExt8to32 y) (SignExt8to32 x)) +(Less16 x y) => (SGT (SignExt16to32 y) (SignExt16to32 x)) +(Less32 x y) => (SGT y x) +(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN + +(Less8U x y) => (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x)) +(Less16U x y) => (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x)) +(Less32U x y) => (SGTU y x) + +(Leq8 x y) => (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y))) +(Leq16 x y) => (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y))) +(Leq32 x y) => (XORconst [1] (SGT x y)) +(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN + +(Leq8U x y) => (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y))) +(Leq16U x y) => (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y))) +(Leq32U x y) => (XORconst [1] (SGTU x y)) + +(OffPtr [off] ptr:(SP)) => (MOVWaddr [int32(off)] ptr) +(OffPtr [off] ptr) => (ADDconst [int32(off)] ptr) + +(Addr {sym} base) => (MOVWaddr {sym} base) +(LocalAddr {sym} base mem) && t.Elem().HasPointers() => (MOVWaddr {sym} (SPanchored base mem)) +(LocalAddr {sym} base _) && !t.Elem().HasPointers() => (MOVWaddr {sym} base) + +// loads +(Load ptr mem) && t.IsBoolean() => (MOVBUload ptr mem) +(Load ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem) +(Load ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload 
ptr mem) +(Load ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem) +(Load ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem) +(Load ptr mem) && (is32BitInt(t) || isPtr(t)) => (MOVWload ptr mem) +(Load ptr mem) && is32BitFloat(t) => (MOVFload ptr mem) +(Load ptr mem) && is64BitFloat(t) => (MOVDload ptr mem) + +// stores +(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVFstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVDstore ptr val mem) + +// float <=> int register moves, with no conversion. +// These come up when compiling math.{Float32bits, Float32frombits}. +(MOVWload [off] {sym} ptr (MOVFstore [off] {sym} ptr val _)) => (MOVWfpgp val) +(MOVFload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (MOVWgpfp val) + +// Similarly for stores, if we see a store after FPR <=> GPR move, then redirect store to use the other register set. 
+(MOVWstore [off] {sym} ptr (MOVWfpgp val) mem) => (MOVFstore [off] {sym} ptr val mem) +(MOVFstore [off] {sym} ptr (MOVWgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem) + +// zero instructions +(Zero [0] _ mem) => mem +(Zero [1] ptr mem) => (MOVBstore ptr (MOVWconst [0]) mem) +(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore ptr (MOVWconst [0]) mem) +(Zero [2] ptr mem) => + (MOVBstore [1] ptr (MOVWconst [0]) + (MOVBstore [0] ptr (MOVWconst [0]) mem)) +(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 => + (MOVWstore ptr (MOVWconst [0]) mem) +(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore [2] ptr (MOVWconst [0]) + (MOVHstore [0] ptr (MOVWconst [0]) mem)) +(Zero [4] ptr mem) => + (MOVBstore [3] ptr (MOVWconst [0]) + (MOVBstore [2] ptr (MOVWconst [0]) + (MOVBstore [1] ptr (MOVWconst [0]) + (MOVBstore [0] ptr (MOVWconst [0]) mem)))) +(Zero [3] ptr mem) => + (MOVBstore [2] ptr (MOVWconst [0]) + (MOVBstore [1] ptr (MOVWconst [0]) + (MOVBstore [0] ptr (MOVWconst [0]) mem))) +(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore [4] ptr (MOVWconst [0]) + (MOVHstore [2] ptr (MOVWconst [0]) + (MOVHstore [0] ptr (MOVWconst [0]) mem))) +(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 => + (MOVWstore [4] ptr (MOVWconst [0]) + (MOVWstore [0] ptr (MOVWconst [0]) mem)) +(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 => + (MOVWstore [8] ptr (MOVWconst [0]) + (MOVWstore [4] ptr (MOVWconst [0]) + (MOVWstore [0] ptr (MOVWconst [0]) mem))) +(Zero [16] {t} ptr mem) && t.Alignment()%4 == 0 => + (MOVWstore [12] ptr (MOVWconst [0]) + (MOVWstore [8] ptr (MOVWconst [0]) + (MOVWstore [4] ptr (MOVWconst [0]) + (MOVWstore [0] ptr (MOVWconst [0]) mem)))) + +// large or unaligned zeroing uses a loop +(Zero [s] {t} ptr mem) + && (s > 16 || t.Alignment()%4 != 0) => + (LoweredZero [int32(t.Alignment())] + ptr + (ADDconst ptr [int32(s-moveSize(t.Alignment(), config))]) + mem) + +// moves +(Move [0] _ _ mem) => mem +(Move [1] dst src mem) => (MOVBstore dst 
(MOVBUload src mem) mem) +(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore dst (MOVHUload src mem) mem) +(Move [2] dst src mem) => + (MOVBstore [1] dst (MOVBUload [1] src mem) + (MOVBstore dst (MOVBUload src mem) mem)) +(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 => + (MOVWstore dst (MOVWload src mem) mem) +(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore [2] dst (MOVHUload [2] src mem) + (MOVHstore dst (MOVHUload src mem) mem)) +(Move [4] dst src mem) => + (MOVBstore [3] dst (MOVBUload [3] src mem) + (MOVBstore [2] dst (MOVBUload [2] src mem) + (MOVBstore [1] dst (MOVBUload [1] src mem) + (MOVBstore dst (MOVBUload src mem) mem)))) +(Move [3] dst src mem) => + (MOVBstore [2] dst (MOVBUload [2] src mem) + (MOVBstore [1] dst (MOVBUload [1] src mem) + (MOVBstore dst (MOVBUload src mem) mem))) +(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 => + (MOVWstore [4] dst (MOVWload [4] src mem) + (MOVWstore dst (MOVWload src mem) mem)) +(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore [6] dst (MOVHload [6] src mem) + (MOVHstore [4] dst (MOVHload [4] src mem) + (MOVHstore [2] dst (MOVHload [2] src mem) + (MOVHstore dst (MOVHload src mem) mem)))) +(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore [4] dst (MOVHload [4] src mem) + (MOVHstore [2] dst (MOVHload [2] src mem) + (MOVHstore dst (MOVHload src mem) mem))) +(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 => + (MOVWstore [8] dst (MOVWload [8] src mem) + (MOVWstore [4] dst (MOVWload [4] src mem) + (MOVWstore dst (MOVWload src mem) mem))) +(Move [16] {t} dst src mem) && t.Alignment()%4 == 0 => + (MOVWstore [12] dst (MOVWload [12] src mem) + (MOVWstore [8] dst (MOVWload [8] src mem) + (MOVWstore [4] dst (MOVWload [4] src mem) + (MOVWstore dst (MOVWload src mem) mem)))) + + +// large or unaligned move uses a loop +(Move [s] {t} dst src mem) + && (s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0) => + (LoweredMove [int32(t.Alignment())] + 
dst + src + (ADDconst src [int32(s-moveSize(t.Alignment(), config))]) + mem) + +// calls +(StaticCall ...) => (CALLstatic ...) +(ClosureCall ...) => (CALLclosure ...) +(InterCall ...) => (CALLinter ...) +(TailCall ...) => (CALLtail ...) + +// atomic intrinsics +(AtomicLoad(8|32) ...) => (LoweredAtomicLoad(8|32) ...) +(AtomicLoadPtr ...) => (LoweredAtomicLoad32 ...) + +(AtomicStore(8|32) ...) => (LoweredAtomicStore(8|32) ...) +(AtomicStorePtrNoWB ...) => (LoweredAtomicStore32 ...) + +(AtomicExchange32 ...) => (LoweredAtomicExchange ...) +(AtomicAdd32 ...) => (LoweredAtomicAdd ...) + +(AtomicCompareAndSwap32 ...) => (LoweredAtomicCas ...) + +// AtomicOr8(ptr,val) => LoweredAtomicOr(ptr&^3,uint32(val) << ((ptr & 3) * 8)) +(AtomicOr8 ptr val mem) && !config.BigEndian => + (LoweredAtomicOr (AND (MOVWconst [^3]) ptr) + (SLL (ZeroExt8to32 val) + (SLLconst [3] + (ANDconst [3] ptr))) mem) + +// AtomicAnd8(ptr,val) => LoweredAtomicAnd(ptr&^3,(uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8)))) +(AtomicAnd8 ptr val mem) && !config.BigEndian => + (LoweredAtomicAnd (AND (MOVWconst [^3]) ptr) + (OR (SLL (ZeroExt8to32 val) + (SLLconst [3] + (ANDconst [3] ptr))) + (NORconst [0] (SLL + (MOVWconst [0xff]) (SLLconst [3] + (ANDconst [3] ptr))))) mem) + +// AtomicOr8(ptr,val) => LoweredAtomicOr(ptr&^3,uint32(val) << (((ptr^3) & 3) * 8)) +(AtomicOr8 ptr val mem) && config.BigEndian => + (LoweredAtomicOr (AND (MOVWconst [^3]) ptr) + (SLL (ZeroExt8to32 val) + (SLLconst [3] + (ANDconst [3] + (XORconst [3] ptr)))) mem) + +// AtomicAnd8(ptr,val) => LoweredAtomicAnd(ptr&^3,(uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8)))) +(AtomicAnd8 ptr val mem) && config.BigEndian => + (LoweredAtomicAnd (AND (MOVWconst [^3]) ptr) + (OR (SLL (ZeroExt8to32 val) + (SLLconst [3] + (ANDconst [3] + (XORconst [3] ptr)))) + (NORconst [0] (SLL + (MOVWconst [0xff]) (SLLconst [3] + (ANDconst [3] + (XORconst [3] ptr)))))) mem) + +(AtomicAnd32 ...) 
=> (LoweredAtomicAnd ...) +(AtomicOr32 ...) => (LoweredAtomicOr ...) + + +// checks +(NilCheck ...) => (LoweredNilCheck ...) +(IsNonNil ptr) => (SGTU ptr (MOVWconst [0])) +(IsInBounds idx len) => (SGTU len idx) +(IsSliceInBounds idx len) => (XORconst [1] (SGTU idx len)) + +// pseudo-ops +(GetClosurePtr ...) => (LoweredGetClosurePtr ...) +(GetCallerSP ...) => (LoweredGetCallerSP ...) +(GetCallerPC ...) => (LoweredGetCallerPC ...) + +(If cond yes no) => (NE cond yes no) + +// Write barrier. +(WB ...) => (LoweredWB ...) + +(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem) + +(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 => (LoweredPanicExtendA [kind] hi lo y mem) +(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 => (LoweredPanicExtendB [kind] hi lo y mem) +(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 => (LoweredPanicExtendC [kind] hi lo y mem) + +// Optimizations + +// Absorb boolean tests into block +(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no) +(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no) +(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no) +(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no) +(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no) +(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no) +(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no) +(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no) +(NE (XORconst [1] cmp:(SGTzero _)) yes no) => (EQ cmp yes no) +(NE (XORconst [1] cmp:(SGTUzero _)) yes no) => (EQ cmp yes no) +(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no) +(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no) +(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no) +(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp 
yes no) +(EQ (XORconst [1] cmp:(SGTzero _)) yes no) => (NE cmp yes no) +(EQ (XORconst [1] cmp:(SGTUzero _)) yes no) => (NE cmp yes no) +(NE (SGTUconst [1] x) yes no) => (EQ x yes no) +(EQ (SGTUconst [1] x) yes no) => (NE x yes no) +(NE (SGTUzero x) yes no) => (NE x yes no) +(EQ (SGTUzero x) yes no) => (EQ x yes no) +(NE (SGTconst [0] x) yes no) => (LTZ x yes no) +(EQ (SGTconst [0] x) yes no) => (GEZ x yes no) +(NE (SGTzero x) yes no) => (GTZ x yes no) +(EQ (SGTzero x) yes no) => (LEZ x yes no) + +// fold offset into address +(ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) => (MOVWaddr [off1+off2] {sym} ptr) + +// fold address into load/store +(MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBload [off1+off2] {sym} ptr mem) +(MOVBUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBUload [off1+off2] {sym} ptr mem) +(MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHload [off1+off2] {sym} ptr mem) +(MOVHUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHUload [off1+off2] {sym} ptr mem) +(MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWload [off1+off2] {sym} ptr mem) +(MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVFload [off1+off2] {sym} ptr mem) +(MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVDload [off1+off2] {sym} ptr mem) + +(MOVBstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBstore [off1+off2] {sym} ptr val mem) +(MOVHstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHstore [off1+off2] {sym} ptr val mem) +(MOVWstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && 
(is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWstore [off1+off2] {sym} ptr val mem) +(MOVFstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVFstore [off1+off2] {sym} ptr val mem) +(MOVDstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVDstore [off1+off2] {sym} ptr val mem) + +(MOVBstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBstorezero [off1+off2] {sym} ptr mem) +(MOVHstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHstorezero [off1+off2] {sym} ptr mem) +(MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWstorezero [off1+off2] {sym} ptr mem) + +(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + +(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) => + (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) 
+(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) => + (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) +(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) => + (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) +(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) => + (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) +(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) => + (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) +(MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) => + (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + +// replace load from same location as preceding store with zero/sign extension (or copy in case of full width) +(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBreg x) +(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBUreg x) +(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHreg x) +(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHUreg x) +(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x +(MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x +(MOVDload [off] {sym} ptr (MOVDstore 
[off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x + +// store zero +(MOVBstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem) +(MOVHstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem) +(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem) + +// don't extend after proper load +(MOVBreg x:(MOVBload _ _)) => (MOVWreg x) +(MOVBUreg x:(MOVBUload _ _)) => (MOVWreg x) +(MOVHreg x:(MOVBload _ _)) => (MOVWreg x) +(MOVHreg x:(MOVBUload _ _)) => (MOVWreg x) +(MOVHreg x:(MOVHload _ _)) => (MOVWreg x) +(MOVHUreg x:(MOVBUload _ _)) => (MOVWreg x) +(MOVHUreg x:(MOVHUload _ _)) => (MOVWreg x) + +// fold double extensions +(MOVBreg x:(MOVBreg _)) => (MOVWreg x) +(MOVBUreg x:(MOVBUreg _)) => (MOVWreg x) +(MOVHreg x:(MOVBreg _)) => (MOVWreg x) +(MOVHreg x:(MOVBUreg _)) => (MOVWreg x) +(MOVHreg x:(MOVHreg _)) => (MOVWreg x) +(MOVHUreg x:(MOVBUreg _)) => (MOVWreg x) +(MOVHUreg x:(MOVHUreg _)) => (MOVWreg x) + +// sign extended loads +// Note: The combined instruction must end up in the same block +// as the original load. If not, we end up making a value with +// memory type live in two different blocks, which can lead to +// multiple memory values alive simultaneously. +// Make sure we don't combine these ops if the load has another use. +// This prevents a single load from being split into multiple loads +// which then might return different values. See test/atomicload.go. 
+(MOVBreg x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload [off] {sym} ptr mem) +(MOVBUreg x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload [off] {sym} ptr mem) +(MOVHreg x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload [off] {sym} ptr mem) +(MOVHUreg x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload [off] {sym} ptr mem) + +// fold extensions and ANDs together +(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&0xff] x) +(MOVHUreg (ANDconst [c] x)) => (ANDconst [c&0xffff] x) +(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 => (ANDconst [c&0x7f] x) +(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 => (ANDconst [c&0x7fff] x) + +// don't extend before store +(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem) + +// if a register move has only 1 use, just use the same register without emitting instruction +// MOVWnop doesn't emit instruction, only for ensuring the type. +(MOVWreg x) && x.Uses == 1 => (MOVWnop x) + +// TODO: we should be able to get rid of MOVWnop all together. +// But for now, this is enough to get rid of lots of them. 
+(MOVWnop (MOVWconst [c])) => (MOVWconst [c]) + +// fold constant into arithmetic ops +(ADD x (MOVWconst [c])) && !t.IsPtr() => (ADDconst [c] x) +(SUB x (MOVWconst [c])) => (SUBconst [c] x) +(AND x (MOVWconst [c])) => (ANDconst [c] x) +(OR x (MOVWconst [c])) => (ORconst [c] x) +(XOR x (MOVWconst [c])) => (XORconst [c] x) +(NOR x (MOVWconst [c])) => (NORconst [c] x) + +(SLL x (MOVWconst [c])) => (SLLconst x [c&31]) +(SRL x (MOVWconst [c])) => (SRLconst x [c&31]) +(SRA x (MOVWconst [c])) => (SRAconst x [c&31]) + +(SGT (MOVWconst [c]) x) => (SGTconst [c] x) +(SGTU (MOVWconst [c]) x) => (SGTUconst [c] x) +(SGT x (MOVWconst [0])) => (SGTzero x) +(SGTU x (MOVWconst [0])) => (SGTUzero x) + +// mul with constant +(Select1 (MULTU (MOVWconst [0]) _ )) => (MOVWconst [0]) +(Select0 (MULTU (MOVWconst [0]) _ )) => (MOVWconst [0]) +(Select1 (MULTU (MOVWconst [1]) x )) => x +(Select0 (MULTU (MOVWconst [1]) _ )) => (MOVWconst [0]) +(Select1 (MULTU (MOVWconst [-1]) x )) => (NEG x) +(Select0 (MULTU (MOVWconst [-1]) x )) => (CMOVZ (ADDconst [-1] x) (MOVWconst [0]) x) +(Select1 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo64(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x) +(Select0 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo64(int64(uint32(c))) => (SRLconst [int32(32-log2uint32(int64(c)))] x) + +(MUL (MOVWconst [0]) _ ) => (MOVWconst [0]) +(MUL (MOVWconst [1]) x ) => x +(MUL (MOVWconst [-1]) x ) => (NEG x) +(MUL (MOVWconst [c]) x ) && isPowerOfTwo64(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x) + +// generic simplifications +(ADD x (NEG y)) => (SUB x y) +(SUB x x) => (MOVWconst [0]) +(SUB (MOVWconst [0]) x) => (NEG x) +(AND x x) => x +(OR x x) => x +(XOR x x) => (MOVWconst [0]) + +// miscellaneous patterns generated by dec64 +(AND (SGTUconst [1] x) (SGTUconst [1] y)) => (SGTUconst [1] (OR x y)) +(OR (SGTUzero x) (SGTUzero y)) => (SGTUzero (OR x y)) + +// remove redundant *const ops +(ADDconst [0] x) => x +(SUBconst [0] x) => x +(ANDconst [0] _) => 
(MOVWconst [0]) +(ANDconst [-1] x) => x +(ORconst [0] x) => x +(ORconst [-1] _) => (MOVWconst [-1]) +(XORconst [0] x) => x +(XORconst [-1] x) => (NORconst [0] x) + +// generic constant folding +(ADDconst [c] (MOVWconst [d])) => (MOVWconst [int32(c+d)]) +(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x) +(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x) +(SUBconst [c] (MOVWconst [d])) => (MOVWconst [d-c]) +(SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x) +(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x) +(SLLconst [c] (MOVWconst [d])) => (MOVWconst [d<<uint32(c)]) +(SRLconst [c] (MOVWconst [d])) => (MOVWconst [int32(uint32(d)>>uint32(c))]) +(SRAconst [c] (MOVWconst [d])) => (MOVWconst [d>>uint32(c)]) +(MUL (MOVWconst [c]) (MOVWconst [d])) => (MOVWconst [c*d]) +(Select1 (MULTU (MOVWconst [c]) (MOVWconst [d]))) => (MOVWconst [int32(uint32(c)*uint32(d))]) +(Select0 (MULTU (MOVWconst [c]) (MOVWconst [d]))) => (MOVWconst [int32((int64(uint32(c))*int64(uint32(d)))>>32)]) +(Select1 (DIV (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [c/d]) +(Select1 (DIVU (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [int32(uint32(c)/uint32(d))]) +(Select0 (DIV (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [c%d]) +(Select0 (DIVU (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [int32(uint32(c)%uint32(d))]) +(ANDconst [c] (MOVWconst [d])) => (MOVWconst [c&d]) +(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x) +(ORconst [c] (MOVWconst [d])) => (MOVWconst [c|d]) +(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x) +(XORconst [c] (MOVWconst [d])) => (MOVWconst [c^d]) +(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x) +(NORconst [c] (MOVWconst [d])) => (MOVWconst [^(c|d)]) +(NEG (MOVWconst [c])) => (MOVWconst [-c]) +(MOVBreg (MOVWconst [c])) => (MOVWconst [int32(int8(c))]) +(MOVBUreg (MOVWconst [c])) => (MOVWconst [int32(uint8(c))]) +(MOVHreg (MOVWconst [c])) => (MOVWconst [int32(int16(c))]) +(MOVHUreg (MOVWconst [c])) => (MOVWconst [int32(uint16(c))]) +(MOVWreg 
(MOVWconst [c])) => (MOVWconst [c]) + +// constant comparisons +(SGTconst [c] (MOVWconst [d])) && c > d => (MOVWconst [1]) +(SGTconst [c] (MOVWconst [d])) && c <= d => (MOVWconst [0]) +(SGTUconst [c] (MOVWconst [d])) && uint32(c) > uint32(d) => (MOVWconst [1]) +(SGTUconst [c] (MOVWconst [d])) && uint32(c) <= uint32(d) => (MOVWconst [0]) +(SGTzero (MOVWconst [d])) && d > 0 => (MOVWconst [1]) +(SGTzero (MOVWconst [d])) && d <= 0 => (MOVWconst [0]) +(SGTUzero (MOVWconst [d])) && d != 0 => (MOVWconst [1]) +(SGTUzero (MOVWconst [d])) && d == 0 => (MOVWconst [0]) + +// other known comparisons +(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVWconst [1]) +(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVWconst [0]) +(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVWconst [1]) +(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVWconst [0]) +(SGTUconst [c] (MOVBUreg _)) && 0xff < uint32(c) => (MOVWconst [1]) +(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVWconst [1]) +(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVWconst [0]) +(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVWconst [1]) +(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVWconst [0]) +(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint32(c) => (MOVWconst [1]) +(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVWconst [1]) +(SGTUconst [c] (ANDconst [m] _)) && uint32(m) < uint32(c) => (MOVWconst [1]) +(SGTconst [c] (SRLconst _ [d])) && 0 <= c && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) => (MOVWconst [1]) +(SGTUconst [c] (SRLconst _ [d])) && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) => (MOVWconst [1]) + +// absorb constants into branches +(EQ (MOVWconst [0]) yes no) => (First yes no) +(EQ (MOVWconst [c]) yes no) && c != 0 => (First no yes) +(NE (MOVWconst [0]) yes no) => (First no yes) +(NE (MOVWconst [c]) yes no) && c != 0 => (First yes no) +(LTZ (MOVWconst [c]) yes no) && c < 0 => (First yes no) +(LTZ (MOVWconst [c]) yes no) && c >= 0 => (First no yes) +(LEZ (MOVWconst [c]) yes no) && c <= 0 => (First 
yes no) +(LEZ (MOVWconst [c]) yes no) && c > 0 => (First no yes) +(GTZ (MOVWconst [c]) yes no) && c > 0 => (First yes no) +(GTZ (MOVWconst [c]) yes no) && c <= 0 => (First no yes) +(GEZ (MOVWconst [c]) yes no) && c >= 0 => (First yes no) +(GEZ (MOVWconst [c]) yes no) && c < 0 => (First no yes) + +// conditional move +(CMOVZ _ f (MOVWconst [0])) => f +(CMOVZ a _ (MOVWconst [c])) && c!=0 => a +(CMOVZzero _ (MOVWconst [0])) => (MOVWconst [0]) +(CMOVZzero a (MOVWconst [c])) && c!=0 => a +(CMOVZ a (MOVWconst [0]) c) => (CMOVZzero a c) + +// atomic +(LoweredAtomicStore32 ptr (MOVWconst [0]) mem) => (LoweredAtomicStorezero ptr mem) +(LoweredAtomicAdd ptr (MOVWconst [c]) mem) && is16Bit(int64(c)) => (LoweredAtomicAddconst [c] ptr mem) + diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/MIPS64.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/MIPS64.rules new file mode 100644 index 0000000000000000000000000000000000000000..cabc7c652dff6ce4d8157ce10c25f947c5261802 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/MIPS64.rules @@ -0,0 +1,817 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +(Add(Ptr|64|32|16|8) ...) => (ADDV ...) +(Add(32|64)F ...) => (ADD(F|D) ...) + +(Sub(Ptr|64|32|16|8) ...) => (SUBV ...) +(Sub(32|64)F ...) => (SUB(F|D) ...) + +(Mul(64|32|16|8) x y) => (Select1 (MULVU x y)) +(Mul(32|64)F ...) => (MUL(F|D) ...) +(Mul64uhilo ...) => (MULVU ...) 
+(Select0 (Mul64uover x y)) => (Select1 (MULVU x y)) +(Select1 (Mul64uover x y)) => (SGTU (Select0 (MULVU x y)) (MOVVconst [0])) + +(Hmul64 x y) => (Select0 (MULV x y)) +(Hmul64u x y) => (Select0 (MULVU x y)) +(Hmul32 x y) => (SRAVconst (Select1 (MULV (SignExt32to64 x) (SignExt32to64 y))) [32]) +(Hmul32u x y) => (SRLVconst (Select1 (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32]) + +(Div64 x y) => (Select1 (DIVV x y)) +(Div64u x y) => (Select1 (DIVVU x y)) +(Div32 x y) => (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y))) +(Div32u x y) => (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) +(Div16 x y) => (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y))) +(Div16u x y) => (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) +(Div8 x y) => (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y))) +(Div8u x y) => (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) +(Div(32|64)F ...) => (DIV(F|D) ...) + +(Mod64 x y) => (Select0 (DIVV x y)) +(Mod64u x y) => (Select0 (DIVVU x y)) +(Mod32 x y) => (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y))) +(Mod32u x y) => (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) +(Mod16 x y) => (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y))) +(Mod16u x y) => (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) +(Mod8 x y) => (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y))) +(Mod8u x y) => (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) + +(Select0 (Add64carry x y c)) => (ADDV (ADDV x y) c) +(Select1 (Add64carry x y c)) => + (OR (SGTU x s:(ADDV x y)) (SGTU s (ADDV s c))) + +(Select0 (Sub64borrow x y c)) => (SUBV (SUBV x y) c) +(Select1 (Sub64borrow x y c)) => + (OR (SGTU s:(SUBV x y) x) (SGTU (SUBV s c) s)) + +// math package intrinsics +(Abs ...) => (ABSD ...) + +// (x + y) / 2 with x>=y => (x - y) / 2 + y +(Avg64u x y) => (ADDV (SRLVconst (SUBV x y) [1]) y) + +(And(64|32|16|8) ...) => (AND ...) +(Or(64|32|16|8) ...) => (OR ...) +(Xor(64|32|16|8) ...) => (XOR ...) 
+ +// shifts +// hardware instruction uses only the low 6 bits of the shift +// we compare to 64 to ensure Go semantics for large shifts +(Lsh64x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) +(Lsh64x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) +(Lsh64x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) +(Lsh64x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + +(Lsh32x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) +(Lsh32x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) +(Lsh32x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) +(Lsh32x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + +(Lsh16x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) +(Lsh16x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) +(Lsh16x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) +(Lsh16x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + +(Lsh8x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) +(Lsh8x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) +(Lsh8x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) +(Lsh8x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + +(Rsh64Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV x y)) +(Rsh64Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV x (ZeroExt32to64 y))) +(Rsh64Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV x (ZeroExt16to64 y))) +(Rsh64Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV x (ZeroExt8to64 y))) + 
+(Rsh32Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt32to64 x) y)) +(Rsh32Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt32to64 y))) +(Rsh32Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt16to64 y))) +(Rsh32Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y))) + +(Rsh16Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt16to64 x) y)) +(Rsh16Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt32to64 y))) +(Rsh16Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt16to64 y))) +(Rsh16Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y))) + +(Rsh8Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt8to64 x) y)) +(Rsh8Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt32to64 y))) +(Rsh8Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt16to64 y))) +(Rsh8Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y))) + +(Rsh64x64 x y) => (SRAV x (OR (NEGV (SGTU y (MOVVconst [63]))) y)) +(Rsh64x32 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh64x16 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) +(Rsh64x8 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + +(Rsh32x64 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) +(Rsh32x32 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh32x16 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) 
+(Rsh32x8 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + +(Rsh16x64 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) +(Rsh16x32 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh16x16 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) +(Rsh16x8 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + +(Rsh8x64 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) +(Rsh8x32 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh8x16 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) +(Rsh8x8 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + +// rotates +(RotateLeft8 x (MOVVconst [c])) => (Or8 (Lsh8x64 x (MOVVconst [c&7])) (Rsh8Ux64 x (MOVVconst [-c&7]))) +(RotateLeft16 x (MOVVconst [c])) => (Or16 (Lsh16x64 x (MOVVconst [c&15])) (Rsh16Ux64 x (MOVVconst [-c&15]))) +(RotateLeft32 x (MOVVconst [c])) => (Or32 (Lsh32x64 x (MOVVconst [c&31])) (Rsh32Ux64 x (MOVVconst [-c&31]))) +(RotateLeft64 x (MOVVconst [c])) => (Or64 (Lsh64x64 x (MOVVconst [c&63])) (Rsh64Ux64 x (MOVVconst [-c&63]))) + +// unary ops +(Neg(64|32|16|8) ...) => (NEGV ...) +(Neg(32|64)F ...) => (NEG(F|D) ...) + +(Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x) + +(Sqrt ...) => (SQRTD ...) +(Sqrt32 ...) => (SQRTF ...) + +// boolean ops -- booleans are represented with 0=false, 1=true +(AndB ...) => (AND ...) +(OrB ...) => (OR ...) +(EqB x y) => (XOR (MOVVconst [1]) (XOR x y)) +(NeqB ...) => (XOR ...) 
+(Not x) => (XORconst [1] x) + +// constants +(Const(64|32|16|8) [val]) => (MOVVconst [int64(val)]) +(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)]) +(ConstNil) => (MOVVconst [0]) +(ConstBool [t]) => (MOVVconst [int64(b2i(t))]) + +(Slicemask x) => (SRAVconst (NEGV x) [63]) + +// truncations +// Because we ignore high parts of registers, truncates are just copies. +(Trunc16to8 ...) => (Copy ...) +(Trunc32to8 ...) => (Copy ...) +(Trunc32to16 ...) => (Copy ...) +(Trunc64to8 ...) => (Copy ...) +(Trunc64to16 ...) => (Copy ...) +(Trunc64to32 ...) => (Copy ...) + +// Zero-/Sign-extensions +(ZeroExt8to16 ...) => (MOVBUreg ...) +(ZeroExt8to32 ...) => (MOVBUreg ...) +(ZeroExt16to32 ...) => (MOVHUreg ...) +(ZeroExt8to64 ...) => (MOVBUreg ...) +(ZeroExt16to64 ...) => (MOVHUreg ...) +(ZeroExt32to64 ...) => (MOVWUreg ...) + +(SignExt8to16 ...) => (MOVBreg ...) +(SignExt8to32 ...) => (MOVBreg ...) +(SignExt16to32 ...) => (MOVHreg ...) +(SignExt8to64 ...) => (MOVBreg ...) +(SignExt16to64 ...) => (MOVHreg ...) +(SignExt32to64 ...) => (MOVWreg ...) + +// float <=> int conversion +(Cvt32to32F ...) => (MOVWF ...) +(Cvt32to64F ...) => (MOVWD ...) +(Cvt64to32F ...) => (MOVVF ...) +(Cvt64to64F ...) => (MOVVD ...) +(Cvt32Fto32 ...) => (TRUNCFW ...) +(Cvt64Fto32 ...) => (TRUNCDW ...) +(Cvt32Fto64 ...) => (TRUNCFV ...) +(Cvt64Fto64 ...) => (TRUNCDV ...) +(Cvt32Fto64F ...) => (MOVFD ...) +(Cvt64Fto32F ...) => (MOVDF ...) + +(CvtBoolToUint8 ...) => (Copy ...) + +(Round(32|64)F ...) => (Copy ...) 
+ +// comparisons +(Eq8 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y))) +(Eq16 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y))) +(Eq32 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y))) +(Eq64 x y) => (SGTU (MOVVconst [1]) (XOR x y)) +(EqPtr x y) => (SGTU (MOVVconst [1]) (XOR x y)) +(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y)) + +(Neq8 x y) => (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0])) +(Neq16 x y) => (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0])) +(Neq32 x y) => (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0])) +(Neq64 x y) => (SGTU (XOR x y) (MOVVconst [0])) +(NeqPtr x y) => (SGTU (XOR x y) (MOVVconst [0])) +(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y)) + +(Less8 x y) => (SGT (SignExt8to64 y) (SignExt8to64 x)) +(Less16 x y) => (SGT (SignExt16to64 y) (SignExt16to64 x)) +(Less32 x y) => (SGT (SignExt32to64 y) (SignExt32to64 x)) +(Less64 x y) => (SGT y x) +(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN + +(Less8U x y) => (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x)) +(Less16U x y) => (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x)) +(Less32U x y) => (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x)) +(Less64U x y) => (SGTU y x) + +(Leq8 x y) => (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y))) +(Leq16 x y) => (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y))) +(Leq32 x y) => (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y))) +(Leq64 x y) => (XOR (MOVVconst [1]) (SGT x y)) +(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN + +(Leq8U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y))) +(Leq16U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y))) +(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))) +(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y)) + +(OffPtr [off] 
ptr:(SP)) && is32Bit(off) => (MOVVaddr [int32(off)] ptr) +(OffPtr [off] ptr) => (ADDVconst [off] ptr) + +(Addr {sym} base) => (MOVVaddr {sym} base) +(LocalAddr {sym} base mem) && t.Elem().HasPointers() => (MOVVaddr {sym} (SPanchored base mem)) +(LocalAddr {sym} base _) && !t.Elem().HasPointers() => (MOVVaddr {sym} base) + +// loads +(Load ptr mem) && t.IsBoolean() => (MOVBUload ptr mem) +(Load ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem) +(Load ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem) +(Load ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem) +(Load ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem) +(Load ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem) +(Load ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem) +(Load ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVVload ptr mem) +(Load ptr mem) && is32BitFloat(t) => (MOVFload ptr mem) +(Load ptr mem) && is64BitFloat(t) => (MOVDload ptr mem) + +// stores +(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVVstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVFstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVDstore ptr val mem) + +// zeroing +(Zero [0] _ mem) => mem +(Zero [1] ptr mem) => (MOVBstore ptr (MOVVconst [0]) mem) +(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore ptr (MOVVconst [0]) mem) +(Zero [2] ptr mem) => + (MOVBstore [1] ptr (MOVVconst [0]) + (MOVBstore [0] ptr (MOVVconst [0]) mem)) +(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 => + (MOVWstore ptr (MOVVconst [0]) mem) +(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore [2] ptr (MOVVconst [0]) + (MOVHstore [0] ptr 
(MOVVconst [0]) mem)) +(Zero [4] ptr mem) => + (MOVBstore [3] ptr (MOVVconst [0]) + (MOVBstore [2] ptr (MOVVconst [0]) + (MOVBstore [1] ptr (MOVVconst [0]) + (MOVBstore [0] ptr (MOVVconst [0]) mem)))) +(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 => + (MOVVstore ptr (MOVVconst [0]) mem) +(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 => + (MOVWstore [4] ptr (MOVVconst [0]) + (MOVWstore [0] ptr (MOVVconst [0]) mem)) +(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore [6] ptr (MOVVconst [0]) + (MOVHstore [4] ptr (MOVVconst [0]) + (MOVHstore [2] ptr (MOVVconst [0]) + (MOVHstore [0] ptr (MOVVconst [0]) mem)))) + +(Zero [3] ptr mem) => + (MOVBstore [2] ptr (MOVVconst [0]) + (MOVBstore [1] ptr (MOVVconst [0]) + (MOVBstore [0] ptr (MOVVconst [0]) mem))) +(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore [4] ptr (MOVVconst [0]) + (MOVHstore [2] ptr (MOVVconst [0]) + (MOVHstore [0] ptr (MOVVconst [0]) mem))) +(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 => + (MOVWstore [8] ptr (MOVVconst [0]) + (MOVWstore [4] ptr (MOVVconst [0]) + (MOVWstore [0] ptr (MOVVconst [0]) mem))) +(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 => + (MOVVstore [8] ptr (MOVVconst [0]) + (MOVVstore [0] ptr (MOVVconst [0]) mem)) +(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 => + (MOVVstore [16] ptr (MOVVconst [0]) + (MOVVstore [8] ptr (MOVVconst [0]) + (MOVVstore [0] ptr (MOVVconst [0]) mem))) + +// medium zeroing uses a duff device +// 8, and 128 are magic constants, see runtime/mkduff.go +(Zero [s] {t} ptr mem) + && s%8 == 0 && s > 24 && s <= 8*128 + && t.Alignment()%8 == 0 && !config.noDuffDevice => + (DUFFZERO [8 * (128 - s/8)] ptr mem) + +// large or unaligned zeroing uses a loop +(Zero [s] {t} ptr mem) + && (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 => + (LoweredZero [t.Alignment()] + ptr + (ADDVconst ptr [s-moveSize(t.Alignment(), config)]) + mem) + +// moves +(Move [0] _ _ mem) => mem +(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) 
mem) +(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore dst (MOVHload src mem) mem) +(Move [2] dst src mem) => + (MOVBstore [1] dst (MOVBload [1] src mem) + (MOVBstore dst (MOVBload src mem) mem)) +(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 => + (MOVWstore dst (MOVWload src mem) mem) +(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore [2] dst (MOVHload [2] src mem) + (MOVHstore dst (MOVHload src mem) mem)) +(Move [4] dst src mem) => + (MOVBstore [3] dst (MOVBload [3] src mem) + (MOVBstore [2] dst (MOVBload [2] src mem) + (MOVBstore [1] dst (MOVBload [1] src mem) + (MOVBstore dst (MOVBload src mem) mem)))) +(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 => + (MOVVstore dst (MOVVload src mem) mem) +(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 => + (MOVWstore [4] dst (MOVWload [4] src mem) + (MOVWstore dst (MOVWload src mem) mem)) +(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore [6] dst (MOVHload [6] src mem) + (MOVHstore [4] dst (MOVHload [4] src mem) + (MOVHstore [2] dst (MOVHload [2] src mem) + (MOVHstore dst (MOVHload src mem) mem)))) + +(Move [3] dst src mem) => + (MOVBstore [2] dst (MOVBload [2] src mem) + (MOVBstore [1] dst (MOVBload [1] src mem) + (MOVBstore dst (MOVBload src mem) mem))) +(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore [4] dst (MOVHload [4] src mem) + (MOVHstore [2] dst (MOVHload [2] src mem) + (MOVHstore dst (MOVHload src mem) mem))) +(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 => + (MOVWstore [8] dst (MOVWload [8] src mem) + (MOVWstore [4] dst (MOVWload [4] src mem) + (MOVWstore dst (MOVWload src mem) mem))) +(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 => + (MOVVstore [8] dst (MOVVload [8] src mem) + (MOVVstore dst (MOVVload src mem) mem)) +(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 => + (MOVVstore [16] dst (MOVVload [16] src mem) + (MOVVstore [8] dst (MOVVload [8] src mem) + (MOVVstore dst (MOVVload src mem) mem))) + +// float 
<=> int register moves, with no conversion. +// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}. +(MOVVload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) => (MOVVfpgp val) +(MOVDload [off] {sym} ptr (MOVVstore [off] {sym} ptr val _)) => (MOVVgpfp val) +(MOVWUload [off] {sym} ptr (MOVFstore [off] {sym} ptr val _)) => (ZeroExt32to64 (MOVWfpgp val)) +(MOVFload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (MOVWgpfp val) + +// Similarly for stores, if we see a store after FPR <=> GPR move, then redirect store to use the other register set. +(MOVVstore [off] {sym} ptr (MOVVfpgp val) mem) => (MOVDstore [off] {sym} ptr val mem) +(MOVDstore [off] {sym} ptr (MOVVgpfp val) mem) => (MOVVstore [off] {sym} ptr val mem) +(MOVWstore [off] {sym} ptr (MOVWfpgp val) mem) => (MOVFstore [off] {sym} ptr val mem) +(MOVFstore [off] {sym} ptr (MOVWgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem) + +// medium move uses a duff device +(Move [s] {t} dst src mem) + && s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 + && !config.noDuffDevice && logLargeCopy(v, s) => + (DUFFCOPY [16 * (128 - s/8)] dst src mem) +// 16 and 128 are magic constants. 16 is the number of bytes to encode: +// MOVV (R1), R23 +// ADDV $8, R1 +// MOVV R23, (R2) +// ADDV $8, R2 +// and 128 is the number of such blocks. See runtime/duff_mips64.s:duffcopy. + +// large or unaligned move uses a loop +(Move [s] {t} dst src mem) + && s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 => + (LoweredMove [t.Alignment()] + dst + src + (ADDVconst src [s-moveSize(t.Alignment(), config)]) + mem) + +// calls +(StaticCall ...) => (CALLstatic ...) +(ClosureCall ...) => (CALLclosure ...) +(InterCall ...) => (CALLinter ...) +(TailCall ...) => (CALLtail ...) + +// atomic intrinsics +(AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...) +(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...) + +(AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64) ...) 
+(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...) + +(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...) + +(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...) + +(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem) +(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...) + +// AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3,uint32(val) << ((ptr & 3) * 8)) +(AtomicOr8 ptr val mem) && !config.BigEndian => + (LoweredAtomicOr32 (AND (MOVVconst [^3]) ptr) + (SLLV (ZeroExt8to32 val) + (SLLVconst [3] + (ANDconst [3] ptr))) mem) + +// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3,(uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8)))) +(AtomicAnd8 ptr val mem) && !config.BigEndian => + (LoweredAtomicAnd32 (AND (MOVVconst [^3]) ptr) + (OR (SLLV (ZeroExt8to32 val) + (SLLVconst [3] + (ANDconst [3] ptr))) + (NORconst [0] (SLLV + (MOVVconst [0xff]) (SLLVconst [3] + (ANDconst [3] ptr))))) mem) + +// AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3,uint32(val) << (((ptr^3) & 3) * 8)) +(AtomicOr8 ptr val mem) && config.BigEndian => + (LoweredAtomicOr32 (AND (MOVVconst [^3]) ptr) + (SLLV (ZeroExt8to32 val) + (SLLVconst [3] + (ANDconst [3] + (XORconst [3] ptr)))) mem) + +// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3,(uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8)))) +(AtomicAnd8 ptr val mem) && config.BigEndian => + (LoweredAtomicAnd32 (AND (MOVVconst [^3]) ptr) + (OR (SLLV (ZeroExt8to32 val) + (SLLVconst [3] + (ANDconst [3] + (XORconst [3] ptr)))) + (NORconst [0] (SLLV + (MOVVconst [0xff]) (SLLVconst [3] + (ANDconst [3] + (XORconst [3] ptr)))))) mem) + +(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...) +(AtomicOr32 ...) => (LoweredAtomicOr32 ...) + +// checks +(NilCheck ...) => (LoweredNilCheck ...) 
+(IsNonNil ptr) => (SGTU ptr (MOVVconst [0])) +(IsInBounds idx len) => (SGTU len idx) +(IsSliceInBounds idx len) => (XOR (MOVVconst [1]) (SGTU idx len)) + +// pseudo-ops +(GetClosurePtr ...) => (LoweredGetClosurePtr ...) +(GetCallerSP ...) => (LoweredGetCallerSP ...) +(GetCallerPC ...) => (LoweredGetCallerPC ...) + +(If cond yes no) => (NE cond yes no) + +// Write barrier. +(WB ...) => (LoweredWB ...) + +(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem) + +// Optimizations + +// Absorb boolean tests into block +(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no) +(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no) +(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no) +(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no) +(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no) +(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no) +(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no) +(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no) +(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no) +(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no) +(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no) +(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no) +(NE (SGTUconst [1] x) yes no) => (EQ x yes no) +(EQ (SGTUconst [1] x) yes no) => (NE x yes no) +(NE (SGTU x (MOVVconst [0])) yes no) => (NE x yes no) +(EQ (SGTU x (MOVVconst [0])) yes no) => (EQ x yes no) +(NE (SGTconst [0] x) yes no) => (LTZ x yes no) +(EQ (SGTconst [0] x) yes no) => (GEZ x yes no) +(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no) +(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no) + +// fold offset into address +(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr 
[int32(off1)+int32(off2)] {sym} ptr) + +// fold address into load/store +(MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBload [off1+int32(off2)] {sym} ptr mem) +(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBUload [off1+int32(off2)] {sym} ptr mem) +(MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHload [off1+int32(off2)] {sym} ptr mem) +(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHUload [off1+int32(off2)] {sym} ptr mem) +(MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWload [off1+int32(off2)] {sym} ptr mem) +(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWUload [off1+int32(off2)] {sym} ptr mem) +(MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVload [off1+int32(off2)] {sym} ptr mem) +(MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVFload [off1+int32(off2)] {sym} ptr mem) +(MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVDload [off1+int32(off2)] {sym} ptr mem) + +(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBstore [off1+int32(off2)] {sym} ptr val mem) +(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || 
!config.ctxt.Flag_shared) => (MOVHstore [off1+int32(off2)] {sym} ptr val mem) +(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWstore [off1+int32(off2)] {sym} ptr val mem) +(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVstore [off1+int32(off2)] {sym} ptr val mem) +(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem) +(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem) +(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) +(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) +(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) +(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem) + +(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => 
+ (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + +(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) +(MOVHstore 
[off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) +(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) +(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) +(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) +(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) +(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVVstorezero [off1] 
{sym1} (MOVVaddr [off2] {sym2} ptr) mem) + && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => + (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + +// store zero +(MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem) +(MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem) +(MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem) +(MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVVstorezero [off] {sym} ptr mem) + +// don't extend after proper load +(MOVBreg x:(MOVBload _ _)) => (MOVVreg x) +(MOVBUreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVHreg x:(MOVBload _ _)) => (MOVVreg x) +(MOVHreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVHreg x:(MOVHload _ _)) => (MOVVreg x) +(MOVHUreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVHUreg x:(MOVHUload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVBload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVHload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVHUload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVWload _ _)) => (MOVVreg x) +(MOVWUreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVWUreg x:(MOVHUload _ _)) => (MOVVreg x) +(MOVWUreg x:(MOVWUload _ _)) => (MOVVreg x) + +// fold double extensions +(MOVBreg x:(MOVBreg _)) => (MOVVreg x) +(MOVBUreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVHreg x:(MOVBreg _)) => (MOVVreg x) +(MOVHreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVHreg x:(MOVHreg _)) => (MOVVreg x) +(MOVHUreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVHUreg x:(MOVHUreg _)) => (MOVVreg x) +(MOVWreg x:(MOVBreg _)) => (MOVVreg x) +(MOVWreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVWreg x:(MOVHreg _)) => (MOVVreg x) +(MOVWreg x:(MOVWreg _)) => (MOVVreg x) +(MOVWUreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVWUreg x:(MOVHUreg _)) => (MOVVreg x) +(MOVWUreg x:(MOVWUreg _)) => (MOVVreg x) + +// don't extend before store +(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} 
ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem) + +// if a register move has only 1 use, just use the same register without emitting instruction +// MOVVnop doesn't emit instruction, only for ensuring the type. +(MOVVreg x) && x.Uses == 1 => (MOVVnop x) + +// TODO: we should be able to get rid of MOVVnop all together. +// But for now, this is enough to get rid of lots of them. 
+(MOVVnop (MOVVconst [c])) => (MOVVconst [c]) + +// fold constant into arithmetic ops +(ADDV x (MOVVconst [c])) && is32Bit(c) && !t.IsPtr() => (ADDVconst [c] x) +(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x) +(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x) +(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x) +(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x) +(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x) + +(SLLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0]) +(SRLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0]) +(SRAV x (MOVVconst [c])) && uint64(c)>=64 => (SRAVconst x [63]) +(SLLV x (MOVVconst [c])) => (SLLVconst x [c]) +(SRLV x (MOVVconst [c])) => (SRLVconst x [c]) +(SRAV x (MOVVconst [c])) => (SRAVconst x [c]) + +(SGT (MOVVconst [c]) x) && is32Bit(c) => (SGTconst [c] x) +(SGTU (MOVVconst [c]) x) && is32Bit(c) => (SGTUconst [c] x) + +// mul by constant +(Select1 (MULVU x (MOVVconst [-1]))) => (NEGV x) +(Select1 (MULVU _ (MOVVconst [0]))) => (MOVVconst [0]) +(Select1 (MULVU x (MOVVconst [1]))) => x +(Select1 (MULVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SLLVconst [log64(c)] x) + +// div by constant +(Select1 (DIVVU x (MOVVconst [1]))) => x +(Select1 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SRLVconst [log64(c)] x) +(Select0 (DIVVU _ (MOVVconst [1]))) => (MOVVconst [0]) // mod +(Select0 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (ANDconst [c-1] x) // mod + +// generic simplifications +(ADDV x (NEGV y)) => (SUBV x y) +(SUBV x x) => (MOVVconst [0]) +(SUBV (MOVVconst [0]) x) => (NEGV x) +(AND x x) => x +(OR x x) => x +(XOR x x) => (MOVVconst [0]) + +// remove redundant *const ops +(ADDVconst [0] x) => x +(SUBVconst [0] x) => x +(ANDconst [0] _) => (MOVVconst [0]) +(ANDconst [-1] x) => x +(ORconst [0] x) => x +(ORconst [-1] _) => (MOVVconst [-1]) +(XORconst [0] x) => x +(XORconst [-1] x) => (NORconst [0] x) + +// generic constant folding +(ADDVconst [c] (MOVVconst [d])) => 
(MOVVconst [c+d]) +(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) => (ADDVconst [c+d] x) +(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) => (ADDVconst [c-d] x) +(SUBVconst [c] (MOVVconst [d])) => (MOVVconst [d-c]) +(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x) +(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x) +(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d<<uint64(c)]) +(SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))]) +(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)]) +(Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c*d]) +(Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [c/d]) +(Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [int64(uint64(c)/uint64(d))]) +(Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [c%d]) // mod +(Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod +(ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d]) +(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x) +(ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d]) +(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x) +(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d]) +(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x) +(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)]) +(NEGV (MOVVconst [c])) => (MOVVconst [-c]) +(MOVBreg (MOVVconst [c])) => (MOVVconst [int64(int8(c))]) +(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))]) +(MOVHreg (MOVVconst [c])) => (MOVVconst [int64(int16(c))]) +(MOVHUreg (MOVVconst [c])) => (MOVVconst [int64(uint16(c))]) +(MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))]) +(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))]) +(MOVVreg (MOVVconst [c])) => (MOVVconst [c]) +(LoweredAtomicStore(32|64) ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero(32|64) ptr mem) +(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && 
is32Bit(c) => (LoweredAtomicAddconst32 [int32(c)] ptr mem) +(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst64 [c] ptr mem) + +// constant comparisons +(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1]) +(SGTconst [c] (MOVVconst [d])) && c<=d => (MOVVconst [0]) +(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) => (MOVVconst [1]) +(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) => (MOVVconst [0]) + +// other known comparisons +(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVVconst [1]) +(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVVconst [0]) +(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVVconst [1]) +(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVVconst [0]) +(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) => (MOVVconst [1]) +(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVVconst [1]) +(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVVconst [0]) +(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVVconst [1]) +(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVVconst [0]) +(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) => (MOVVconst [1]) +(SGTconst [c] (MOVWUreg _)) && c < 0 => (MOVVconst [0]) +(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVVconst [1]) +(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) => (MOVVconst [1]) +(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1]) +(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1]) + +// absorb constants into branches +(EQ (MOVVconst [0]) yes no) => (First yes no) +(EQ (MOVVconst [c]) yes no) && c != 0 => (First no yes) +(NE (MOVVconst [0]) yes no) => (First no yes) +(NE (MOVVconst [c]) yes no) && c != 0 => (First yes no) +(LTZ (MOVVconst [c]) yes no) && c < 0 => (First yes no) +(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes) +(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no) +(LEZ (MOVVconst [c]) 
yes no) && c > 0 => (First no yes) +(GTZ (MOVVconst [c]) yes no) && c > 0 => (First yes no) +(GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes) +(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no) +(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes) + +// SGT/SGTU with known outcomes. +(SGT x x) => (MOVVconst [0]) +(SGTU x x) => (MOVVconst [0]) + +// fold readonly sym load +(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read8(sym, int64(off)))]) +(MOVHload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))]) +(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))]) +(MOVVload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))]) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/MIPS64Ops.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/MIPS64Ops.go new file mode 100644 index 0000000000000000000000000000000000000000..08cab89d5db9110f21fce7079cc8efa893d62c73 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/MIPS64Ops.go @@ -0,0 +1,501 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "strings" + +// Notes: +// - Integer types live in the low portion of registers. Upper portions are junk. +// - Boolean types use the low-order byte of a register. 0=false, 1=true. +// Upper bytes are junk. +// - *const instructions may use a constant larger than the instruction can encode. +// In this case the assembler expands to multiple instructions and uses tmp +// register (R23). + +// Suffixes encode the bit width of various instructions. 
+// V (vlong) = 64 bit +// WU (word) = 32 bit unsigned +// W (word) = 32 bit +// H (half word) = 16 bit +// HU = 16 bit unsigned +// B (byte) = 8 bit +// BU = 8 bit unsigned +// F (float) = 32 bit float +// D (double) = 64 bit float + +// Note: registers not used in regalloc are not included in this list, +// so that regmask stays within int64 +// Be careful when hand coding regmasks. +var regNamesMIPS64 = []string{ + "R0", // constant 0 + "R1", + "R2", + "R3", + "R4", + "R5", + "R6", + "R7", + "R8", + "R9", + "R10", + "R11", + "R12", + "R13", + "R14", + "R15", + "R16", + "R17", + "R18", + "R19", + "R20", + "R21", + "R22", + // R23 = REGTMP not used in regalloc + "R24", + "R25", + // R26 reserved by kernel + // R27 reserved by kernel + // R28 = REGSB not used in regalloc + "SP", // aka R29 + "g", // aka R30 + "R31", // aka REGLINK + + "F0", + "F1", + "F2", + "F3", + "F4", + "F5", + "F6", + "F7", + "F8", + "F9", + "F10", + "F11", + "F12", + "F13", + "F14", + "F15", + "F16", + "F17", + "F18", + "F19", + "F20", + "F21", + "F22", + "F23", + "F24", + "F25", + "F26", + "F27", + "F28", + "F29", + "F30", + "F31", + + "HI", // high bits of multiplication + "LO", // low bits of multiplication + + // If you add registers, update asyncPreempt in runtime. + + // pseudo-registers + "SB", +} + +func init() { + // Make map from reg names to reg integers. 
+ if len(regNamesMIPS64) > 64 { + panic("too many registers") + } + num := map[string]int{} + for i, name := range regNamesMIPS64 { + num[name] = i + } + buildReg := func(s string) regMask { + m := regMask(0) + for _, r := range strings.Split(s, " ") { + if n, ok := num[r]; ok { + m |= regMask(1) << uint(n) + continue + } + panic("register " + r + " not found") + } + return m + } + + // Common individual register masks + var ( + gp = buildReg("R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31") + gpg = gp | buildReg("g") + gpsp = gp | buildReg("SP") + gpspg = gpg | buildReg("SP") + gpspsbg = gpspg | buildReg("SB") + fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31") + lo = buildReg("LO") + hi = buildReg("HI") + callerSave = gp | fp | lo | hi | buildReg("g") // runtime.setg (and anything calling it) may clobber g + r1 = buildReg("R1") + r2 = buildReg("R2") + r3 = buildReg("R3") + r4 = buildReg("R4") + ) + // Common regInfo + var ( + gp01 = regInfo{inputs: nil, outputs: []regMask{gp}} + gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}} + gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}} + gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}} + gp2hilo = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{hi, lo}} + gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}} + gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}} + gpstore0 = regInfo{inputs: []regMask{gpspsbg}} + gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}} + gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}} + fp01 = regInfo{inputs: nil, outputs: []regMask{fp}} + fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}} + //fp1flags = regInfo{inputs: []regMask{fp}} + fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}} + gpfp = regInfo{inputs: 
[]regMask{gp}, outputs: []regMask{fp}} + fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}} + fp2flags = regInfo{inputs: []regMask{fp, fp}} + fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}} + fpstore = regInfo{inputs: []regMask{gpspsbg, fp}} + readflags = regInfo{inputs: nil, outputs: []regMask{gp}} + ) + ops := []opData{ + // binary ops + {name: "ADDV", argLength: 2, reg: gp21, asm: "ADDVU", commutative: true}, // arg0 + arg1 + {name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"}, // arg0 + auxInt. auxInt is 32-bit, also in other *const ops. + {name: "SUBV", argLength: 2, reg: gp21, asm: "SUBVU"}, // arg0 - arg1 + {name: "SUBVconst", argLength: 1, reg: gp11, asm: "SUBVU", aux: "Int64"}, // arg0 - auxInt + {name: "MULV", argLength: 2, reg: gp2hilo, asm: "MULV", commutative: true, typ: "(Int64,Int64)"}, // arg0 * arg1, signed, results hi,lo + {name: "MULVU", argLength: 2, reg: gp2hilo, asm: "MULVU", commutative: true, typ: "(UInt64,UInt64)"}, // arg0 * arg1, unsigned, results hi,lo + {name: "DIVV", argLength: 2, reg: gp2hilo, asm: "DIVV", typ: "(Int64,Int64)"}, // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1 + {name: "DIVVU", argLength: 2, reg: gp2hilo, asm: "DIVVU", typ: "(UInt64,UInt64)"}, // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1 + + {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1 + {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1 + {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1 + {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1 + {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1 + {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1 + {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1 + {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1 + + {name: "AND", 
argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1 + {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64"}, // arg0 & auxInt + {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1 + {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0 | auxInt + {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, typ: "UInt64"}, // arg0 ^ arg1 + {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64", typ: "UInt64"}, // arg0 ^ auxInt + {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1) + {name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int64"}, // ^(arg0 | auxInt) + + {name: "NEGV", argLength: 1, reg: gp11}, // -arg0 + {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32 + {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64 + {name: "ABSD", argLength: 1, reg: fp11, asm: "ABSD"}, // abs(arg0), float64 + {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64 + {name: "SQRTF", argLength: 1, reg: fp11, asm: "SQRTF"}, // sqrt(arg0), float32 + + // shifts + {name: "SLLV", argLength: 2, reg: gp21, asm: "SLLV"}, // arg0 << arg1, shift amount is mod 64 + {name: "SLLVconst", argLength: 1, reg: gp11, asm: "SLLV", aux: "Int64"}, // arg0 << auxInt + {name: "SRLV", argLength: 2, reg: gp21, asm: "SRLV"}, // arg0 >> arg1, unsigned, shift amount is mod 64 + {name: "SRLVconst", argLength: 1, reg: gp11, asm: "SRLV", aux: "Int64"}, // arg0 >> auxInt, unsigned + {name: "SRAV", argLength: 2, reg: gp21, asm: "SRAV"}, // arg0 >> arg1, signed, shift amount is mod 64 + {name: "SRAVconst", argLength: 1, reg: gp11, asm: "SRAV", aux: "Int64"}, // arg0 >> auxInt, signed + + // comparisons + {name: "SGT", argLength: 2, reg: gp21, asm: "SGT", typ: "Bool"}, // 1 if arg0 > arg1 (signed), 0 otherwise + {name: "SGTconst", argLength: 1, reg: gp11, asm: "SGT", aux: "Int64", typ: 
"Bool"}, // 1 if auxInt > arg0 (signed), 0 otherwise + {name: "SGTU", argLength: 2, reg: gp21, asm: "SGTU", typ: "Bool"}, // 1 if arg0 > arg1 (unsigned), 0 otherwise + {name: "SGTUconst", argLength: 1, reg: gp11, asm: "SGTU", aux: "Int64", typ: "Bool"}, // 1 if auxInt > arg0 (unsigned), 0 otherwise + + {name: "CMPEQF", argLength: 2, reg: fp2flags, asm: "CMPEQF", typ: "Flags"}, // flags=true if arg0 = arg1, float32 + {name: "CMPEQD", argLength: 2, reg: fp2flags, asm: "CMPEQD", typ: "Flags"}, // flags=true if arg0 = arg1, float64 + {name: "CMPGEF", argLength: 2, reg: fp2flags, asm: "CMPGEF", typ: "Flags"}, // flags=true if arg0 >= arg1, float32 + {name: "CMPGED", argLength: 2, reg: fp2flags, asm: "CMPGED", typ: "Flags"}, // flags=true if arg0 >= arg1, float64 + {name: "CMPGTF", argLength: 2, reg: fp2flags, asm: "CMPGTF", typ: "Flags"}, // flags=true if arg0 > arg1, float32 + {name: "CMPGTD", argLength: 2, reg: fp2flags, asm: "CMPGTD", typ: "Flags"}, // flags=true if arg0 > arg1, float64 + + // moves + {name: "MOVVconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVV", typ: "UInt64", rematerializeable: true}, // auxint + {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float + {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float + + {name: "MOVVaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVV", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB + + {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. 
+ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVWUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVWU", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVVload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVV", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + + {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. 
+ {name: "MOVVstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + + {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem. + {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem. + {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem. + {name: "MOVVstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of zero to arg0 + auxInt + aux. arg1=mem. + + // moves (no conversion) + {name: "MOVWfpgp", argLength: 1, reg: fpgp, asm: "MOVW"}, // move float32 to int32 (no conversion). MIPS64 will perform sign-extend to 64-bit by default + {name: "MOVWgpfp", argLength: 1, reg: gpfp, asm: "MOVW"}, // move int32 to float32 (no conversion). MIPS64 will perform sign-extend to 64-bit by default + {name: "MOVVfpgp", argLength: 1, reg: fpgp, asm: "MOVV"}, // move float64 to int64 (no conversion). + {name: "MOVVgpfp", argLength: 1, reg: gpfp, asm: "MOVV"}, // move int64 to float64 (no conversion). 
+ + // conversions + {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte + {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte + {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half + {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half + {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word + {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word + {name: "MOVVreg", argLength: 1, reg: gp11, asm: "MOVV"}, // move from arg0 + + {name: "MOVVnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register + + {name: "MOVWF", argLength: 1, reg: fp11, asm: "MOVWF"}, // int32 -> float32 + {name: "MOVWD", argLength: 1, reg: fp11, asm: "MOVWD"}, // int32 -> float64 + {name: "MOVVF", argLength: 1, reg: fp11, asm: "MOVVF"}, // int64 -> float32 + {name: "MOVVD", argLength: 1, reg: fp11, asm: "MOVVD"}, // int64 -> float64 + {name: "TRUNCFW", argLength: 1, reg: fp11, asm: "TRUNCFW"}, // float32 -> int32 + {name: "TRUNCDW", argLength: 1, reg: fp11, asm: "TRUNCDW"}, // float64 -> int32 + {name: "TRUNCFV", argLength: 1, reg: fp11, asm: "TRUNCFV"}, // float32 -> int64 + {name: "TRUNCDV", argLength: 1, reg: fp11, asm: "TRUNCDV"}, // float64 -> int64 + {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64 + {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32 + + // function calls + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). 
arg0=mem, auxint=argsize, returns mem + {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + + // duffzero + // arg0 = address of memory to zero + // arg1 = mem + // auxint = offset into duffzero code to start executing + // returns mem + // R1 aka mips.REGRT1 changed as side effect + { + name: "DUFFZERO", + aux: "Int64", + argLength: 2, + reg: regInfo{ + inputs: []regMask{gp}, + clobbers: buildReg("R1 R31"), + }, + faultOnNilArg0: true, + }, + + // duffcopy + // arg0 = address of dst memory (in R2, changed as side effect) + // arg1 = address of src memory (in R1, changed as side effect) + // arg2 = mem + // auxint = offset into duffcopy code to start executing + // returns mem + { + name: "DUFFCOPY", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{buildReg("R2"), buildReg("R1")}, + clobbers: buildReg("R1 R2 R31"), + }, + faultOnNilArg0: true, + faultOnNilArg1: true, + }, + + // large or unaligned zeroing + // arg0 = address of memory to zero (in R1, changed as side effect) + // arg1 = address of the last element to zero + // arg2 = mem + // auxint = alignment + // returns mem + // SUBV $8, R1 + // MOVV R0, 8(R1) + // ADDV $8, R1 + // BNE Rarg1, R1, -2(PC) + { + name: "LoweredZero", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{buildReg("R1"), gp}, + clobbers: buildReg("R1"), + }, + 
clobberFlags: true, + faultOnNilArg0: true, + }, + + // large or unaligned move + // arg0 = address of dst memory (in R2, changed as side effect) + // arg1 = address of src memory (in R1, changed as side effect) + // arg2 = address of the last element of src + // arg3 = mem + // auxint = alignment + // returns mem + // SUBV $8, R1 + // MOVV 8(R1), Rtmp + // MOVV Rtmp, (R2) + // ADDV $8, R1 + // ADDV $8, R2 + // BNE Rarg2, R1, -4(PC) + { + name: "LoweredMove", + aux: "Int64", + argLength: 4, + reg: regInfo{ + inputs: []regMask{buildReg("R2"), buildReg("R1"), gp}, + clobbers: buildReg("R1 R2"), + }, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + }, + + // atomic and/or. + // *arg0 &= (|=) arg1. arg2=mem. returns memory. + // SYNC + // LL (Rarg0), Rtmp + // AND Rarg1, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // SYNC + {name: "LoweredAtomicAnd32", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicOr32", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // atomic loads. + // load from arg0. arg1=mem. + // returns so they can be properly ordered with other loads. + {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true}, + {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true}, + {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true}, + + // atomic stores. + // store arg1 to arg0. arg2=mem. returns memory. + {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, + // store zero to arg0. arg1=mem. returns memory. 
+ {name: "LoweredAtomicStorezero32", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicStorezero64", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true}, + + // atomic exchange. + // store arg1 to arg0. arg2=mem. returns . + // SYNC + // LL (Rarg0), Rout + // MOVV Rarg1, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // SYNC + {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // atomic add. + // *arg0 += arg1. arg2=mem. returns . + // SYNC + // LL (Rarg0), Rout + // ADDV Rarg1, Rout, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // SYNC + // ADDV Rarg1, Rout + {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + // *arg0 += auxint. arg1=mem. returns . auxint is 32-bit. + {name: "LoweredAtomicAddconst32", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int32", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicAddconst64", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int64", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // atomic compare and swap. + // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. 
+ // if *arg0 == arg1 { + // *arg0 = arg2 + // return (true, memory) + // } else { + // return (false, memory) + // } + // SYNC + // MOVV $0, Rout + // LL (Rarg0), Rtmp + // BNE Rtmp, Rarg1, 4(PC) + // MOVV Rarg2, Rout + // SC Rout, (Rarg0) + // BEQ Rout, -4(PC) + // SYNC + {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // pseudo-ops + {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem. + + {name: "FPFlagTrue", argLength: 1, reg: readflags}, // bool, true if FP flag is true + {name: "FPFlagFalse", argLength: 1, reg: readflags}, // bool, true if FP flag is false + + // Scheduler ensures LoweredGetClosurePtr occurs only in entry block, + // and sorts it to the very beginning of the block to prevent other + // use of R22 (mips.REGCTXT, the closure pointer) + {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R22")}}, zeroWidth: true}, + + // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem. + {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true}, + + // LoweredGetCallerPC evaluates to the PC to which its "caller" will return. + // I.e., if f calls g "calls" getcallerpc, + // the result should be the PC within f that g will return to. + // See runtime/stubs.go for a more detailed discussion. + {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, + + // LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, auxint=# of buffer entries needed + // It saves all GP registers if necessary, + // but clobbers R31 (LR) because it's a call + // and R23 (REGTMP). + // Returns a pointer to a write barrier buffer in R25. 
+ {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R31"), outputs: []regMask{buildReg("R25")}}, clobberFlags: true, aux: "Int64"}, + + // There are three of these functions so that they can have three different register inputs. + // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the + // default registers to match so we don't need to copy registers around unnecessarily. + {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). 
+ } + + blocks := []blockData{ + {name: "EQ", controls: 1}, + {name: "NE", controls: 1}, + {name: "LTZ", controls: 1}, // < 0 + {name: "LEZ", controls: 1}, // <= 0 + {name: "GTZ", controls: 1}, // > 0 + {name: "GEZ", controls: 1}, // >= 0 + {name: "FPT", controls: 1}, // FP flag is true + {name: "FPF", controls: 1}, // FP flag is false + } + + archs = append(archs, arch{ + name: "MIPS64", + pkg: "cmd/internal/obj/mips", + genfile: "../../mips64/ssa.go", + ops: ops, + blocks: blocks, + regnames: regNamesMIPS64, + gpregmask: gp, + fpregmask: fp, + specialregmask: hi | lo, + framepointerreg: -1, // not used + linkreg: int8(num["R31"]), + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/MIPSOps.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/MIPSOps.go new file mode 100644 index 0000000000000000000000000000000000000000..5964bb7a333742861dc39d77f7482da254a65146 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/MIPSOps.go @@ -0,0 +1,447 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "strings" + +// Notes: +// - Integer types live in the low portion of registers. Upper portions are junk. +// - Boolean types use the low-order byte of a register. 0=false, 1=true. +// Upper bytes are junk. +// - Unused portions of AuxInt are filled by sign-extending the used portion. +// - *const instructions may use a constant larger than the instruction can encode. +// In this case the assembler expands to multiple instructions and uses tmp +// register (R23). + +// Suffixes encode the bit width of various instructions. 
+// W (word) = 32 bit +// H (half word) = 16 bit +// HU = 16 bit unsigned +// B (byte) = 8 bit +// BU = 8 bit unsigned +// F (float) = 32 bit float +// D (double) = 64 bit float + +// Note: registers not used in regalloc are not included in this list, +// so that regmask stays within int64 +// Be careful when hand coding regmasks. +var regNamesMIPS = []string{ + "R0", // constant 0 + "R1", + "R2", + "R3", + "R4", + "R5", + "R6", + "R7", + "R8", + "R9", + "R10", + "R11", + "R12", + "R13", + "R14", + "R15", + "R16", + "R17", + "R18", + "R19", + "R20", + "R21", + "R22", + //REGTMP + "R24", + "R25", + // R26 reserved by kernel + // R27 reserved by kernel + "R28", + "SP", // aka R29 + "g", // aka R30 + "R31", // REGLINK + + // odd FP registers contain high parts of 64-bit FP values + "F0", + "F2", + "F4", + "F6", + "F8", + "F10", + "F12", + "F14", + "F16", + "F18", + "F20", + "F22", + "F24", + "F26", + "F28", + "F30", + + "HI", // high bits of multiplication + "LO", // low bits of multiplication + + // If you add registers, update asyncPreempt in runtime. + + // pseudo-registers + "SB", +} + +func init() { + // Make map from reg names to reg integers. 
+ if len(regNamesMIPS) > 64 { + panic("too many registers") + } + num := map[string]int{} + for i, name := range regNamesMIPS { + num[name] = i + } + buildReg := func(s string) regMask { + m := regMask(0) + for _, r := range strings.Split(s, " ") { + if n, ok := num[r]; ok { + m |= regMask(1) << uint(n) + continue + } + panic("register " + r + " not found") + } + return m + } + + // Common individual register masks + var ( + gp = buildReg("R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31") + gpg = gp | buildReg("g") + gpsp = gp | buildReg("SP") + gpspg = gpg | buildReg("SP") + gpspsbg = gpspg | buildReg("SB") + fp = buildReg("F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30") + lo = buildReg("LO") + hi = buildReg("HI") + callerSave = gp | fp | lo | hi | buildReg("g") // runtime.setg (and anything calling it) may clobber g + r1 = buildReg("R1") + r2 = buildReg("R2") + r3 = buildReg("R3") + r4 = buildReg("R4") + r5 = buildReg("R5") + ) + // Common regInfo + var ( + gp01 = regInfo{inputs: nil, outputs: []regMask{gp}} + gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}} + gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}} + gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}} + gp31 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}} + gp2hilo = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{hi, lo}} + gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}} + gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}} + gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}} + gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}} + gpstore0 = regInfo{inputs: []regMask{gpspsbg}} + fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}} + gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}} + fp01 = regInfo{inputs: nil, outputs: []regMask{fp}} + fp11 = regInfo{inputs: []regMask{fp}, outputs: 
[]regMask{fp}} + fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}} + fp2flags = regInfo{inputs: []regMask{fp, fp}} + fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}} + fpstore = regInfo{inputs: []regMask{gpspsbg, fp}} + readflags = regInfo{inputs: nil, outputs: []regMask{gp}} + ) + ops := []opData{ + {name: "ADD", argLength: 2, reg: gp21, asm: "ADDU", commutative: true}, // arg0 + arg1 + {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADDU", aux: "Int32"}, // arg0 + auxInt + {name: "SUB", argLength: 2, reg: gp21, asm: "SUBU"}, // arg0 - arg1 + {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUBU", aux: "Int32"}, // arg0 - auxInt + {name: "MUL", argLength: 2, reg: regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}, clobbers: hi | lo}, asm: "MUL", commutative: true}, // arg0 * arg1 + {name: "MULT", argLength: 2, reg: gp2hilo, asm: "MUL", commutative: true, typ: "(Int32,Int32)"}, // arg0 * arg1, signed, results hi,lo + {name: "MULTU", argLength: 2, reg: gp2hilo, asm: "MULU", commutative: true, typ: "(UInt32,UInt32)"}, // arg0 * arg1, unsigned, results hi,lo + {name: "DIV", argLength: 2, reg: gp2hilo, asm: "DIV", typ: "(Int32,Int32)"}, // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1 + {name: "DIVU", argLength: 2, reg: gp2hilo, asm: "DIVU", typ: "(UInt32,UInt32)"}, // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1 + + {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1 + {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1 + {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1 + {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1 + {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1 + {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1 + {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1 + {name: "DIVD", 
argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1 + + {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1 + {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int32"}, // arg0 & auxInt + {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1 + {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int32"}, // arg0 | auxInt + {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, typ: "UInt32"}, // arg0 ^ arg1 + {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int32", typ: "UInt32"}, // arg0 ^ auxInt + {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1) + {name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int32"}, // ^(arg0 | auxInt) + + {name: "NEG", argLength: 1, reg: gp11}, // -arg0 + {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32 + {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64 + {name: "ABSD", argLength: 1, reg: fp11, asm: "ABSD"}, // abs(arg0), float64 + {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64 + {name: "SQRTF", argLength: 1, reg: fp11, asm: "SQRTF"}, // sqrt(arg0), float32 + + // shifts + {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << arg1, shift amount is mod 32 + {name: "SLLconst", argLength: 1, reg: gp11, asm: "SLL", aux: "Int32"}, // arg0 << auxInt, shift amount must be 0 through 31 inclusive + {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> arg1, unsigned, shift amount is mod 32 + {name: "SRLconst", argLength: 1, reg: gp11, asm: "SRL", aux: "Int32"}, // arg0 >> auxInt, shift amount must be 0 through 31 inclusive + {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> arg1, signed, shift amount is mod 32 + {name: "SRAconst", argLength: 1, reg: gp11, asm: "SRA", aux: "Int32"}, // arg0 >> auxInt, signed, shift amount must be 0 through 31 inclusive + + {name: "CLZ", argLength: 
1, reg: gp11, asm: "CLZ"}, + + // comparisons + {name: "SGT", argLength: 2, reg: gp21, asm: "SGT", typ: "Bool"}, // 1 if arg0 > arg1 (signed), 0 otherwise + {name: "SGTconst", argLength: 1, reg: gp11, asm: "SGT", aux: "Int32", typ: "Bool"}, // 1 if auxInt > arg0 (signed), 0 otherwise + {name: "SGTzero", argLength: 1, reg: gp11, asm: "SGT", typ: "Bool"}, // 1 if arg0 > 0 (signed), 0 otherwise + {name: "SGTU", argLength: 2, reg: gp21, asm: "SGTU", typ: "Bool"}, // 1 if arg0 > arg1 (unsigned), 0 otherwise + {name: "SGTUconst", argLength: 1, reg: gp11, asm: "SGTU", aux: "Int32", typ: "Bool"}, // 1 if auxInt > arg0 (unsigned), 0 otherwise + {name: "SGTUzero", argLength: 1, reg: gp11, asm: "SGTU", typ: "Bool"}, // 1 if arg0 > 0 (unsigned), 0 otherwise + + {name: "CMPEQF", argLength: 2, reg: fp2flags, asm: "CMPEQF", typ: "Flags"}, // flags=true if arg0 = arg1, float32 + {name: "CMPEQD", argLength: 2, reg: fp2flags, asm: "CMPEQD", typ: "Flags"}, // flags=true if arg0 = arg1, float64 + {name: "CMPGEF", argLength: 2, reg: fp2flags, asm: "CMPGEF", typ: "Flags"}, // flags=true if arg0 >= arg1, float32 + {name: "CMPGED", argLength: 2, reg: fp2flags, asm: "CMPGED", typ: "Flags"}, // flags=true if arg0 >= arg1, float64 + {name: "CMPGTF", argLength: 2, reg: fp2flags, asm: "CMPGTF", typ: "Flags"}, // flags=true if arg0 > arg1, float32 + {name: "CMPGTD", argLength: 2, reg: fp2flags, asm: "CMPGTD", typ: "Flags"}, // flags=true if arg0 > arg1, float64 + + // moves + {name: "MOVWconst", argLength: 0, reg: gp01, aux: "Int32", asm: "MOVW", typ: "UInt32", rematerializeable: true}, // auxint + {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float32", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float + {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float + + {name: "MOVWaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | 
buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVW", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB + + {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + + {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. 
+ {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + + {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem. + {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem. + {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem. + + // moves (no conversion) + {name: "MOVWfpgp", argLength: 1, reg: fpgp, asm: "MOVW"}, // move float32 to int32 (no conversion) + {name: "MOVWgpfp", argLength: 1, reg: gpfp, asm: "MOVW"}, // move int32 to float32 (no conversion) + + // conversions + {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte + {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte + {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half + {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half + {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0 + + {name: "MOVWnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register + + // conditional move on zero (returns arg1 if arg2 is 0, otherwise arg0) + // order of parameters is reversed so we can 
use resultInArg0 (OpCMOVZ result arg1 arg2-> CMOVZ arg2reg, arg1reg, resultReg) + {name: "CMOVZ", argLength: 3, reg: gp31, asm: "CMOVZ", resultInArg0: true}, + {name: "CMOVZzero", argLength: 2, reg: regInfo{inputs: []regMask{gp, gpg}, outputs: []regMask{gp}}, asm: "CMOVZ", resultInArg0: true}, + + {name: "MOVWF", argLength: 1, reg: fp11, asm: "MOVWF"}, // int32 -> float32 + {name: "MOVWD", argLength: 1, reg: fp11, asm: "MOVWD"}, // int32 -> float64 + {name: "TRUNCFW", argLength: 1, reg: fp11, asm: "TRUNCFW"}, // float32 -> int32 + {name: "TRUNCDW", argLength: 1, reg: fp11, asm: "TRUNCDW"}, // float64 -> int32 + {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64 + {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32 + + // function calls + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + + // atomic ops + + // load from arg0. arg1=mem. + // returns so they can be properly ordered with other loads. 
+ // SYNC + // MOV(B|W) (Rarg0), Rout + // SYNC + {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true}, + {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true}, + + // store arg1 to arg0. arg2=mem. returns memory. + // SYNC + // MOV(B|W) Rarg1, (Rarg0) + // SYNC + {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicStorezero", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true}, + + // atomic exchange. + // store arg1 to arg0. arg2=mem. returns . + // SYNC + // LL (Rarg0), Rout + // MOVW Rarg1, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // SYNC + {name: "LoweredAtomicExchange", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // atomic add. + // *arg0 += arg1. arg2=mem. returns . + // SYNC + // LL (Rarg0), Rout + // ADDU Rarg1, Rout, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // SYNC + // ADDU Rarg1, Rout + {name: "LoweredAtomicAdd", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicAddconst", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int32", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // atomic compare and swap. + // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. 
+ // if *arg0 == arg1 { + // *arg0 = arg2 + // return (true, memory) + // } else { + // return (false, memory) + // } + // SYNC + // MOVW $0, Rout + // LL (Rarg0), Rtmp + // BNE Rtmp, Rarg1, 4(PC) + // MOVW Rarg2, Rout + // SC Rout, (Rarg0) + // BEQ Rout, -4(PC) + // SYNC + {name: "LoweredAtomicCas", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // atomic and/or. + // *arg0 &= (|=) arg1. arg2=mem. returns memory. + // SYNC + // LL (Rarg0), Rtmp + // AND Rarg1, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // SYNC + {name: "LoweredAtomicAnd", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicOr", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // large or unaligned zeroing + // arg0 = address of memory to zero (in R1, changed as side effect) + // arg1 = address of the last element to zero + // arg2 = mem + // auxint = alignment + // returns mem + // SUBU $4, R1 + // MOVW R0, 4(R1) + // ADDU $4, R1 + // BNE Rarg1, R1, -2(PC) + { + name: "LoweredZero", + aux: "Int32", + argLength: 3, + reg: regInfo{ + inputs: []regMask{buildReg("R1"), gp}, + clobbers: buildReg("R1"), + }, + faultOnNilArg0: true, + }, + + // large or unaligned move + // arg0 = address of dst memory (in R2, changed as side effect) + // arg1 = address of src memory (in R1, changed as side effect) + // arg2 = address of the last element of src + // arg3 = mem + // auxint = alignment + // returns mem + // SUBU $4, R1 + // MOVW 4(R1), Rtmp + // MOVW Rtmp, (R2) + // ADDU $4, R1 + // ADDU $4, R2 + // BNE Rarg2, R1, -4(PC) + { + name: "LoweredMove", + aux: "Int32", + argLength: 4, + reg: regInfo{ + inputs: []regMask{buildReg("R2"), buildReg("R1"), gp}, + clobbers: buildReg("R1 R2"), + }, + faultOnNilArg0: true, + faultOnNilArg1: true, + }, + + // pseudo-ops + {name: "LoweredNilCheck", argLength: 2, 
reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem. + + {name: "FPFlagTrue", argLength: 1, reg: readflags}, // bool, true if FP flag is true + {name: "FPFlagFalse", argLength: 1, reg: readflags}, // bool, true if FP flag is false + + // Scheduler ensures LoweredGetClosurePtr occurs only in entry block, + // and sorts it to the very beginning of the block to prevent other + // use of R22 (mips.REGCTXT, the closure pointer) + {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R22")}}, zeroWidth: true}, + + // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem. + {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true}, + + // LoweredGetCallerPC evaluates to the PC to which its "caller" will return. + // I.e., if f calls g "calls" getcallerpc, + // the result should be the PC within f that g will return to. + // See runtime/stubs.go for a more detailed discussion. + {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, + + // LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, auxint=# of buffer entries needed + // It saves all GP registers if necessary, + // but clobbers R31 (LR) because it's a call + // and R23 (REGTMP). + // Returns a pointer to a write barrier buffer in R25. + {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R31"), outputs: []regMask{buildReg("R25")}}, clobberFlags: true, aux: "Int64"}, + + // There are three of these functions so that they can have three different register inputs. + // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the + // default registers to match so we don't need to copy registers around unnecessarily. + {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. 
AuxInt contains report code (see PanicBounds in genericOps.go). + {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + // Extend ops are the same as Bounds ops except the indexes are 64-bit. + {name: "LoweredPanicExtendA", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r3, r4}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go). + {name: "LoweredPanicExtendB", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r2, r3}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go). + {name: "LoweredPanicExtendC", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r1, r2}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go). 
+ } + + blocks := []blockData{ + {name: "EQ", controls: 1}, + {name: "NE", controls: 1}, + {name: "LTZ", controls: 1}, // < 0 + {name: "LEZ", controls: 1}, // <= 0 + {name: "GTZ", controls: 1}, // > 0 + {name: "GEZ", controls: 1}, // >= 0 + {name: "FPT", controls: 1}, // FP flag is true + {name: "FPF", controls: 1}, // FP flag is false + } + + archs = append(archs, arch{ + name: "MIPS", + pkg: "cmd/internal/obj/mips", + genfile: "../../mips/ssa.go", + ops: ops, + blocks: blocks, + regnames: regNamesMIPS, + gpregmask: gp, + fpregmask: fp, + specialregmask: hi | lo, + framepointerreg: -1, // not used + linkreg: int8(num["R31"]), + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/PPC64.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/PPC64.rules new file mode 100644 index 0000000000000000000000000000000000000000..c9cd34b9a67ce8e88f629251203ae59901c6c2cf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/PPC64.rules @@ -0,0 +1,1018 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// GOPPC64 values indicate power8, power9, etc. +// That means the code is compiled for that target, +// and will not run on earlier targets. +// +(Add(Ptr|64|32|16|8) ...) => (ADD ...) +(Add64F ...) => (FADD ...) +(Add32F ...) => (FADDS ...) + +(Sub(Ptr|64|32|16|8) ...) => (SUB ...) +(Sub32F ...) => (FSUBS ...) +(Sub64F ...) => (FSUB ...) 
+ +// Combine 64 bit integer multiply and adds +(ADD l:(MULLD x y) z) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD x y z) + +(Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y)) +(Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y)) +(Mod8 x y) => (Mod32 (SignExt8to32 x) (SignExt8to32 y)) +(Mod8u x y) => (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y)) +(Mod64 x y) && buildcfg.GOPPC64 >=9 => (MODSD x y) +(Mod64 x y) && buildcfg.GOPPC64 <=8 => (SUB x (MULLD y (DIVD x y))) +(Mod64u x y) && buildcfg.GOPPC64 >= 9 => (MODUD x y) +(Mod64u x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLD y (DIVDU x y))) +(Mod32 x y) && buildcfg.GOPPC64 >= 9 => (MODSW x y) +(Mod32 x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLW y (DIVW x y))) +(Mod32u x y) && buildcfg.GOPPC64 >= 9 => (MODUW x y) +(Mod32u x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLW y (DIVWU x y))) + +// (x + y) / 2 with x>=y => (x - y) / 2 + y +(Avg64u x y) => (ADD (SRDconst (SUB x y) [1]) y) + +(Mul64 ...) => (MULLD ...) +(Mul(32|16|8) ...) => (MULLW ...) +(Select0 (Mul64uhilo x y)) => (MULHDU x y) +(Select1 (Mul64uhilo x y)) => (MULLD x y) + +(Div64 [false] x y) => (DIVD x y) +(Div64u ...) => (DIVDU ...) +(Div32 [false] x y) => (DIVW x y) +(Div32u ...) => (DIVWU ...) +(Div16 [false] x y) => (DIVW (SignExt16to32 x) (SignExt16to32 y)) +(Div16u x y) => (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y)) +(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y)) +(Div8u x y) => (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y)) + +(Hmul(64|64u|32|32u) ...) => (MULH(D|DU|W|WU) ...) + +(Mul(32|64)F ...) => ((FMULS|FMUL) ...) + +(Div(32|64)F ...) => ((FDIVS|FDIV) ...) + +// Lowering float <=> int +(Cvt32to(32|64)F x) => ((FCFIDS|FCFID) (MTVSRD (SignExt32to64 x))) +(Cvt64to(32|64)F x) => ((FCFIDS|FCFID) (MTVSRD x)) + +(Cvt32Fto(32|64) x) => (MFVSRD (FCTI(W|D)Z x)) +(Cvt64Fto(32|64) x) => (MFVSRD (FCTI(W|D)Z x)) + +(Cvt32Fto64F ...) => (Copy ...) 
// Note v will have the wrong type for patterns dependent on Float32/Float64 +(Cvt64Fto32F ...) => (FRSP ...) + +(CvtBoolToUint8 ...) => (Copy ...) + +(Round(32|64)F ...) => (LoweredRound(32|64)F ...) + +(Sqrt ...) => (FSQRT ...) +(Sqrt32 ...) => (FSQRTS ...) +(Floor ...) => (FFLOOR ...) +(Ceil ...) => (FCEIL ...) +(Trunc ...) => (FTRUNC ...) +(Round ...) => (FROUND ...) +(Copysign x y) => (FCPSGN y x) +(Abs ...) => (FABS ...) +(FMA ...) => (FMADD ...) + +// Lowering extension +// Note: we always extend to 64 bits even though some ops don't need that many result bits. +(SignExt8to(16|32|64) ...) => (MOVBreg ...) +(SignExt16to(32|64) ...) => (MOVHreg ...) +(SignExt32to64 ...) => (MOVWreg ...) + +(ZeroExt8to(16|32|64) ...) => (MOVBZreg ...) +(ZeroExt16to(32|64) ...) => (MOVHZreg ...) +(ZeroExt32to64 ...) => (MOVWZreg ...) + +(Trunc(16|32|64)to8 x) && t.IsSigned() => (MOVBreg x) +(Trunc(16|32|64)to8 x) => (MOVBZreg x) +(Trunc(32|64)to16 x) && t.IsSigned() => (MOVHreg x) +(Trunc(32|64)to16 x) => (MOVHZreg x) +(Trunc64to32 x) && t.IsSigned() => (MOVWreg x) +(Trunc64to32 x) => (MOVWZreg x) + +// Lowering constants +(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)]) +(Const(32|64)F ...) => (FMOV(S|D)const ...) +(ConstNil) => (MOVDconst [0]) +(ConstBool [t]) => (MOVDconst [b2i(t)]) + +// Carrying addition. +(Select0 (Add64carry x y c)) => (Select0 (ADDE x y (Select1 (ADDCconst c [-1])))) +(Select1 (Add64carry x y c)) => (ADDZEzero (Select1 (ADDE x y (Select1 (ADDCconst c [-1]))))) +// Fold initial carry bit if 0. +(ADDE x y (Select1 (ADDCconst (MOVDconst [0]) [-1]))) => (ADDC x y) +// Fold transfer of CA -> GPR -> CA. Note 2 uses when feeding into a chained Add64carry. +(Select1 (ADDCconst n:(ADDZEzero x) [-1])) && n.Uses <= 2 => x + +// Borrowing subtraction. 
+(Select0 (Sub64borrow x y c)) => (Select0 (SUBE x y (Select1 (SUBCconst c [0])))) +(Select1 (Sub64borrow x y c)) => (NEG (SUBZEzero (Select1 (SUBE x y (Select1 (SUBCconst c [0])))))) +// Fold initial borrow bit if 0. +(SUBE x y (Select1 (SUBCconst (MOVDconst [0]) [0]))) => (SUBC x y) +// Fold transfer of CA -> GPR -> CA. Note 2 uses when feeding into a chained Sub64borrow. +(Select1 (SUBCconst n:(NEG (SUBZEzero x)) [0])) && n.Uses <= 2 => x + +// Constant folding +(FABS (FMOVDconst [x])) => (FMOVDconst [math.Abs(x)]) +(FSQRT (FMOVDconst [x])) && x >= 0 => (FMOVDconst [math.Sqrt(x)]) +(FFLOOR (FMOVDconst [x])) => (FMOVDconst [math.Floor(x)]) +(FCEIL (FMOVDconst [x])) => (FMOVDconst [math.Ceil(x)]) +(FTRUNC (FMOVDconst [x])) => (FMOVDconst [math.Trunc(x)]) + +// Rotates +(RotateLeft8 x (MOVDconst [c])) => (Or8 (Lsh8x64 x (MOVDconst [c&7])) (Rsh8Ux64 x (MOVDconst [-c&7]))) +(RotateLeft16 x (MOVDconst [c])) => (Or16 (Lsh16x64 x (MOVDconst [c&15])) (Rsh16Ux64 x (MOVDconst [-c&15]))) +(RotateLeft(32|64) ...) => ((ROTLW|ROTL) ...) + +// Constant rotate generation +(ROTLW x (MOVDconst [c])) => (ROTLWconst x [c&31]) +(ROTL x (MOVDconst [c])) => (ROTLconst x [c&63]) + +// Combine rotate and mask operations +(Select0 (ANDCCconst [m] (ROTLWconst [r] x))) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x) +(AND (MOVDconst [m]) (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x) +(Select0 (ANDCCconst [m] (ROTLW x r))) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r) +(AND (MOVDconst [m]) (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r) + +// Note, any rotated word bitmask is still a valid word bitmask. 
+(ROTLWconst [r] (AND (MOVDconst [m]) x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x) +(ROTLWconst [r] (Select0 (ANDCCconst [m] x))) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x) + +(Select0 (ANDCCconst [m] (SRWconst x [s]))) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0]) +(Select0 (ANDCCconst [m] (SRWconst x [s]))) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x) +(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0]) +(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x) + +(SRWconst (Select0 (ANDCCconst [m] x)) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0]) +(SRWconst (Select0 (ANDCCconst [m] x)) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x) +(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0]) +(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x) + +// Merge shift right + shift left and clear left (e.g for a table lookup) +(CLRLSLDI [c] (SRWconst [s] x)) && mergePPC64ClrlsldiSrw(int64(c),s) != 0 => (RLWINM [mergePPC64ClrlsldiSrw(int64(c),s)] x) +(SLDconst [l] (SRWconst [r] x)) && mergePPC64SldiSrw(l,r) != 0 => (RLWINM [mergePPC64SldiSrw(l,r)] x) +// The following reduction shows up frequently too. 
e.g b[(x>>14)&0xFF] +(CLRLSLDI [c] i:(RLWINM [s] x)) && mergePPC64ClrlsldiRlwinm(c,s) != 0 => (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x) + +// large constant signed right shift, we leave the sign bit +(Rsh64x64 x (MOVDconst [c])) && uint64(c) >= 64 => (SRADconst x [63]) +(Rsh32x64 x (MOVDconst [c])) && uint64(c) >= 32 => (SRAWconst x [63]) +(Rsh16x64 x (MOVDconst [c])) && uint64(c) >= 16 => (SRAWconst (SignExt16to32 x) [63]) +(Rsh8x64 x (MOVDconst [c])) && uint64(c) >= 8 => (SRAWconst (SignExt8to32 x) [63]) + +// constant shifts +((Lsh64|Rsh64|Rsh64U)x64 x (MOVDconst [c])) && uint64(c) < 64 => (S(L|RA|R)Dconst x [c]) +((Lsh32|Rsh32|Rsh32U)x64 x (MOVDconst [c])) && uint64(c) < 32 => (S(L|RA|R)Wconst x [c]) +((Rsh16|Rsh16U)x64 x (MOVDconst [c])) && uint64(c) < 16 => (SR(AW|W)const ((Sign|Zero)Ext16to32 x) [c]) +(Lsh16x64 x (MOVDconst [c])) && uint64(c) < 16 => (SLWconst x [c]) +((Rsh8|Rsh8U)x64 x (MOVDconst [c])) && uint64(c) < 8 => (SR(AW|W)const ((Sign|Zero)Ext8to32 x) [c]) +(Lsh8x64 x (MOVDconst [c])) && uint64(c) < 8 => (SLWconst x [c]) + +// Lower bounded shifts first. No need to check shift value. +(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLD x y) +(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y) +(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLD x y) +(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLD x y) +(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD x y) +(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW x y) +(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD (MOVHZreg x) y) +(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD (MOVBZreg x) y) +(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAD x y) +(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y) +(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAD (MOVHreg x) y) +(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAD (MOVBreg x) y) + +// Unbounded shifts. 
Go shifts saturate to 0 or -1 when shifting beyond the number of +// bits in a type, PPC64 shifts do not (see the ISA for details). +// +// Note, y is always non-negative. +// +// Note, ISELZ is intentionally not used in lower. Where possible, ISEL is converted to ISELZ in late lower +// after all the ISEL folding rules have been exercised. + +((Rsh64U|Lsh64)x64 x y) => (ISEL [0] (S(R|L)D x y) (MOVDconst [0]) (CMPUconst y [64])) +((Rsh64U|Lsh64)x32 x y) => (ISEL [0] (S(R|L)D x y) (MOVDconst [0]) (CMPWUconst y [64])) +((Rsh64U|Lsh64)x16 x y) => (ISEL [2] (S(R|L)D x y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFC0] y))) +((Rsh64U|Lsh64)x8 x y) => (ISEL [2] (S(R|L)D x y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00C0] y))) +(Rsh64x(64|32) x y) => (ISEL [0] (SRAD x y) (SRADconst x [63]) (CMP(U|WU)const y [64])) +(Rsh64x16 x y) => (ISEL [2] (SRAD x y) (SRADconst x [63]) (Select1 (ANDCCconst [0xFFC0] y))) +(Rsh64x8 x y) => (ISEL [2] (SRAD x y) (SRADconst x [63]) (Select1 (ANDCCconst [0x00C0] y))) + +((Rsh32U|Lsh32)x64 x y) => (ISEL [0] (S(R|L)W x y) (MOVDconst [0]) (CMPUconst y [32])) +((Rsh32U|Lsh32)x32 x y) => (ISEL [0] (S(R|L)W x y) (MOVDconst [0]) (CMPWUconst y [32])) +((Rsh32U|Lsh32)x16 x y) => (ISEL [2] (S(R|L)W x y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFE0] y))) +((Rsh32U|Lsh32)x8 x y) => (ISEL [2] (S(R|L)W x y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00E0] y))) +(Rsh32x(64|32) x y) => (ISEL [0] (SRAW x y) (SRAWconst x [31]) (CMP(U|WU)const y [32])) +(Rsh32x16 x y) => (ISEL [2] (SRAW x y) (SRAWconst x [31]) (Select1 (ANDCCconst [0xFFE0] y))) +(Rsh32x8 x y) => (ISEL [2] (SRAW x y) (SRAWconst x [31]) (Select1 (ANDCCconst [0x00E0] y))) + +((Rsh16U|Lsh16)x64 x y) => (ISEL [0] (S(R|L)D (MOVHZreg x) y) (MOVDconst [0]) (CMPUconst y [16])) +((Rsh16U|Lsh16)x32 x y) => (ISEL [0] (S(R|L)D (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst y [16])) +((Rsh16U|Lsh16)x16 x y) => (ISEL [2] (S(R|L)D (MOVHZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFF0] y))) +((Rsh16U|Lsh16)x8 x 
y) => (ISEL [2] (S(R|L)D (MOVHZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00F0] y))) +(Rsh16x(64|32) x y) => (ISEL [0] (SRAD (MOVHreg x) y) (SRADconst (MOVHreg x) [15]) (CMP(U|WU)const y [16])) +(Rsh16x16 x y) => (ISEL [2] (SRAD (MOVHreg x) y) (SRADconst (MOVHreg x) [15]) (Select1 (ANDCCconst [0xFFF0] y))) +(Rsh16x8 x y) => (ISEL [2] (SRAD (MOVHreg x) y) (SRADconst (MOVHreg x) [15]) (Select1 (ANDCCconst [0x00F0] y))) + +((Rsh8U|Lsh8)x64 x y) => (ISEL [0] (S(R|L)D (MOVBZreg x) y) (MOVDconst [0]) (CMPUconst y [8])) +((Rsh8U|Lsh8)x32 x y) => (ISEL [0] (S(R|L)D (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst y [8])) +((Rsh8U|Lsh8)x16 x y) => (ISEL [2] (S(R|L)D (MOVBZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0xFFF8] y))) +((Rsh8U|Lsh8)x8 x y) => (ISEL [2] (S(R|L)D (MOVBZreg x) y) (MOVDconst [0]) (Select1 (ANDCCconst [0x00F8] y))) +(Rsh8x(64|32) x y) => (ISEL [0] (SRAD (MOVBreg x) y) (SRADconst (MOVBreg x) [7]) (CMP(U|WU)const y [8])) +(Rsh8x16 x y) => (ISEL [2] (SRAD (MOVBreg x) y) (SRADconst (MOVBreg x) [7]) (Select1 (ANDCCconst [0xFFF8] y))) +(Rsh8x8 x y) => (ISEL [2] (SRAD (MOVBreg x) y) (SRADconst (MOVBreg x) [7]) (Select1 (ANDCCconst [0x00F8] y))) + +// Catch bounded shifts in situations like foo< uint64(c) => (FlagLT) + +(ORN x (MOVDconst [-1])) => x + +(S(RAD|RD|LD) x (MOVDconst [c])) => (S(RAD|RD|LD)const [c&63 | (c>>6&1*63)] x) +(S(RAW|RW|LW) x (MOVDconst [c])) => (S(RAW|RW|LW)const [c&31 | (c>>5&1*31)] x) + +(Addr {sym} base) => (MOVDaddr {sym} [0] base) +(LocalAddr {sym} base mem) && t.Elem().HasPointers() => (MOVDaddr {sym} (SPanchored base mem)) +(LocalAddr {sym} base _) && !t.Elem().HasPointers() => (MOVDaddr {sym} base) +(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr) +(MOVDaddr {sym} [n] p:(ADD x y)) && sym == nil && n == 0 => p +(MOVDaddr {sym} [n] ptr) && sym == nil && n == 0 && (ptr.Op == OpArgIntReg || ptr.Op == OpPhi) => ptr + +// TODO: optimize these cases? +(Ctz32NonZero ...) => (Ctz32 ...) +(Ctz64NonZero ...) => (Ctz64 ...) 
+ +(Ctz64 x) && buildcfg.GOPPC64<=8 => (POPCNTD (ANDN (ADDconst [-1] x) x)) +(Ctz64 x) => (CNTTZD x) +(Ctz32 x) && buildcfg.GOPPC64<=8 => (POPCNTW (MOVWZreg (ANDN (ADDconst [-1] x) x))) +(Ctz32 x) => (CNTTZW (MOVWZreg x)) +(Ctz16 x) => (POPCNTW (MOVHZreg (ANDN (ADDconst [-1] x) x))) +(Ctz8 x) => (POPCNTB (MOVBZreg (ANDN (ADDconst [-1] x) x))) + +(BitLen64 x) => (SUBFCconst [64] (CNTLZD x)) +(BitLen32 x) => (SUBFCconst [32] (CNTLZW x)) + +(PopCount64 ...) => (POPCNTD ...) +(PopCount(32|16|8) x) => (POPCNT(W|W|B) (MOV(W|H|B)Zreg x)) + +(And(64|32|16|8) ...) => (AND ...) +(Or(64|32|16|8) ...) => (OR ...) +(Xor(64|32|16|8) ...) => (XOR ...) + +(Neg(64|32|16|8) ...) => (NEG ...) +(Neg(64|32)F ...) => (FNEG ...) + +(Com(64|32|16|8) x) => (NOR x x) + +// Lowering boolean ops +(AndB ...) => (AND ...) +(OrB ...) => (OR ...) +(Not x) => (XORconst [1] x) + +// Merge logical operations +(AND x (NOR y y)) => (ANDN x y) +(OR x (NOR y y)) => (ORN x y) + +// Lowering comparisons +(EqB x y) => (Select0 (ANDCCconst [1] (EQV x y))) +// Sign extension dependence on operand sign sets up for sign/zero-extension elision later +(Eq(8|16) x y) && x.Type.IsSigned() && y.Type.IsSigned() => (Equal (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y))) +(Eq(8|16) x y) => (Equal (CMPW (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y))) +(Eq(32|64|Ptr) x y) => (Equal ((CMPW|CMP|CMP) x y)) +(Eq(32|64)F x y) => (Equal (FCMPU x y)) + +(NeqB ...) => (XOR ...) +// Like Eq8 and Eq16, prefer sign extension likely to enable later elision. 
+(Neq(8|16) x y) && x.Type.IsSigned() && y.Type.IsSigned() => (NotEqual (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y))) +(Neq(8|16) x y) => (NotEqual (CMPW (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y))) +(Neq(32|64|Ptr) x y) => (NotEqual ((CMPW|CMP|CMP) x y)) +(Neq(32|64)F x y) => (NotEqual (FCMPU x y)) + +(Less(8|16) x y) => (LessThan (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y))) +(Less(32|64) x y) => (LessThan ((CMPW|CMP) x y)) +(Less(32|64)F x y) => (FLessThan (FCMPU x y)) + +(Less(8|16)U x y) => (LessThan (CMPWU (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y))) +(Less(32|64)U x y) => (LessThan ((CMPWU|CMPU) x y)) + +(Leq(8|16) x y) => (LessEqual (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y))) +(Leq(32|64) x y) => (LessEqual ((CMPW|CMP) x y)) +(Leq(32|64)F x y) => (FLessEqual (FCMPU x y)) + +(Leq(8|16)U x y) => (LessEqual (CMPWU (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y))) +(Leq(32|64)U x y) => (LessEqual (CMP(WU|U) x y)) + +// Absorb pseudo-ops into blocks. +(If (Equal cc) yes no) => (EQ cc yes no) +(If (NotEqual cc) yes no) => (NE cc yes no) +(If (LessThan cc) yes no) => (LT cc yes no) +(If (LessEqual cc) yes no) => (LE cc yes no) +(If (GreaterThan cc) yes no) => (GT cc yes no) +(If (GreaterEqual cc) yes no) => (GE cc yes no) +(If (FLessThan cc) yes no) => (FLT cc yes no) +(If (FLessEqual cc) yes no) => (FLE cc yes no) +(If (FGreaterThan cc) yes no) => (FGT cc yes no) +(If (FGreaterEqual cc) yes no) => (FGE cc yes no) + +(If cond yes no) => (NE (CMPWconst [0] (Select0 (ANDCCconst [1] cond))) yes no) + +// Absorb boolean tests into block +(NE (CMPWconst [0] (Select0 (ANDCCconst [1] ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) cc)))) yes no) => ((EQ|NE|LT|LE|GT|GE) cc yes no) +(NE (CMPWconst [0] (Select0 (ANDCCconst [1] ((FLessThan|FLessEqual|FGreaterThan|FGreaterEqual) cc)))) yes no) => ((FLT|FLE|FGT|FGE) cc yes no) + +// absorb flag constants into branches +(EQ (FlagEQ) yes no) => (First yes no) +(EQ (FlagLT) yes no) => (First no 
yes) +(EQ (FlagGT) yes no) => (First no yes) + +(NE (FlagEQ) yes no) => (First no yes) +(NE (FlagLT) yes no) => (First yes no) +(NE (FlagGT) yes no) => (First yes no) + +(LT (FlagEQ) yes no) => (First no yes) +(LT (FlagLT) yes no) => (First yes no) +(LT (FlagGT) yes no) => (First no yes) + +(LE (FlagEQ) yes no) => (First yes no) +(LE (FlagLT) yes no) => (First yes no) +(LE (FlagGT) yes no) => (First no yes) + +(GT (FlagEQ) yes no) => (First no yes) +(GT (FlagLT) yes no) => (First no yes) +(GT (FlagGT) yes no) => (First yes no) + +(GE (FlagEQ) yes no) => (First yes no) +(GE (FlagLT) yes no) => (First no yes) +(GE (FlagGT) yes no) => (First yes no) + +// absorb InvertFlags into branches +(LT (InvertFlags cmp) yes no) => (GT cmp yes no) +(GT (InvertFlags cmp) yes no) => (LT cmp yes no) +(LE (InvertFlags cmp) yes no) => (GE cmp yes no) +(GE (InvertFlags cmp) yes no) => (LE cmp yes no) +(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no) +(NE (InvertFlags cmp) yes no) => (NE cmp yes no) + +// constant comparisons +(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ) +(CMPWconst (MOVDconst [x]) [y]) && int32(x) (FlagLT) +(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) => (FlagGT) + +(CMPconst (MOVDconst [x]) [y]) && x==y => (FlagEQ) +(CMPconst (MOVDconst [x]) [y]) && x (FlagLT) +(CMPconst (MOVDconst [x]) [y]) && x>y => (FlagGT) + +(CMPWUconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ) +(CMPWUconst (MOVDconst [x]) [y]) && uint32(x) (FlagLT) +(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) => (FlagGT) + +(CMPUconst (MOVDconst [x]) [y]) && x==y => (FlagEQ) +(CMPUconst (MOVDconst [x]) [y]) && uint64(x) (FlagLT) +(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) => (FlagGT) + +// absorb flag constants into boolean values +(Equal (FlagEQ)) => (MOVDconst [1]) +(Equal (FlagLT)) => (MOVDconst [0]) +(Equal (FlagGT)) => (MOVDconst [0]) + +(NotEqual (FlagEQ)) => (MOVDconst [0]) +(NotEqual (FlagLT)) => (MOVDconst [1]) +(NotEqual (FlagGT)) => 
(MOVDconst [1]) + +(LessThan (FlagEQ)) => (MOVDconst [0]) +(LessThan (FlagLT)) => (MOVDconst [1]) +(LessThan (FlagGT)) => (MOVDconst [0]) + +(LessEqual (FlagEQ)) => (MOVDconst [1]) +(LessEqual (FlagLT)) => (MOVDconst [1]) +(LessEqual (FlagGT)) => (MOVDconst [0]) + +(GreaterThan (FlagEQ)) => (MOVDconst [0]) +(GreaterThan (FlagLT)) => (MOVDconst [0]) +(GreaterThan (FlagGT)) => (MOVDconst [1]) + +(GreaterEqual (FlagEQ)) => (MOVDconst [1]) +(GreaterEqual (FlagLT)) => (MOVDconst [0]) +(GreaterEqual (FlagGT)) => (MOVDconst [1]) + +// absorb InvertFlags into boolean values +((Equal|NotEqual|LessThan|GreaterThan|LessEqual|GreaterEqual) (InvertFlags x)) => ((Equal|NotEqual|GreaterThan|LessThan|GreaterEqual|LessEqual) x) + + +// Elide compares of bit tests +((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 z) yes no) +((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (Select0 z:(ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 z) yes no) +((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 (ANDCC x y)) yes no) +((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 (ORCC x y)) yes no) +((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 (XORCC x y)) yes no) + +(CondSelect x y (SETBC [a] cmp)) => (ISEL [a] x y cmp) +(CondSelect x y (SETBCR [a] cmp)) => (ISEL [a+4] x y cmp) +// Only lower after bool is lowered. It should always lower. This helps ensure the folding below happens reliably. +(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (Select1 (ANDCCconst [1] bool))) +// Fold any CR -> GPR -> CR transfers when applying the above rule. 
+(ISEL [6] x y (Select1 (ANDCCconst [1] (SETBC [c] cmp)))) => (ISEL [c] x y cmp) +(ISEL [6] x y ((CMP|CMPW)const [0] (SETBC [c] cmp))) => (ISEL [c] x y cmp) +(ISEL [6] x y ((CMP|CMPW)const [0] (SETBCR [c] cmp))) => (ISEL [c+4] x y cmp) + +// Lowering loads +(Load ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem) +(Load ptr mem) && is32BitInt(t) && t.IsSigned() => (MOVWload ptr mem) +(Load ptr mem) && is32BitInt(t) && !t.IsSigned() => (MOVWZload ptr mem) +(Load ptr mem) && is16BitInt(t) && t.IsSigned() => (MOVHload ptr mem) +(Load ptr mem) && is16BitInt(t) && !t.IsSigned() => (MOVHZload ptr mem) +(Load ptr mem) && t.IsBoolean() => (MOVBZload ptr mem) +(Load ptr mem) && is8BitInt(t) && t.IsSigned() => (MOVBreg (MOVBZload ptr mem)) // PPC has no signed-byte load. +(Load ptr mem) && is8BitInt(t) && !t.IsSigned() => (MOVBZload ptr mem) + +(Load ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem) +(Load ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem) + +(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (FMOVDstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (FMOVSstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem) + +// Using Zero instead of LoweredZero allows the +// target address to be folded where possible. 
+(Zero [0] _ mem) => mem +(Zero [1] destptr mem) => (MOVBstorezero destptr mem) +(Zero [2] destptr mem) => + (MOVHstorezero destptr mem) +(Zero [3] destptr mem) => + (MOVBstorezero [2] destptr + (MOVHstorezero destptr mem)) +(Zero [4] destptr mem) => + (MOVWstorezero destptr mem) +(Zero [5] destptr mem) => + (MOVBstorezero [4] destptr + (MOVWstorezero destptr mem)) +(Zero [6] destptr mem) => + (MOVHstorezero [4] destptr + (MOVWstorezero destptr mem)) +(Zero [7] destptr mem) => + (MOVBstorezero [6] destptr + (MOVHstorezero [4] destptr + (MOVWstorezero destptr mem))) + +(Zero [8] {t} destptr mem) => (MOVDstorezero destptr mem) +(Zero [12] {t} destptr mem) => + (MOVWstorezero [8] destptr + (MOVDstorezero [0] destptr mem)) +(Zero [16] {t} destptr mem) => + (MOVDstorezero [8] destptr + (MOVDstorezero [0] destptr mem)) +(Zero [24] {t} destptr mem) => + (MOVDstorezero [16] destptr + (MOVDstorezero [8] destptr + (MOVDstorezero [0] destptr mem))) +(Zero [32] {t} destptr mem) => + (MOVDstorezero [24] destptr + (MOVDstorezero [16] destptr + (MOVDstorezero [8] destptr + (MOVDstorezero [0] destptr mem)))) + +// Handle cases not handled above +// Lowered Short cases do not generate loops, and as a result don't clobber +// the address registers or flags. 
+(Zero [s] ptr mem) && buildcfg.GOPPC64 <= 8 && s < 64 => (LoweredZeroShort [s] ptr mem) +(Zero [s] ptr mem) && buildcfg.GOPPC64 <= 8 => (LoweredZero [s] ptr mem) +(Zero [s] ptr mem) && s < 128 && buildcfg.GOPPC64 >= 9 => (LoweredQuadZeroShort [s] ptr mem) +(Zero [s] ptr mem) && buildcfg.GOPPC64 >= 9 => (LoweredQuadZero [s] ptr mem) + +// moves +(Move [0] _ _ mem) => mem +(Move [1] dst src mem) => (MOVBstore dst (MOVBZload src mem) mem) +(Move [2] dst src mem) => + (MOVHstore dst (MOVHZload src mem) mem) +(Move [4] dst src mem) => + (MOVWstore dst (MOVWZload src mem) mem) +// MOVD for load and store must have offsets that are multiple of 4 +(Move [8] {t} dst src mem) => + (MOVDstore dst (MOVDload src mem) mem) +(Move [3] dst src mem) => + (MOVBstore [2] dst (MOVBZload [2] src mem) + (MOVHstore dst (MOVHload src mem) mem)) +(Move [5] dst src mem) => + (MOVBstore [4] dst (MOVBZload [4] src mem) + (MOVWstore dst (MOVWZload src mem) mem)) +(Move [6] dst src mem) => + (MOVHstore [4] dst (MOVHZload [4] src mem) + (MOVWstore dst (MOVWZload src mem) mem)) +(Move [7] dst src mem) => + (MOVBstore [6] dst (MOVBZload [6] src mem) + (MOVHstore [4] dst (MOVHZload [4] src mem) + (MOVWstore dst (MOVWZload src mem) mem))) + +// Large move uses a loop. Since the address is computed and the +// offset is zero, any alignment can be used. +(Move [s] dst src mem) && s > 8 && buildcfg.GOPPC64 <= 8 && logLargeCopy(v, s) => + (LoweredMove [s] dst src mem) +(Move [s] dst src mem) && s > 8 && s <= 64 && buildcfg.GOPPC64 >= 9 => + (LoweredQuadMoveShort [s] dst src mem) +(Move [s] dst src mem) && s > 8 && buildcfg.GOPPC64 >= 9 && logLargeCopy(v, s) => + (LoweredQuadMove [s] dst src mem) + +// Calls +// Lowering calls +(StaticCall ...) => (CALLstatic ...) +(ClosureCall ...) => (CALLclosure ...) +(InterCall ...) => (CALLinter ...) +(TailCall ...) => (CALLtail ...) + +// Miscellaneous +(GetClosurePtr ...) => (LoweredGetClosurePtr ...) +(GetCallerSP ...) => (LoweredGetCallerSP ...) 
+(GetCallerPC ...) => (LoweredGetCallerPC ...) +(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr)) +(IsInBounds idx len) => (LessThan (CMPU idx len)) +(IsSliceInBounds idx len) => (LessEqual (CMPU idx len)) +(NilCheck ...) => (LoweredNilCheck ...) + +// Write barrier. +(WB ...) => (LoweredWB ...) + +// Publication barrier as intrinsic +(PubBarrier ...) => (LoweredPubBarrier ...) + +(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem) + +// Optimizations +// Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms, +// so ORconst, XORconst easily expand into a pair. + +// Include very-large constants in the const-const case. +(AND (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&d]) +(OR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|d]) +(XOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c^d]) +(ORN (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|^d]) +(ANDN (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&^d]) +(NOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [^(c|d)]) + +// Discover consts +(AND x (MOVDconst [-1])) => x +(AND x (MOVDconst [c])) && isU16Bit(c) => (Select0 (ANDCCconst [c] x)) +(XOR x (MOVDconst [c])) && isU32Bit(c) => (XORconst [c] x) +(OR x (MOVDconst [c])) && isU32Bit(c) => (ORconst [c] x) + +// Simplify consts +(ANDCCconst [c] (Select0 (ANDCCconst [d] x))) => (ANDCCconst [c&d] x) +(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x) +(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x) +(Select0 (ANDCCconst [-1] x)) => x +(Select0 (ANDCCconst [0] _)) => (MOVDconst [0]) +(Select1 (ANDCCconst [0] _)) => (FlagEQ) +(XORconst [0] x) => x +(ORconst [-1] _) => (MOVDconst [-1]) +(ORconst [0] x) => x + +// zero-extend of small and => small and +(MOVBZreg y:(Select0 (ANDCCconst [c] _))) && 
uint64(c) <= 0xFF => y +(MOVHZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFF => y +(MOVWZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFFFFFF => y +(MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF => y + +// sign extend of small-positive and => small-positive-and +(MOVBreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0x7F => y +(MOVHreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0x7FFF => y +(MOVWreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFF => y // 0xFFFF is largest immediate constant, when regarded as 32-bit is > 0 +(MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF => y + +// small and of zero-extend => either zero-extend or small and +(Select0 (ANDCCconst [c] y:(MOVBZreg _))) && c&0xFF == 0xFF => y +(Select0 (ANDCCconst [0xFF] (MOVBreg x))) => (MOVBZreg x) +(Select0 (ANDCCconst [c] y:(MOVHZreg _))) && c&0xFFFF == 0xFFFF => y +(Select0 (ANDCCconst [0xFFFF] (MOVHreg x))) => (MOVHZreg x) + +(AND (MOVDconst [c]) y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF => y +(AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) => (MOVWZreg x) +// normal case +(Select0 (ANDCCconst [c] (MOVBZreg x))) => (Select0 (ANDCCconst [c&0xFF] x)) +(Select0 (ANDCCconst [c] (MOVHZreg x))) => (Select0 (ANDCCconst [c&0xFFFF] x)) +(Select0 (ANDCCconst [c] (MOVWZreg x))) => (Select0 (ANDCCconst [c&0xFFFFFFFF] x)) + +// Eliminate unnecessary sign/zero extend following right shift +(MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) => (SRWconst [c] (MOVBZreg x)) +(MOV(H|W)Zreg (SRWconst [c] (MOVHZreg x))) => (SRWconst [c] (MOVHZreg x)) +(MOVWZreg (SRWconst [c] (MOVWZreg x))) => (SRWconst [c] (MOVWZreg x)) +(MOV(B|H|W)reg (SRAWconst [c] (MOVBreg x))) => (SRAWconst [c] (MOVBreg x)) +(MOV(H|W)reg (SRAWconst [c] (MOVHreg x))) => (SRAWconst [c] (MOVHreg x)) +(MOVWreg (SRAWconst [c] (MOVWreg x))) => (SRAWconst [c] (MOVWreg x)) + +(MOV(WZ|W)reg (S(R|RA)Wconst [c] x)) && sizeof(x.Type) <= 32 => (S(R|RA)Wconst [c] x) +(MOV(HZ|H)reg (S(R|RA)Wconst [c] 
x)) && sizeof(x.Type) <= 16 => (S(R|RA)Wconst [c] x) +(MOV(BZ|B)reg (S(R|RA)Wconst [c] x)) && sizeof(x.Type) == 8 => (S(R|RA)Wconst [c] x) + +// initial right shift will handle sign/zero extend +(MOVBZreg (SRDconst [c] x)) && c>=56 => (SRDconst [c] x) +(MOVBreg (SRDconst [c] x)) && c>56 => (SRDconst [c] x) +(MOVBreg (SRDconst [c] x)) && c==56 => (SRADconst [c] x) +(MOVBreg (SRADconst [c] x)) && c>=56 => (SRADconst [c] x) +(MOVBZreg (SRWconst [c] x)) && c>=24 => (SRWconst [c] x) +(MOVBreg (SRWconst [c] x)) && c>24 => (SRWconst [c] x) +(MOVBreg (SRWconst [c] x)) && c==24 => (SRAWconst [c] x) +(MOVBreg (SRAWconst [c] x)) && c>=24 => (SRAWconst [c] x) + +(MOVHZreg (SRDconst [c] x)) && c>=48 => (SRDconst [c] x) +(MOVHreg (SRDconst [c] x)) && c>48 => (SRDconst [c] x) +(MOVHreg (SRDconst [c] x)) && c==48 => (SRADconst [c] x) +(MOVHreg (SRADconst [c] x)) && c>=48 => (SRADconst [c] x) +(MOVHZreg (SRWconst [c] x)) && c>=16 => (SRWconst [c] x) +(MOVHreg (SRWconst [c] x)) && c>16 => (SRWconst [c] x) +(MOVHreg (SRAWconst [c] x)) && c>=16 => (SRAWconst [c] x) +(MOVHreg (SRWconst [c] x)) && c==16 => (SRAWconst [c] x) + +(MOVWZreg (SRDconst [c] x)) && c>=32 => (SRDconst [c] x) +(MOVWreg (SRDconst [c] x)) && c>32 => (SRDconst [c] x) +(MOVWreg (SRADconst [c] x)) && c>=32 => (SRADconst [c] x) +(MOVWreg (SRDconst [c] x)) && c==32 => (SRADconst [c] x) + +// Various redundant zero/sign extension combinations. 
+(MOVBZreg y:(MOVBZreg _)) => y // repeat +(MOVBreg y:(MOVBreg _)) => y // repeat +(MOVBreg (MOVBZreg x)) => (MOVBreg x) +(MOVBZreg (MOVBreg x)) => (MOVBZreg x) + +// H - there are more combinations than these + +(MOVHZreg y:(MOV(H|B)Zreg _)) => y // repeat +(MOVHZreg y:(MOVHBRload _ _)) => y + +(MOVHreg y:(MOV(H|B)reg _)) => y // repeat + +(MOV(H|HZ)reg y:(MOV(HZ|H)reg x)) => (MOV(H|HZ)reg x) + +// W - there are more combinations than these + +(MOV(WZ|WZ|WZ|W|W|W)reg y:(MOV(WZ|HZ|BZ|W|H|B)reg _)) => y // repeat +(MOVWZreg y:(MOV(H|W)BRload _ _)) => y + +(MOV(W|WZ)reg y:(MOV(WZ|W)reg x)) => (MOV(W|WZ)reg x) + +// Truncate then logical then truncate: omit first, lesser or equal truncate +(MOVWZreg ((OR|XOR|AND) x (MOVWZreg y))) => (MOVWZreg ((OR|XOR|AND) x y)) +(MOVHZreg ((OR|XOR|AND) x (MOVWZreg y))) => (MOVHZreg ((OR|XOR|AND) x y)) +(MOVHZreg ((OR|XOR|AND) x (MOVHZreg y))) => (MOVHZreg ((OR|XOR|AND) x y)) +(MOVBZreg ((OR|XOR|AND) x (MOVWZreg y))) => (MOVBZreg ((OR|XOR|AND) x y)) +(MOVBZreg ((OR|XOR|AND) x (MOVHZreg y))) => (MOVBZreg ((OR|XOR|AND) x y)) +(MOVBZreg ((OR|XOR|AND) x (MOVBZreg y))) => (MOVBZreg ((OR|XOR|AND) x y)) + +(MOV(B|H|W)Zreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x)))) => z +(MOV(B|H|W)Zreg z:(AND y (MOV(B|H|W)Zload ptr x))) => z +(MOV(H|W)Zreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x)))) => z +(MOVWZreg z:(Select0 (ANDCCconst [c] (MOVWZload ptr x)))) => z + +// Arithmetic constant ops + +(ADD x (MOVDconst [c])) && is32Bit(c) && !t.IsPtr() => (ADDconst [c] x) +(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) => (ADDconst [c+d] x) +(ADDconst [0] x) => x +(SUB x (MOVDconst [c])) && is32Bit(-c) => (ADDconst [-c] x) + +(ADDconst [c] (MOVDaddr [d] {sym} x)) && is32Bit(c+int64(d)) => (MOVDaddr [int32(c+int64(d))] {sym} x) +(ADDconst [c] x:(SP)) && is32Bit(c) => (MOVDaddr [int32(c)] x) // so it is rematerializeable + +(MULL(W|D) x (MOVDconst [c])) && is16Bit(c) => (MULL(W|D)const [int32(c)] x) + +// Subtract from (with carry, but ignored) 
constant. +// Note, these clobber the carry bit. +(SUB (MOVDconst [c]) x) && is32Bit(c) => (SUBFCconst [c] x) +(SUBFCconst [c] (NEG x)) => (ADDconst [c] x) +(SUBFCconst [c] (SUBFCconst [d] x)) && is32Bit(c-d) => (ADDconst [c-d] x) +(SUBFCconst [0] x) => (NEG x) +(ADDconst [c] (SUBFCconst [d] x)) && is32Bit(c+d) => (SUBFCconst [c+d] x) +(NEG (ADDconst [c] x)) && is32Bit(-c) => (SUBFCconst [-c] x) +(NEG (SUBFCconst [c] x)) && is32Bit(-c) => (ADDconst [-c] x) +(NEG (SUB x y)) => (SUB y x) +(NEG (NEG x)) => x + +// Use register moves instead of stores and loads to move int<=>float values +// Common with math Float64bits, Float64frombits +(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) => (MFVSRD x) +(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) => (MTVSRD x) + +(FMOVDstore [off] {sym} ptr (MTVSRD x) mem) => (MOVDstore [off] {sym} ptr x mem) +(MOVDstore [off] {sym} ptr (MFVSRD x) mem) => (FMOVDstore [off] {sym} ptr x mem) + +(MTVSRD (MOVDconst [c])) && !math.IsNaN(math.Float64frombits(uint64(c))) => (FMOVDconst [math.Float64frombits(uint64(c))]) +(MFVSRD (FMOVDconst [c])) => (MOVDconst [int64(math.Float64bits(c))]) + +(MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (FMOVDload [off] {sym} ptr mem) +(MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVDload [off] {sym} ptr mem) + +// Rules for MOV* or FMOV* ops determine when indexed (MOV*loadidx or MOV*storeidx) +// or non-indexed (MOV*load or MOV*store) should be used. Indexed instructions +// require an extra instruction and register to load the index so non-indexed is preferred. +// Indexed ops generate indexed load or store instructions for all GOPPC64 values. +// Non-indexed ops generate DS-form loads and stores when the offset fits in 16 bits, +// and on power8 and power9, a multiple of 4 is required for MOVW and MOVD ops. +// On power10, prefixed loads and stores can be used for offsets > 16 bits and <= 32 bits. 
+// and support for PC relative addressing must be available if relocation is needed. +// On power10, the assembler will determine when to use DS-form or prefixed +// instructions for non-indexed ops depending on the value of the offset. +// +// Fold offsets for stores. +(MOV(D|W|H|B)store [off1] {sym} (ADDconst [off2] x) val mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (MOV(D|W|H|B)store [off1+int32(off2)] {sym} x val mem) + +(FMOV(S|D)store [off1] {sym} (ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (FMOV(S|D)store [off1+int32(off2)] {sym} ptr val mem) + +// Fold address into load/store. +// If power10 with PCRel is not available, then +// the assembler needs to generate several instructions and use +// temp register for accessing global, and each time it will reload +// the temp register. So don't fold address of global in that case if there is more than +// one use. 
+(MOV(B|H|W|D)store [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) + && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) => + (MOV(B|H|W|D)store [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + +(FMOV(S|D)store [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) + && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) => + (FMOV(S|D)store [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + +(MOV(B|H|W)Zload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) + && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) => + (MOV(B|H|W)Zload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOV(H|W|D)load [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) + && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) => + (MOV(H|W|D)load [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(FMOV(S|D)load [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) + && ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) => + (FMOV(S|D)load [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + +// Fold offsets for loads. 
+(FMOV(S|D)load [off1] {sym} (ADDconst [off2] ptr) mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (FMOV(S|D)load [off1+int32(off2)] {sym} ptr mem) + +(MOV(D|W|WZ|H|HZ|BZ)load [off1] {sym} (ADDconst [off2] x) mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (MOV(D|W|WZ|H|HZ|BZ)load [off1+int32(off2)] {sym} x mem) + +// Determine load + addressing that can be done as a register indexed load +(MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 => (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem) + +// See comments above concerning selection of indexed vs. non-indexed ops. +// These cases don't have relocation. +(MOV(D|W)loadidx ptr (MOVDconst [c]) mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(D|W)load [int32(c)] ptr mem) +(MOV(WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem) +(MOV(D|W)loadidx (MOVDconst [c]) ptr mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(D|W)load [int32(c)] ptr mem) +(MOV(WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem) + +// Store of zero => storezero +(MOV(D|W|H|B)store [off] {sym} ptr (MOVDconst [0]) mem) => (MOV(D|W|H|B)storezero [off] {sym} ptr mem) + +// Fold offsets for storezero +(MOV(D|W|H|B)storezero [off1] {sym} (ADDconst [off2] x) mem) && ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2))) => + (MOV(D|W|H|B)storezero [off1+int32(off2)] {sym} x mem) + +// Stores with addressing that can be done as indexed stores +(MOV(D|W|H|B)store [0] {sym} p:(ADD ptr idx) val mem) && sym == nil && p.Uses == 1 => (MOV(D|W|H|B)storeidx ptr idx val mem) + +(MOVDstoreidx ptr (MOVDconst [c]) val mem) && ((is16Bit(c) && c%4 == 0) || 
(buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOVDstore [int32(c)] ptr val mem) +(MOV(W|H|B)storeidx ptr (MOVDconst [c]) val mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(W|H|B)store [int32(c)] ptr val mem) +(MOVDstoreidx (MOVDconst [c]) ptr val mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOVDstore [int32(c)] ptr val mem) +(MOV(W|H|B)storeidx (MOVDconst [c]) ptr val mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(W|H|B)store [int32(c)] ptr val mem) + +// Fold symbols into storezero +(MOV(D|W|H|B)storezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2) + && ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) => + (MOV(D|W|H|B)storezero [off1+off2] {mergeSym(sym1,sym2)} x mem) + +// atomic intrinsics +(AtomicLoad(8|32|64|Ptr) ptr mem) => (LoweredAtomicLoad(8|32|64|Ptr) [1] ptr mem) +(AtomicLoadAcq(32|64) ptr mem) => (LoweredAtomicLoad(32|64) [0] ptr mem) + +(AtomicStore(8|32|64) ptr val mem) => (LoweredAtomicStore(8|32|64) [1] ptr val mem) +(AtomicStoreRel(32|64) ptr val mem) => (LoweredAtomicStore(32|64) [0] ptr val mem) + +(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...) + +(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...) + +(AtomicCompareAndSwap(32|64) ptr old new_ mem) => (LoweredAtomicCas(32|64) [1] ptr old new_ mem) +(AtomicCompareAndSwapRel32 ptr old new_ mem) => (LoweredAtomicCas32 [0] ptr old new_ mem) + +(AtomicAnd(8|32) ...) => (LoweredAtomicAnd(8|32) ...) +(AtomicOr(8|32) ...) => (LoweredAtomicOr(8|32) ...) + +(Slicemask x) => (SRADconst (NEG x) [63]) +(Select0 (ANDCCconst [1] z:(SRADconst [63] x))) && z.Uses == 1 => (SRDconst [63] x) + +// Note that MOV??reg returns a 64-bit int, x is not necessarily that wide +// This may interact with other patterns in the future. 
(Compare with arm64) +(MOV(B|H|W)Zreg x:(MOVBZload _ _)) => x +(MOV(B|H|W)Zreg x:(MOVBZloadidx _ _ _)) => x +(MOV(H|W)Zreg x:(MOVHZload _ _)) => x +(MOV(H|W)Zreg x:(MOVHZloadidx _ _ _)) => x +(MOV(H|W)reg x:(MOVHload _ _)) => x +(MOV(H|W)reg x:(MOVHloadidx _ _ _)) => x +(MOV(WZ|W)reg x:(MOV(WZ|W)load _ _)) => x +(MOV(WZ|W)reg x:(MOV(WZ|W)loadidx _ _ _)) => x +(MOV(B|W)Zreg x:(Select0 (LoweredAtomicLoad(8|32) _ _))) => x + +// don't extend if argument is already extended +(MOVBreg x:(Arg )) && is8BitInt(t) && t.IsSigned() => x +(MOVBZreg x:(Arg )) && is8BitInt(t) && !t.IsSigned() => x +(MOVHreg x:(Arg )) && (is8BitInt(t) || is16BitInt(t)) && t.IsSigned() => x +(MOVHZreg x:(Arg )) && (is8BitInt(t) || is16BitInt(t)) && !t.IsSigned() => x +(MOVWreg x:(Arg )) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && t.IsSigned() => x +(MOVWZreg x:(Arg )) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !t.IsSigned() => x + +(MOVBZreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))]) +(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))]) +(MOVHZreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))]) +(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))]) +(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))]) +(MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))]) + +// Implement clrsldi and clrslwi extended mnemonics as described in +// ISA 3.0 section C.8. AuxInt field contains values needed for +// the instructions, packed together since there is only one available. 
+(SLDconst [c] z:(MOVBZreg x)) && c < 8 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x) +(SLDconst [c] z:(MOVHZreg x)) && c < 16 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x) +(SLDconst [c] z:(MOVWZreg x)) && c < 32 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x) + +(SLDconst [c] z:(Select0 (ANDCCconst [d] x))) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) +(SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) +(SLWconst [c] z:(MOVBZreg x)) && z.Uses == 1 && c < 8 => (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x) +(SLWconst [c] z:(MOVHZreg x)) && z.Uses == 1 && c < 16 => (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x) +(SLWconst [c] z:(Select0 (ANDCCconst [d] x))) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) +(SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) +// special case for power9 +(SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && buildcfg.GOPPC64 >= 9 => (EXTSWSLconst [c] x) + +// Lose widening ops fed to stores +(MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOV(W|WZ)reg x) mem) => (MOVWstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 => (MOVBstore [off] {sym} ptr (SRWconst x [c]) mem) +(MOVBstore [off] {sym} ptr (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 => (MOVBstore [off] 
{sym} ptr (SRWconst x [c]) mem) +(MOVBstoreidx ptr idx (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstoreidx ptr idx x mem) +(MOVHstoreidx ptr idx (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHstoreidx ptr idx x mem) +(MOVWstoreidx ptr idx (MOV(W|WZ)reg x) mem) => (MOVWstoreidx ptr idx x mem) +(MOVBstoreidx ptr idx (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 => (MOVBstoreidx ptr idx (SRWconst x [c]) mem) +(MOVBstoreidx ptr idx (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 => (MOVBstoreidx ptr idx (SRWconst x [c]) mem) +(MOVHBRstore ptr (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHBRstore ptr x mem) +(MOVWBRstore ptr (MOV(W|WZ)reg x) mem) => (MOVWBRstore ptr x mem) + +// Lose W-widening ops fed to compare-W +(CMP(W|WU) x (MOV(W|WZ)reg y)) => (CMP(W|WU) x y) +(CMP(W|WU) (MOV(W|WZ)reg x) y) => (CMP(W|WU) x y) + +(CMP x (MOVDconst [c])) && is16Bit(c) => (CMPconst x [c]) +(CMP (MOVDconst [c]) y) && is16Bit(c) => (InvertFlags (CMPconst y [c])) +(CMPW x (MOVDconst [c])) && is16Bit(c) => (CMPWconst x [int32(c)]) +(CMPW (MOVDconst [c]) y) && is16Bit(c) => (InvertFlags (CMPWconst y [int32(c)])) + +(CMPU x (MOVDconst [c])) && isU16Bit(c) => (CMPUconst x [c]) +(CMPU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPUconst y [c])) +(CMPWU x (MOVDconst [c])) && isU16Bit(c) => (CMPWUconst x [int32(c)]) +(CMPWU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPWUconst y [int32(c)])) + +// Canonicalize the order of arguments to comparisons - helps with CSE. +((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x)) + +// SETBC auxInt values 0=LT 1=GT 2=EQ Crbit==1 ? 1 : 0 +// SETBCR auxInt values 0=LT 1=GT 2=EQ Crbit==1 ? 
0 : 1 +(Equal cmp) => (SETBC [2] cmp) +(NotEqual cmp) => (SETBCR [2] cmp) +(LessThan cmp) => (SETBC [0] cmp) +(FLessThan cmp) => (SETBC [0] cmp) +(FLessEqual cmp) => (OR (SETBC [2] cmp) (SETBC [0] cmp)) +(GreaterEqual cmp) => (SETBCR [0] cmp) +(GreaterThan cmp) => (SETBC [1] cmp) +(FGreaterEqual cmp) => (OR (SETBC [2] cmp) (SETBC [1] cmp)) +(FGreaterThan cmp) => (SETBC [1] cmp) +(LessEqual cmp) => (SETBCR [1] cmp) + +(SETBC [0] (FlagLT)) => (MOVDconst [1]) +(SETBC [0] (Flag(GT|EQ))) => (MOVDconst [0]) +(SETBC [1] (FlagGT)) => (MOVDconst [1]) +(SETBC [1] (Flag(LT|EQ))) => (MOVDconst [0]) +(SETBC [2] (FlagEQ)) => (MOVDconst [1]) +(SETBC [2] (Flag(LT|GT))) => (MOVDconst [0]) + +(SETBCR [0] (FlagLT)) => (MOVDconst [0]) +(SETBCR [0] (Flag(GT|EQ))) => (MOVDconst [1]) +(SETBCR [1] (FlagGT)) => (MOVDconst [0]) +(SETBCR [1] (Flag(LT|EQ))) => (MOVDconst [1]) +(SETBCR [2] (FlagEQ)) => (MOVDconst [0]) +(SETBCR [2] (Flag(LT|GT))) => (MOVDconst [1]) + +(SETBC [0] (InvertFlags bool)) => (SETBC [1] bool) +(SETBC [1] (InvertFlags bool)) => (SETBC [0] bool) +(SETBC [2] (InvertFlags bool)) => (SETBC [2] bool) + +(SETBCR [0] (InvertFlags bool)) => (SETBCR [1] bool) +(SETBCR [1] (InvertFlags bool)) => (SETBCR [0] bool) +(SETBCR [2] (InvertFlags bool)) => (SETBCR [2] bool) + +// ISEL auxInt values 0=LT 1=GT 2=EQ arg2 ? arg0 : arg1 +// ISEL auxInt values 4=GE 5=LE 6=NE !arg2 ? 
arg1 : arg0 + +(ISEL [2] x _ (FlagEQ)) => x +(ISEL [2] _ y (Flag(LT|GT))) => y + +(ISEL [6] _ y (FlagEQ)) => y +(ISEL [6] x _ (Flag(LT|GT))) => x + +(ISEL [0] _ y (Flag(EQ|GT))) => y +(ISEL [0] x _ (FlagLT)) => x + +(ISEL [5] _ x (Flag(EQ|LT))) => x +(ISEL [5] y _ (FlagGT)) => y + +(ISEL [1] _ y (Flag(EQ|LT))) => y +(ISEL [1] x _ (FlagGT)) => x + +(ISEL [4] x _ (Flag(EQ|GT))) => x +(ISEL [4] _ y (FlagLT)) => y + +(ISEL [2] x y ((CMP|CMPW)const [0] (Select0 (ANDCCconst [n] z)))) => (ISEL [2] x y (Select1 (ANDCCconst [n] z ))) +(ISEL [6] x y ((CMP|CMPW)const [0] (Select0 (ANDCCconst [n] z)))) => (ISEL [6] x y (Select1 (ANDCCconst [n] z ))) +(SETBC [n] (InvertFlags bool)) => (SETBCR [n] bool) +(SETBCR [n] (InvertFlags bool)) => (SETBC [n] bool) + +(ISEL [n] x y (InvertFlags bool)) && n%4 == 0 => (ISEL [n+1] x y bool) +(ISEL [n] x y (InvertFlags bool)) && n%4 == 1 => (ISEL [n-1] x y bool) +(ISEL [n] x y (InvertFlags bool)) && n%4 == 2 => (ISEL [n] x y bool) +(XORconst [1] (SETBCR [n] cmp)) => (SETBC [n] cmp) +(XORconst [1] (SETBC [n] cmp)) => (SETBCR [n] cmp) + +(SETBC [2] ((CMP|CMPW)const [0] (Select0 (ANDCCconst [1] z)))) => (XORconst [1] (Select0 (ANDCCconst [1] z ))) +(SETBCR [2] ((CMP|CMPW)const [0] (Select0 (ANDCCconst [1] z)))) => (Select0 (ANDCCconst [1] z )) + +(SETBC [2] (CMPWconst [0] (Select0 (ANDCCconst [n] z)))) => (SETBC [2] (Select1 (ANDCCconst [n] z ))) +(SETBCR [2] (CMPWconst [0] (Select0 (ANDCCconst [n] z)))) => (SETBCR [2] (Select1 (ANDCCconst [n] z ))) + +// Only CMPconst for these in case AND|OR|XOR result is > 32 bits +(SETBC [2] (CMPconst [0] a:(AND y z))) && a.Uses == 1 => (SETBC [2] (Select1 (ANDCC y z ))) +(SETBCR [2] (CMPconst [0] a:(AND y z))) && a.Uses == 1 => (SETBCR [2] (Select1 (ANDCC y z ))) + +(SETBC [2] (CMPconst [0] o:(OR y z))) && o.Uses == 1 => (SETBC [2] (Select1 (ORCC y z ))) +(SETBCR [2] (CMPconst [0] o:(OR y z))) && o.Uses == 1 => (SETBCR [2] (Select1 (ORCC y z ))) + +(SETBC [2] (CMPconst [0] a:(XOR y z))) && a.Uses == 1 => 
(SETBC [2] (Select1 (XORCC y z ))) +(SETBCR [2] (CMPconst [0] a:(XOR y z))) && a.Uses == 1 => (SETBCR [2] (Select1 (XORCC y z ))) + +// A particular pattern seen in cgo code: +(AND (MOVDconst [c]) x:(MOVBZload _ _)) => (Select0 (ANDCCconst [c&0xFF] x)) + +// floating point negative abs +(FNEG (F(ABS|NABS) x)) => (F(NABS|ABS) x) + +// floating-point fused multiply-add/sub +(F(ADD|SUB) (FMUL x y) z) && x.Block.Func.useFMA(v) => (FM(ADD|SUB) x y z) +(F(ADDS|SUBS) (FMULS x y) z) && x.Block.Func.useFMA(v) => (FM(ADDS|SUBS) x y z) + +// Arch-specific inlining for small or disjoint runtime.memmove +(SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore _ src s3:(MOVDstore {t} _ dst mem))))) + && sz >= 0 + && isSameCall(sym, "runtime.memmove") + && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 + && isInlinableMemmove(dst, src, sz, config) + && clobber(s1, s2, s3, call) + => (Move [sz] dst src mem) + +// Match post-lowering calls, register version. +(SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem)) + && sz >= 0 + && isSameCall(sym, "runtime.memmove") + && call.Uses == 1 + && isInlinableMemmove(dst, src, sz, config) + && clobber(call) + => (Move [sz] dst src mem) + +// Prefetch instructions (TH specified using aux field) +// For DCBT Ra,Rb,TH, A value of TH indicates: +// 0, hint this cache line will be used soon. (PrefetchCache) +// 16, hint this cache line will not be used for long. (PrefetchCacheStreamed) +// See ISA 3.0 Book II 4.3.2 for more detail. https://openpower.foundation/specifications/isa/ +(PrefetchCache ptr mem) => (DCBT ptr mem [0]) +(PrefetchCacheStreamed ptr mem) => (DCBT ptr mem [16]) + +// Use byte reverse instructions on Power10 +(Bswap(16|32|64) x) && buildcfg.GOPPC64>=10 => (BR(H|W|D) x) + +// Fold bit reversal into loads. 
+(BR(W|H) x:(MOV(W|H)Zload [off] {sym} ptr mem)) && x.Uses == 1 => @x.Block (MOV(W|H)BRload (MOVDaddr [off] {sym} ptr) mem) +(BR(W|H) x:(MOV(W|H)Zloadidx ptr idx mem)) && x.Uses == 1 => @x.Block (MOV(W|H)BRloadidx ptr idx mem) +(BRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 => @x.Block (MOVDBRload (MOVDaddr [off] {sym} ptr) mem) +(BRD x:(MOVDloadidx ptr idx mem)) && x.Uses == 1 => @x.Block (MOVDBRloadidx ptr idx mem) + +// Fold bit reversal into stores. +(MOV(D|W|H)store [off] {sym} ptr r:(BR(D|W|H) val) mem) && r.Uses == 1 => (MOV(D|W|H)BRstore (MOVDaddr [off] {sym} ptr) val mem) +(MOV(D|W|H)storeidx ptr idx r:(BR(D|W|H) val) mem) && r.Uses == 1 => (MOV(D|W|H)BRstoreidx ptr idx val mem) + +// GOPPC64<10 rules. +// These Bswap operations should only be introduced by the memcombine pass in places where they can be folded into loads or stores. +(Bswap(32|16) x:(MOV(W|H)Zload [off] {sym} ptr mem)) => @x.Block (MOV(W|H)BRload (MOVDaddr [off] {sym} ptr) mem) +(Bswap(32|16) x:(MOV(W|H)Zloadidx ptr idx mem)) => @x.Block (MOV(W|H)BRloadidx ptr idx mem) +(Bswap64 x:(MOVDload [off] {sym} ptr mem)) => @x.Block (MOVDBRload (MOVDaddr [off] {sym} ptr) mem) +(Bswap64 x:(MOVDloadidx ptr idx mem)) => @x.Block (MOVDBRloadidx ptr idx mem) +(MOV(D|W|H)store [off] {sym} ptr (Bswap(64|32|16) val) mem) => (MOV(D|W|H)BRstore (MOVDaddr [off] {sym} ptr) val mem) +(MOV(D|W|H)storeidx ptr idx (Bswap(64|32|16) val) mem) => (MOV(D|W|H)BRstoreidx ptr idx val mem) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go new file mode 100644 index 0000000000000000000000000000000000000000..7aa2e6c35173485e5ce52574e3d23cd75471b398 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/PPC64Ops.go @@ -0,0 +1,755 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "strings" + +// Notes: +// - Less-than-64-bit integer types live in the low portion of registers. +// The upper portion is junk. +// - Boolean types are zero or 1; stored in a byte, with upper bytes of the register containing junk. +// - *const instructions may use a constant larger than the instruction can encode. +// In this case the assembler expands to multiple instructions and uses tmp +// register (R31). + +var regNamesPPC64 = []string{ + "R0", // REGZERO, not used, but simplifies counting in regalloc + "SP", // REGSP + "SB", // REGSB + "R3", + "R4", + "R5", + "R6", + "R7", + "R8", + "R9", + "R10", + "R11", // REGCTXT for closures + "R12", + "R13", // REGTLS + "R14", + "R15", + "R16", + "R17", + "R18", + "R19", + "R20", + "R21", + "R22", + "R23", + "R24", + "R25", + "R26", + "R27", + "R28", + "R29", + "g", // REGG. Using name "g" and setting Config.hasGReg makes it "just happen". + "R31", // REGTMP + + "F0", + "F1", + "F2", + "F3", + "F4", + "F5", + "F6", + "F7", + "F8", + "F9", + "F10", + "F11", + "F12", + "F13", + "F14", + "F15", + "F16", + "F17", + "F18", + "F19", + "F20", + "F21", + "F22", + "F23", + "F24", + "F25", + "F26", + "F27", + "F28", + "F29", + "F30", + // "F31", the allocator is limited to 64 entries. We sacrifice this FPR to support XER. + + "XER", + + // If you add registers, update asyncPreempt in runtime. + + // "CR0", + // "CR1", + // "CR2", + // "CR3", + // "CR4", + // "CR5", + // "CR6", + // "CR7", + + // "CR", + // "LR", + // "CTR", +} + +func init() { + // Make map from reg names to reg integers. 
+ if len(regNamesPPC64) > 64 { + panic("too many registers") + } + num := map[string]int{} + for i, name := range regNamesPPC64 { + num[name] = i + } + buildReg := func(s string) regMask { + m := regMask(0) + for _, r := range strings.Split(s, " ") { + if n, ok := num[r]; ok { + m |= regMask(1) << uint(n) + continue + } + panic("register " + r + " not found") + } + return m + } + + var ( + gp = buildReg("R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29") + fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30") + sp = buildReg("SP") + sb = buildReg("SB") + gr = buildReg("g") + xer = buildReg("XER") + // cr = buildReg("CR") + // ctr = buildReg("CTR") + // lr = buildReg("LR") + tmp = buildReg("R31") + ctxt = buildReg("R11") + callptr = buildReg("R12") + // tls = buildReg("R13") + gp01 = regInfo{inputs: nil, outputs: []regMask{gp}} + gp11 = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}} + xergp = regInfo{inputs: []regMask{xer}, outputs: []regMask{gp}, clobbers: xer} + gp11cxer = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}, clobbers: xer} + gp11xer = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp, xer}} + gp21 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}} + gp21a0 = regInfo{inputs: []regMask{gp, gp | sp | sb}, outputs: []regMask{gp}} + gp21cxer = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}, clobbers: xer} + gp21xer = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp, xer}, clobbers: xer} + gp2xer1xer = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, xer}, outputs: []regMask{gp, xer}, clobbers: xer} + gp31 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}} + gp1cr = regInfo{inputs: []regMask{gp | sp | sb}} + gp2cr = regInfo{inputs: []regMask{gp | sp | 
sb, gp | sp | sb}} + crgp = regInfo{inputs: nil, outputs: []regMask{gp}} + crgp11 = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}} + crgp21 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}} + gpload = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}} + gploadidx = regInfo{inputs: []regMask{gp | sp | sb, gp}, outputs: []regMask{gp}} + prefreg = regInfo{inputs: []regMask{gp | sp | sb}} + gpstore = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}} + gpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}} + gpstorezero = regInfo{inputs: []regMask{gp | sp | sb}} // ppc64.REGZERO is reserved zero value + gpxchg = regInfo{inputs: []regMask{gp | sp | sb, gp}, outputs: []regMask{gp}} + gpcas = regInfo{inputs: []regMask{gp | sp | sb, gp, gp}, outputs: []regMask{gp}} + fp01 = regInfo{inputs: nil, outputs: []regMask{fp}} + fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}} + fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}} + gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}} + fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}} + fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}} + fp2cr = regInfo{inputs: []regMask{fp, fp}} + fpload = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{fp}} + fploadidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{fp}} + fpstore = regInfo{inputs: []regMask{gp | sp | sb, fp}} + fpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, fp}} + callerSave = regMask(gp | fp | gr | xer) + r3 = buildReg("R3") + r4 = buildReg("R4") + r5 = buildReg("R5") + r6 = buildReg("R6") + ) + ops := []opData{ + {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1 + {name: "ADDCC", argLength: 2, reg: gp21, asm: "ADDCC", commutative: true, typ: "(Int,Flags)"}, // arg0 + arg1 + {name: "ADDconst", argLength: 1, reg: gp11, asm: "ADD", aux: "Int64"}, // arg0 + 
auxInt + {name: "ADDCCconst", argLength: 1, reg: gp11cxer, asm: "ADDCCC", aux: "Int64", typ: "(Int,Flags)"}, // arg0 + auxInt sets CC, clobbers XER + {name: "FADD", argLength: 2, reg: fp21, asm: "FADD", commutative: true}, // arg0+arg1 + {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0+arg1 + {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0-arg1 + {name: "SUBCC", argLength: 2, reg: gp21, asm: "SUBCC", typ: "(Int,Flags)"}, // arg0-arg1 sets CC + {name: "SUBFCconst", argLength: 1, reg: gp11cxer, asm: "SUBC", aux: "Int64"}, // auxInt - arg0 (carry is ignored) + {name: "FSUB", argLength: 2, reg: fp21, asm: "FSUB"}, // arg0-arg1 + {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0-arg1 + + {name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true}, // arg0*arg1 (signed 64-bit) + {name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true}, // arg0*arg1 (signed 32-bit) + {name: "MULLDconst", argLength: 1, reg: gp11, asm: "MULLD", aux: "Int32", typ: "Int64"}, // arg0*auxInt (signed 64-bit) + {name: "MULLWconst", argLength: 1, reg: gp11, asm: "MULLW", aux: "Int32", typ: "Int64"}, // arg0*auxInt (signed 64-bit) + {name: "MADDLD", argLength: 3, reg: gp31, asm: "MADDLD", typ: "Int64"}, // (arg0*arg1)+arg2 (signed 64-bit) + + {name: "MULHD", argLength: 2, reg: gp21, asm: "MULHD", commutative: true}, // (arg0 * arg1) >> 64, signed + {name: "MULHW", argLength: 2, reg: gp21, asm: "MULHW", commutative: true}, // (arg0 * arg1) >> 32, signed + {name: "MULHDU", argLength: 2, reg: gp21, asm: "MULHDU", commutative: true}, // (arg0 * arg1) >> 64, unsigned + {name: "MULHWU", argLength: 2, reg: gp21, asm: "MULHWU", commutative: true}, // (arg0 * arg1) >> 32, unsigned + + {name: "FMUL", argLength: 2, reg: fp21, asm: "FMUL", commutative: true}, // arg0*arg1 + {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true}, // arg0*arg1 + + {name: "FMADD", argLength: 3, 
reg: fp31, asm: "FMADD"}, // arg0*arg1 + arg2 + {name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS"}, // arg0*arg1 + arg2 + {name: "FMSUB", argLength: 3, reg: fp31, asm: "FMSUB"}, // arg0*arg1 - arg2 + {name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS"}, // arg0*arg1 - arg2 + + {name: "SRAD", argLength: 2, reg: gp21cxer, asm: "SRAD"}, // signed arg0 >> (arg1&127), 64 bit width (note: 127, not 63!) + {name: "SRAW", argLength: 2, reg: gp21cxer, asm: "SRAW"}, // signed arg0 >> (arg1&63), 32 bit width + {name: "SRD", argLength: 2, reg: gp21, asm: "SRD"}, // unsigned arg0 >> (arg1&127), 64 bit width + {name: "SRW", argLength: 2, reg: gp21, asm: "SRW"}, // unsigned arg0 >> (arg1&63), 32 bit width + {name: "SLD", argLength: 2, reg: gp21, asm: "SLD"}, // arg0 << (arg1&127), 64 bit width + {name: "SLW", argLength: 2, reg: gp21, asm: "SLW"}, // arg0 << (arg1&63), 32 bit width + + {name: "ROTL", argLength: 2, reg: gp21, asm: "ROTL"}, // arg0 rotate left by arg1 mod 64 + {name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32 + // The following are ops to implement the extended mnemonics for shifts as described in section C.8 of the ISA. + // The constant shift values are packed into the aux int32. 
+ {name: "CLRLSLWI", argLength: 1, reg: gp11, asm: "CLRLSLWI", aux: "Int32"}, // + {name: "CLRLSLDI", argLength: 1, reg: gp11, asm: "CLRLSLDI", aux: "Int32"}, // + + // Operations which consume or generate the CA (xer) + {name: "ADDC", argLength: 2, reg: gp21xer, asm: "ADDC", commutative: true, typ: "(UInt64, UInt64)"}, // arg0 + arg1 -> out, CA + {name: "SUBC", argLength: 2, reg: gp21xer, asm: "SUBC", typ: "(UInt64, UInt64)"}, // arg0 - arg1 -> out, CA + {name: "ADDCconst", argLength: 1, reg: gp11xer, asm: "ADDC", typ: "(UInt64, UInt64)", aux: "Int64"}, // arg0 + imm16 -> out, CA + {name: "SUBCconst", argLength: 1, reg: gp11xer, asm: "SUBC", typ: "(UInt64, UInt64)", aux: "Int64"}, // imm16 - arg0 -> out, CA + {name: "ADDE", argLength: 3, reg: gp2xer1xer, asm: "ADDE", typ: "(UInt64, UInt64)", commutative: true}, // arg0 + arg1 + CA (arg2) -> out, CA + {name: "SUBE", argLength: 3, reg: gp2xer1xer, asm: "SUBE", typ: "(UInt64, UInt64)"}, // arg0 - arg1 - CA (arg2) -> out, CA + {name: "ADDZEzero", argLength: 1, reg: xergp, asm: "ADDZE", typ: "UInt64"}, // CA (arg0) + $0 -> out + {name: "SUBZEzero", argLength: 1, reg: xergp, asm: "SUBZE", typ: "UInt64"}, // $0 - CA (arg0) -> out + + {name: "SRADconst", argLength: 1, reg: gp11cxer, asm: "SRAD", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 64, 64 bit width + {name: "SRAWconst", argLength: 1, reg: gp11cxer, asm: "SRAW", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 32, 32 bit width + {name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "Int64"}, // unsigned arg0 >> auxInt, 0 <= auxInt < 64, 64 bit width + {name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "Int64"}, // unsigned arg0 >> auxInt, 0 <= auxInt < 32, 32 bit width + {name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "Int64"}, // arg0 << auxInt, 0 <= auxInt < 64, 64 bit width + {name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "Int64"}, // arg0 << auxInt, 0 <= auxInt < 32, 32 bit width + + {name: 
"ROTLconst", argLength: 1, reg: gp11, asm: "ROTL", aux: "Int64"}, // arg0 rotate left by auxInt bits + {name: "ROTLWconst", argLength: 1, reg: gp11, asm: "ROTLW", aux: "Int64"}, // uint32(arg0) rotate left by auxInt bits + {name: "EXTSWSLconst", argLength: 1, reg: gp11, asm: "EXTSWSLI", aux: "Int64"}, + + {name: "RLWINM", argLength: 1, reg: gp11, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by immediate "rlwinm". encodePPC64RotateMask describes aux + {name: "RLWNM", argLength: 2, reg: gp21, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by "rlwnm". encodePPC64RotateMask describes aux + {name: "RLWMI", argLength: 2, reg: gp21a0, asm: "RLWMI", aux: "Int64", resultInArg0: true}, // "rlwimi" similar aux encoding as above + {name: "RLDICL", argLength: 1, reg: gp11, asm: "RLDICL", aux: "Int64"}, // Auxint is encoded similarly to RLWINM, but only MB and SH are valid. ME is always 63. + {name: "RLDICR", argLength: 1, reg: gp11, asm: "RLDICR", aux: "Int64"}, // Likewise, but only ME and SH are valid. MB is always 0. 
+ + {name: "CNTLZD", argLength: 1, reg: gp11, asm: "CNTLZD"}, // count leading zeros + {name: "CNTLZDCC", argLength: 1, reg: gp11, asm: "CNTLZDCC", typ: "(Int, Flags)"}, // count leading zeros, sets CC + {name: "CNTLZW", argLength: 1, reg: gp11, asm: "CNTLZW"}, // count leading zeros (32 bit) + + {name: "CNTTZD", argLength: 1, reg: gp11, asm: "CNTTZD"}, // count trailing zeros + {name: "CNTTZW", argLength: 1, reg: gp11, asm: "CNTTZW"}, // count trailing zeros (32 bit) + + {name: "POPCNTD", argLength: 1, reg: gp11, asm: "POPCNTD"}, // number of set bits in arg0 + {name: "POPCNTW", argLength: 1, reg: gp11, asm: "POPCNTW"}, // number of set bits in each word of arg0 placed in corresponding word + {name: "POPCNTB", argLength: 1, reg: gp11, asm: "POPCNTB"}, // number of set bits in each byte of arg0 placed in corresponding byte + + {name: "FDIV", argLength: 2, reg: fp21, asm: "FDIV"}, // arg0/arg1 + {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS"}, // arg0/arg1 + + {name: "DIVD", argLength: 2, reg: gp21, asm: "DIVD", typ: "Int64"}, // arg0/arg1 (signed 64-bit) + {name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", typ: "Int32"}, // arg0/arg1 (signed 32-bit) + {name: "DIVDU", argLength: 2, reg: gp21, asm: "DIVDU", typ: "Int64"}, // arg0/arg1 (unsigned 64-bit) + {name: "DIVWU", argLength: 2, reg: gp21, asm: "DIVWU", typ: "Int32"}, // arg0/arg1 (unsigned 32-bit) + + {name: "MODUD", argLength: 2, reg: gp21, asm: "MODUD", typ: "UInt64"}, // arg0 % arg1 (unsigned 64-bit) + {name: "MODSD", argLength: 2, reg: gp21, asm: "MODSD", typ: "Int64"}, // arg0 % arg1 (signed 64-bit) + {name: "MODUW", argLength: 2, reg: gp21, asm: "MODUW", typ: "UInt32"}, // arg0 % arg1 (unsigned 32-bit) + {name: "MODSW", argLength: 2, reg: gp21, asm: "MODSW", typ: "Int32"}, // arg0 % arg1 (signed 32-bit) + // MOD is implemented as rem := arg0 - (arg0/arg1) * arg1 + + // Conversions are all float-to-float register operations. "Integer" refers to encoding in the FP register. 
+ {name: "FCTIDZ", argLength: 1, reg: fp11, asm: "FCTIDZ", typ: "Float64"}, // convert float to 64-bit int round towards zero + {name: "FCTIWZ", argLength: 1, reg: fp11, asm: "FCTIWZ", typ: "Float64"}, // convert float to 32-bit int round towards zero + {name: "FCFID", argLength: 1, reg: fp11, asm: "FCFID", typ: "Float64"}, // convert 64-bit integer to float + {name: "FCFIDS", argLength: 1, reg: fp11, asm: "FCFIDS", typ: "Float32"}, // convert 32-bit integer to float + {name: "FRSP", argLength: 1, reg: fp11, asm: "FRSP", typ: "Float64"}, // round float to 32-bit value + + // Movement between float and integer registers with no change in bits; accomplished with stores+loads on PPC. + // Because the 32-bit load-literal-bits instructions have impoverished addressability, always widen the + // data instead and use FMOVDload and FMOVDstore instead (this will also dodge endianess issues). + // There are optimizations that should apply -- (Xi2f64 (MOVWload (not-ADD-ptr+offset) ) ) could use + // the word-load instructions. 
(Xi2f64 (MOVDload ptr )) can be (FMOVDload ptr) + + {name: "MFVSRD", argLength: 1, reg: fpgp, asm: "MFVSRD", typ: "Int64"}, // move 64 bits of F register into G register + {name: "MTVSRD", argLength: 1, reg: gpfp, asm: "MTVSRD", typ: "Float64"}, // move 64 bits of G register into F register + + {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0&arg1 + {name: "ANDN", argLength: 2, reg: gp21, asm: "ANDN"}, // arg0&^arg1 + {name: "ANDNCC", argLength: 2, reg: gp21, asm: "ANDNCC", typ: "(Int64,Flags)"}, // arg0&^arg1 sets CC + {name: "ANDCC", argLength: 2, reg: gp21, asm: "ANDCC", commutative: true, typ: "(Int64,Flags)"}, // arg0&arg1 sets CC + {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0|arg1 + {name: "ORN", argLength: 2, reg: gp21, asm: "ORN"}, // arg0|^arg1 + {name: "ORCC", argLength: 2, reg: gp21, asm: "ORCC", commutative: true, typ: "(Int,Flags)"}, // arg0|arg1 sets CC + {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0|arg1) + {name: "NORCC", argLength: 2, reg: gp21, asm: "NORCC", commutative: true, typ: "(Int,Flags)"}, // ^(arg0|arg1) sets CC + {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", typ: "Int64", commutative: true}, // arg0^arg1 + {name: "XORCC", argLength: 2, reg: gp21, asm: "XORCC", commutative: true, typ: "(Int,Flags)"}, // arg0^arg1 sets CC + {name: "EQV", argLength: 2, reg: gp21, asm: "EQV", typ: "Int64", commutative: true}, // arg0^^arg1 + {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0 (integer) + {name: "NEGCC", argLength: 1, reg: gp11, asm: "NEGCC", typ: "(Int,Flags)"}, // -arg0 (integer) sets CC + {name: "BRD", argLength: 1, reg: gp11, asm: "BRD"}, // reversebytes64(arg0) + {name: "BRW", argLength: 1, reg: gp11, asm: "BRW"}, // reversebytes32(arg0) + {name: "BRH", argLength: 1, reg: gp11, asm: "BRH"}, // reversebytes16(arg0) + {name: "FNEG", argLength: 1, reg: fp11, asm: "FNEG"}, // -arg0 (floating point) + {name: "FSQRT", argLength: 1, 
reg: fp11, asm: "FSQRT"}, // sqrt(arg0) (floating point) + {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0) (floating point, single precision) + {name: "FFLOOR", argLength: 1, reg: fp11, asm: "FRIM"}, // floor(arg0), float64 + {name: "FCEIL", argLength: 1, reg: fp11, asm: "FRIP"}, // ceil(arg0), float64 + {name: "FTRUNC", argLength: 1, reg: fp11, asm: "FRIZ"}, // trunc(arg0), float64 + {name: "FROUND", argLength: 1, reg: fp11, asm: "FRIN"}, // round(arg0), float64 + {name: "FABS", argLength: 1, reg: fp11, asm: "FABS"}, // abs(arg0), float64 + {name: "FNABS", argLength: 1, reg: fp11, asm: "FNABS"}, // -abs(arg0), float64 + {name: "FCPSGN", argLength: 2, reg: fp21, asm: "FCPSGN"}, // copysign arg0 -> arg1, float64 + + {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0|aux + {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux + {name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", typ: "(Int,Flags)"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always. + + {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB", typ: "Int64"}, // sign extend int8 to int64 + {name: "MOVBZreg", argLength: 1, reg: gp11, asm: "MOVBZ", typ: "Int64"}, // zero extend uint8 to uint64 + {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH", typ: "Int64"}, // sign extend int16 to int64 + {name: "MOVHZreg", argLength: 1, reg: gp11, asm: "MOVHZ", typ: "Int64"}, // zero extend uint16 to uint64 + {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW", typ: "Int64"}, // sign extend int32 to int64 + {name: "MOVWZreg", argLength: 1, reg: gp11, asm: "MOVWZ", typ: "Int64"}, // zero extend uint32 to uint64 + + // Load bytes in the endian order of the arch from arg0+aux+auxint into a 64 bit register. 
+ {name: "MOVBZload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load byte zero extend + {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes sign extend + {name: "MOVHZload", argLength: 2, reg: gpload, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes zero extend + {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes sign extend + {name: "MOVWZload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes zero extend + {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes + + // Load bytes in reverse endian order of the arch from arg0 into a 64 bit register, all zero extend. + // The generated instructions are indexed loads with no offset field in the instruction so the aux fields are not used. + // In these cases the index register field is set to 0 and the full address is in the base register. + {name: "MOVDBRload", argLength: 2, reg: gpload, asm: "MOVDBR", typ: "UInt64", faultOnNilArg0: true}, // load 8 bytes reverse order + {name: "MOVWBRload", argLength: 2, reg: gpload, asm: "MOVWBR", typ: "UInt32", faultOnNilArg0: true}, // load 4 bytes zero extend reverse order + {name: "MOVHBRload", argLength: 2, reg: gpload, asm: "MOVHBR", typ: "UInt16", faultOnNilArg0: true}, // load 2 bytes zero extend reverse order + + // In these cases an index register is used in addition to a base register + // Loads from memory location arg[0] + arg[1]. 
+ {name: "MOVBZloadidx", argLength: 3, reg: gploadidx, asm: "MOVBZ", typ: "UInt8"}, // zero extend uint8 to uint64 + {name: "MOVHloadidx", argLength: 3, reg: gploadidx, asm: "MOVH", typ: "Int16"}, // sign extend int16 to int64 + {name: "MOVHZloadidx", argLength: 3, reg: gploadidx, asm: "MOVHZ", typ: "UInt16"}, // zero extend uint16 to uint64 + {name: "MOVWloadidx", argLength: 3, reg: gploadidx, asm: "MOVW", typ: "Int32"}, // sign extend int32 to int64 + {name: "MOVWZloadidx", argLength: 3, reg: gploadidx, asm: "MOVWZ", typ: "UInt32"}, // zero extend uint32 to uint64 + {name: "MOVDloadidx", argLength: 3, reg: gploadidx, asm: "MOVD", typ: "Int64"}, + {name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVHBR", typ: "Int16"}, // sign extend int16 to int64 + {name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVWBR", typ: "Int32"}, // sign extend int32 to int64 + {name: "MOVDBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVDBR", typ: "Int64"}, + {name: "FMOVDloadidx", argLength: 3, reg: fploadidx, asm: "FMOVD", typ: "Float64"}, + {name: "FMOVSloadidx", argLength: 3, reg: fploadidx, asm: "FMOVS", typ: "Float32"}, + + // Prefetch instruction + // Do prefetch of address generated with arg0 and arg1 with option aux. arg0=addr,arg1=memory, aux=option. + {name: "DCBT", argLength: 2, aux: "Int64", reg: prefreg, asm: "DCBT", hasSideEffects: true}, + + // Store bytes in the reverse endian order of the arch into arg0. + // These are indexed stores with no offset field in the instruction so the auxint fields are not used. 
+ {name: "MOVDBRstore", argLength: 3, reg: gpstore, asm: "MOVDBR", typ: "Mem", faultOnNilArg0: true}, // store 8 bytes reverse order + {name: "MOVWBRstore", argLength: 3, reg: gpstore, asm: "MOVWBR", typ: "Mem", faultOnNilArg0: true}, // store 4 bytes reverse order + {name: "MOVHBRstore", argLength: 3, reg: gpstore, asm: "MOVHBR", typ: "Mem", faultOnNilArg0: true}, // store 2 bytes reverse order + + // Floating point loads from arg0+aux+auxint + {name: "FMOVDload", argLength: 2, reg: fpload, asm: "FMOVD", aux: "SymOff", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load double float + {name: "FMOVSload", argLength: 2, reg: fpload, asm: "FMOVS", aux: "SymOff", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load single float + + // Store bytes in the endian order of the arch into arg0+aux+auxint + {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte + {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes + {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes + {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes + + // Store floating point value into arg0+aux+auxint + {name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store double flot + {name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store single float + + // Stores using index and base registers + // Stores to arg[0] + arg[1] + {name: "MOVBstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVB", typ: "Mem"}, // store bye + {name: "MOVHstoreidx", 
argLength: 4, reg: gpstoreidx, asm: "MOVH", typ: "Mem"}, // store half word + {name: "MOVWstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVW", typ: "Mem"}, // store word + {name: "MOVDstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVD", typ: "Mem"}, // store double word + {name: "FMOVDstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVD", typ: "Mem"}, // store double float + {name: "FMOVSstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVS", typ: "Mem"}, // store single float + {name: "MOVHBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVHBR", typ: "Mem"}, // store half word reversed byte using index reg + {name: "MOVWBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVWBR", typ: "Mem"}, // store word reversed byte using index reg + {name: "MOVDBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVDBR", typ: "Mem"}, // store double word reversed byte using index reg + + // The following ops store 0 into arg0+aux+auxint arg1=mem + {name: "MOVBstorezero", argLength: 2, reg: gpstorezero, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 1 byte + {name: "MOVHstorezero", argLength: 2, reg: gpstorezero, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 2 bytes + {name: "MOVWstorezero", argLength: 2, reg: gpstorezero, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 4 bytes + {name: "MOVDstorezero", argLength: 2, reg: gpstorezero, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 8 bytes + + {name: "MOVDaddr", argLength: 1, reg: regInfo{inputs: []regMask{sp | sb | gp}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVD", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB/GP + + {name: "MOVDconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVD", typ: "Int64", rematerializeable: true}, // + {name: "FMOVDconst", argLength: 0, reg: fp01, 
aux: "Float64", asm: "FMOVD", rematerializeable: true}, // + {name: "FMOVSconst", argLength: 0, reg: fp01, aux: "Float32", asm: "FMOVS", rematerializeable: true}, // + {name: "FCMPU", argLength: 2, reg: fp2cr, asm: "FCMPU", typ: "Flags"}, + + {name: "CMP", argLength: 2, reg: gp2cr, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1 + {name: "CMPU", argLength: 2, reg: gp2cr, asm: "CMPU", typ: "Flags"}, // arg0 compare to arg1 + {name: "CMPW", argLength: 2, reg: gp2cr, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1 + {name: "CMPWU", argLength: 2, reg: gp2cr, asm: "CMPWU", typ: "Flags"}, // arg0 compare to arg1 + {name: "CMPconst", argLength: 1, reg: gp1cr, asm: "CMP", aux: "Int64", typ: "Flags"}, + {name: "CMPUconst", argLength: 1, reg: gp1cr, asm: "CMPU", aux: "Int64", typ: "Flags"}, + {name: "CMPWconst", argLength: 1, reg: gp1cr, asm: "CMPW", aux: "Int32", typ: "Flags"}, + {name: "CMPWUconst", argLength: 1, reg: gp1cr, asm: "CMPWU", aux: "Int32", typ: "Flags"}, + + // ISEL arg2 ? arg0 : arg1 + // ISELZ arg1 ? arg0 : $0 + // auxInt values 0=LT 1=GT 2=EQ 3=SO (summary overflow/unordered) 4=GE 5=LE 6=NE 7=NSO (not summary overflow/not unordered) + // Note, auxInt^4 inverts the comparison condition. For example, LT^4 becomes GE, and "ISEL [a] x y z" is equivalent to ISEL [a^4] y x z". + {name: "ISEL", argLength: 3, reg: crgp21, asm: "ISEL", aux: "Int32", typ: "Int32"}, + {name: "ISELZ", argLength: 2, reg: crgp11, asm: "ISEL", aux: "Int32"}, + + // SETBC auxInt values 0=LT 1=GT 2=EQ (CRbit=1)? 1 : 0 + {name: "SETBC", argLength: 1, reg: crgp, asm: "SETBC", aux: "Int32", typ: "Int32"}, + // SETBCR auxInt values 0=LT 1=GT 2=EQ (CRbit=1)? 0 : 1 + {name: "SETBCR", argLength: 1, reg: crgp, asm: "SETBCR", aux: "Int32", typ: "Int32"}, + + // pseudo-ops + {name: "Equal", argLength: 1, reg: crgp}, // bool, true flags encode x==y false otherwise. + {name: "NotEqual", argLength: 1, reg: crgp}, // bool, true flags encode x!=y false otherwise. 
+ {name: "LessThan", argLength: 1, reg: crgp}, // bool, true flags encode x which is wrong for NaN + {name: "GreaterThan", argLength: 1, reg: crgp}, // bool, true flags encode x>y false otherwise. + {name: "FGreaterThan", argLength: 1, reg: crgp}, // bool, true flags encode x>y false otherwise. + {name: "GreaterEqual", argLength: 1, reg: crgp}, // bool, true flags encode x>=y false otherwise. + {name: "FGreaterEqual", argLength: 1, reg: crgp}, // bool, true flags encode x>=y false otherwise.; PPC >= === !< which is wrong for NaN + + // Scheduler ensures LoweredGetClosurePtr occurs only in entry block, + // and sorts it to the very beginning of the block to prevent other + // use of the closure pointer. + {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{ctxt}}, zeroWidth: true}, + + // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem. + {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true}, + + // LoweredGetCallerPC evaluates to the PC to which its "caller" will return. + // I.e., if f calls g "calls" getcallerpc, + // the result should be the PC within f that g will return to. + // See runtime/stubs.go for a more detailed discussion. + {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, + + //arg0=ptr,arg1=mem, returns void. Faults if ptr is nil. + {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true}, + // Round ops to block fused-multiply-add extraction. + {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true}, + {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true}, + + {name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). 
arg0=mem, auxint=argsize, returns mem + {name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{callptr, ctxt, 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{callptr}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + + // large or unaligned zeroing + // arg0 = address of memory to zero (in R3, changed as side effect) + // returns mem + // + // a loop is generated when there is more than one iteration + // needed to clear 4 doublewords + // + // XXLXOR VS32,VS32,VS32 + // MOVD $len/32,R31 + // MOVD R31,CTR + // MOVD $16,R31 + // loop: + // STXVD2X VS32,(R0)(R3) + // STXVD2X VS32,(R31)(R3) + // ADD R3,32 + // BC loop + + // remaining doubleword clears generated as needed + // MOVD R0,(R3) + // MOVD R0,8(R3) + // MOVD R0,16(R3) + // MOVD R0,24(R3) + + // one or more of these to clear remainder < 8 bytes + // MOVW R0,n1(R3) + // MOVH R0,n2(R3) + // MOVB R0,n3(R3) + { + name: "LoweredZero", + aux: "Int64", + argLength: 2, + reg: regInfo{ + inputs: []regMask{buildReg("R20")}, + clobbers: buildReg("R20"), + }, + clobberFlags: true, + typ: "Mem", + faultOnNilArg0: true, + unsafePoint: true, + }, + { + name: "LoweredZeroShort", + aux: "Int64", + argLength: 2, + reg: regInfo{ + inputs: []regMask{gp}}, + typ: "Mem", + faultOnNilArg0: true, + unsafePoint: true, + }, + { + name: "LoweredQuadZeroShort", + aux: "Int64", + argLength: 2, + reg: regInfo{ + inputs: []regMask{gp}, + }, + typ: "Mem", + faultOnNilArg0: true, + unsafePoint: true, + }, + { + name: 
"LoweredQuadZero", + aux: "Int64", + argLength: 2, + reg: regInfo{ + inputs: []regMask{buildReg("R20")}, + clobbers: buildReg("R20"), + }, + clobberFlags: true, + typ: "Mem", + faultOnNilArg0: true, + unsafePoint: true, + }, + + // R31 is temp register + // Loop code: + // MOVD len/32,R31 set up loop ctr + // MOVD R31,CTR + // MOVD $16,R31 index register + // loop: + // LXVD2X (R0)(R4),VS32 + // LXVD2X (R31)(R4),VS33 + // ADD R4,$32 increment src + // STXVD2X VS32,(R0)(R3) + // STXVD2X VS33,(R31)(R3) + // ADD R3,$32 increment dst + // BC 16,0,loop branch ctr + // For this purpose, VS32 and VS33 are treated as + // scratch registers. Since regalloc does not + // track vector registers, even if it could be marked + // as clobbered it would have no effect. + // TODO: If vector registers are managed by regalloc + // mark these as clobbered. + // + // Bytes not moved by this loop are moved + // with a combination of the following instructions, + // starting with the largest sizes and generating as + // many as needed, using the appropriate offset value. + // MOVD n(R4),R14 + // MOVD R14,n(R3) + // MOVW n1(R4),R14 + // MOVW R14,n1(R3) + // MOVH n2(R4),R14 + // MOVH R14,n2(R3) + // MOVB n3(R4),R14 + // MOVB R14,n3(R3) + + { + name: "LoweredMove", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{buildReg("R20"), buildReg("R21")}, + clobbers: buildReg("R20 R21"), + }, + clobberFlags: true, + typ: "Mem", + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + }, + { + name: "LoweredMoveShort", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{gp, gp}, + }, + typ: "Mem", + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + }, + + // The following is similar to the LoweredMove, but uses + // LXV instead of LXVD2X, which does not require an index + // register and will do 4 in a loop instead of only. 
+ { + name: "LoweredQuadMove", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{buildReg("R20"), buildReg("R21")}, + clobbers: buildReg("R20 R21"), + }, + clobberFlags: true, + typ: "Mem", + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + }, + + { + name: "LoweredQuadMoveShort", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{gp, gp}, + }, + typ: "Mem", + faultOnNilArg0: true, + faultOnNilArg1: true, + unsafePoint: true, + }, + + {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true}, + + {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, typ: "UInt8", aux: "Int64", clobberFlags: true, faultOnNilArg0: true}, + {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, typ: "UInt32", aux: "Int64", clobberFlags: true, faultOnNilArg0: true}, + {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true}, + {name: "LoweredAtomicLoadPtr", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true}, + + // atomic add32, 64 + // LWSYNC + // LDAR (Rarg0), Rout + // ADD Rarg1, Rout + // STDCCC Rout, (Rarg0) + // BNE -3(PC) + // return new sum + {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, + + // atomic exchange32, 64 + // LWSYNC + // LDAR (Rarg0), Rout + // STDCCC Rarg1, (Rarg0) + // BNE -2(PC) + // ISYNC + // return old val + {name: 
"LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, + + // atomic compare and swap. + // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. auxint must be zero. + // if *arg0 == arg1 { + // *arg0 = arg2 + // return (true, memory) + // } else { + // return (false, memory) + // } + // SYNC + // LDAR (Rarg0), Rtmp + // CMP Rarg1, Rtmp + // BNE 3(PC) + // STDCCC Rarg2, (Rarg0) + // BNE -4(PC) + // CBNZ Rtmp, -4(PC) + // CSET EQ, Rout + {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, + + // atomic 8/32 and/or. + // *arg0 &= (|=) arg1. arg2=mem. returns memory. auxint must be zero. + // LBAR/LWAT (Rarg0), Rtmp + // AND/OR Rarg1, Rtmp + // STBCCC/STWCCC Rtmp, (Rarg0), Rtmp + // BNE Rtmp, -3(PC) + {name: "LoweredAtomicAnd8", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicAnd32", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicOr8", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicOr32", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true}, + + // LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, auxint=# of buffer entries needed + // It preserves R0 through R17 (except special registers R1, R2, R11, R12, R13), g, and R20 and R21, + // but may clobber anything else, including R31 (REGTMP). + // Returns a pointer to a write barrier buffer in R29. 
+ {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ buildReg("R0 R3 R4 R5 R6 R7 R8 R9 R10 R14 R15 R16 R17 R20 R21 g")) | buildReg("R31"), outputs: []regMask{buildReg("R29")}}, clobberFlags: true, aux: "Int64"}, + + {name: "LoweredPubBarrier", argLength: 1, asm: "LWSYNC", hasSideEffects: true}, // Do data barrier. arg0=memory + // There are three of these functions so that they can have three different register inputs. + // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the + // default registers to match so we don't need to copy registers around unnecessarily. + {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r6}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r5}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + + // (InvertFlags (CMP a b)) == (CMP b a) + // So if we want (LessThan (CMP a b)) but we can't do that because a is a constant, + // then we do (LessThan (InvertFlags (CMP b a))) instead. + // Rewrites will convert this to (GreaterThan (CMP b a)). + // InvertFlags is a pseudo-op which can't appear in assembly output. + {name: "InvertFlags", argLength: 1}, // reverse direction of arg0 + + // Constant flag values. 
For any comparison, there are 3 possible + // outcomes: either the three from the signed total order (<,==,>) + // or the three from the unsigned total order, depending on which + // comparison operation was used (CMP or CMPU -- PPC is different from + // the other architectures, which have a single comparison producing + // both signed and unsigned comparison results.) + + // These ops are for temporary use by rewrite rules. They + // cannot appear in the generated assembly. + {name: "FlagEQ"}, // equal + {name: "FlagLT"}, // signed < or unsigned < + {name: "FlagGT"}, // signed > or unsigned > + } + + blocks := []blockData{ + {name: "EQ", controls: 1}, + {name: "NE", controls: 1}, + {name: "LT", controls: 1}, + {name: "LE", controls: 1}, + {name: "GT", controls: 1}, + {name: "GE", controls: 1}, + {name: "FLT", controls: 1}, + {name: "FLE", controls: 1}, + {name: "FGT", controls: 1}, + {name: "FGE", controls: 1}, + } + + archs = append(archs, arch{ + name: "PPC64", + pkg: "cmd/internal/obj/ppc64", + genfile: "../../ppc64/ssa.go", + ops: ops, + blocks: blocks, + regnames: regNamesPPC64, + ParamIntRegNames: "R3 R4 R5 R6 R7 R8 R9 R10 R14 R15 R16 R17", + ParamFloatRegNames: "F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12", + gpregmask: gp, + fpregmask: fp, + specialregmask: xer, + framepointerreg: -1, + linkreg: -1, // not used + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules new file mode 100644 index 0000000000000000000000000000000000000000..2eecf94300a5b454b70d6ec4d6e8b04786606521 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/PPC64latelower.rules @@ -0,0 +1,55 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains rules used by the laterLower pass. 
+ +// Simplify ISEL x $0 z into ISELZ +(ISEL [a] x (MOVDconst [0]) z) => (ISELZ [a] x z) +// Simplify ISEL $0 y z into ISELZ by inverting comparison and reversing arguments. +(ISEL [a] (MOVDconst [0]) y z) => (ISELZ [a^0x4] y z) + +// SETBC, SETBCR is supported on ISA 3.1(Power10) and newer, use ISELZ for +// older targets +(SETBC [2] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [2] (MOVDconst [1]) cmp) +(SETBCR [2] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [6] (MOVDconst [1]) cmp) +(SETBC [0] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [0] (MOVDconst [1]) cmp) +(SETBCR [0] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [4] (MOVDconst [1]) cmp) +(SETBC [1] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [1] (MOVDconst [1]) cmp) +(SETBCR [1] cmp) && buildcfg.GOPPC64 <= 9 => (ISELZ [5] (MOVDconst [1]) cmp) + +// Avoid using ANDCCconst if the value for CR0 is not needed, since ANDCCconst +// always sets it. +(Select0 z:(ANDCCconst [m] x)) && z.Uses == 1 && isPPC64ValidShiftMask(m) => (RLDICL [encodePPC64RotateMask(0,m,64)] x) +// The upper bits of the smaller than register values is undefined. Take advantage of that. +(AND x:(MOVDconst [m]) n) && t.Size() <= 2 => (Select0 (ANDCCconst [int64(int16(m))] n)) + +// Convert simple bit masks to an equivalent rldic[lr] if possible. +(AND x:(MOVDconst [m]) n) && isPPC64ValidShiftMask(m) => (RLDICL [encodePPC64RotateMask(0,m,64)] n) +(AND x:(MOVDconst [m]) n) && m != 0 && isPPC64ValidShiftMask(^m) => (RLDICR [encodePPC64RotateMask(0,m,64)] n) + +// If the RLDICL does not rotate its value, a shifted value can be merged. +(RLDICL [em] x:(SRDconst [s] a)) && (em&0xFF0000) == 0 => (RLDICL [mergePPC64RLDICLandSRDconst(em, s)] a) + +// Convert rotated 32 bit masks on 32 bit values into rlwinm. In general, this leaves the upper 32 bits in an undefined state. +(AND x:(MOVDconst [m]) n) && t.Size() == 4 && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(0,m,32)] n) + +// When PCRel is supported, paddi can add a 34b signed constant in one instruction. 
+(ADD (MOVDconst [m]) x) && supportsPPC64PCRel() && (m<<30)>>30 == m => (ADDconst [m] x) + + +// Where possible and practical, generate CC opcodes. Due to the structure of the rules, there are limits to how +// a Value can be rewritten which make it impossible to correctly rewrite sibling Value users. To workaround this +// case, candidates for CC opcodes are converted in two steps: +// 1. Convert all (x (Op ...) ...) into (x (Select0 (OpCC ...) ...). See convertPPC64OpToOpCC for more +// detail on how and why this is done there. +// 2. Rewrite (CMPconst [0] (Select0 (OpCC ...))) into (Select1 (OpCC...)) +// Note: to minimize potentially expensive regeneration of CC opcodes during the flagalloc pass, only rewrite if +// both ops are in the same block. +(CMPconst [0] z:((ADD|AND|ANDN|OR|SUB|NOR|XOR) x y)) && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z)) +(CMPconst [0] z:((NEG|CNTLZD) x)) && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z)) +// Note: ADDCCconst only assembles to 1 instruction for int16 constants. +(CMPconst [0] z:(ADDconst [c] x)) && int64(int16(c)) == c && v.Block == z.Block => (CMPconst [0] convertPPC64OpToOpCC(z)) +// And finally, fixup the flag user. +(CMPconst [0] (Select0 z:((ADD|AND|ANDN|OR|SUB|NOR|XOR)CC x y))) => (Select1 z) +(CMPconst [0] (Select0 z:((ADDCCconst|NEGCC|CNTLZDCC) y))) => (Select1 z) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/README b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/README new file mode 100644 index 0000000000000000000000000000000000000000..74b81c2814330c354b84a3f35a3513e63f101b29 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/README @@ -0,0 +1,11 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +This command generates Go code (in the parent directory) for all +the architecture-specific opcodes, blocks, and rewrites. See the +"Hacking on SSA" section in the parent directory's README.md for +more information. + +To regenerate everything, run "go generate" on the ssa package +in the parent directory. diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/RISCV64.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/RISCV64.rules new file mode 100644 index 0000000000000000000000000000000000000000..fc206c42d3d7fe6d33f8ee5fbc2d4ca1eeb66140 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/RISCV64.rules @@ -0,0 +1,821 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Lowering arithmetic +(Add(Ptr|64|32|16|8) ...) => (ADD ...) +(Add(64|32)F ...) => (FADD(D|S) ...) + +(Sub(Ptr|64|32|16|8) ...) => (SUB ...) +(Sub(64|32)F ...) => (FSUB(D|S) ...) + +(Mul64 ...) => (MUL ...) +(Mul64uhilo ...) => (LoweredMuluhilo ...) +(Mul64uover ...) => (LoweredMuluover ...) +(Mul32 ...) => (MULW ...) +(Mul16 x y) => (MULW (SignExt16to32 x) (SignExt16to32 y)) +(Mul8 x y) => (MULW (SignExt8to32 x) (SignExt8to32 y)) +(Mul(64|32)F ...) => (FMUL(D|S) ...) + +(Div(64|32)F ...) => (FDIV(D|S) ...) + +(Div64 x y [false]) => (DIV x y) +(Div64u ...) => (DIVU ...) +(Div32 x y [false]) => (DIVW x y) +(Div32u ...) => (DIVUW ...) +(Div16 x y [false]) => (DIVW (SignExt16to32 x) (SignExt16to32 y)) +(Div16u x y) => (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y)) +(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y)) +(Div8u x y) => (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y)) + +(Hmul64 ...) => (MULH ...) +(Hmul64u ...) => (MULHU ...) 
+(Hmul32 x y) => (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y))) +(Hmul32u x y) => (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y))) + +(Select0 (Add64carry x y c)) => (ADD (ADD x y) c) +(Select1 (Add64carry x y c)) => + (OR (SLTU s:(ADD x y) x) (SLTU (ADD s c) s)) + +(Select0 (Sub64borrow x y c)) => (SUB (SUB x y) c) +(Select1 (Sub64borrow x y c)) => + (OR (SLTU x s:(SUB x y)) (SLTU s (SUB s c))) + +// (x + y) / 2 => (x / 2) + (y / 2) + (x & y & 1) +(Avg64u x y) => (ADD (ADD (SRLI [1] x) (SRLI [1] y)) (ANDI [1] (AND x y))) + +(Mod64 x y [false]) => (REM x y) +(Mod64u ...) => (REMU ...) +(Mod32 x y [false]) => (REMW x y) +(Mod32u ...) => (REMUW ...) +(Mod16 x y [false]) => (REMW (SignExt16to32 x) (SignExt16to32 y)) +(Mod16u x y) => (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y)) +(Mod8 x y) => (REMW (SignExt8to32 x) (SignExt8to32 y)) +(Mod8u x y) => (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y)) + +(And(64|32|16|8) ...) => (AND ...) +(Or(64|32|16|8) ...) => (OR ...) +(Xor(64|32|16|8) ...) => (XOR ...) + +(Neg(64|32|16|8) ...) => (NEG ...) +(Neg(64|32)F ...) => (FNEG(D|S) ...) + +(Com(64|32|16|8) ...) => (NOT ...) + + +(Sqrt ...) => (FSQRTD ...) +(Sqrt32 ...) => (FSQRTS ...) + +(Copysign ...) => (FSGNJD ...) + +(Abs ...) => (FABSD ...) + +(FMA ...) => (FMADDD ...) + +// Sign and zero extension. + +(SignExt8to16 ...) => (MOVBreg ...) +(SignExt8to32 ...) => (MOVBreg ...) +(SignExt8to64 ...) => (MOVBreg ...) +(SignExt16to32 ...) => (MOVHreg ...) +(SignExt16to64 ...) => (MOVHreg ...) +(SignExt32to64 ...) => (MOVWreg ...) + +(ZeroExt8to16 ...) => (MOVBUreg ...) +(ZeroExt8to32 ...) => (MOVBUreg ...) +(ZeroExt8to64 ...) => (MOVBUreg ...) +(ZeroExt16to32 ...) => (MOVHUreg ...) +(ZeroExt16to64 ...) => (MOVHUreg ...) +(ZeroExt32to64 ...) => (MOVWUreg ...) + +(Cvt32to32F ...) => (FCVTSW ...) +(Cvt32to64F ...) => (FCVTDW ...) +(Cvt64to32F ...) => (FCVTSL ...) +(Cvt64to64F ...) => (FCVTDL ...) + +(Cvt32Fto32 ...) => (FCVTWS ...) +(Cvt32Fto64 ...) => (FCVTLS ...) 
+(Cvt64Fto32 ...) => (FCVTWD ...) +(Cvt64Fto64 ...) => (FCVTLD ...) + +(Cvt32Fto64F ...) => (FCVTDS ...) +(Cvt64Fto32F ...) => (FCVTSD ...) + +(CvtBoolToUint8 ...) => (Copy ...) + +(Round(32|64)F ...) => (LoweredRound(32|64)F ...) + +(Slicemask x) => (SRAI [63] (NEG x)) + +// Truncations +// We ignore the unused high parts of registers, so truncates are just copies. +(Trunc16to8 ...) => (Copy ...) +(Trunc32to8 ...) => (Copy ...) +(Trunc32to16 ...) => (Copy ...) +(Trunc64to8 ...) => (Copy ...) +(Trunc64to16 ...) => (Copy ...) +(Trunc64to32 ...) => (Copy ...) + +// Shifts + +// SLL only considers the bottom 6 bits of y. If y > 64, the result should +// always be 0. +// +// Breaking down the operation: +// +// (SLL x y) generates x << (y & 63). +// +// If y < 64, this is the value we want. Otherwise, we want zero. +// +// So, we AND with -1 * uint64(y < 64), which is 0xfffff... if y < 64 and 0 otherwise. +(Lsh8x8 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt8to64 y)))) +(Lsh8x16 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt16to64 y)))) +(Lsh8x32 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt32to64 y)))) +(Lsh8x64 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg8 (SLTIU [64] y))) +(Lsh16x8 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt8to64 y)))) +(Lsh16x16 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt16to64 y)))) +(Lsh16x32 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt32to64 y)))) +(Lsh16x64 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg16 (SLTIU [64] y))) +(Lsh32x8 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt8to64 y)))) +(Lsh32x16 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt16to64 y)))) +(Lsh32x32 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt32to64 y)))) +(Lsh32x64 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg32 (SLTIU [64] 
y))) +(Lsh64x8 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt8to64 y)))) +(Lsh64x16 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt16to64 y)))) +(Lsh64x32 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt32to64 y)))) +(Lsh64x64 x y) && !shiftIsBounded(v) => (AND (SLL x y) (Neg64 (SLTIU [64] y))) + +(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y) +(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y) +(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y) +(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y) + +// SRL only considers the bottom 6 bits of y, similarly SRLW only considers the +// bottom 5 bits of y. Ensure that the result is always zero if the shift exceeds +// the maximum value. See Lsh above for a detailed description. +(Rsh8Ux8 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt8to64 y)))) +(Rsh8Ux16 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt16to64 y)))) +(Rsh8Ux32 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt32to64 y)))) +(Rsh8Ux64 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] y))) +(Rsh16Ux8 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt8to64 y)))) +(Rsh16Ux16 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt16to64 y)))) +(Rsh16Ux32 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt32to64 y)))) +(Rsh16Ux64 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] y))) +(Rsh32Ux8 x y) && !shiftIsBounded(v) => (AND (SRLW x y) (Neg32 (SLTIU [32] (ZeroExt8to64 y)))) +(Rsh32Ux16 x y) && !shiftIsBounded(v) => (AND (SRLW x y) (Neg32 (SLTIU [32] (ZeroExt16to64 y)))) +(Rsh32Ux32 x y) && !shiftIsBounded(v) => (AND (SRLW x y) (Neg32 (SLTIU [32] (ZeroExt32to64 y)))) 
+(Rsh32Ux64 x y) && !shiftIsBounded(v) => (AND (SRLW x y) (Neg32 (SLTIU [32] y))) +(Rsh64Ux8 x y) && !shiftIsBounded(v) => (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt8to64 y)))) +(Rsh64Ux16 x y) && !shiftIsBounded(v) => (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt16to64 y)))) +(Rsh64Ux32 x y) && !shiftIsBounded(v) => (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt32to64 y)))) +(Rsh64Ux64 x y) && !shiftIsBounded(v) => (AND (SRL x y) (Neg64 (SLTIU [64] y))) + +(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt8to64 x) y) +(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt16to64 x) y) +(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLW x y) +(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y) + +// SRA only considers the bottom 6 bits of y, similarly SRAW only considers the +// bottom 5 bits. If y is greater than the maximum value (either 63 or 31 +// depending on the instruction), the result of the shift should be either 0 +// or -1 based on the sign bit of x. +// +// We implement this by performing the max shift (-1) if y > the maximum value. +// +// We OR (uint64(y < 64) - 1) into y before passing it to SRA. This leaves +// us with -1 (0xffff...) if y >= 64. Similarly, we OR (uint64(y < 32) - 1) into y +// before passing it to SRAW. +// +// We don't need to sign-extend the OR result, as it will be at minimum 8 bits, +// more than the 5 or 6 bits SRAW and SRA care about. 
+(Rsh8x8 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) +(Rsh8x16 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) +(Rsh8x32 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) +(Rsh8x64 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) +(Rsh16x8 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) +(Rsh16x16 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) +(Rsh16x32 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) +(Rsh16x64 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) +(Rsh32x8 x y) && !shiftIsBounded(v) => (SRAW x (OR y (ADDI [-1] (SLTIU [32] (ZeroExt8to64 y))))) +(Rsh32x16 x y) && !shiftIsBounded(v) => (SRAW x (OR y (ADDI [-1] (SLTIU [32] (ZeroExt16to64 y))))) +(Rsh32x32 x y) && !shiftIsBounded(v) => (SRAW x (OR y (ADDI [-1] (SLTIU [32] (ZeroExt32to64 y))))) +(Rsh32x64 x y) && !shiftIsBounded(v) => (SRAW x (OR y (ADDI [-1] (SLTIU [32] y)))) +(Rsh64x8 x y) && !shiftIsBounded(v) => (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) +(Rsh64x16 x y) && !shiftIsBounded(v) => (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) +(Rsh64x32 x y) && !shiftIsBounded(v) => (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) +(Rsh64x64 x y) && !shiftIsBounded(v) => (SRA x (OR y (ADDI [-1] (SLTIU [64] y)))) + +(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt8to64 x) y) +(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt16to64 x) y) +(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y) +(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y) + +// Rotates. 
+(RotateLeft8 x (MOVDconst [c])) => (Or8 (Lsh8x64 x (MOVDconst [c&7])) (Rsh8Ux64 x (MOVDconst [-c&7]))) +(RotateLeft16 x (MOVDconst [c])) => (Or16 (Lsh16x64 x (MOVDconst [c&15])) (Rsh16Ux64 x (MOVDconst [-c&15]))) +(RotateLeft32 x (MOVDconst [c])) => (Or32 (Lsh32x64 x (MOVDconst [c&31])) (Rsh32Ux64 x (MOVDconst [-c&31]))) +(RotateLeft64 x (MOVDconst [c])) => (Or64 (Lsh64x64 x (MOVDconst [c&63])) (Rsh64Ux64 x (MOVDconst [-c&63]))) + +(Less64 ...) => (SLT ...) +(Less32 x y) => (SLT (SignExt32to64 x) (SignExt32to64 y)) +(Less16 x y) => (SLT (SignExt16to64 x) (SignExt16to64 y)) +(Less8 x y) => (SLT (SignExt8to64 x) (SignExt8to64 y)) +(Less64U ...) => (SLTU ...) +(Less32U x y) => (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y)) +(Less16U x y) => (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y)) +(Less8U x y) => (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y)) +(Less(64|32)F ...) => (FLT(D|S) ...) + +// Convert x <= y to !(y > x). +(Leq(64|32|16|8) x y) => (Not (Less(64|32|16|8) y x)) +(Leq(64|32|16|8)U x y) => (Not (Less(64|32|16|8)U y x)) +(Leq(64|32)F ...) => (FLE(D|S) ...) + +(EqPtr x y) => (SEQZ (SUB x y)) +(Eq64 x y) => (SEQZ (SUB x y)) +(Eq32 x y) && x.Type.IsSigned() => (SEQZ (SUB (SignExt32to64 x) (SignExt32to64 y))) +(Eq32 x y) && !x.Type.IsSigned() => (SEQZ (SUB (ZeroExt32to64 x) (ZeroExt32to64 y))) +(Eq16 x y) => (SEQZ (SUB (ZeroExt16to64 x) (ZeroExt16to64 y))) +(Eq8 x y) => (SEQZ (SUB (ZeroExt8to64 x) (ZeroExt8to64 y))) +(Eq(64|32)F ...) => (FEQ(D|S) ...) + +(NeqPtr x y) => (Not (EqPtr x y)) +(Neq64 x y) => (Not (Eq64 x y)) +(Neq32 x y) => (Not (Eq32 x y)) +(Neq16 x y) => (Not (Eq16 x y)) +(Neq8 x y) => (Not (Eq8 x y)) +(Neq(64|32)F ...) => (FNE(D|S) ...) 
+ +// Loads +(Load ptr mem) && t.IsBoolean() => (MOVBUload ptr mem) +(Load ptr mem) && ( is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem) +(Load ptr mem) && ( is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem) +(Load ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem) +(Load ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem) +(Load ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem) +(Load ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem) +(Load ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem) +(Load ptr mem) && is32BitFloat(t) => (FMOVWload ptr mem) +(Load ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem) + +// Stores +(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (FMOVWstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (FMOVDstore ptr val mem) + +// We need to fold MOVaddr into the LD/MOVDstore ops so that the live variable analysis +// knows what variables are being read/written by the ops. 
+(MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem) + +(MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVDstore 
[off1+off2] {mergeSym(sym1,sym2)} base val mem) +(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + +(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) => + (MOVBUload [off1+int32(off2)] {sym} base mem) +(MOVBload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) => + (MOVBload [off1+int32(off2)] {sym} base mem) +(MOVHUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) => + (MOVHUload [off1+int32(off2)] {sym} base mem) +(MOVHload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) => + (MOVHload [off1+int32(off2)] {sym} base mem) +(MOVWUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) => + (MOVWUload [off1+int32(off2)] {sym} base mem) +(MOVWload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) => + (MOVWload [off1+int32(off2)] {sym} base mem) +(MOVDload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) => + (MOVDload [off1+int32(off2)] {sym} base mem) + +(MOVBstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) => + (MOVBstore [off1+int32(off2)] {sym} base val mem) +(MOVHstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) => + (MOVHstore [off1+int32(off2)] {sym} base val mem) +(MOVWstore [off1] {sym} 
(ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) => + (MOVWstore [off1+int32(off2)] {sym} base val mem) +(MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) => + (MOVDstore [off1+int32(off2)] {sym} base val mem) +(MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) +(MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) +(MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) +(MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDstorezero [off1+int32(off2)] {sym} ptr mem) + +// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis +// with OffPtr -> ADDI. +(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x) + +// Small zeroing +(Zero [0] _ mem) => mem +(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem) +(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore ptr (MOVDconst [0]) mem) +(Zero [2] ptr mem) => + (MOVBstore [1] ptr (MOVDconst [0]) + (MOVBstore ptr (MOVDconst [0]) mem)) +(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 => + (MOVWstore ptr (MOVDconst [0]) mem) +(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore [2] ptr (MOVDconst [0]) + (MOVHstore ptr (MOVDconst [0]) mem)) +(Zero [4] ptr mem) => + (MOVBstore [3] ptr (MOVDconst [0]) + (MOVBstore [2] ptr (MOVDconst [0]) + (MOVBstore [1] ptr (MOVDconst [0]) + (MOVBstore ptr (MOVDconst [0]) mem)))) +(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 => + (MOVDstore ptr (MOVDconst [0]) mem) +(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 => + (MOVWstore [4] ptr (MOVDconst [0]) + (MOVWstore ptr (MOVDconst [0]) mem)) +(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore [6] ptr (MOVDconst [0]) + (MOVHstore [4] ptr (MOVDconst [0]) 
+ (MOVHstore [2] ptr (MOVDconst [0]) + (MOVHstore ptr (MOVDconst [0]) mem)))) + +(Zero [3] ptr mem) => + (MOVBstore [2] ptr (MOVDconst [0]) + (MOVBstore [1] ptr (MOVDconst [0]) + (MOVBstore ptr (MOVDconst [0]) mem))) +(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore [4] ptr (MOVDconst [0]) + (MOVHstore [2] ptr (MOVDconst [0]) + (MOVHstore ptr (MOVDconst [0]) mem))) +(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 => + (MOVWstore [8] ptr (MOVDconst [0]) + (MOVWstore [4] ptr (MOVDconst [0]) + (MOVWstore ptr (MOVDconst [0]) mem))) +(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 => + (MOVDstore [8] ptr (MOVDconst [0]) + (MOVDstore ptr (MOVDconst [0]) mem)) +(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 => + (MOVDstore [16] ptr (MOVDconst [0]) + (MOVDstore [8] ptr (MOVDconst [0]) + (MOVDstore ptr (MOVDconst [0]) mem))) +(Zero [32] {t} ptr mem) && t.Alignment()%8 == 0 => + (MOVDstore [24] ptr (MOVDconst [0]) + (MOVDstore [16] ptr (MOVDconst [0]) + (MOVDstore [8] ptr (MOVDconst [0]) + (MOVDstore ptr (MOVDconst [0]) mem)))) + +// Medium 8-aligned zeroing uses a Duff's device +// 8 and 128 are magic constants, see runtime/mkduff.go +(Zero [s] {t} ptr mem) + && s%8 == 0 && s <= 8*128 + && t.Alignment()%8 == 0 && !config.noDuffDevice => + (DUFFZERO [8 * (128 - s/8)] ptr mem) + +// Generic zeroing uses a loop +(Zero [s] {t} ptr mem) => + (LoweredZero [t.Alignment()] + ptr + (ADD ptr (MOVDconst [s-moveSize(t.Alignment(), config)])) + mem) + +// Checks +(IsNonNil ...) => (SNEZ ...) +(IsInBounds ...) => (Less64U ...) +(IsSliceInBounds ...) => (Leq64U ...) + +// Trivial lowering +(NilCheck ...) => (LoweredNilCheck ...) +(GetClosurePtr ...) => (LoweredGetClosurePtr ...) +(GetCallerSP ...) => (LoweredGetCallerSP ...) +(GetCallerPC ...) => (LoweredGetCallerPC ...) + +// Write barrier. +(WB ...) => (LoweredWB ...) + +// Publication barrier as intrinsic +(PubBarrier ...) => (LoweredPubBarrier ...) 
+ +(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem) + +// Small moves +(Move [0] _ _ mem) => mem +(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem) +(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore dst (MOVHload src mem) mem) +(Move [2] dst src mem) => + (MOVBstore [1] dst (MOVBload [1] src mem) + (MOVBstore dst (MOVBload src mem) mem)) +(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 => + (MOVWstore dst (MOVWload src mem) mem) +(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore [2] dst (MOVHload [2] src mem) + (MOVHstore dst (MOVHload src mem) mem)) +(Move [4] dst src mem) => + (MOVBstore [3] dst (MOVBload [3] src mem) + (MOVBstore [2] dst (MOVBload [2] src mem) + (MOVBstore [1] dst (MOVBload [1] src mem) + (MOVBstore dst (MOVBload src mem) mem)))) +(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 => + (MOVDstore dst (MOVDload src mem) mem) +(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 => + (MOVWstore [4] dst (MOVWload [4] src mem) + (MOVWstore dst (MOVWload src mem) mem)) +(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore [6] dst (MOVHload [6] src mem) + (MOVHstore [4] dst (MOVHload [4] src mem) + (MOVHstore [2] dst (MOVHload [2] src mem) + (MOVHstore dst (MOVHload src mem) mem)))) + +(Move [3] dst src mem) => + (MOVBstore [2] dst (MOVBload [2] src mem) + (MOVBstore [1] dst (MOVBload [1] src mem) + (MOVBstore dst (MOVBload src mem) mem))) +(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore [4] dst (MOVHload [4] src mem) + (MOVHstore [2] dst (MOVHload [2] src mem) + (MOVHstore dst (MOVHload src mem) mem))) +(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 => + (MOVWstore [8] dst (MOVWload [8] src mem) + (MOVWstore [4] dst (MOVWload [4] src 
mem) + (MOVWstore dst (MOVWload src mem) mem))) +(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 => + (MOVDstore [8] dst (MOVDload [8] src mem) + (MOVDstore dst (MOVDload src mem) mem)) +(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 => + (MOVDstore [16] dst (MOVDload [16] src mem) + (MOVDstore [8] dst (MOVDload [8] src mem) + (MOVDstore dst (MOVDload src mem) mem))) +(Move [32] {t} dst src mem) && t.Alignment()%8 == 0 => + (MOVDstore [24] dst (MOVDload [24] src mem) + (MOVDstore [16] dst (MOVDload [16] src mem) + (MOVDstore [8] dst (MOVDload [8] src mem) + (MOVDstore dst (MOVDload src mem) mem)))) + +// Medium 8-aligned move uses a Duff's device +// 16 and 128 are magic constants, see runtime/mkduff.go +(Move [s] {t} dst src mem) + && s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 + && !config.noDuffDevice && logLargeCopy(v, s) => + (DUFFCOPY [16 * (128 - s/8)] dst src mem) + +// Generic move uses a loop +(Move [s] {t} dst src mem) && (s <= 16 || logLargeCopy(v, s)) => + (LoweredMove [t.Alignment()] + dst + src + (ADDI [s-moveSize(t.Alignment(), config)] src) + mem) + +// Boolean ops; 0=false, 1=true +(AndB ...) => (AND ...) +(OrB ...) => (OR ...) +(EqB x y) => (SEQZ (SUB x y)) +(NeqB x y) => (SNEZ (SUB x y)) +(Not ...) => (SEQZ ...) 
+ +// Lowering pointer arithmetic +// TODO: Special handling for SP offsets, like ARM +(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVaddr [int32(off)] ptr) +(OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr) +(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr) + +(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)]) +(Const32F [val]) => (FMVSX (MOVDconst [int64(math.Float32bits(val))])) +(Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))])) +(ConstNil) => (MOVDconst [0]) +(ConstBool [val]) => (MOVDconst [int64(b2i(val))]) + +(Addr {sym} base) => (MOVaddr {sym} [0] base) +(LocalAddr {sym} base mem) && t.Elem().HasPointers() => (MOVaddr {sym} (SPanchored base mem)) +(LocalAddr {sym} base _) && !t.Elem().HasPointers() => (MOVaddr {sym} base) + +// Calls +(StaticCall ...) => (CALLstatic ...) +(ClosureCall ...) => (CALLclosure ...) +(InterCall ...) => (CALLinter ...) +(TailCall ...) => (CALLtail ...) + +// Atomic Intrinsics +(AtomicLoad(Ptr|64|32|8) ...) => (LoweredAtomicLoad(64|64|32|8) ...) +(AtomicStore(PtrNoWB|64|32|8) ...) => (LoweredAtomicStore(64|64|32|8) ...) +(AtomicAdd(64|32) ...) => (LoweredAtomicAdd(64|32) ...) + +// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8))) +(AtomicAnd8 ptr val mem) => + (LoweredAtomicAnd32 (ANDI [^3] ptr) + (NOT (SLL (XORI [0xff] (ZeroExt8to32 val)) + (SLLI [3] (ANDI [3] ptr)))) mem) + +(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...) + +(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem) +(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...) + +(AtomicExchange(64|32) ...) => (LoweredAtomicExchange(64|32) ...) + +// AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3, uint32(val)<<((ptr&3)*8)) +(AtomicOr8 ptr val mem) => + (LoweredAtomicOr32 (ANDI [^3] ptr) + (SLL (ZeroExt8to32 val) + (SLLI [3] (ANDI [3] ptr))) mem) + +(AtomicOr32 ...) => (LoweredAtomicOr32 ...) 
+ +// Conditional branches +(If cond yes no) => (BNEZ (MOVBUreg cond) yes no) + +// Optimizations + +// Absorb SEQZ/SNEZ into branch. +(BEQZ (SEQZ x) yes no) => (BNEZ x yes no) +(BEQZ (SNEZ x) yes no) => (BEQZ x yes no) +(BNEZ (SEQZ x) yes no) => (BEQZ x yes no) +(BNEZ (SNEZ x) yes no) => (BNEZ x yes no) + +// Remove redundant NEG from BEQZ/BNEZ. +(BEQZ (NEG x) yes no) => (BEQZ x yes no) +(BNEZ (NEG x) yes no) => (BNEZ x yes no) + +// Negate comparison with FNES/FNED. +(BEQZ (FNES x y) yes no) => (BNEZ (FEQS x y) yes no) +(BNEZ (FNES x y) yes no) => (BEQZ (FEQS x y) yes no) +(BEQZ (FNED x y) yes no) => (BNEZ (FEQD x y) yes no) +(BNEZ (FNED x y) yes no) => (BEQZ (FEQD x y) yes no) + +// Convert BEQZ/BNEZ into more optimal branch conditions. +(BEQZ (SUB x y) yes no) => (BEQ x y yes no) +(BNEZ (SUB x y) yes no) => (BNE x y yes no) +(BEQZ (SLT x y) yes no) => (BGE x y yes no) +(BNEZ (SLT x y) yes no) => (BLT x y yes no) +(BEQZ (SLTU x y) yes no) => (BGEU x y yes no) +(BNEZ (SLTU x y) yes no) => (BLTU x y yes no) +(BEQZ (SLTI [x] y) yes no) => (BGE y (MOVDconst [x]) yes no) +(BNEZ (SLTI [x] y) yes no) => (BLT y (MOVDconst [x]) yes no) +(BEQZ (SLTIU [x] y) yes no) => (BGEU y (MOVDconst [x]) yes no) +(BNEZ (SLTIU [x] y) yes no) => (BLTU y (MOVDconst [x]) yes no) + +// Convert branch with zero to more optimal branch zero. +(BEQ (MOVDconst [0]) cond yes no) => (BEQZ cond yes no) +(BEQ cond (MOVDconst [0]) yes no) => (BEQZ cond yes no) +(BNE (MOVDconst [0]) cond yes no) => (BNEZ cond yes no) +(BNE cond (MOVDconst [0]) yes no) => (BNEZ cond yes no) +(BLT (MOVDconst [0]) cond yes no) => (BGTZ cond yes no) +(BLT cond (MOVDconst [0]) yes no) => (BLTZ cond yes no) +(BGE (MOVDconst [0]) cond yes no) => (BLEZ cond yes no) +(BGE cond (MOVDconst [0]) yes no) => (BGEZ cond yes no) + +// Remove redundant NEG from SEQZ/SNEZ. +(SEQZ (NEG x)) => (SEQZ x) +(SNEZ (NEG x)) => (SNEZ x) + +// Remove redundant SEQZ/SNEZ. 
+(SEQZ (SEQZ x)) => (SNEZ x) +(SEQZ (SNEZ x)) => (SEQZ x) +(SNEZ (SEQZ x)) => (SEQZ x) +(SNEZ (SNEZ x)) => (SNEZ x) + +// Store zero. +(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem) +(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem) +(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem) +(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem) + +// Boolean ops are already extended. +(MOVBUreg x:((FLES|FLTS|FEQS|FNES) _ _)) => x +(MOVBUreg x:((FLED|FLTD|FEQD|FNED) _ _)) => x +(MOVBUreg x:((SEQZ|SNEZ) _)) => x +(MOVBUreg x:((SLT|SLTU) _ _)) => x + +// Avoid extending when already sufficiently masked. +(MOVBreg x:(ANDI [c] y)) && c >= 0 && int64(int8(c)) == c => x +(MOVHreg x:(ANDI [c] y)) && c >= 0 && int64(int16(c)) == c => x +(MOVWreg x:(ANDI [c] y)) && c >= 0 && int64(int32(c)) == c => x +(MOVBUreg x:(ANDI [c] y)) && c >= 0 && int64(uint8(c)) == c => x +(MOVHUreg x:(ANDI [c] y)) && c >= 0 && int64(uint16(c)) == c => x +(MOVWUreg x:(ANDI [c] y)) && c >= 0 && int64(uint32(c)) == c => x + +// Combine masking and zero extension. +(MOVBUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint8(c))] x) +(MOVHUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint16(c))] x) +(MOVWUreg (ANDI [c] x)) && c < 0 => (AND (MOVDconst [int64(uint32(c))]) x) + +// Avoid sign/zero extension for consts. +(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))]) +(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))]) +(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))]) +(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))]) +(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))]) +(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))]) + +// Avoid sign/zero extension after properly typed load. 
+(MOVBreg x:(MOVBload _ _)) => (MOVDreg x) +(MOVHreg x:(MOVBload _ _)) => (MOVDreg x) +(MOVHreg x:(MOVBUload _ _)) => (MOVDreg x) +(MOVHreg x:(MOVHload _ _)) => (MOVDreg x) +(MOVWreg x:(MOVBload _ _)) => (MOVDreg x) +(MOVWreg x:(MOVBUload _ _)) => (MOVDreg x) +(MOVWreg x:(MOVHload _ _)) => (MOVDreg x) +(MOVWreg x:(MOVHUload _ _)) => (MOVDreg x) +(MOVWreg x:(MOVWload _ _)) => (MOVDreg x) +(MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x) +(MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x) +(MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x) +(MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x) +(MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x) +(MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x) + +// Avoid zero extension after properly typed atomic operation. +(MOVBUreg x:(Select0 (LoweredAtomicLoad8 _ _))) => (MOVDreg x) +(MOVBUreg x:(Select0 (LoweredAtomicCas32 _ _ _ _))) => (MOVDreg x) +(MOVBUreg x:(Select0 (LoweredAtomicCas64 _ _ _ _))) => (MOVDreg x) + +// Avoid sign extension after word arithmetic. +(MOVWreg x:(ADDIW _)) => (MOVDreg x) +(MOVWreg x:(SUBW _ _)) => (MOVDreg x) +(MOVWreg x:(NEGW _)) => (MOVDreg x) +(MOVWreg x:(MULW _ _)) => (MOVDreg x) +(MOVWreg x:(DIVW _ _)) => (MOVDreg x) +(MOVWreg x:(DIVUW _ _)) => (MOVDreg x) +(MOVWreg x:(REMW _ _)) => (MOVDreg x) +(MOVWreg x:(REMUW _ _)) => (MOVDreg x) + +// Fold double extensions. +(MOVBreg x:(MOVBreg _)) => (MOVDreg x) +(MOVHreg x:(MOVBreg _)) => (MOVDreg x) +(MOVHreg x:(MOVBUreg _)) => (MOVDreg x) +(MOVHreg x:(MOVHreg _)) => (MOVDreg x) +(MOVWreg x:(MOVBreg _)) => (MOVDreg x) +(MOVWreg x:(MOVBUreg _)) => (MOVDreg x) +(MOVWreg x:(MOVHreg _)) => (MOVDreg x) +(MOVWreg x:(MOVWreg _)) => (MOVDreg x) +(MOVBUreg x:(MOVBUreg _)) => (MOVDreg x) +(MOVHUreg x:(MOVBUreg _)) => (MOVDreg x) +(MOVHUreg x:(MOVHUreg _)) => (MOVDreg x) +(MOVWUreg x:(MOVBUreg _)) => (MOVDreg x) +(MOVWUreg x:(MOVHUreg _)) => (MOVDreg x) +(MOVWUreg x:(MOVWUreg _)) => (MOVDreg x) + +// Do not extend before store. 
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem) + +// Replace extend after load with alternate load where possible. +(MOVBreg x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload [off] {sym} ptr mem) +(MOVHreg x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload [off] {sym} ptr mem) +(MOVWreg x:(MOVWUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload [off] {sym} ptr mem) +(MOVBUreg x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload [off] {sym} ptr mem) +(MOVHUreg x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload [off] {sym} ptr mem) +(MOVWUreg x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload [off] {sym} ptr mem) + +// If a register move has only 1 use, just use the same register without emitting instruction +// MOVnop does not emit an instruction, only for ensuring the type. 
+(MOVDreg x) && x.Uses == 1 => (MOVDnop x) + +// TODO: we should be able to get rid of MOVDnop all together. +// But for now, this is enough to get rid of lots of them. +(MOVDnop (MOVDconst [c])) => (MOVDconst [c]) + +// Avoid unnecessary zero and sign extension when right shifting. +(SRAI [x] (MOVWreg y)) && x >= 0 && x <= 31 => (SRAIW [int64(x)] y) +(SRLI [x] (MOVWUreg y)) && x >= 0 && x <= 31 => (SRLIW [int64(x)] y) + +// Replace right shifts that exceed size of signed type. +(SRAI [x] (MOVBreg y)) && x >= 8 => (SRAI [63] (SLLI [56] y)) +(SRAI [x] (MOVHreg y)) && x >= 16 => (SRAI [63] (SLLI [48] y)) +(SRAI [x] (MOVWreg y)) && x >= 32 => (SRAIW [31] y) + +// Eliminate right shifts that exceed size of unsigned type. +(SRLI [x] (MOVBUreg y)) && x >= 8 => (MOVDconst [0]) +(SRLI [x] (MOVHUreg y)) && x >= 16 => (MOVDconst [0]) +(SRLI [x] (MOVWUreg y)) && x >= 32 => (MOVDconst [0]) + +// Fold constant into immediate instructions where possible. +(ADD (MOVDconst [val]) x) && is32Bit(val) && !t.IsPtr() => (ADDI [val] x) +(AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x) +(OR (MOVDconst [val]) x) && is32Bit(val) => (ORI [val] x) +(XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x) +(SLL x (MOVDconst [val])) => (SLLI [int64(val&63)] x) +(SRL x (MOVDconst [val])) => (SRLI [int64(val&63)] x) +(SRLW x (MOVDconst [val])) => (SRLIW [int64(val&31)] x) +(SRA x (MOVDconst [val])) => (SRAI [int64(val&63)] x) +(SRAW x (MOVDconst [val])) => (SRAIW [int64(val&31)] x) +(SLT x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTI [val] x) +(SLTU x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTIU [val] x) + +// Convert const subtraction into ADDI with negative immediate, where possible. +(SUB x (MOVDconst [val])) && is32Bit(-val) => (ADDI [-val] x) +(SUB (MOVDconst [val]) y) && is32Bit(-val) => (NEG (ADDI [-val] y)) + +// Subtraction of zero. +(SUB x (MOVDconst [0])) => x +(SUBW x (MOVDconst [0])) => (ADDIW [0] x) + +// Subtraction from zero. 
+(SUB (MOVDconst [0]) x) => (NEG x) +(SUBW (MOVDconst [0]) x) => (NEGW x) + +// Fold negation into subtraction. +(NEG (SUB x y)) => (SUB y x) +(NEG s:(ADDI [val] (SUB x y))) && s.Uses == 1 && is32Bit(-val) => (ADDI [-val] (SUB y x)) + +// Double negation. +(NEG (NEG x)) => x + +// Addition of zero or two constants. +(ADDI [0] x) => x +(ADDI [x] (MOVDconst [y])) && is32Bit(x + y) => (MOVDconst [x + y]) + +// ANDI with all zeros, all ones or two constants. +(ANDI [0] x) => (MOVDconst [0]) +(ANDI [-1] x) => x +(ANDI [x] (MOVDconst [y])) => (MOVDconst [x & y]) + +// ORI with all zeroes, all ones or two constants. +(ORI [0] x) => x +(ORI [-1] x) => (MOVDconst [-1]) +(ORI [x] (MOVDconst [y])) => (MOVDconst [x | y]) + +// Combine operations with immediate. +(ADDI [x] (ADDI [y] z)) && is32Bit(x + y) => (ADDI [x + y] z) +(ANDI [x] (ANDI [y] z)) => (ANDI [x & y] z) +(ORI [x] (ORI [y] z)) => (ORI [x | y] z) + +// Negation of a constant. +(NEG (MOVDconst [x])) => (MOVDconst [-x]) +(NEGW (MOVDconst [x])) => (MOVDconst [int64(int32(-x))]) + +// Shift of a constant. +(SLLI [x] (MOVDconst [y])) && is32Bit(y << uint32(x)) => (MOVDconst [y << uint32(x)]) +(SRLI [x] (MOVDconst [y])) => (MOVDconst [int64(uint64(y) >> uint32(x))]) +(SRAI [x] (MOVDconst [y])) => (MOVDconst [int64(y) >> uint32(x)]) + +// SLTI/SLTIU with constants. +(SLTI [x] (MOVDconst [y])) => (MOVDconst [b2i(int64(y) < int64(x))]) +(SLTIU [x] (MOVDconst [y])) => (MOVDconst [b2i(uint64(y) < uint64(x))]) + +// SLTI/SLTIU with known outcomes. +(SLTI [x] (ANDI [y] _)) && y >= 0 && int64(y) < int64(x) => (MOVDconst [1]) +(SLTIU [x] (ANDI [y] _)) && y >= 0 && uint64(y) < uint64(x) => (MOVDconst [1]) +(SLTI [x] (ORI [y] _)) && y >= 0 && int64(y) >= int64(x) => (MOVDconst [0]) +(SLTIU [x] (ORI [y] _)) && y >= 0 && uint64(y) >= uint64(x) => (MOVDconst [0]) + +// SLT/SLTU with known outcomes. 
+(SLT x x) => (MOVDconst [0]) +(SLTU x x) => (MOVDconst [0]) + +// Deadcode for LoweredMuluhilo +(Select0 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MULHU x y) +(Select1 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MUL x y) + +(FADD(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FMADD(S|D) x y a) +(FSUB(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FNMSUB(S|D) x y a) +(FSUB(S|D) (FMUL(S|D) x y) a) && a.Block.Func.useFMA(v) => (FMSUB(S|D) x y a) + +// Merge negation into fused multiply-add and multiply-subtract. +// +// Key: +// +// [+ -](x * y [+ -] z). +// _ N A S +// D U +// D B +// +// Note: multiplication commutativity handled by rule generator. +(F(MADD|NMADD|MSUB|NMSUB)S neg:(FNEGS x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)S x y z) +(F(MADD|NMADD|MSUB|NMSUB)S x y neg:(FNEGS z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)S x y z) +(F(MADD|NMADD|MSUB|NMSUB)D neg:(FNEGD x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)D x y z) +(F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go new file mode 100644 index 0000000000000000000000000000000000000000..93f20f8a99a138f2ed0a47ac85d39bd60600558c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go @@ -0,0 +1,492 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" +) + +// Notes: +// - Boolean types occupy the entire register. 0=false, 1=true. 
+ +// Suffixes encode the bit width of various instructions: +// +// D (double word) = 64 bit int +// W (word) = 32 bit int +// H (half word) = 16 bit int +// B (byte) = 8 bit int +// S (single) = 32 bit float +// D (double) = 64 bit float +// L = 64 bit int, used when the opcode starts with F + +const ( + riscv64REG_G = 27 + riscv64REG_CTXT = 26 + riscv64REG_LR = 1 + riscv64REG_SP = 2 + riscv64REG_GP = 3 + riscv64REG_TP = 4 + riscv64REG_TMP = 31 + riscv64REG_ZERO = 0 +) + +func riscv64RegName(r int) string { + switch { + case r == riscv64REG_G: + return "g" + case r == riscv64REG_SP: + return "SP" + case 0 <= r && r <= 31: + return fmt.Sprintf("X%d", r) + case 32 <= r && r <= 63: + return fmt.Sprintf("F%d", r-32) + default: + panic(fmt.Sprintf("unknown register %d", r)) + } +} + +func init() { + var regNamesRISCV64 []string + var gpMask, fpMask, gpgMask, gpspMask, gpspsbMask, gpspsbgMask regMask + regNamed := make(map[string]regMask) + + // Build the list of register names, creating an appropriately indexed + // regMask for the gp and fp registers as we go. + // + // If name is specified, use it rather than the riscv reg number. + addreg := func(r int, name string) regMask { + mask := regMask(1) << uint(len(regNamesRISCV64)) + if name == "" { + name = riscv64RegName(r) + } + regNamesRISCV64 = append(regNamesRISCV64, name) + regNamed[name] = mask + return mask + } + + // General purpose registers. + for r := 0; r <= 31; r++ { + if r == riscv64REG_LR { + // LR is not used by regalloc, so we skip it to leave + // room for pseudo-register SB. + continue + } + + mask := addreg(r, "") + + // Add general purpose registers to gpMask. + switch r { + // ZERO, GP, TP and TMP are not in any gp mask. 
+ case riscv64REG_ZERO, riscv64REG_GP, riscv64REG_TP, riscv64REG_TMP: + case riscv64REG_G: + gpgMask |= mask + gpspsbgMask |= mask + case riscv64REG_SP: + gpspMask |= mask + gpspsbMask |= mask + gpspsbgMask |= mask + default: + gpMask |= mask + gpgMask |= mask + gpspMask |= mask + gpspsbMask |= mask + gpspsbgMask |= mask + } + } + + // Floating pointer registers. + for r := 32; r <= 63; r++ { + mask := addreg(r, "") + fpMask |= mask + } + + // Pseudo-register: SB + mask := addreg(-1, "SB") + gpspsbMask |= mask + gpspsbgMask |= mask + + if len(regNamesRISCV64) > 64 { + // regMask is only 64 bits. + panic("Too many RISCV64 registers") + } + + regCtxt := regNamed["X26"] + callerSave := gpMask | fpMask | regNamed["g"] + + var ( + gpstore = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register + gpstore0 = regInfo{inputs: []regMask{gpspsbMask}} + gp01 = regInfo{outputs: []regMask{gpMask}} + gp11 = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}} + gp21 = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask}} + gp22 = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask, gpMask}} + gpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{gpMask}} + gp11sb = regInfo{inputs: []regMask{gpspsbMask}, outputs: []regMask{gpMask}} + gpxchg = regInfo{inputs: []regMask{gpspsbgMask, gpgMask}, outputs: []regMask{gpMask}} + gpcas = regInfo{inputs: []regMask{gpspsbgMask, gpgMask, gpgMask}, outputs: []regMask{gpMask}} + gpatomic = regInfo{inputs: []regMask{gpspsbgMask, gpgMask}} + + fp11 = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{fpMask}} + fp21 = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{fpMask}} + fp31 = regInfo{inputs: []regMask{fpMask, fpMask, fpMask}, outputs: []regMask{fpMask}} + gpfp = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{fpMask}} + fpgp = regInfo{inputs: 
[]regMask{fpMask}, outputs: []regMask{gpMask}} + fpstore = regInfo{inputs: []regMask{gpspsbMask, fpMask, 0}} + fpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{fpMask}} + fp2gp = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{gpMask}} + + call = regInfo{clobbers: callerSave} + callClosure = regInfo{inputs: []regMask{gpspMask, regCtxt, 0}, clobbers: callerSave} + callInter = regInfo{inputs: []regMask{gpMask}, clobbers: callerSave} + ) + + RISCV64ops := []opData{ + {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1 + {name: "ADDI", argLength: 1, reg: gp11sb, asm: "ADDI", aux: "Int64"}, // arg0 + auxint + {name: "ADDIW", argLength: 1, reg: gp11, asm: "ADDIW", aux: "Int64"}, // 32 low bits of arg0 + auxint, sign extended to 64 bits + {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0 + {name: "NEGW", argLength: 1, reg: gp11, asm: "NEGW"}, // -arg0 of 32 bits, sign extended to 64 bits + {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0 - arg1 + {name: "SUBW", argLength: 2, reg: gp21, asm: "SUBW"}, // 32 low bits of arg 0 - 32 low bits of arg 1, sign extended to 64 bits + + // M extension. H means high (i.e., it returns the top bits of + // the result). U means unsigned. W means word (i.e., 32-bit). 
+ {name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true, typ: "Int64"}, // arg0 * arg1 + {name: "MULW", argLength: 2, reg: gp21, asm: "MULW", commutative: true, typ: "Int32"}, + {name: "MULH", argLength: 2, reg: gp21, asm: "MULH", commutative: true, typ: "Int64"}, + {name: "MULHU", argLength: 2, reg: gp21, asm: "MULHU", commutative: true, typ: "UInt64"}, + {name: "LoweredMuluhilo", argLength: 2, reg: gp22, resultNotInArgs: true}, // arg0 * arg1, return (hi, lo) + {name: "LoweredMuluover", argLength: 2, reg: gp22, resultNotInArgs: true}, // arg0 * arg1, return (64 bits of arg0*arg1, overflow) + + {name: "DIV", argLength: 2, reg: gp21, asm: "DIV", typ: "Int64"}, // arg0 / arg1 + {name: "DIVU", argLength: 2, reg: gp21, asm: "DIVU", typ: "UInt64"}, + {name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", typ: "Int32"}, + {name: "DIVUW", argLength: 2, reg: gp21, asm: "DIVUW", typ: "UInt32"}, + {name: "REM", argLength: 2, reg: gp21, asm: "REM", typ: "Int64"}, // arg0 % arg1 + {name: "REMU", argLength: 2, reg: gp21, asm: "REMU", typ: "UInt64"}, + {name: "REMW", argLength: 2, reg: gp21, asm: "REMW", typ: "Int32"}, + {name: "REMUW", argLength: 2, reg: gp21, asm: "REMUW", typ: "UInt32"}, + + {name: "MOVaddr", argLength: 1, reg: gp11sb, asm: "MOV", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux + // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address + + {name: "MOVDconst", reg: gp01, asm: "MOV", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint + + // Loads: load bits from arg0+auxint+aux and extend to 64 bits; arg1=mem + {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // 8 bits, sign extend + {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // 16 bits, sign extend + {name: "MOVWload", 
argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // 32 bits, sign extend + {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOV", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"}, // 64 bits + {name: "MOVBUload", argLength: 2, reg: gpload, asm: "MOVBU", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // 8 bits, zero extend + {name: "MOVHUload", argLength: 2, reg: gpload, asm: "MOVHU", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // 16 bits, zero extend + {name: "MOVWUload", argLength: 2, reg: gpload, asm: "MOVWU", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // 32 bits, zero extend + + // Stores: store lowest bits in arg1 to arg0+auxint+aux; arg2=mem + {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 8 bits + {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 16 bits + {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits + {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOV", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits + + // Stores: store of zero in arg0+auxint+aux; arg1=mem + {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 8 bits + {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 16 bits + {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits + {name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOV", typ: "Mem", 
faultOnNilArg0: true, symEffect: "Write"}, // 64 bits + + // Conversions + {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte + {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half + {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word + {name: "MOVDreg", argLength: 1, reg: gp11, asm: "MOV"}, // move from arg0 + {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte + {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half + {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word + + {name: "MOVDnop", argLength: 1, reg: regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}, resultInArg0: true}, // nop, return arg0 in same register + + // Shift ops + {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << (aux1 & 63) + {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> (aux1 & 63), signed + {name: "SRAW", argLength: 2, reg: gp21, asm: "SRAW"}, // arg0 >> (aux1 & 31), signed + {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> (aux1 & 63), unsigned + {name: "SRLW", argLength: 2, reg: gp21, asm: "SRLW"}, // arg0 >> (aux1 & 31), unsigned + {name: "SLLI", argLength: 1, reg: gp11, asm: "SLLI", aux: "Int64"}, // arg0 << auxint, shift amount 0-63 + {name: "SRAI", argLength: 1, reg: gp11, asm: "SRAI", aux: "Int64"}, // arg0 >> auxint, signed, shift amount 0-63 + {name: "SRAIW", argLength: 1, reg: gp11, asm: "SRAIW", aux: "Int64"}, // arg0 >> auxint, signed, shift amount 0-31 + {name: "SRLI", argLength: 1, reg: gp11, asm: "SRLI", aux: "Int64"}, // arg0 >> auxint, unsigned, shift amount 0-63 + {name: "SRLIW", argLength: 1, reg: gp11, asm: "SRLIW", aux: "Int64"}, // arg0 >> auxint, unsigned, shift amount 0-31 + + // Bitwise ops + {name: "XOR", argLength: 
2, reg: gp21, asm: "XOR", commutative: true}, // arg0 ^ arg1 + {name: "XORI", argLength: 1, reg: gp11, asm: "XORI", aux: "Int64"}, // arg0 ^ auxint + {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1 + {name: "ORI", argLength: 1, reg: gp11, asm: "ORI", aux: "Int64"}, // arg0 | auxint + {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1 + {name: "ANDI", argLength: 1, reg: gp11, asm: "ANDI", aux: "Int64"}, // arg0 & auxint + {name: "NOT", argLength: 1, reg: gp11, asm: "NOT"}, // ^arg0 + + // Generate boolean values + {name: "SEQZ", argLength: 1, reg: gp11, asm: "SEQZ"}, // arg0 == 0, result is 0 or 1 + {name: "SNEZ", argLength: 1, reg: gp11, asm: "SNEZ"}, // arg0 != 0, result is 0 or 1 + {name: "SLT", argLength: 2, reg: gp21, asm: "SLT"}, // arg0 < arg1, result is 0 or 1 + {name: "SLTI", argLength: 1, reg: gp11, asm: "SLTI", aux: "Int64"}, // arg0 < auxint, result is 0 or 1 + {name: "SLTU", argLength: 2, reg: gp21, asm: "SLTU"}, // arg0 < arg1, unsigned, result is 0 or 1 + {name: "SLTIU", argLength: 1, reg: gp11, asm: "SLTIU", aux: "Int64"}, // arg0 < auxint, unsigned, result is 0 or 1 + + // Round ops to block fused-multiply-add extraction. + {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true}, + {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true}, + + // Calls + {name: "CALLstatic", argLength: -1, reg: call, aux: "CallOff", call: true}, // call static function aux.(*gc.Sym). last arg=mem, auxint=argsize, returns mem + {name: "CALLtail", argLength: -1, reg: call, aux: "CallOff", call: true, tailCall: true}, // tail call static function aux.(*gc.Sym). last arg=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: -1, reg: callClosure, aux: "CallOff", call: true}, // call function via closure. 
arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: -1, reg: callInter, aux: "CallOff", call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem + + // duffzero + // arg0 = address of memory to zero (in X25, changed as side effect) + // arg1 = mem + // auxint = offset into duffzero code to start executing + // X1 (link register) changed because of function call + // returns mem + { + name: "DUFFZERO", + aux: "Int64", + argLength: 2, + reg: regInfo{ + inputs: []regMask{regNamed["X25"]}, + clobbers: regNamed["X1"] | regNamed["X25"], + }, + typ: "Mem", + faultOnNilArg0: true, + }, + + // duffcopy + // arg0 = address of dst memory (in X25, changed as side effect) + // arg1 = address of src memory (in X24, changed as side effect) + // arg2 = mem + // auxint = offset into duffcopy code to start executing + // X1 (link register) changed because of function call + // returns mem + { + name: "DUFFCOPY", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{regNamed["X25"], regNamed["X24"]}, + clobbers: regNamed["X1"] | regNamed["X24"] | regNamed["X25"], + }, + typ: "Mem", + faultOnNilArg0: true, + faultOnNilArg1: true, + }, + + // Generic moves and zeros + + // general unaligned zeroing + // arg0 = address of memory to zero (in X5, changed as side effect) + // arg1 = address of the last element to zero (inclusive) + // arg2 = mem + // auxint = element size + // returns mem + // mov ZERO, (X5) + // ADD $sz, X5 + // BGEU Rarg1, X5, -2(PC) + { + name: "LoweredZero", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{regNamed["X5"], gpMask}, + clobbers: regNamed["X5"], + }, + typ: "Mem", + faultOnNilArg0: true, + }, + + // general unaligned move + // arg0 = address of dst memory (in X5, changed as side effect) + // arg1 = address of src memory (in X6, changed as side effect) + // arg2 = address of the last element of src (can't be X7 as we clobber it before using 
arg2) + // arg3 = mem + // auxint = alignment + // clobbers X7 as a tmp register. + // returns mem + // mov (X6), X7 + // mov X7, (X5) + // ADD $sz, X5 + // ADD $sz, X6 + // BGEU Rarg2, X5, -4(PC) + { + name: "LoweredMove", + aux: "Int64", + argLength: 4, + reg: regInfo{ + inputs: []regMask{regNamed["X5"], regNamed["X6"], gpMask &^ regNamed["X7"]}, + clobbers: regNamed["X5"] | regNamed["X6"] | regNamed["X7"], + }, + typ: "Mem", + faultOnNilArg0: true, + faultOnNilArg1: true, + }, + + // Atomic loads. + // load from arg0. arg1=mem. + // returns so they can be properly ordered with other loads. + {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true}, + {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true}, + {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true}, + + // Atomic stores. + // store arg1 to *arg0. arg2=mem. returns memory. + {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, + + // Atomic exchange. + // store arg1 to *arg0. arg2=mem. returns . + {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true}, + + // Atomic add. + // *arg0 += arg1. arg2=mem. returns . + {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // Atomic compare and swap. 
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. + // if *arg0 == arg1 { + // *arg0 = arg2 + // return (true, memory) + // } else { + // return (false, memory) + // } + // MOV $0, Rout + // LR (Rarg0), Rtmp + // BNE Rtmp, Rarg1, 3(PC) + // SC Rarg2, (Rarg0), Rtmp + // BNE Rtmp, ZERO, -3(PC) + // MOV $1, Rout + {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // Atomic 32 bit AND/OR. + // *arg0 &= (|=) arg1. arg2=mem. returns nil. + {name: "LoweredAtomicAnd32", argLength: 3, reg: gpatomic, asm: "AMOANDW", faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicOr32", argLength: 3, reg: gpatomic, asm: "AMOORW", faultOnNilArg0: true, hasSideEffects: true}, + + // Lowering pass-throughs + {name: "LoweredNilCheck", argLength: 2, faultOnNilArg0: true, nilCheck: true, reg: regInfo{inputs: []regMask{gpspMask}}}, // arg0=ptr,arg1=mem, returns void. Faults if ptr is nil. + {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{regCtxt}}}, // scheduler ensures only at beginning of entry block + + // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem. + {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true}, + + // LoweredGetCallerPC evaluates to the PC to which its "caller" will return. + // I.e., if f calls g "calls" getcallerpc, + // the result should be the PC within f that g will return to. + // See runtime/stubs.go for a more detailed discussion. + {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, + + // LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, auxint=# of buffer entries needed + // It saves all GP registers if necessary, + // but clobbers RA (LR) because it's a call + // and T6 (REG_TMP). 
+ // Returns a pointer to a write barrier buffer in X24. + {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ (gpMask | regNamed["g"])) | regNamed["X1"], outputs: []regMask{regNamed["X24"]}}, clobberFlags: true, aux: "Int64"}, + + // Do data barrier. arg0=memorys + {name: "LoweredPubBarrier", argLength: 1, asm: "FENCE", hasSideEffects: true}, + + // There are three of these functions so that they can have three different register inputs. + // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the + // default registers to match so we don't need to copy registers around unnecessarily. + {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X7"], regNamed["X28"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X6"], regNamed["X7"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X5"], regNamed["X6"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + + // F extension. 
+ {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true, typ: "Float32"}, // arg0 + arg1 + {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS", commutative: false, typ: "Float32"}, // arg0 - arg1 + {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true, typ: "Float32"}, // arg0 * arg1 + {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS", commutative: false, typ: "Float32"}, // arg0 / arg1 + {name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS", commutative: true, typ: "Float32"}, // (arg0 * arg1) + arg2 + {name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS", commutative: true, typ: "Float32"}, // (arg0 * arg1) - arg2 + {name: "FNMADDS", argLength: 3, reg: fp31, asm: "FNMADDS", commutative: true, typ: "Float32"}, // -(arg0 * arg1) + arg2 + {name: "FNMSUBS", argLength: 3, reg: fp31, asm: "FNMSUBS", commutative: true, typ: "Float32"}, // -(arg0 * arg1) - arg2 + {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS", typ: "Float32"}, // sqrt(arg0) + {name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS", typ: "Float32"}, // -arg0 + {name: "FMVSX", argLength: 1, reg: gpfp, asm: "FMVSX", typ: "Float32"}, // reinterpret arg0 as float + {name: "FCVTSW", argLength: 1, reg: gpfp, asm: "FCVTSW", typ: "Float32"}, // float32(low 32 bits of arg0) + {name: "FCVTSL", argLength: 1, reg: gpfp, asm: "FCVTSL", typ: "Float32"}, // float32(arg0) + {name: "FCVTWS", argLength: 1, reg: fpgp, asm: "FCVTWS", typ: "Int32"}, // int32(arg0) + {name: "FCVTLS", argLength: 1, reg: fpgp, asm: "FCVTLS", typ: "Int64"}, // int64(arg0) + {name: "FMOVWload", argLength: 2, reg: fpload, asm: "MOVF", aux: "SymOff", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load float32 from arg0+auxint+aux + {name: "FMOVWstore", argLength: 3, reg: fpstore, asm: "MOVF", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store float32 to arg0+auxint+aux + {name: "FEQS", argLength: 2, reg: fp2gp, asm: "FEQS", commutative: true}, // arg0 == 
arg1 + {name: "FNES", argLength: 2, reg: fp2gp, asm: "FNES", commutative: true}, // arg0 != arg1 + {name: "FLTS", argLength: 2, reg: fp2gp, asm: "FLTS"}, // arg0 < arg1 + {name: "FLES", argLength: 2, reg: fp2gp, asm: "FLES"}, // arg0 <= arg1 + + // D extension. + {name: "FADDD", argLength: 2, reg: fp21, asm: "FADDD", commutative: true, typ: "Float64"}, // arg0 + arg1 + {name: "FSUBD", argLength: 2, reg: fp21, asm: "FSUBD", commutative: false, typ: "Float64"}, // arg0 - arg1 + {name: "FMULD", argLength: 2, reg: fp21, asm: "FMULD", commutative: true, typ: "Float64"}, // arg0 * arg1 + {name: "FDIVD", argLength: 2, reg: fp21, asm: "FDIVD", commutative: false, typ: "Float64"}, // arg0 / arg1 + {name: "FMADDD", argLength: 3, reg: fp31, asm: "FMADDD", commutative: true, typ: "Float64"}, // (arg0 * arg1) + arg2 + {name: "FMSUBD", argLength: 3, reg: fp31, asm: "FMSUBD", commutative: true, typ: "Float64"}, // (arg0 * arg1) - arg2 + {name: "FNMADDD", argLength: 3, reg: fp31, asm: "FNMADDD", commutative: true, typ: "Float64"}, // -(arg0 * arg1) + arg2 + {name: "FNMSUBD", argLength: 3, reg: fp31, asm: "FNMSUBD", commutative: true, typ: "Float64"}, // -(arg0 * arg1) - arg2 + {name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD", typ: "Float64"}, // sqrt(arg0) + {name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD", typ: "Float64"}, // -arg0 + {name: "FABSD", argLength: 1, reg: fp11, asm: "FABSD", typ: "Float64"}, // abs(arg0) + {name: "FSGNJD", argLength: 2, reg: fp21, asm: "FSGNJD", typ: "Float64"}, // copy sign of arg1 to arg0 + {name: "FMVDX", argLength: 1, reg: gpfp, asm: "FMVDX", typ: "Float64"}, // reinterpret arg0 as float + {name: "FCVTDW", argLength: 1, reg: gpfp, asm: "FCVTDW", typ: "Float64"}, // float64(low 32 bits of arg0) + {name: "FCVTDL", argLength: 1, reg: gpfp, asm: "FCVTDL", typ: "Float64"}, // float64(arg0) + {name: "FCVTWD", argLength: 1, reg: fpgp, asm: "FCVTWD", typ: "Int32"}, // int32(arg0) + {name: "FCVTLD", argLength: 1, reg: fpgp, asm: "FCVTLD", typ: 
"Int64"}, // int64(arg0) + {name: "FCVTDS", argLength: 1, reg: fp11, asm: "FCVTDS", typ: "Float64"}, // float64(arg0) + {name: "FCVTSD", argLength: 1, reg: fp11, asm: "FCVTSD", typ: "Float32"}, // float32(arg0) + {name: "FMOVDload", argLength: 2, reg: fpload, asm: "MOVD", aux: "SymOff", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load float64 from arg0+auxint+aux + {name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store float6 to arg0+auxint+aux + {name: "FEQD", argLength: 2, reg: fp2gp, asm: "FEQD", commutative: true}, // arg0 == arg1 + {name: "FNED", argLength: 2, reg: fp2gp, asm: "FNED", commutative: true}, // arg0 != arg1 + {name: "FLTD", argLength: 2, reg: fp2gp, asm: "FLTD"}, // arg0 < arg1 + {name: "FLED", argLength: 2, reg: fp2gp, asm: "FLED"}, // arg0 <= arg1 + } + + RISCV64blocks := []blockData{ + {name: "BEQ", controls: 2}, + {name: "BNE", controls: 2}, + {name: "BLT", controls: 2}, + {name: "BGE", controls: 2}, + {name: "BLTU", controls: 2}, + {name: "BGEU", controls: 2}, + + {name: "BEQZ", controls: 1}, + {name: "BNEZ", controls: 1}, + {name: "BLEZ", controls: 1}, + {name: "BGEZ", controls: 1}, + {name: "BLTZ", controls: 1}, + {name: "BGTZ", controls: 1}, + } + + archs = append(archs, arch{ + name: "RISCV64", + pkg: "cmd/internal/obj/riscv", + genfile: "../../riscv64/ssa.go", + ops: RISCV64ops, + blocks: RISCV64blocks, + regnames: regNamesRISCV64, + gpregmask: gpMask, + fpregmask: fpMask, + framepointerreg: -1, // not used + // Integer parameters passed in register X10-X17, X8-X9, X18-X23 + ParamIntRegNames: "X10 X11 X12 X13 X14 X15 X16 X17 X8 X9 X18 X19 X20 X21 X22 X23", + // Float parameters passed in register F10-F17, F8-F9, F18-F23 + ParamFloatRegNames: "F10 F11 F12 F13 F14 F15 F16 F17 F8 F9 F18 F19 F20 F21 F22 F23", + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/RISCV64latelower.rules 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/RISCV64latelower.rules new file mode 100644 index 0000000000000000000000000000000000000000..cd55331dfd185003056e72666ea6fd1bee0c4975 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/RISCV64latelower.rules @@ -0,0 +1,19 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Fold constant shift with extension. +(SRAI [c] (MOVBreg x)) && c < 8 => (SRAI [56+c] (SLLI [56] x)) +(SRAI [c] (MOVHreg x)) && c < 16 => (SRAI [48+c] (SLLI [48] x)) +(SRAI [c] (MOVWreg x)) && c < 32 => (SRAI [32+c] (SLLI [32] x)) +(SRLI [c] (MOVBUreg x)) && c < 8 => (SRLI [56+c] (SLLI [56] x)) +(SRLI [c] (MOVHUreg x)) && c < 16 => (SRLI [48+c] (SLLI [48] x)) +(SRLI [c] (MOVWUreg x)) && c < 32 => (SRLI [32+c] (SLLI [32] x)) +(SLLI [c] (MOVBUreg x)) && c <= 56 => (SRLI [56-c] (SLLI [56] x)) +(SLLI [c] (MOVHUreg x)) && c <= 48 => (SRLI [48-c] (SLLI [48] x)) +(SLLI [c] (MOVWUreg x)) && c <= 32 => (SRLI [32-c] (SLLI [32] x)) + +// Shift by zero. +(SRAI [0] x) => x +(SRLI [0] x) => x +(SLLI [0] x) => x diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/S390X.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/S390X.rules new file mode 100644 index 0000000000000000000000000000000000000000..2a6d7e737cd187b202922a5393775e29d488585e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/S390X.rules @@ -0,0 +1,1368 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Lowering arithmetic +(Add(64|Ptr) ...) => (ADD ...) +(Add(32|16|8) ...) => (ADDW ...) +(Add32F x y) => (Select0 (FADDS x y)) +(Add64F x y) => (Select0 (FADD x y)) + +(Sub(64|Ptr) ...) => (SUB ...) +(Sub(32|16|8) ...) => (SUBW ...) 
+(Sub32F x y) => (Select0 (FSUBS x y)) +(Sub64F x y) => (Select0 (FSUB x y)) + +(Mul64 ...) => (MULLD ...) +(Mul(32|16|8) ...) => (MULLW ...) +(Mul32F ...) => (FMULS ...) +(Mul64F ...) => (FMUL ...) +(Mul64uhilo ...) => (MLGR ...) + +(Div32F ...) => (FDIVS ...) +(Div64F ...) => (FDIV ...) + +(Div64 x y) => (DIVD x y) +(Div64u ...) => (DIVDU ...) +// DIVW/DIVWU has a 64-bit dividend and a 32-bit divisor, +// so a sign/zero extension of the dividend is required. +(Div32 x y) => (DIVW (MOVWreg x) y) +(Div32u x y) => (DIVWU (MOVWZreg x) y) +(Div16 x y) => (DIVW (MOVHreg x) (MOVHreg y)) +(Div16u x y) => (DIVWU (MOVHZreg x) (MOVHZreg y)) +(Div8 x y) => (DIVW (MOVBreg x) (MOVBreg y)) +(Div8u x y) => (DIVWU (MOVBZreg x) (MOVBZreg y)) + +(Hmul(64|64u) ...) => (MULH(D|DU) ...) +(Hmul32 x y) => (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y))) +(Hmul32u x y) => (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y))) + +(Mod64 x y) => (MODD x y) +(Mod64u ...) => (MODDU ...) +// MODW/MODWU has a 64-bit dividend and a 32-bit divisor, +// so a sign/zero extension of the dividend is required. +(Mod32 x y) => (MODW (MOVWreg x) y) +(Mod32u x y) => (MODWU (MOVWZreg x) y) +(Mod16 x y) => (MODW (MOVHreg x) (MOVHreg y)) +(Mod16u x y) => (MODWU (MOVHZreg x) (MOVHZreg y)) +(Mod8 x y) => (MODW (MOVBreg x) (MOVBreg y)) +(Mod8u x y) => (MODWU (MOVBZreg x) (MOVBZreg y)) + +// (x + y) / 2 with x>=y -> (x - y) / 2 + y +(Avg64u x y) => (ADD (SRDconst (SUB x y) [1]) y) + +(And64 ...) => (AND ...) +(And(32|16|8) ...) => (ANDW ...) + +(Or64 ...) => (OR ...) +(Or(32|16|8) ...) => (ORW ...) + +(Xor64 ...) => (XOR ...) +(Xor(32|16|8) ...) => (XORW ...) + +(Neg64 ...) => (NEG ...) +(Neg(32|16|8) ...) => (NEGW ...) +(Neg32F ...) => (FNEGS ...) +(Neg64F ...) => (FNEG ...) + +(Com64 ...) => (NOT ...) +(Com(32|16|8) ...) => (NOTW ...) +(NOT x) => (XOR (MOVDconst [-1]) x) +(NOTW x) => (XORWconst [-1] x) + +// Lowering boolean ops +(AndB ...) => (ANDW ...) +(OrB ...) => (ORW ...) 
+(Not x) => (XORWconst [1] x) + +// Lowering pointer arithmetic +(OffPtr [off] ptr:(SP)) => (MOVDaddr [int32(off)] ptr) +(OffPtr [off] ptr) && is32Bit(off) => (ADDconst [int32(off)] ptr) +(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr) + +// TODO: optimize these cases? +(Ctz64NonZero ...) => (Ctz64 ...) +(Ctz32NonZero ...) => (Ctz32 ...) + +// Ctz(x) = 64 - findLeftmostOne((x-1)&^x) +(Ctz64 x) => (SUB (MOVDconst [64]) (FLOGR (AND (SUBconst [1] x) (NOT x)))) +(Ctz32 x) => (SUB (MOVDconst [64]) (FLOGR (MOVWZreg (ANDW (SUBWconst [1] x) (NOTW x))))) + +(BitLen64 x) => (SUB (MOVDconst [64]) (FLOGR x)) + +// POPCNT treats the input register as a vector of 8 bytes, producing +// a population count for each individual byte. For inputs larger than +// a single byte we therefore need to sum the individual bytes produced +// by the POPCNT instruction. For example, the following instruction +// sequence could be used to calculate the population count of a 4-byte +// value: +// +// MOVD $0x12345678, R1 // R1=0x12345678 <-- input +// POPCNT R1, R2 // R2=0x02030404 +// SRW $16, R2, R3 // R3=0x00000203 +// ADDW R2, R3, R4 // R4=0x02030607 +// SRW $8, R4, R5 // R5=0x00020306 +// ADDW R4, R5, R6 // R6=0x0205090d +// MOVBZ R6, R7 // R7=0x0000000d <-- result is 13 +// +(PopCount8 x) => (POPCNT (MOVBZreg x)) +(PopCount16 x) => (MOVBZreg (SumBytes2 (POPCNT x))) +(PopCount32 x) => (MOVBZreg (SumBytes4 (POPCNT x))) +(PopCount64 x) => (MOVBZreg (SumBytes8 (POPCNT x))) + +// SumBytes{2,4,8} pseudo operations sum the values of the rightmost +// 2, 4 or 8 bytes respectively. The result is a single byte however +// other bytes might contain junk so a zero extension is required if +// the desired output type is larger than 1 byte. +(SumBytes2 x) => (ADDW (SRWconst x [8]) x) +(SumBytes4 x) => (SumBytes2 (ADDW (SRWconst x [16]) x)) +(SumBytes8 x) => (SumBytes4 (ADDW (SRDconst x [32]) x)) + +(Bswap64 ...) => (MOVDBR ...) +(Bswap32 ...) => (MOVWBR ...) 
+ +// add with carry +(Select0 (Add64carry x y c)) + => (Select0 (ADDE x y (Select1 (ADDCconst c [-1])))) +(Select1 (Add64carry x y c)) + => (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) (Select1 (ADDE x y (Select1 (ADDCconst c [-1])))))) + +// subtract with borrow +(Select0 (Sub64borrow x y c)) + => (Select0 (SUBE x y (Select1 (SUBC (MOVDconst [0]) c)))) +(Select1 (Sub64borrow x y c)) + => (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) (Select1 (SUBE x y (Select1 (SUBC (MOVDconst [0]) c))))))) + +// math package intrinsics +(Sqrt ...) => (FSQRT ...) +(Floor x) => (FIDBR [7] x) +(Ceil x) => (FIDBR [6] x) +(Trunc x) => (FIDBR [5] x) +(RoundToEven x) => (FIDBR [4] x) +(Round x) => (FIDBR [1] x) +(FMA x y z) => (FMADD z x y) + +(Sqrt32 ...) => (FSQRTS ...) + +// Atomic loads and stores. +// The SYNC instruction (fast-BCR-serialization) prevents store-load +// reordering. Other sequences of memory operations (load-load, +// store-store and load-store) are already guaranteed not to be reordered. +(AtomicLoad(8|32|Acq32|64|Ptr) ptr mem) => (MOV(BZ|WZ|WZ|D|D)atomicload ptr mem) +(AtomicStore(8|32|64|PtrNoWB) ptr val mem) => (SYNC (MOV(B|W|D|D)atomicstore ptr val mem)) + +// Store-release doesn't require store-load ordering. +(AtomicStoreRel32 ptr val mem) => (MOVWatomicstore ptr val mem) + +// Atomic adds. +(AtomicAdd32 ptr val mem) => (AddTupleFirst32 val (LAA ptr val mem)) +(AtomicAdd64 ptr val mem) => (AddTupleFirst64 val (LAAG ptr val mem)) +(Select0 (AddTupleFirst32 val tuple)) => (ADDW val (Select0 tuple)) +(Select1 (AddTupleFirst32 _ tuple)) => (Select1 tuple) +(Select0 (AddTupleFirst64 val tuple)) => (ADD val (Select0 tuple)) +(Select1 (AddTupleFirst64 _ tuple)) => (Select1 tuple) + +// Atomic exchanges. +(AtomicExchange32 ptr val mem) => (LoweredAtomicExchange32 ptr val mem) +(AtomicExchange64 ptr val mem) => (LoweredAtomicExchange64 ptr val mem) + +// Atomic compare and swap. 
+(AtomicCompareAndSwap32 ptr old new_ mem) => (LoweredAtomicCas32 ptr old new_ mem) +(AtomicCompareAndSwap64 ptr old new_ mem) => (LoweredAtomicCas64 ptr old new_ mem) + +// Atomic and: *(*uint8)(ptr) &= val +// +// Round pointer down to nearest word boundary and pad value with ones before +// applying atomic AND operation to target word. +// +// *(*uint32)(ptr &^ 3) &= rotateleft(uint32(val) | 0xffffff00, ((3 << 3) ^ ((ptr & 3) << 3)) +// +(AtomicAnd8 ptr val mem) + => (LANfloor + ptr + (RLL + (ORWconst val [-1<<8]) + (RXSBG {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr)) + mem) + +// Atomic or: *(*uint8)(ptr) |= val +// +// Round pointer down to nearest word boundary and pad value with zeros before +// applying atomic OR operation to target word. +// +// *(*uint32)(ptr &^ 3) |= uint32(val) << ((3 << 3) ^ ((ptr & 3) << 3)) +// +(AtomicOr8 ptr val mem) + => (LAOfloor + ptr + (SLW + (MOVBZreg val) + (RXSBG {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr)) + mem) + +(AtomicAnd32 ...) => (LAN ...) +(AtomicOr32 ...) => (LAO ...) + +// Lowering extension +// Note: we always extend to 64 bits even though some ops don't need that many result bits. +(SignExt8to(16|32|64) ...) => (MOVBreg ...) +(SignExt16to(32|64) ...) => (MOVHreg ...) +(SignExt32to64 ...) => (MOVWreg ...) + +(ZeroExt8to(16|32|64) ...) => (MOVBZreg ...) +(ZeroExt16to(32|64) ...) => (MOVHZreg ...) +(ZeroExt32to64 ...) => (MOVWZreg ...) + +(Slicemask x) => (SRADconst (NEG x) [63]) + +// Lowering truncation +// Because we ignore high parts of registers, truncates are just copies. +(Trunc(16|32|64)to8 ...) => (Copy ...) +(Trunc(32|64)to16 ...) => (Copy ...) +(Trunc64to32 ...) => (Copy ...) + +// Lowering float <-> int +(Cvt32to32F ...) => (CEFBRA ...) +(Cvt32to64F ...) => (CDFBRA ...) +(Cvt64to32F ...) => (CEGBRA ...) +(Cvt64to64F ...) => (CDGBRA ...) + +(Cvt32Fto32 ...) => (CFEBRA ...) +(Cvt32Fto64 ...) => (CGEBRA ...) +(Cvt64Fto32 ...) => (CFDBRA ...) +(Cvt64Fto64 ...) => (CGDBRA ...) 
+ +// Lowering float <-> uint +(Cvt32Uto32F ...) => (CELFBR ...) +(Cvt32Uto64F ...) => (CDLFBR ...) +(Cvt64Uto32F ...) => (CELGBR ...) +(Cvt64Uto64F ...) => (CDLGBR ...) + +(Cvt32Fto32U ...) => (CLFEBR ...) +(Cvt32Fto64U ...) => (CLGEBR ...) +(Cvt64Fto32U ...) => (CLFDBR ...) +(Cvt64Fto64U ...) => (CLGDBR ...) + +// Lowering float32 <-> float64 +(Cvt32Fto64F ...) => (LDEBR ...) +(Cvt64Fto32F ...) => (LEDBR ...) + +(CvtBoolToUint8 ...) => (Copy ...) + +(Round(32|64)F ...) => (LoweredRound(32|64)F ...) + +// Lowering shifts + +// Lower bounded shifts first. No need to check shift value. +(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLD x y) +(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y) +(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y) +(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y) +(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD x y) +(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW x y) +(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVHZreg x) y) +(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVBZreg x) y) +(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAD x y) +(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y) +(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVHreg x) y) +(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVBreg x) y) + +// Unsigned shifts need to return 0 if shift amount is >= width of shifted value. +// result = shift >= 64 ? 
0 : arg << shift +(Lsh(64|32|16|8)x64 x y) => (LOCGR {s390x.GreaterOrEqual} (SL(D|W|W|W) x y) (MOVDconst [0]) (CMPUconst y [64])) +(Lsh(64|32|16|8)x32 x y) => (LOCGR {s390x.GreaterOrEqual} (SL(D|W|W|W) x y) (MOVDconst [0]) (CMPWUconst y [64])) +(Lsh(64|32|16|8)x16 x y) => (LOCGR {s390x.GreaterOrEqual} (SL(D|W|W|W) x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) +(Lsh(64|32|16|8)x8 x y) => (LOCGR {s390x.GreaterOrEqual} (SL(D|W|W|W) x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) + +(Rsh(64|32)Ux64 x y) => (LOCGR {s390x.GreaterOrEqual} (SR(D|W) x y) (MOVDconst [0]) (CMPUconst y [64])) +(Rsh(64|32)Ux32 x y) => (LOCGR {s390x.GreaterOrEqual} (SR(D|W) x y) (MOVDconst [0]) (CMPWUconst y [64])) +(Rsh(64|32)Ux16 x y) => (LOCGR {s390x.GreaterOrEqual} (SR(D|W) x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) +(Rsh(64|32)Ux8 x y) => (LOCGR {s390x.GreaterOrEqual} (SR(D|W) x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) + +(Rsh(16|8)Ux64 x y) => (LOCGR {s390x.GreaterOrEqual} (SRW (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPUconst y [64])) +(Rsh(16|8)Ux32 x y) => (LOCGR {s390x.GreaterOrEqual} (SRW (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst y [64])) +(Rsh(16|8)Ux16 x y) => (LOCGR {s390x.GreaterOrEqual} (SRW (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64])) +(Rsh(16|8)Ux8 x y) => (LOCGR {s390x.GreaterOrEqual} (SRW (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64])) + +// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value. +// We implement this by setting the shift value to 63 (all ones) if the shift value is more than 63. +// result = arg >> (shift >= 64 ? 
63 : shift) +(Rsh(64|32)x64 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPUconst y [64]))) +(Rsh(64|32)x32 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst y [64]))) +(Rsh(64|32)x16 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVHZreg y) [64]))) +(Rsh(64|32)x8 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVBZreg y) [64]))) + +(Rsh(16|8)x64 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPUconst y [64]))) +(Rsh(16|8)x32 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst y [64]))) +(Rsh(16|8)x16 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVHZreg y) [64]))) +(Rsh(16|8)x8 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} y (MOVDconst [63]) (CMPWUconst (MOVBZreg y) [64]))) + +// Lowering rotates +(RotateLeft8 x (MOVDconst [c])) => (Or8 (Lsh8x64 x (MOVDconst [c&7])) (Rsh8Ux64 x (MOVDconst [-c&7]))) +(RotateLeft16 x (MOVDconst [c])) => (Or16 (Lsh16x64 x (MOVDconst [c&15])) (Rsh16Ux64 x (MOVDconst [-c&15]))) +(RotateLeft32 ...) => (RLL ...) +(RotateLeft64 ...) => (RLLG ...) 
+ +// Lowering comparisons +(Less64 x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) +(Less32 x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) +(Less(16|8) x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B)reg x) (MOV(H|B)reg y))) +(Less64U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y)) +(Less32U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y)) +(Less(16|8)U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOV(H|B)Zreg x) (MOV(H|B)Zreg y))) +(Less64F x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) +(Less32F x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) + +(Leq64 x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) +(Leq32 x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) +(Leq(16|8) x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B)reg x) (MOV(H|B)reg y))) +(Leq64U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y)) +(Leq32U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y)) +(Leq(16|8)U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOV(H|B)Zreg x) (MOV(H|B)Zreg y))) +(Leq64F x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) +(Leq32F x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) + +(Eq(64|Ptr) x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) +(Eq32 x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) +(Eq(16|8|B) x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B|B)reg x) (MOV(H|B|B)reg y))) +(Eq64F x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) +(Eq32F x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) + +(Neq(64|Ptr) x y) => (LOCGR 
{s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y)) +(Neq32 x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y)) +(Neq(16|8|B) x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B|B)reg x) (MOV(H|B|B)reg y))) +(Neq64F x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y)) +(Neq32F x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y)) + +// Lowering loads +(Load ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem) +(Load ptr mem) && is32BitInt(t) && t.IsSigned() => (MOVWload ptr mem) +(Load ptr mem) && is32BitInt(t) && !t.IsSigned() => (MOVWZload ptr mem) +(Load ptr mem) && is16BitInt(t) && t.IsSigned() => (MOVHload ptr mem) +(Load ptr mem) && is16BitInt(t) && !t.IsSigned() => (MOVHZload ptr mem) +(Load ptr mem) && is8BitInt(t) && t.IsSigned() => (MOVBload ptr mem) +(Load ptr mem) && (t.IsBoolean() || (is8BitInt(t) && !t.IsSigned())) => (MOVBZload ptr mem) +(Load ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem) +(Load ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem) + +// Lowering stores +(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (FMOVDstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (FMOVSstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem) + +// Lowering moves + +// Load and store for small copies. 
+(Move [0] _ _ mem) => mem +(Move [1] dst src mem) => (MOVBstore dst (MOVBZload src mem) mem) +(Move [2] dst src mem) => (MOVHstore dst (MOVHZload src mem) mem) +(Move [4] dst src mem) => (MOVWstore dst (MOVWZload src mem) mem) +(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem) +(Move [16] dst src mem) => + (MOVDstore [8] dst (MOVDload [8] src mem) + (MOVDstore dst (MOVDload src mem) mem)) +(Move [24] dst src mem) => + (MOVDstore [16] dst (MOVDload [16] src mem) + (MOVDstore [8] dst (MOVDload [8] src mem) + (MOVDstore dst (MOVDload src mem) mem))) +(Move [3] dst src mem) => + (MOVBstore [2] dst (MOVBZload [2] src mem) + (MOVHstore dst (MOVHZload src mem) mem)) +(Move [5] dst src mem) => + (MOVBstore [4] dst (MOVBZload [4] src mem) + (MOVWstore dst (MOVWZload src mem) mem)) +(Move [6] dst src mem) => + (MOVHstore [4] dst (MOVHZload [4] src mem) + (MOVWstore dst (MOVWZload src mem) mem)) +(Move [7] dst src mem) => + (MOVBstore [6] dst (MOVBZload [6] src mem) + (MOVHstore [4] dst (MOVHZload [4] src mem) + (MOVWstore dst (MOVWZload src mem) mem))) + +// MVC for other moves. Use up to 4 instructions (sizes up to 1024 bytes). +(Move [s] dst src mem) && s > 0 && s <= 256 && logLargeCopy(v, s) => + (MVC [makeValAndOff(int32(s), 0)] dst src mem) +(Move [s] dst src mem) && s > 256 && s <= 512 && logLargeCopy(v, s) => + (MVC [makeValAndOff(int32(s)-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)) +(Move [s] dst src mem) && s > 512 && s <= 768 && logLargeCopy(v, s) => + (MVC [makeValAndOff(int32(s)-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))) +(Move [s] dst src mem) && s > 768 && s <= 1024 && logLargeCopy(v, s) => + (MVC [makeValAndOff(int32(s)-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))) + +// Move more than 1024 bytes using a loop. 
+(Move [s] dst src mem) && s > 1024 && logLargeCopy(v, s) => + (LoweredMove [s%256] dst src (ADD src (MOVDconst [(s/256)*256])) mem) + +// Lowering Zero instructions +(Zero [0] _ mem) => mem +(Zero [1] destptr mem) => (MOVBstoreconst [0] destptr mem) +(Zero [2] destptr mem) => (MOVHstoreconst [0] destptr mem) +(Zero [4] destptr mem) => (MOVWstoreconst [0] destptr mem) +(Zero [8] destptr mem) => (MOVDstoreconst [0] destptr mem) +(Zero [3] destptr mem) => + (MOVBstoreconst [makeValAndOff(0,2)] destptr + (MOVHstoreconst [0] destptr mem)) +(Zero [5] destptr mem) => + (MOVBstoreconst [makeValAndOff(0,4)] destptr + (MOVWstoreconst [0] destptr mem)) +(Zero [6] destptr mem) => + (MOVHstoreconst [makeValAndOff(0,4)] destptr + (MOVWstoreconst [0] destptr mem)) +(Zero [7] destptr mem) => + (MOVWstoreconst [makeValAndOff(0,3)] destptr + (MOVWstoreconst [0] destptr mem)) + +(Zero [s] destptr mem) && s > 0 && s <= 1024 => + (CLEAR [makeValAndOff(int32(s), 0)] destptr mem) + +// Zero more than 1024 bytes using a loop. +(Zero [s] destptr mem) && s > 1024 => + (LoweredZero [s%256] destptr (ADDconst destptr [(int32(s)/256)*256]) mem) + +// Lowering constants +(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)]) +(Const(32|64)F ...) => (FMOV(S|D)const ...) +(ConstNil) => (MOVDconst [0]) +(ConstBool [t]) => (MOVDconst [b2i(t)]) + +// Lowering calls +(StaticCall ...) => (CALLstatic ...) +(ClosureCall ...) => (CALLclosure ...) +(InterCall ...) => (CALLinter ...) +(TailCall ...) => (CALLtail ...) + +// Miscellaneous +(IsNonNil p) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0])) +(IsInBounds idx len) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len)) +(IsSliceInBounds idx len) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len)) +(NilCheck ...) => (LoweredNilCheck ...) +(GetG ...) => (LoweredGetG ...) +(GetClosurePtr ...) => (LoweredGetClosurePtr ...) +(GetCallerSP ...) => (LoweredGetCallerSP ...) +(GetCallerPC ...) 
=> (LoweredGetCallerPC ...) +(Addr {sym} base) => (MOVDaddr {sym} base) +(LocalAddr {sym} base mem) && t.Elem().HasPointers() => (MOVDaddr {sym} (SPanchored base mem)) +(LocalAddr {sym} base _) && !t.Elem().HasPointers() => (MOVDaddr {sym} base) +(ITab (Load ptr mem)) => (MOVDload ptr mem) + +// block rewrites +(If cond yes no) => (CLIJ {s390x.LessOrGreater} (MOVBZreg cond) [0] yes no) + +// Write barrier. +(WB ...) => (LoweredWB ...) + +(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem) + +// *************************** +// Above: lowering rules +// Below: optimizations +// *************************** +// TODO: Should the optimizations be a separate pass? + +// Note: when removing unnecessary sign/zero extensions. +// +// After a value is spilled it is restored using a sign- or zero-extension +// to register-width as appropriate for its type. For example, a uint8 will +// be restored using a MOVBZ (llgc) instruction which will zero extend the +// 8-bit value to 64-bits. +// +// This is a hazard when folding sign- and zero-extensions since we need to +// ensure not only that the value in the argument register is correctly +// extended but also that it will still be correctly extended if it is +// spilled and restored. +// +// In general this means we need type checks when the RHS of a rule is an +// OpCopy (i.e. "(... x:(...) ...) -> x"). + +// Merge double extensions. +(MOV(H|HZ)reg e:(MOV(B|BZ)reg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x) +(MOV(W|WZ)reg e:(MOV(B|BZ)reg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x) +(MOV(W|WZ)reg e:(MOV(H|HZ)reg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x) + +// Bypass redundant sign extensions. 
+(MOV(B|BZ)reg e:(MOVBreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x) +(MOV(B|BZ)reg e:(MOVHreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x) +(MOV(B|BZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x) +(MOV(H|HZ)reg e:(MOVHreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x) +(MOV(H|HZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x) +(MOV(W|WZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(W|WZ)reg x) + +// Bypass redundant zero extensions. +(MOV(B|BZ)reg e:(MOVBZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x) +(MOV(B|BZ)reg e:(MOVHZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x) +(MOV(B|BZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x) +(MOV(H|HZ)reg e:(MOVHZreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x) +(MOV(H|HZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x) +(MOV(W|WZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(W|WZ)reg x) + +// Remove zero extensions after zero extending load. +// Note: take care that if x is spilled it is restored correctly. +(MOV(B|H|W)Zreg x:(MOVBZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 1) => x +(MOV(H|W)Zreg x:(MOVHZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 2) => x +(MOVWZreg x:(MOVWZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 4) => x + +// Remove sign extensions after sign extending load. +// Note: take care that if x is spilled it is restored correctly. +(MOV(B|H|W)reg x:(MOVBload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x +(MOV(H|W)reg x:(MOVHload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x +(MOVWreg x:(MOVWload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x + +// Remove sign extensions after zero extending load. +// These type checks are probably unnecessary but do them anyway just in case. +(MOV(H|W)reg x:(MOVBZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 1) => x +(MOVWreg x:(MOVHZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 2) => x + +// Fold sign and zero extensions into loads. 
+// +// Note: The combined instruction must end up in the same block +// as the original load. If not, we end up making a value with +// memory type live in two different blocks, which can lead to +// multiple memory values alive simultaneously. +// +// Make sure we don't combine these ops if the load has another use. +// This prevents a single load from being split into multiple loads +// which then might return different values. See test/atomicload.go. +(MOV(B|H|W)Zreg x:(MOV(B|H|W)load [o] {s} p mem)) + && x.Uses == 1 + && clobber(x) + => @x.Block (MOV(B|H|W)Zload [o] {s} p mem) +(MOV(B|H|W)reg x:(MOV(B|H|W)Zload [o] {s} p mem)) + && x.Uses == 1 + && clobber(x) + => @x.Block (MOV(B|H|W)load [o] {s} p mem) + +// Remove zero extensions after argument load. +(MOVBZreg x:(Arg )) && !t.IsSigned() && t.Size() == 1 => x +(MOVHZreg x:(Arg )) && !t.IsSigned() && t.Size() <= 2 => x +(MOVWZreg x:(Arg )) && !t.IsSigned() && t.Size() <= 4 => x + +// Remove sign extensions after argument load. +(MOVBreg x:(Arg )) && t.IsSigned() && t.Size() == 1 => x +(MOVHreg x:(Arg )) && t.IsSigned() && t.Size() <= 2 => x +(MOVWreg x:(Arg )) && t.IsSigned() && t.Size() <= 4 => x + +// Fold zero extensions into constants. +(MOVBZreg (MOVDconst [c])) => (MOVDconst [int64( uint8(c))]) +(MOVHZreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))]) +(MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))]) + +// Fold sign extensions into constants. +(MOVBreg (MOVDconst [c])) => (MOVDconst [int64( int8(c))]) +(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))]) +(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))]) + +// Remove zero extension of conditional move. +// Note: only for MOVBZreg for now since it is added as part of 'if' statement lowering. +(MOVBZreg x:(LOCGR (MOVDconst [c]) (MOVDconst [d]) _)) + && int64(uint8(c)) == c + && int64(uint8(d)) == d + && (!x.Type.IsSigned() || x.Type.Size() > 1) + => x + +// Fold boolean tests into blocks. 
+// Note: this must match If statement lowering. +(CLIJ {s390x.LessOrGreater} (LOCGR {d} (MOVDconst [0]) (MOVDconst [x]) cmp) [0] yes no) + && int32(x) != 0 + => (BRC {d} cmp yes no) + +// Canonicalize BRC condition code mask by removing impossible conditions. +// Integer comparisons cannot generate the unordered condition. +(BRC {c} x:((CMP|CMPW|CMPU|CMPWU) _ _) yes no) && c&s390x.Unordered != 0 => (BRC {c&^s390x.Unordered} x yes no) +(BRC {c} x:((CMP|CMPW|CMPU|CMPWU)const _) yes no) && c&s390x.Unordered != 0 => (BRC {c&^s390x.Unordered} x yes no) + +// Compare-and-branch. +// Note: bit 3 (unordered) must not be set so we mask out s390x.Unordered. +(BRC {c} (CMP x y) yes no) => (CGRJ {c&^s390x.Unordered} x y yes no) +(BRC {c} (CMPW x y) yes no) => (CRJ {c&^s390x.Unordered} x y yes no) +(BRC {c} (CMPU x y) yes no) => (CLGRJ {c&^s390x.Unordered} x y yes no) +(BRC {c} (CMPWU x y) yes no) => (CLRJ {c&^s390x.Unordered} x y yes no) + +// Compare-and-branch (immediate). +// Note: bit 3 (unordered) must not be set so we mask out s390x.Unordered. +(BRC {c} (CMPconst x [y]) yes no) && y == int32( int8(y)) => (CGIJ {c&^s390x.Unordered} x [ int8(y)] yes no) +(BRC {c} (CMPWconst x [y]) yes no) && y == int32( int8(y)) => (CIJ {c&^s390x.Unordered} x [ int8(y)] yes no) +(BRC {c} (CMPUconst x [y]) yes no) && y == int32(uint8(y)) => (CLGIJ {c&^s390x.Unordered} x [uint8(y)] yes no) +(BRC {c} (CMPWUconst x [y]) yes no) && y == int32(uint8(y)) => (CLIJ {c&^s390x.Unordered} x [uint8(y)] yes no) + +// Absorb immediate into compare-and-branch. 
+(C(R|GR)J {c} x (MOVDconst [y]) yes no) && is8Bit(y) => (C(I|GI)J {c} x [ int8(y)] yes no) +(CL(R|GR)J {c} x (MOVDconst [y]) yes no) && isU8Bit(y) => (CL(I|GI)J {c} x [uint8(y)] yes no) +(C(R|GR)J {c} (MOVDconst [x]) y yes no) && is8Bit(x) => (C(I|GI)J {c.ReverseComparison()} y [ int8(x)] yes no) +(CL(R|GR)J {c} (MOVDconst [x]) y yes no) && isU8Bit(x) => (CL(I|GI)J {c.ReverseComparison()} y [uint8(x)] yes no) + +// Prefer comparison with immediate to compare-and-branch. +(CGRJ {c} x (MOVDconst [y]) yes no) && !is8Bit(y) && is32Bit(y) => (BRC {c} (CMPconst x [int32(y)]) yes no) +(CRJ {c} x (MOVDconst [y]) yes no) && !is8Bit(y) && is32Bit(y) => (BRC {c} (CMPWconst x [int32(y)]) yes no) +(CLGRJ {c} x (MOVDconst [y]) yes no) && !isU8Bit(y) && isU32Bit(y) => (BRC {c} (CMPUconst x [int32(y)]) yes no) +(CLRJ {c} x (MOVDconst [y]) yes no) && !isU8Bit(y) && isU32Bit(y) => (BRC {c} (CMPWUconst x [int32(y)]) yes no) +(CGRJ {c} (MOVDconst [x]) y yes no) && !is8Bit(x) && is32Bit(x) => (BRC {c.ReverseComparison()} (CMPconst y [int32(x)]) yes no) +(CRJ {c} (MOVDconst [x]) y yes no) && !is8Bit(x) && is32Bit(x) => (BRC {c.ReverseComparison()} (CMPWconst y [int32(x)]) yes no) +(CLGRJ {c} (MOVDconst [x]) y yes no) && !isU8Bit(x) && isU32Bit(x) => (BRC {c.ReverseComparison()} (CMPUconst y [int32(x)]) yes no) +(CLRJ {c} (MOVDconst [x]) y yes no) && !isU8Bit(x) && isU32Bit(x) => (BRC {c.ReverseComparison()} (CMPWUconst y [int32(x)]) yes no) + +// Absorb sign/zero extensions into 32-bit compare-and-branch. +(CIJ {c} (MOV(W|WZ)reg x) [y] yes no) => (CIJ {c} x [y] yes no) +(CLIJ {c} (MOV(W|WZ)reg x) [y] yes no) => (CLIJ {c} x [y] yes no) + +// Bring out-of-range signed immediates into range by varying branch condition. 
+(BRC {s390x.Less} (CMPconst x [ 128]) yes no) => (CGIJ {s390x.LessOrEqual} x [ 127] yes no) +(BRC {s390x.Less} (CMPWconst x [ 128]) yes no) => (CIJ {s390x.LessOrEqual} x [ 127] yes no) +(BRC {s390x.LessOrEqual} (CMPconst x [-129]) yes no) => (CGIJ {s390x.Less} x [-128] yes no) +(BRC {s390x.LessOrEqual} (CMPWconst x [-129]) yes no) => (CIJ {s390x.Less} x [-128] yes no) +(BRC {s390x.Greater} (CMPconst x [-129]) yes no) => (CGIJ {s390x.GreaterOrEqual} x [-128] yes no) +(BRC {s390x.Greater} (CMPWconst x [-129]) yes no) => (CIJ {s390x.GreaterOrEqual} x [-128] yes no) +(BRC {s390x.GreaterOrEqual} (CMPconst x [ 128]) yes no) => (CGIJ {s390x.Greater} x [ 127] yes no) +(BRC {s390x.GreaterOrEqual} (CMPWconst x [ 128]) yes no) => (CIJ {s390x.Greater} x [ 127] yes no) + +// Bring out-of-range unsigned immediates into range by varying branch condition. +(BRC {s390x.Less} (CMP(WU|U)const x [256]) yes no) => (C(L|LG)IJ {s390x.LessOrEqual} x [255] yes no) +(BRC {s390x.GreaterOrEqual} (CMP(WU|U)const x [256]) yes no) => (C(L|LG)IJ {s390x.Greater} x [255] yes no) + +// Bring out-of-range immediates into range by switching signedness (only == and !=). +(BRC {c} (CMPconst x [y]) yes no) && y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CLGIJ {c} x [uint8(y)] yes no) +(BRC {c} (CMPWconst x [y]) yes no) && y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CLIJ {c} x [uint8(y)] yes no) +(BRC {c} (CMPUconst x [y]) yes no) && y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CGIJ {c} x [ int8(y)] yes no) +(BRC {c} (CMPWUconst x [y]) yes no) && y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CIJ {c} x [ int8(y)] yes no) + +// Fold constants into instructions. 
+(ADD x (MOVDconst [c])) && is32Bit(c) && !t.IsPtr() => (ADDconst [int32(c)] x) +(ADDW x (MOVDconst [c])) => (ADDWconst [int32(c)] x) + +(SUB x (MOVDconst [c])) && is32Bit(c) => (SUBconst x [int32(c)]) +(SUB (MOVDconst [c]) x) && is32Bit(c) => (NEG (SUBconst x [int32(c)])) +(SUBW x (MOVDconst [c])) => (SUBWconst x [int32(c)]) +(SUBW (MOVDconst [c]) x) => (NEGW (SUBWconst x [int32(c)])) + +(MULLD x (MOVDconst [c])) && is32Bit(c) => (MULLDconst [int32(c)] x) +(MULLW x (MOVDconst [c])) => (MULLWconst [int32(c)] x) + +// NILF instructions leave the high 32 bits unchanged which is +// equivalent to the leftmost 32 bits being set. +// TODO(mundaym): modify the assembler to accept 64-bit values +// and use isU32Bit(^c). +(AND x (MOVDconst [c])) + && s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)) != nil + => (RISBGZ x {*s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c))}) +(AND x (MOVDconst [c])) + && is32Bit(c) + && c < 0 + => (ANDconst [c] x) +(AND x (MOVDconst [c])) + && is32Bit(c) + && c >= 0 + => (MOVWZreg (ANDWconst [int32(c)] x)) + +(ANDW x (MOVDconst [c])) => (ANDWconst [int32(c)] x) + +((AND|ANDW)const [c] ((AND|ANDW)const [d] x)) => ((AND|ANDW)const [c&d] x) + +((OR|XOR) x (MOVDconst [c])) && isU32Bit(c) => ((OR|XOR)const [c] x) +((OR|XOR)W x (MOVDconst [c])) => ((OR|XOR)Wconst [int32(c)] x) + +// Constant shifts. +(S(LD|RD|RAD) x (MOVDconst [c])) => (S(LD|RD|RAD)const x [uint8(c&63)]) +(S(LW|RW|RAW) x (MOVDconst [c])) && c&32 == 0 => (S(LW|RW|RAW)const x [uint8(c&31)]) +(S(LW|RW) _ (MOVDconst [c])) && c&32 != 0 => (MOVDconst [0]) +(SRAW x (MOVDconst [c])) && c&32 != 0 => (SRAWconst x [31]) + +// Shifts only use the rightmost 6 bits of the shift value. 
+(S(LD|RD|RAD|LW|RW|RAW) x (RISBGZ y {r})) + && r.Amount == 0 + && r.OutMask()&63 == 63 + => (S(LD|RD|RAD|LW|RW|RAW) x y) +(S(LD|RD|RAD|LW|RW|RAW) x (AND (MOVDconst [c]) y)) + => (S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst [int32(c&63)] y)) +(S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst [c] y)) && c&63 == 63 + => (S(LD|RD|RAD|LW|RW|RAW) x y) +(SLD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SLD x y) +(SRD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRD x y) +(SRAD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRAD x y) +(SLW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SLW x y) +(SRW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRW x y) +(SRAW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRAW x y) + +// Match rotate by constant. +(RLLG x (MOVDconst [c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, uint8(c&63))}) +(RLL x (MOVDconst [c])) => (RLLconst x [uint8(c&31)]) + +// Signed 64-bit comparison with immediate. +(CMP x (MOVDconst [c])) && is32Bit(c) => (CMPconst x [int32(c)]) +(CMP (MOVDconst [c]) x) && is32Bit(c) => (InvertFlags (CMPconst x [int32(c)])) + +// Unsigned 64-bit comparison with immediate. +(CMPU x (MOVDconst [c])) && isU32Bit(c) => (CMPUconst x [int32(c)]) +(CMPU (MOVDconst [c]) x) && isU32Bit(c) => (InvertFlags (CMPUconst x [int32(c)])) + +// Signed and unsigned 32-bit comparison with immediate. +(CMP(W|WU) x (MOVDconst [c])) => (CMP(W|WU)const x [int32(c)]) +(CMP(W|WU) (MOVDconst [c]) x) => (InvertFlags (CMP(W|WU)const x [int32(c)])) + +// Match (x >> c) << d to 'rotate then insert selected bits [into zero]'. +(SLDconst (SRDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(uint8(max8(0, int8(c-d))), 63-d, uint8(int8(d-c)&63))}) + +// Match (x << c) >> d to 'rotate then insert selected bits [into zero]'. +(SRDconst (SLDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(d, uint8(min8(63, int8(63-c+d))), uint8(int8(c-d)&63))}) + +// Absorb input zero extension into 'rotate then insert selected bits [into zero]'. 
+(RISBGZ (MOVWZreg x) {r}) && r.InMerge(0xffffffff) != nil => (RISBGZ x {*r.InMerge(0xffffffff)}) +(RISBGZ (MOVHZreg x) {r}) && r.InMerge(0x0000ffff) != nil => (RISBGZ x {*r.InMerge(0x0000ffff)}) +(RISBGZ (MOVBZreg x) {r}) && r.InMerge(0x000000ff) != nil => (RISBGZ x {*r.InMerge(0x000000ff)}) + +// Absorb 'rotate then insert selected bits [into zero]' into zero extension. +(MOVWZreg (RISBGZ x {r})) && r.OutMerge(0xffffffff) != nil => (RISBGZ x {*r.OutMerge(0xffffffff)}) +(MOVHZreg (RISBGZ x {r})) && r.OutMerge(0x0000ffff) != nil => (RISBGZ x {*r.OutMerge(0x0000ffff)}) +(MOVBZreg (RISBGZ x {r})) && r.OutMerge(0x000000ff) != nil => (RISBGZ x {*r.OutMerge(0x000000ff)}) + +// Absorb shift into 'rotate then insert selected bits [into zero]'. +// +// Any unsigned shift can be represented as a rotate and mask operation: +// +// x << c => RotateLeft64(x, c) & (^uint64(0) << c) +// x >> c => RotateLeft64(x, -c) & (^uint64(0) >> c) +// +// Therefore when a shift is used as the input to a rotate then insert +// selected bits instruction we can merge the two together. We just have +// to be careful that the resultant mask is representable (non-zero and +// contiguous). 
For example, assuming that x is variable and c, y and m +// are constants, a shift followed by a rotate then insert selected bits +// could be represented as: +// +// RotateLeft64(RotateLeft64(x, c) & (^uint64(0) << c), y) & m +// +// We can split the rotation by y into two, one rotate for x and one for +// the mask: +// +// RotateLeft64(RotateLeft64(x, c), y) & (RotateLeft64(^uint64(0) << c, y)) & m +// +// The rotations of x by c followed by y can then be combined: +// +// RotateLeft64(x, c+y) & (RotateLeft64(^uint64(0) << c, y)) & m +// ^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// rotate mask +// +// To perform this optimization we therefore just need to check that it +// is valid to merge the shift mask (^(uint64(0)< (RISBGZ x {(*r.InMerge(^uint64(0)<>c) != nil => (RISBGZ x {(*r.InMerge(^uint64(0)>>c)).RotateLeft(-c)}) + +// Absorb 'rotate then insert selected bits [into zero]' into left shift. +(SLDconst (RISBGZ x {r}) [c]) + && s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask()) != nil + => (RISBGZ x {(*s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask())).RotateLeft(r.Amount)}) + +// Absorb 'rotate then insert selected bits [into zero]' into right shift. +(SRDconst (RISBGZ x {r}) [c]) + && s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask()) != nil + => (RISBGZ x {(*s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask())).RotateLeft(r.Amount)}) + +// Merge 'rotate then insert selected bits [into zero]' instructions together. +(RISBGZ (RISBGZ x {y}) {z}) + && z.InMerge(y.OutMask()) != nil + => (RISBGZ x {(*z.InMerge(y.OutMask())).RotateLeft(y.Amount)}) + +// Convert RISBGZ into 64-bit shift (helps CSE). +(RISBGZ x {r}) && r.End == 63 && r.Start == -r.Amount&63 => (SRDconst x [-r.Amount&63]) +(RISBGZ x {r}) && r.Start == 0 && r.End == 63-r.Amount => (SLDconst x [r.Amount]) + +// Optimize single bit isolation when it is known to be equivalent to +// the most significant bit due to mask produced by arithmetic shift. 
+// Simply isolate the most significant bit itself and place it in the +// correct position. +// +// Example: (int64(x) >> 63) & 0x8 -> RISBGZ $60, $60, $4, Rsrc, Rdst +(RISBGZ (SRADconst x [c]) {r}) + && r.Start == r.End // single bit selected + && (r.Start+r.Amount)&63 <= c // equivalent to most significant bit of x + => (RISBGZ x {s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)}) + +// Canonicalize the order of arguments to comparisons - helps with CSE. +((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x)) + +// Use sign/zero extend instead of RISBGZ. +(RISBGZ x {r}) && r == s390x.NewRotateParams(56, 63, 0) => (MOVBZreg x) +(RISBGZ x {r}) && r == s390x.NewRotateParams(48, 63, 0) => (MOVHZreg x) +(RISBGZ x {r}) && r == s390x.NewRotateParams(32, 63, 0) => (MOVWZreg x) + +// Use sign/zero extend instead of ANDW. +(ANDWconst [0x00ff] x) => (MOVBZreg x) +(ANDWconst [0xffff] x) => (MOVHZreg x) + +// Strength reduce multiplication to the sum (or difference) of two powers of two. +// +// Examples: +// 5x -> 4x + 1x +// 10x -> 8x + 2x +// 120x -> 128x - 8x +// -120x -> 8x - 128x +// +// We know that the rightmost bit of any positive value, once isolated, must either +// be a power of 2 (because it is a single bit) or 0 (if the original value is 0). +// In all of these rules we use a rightmost bit calculation to determine one operand +// for the addition or subtraction. We then just need to calculate if the other +// operand is a valid power of 2 before we can match the rule. 
+// +// Notes: +// - the generic rules have already matched single powers of two so we ignore them here +// - isPowerOfTwo32 asserts that its argument is greater than 0 +// - c&(c-1) = clear rightmost bit +// - c&^(c-1) = isolate rightmost bit + +// c = 2ˣ + 2ʸ => c - 2ˣ = 2ʸ +(MULL(D|W)const x [c]) && isPowerOfTwo32(c&(c-1)) + => ((ADD|ADDW) (SL(D|W)const x [uint8(log32(c&(c-1)))]) + (SL(D|W)const x [uint8(log32(c&^(c-1)))])) + +// c = 2ʸ - 2ˣ => c + 2ˣ = 2ʸ +(MULL(D|W)const x [c]) && isPowerOfTwo32(c+(c&^(c-1))) + => ((SUB|SUBW) (SL(D|W)const x [uint8(log32(c+(c&^(c-1))))]) + (SL(D|W)const x [uint8(log32(c&^(c-1)))])) + +// c = 2ˣ - 2ʸ => -c + 2ˣ = 2ʸ +(MULL(D|W)const x [c]) && isPowerOfTwo32(-c+(-c&^(-c-1))) + => ((SUB|SUBW) (SL(D|W)const x [uint8(log32(-c&^(-c-1)))]) + (SL(D|W)const x [uint8(log32(-c+(-c&^(-c-1))))])) + +// Fold ADD into MOVDaddr. Odd offsets from SB shouldn't be folded (LARL can't handle them). +(ADDconst [c] (MOVDaddr [d] {s} x:(SB))) && ((c+d)&1 == 0) && is32Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x) +(ADDconst [c] (MOVDaddr [d] {s} x)) && x.Op != OpSB && is20Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x) +(ADD idx (MOVDaddr [c] {s} ptr)) && ptr.Op != OpSB => (MOVDaddridx [c] {s} ptr idx) + +// fold ADDconst into MOVDaddrx +(ADDconst [c] (MOVDaddridx [d] {s} x y)) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y) +(MOVDaddridx [c] {s} (ADDconst [d] x) y) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y) +(MOVDaddridx [c] {s} x (ADDconst [d] y)) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y) + +// reverse ordering of compare instruction +(LOCGR {c} x y (InvertFlags cmp)) => (LOCGR {c.ReverseComparison()} x y cmp) + +// replace load from same location as preceding store with copy +(MOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x +(MOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWreg x) +(MOVHload [off] {sym} ptr1 
(MOVHstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVHreg x) +(MOVBload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVBreg x) +(MOVWZload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWZreg x) +(MOVHZload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVHZreg x) +(MOVBZload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVBZreg x) +(MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (LGDR x) +(FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (LDGR x) +(FMOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x +(FMOVSload [off] {sym} ptr1 (FMOVSstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x + +// prefer FPR <-> GPR moves over combined load ops +(MULLDload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (MULLD x (LGDR y)) +(ADDload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (ADD x (LGDR y)) +(SUBload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (SUB x (LGDR y)) +(ORload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (OR x (LGDR y)) +(ANDload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (AND x (LGDR y)) +(XORload [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (XOR x (LGDR y)) + +// detect attempts to set/clear the sign bit +// may need to be reworked when NIHH/OIHH are added +(RISBGZ (LGDR x) {r}) && r == s390x.NewRotateParams(1, 63, 0) => (LGDR (LPDFR x)) +(LDGR (RISBGZ x {r})) && r == s390x.NewRotateParams(1, 63, 0) => (LPDFR (LDGR x)) +(OR (MOVDconst [-1<<63]) (LGDR x)) => (LGDR (LNDFR x)) +(LDGR (OR (MOVDconst [-1<<63]) x)) => (LNDFR (LDGR x)) + +// detect attempts to set the 
sign bit with load +(LDGR x:(ORload [off] {sym} (MOVDconst [-1<<63]) ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (LNDFR (LDGR (MOVDload [off] {sym} ptr mem))) + +// detect copysign +(OR (RISBGZ (LGDR x) {r}) (LGDR (LPDFR y))) + && r == s390x.NewRotateParams(0, 0, 0) + => (LGDR (CPSDR y x)) +(OR (RISBGZ (LGDR x) {r}) (MOVDconst [c])) + && c >= 0 + && r == s390x.NewRotateParams(0, 0, 0) + => (LGDR (CPSDR (FMOVDconst [math.Float64frombits(uint64(c))]) x)) +(CPSDR y (FMOVDconst [c])) && !math.Signbit(c) => (LPDFR y) +(CPSDR y (FMOVDconst [c])) && math.Signbit(c) => (LNDFR y) + +// absorb negations into set/clear sign bit +(FNEG (LPDFR x)) => (LNDFR x) +(FNEG (LNDFR x)) => (LPDFR x) +(FNEGS (LPDFR x)) => (LNDFR x) +(FNEGS (LNDFR x)) => (LPDFR x) + +// no need to convert float32 to float64 to set/clear sign bit +(LEDBR (LPDFR (LDEBR x))) => (LPDFR x) +(LEDBR (LNDFR (LDEBR x))) => (LNDFR x) + +// remove unnecessary FPR <-> GPR moves +(LDGR (LGDR x)) => x +(LGDR (LDGR x)) => x + +// Don't extend before storing +(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWZreg x) mem) => (MOVWstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHZreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBZreg x) mem) => (MOVBstore [off] {sym} ptr x mem) + +// Fold constants into memory operations. +// Note that this is not always a good idea because if not all the uses of +// the ADDconst get eliminated, we still have to compute the ADDconst and we now +// have potentially two live values (ptr and (ADDconst [off] ptr)) instead of one. +// Nevertheless, let's do it! 
+(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVDload [off1+off2] {sym} ptr mem) +(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWload [off1+off2] {sym} ptr mem) +(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHload [off1+off2] {sym} ptr mem) +(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBload [off1+off2] {sym} ptr mem) +(MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWZload [off1+off2] {sym} ptr mem) +(MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHZload [off1+off2] {sym} ptr mem) +(MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBZload [off1+off2] {sym} ptr mem) +(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVSload [off1+off2] {sym} ptr mem) +(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVDload [off1+off2] {sym} ptr mem) + +(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVDstore [off1+off2] {sym} ptr val mem) +(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWstore [off1+off2] {sym} ptr val mem) +(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHstore [off1+off2] {sym} ptr val mem) +(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBstore [off1+off2] {sym} ptr val mem) +(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVSstore [off1+off2] {sym} ptr val mem) +(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVDstore [off1+off2] {sym} ptr val mem) + +(ADDload [off1] 
{sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ADDload [off1+off2] {sym} x ptr mem) +(ADDWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ADDWload [off1+off2] {sym} x ptr mem) +(MULLDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (MULLDload [off1+off2] {sym} x ptr mem) +(MULLWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (MULLWload [off1+off2] {sym} x ptr mem) +(SUBload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (SUBload [off1+off2] {sym} x ptr mem) +(SUBWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (SUBWload [off1+off2] {sym} x ptr mem) + +(ANDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ANDload [off1+off2] {sym} x ptr mem) +(ANDWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ANDWload [off1+off2] {sym} x ptr mem) +(ORload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ORload [off1+off2] {sym} x ptr mem) +(ORWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ORWload [off1+off2] {sym} x ptr mem) +(XORload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (XORload [off1+off2] {sym} x ptr mem) +(XORWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (XORWload [off1+off2] {sym} x ptr mem) + +// Fold constants into stores. 
+(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB => + (MOVDstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) +(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB => + (MOVWstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem) +(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && isU12Bit(int64(off)) && ptr.Op != OpSB => + (MOVHstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem) +(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && is20Bit(int64(off)) && ptr.Op != OpSB => + (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem) + +// Fold address offsets into constant stores. +(MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) => + (MOVDstoreconst [sc.addOffset32(off)] {s} ptr mem) +(MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) => + (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem) +(MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) => + (MOVHstoreconst [sc.addOffset32(off)] {s} ptr mem) +(MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) && is20Bit(sc.Off64()+int64(off)) => + (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem) + +// Merge address calculations into loads and stores. +// Offsets from SB must not be merged into unaligned memory accesses because +// loads/stores using PC-relative addressing directly must be aligned to the +// size of the target. 
+(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) => + (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVWZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) => + (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVHZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) => + (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem) + +(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) => + (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) => + (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && 
is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) + +(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) => + (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) => + (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) => + (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) => + (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + +(ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && 
is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLDload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + +(ANDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORload [o1+o2] {mergeSym(s1, s2)} x ptr mem) +(XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem) + +// Cannot store constant to SB directly (no 'move relative long immediate' instructions). 
+(MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) => + (MOVDstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) +(MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) => + (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) +(MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) => + (MOVHstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) +(MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) => + (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem) + +// MOVDaddr into MOVDaddridx +(MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB => + (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y) +(MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB => + (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y) + +// Absorb InvertFlags into branches. +(BRC {c} (InvertFlags cmp) yes no) => (BRC {c.ReverseComparison()} cmp yes no) + +// Constant comparisons. 
+(CMPconst (MOVDconst [x]) [y]) && x==int64(y) => (FlagEQ) +(CMPconst (MOVDconst [x]) [y]) && x (FlagLT) +(CMPconst (MOVDconst [x]) [y]) && x>int64(y) => (FlagGT) +(CMPUconst (MOVDconst [x]) [y]) && uint64(x)==uint64(y) => (FlagEQ) +(CMPUconst (MOVDconst [x]) [y]) && uint64(x) (FlagLT) +(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) => (FlagGT) + +(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ) +(CMPWconst (MOVDconst [x]) [y]) && int32(x) (FlagLT) +(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) => (FlagGT) +(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)==uint32(y) => (FlagEQ) +(CMPWUconst (MOVDconst [x]) [y]) && uint32(x) (FlagLT) +(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) => (FlagGT) + +(CMP(W|WU)const (MOVBZreg _) [c]) && 0xff < c => (FlagLT) +(CMP(W|WU)const (MOVHZreg _) [c]) && 0xffff < c => (FlagLT) + +(CMPconst (SRDconst _ [c]) [n]) && c > 0 && n < 0 => (FlagGT) +(CMPWconst (SRWconst _ [c]) [n]) && c > 0 && n < 0 => (FlagGT) + +(CMPUconst (SRDconst _ [c]) [n]) && c > 0 && c < 64 && (1< (FlagLT) +(CMPWUconst (SRWconst _ [c]) [n]) && c > 0 && c < 32 && (1< (FlagLT) + +(CMPWconst (ANDWconst _ [m]) [n]) && int32(m) >= 0 && int32(m) < int32(n) => (FlagLT) +(CMPWUconst (ANDWconst _ [m]) [n]) && uint32(m) < uint32(n) => (FlagLT) + +(CMPconst (RISBGZ x {r}) [c]) && c > 0 && r.OutMask() < uint64(c) => (FlagLT) +(CMPUconst (RISBGZ x {r}) [c]) && r.OutMask() < uint64(uint32(c)) => (FlagLT) + +// Constant compare-and-branch with immediate. 
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && int64(x) == int64(y) => (First yes no) +(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && int64(x) < int64(y) => (First yes no) +(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && int64(x) > int64(y) => (First yes no) +(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && int32(x) == int32(y) => (First yes no) +(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && int32(x) < int32(y) => (First yes no) +(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && int32(x) > int32(y) => (First yes no) +(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && uint64(x) == uint64(y) => (First yes no) +(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && uint64(x) < uint64(y) => (First yes no) +(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && uint64(x) > uint64(y) => (First yes no) +(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && uint32(x) == uint32(y) => (First yes no) +(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && uint32(x) < uint32(y) => (First yes no) +(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && uint32(x) > uint32(y) => (First yes no) +(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && int64(x) == int64(y) => (First no yes) +(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && int64(x) < int64(y) => (First no yes) +(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && int64(x) > int64(y) => (First no yes) +(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && int32(x) == int32(y) => (First no yes) +(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && int32(x) < int32(y) => (First no yes) +(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && int32(x) > int32(y) => (First no yes) +(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && uint64(x) == uint64(y) => (First no yes) +(CLGIJ {c} (MOVDconst [x]) [y] yes 
no) && c&s390x.Less == 0 && uint64(x) < uint64(y) => (First no yes) +(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && uint64(x) > uint64(y) => (First no yes) +(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && uint32(x) == uint32(y) => (First no yes) +(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && uint32(x) < uint32(y) => (First no yes) +(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && uint32(x) > uint32(y) => (First no yes) + +// Constant compare-and-branch with immediate when unsigned comparison with zero. +(C(L|LG)IJ {s390x.GreaterOrEqual} _ [0] yes no) => (First yes no) +(C(L|LG)IJ {s390x.Less} _ [0] yes no) => (First no yes) + +// Constant compare-and-branch when operands match. +(C(GR|R|LGR|LR)J {c} x y yes no) && x == y && c&s390x.Equal != 0 => (First yes no) +(C(GR|R|LGR|LR)J {c} x y yes no) && x == y && c&s390x.Equal == 0 => (First no yes) + +// Convert 64-bit comparisons to 32-bit comparisons and signed comparisons +// to unsigned comparisons. +// Helps simplify constant comparison detection. +(CM(P|PU)const (MOV(W|WZ)reg x) [c]) => (CMP(W|WU)const x [c]) +(CM(P|P|PU|PU)const x:(MOV(H|HZ|H|HZ)reg _) [c]) => (CMP(W|W|WU|WU)const x [c]) +(CM(P|P|PU|PU)const x:(MOV(B|BZ|B|BZ)reg _) [c]) => (CMP(W|W|WU|WU)const x [c]) +(CMPconst (MOV(WZ|W)reg x:(ANDWconst [m] _)) [c]) && int32(m) >= 0 && c >= 0 => (CMPWUconst x [c]) +(CMPUconst (MOV(WZ|W)reg x:(ANDWconst [m] _)) [c]) && int32(m) >= 0 => (CMPWUconst x [c]) +(CMPconst x:(SRDconst _ [c]) [n]) && c > 0 && n >= 0 => (CMPUconst x [n]) +(CMPWconst x:(SRWconst _ [c]) [n]) && c > 0 && n >= 0 => (CMPWUconst x [n]) + +// Absorb sign and zero extensions into 32-bit comparisons. +(CMP(W|W|WU|WU) x (MOV(W|WZ|W|WZ)reg y)) => (CMP(W|W|WU|WU) x y) +(CMP(W|W|WU|WU) (MOV(W|WZ|W|WZ)reg x) y) => (CMP(W|W|WU|WU) x y) +(CMP(W|W|WU|WU)const (MOV(W|WZ|W|WZ)reg x) [c]) => (CMP(W|W|WU|WU)const x [c]) + +// Absorb flag constants into branches. 
+(BRC {c} (FlagEQ) yes no) && c&s390x.Equal != 0 => (First yes no) +(BRC {c} (FlagLT) yes no) && c&s390x.Less != 0 => (First yes no) +(BRC {c} (FlagGT) yes no) && c&s390x.Greater != 0 => (First yes no) +(BRC {c} (FlagOV) yes no) && c&s390x.Unordered != 0 => (First yes no) + +(BRC {c} (FlagEQ) yes no) && c&s390x.Equal == 0 => (First no yes) +(BRC {c} (FlagLT) yes no) && c&s390x.Less == 0 => (First no yes) +(BRC {c} (FlagGT) yes no) && c&s390x.Greater == 0 => (First no yes) +(BRC {c} (FlagOV) yes no) && c&s390x.Unordered == 0 => (First no yes) + +// Absorb flag constants into SETxx ops. +(LOCGR {c} _ x (FlagEQ)) && c&s390x.Equal != 0 => x +(LOCGR {c} _ x (FlagLT)) && c&s390x.Less != 0 => x +(LOCGR {c} _ x (FlagGT)) && c&s390x.Greater != 0 => x +(LOCGR {c} _ x (FlagOV)) && c&s390x.Unordered != 0 => x + +(LOCGR {c} x _ (FlagEQ)) && c&s390x.Equal == 0 => x +(LOCGR {c} x _ (FlagLT)) && c&s390x.Less == 0 => x +(LOCGR {c} x _ (FlagGT)) && c&s390x.Greater == 0 => x +(LOCGR {c} x _ (FlagOV)) && c&s390x.Unordered == 0 => x + +// Remove redundant *const ops +(ADDconst [0] x) => x +(ADDWconst [c] x) && int32(c)==0 => x +(SUBconst [0] x) => x +(SUBWconst [c] x) && int32(c) == 0 => x +(ANDconst [0] _) => (MOVDconst [0]) +(ANDWconst [c] _) && int32(c)==0 => (MOVDconst [0]) +(ANDconst [-1] x) => x +(ANDWconst [c] x) && int32(c)==-1 => x +(ORconst [0] x) => x +(ORWconst [c] x) && int32(c)==0 => x +(ORconst [-1] _) => (MOVDconst [-1]) +(ORWconst [c] _) && int32(c)==-1 => (MOVDconst [-1]) +(XORconst [0] x) => x +(XORWconst [c] x) && int32(c)==0 => x + +// Shifts by zero (may be inserted during multiplication strength reduction). +((SLD|SLW|SRD|SRW|SRAD|SRAW)const x [0]) => x + +// Convert constant subtracts to constant adds. 
+(SUBconst [c] x) && c != -(1<<31) => (ADDconst [-c] x) +(SUBWconst [c] x) => (ADDWconst [-int32(c)] x) + +// generic constant folding +// TODO: more of this +(ADDconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)+d]) +(ADDWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)+d]) +(ADDconst [c] (ADDconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDconst [c+d] x) +(ADDWconst [c] (ADDWconst [d] x)) => (ADDWconst [int32(c+d)] x) +(SUBconst (MOVDconst [d]) [c]) => (MOVDconst [d-int64(c)]) +(SUBconst (SUBconst x [d]) [c]) && is32Bit(-int64(c)-int64(d)) => (ADDconst [-c-d] x) +(SRADconst [c] (MOVDconst [d])) => (MOVDconst [d>>uint64(c)]) +(SRAWconst [c] (MOVDconst [d])) => (MOVDconst [int64(int32(d))>>uint64(c)]) +(NEG (MOVDconst [c])) => (MOVDconst [-c]) +(NEGW (MOVDconst [c])) => (MOVDconst [int64(int32(-c))]) +(MULLDconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)*d]) +(MULLWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c*int32(d))]) +(AND (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&d]) +(ANDconst [c] (MOVDconst [d])) => (MOVDconst [c&d]) +(ANDWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)&d]) +(OR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|d]) +(ORconst [c] (MOVDconst [d])) => (MOVDconst [c|d]) +(ORWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)|d]) +(XOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c^d]) +(XORconst [c] (MOVDconst [d])) => (MOVDconst [c^d]) +(XORWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)^d]) +(LoweredRound32F x:(FMOVSconst)) => x +(LoweredRound64F x:(FMOVDconst)) => x + +// generic simplifications +// TODO: more of this +(ADD x (NEG y)) => (SUB x y) +(ADDW x (NEGW y)) => (SUBW x y) +(SUB x x) => (MOVDconst [0]) +(SUBW x x) => (MOVDconst [0]) +(AND x x) => x +(ANDW x x) => x +(OR x x) => x +(ORW x x) => x +(XOR x x) => (MOVDconst [0]) +(XORW x x) => (MOVDconst [0]) +(NEG (ADDconst [c] (NEG x))) && c != -(1<<31) => (ADDconst [-c] x) +(MOVBZreg (ANDWconst [m] x)) => (MOVWZreg (ANDWconst [int32( uint8(m))] x)) 
+(MOVHZreg (ANDWconst [m] x)) => (MOVWZreg (ANDWconst [int32(uint16(m))] x)) +(MOVBreg (ANDWconst [m] x)) && int8(m) >= 0 => (MOVWZreg (ANDWconst [int32( uint8(m))] x)) +(MOVHreg (ANDWconst [m] x)) && int16(m) >= 0 => (MOVWZreg (ANDWconst [int32(uint16(m))] x)) + +// carry flag generation +// (only constant fold carry of zero) +(Select1 (ADDCconst (MOVDconst [c]) [d])) + && uint64(c+int64(d)) >= uint64(c) && c+int64(d) == 0 + => (FlagEQ) +(Select1 (ADDCconst (MOVDconst [c]) [d])) + && uint64(c+int64(d)) >= uint64(c) && c+int64(d) != 0 + => (FlagLT) + +// borrow flag generation +// (only constant fold borrow of zero) +(Select1 (SUBC (MOVDconst [c]) (MOVDconst [d]))) + && uint64(d) <= uint64(c) && c-d == 0 + => (FlagGT) +(Select1 (SUBC (MOVDconst [c]) (MOVDconst [d]))) + && uint64(d) <= uint64(c) && c-d != 0 + => (FlagOV) + +// add with carry +(ADDE x y (FlagEQ)) => (ADDC x y) +(ADDE x y (FlagLT)) => (ADDC x y) +(ADDC x (MOVDconst [c])) && is16Bit(c) => (ADDCconst x [int16(c)]) +(Select0 (ADDCconst (MOVDconst [c]) [d])) => (MOVDconst [c+int64(d)]) + +// subtract with borrow +(SUBE x y (FlagGT)) => (SUBC x y) +(SUBE x y (FlagOV)) => (SUBC x y) +(Select0 (SUBC (MOVDconst [c]) (MOVDconst [d]))) => (MOVDconst [c-d]) + +// collapse carry chain +(ADDE x y (Select1 (ADDCconst [-1] (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) c))))) + => (ADDE x y c) + +// collapse borrow chain +(SUBE x y (Select1 (SUBC (MOVDconst [0]) (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) c)))))) + => (SUBE x y c) + +// branch on carry +(C(G|LG)IJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.NoCarry} carry) +(C(G|LG)IJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1]) => (BRC {s390x.Carry} carry) +(C(G|LG)IJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.Carry} carry) +(C(G|LG)IJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1]) => (BRC 
{s390x.NoCarry} carry) +(C(G|LG)IJ {s390x.Greater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.Carry} carry) + +// branch on borrow +(C(G|LG)IJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.NoBorrow} borrow) +(C(G|LG)IJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1]) => (BRC {s390x.Borrow} borrow) +(C(G|LG)IJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.Borrow} borrow) +(C(G|LG)IJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1]) => (BRC {s390x.NoBorrow} borrow) +(C(G|LG)IJ {s390x.Greater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.Borrow} borrow) + +// fused multiply-add +(Select0 (F(ADD|SUB) (FMUL y z) x)) && x.Block.Func.useFMA(v) => (FM(ADD|SUB) x y z) +(Select0 (F(ADDS|SUBS) (FMULS y z) x)) && x.Block.Func.useFMA(v) => (FM(ADDS|SUBS) x y z) + +// Convert floating point comparisons against zero into 'load and test' instructions. +(F(CMP|CMPS) x (FMOV(D|S)const [0.0])) => (LT(D|E)BR x) +(F(CMP|CMPS) (FMOV(D|S)const [0.0]) x) => (InvertFlags (LT(D|E)BR x)) + +// FSUB, FSUBS, FADD, FADDS now produce a condition code representing the +// comparison of the result with 0.0. If a compare with zero instruction +// (e.g. LTDBR) is following one of those instructions, we can use the +// generated flag and remove the comparison instruction. +// Note: when inserting Select1 ops we need to ensure they are in the +// same block as their argument. We could also use @x.Block for this +// but moving the flag generating value to a different block seems to +// increase the likelihood that the flags value will have to be regenerated +// by flagalloc which is not what we want. 
+(LTDBR (Select0 x:(F(ADD|SUB) _ _))) && b == x.Block => (Select1 x) +(LTEBR (Select0 x:(F(ADDS|SUBS) _ _))) && b == x.Block => (Select1 x) + +// Fold memory operations into operations. +// Exclude global data (SB) because these instructions cannot handle relative addresses. +// TODO(mundaym): indexed versions of these? +((ADD|SUB|MULLD|AND|OR|XOR) x g:(MOVDload [off] {sym} ptr mem)) + && ptr.Op != OpSB + && is20Bit(int64(off)) + && canMergeLoadClobber(v, g, x) + && clobber(g) + => ((ADD|SUB|MULLD|AND|OR|XOR)load [off] {sym} x ptr mem) +((ADD|SUB|MULL|AND|OR|XOR)W x g:(MOVWload [off] {sym} ptr mem)) + && ptr.Op != OpSB + && is20Bit(int64(off)) + && canMergeLoadClobber(v, g, x) + && clobber(g) + => ((ADD|SUB|MULL|AND|OR|XOR)Wload [off] {sym} x ptr mem) +((ADD|SUB|MULL|AND|OR|XOR)W x g:(MOVWZload [off] {sym} ptr mem)) + && ptr.Op != OpSB + && is20Bit(int64(off)) + && canMergeLoadClobber(v, g, x) + && clobber(g) + => ((ADD|SUB|MULL|AND|OR|XOR)Wload [off] {sym} x ptr mem) + +// Combine stores into store multiples. 
+// 32-bit +(MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem)) + && p.Op != OpSB + && x.Uses == 1 + && is20Bit(int64(i)-4) + && setPos(v, x.Pos) + && clobber(x) + => (STM2 [i-4] {s} p w0 w1 mem) +(MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem)) + && x.Uses == 1 + && is20Bit(int64(i)-8) + && setPos(v, x.Pos) + && clobber(x) + => (STM3 [i-8] {s} p w0 w1 w2 mem) +(MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem)) + && x.Uses == 1 + && is20Bit(int64(i)-12) + && setPos(v, x.Pos) + && clobber(x) + => (STM4 [i-12] {s} p w0 w1 w2 w3 mem) +(STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem)) + && x.Uses == 1 + && is20Bit(int64(i)-8) + && setPos(v, x.Pos) + && clobber(x) + => (STM4 [i-8] {s} p w0 w1 w2 w3 mem) +// 64-bit +(MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem)) + && p.Op != OpSB + && x.Uses == 1 + && is20Bit(int64(i)-8) + && setPos(v, x.Pos) + && clobber(x) + => (STMG2 [i-8] {s} p w0 w1 mem) +(MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem)) + && x.Uses == 1 + && is20Bit(int64(i)-16) + && setPos(v, x.Pos) + && clobber(x) + => (STMG3 [i-16] {s} p w0 w1 w2 mem) +(MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem)) + && x.Uses == 1 + && is20Bit(int64(i)-24) + && setPos(v, x.Pos) + && clobber(x) + => (STMG4 [i-24] {s} p w0 w1 w2 w3 mem) +(STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem)) + && x.Uses == 1 + && is20Bit(int64(i)-16) + && setPos(v, x.Pos) + && clobber(x) + => (STMG4 [i-16] {s} p w0 w1 w2 w3 mem) + +// Convert 32-bit store multiples into 64-bit stores. +(STM2 [i] {s} p (SRDconst [32] x) x mem) => (MOVDstore [i] {s} p x mem) + +// Fold bit reversal into loads. +(MOVWBR x:(MOVWZload [off] {sym} ptr mem)) && x.Uses == 1 => @x.Block (MOVWZreg (MOVWBRload [off] {sym} ptr mem)) // need zero extension? +(MOVWBR x:(MOVWZloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 => @x.Block (MOVWZreg (MOVWBRloadidx [off] {sym} ptr idx mem)) // need zero extension? 
+(MOVDBR x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 => @x.Block (MOVDBRload [off] {sym} ptr mem) +(MOVDBR x:(MOVDloadidx [off] {sym} ptr idx mem)) && x.Uses == 1 => @x.Block (MOVDBRloadidx [off] {sym} ptr idx mem) + +// Fold bit reversal into stores. +(MOV(D|W)store [off] {sym} ptr r:(MOV(D|W)BR x) mem) && r.Uses == 1 => (MOV(D|W)BRstore [off] {sym} ptr x mem) +(MOV(D|W)storeidx [off] {sym} ptr idx r:(MOV(D|W)BR x) mem) && r.Uses == 1 => (MOV(D|W)BRstoreidx [off] {sym} ptr idx x mem) + +// Special bswap16 rules +(Bswap16 x:(MOVHZload [off] {sym} ptr mem)) => @x.Block (MOVHZreg (MOVHBRload [off] {sym} ptr mem)) +(Bswap16 x:(MOVHZloadidx [off] {sym} ptr idx mem)) => @x.Block (MOVHZreg (MOVHBRloadidx [off] {sym} ptr idx mem)) +(MOVHstore [off] {sym} ptr (Bswap16 val) mem) => (MOVHBRstore [off] {sym} ptr val mem) +(MOVHstoreidx [off] {sym} ptr idx (Bswap16 val) mem) => (MOVHBRstoreidx [off] {sym} ptr idx val mem) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/S390XOps.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/S390XOps.go new file mode 100644 index 0000000000000000000000000000000000000000..c4766c12f5d723ecc8e5cbbf7c0b54d049057bb7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/S390XOps.go @@ -0,0 +1,819 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "strings" + +// Notes: +// - Integer types live in the low portion of registers. Upper portions are junk. +// - Boolean types use the low-order byte of a register. 0=false, 1=true. +// Upper bytes are junk. +// - When doing sub-register operations, we try to write the whole +// destination register to avoid a partial-register write. +// - Unused portions of AuxInt (or the Val portion of ValAndOff) are +// filled by sign-extending the used portion. 
Users of AuxInt which interpret +// AuxInt as unsigned (e.g. shifts) must be careful. +// - The SB 'register' is implemented using instruction-relative addressing. This +// places some limitations on when and how memory operands that are addressed +// relative to SB can be used: +// +// 1. Pseudo-instructions do not always map to a single machine instruction when +// using the SB 'register' to address data. This is because many machine +// instructions do not have relative long (RL suffix) equivalents. For example, +// ADDload, which is assembled as AG. +// +// 2. Loads and stores using relative addressing require the data be aligned +// according to its size (8-bytes for double words, 4-bytes for words +// and so on). +// +// We can always work around these by inserting LARL instructions (load address +// relative long) in the assembler, but typically this results in worse code +// generation because the address can't be re-used. Inserting instructions in the +// assembler also means clobbering the temp register and it is a long-term goal +// to prevent the compiler doing this so that it can be allocated as a normal +// register. +// +// For more information about the z/Architecture, the instruction set and the +// addressing modes it supports take a look at the z/Architecture Principles of +// Operation: http://publibfp.boulder.ibm.com/epubs/pdf/dz9zr010.pdf +// +// Suffixes encode the bit width of pseudo-instructions. +// D (double word) = 64 bit (frequently omitted) +// W (word) = 32 bit +// H (half word) = 16 bit +// B (byte) = 8 bit +// S (single prec.) 
= 32 bit (double precision is omitted) + +// copied from ../../s390x/reg.go +var regNamesS390X = []string{ + "R0", + "R1", + "R2", + "R3", + "R4", + "R5", + "R6", + "R7", + "R8", + "R9", + "R10", + "R11", + "R12", + "g", // R13 + "R14", + "SP", // R15 + "F0", + "F1", + "F2", + "F3", + "F4", + "F5", + "F6", + "F7", + "F8", + "F9", + "F10", + "F11", + "F12", + "F13", + "F14", + "F15", + + // If you add registers, update asyncPreempt in runtime. + + //pseudo-registers + "SB", +} + +func init() { + // Make map from reg names to reg integers. + if len(regNamesS390X) > 64 { + panic("too many registers") + } + num := map[string]int{} + for i, name := range regNamesS390X { + num[name] = i + } + buildReg := func(s string) regMask { + m := regMask(0) + for _, r := range strings.Split(s, " ") { + if n, ok := num[r]; ok { + m |= regMask(1) << uint(n) + continue + } + panic("register " + r + " not found") + } + return m + } + + // Common individual register masks + var ( + sp = buildReg("SP") + sb = buildReg("SB") + r0 = buildReg("R0") + tmp = buildReg("R11") // R11 is used as a temporary in a small number of instructions. + + // R10 is reserved by the assembler. + gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14") + gpg = gp | buildReg("g") + gpsp = gp | sp + + // R0 is considered to contain the value 0 in address calculations. 
+ ptr = gp &^ r0 + ptrsp = ptr | sp + ptrspsb = ptrsp | sb + + fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15") + callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g + r1 = buildReg("R1") + r2 = buildReg("R2") + r3 = buildReg("R3") + r9 = buildReg("R9") + ) + // Common slices of register masks + var ( + gponly = []regMask{gp} + fponly = []regMask{fp} + ) + + // Common regInfo + var ( + gp01 = regInfo{inputs: []regMask{}, outputs: gponly} + gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly} + gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly} + gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly} + gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly} + gp21tmp = regInfo{inputs: []regMask{gp &^ tmp, gp &^ tmp}, outputs: []regMask{gp &^ tmp}, clobbers: tmp} + + // R0 evaluates to 0 when used as the number of bits to shift + // so we need to exclude it from that operand. + sh21 = regInfo{inputs: []regMask{gp, ptr}, outputs: gponly} + + addr = regInfo{inputs: []regMask{sp | sb}, outputs: gponly} + addridx = regInfo{inputs: []regMask{sp | sb, ptrsp}, outputs: gponly} + + gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}} + gp1flags = regInfo{inputs: []regMask{gpsp}} + gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly} + gp11flags = regInfo{inputs: []regMask{gp}, outputs: gponly} + gp21flags = regInfo{inputs: []regMask{gp, gp}, outputs: gponly} + gp2flags1flags = regInfo{inputs: []regMask{gp, gp}, outputs: gponly} + + gpload = regInfo{inputs: []regMask{ptrspsb, 0}, outputs: gponly} + gploadidx = regInfo{inputs: []regMask{ptrspsb, ptrsp, 0}, outputs: gponly} + gpopload = regInfo{inputs: []regMask{gp, ptrsp, 0}, outputs: gponly} + gpstore = regInfo{inputs: []regMask{ptrspsb, gpsp, 0}} + gpstoreconst = regInfo{inputs: []regMask{ptrspsb, 0}} + gpstoreidx = regInfo{inputs: []regMask{ptrsp, ptrsp, gpsp, 0}} + gpstorebr = regInfo{inputs: []regMask{ptrsp, gpsp, 0}} + gpstorelaa 
= regInfo{inputs: []regMask{ptrspsb, gpsp, 0}, outputs: gponly} + gpstorelab = regInfo{inputs: []regMask{r1, gpsp, 0}, clobbers: r1} + + gpmvc = regInfo{inputs: []regMask{ptrsp, ptrsp, 0}} + + fp01 = regInfo{inputs: []regMask{}, outputs: fponly} + fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly} + fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly} + fp21clobber = regInfo{inputs: []regMask{fp, fp}, outputs: fponly} + fpgp = regInfo{inputs: fponly, outputs: gponly} + gpfp = regInfo{inputs: gponly, outputs: fponly} + fp11 = regInfo{inputs: fponly, outputs: fponly} + fp1flags = regInfo{inputs: []regMask{fp}} + fp11clobber = regInfo{inputs: fponly, outputs: fponly} + fp2flags = regInfo{inputs: []regMask{fp, fp}} + + fpload = regInfo{inputs: []regMask{ptrspsb, 0}, outputs: fponly} + fploadidx = regInfo{inputs: []regMask{ptrsp, ptrsp, 0}, outputs: fponly} + + fpstore = regInfo{inputs: []regMask{ptrspsb, fp, 0}} + fpstoreidx = regInfo{inputs: []regMask{ptrsp, ptrsp, fp, 0}} + + sync = regInfo{inputs: []regMask{0}} + + // LoweredAtomicCas may overwrite arg1, so force it to R0 for now. + cas = regInfo{inputs: []regMask{ptrsp, r0, gpsp, 0}, outputs: []regMask{gp, 0}, clobbers: r0} + + // LoweredAtomicExchange overwrites the output before executing + // CS{,G}, so the output register must not be the same as the + // input register. For now we just force the output register to + // R0. 
+ exchange = regInfo{inputs: []regMask{ptrsp, gpsp &^ r0, 0}, outputs: []regMask{r0, 0}} + ) + + var S390Xops = []opData{ + // fp ops + {name: "FADDS", argLength: 2, reg: fp21clobber, typ: "(Float32,Flags)", asm: "FADDS", commutative: true, resultInArg0: true}, // fp32 arg0 + arg1 + {name: "FADD", argLength: 2, reg: fp21clobber, typ: "(Float64,Flags)", asm: "FADD", commutative: true, resultInArg0: true}, // fp64 arg0 + arg1 + {name: "FSUBS", argLength: 2, reg: fp21clobber, typ: "(Float32,Flags)", asm: "FSUBS", resultInArg0: true}, // fp32 arg0 - arg1 + {name: "FSUB", argLength: 2, reg: fp21clobber, typ: "(Float64,Flags)", asm: "FSUB", resultInArg0: true}, // fp64 arg0 - arg1 + {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true, resultInArg0: true}, // fp32 arg0 * arg1 + {name: "FMUL", argLength: 2, reg: fp21, asm: "FMUL", commutative: true, resultInArg0: true}, // fp64 arg0 * arg1 + {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS", resultInArg0: true}, // fp32 arg0 / arg1 + {name: "FDIV", argLength: 2, reg: fp21, asm: "FDIV", resultInArg0: true}, // fp64 arg0 / arg1 + {name: "FNEGS", argLength: 1, reg: fp11clobber, asm: "FNEGS", clobberFlags: true}, // fp32 -arg0 + {name: "FNEG", argLength: 1, reg: fp11clobber, asm: "FNEG", clobberFlags: true}, // fp64 -arg0 + {name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS", resultInArg0: true}, // fp32 arg1 * arg2 + arg0 + {name: "FMADD", argLength: 3, reg: fp31, asm: "FMADD", resultInArg0: true}, // fp64 arg1 * arg2 + arg0 + {name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS", resultInArg0: true}, // fp32 arg1 * arg2 - arg0 + {name: "FMSUB", argLength: 3, reg: fp31, asm: "FMSUB", resultInArg0: true}, // fp64 arg1 * arg2 - arg0 + {name: "LPDFR", argLength: 1, reg: fp11, asm: "LPDFR"}, // fp64/fp32 set sign bit + {name: "LNDFR", argLength: 1, reg: fp11, asm: "LNDFR"}, // fp64/fp32 clear sign bit + {name: "CPSDR", argLength: 2, reg: fp21, asm: "CPSDR"}, // fp64/fp32 copy arg1 sign bit to arg0 + 
+ // Round to integer, float64 only. + // + // aux | rounding mode + // ----+----------------------------------- + // 1 | round to nearest, ties away from 0 + // 4 | round to nearest, ties to even + // 5 | round toward 0 + // 6 | round toward +∞ + // 7 | round toward -∞ + {name: "FIDBR", argLength: 1, reg: fp11, asm: "FIDBR", aux: "Int8"}, + + {name: "FMOVSload", argLength: 2, reg: fpload, asm: "FMOVS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp32 load + {name: "FMOVDload", argLength: 2, reg: fpload, asm: "FMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp64 load + {name: "FMOVSconst", reg: fp01, asm: "FMOVS", aux: "Float32", rematerializeable: true}, // fp32 constant + {name: "FMOVDconst", reg: fp01, asm: "FMOVD", aux: "Float64", rematerializeable: true}, // fp64 constant + {name: "FMOVSloadidx", argLength: 3, reg: fploadidx, asm: "FMOVS", aux: "SymOff", symEffect: "Read"}, // fp32 load indexed by i + {name: "FMOVDloadidx", argLength: 3, reg: fploadidx, asm: "FMOVD", aux: "SymOff", symEffect: "Read"}, // fp64 load indexed by i + + {name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp32 store + {name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp64 store + {name: "FMOVSstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVS", aux: "SymOff", symEffect: "Write"}, // fp32 indexed by i store + {name: "FMOVDstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVD", aux: "SymOff", symEffect: "Write"}, // fp64 indexed by i store + + // binary ops + {name: "ADD", argLength: 2, reg: gp21sp, asm: "ADD", commutative: true, clobberFlags: true}, // arg0 + arg1 + {name: "ADDW", argLength: 2, reg: gp21sp, asm: "ADDW", commutative: true, clobberFlags: true}, // arg0 + arg1 + {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int32", typ: "UInt64", clobberFlags: true}, // arg0 + auxint + 
{name: "ADDWconst", argLength: 1, reg: gp11sp, asm: "ADDW", aux: "Int32", clobberFlags: true}, // arg0 + auxint + {name: "ADDload", argLength: 3, reg: gpopload, asm: "ADD", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + *arg1. arg2=mem + {name: "ADDWload", argLength: 3, reg: gpopload, asm: "ADDW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + *arg1. arg2=mem + + {name: "SUB", argLength: 2, reg: gp21, asm: "SUB", clobberFlags: true}, // arg0 - arg1 + {name: "SUBW", argLength: 2, reg: gp21, asm: "SUBW", clobberFlags: true}, // arg0 - arg1 + {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint + {name: "SUBWconst", argLength: 1, reg: gp11, asm: "SUBW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint + {name: "SUBload", argLength: 3, reg: gpopload, asm: "SUB", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - *arg1. arg2=mem + {name: "SUBWload", argLength: 3, reg: gpopload, asm: "SUBW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - *arg1. 
arg2=mem + + {name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1 + {name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1 + {name: "MULLDconst", argLength: 1, reg: gp11, asm: "MULLD", aux: "Int32", typ: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 * auxint + {name: "MULLWconst", argLength: 1, reg: gp11, asm: "MULLW", aux: "Int32", typ: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint + {name: "MULLDload", argLength: 3, reg: gpopload, asm: "MULLD", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * *arg1. arg2=mem + {name: "MULLWload", argLength: 3, reg: gpopload, asm: "MULLW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * *arg1. arg2=mem + + {name: "MULHD", argLength: 2, reg: gp21tmp, asm: "MULHD", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width + {name: "MULHDU", argLength: 2, reg: gp21tmp, asm: "MULHDU", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width + + {name: "DIVD", argLength: 2, reg: gp21tmp, asm: "DIVD", resultInArg0: true, clobberFlags: true}, // arg0 / arg1 + {name: "DIVW", argLength: 2, reg: gp21tmp, asm: "DIVW", resultInArg0: true, clobberFlags: true}, // arg0 / arg1 + {name: "DIVDU", argLength: 2, reg: gp21tmp, asm: "DIVDU", resultInArg0: true, clobberFlags: true}, // arg0 / arg1 + {name: "DIVWU", argLength: 2, reg: gp21tmp, asm: "DIVWU", resultInArg0: true, clobberFlags: true}, // arg0 / arg1 + + {name: "MODD", argLength: 2, reg: gp21tmp, asm: "MODD", resultInArg0: true, clobberFlags: true}, // arg0 % arg1 + {name: "MODW", argLength: 2, reg: gp21tmp, asm: "MODW", resultInArg0: true, clobberFlags: true}, // arg0 % arg1 + + 
{name: "MODDU", argLength: 2, reg: gp21tmp, asm: "MODDU", resultInArg0: true, clobberFlags: true}, // arg0 % arg1 + {name: "MODWU", argLength: 2, reg: gp21tmp, asm: "MODWU", resultInArg0: true, clobberFlags: true}, // arg0 % arg1 + + {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true, clobberFlags: true}, // arg0 & arg1 + {name: "ANDW", argLength: 2, reg: gp21, asm: "ANDW", commutative: true, clobberFlags: true}, // arg0 & arg1 + {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 & auxint + {name: "ANDWconst", argLength: 1, reg: gp11, asm: "ANDW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint + {name: "ANDload", argLength: 3, reg: gpopload, asm: "AND", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & *arg1. arg2=mem + {name: "ANDWload", argLength: 3, reg: gpopload, asm: "ANDW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & *arg1. arg2=mem + + {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true, clobberFlags: true}, // arg0 | arg1 + {name: "ORW", argLength: 2, reg: gp21, asm: "ORW", commutative: true, clobberFlags: true}, // arg0 | arg1 + {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 | auxint + {name: "ORWconst", argLength: 1, reg: gp11, asm: "ORW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint + {name: "ORload", argLength: 3, reg: gpopload, asm: "OR", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | *arg1. arg2=mem + {name: "ORWload", argLength: 3, reg: gpopload, asm: "ORW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | *arg1. 
arg2=mem + + {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, clobberFlags: true}, // arg0 ^ arg1 + {name: "XORW", argLength: 2, reg: gp21, asm: "XORW", commutative: true, clobberFlags: true}, // arg0 ^ arg1 + {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint + {name: "XORWconst", argLength: 1, reg: gp11, asm: "XORW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint + {name: "XORload", argLength: 3, reg: gpopload, asm: "XOR", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ *arg1. arg2=mem + {name: "XORWload", argLength: 3, reg: gpopload, asm: "XORW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ *arg1. arg2=mem + + // Arithmetic ops with carry/borrow chain. + // + // A carry is represented by a condition code of 2 or 3 (GT or OV). + // A borrow is represented by a condition code of 0 or 1 (EQ or LT). + {name: "ADDC", argLength: 2, reg: gp21flags, asm: "ADDC", typ: "(UInt64,Flags)", commutative: true}, // (arg0 + arg1, carry out) + {name: "ADDCconst", argLength: 1, reg: gp11flags, asm: "ADDC", typ: "(UInt64,Flags)", aux: "Int16"}, // (arg0 + auxint, carry out) + {name: "ADDE", argLength: 3, reg: gp2flags1flags, asm: "ADDE", typ: "(UInt64,Flags)", commutative: true, resultInArg0: true}, // (arg0 + arg1 + arg2 (carry in), carry out) + {name: "SUBC", argLength: 2, reg: gp21flags, asm: "SUBC", typ: "(UInt64,Flags)"}, // (arg0 - arg1, borrow out) + {name: "SUBE", argLength: 3, reg: gp2flags1flags, asm: "SUBE", typ: "(UInt64,Flags)", resultInArg0: true}, // (arg0 - arg1 - arg2 (borrow in), borrow out) + + // Comparisons. 
+ {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1 + {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1 + + {name: "CMPU", argLength: 2, reg: gp2flags, asm: "CMPU", typ: "Flags"}, // arg0 compare to arg1 + {name: "CMPWU", argLength: 2, reg: gp2flags, asm: "CMPWU", typ: "Flags"}, // arg0 compare to arg1 + + {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint + {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint + {name: "CMPUconst", argLength: 1, reg: gp1flags, asm: "CMPU", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint + {name: "CMPWUconst", argLength: 1, reg: gp1flags, asm: "CMPWU", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint + + {name: "FCMPS", argLength: 2, reg: fp2flags, asm: "CEBR", typ: "Flags"}, // arg0 compare to arg1, f32 + {name: "FCMP", argLength: 2, reg: fp2flags, asm: "FCMPU", typ: "Flags"}, // arg0 compare to arg1, f64 + {name: "LTDBR", argLength: 1, reg: fp1flags, asm: "LTDBR", typ: "Flags"}, // arg0 compare to 0, f64 + {name: "LTEBR", argLength: 1, reg: fp1flags, asm: "LTEBR", typ: "Flags"}, // arg0 compare to 0, f32 + + {name: "SLD", argLength: 2, reg: sh21, asm: "SLD"}, // arg0 << arg1, shift amount is mod 64 + {name: "SLW", argLength: 2, reg: sh21, asm: "SLW"}, // arg0 << arg1, shift amount is mod 64 + {name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "UInt8"}, // arg0 << auxint, shift amount 0-63 + {name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "UInt8"}, // arg0 << auxint, shift amount 0-31 + + {name: "SRD", argLength: 2, reg: sh21, asm: "SRD"}, // unsigned arg0 >> arg1, shift amount is mod 64 + {name: "SRW", argLength: 2, reg: sh21, asm: "SRW"}, // unsigned uint32(arg0) >> arg1, shift amount is mod 64 + {name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "UInt8"}, // unsigned 
arg0 >> auxint, shift amount 0-63 + {name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "UInt8"}, // unsigned uint32(arg0) >> auxint, shift amount 0-31 + + // Arithmetic shifts clobber flags. + {name: "SRAD", argLength: 2, reg: sh21, asm: "SRAD", clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64 + {name: "SRAW", argLength: 2, reg: sh21, asm: "SRAW", clobberFlags: true}, // signed int32(arg0) >> arg1, shift amount is mod 64 + {name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "UInt8", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63 + {name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "UInt8", clobberFlags: true}, // signed int32(arg0) >> auxint, shift amount 0-31 + + // Rotate instructions. + // Note: no RLLGconst - use RISBGZ instead. + {name: "RLLG", argLength: 2, reg: sh21, asm: "RLLG"}, // arg0 rotate left arg1, rotate amount 0-63 + {name: "RLL", argLength: 2, reg: sh21, asm: "RLL"}, // arg0 rotate left arg1, rotate amount 0-31 + {name: "RLLconst", argLength: 1, reg: gp11, asm: "RLL", aux: "UInt8"}, // arg0 rotate left auxint, rotate amount 0-31 + + // Rotate then (and|or|xor|insert) selected bits instructions. + // + // Aux is an s390x.RotateParams struct containing Start, End and rotation + // Amount fields. + // + // arg1 is rotated left by the rotation amount then the bits from the start + // bit to the end bit (inclusive) are combined with arg0 using the logical + // operation specified. Bit indices are specified from left to right - the + // MSB is 0 and the LSB is 63. 
+ // + // Examples: + // | aux | + // | instruction | start | end | amount | arg0 | arg1 | result | + // +-------------+-------+-----+--------+-----------------------+-----------------------+-----------------------+ + // | RXSBG (XOR) | 0 | 1 | 0 | 0xffff_ffff_ffff_ffff | 0xffff_ffff_ffff_ffff | 0x3fff_ffff_ffff_ffff | + // | RXSBG (XOR) | 62 | 63 | 0 | 0xffff_ffff_ffff_ffff | 0xffff_ffff_ffff_ffff | 0xffff_ffff_ffff_fffc | + // | RXSBG (XOR) | 0 | 47 | 16 | 0xffff_ffff_ffff_ffff | 0x0000_0000_0000_ffff | 0xffff_ffff_0000_ffff | + // +-------------+-------+-----+--------+-----------------------+-----------------------+-----------------------+ + // + {name: "RXSBG", argLength: 2, reg: gp21, asm: "RXSBG", resultInArg0: true, aux: "S390XRotateParams", clobberFlags: true}, // rotate then xor selected bits + {name: "RISBGZ", argLength: 1, reg: gp11, asm: "RISBGZ", aux: "S390XRotateParams", clobberFlags: true}, // rotate then insert selected bits [into zero] + + // unary ops + {name: "NEG", argLength: 1, reg: gp11, asm: "NEG", clobberFlags: true}, // -arg0 + {name: "NEGW", argLength: 1, reg: gp11, asm: "NEGW", clobberFlags: true}, // -arg0 + + {name: "NOT", argLength: 1, reg: gp11, resultInArg0: true, clobberFlags: true}, // ^arg0 + {name: "NOTW", argLength: 1, reg: gp11, resultInArg0: true, clobberFlags: true}, // ^arg0 + + {name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"}, // sqrt(arg0) + {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0), float32 + + // Conditional register-register moves. + // The aux for these values is an s390x.CCMask value representing the condition code mask. + {name: "LOCGR", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "LOCGR", aux: "S390XCCMask"}, // load arg1 into arg0 if the condition code in arg2 matches a masked bit in aux. 
+ + {name: "MOVBreg", argLength: 1, reg: gp11sp, asm: "MOVB", typ: "Int64"}, // sign extend arg0 from int8 to int64 + {name: "MOVBZreg", argLength: 1, reg: gp11sp, asm: "MOVBZ", typ: "UInt64"}, // zero extend arg0 from int8 to int64 + {name: "MOVHreg", argLength: 1, reg: gp11sp, asm: "MOVH", typ: "Int64"}, // sign extend arg0 from int16 to int64 + {name: "MOVHZreg", argLength: 1, reg: gp11sp, asm: "MOVHZ", typ: "UInt64"}, // zero extend arg0 from int16 to int64 + {name: "MOVWreg", argLength: 1, reg: gp11sp, asm: "MOVW", typ: "Int64"}, // sign extend arg0 from int32 to int64 + {name: "MOVWZreg", argLength: 1, reg: gp11sp, asm: "MOVWZ", typ: "UInt64"}, // zero extend arg0 from int32 to int64 + + {name: "MOVDconst", reg: gp01, asm: "MOVD", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint + + {name: "LDGR", argLength: 1, reg: gpfp, asm: "LDGR"}, // move int64 to float64 (no conversion) + {name: "LGDR", argLength: 1, reg: fpgp, asm: "LGDR"}, // move float64 to int64 (no conversion) + + {name: "CFDBRA", argLength: 1, reg: fpgp, asm: "CFDBRA", clobberFlags: true}, // convert float64 to int32 + {name: "CGDBRA", argLength: 1, reg: fpgp, asm: "CGDBRA", clobberFlags: true}, // convert float64 to int64 + {name: "CFEBRA", argLength: 1, reg: fpgp, asm: "CFEBRA", clobberFlags: true}, // convert float32 to int32 + {name: "CGEBRA", argLength: 1, reg: fpgp, asm: "CGEBRA", clobberFlags: true}, // convert float32 to int64 + {name: "CEFBRA", argLength: 1, reg: gpfp, asm: "CEFBRA", clobberFlags: true}, // convert int32 to float32 + {name: "CDFBRA", argLength: 1, reg: gpfp, asm: "CDFBRA", clobberFlags: true}, // convert int32 to float64 + {name: "CEGBRA", argLength: 1, reg: gpfp, asm: "CEGBRA", clobberFlags: true}, // convert int64 to float32 + {name: "CDGBRA", argLength: 1, reg: gpfp, asm: "CDGBRA", clobberFlags: true}, // convert int64 to float64 + {name: "CLFEBR", argLength: 1, reg: fpgp, asm: "CLFEBR", clobberFlags: true}, // convert float32 to uint32 + {name: 
"CLFDBR", argLength: 1, reg: fpgp, asm: "CLFDBR", clobberFlags: true}, // convert float64 to uint32 + {name: "CLGEBR", argLength: 1, reg: fpgp, asm: "CLGEBR", clobberFlags: true}, // convert float32 to uint64 + {name: "CLGDBR", argLength: 1, reg: fpgp, asm: "CLGDBR", clobberFlags: true}, // convert float64 to uint64 + {name: "CELFBR", argLength: 1, reg: gpfp, asm: "CELFBR", clobberFlags: true}, // convert uint32 to float32 + {name: "CDLFBR", argLength: 1, reg: gpfp, asm: "CDLFBR", clobberFlags: true}, // convert uint32 to float64 + {name: "CELGBR", argLength: 1, reg: gpfp, asm: "CELGBR", clobberFlags: true}, // convert uint64 to float32 + {name: "CDLGBR", argLength: 1, reg: gpfp, asm: "CDLGBR", clobberFlags: true}, // convert uint64 to float64 + + {name: "LEDBR", argLength: 1, reg: fp11, asm: "LEDBR"}, // convert float64 to float32 + {name: "LDEBR", argLength: 1, reg: fp11, asm: "LDEBR"}, // convert float32 to float64 + + {name: "MOVDaddr", argLength: 1, reg: addr, aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux + {name: "MOVDaddridx", argLength: 2, reg: addridx, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux + + // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address + {name: "MOVBZload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend. + {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64 + {name: "MOVHZload", argLength: 2, reg: gpload, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Zero extend. 
+ {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64 + {name: "MOVWZload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend. + {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64 + {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes from arg0+auxint+aux. arg1=mem + + {name: "MOVWBR", argLength: 1, reg: gp11, asm: "MOVWBR"}, // arg0 swap bytes + {name: "MOVDBR", argLength: 1, reg: gp11, asm: "MOVDBR"}, // arg0 swap bytes + + {name: "MOVHBRload", argLength: 2, reg: gpload, asm: "MOVHBR", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Reverse bytes. + {name: "MOVWBRload", argLength: 2, reg: gpload, asm: "MOVWBR", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Reverse bytes. + {name: "MOVDBRload", argLength: 2, reg: gpload, asm: "MOVDBR", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes from arg0+auxint+aux. arg1=mem. Reverse bytes. + + {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte in arg1 to arg0+auxint+aux. arg2=mem + {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem + {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes in arg1 to arg0+auxint+aux. 
arg2=mem + {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem + {name: "MOVHBRstore", argLength: 3, reg: gpstorebr, asm: "MOVHBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem. Reverse bytes. + {name: "MOVWBRstore", argLength: 3, reg: gpstorebr, asm: "MOVWBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem. Reverse bytes. + {name: "MOVDBRstore", argLength: 3, reg: gpstorebr, asm: "MOVDBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem. Reverse bytes. + + {name: "MVC", argLength: 3, reg: gpmvc, asm: "MVC", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, faultOnNilArg1: true, symEffect: "None"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size,off + + // indexed loads/stores + {name: "MOVBZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem. Zero extend. + {name: "MOVBloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVB", aux: "SymOff", typ: "Int8", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem. Sign extend. + {name: "MOVHZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Zero extend. + {name: "MOVHloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVH", aux: "SymOff", typ: "Int16", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Sign extend. 
+ {name: "MOVWZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Zero extend. + {name: "MOVWloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVW", aux: "SymOff", typ: "Int32", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Sign extend. + {name: "MOVDloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVD", aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem + {name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVHBR", aux: "SymOff", typ: "Int16", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes. + {name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWBR", aux: "SymOff", typ: "Int32", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes. + {name: "MOVDBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVDBR", aux: "SymOff", typ: "Int64", symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes. + {name: "MOVBstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVB", aux: "SymOff", symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVHstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVH", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVWstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem + {name: "MOVDstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVD", aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. 
arg3=mem + {name: "MOVHBRstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVHBR", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes. + {name: "MOVWBRstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVWBR", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes. + {name: "MOVDBRstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVDBR", aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes. + + // For storeconst ops, the AuxInt field encodes both + // the value to store and an address offset of the store. + // Cast AuxInt to a ValAndOff to extract Val and Off fields. + {name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux. arg1=mem + {name: "MOVHstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVH", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 2 bytes of ... + {name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 4 bytes of ... + {name: "MOVDstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVD", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of ... + + {name: "CLEAR", argLength: 2, reg: regInfo{inputs: []regMask{ptr, 0}}, asm: "CLEAR", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Write"}, + + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). 
arg0=mem, auxint=argsize, returns mem + {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{ptrsp, buildReg("R12"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{ptr}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + + // (InvertFlags (CMP a b)) == (CMP b a) + // InvertFlags is a pseudo-op which can't appear in assembly output. + {name: "InvertFlags", argLength: 1}, // reverse direction of arg0 + + // Pseudo-ops + {name: "LoweredGetG", argLength: 1, reg: gp01}, // arg0=mem + // Scheduler ensures LoweredGetClosurePtr occurs only in entry block, + // and sorts it to the very beginning of the block to prevent other + // use of R12 (the closure pointer) + {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R12")}}, zeroWidth: true}, + // arg0=ptr,arg1=mem, returns void. Faults if ptr is nil. + // LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem. + {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true}, + // LoweredGetCallerPC evaluates to the PC to which its "caller" will return. + // I.e., if f calls g "calls" getcallerpc, + // the result should be the PC within f that g will return to. + // See runtime/stubs.go for a more detailed discussion. 
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, + {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{ptrsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true}, + // Round ops to block fused-multiply-add extraction. + {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true}, + {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true}, + + // LoweredWB invokes runtime.gcWriteBarrier. arg0=mem, aux=# of buffer entries needed + // It saves all GP registers if necessary, + // but clobbers R14 (LR) because it's a call, + // and also clobbers R1 as the PLT stub does. + // Returns a pointer to a write barrier buffer in R9. + {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ gpg) | buildReg("R14") | r1, outputs: []regMask{r9}}, clobberFlags: true, aux: "Int64"}, + + // There are three of these functions so that they can have three different register inputs. + // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the + // default registers to match so we don't need to copy registers around unnecessarily. + {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go). + {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go). + {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r0, r1}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go). + + // Constant condition code values. The condition code can be 0, 1, 2 or 3. 
+ {name: "FlagEQ"}, // CC=0 (equal) + {name: "FlagLT"}, // CC=1 (less than) + {name: "FlagGT"}, // CC=2 (greater than) + {name: "FlagOV"}, // CC=3 (overflow) + + // Fast-BCR-serialization to ensure store-load ordering. + {name: "SYNC", argLength: 1, reg: sync, asm: "SYNC", typ: "Mem"}, + + // Atomic loads. These are just normal loads but return tuples + // so they can be properly ordered with other loads. + // load from arg0+auxint+aux. arg1=mem. + {name: "MOVBZatomicload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, + {name: "MOVWZatomicload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, + {name: "MOVDatomicload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, + + // Atomic stores. These are just normal stores. + // store arg1 to arg0+auxint+aux. arg2=mem. + {name: "MOVBatomicstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "Write"}, + {name: "MOVWatomicstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "Write"}, + {name: "MOVDatomicstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "Write"}, + + // Atomic adds. + // *(arg0+auxint+aux) += arg1. arg2=mem. + // Returns a tuple of . + {name: "LAA", argLength: 3, reg: gpstorelaa, asm: "LAA", typ: "(UInt32,Mem)", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, + {name: "LAAG", argLength: 3, reg: gpstorelaa, asm: "LAAG", typ: "(UInt64,Mem)", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, + {name: "AddTupleFirst32", argLength: 2}, // arg1=tuple . Returns . 
+ {name: "AddTupleFirst64", argLength: 2}, // arg1=tuple . Returns . + + // Atomic bitwise operations. + // Note: 'floor' operations round the pointer down to the nearest word boundary + // which reflects how they are used in the runtime. + {name: "LAN", argLength: 3, reg: gpstore, asm: "LAN", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *arg0 &= arg1. arg2 = mem. + {name: "LANfloor", argLength: 3, reg: gpstorelab, asm: "LAN", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *(floor(arg0, 4)) &= arg1. arg2 = mem. + {name: "LAO", argLength: 3, reg: gpstore, asm: "LAO", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *arg0 |= arg1. arg2 = mem. + {name: "LAOfloor", argLength: 3, reg: gpstorelab, asm: "LAO", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *(floor(arg0, 4)) |= arg1. arg2 = mem. + + // Compare and swap. + // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. + // if *(arg0+auxint+aux) == arg1 { + // *(arg0+auxint+aux) = arg2 + // return (true, memory) + // } else { + // return (false, memory) + // } + // Note that these instructions also return the old value in arg1, but we ignore it. + // TODO: have these return flags instead of bool. The current system generates: + // CS ... + // MOVD $0, ret + // BNE 2(PC) + // MOVD $1, ret + // CMPW ret, $0 + // BNE ... + // instead of just + // CS ... + // BEQ ... + // but we can't do that because memory-using ops can't generate flags yet + // (flagalloc wants to move flag-generating instructions around). + {name: "LoweredAtomicCas32", argLength: 4, reg: cas, asm: "CS", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, + {name: "LoweredAtomicCas64", argLength: 4, reg: cas, asm: "CSG", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, + + // Lowered atomic swaps, emulated using compare-and-swap. + // store arg1 to arg0+auxint+aux, arg2=mem. 
+ {name: "LoweredAtomicExchange32", argLength: 3, reg: exchange, asm: "CS", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, + {name: "LoweredAtomicExchange64", argLength: 3, reg: exchange, asm: "CSG", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, + + // find leftmost one + { + name: "FLOGR", + argLength: 1, + reg: regInfo{inputs: gponly, outputs: []regMask{buildReg("R0")}, clobbers: buildReg("R1")}, + asm: "FLOGR", + typ: "UInt64", + clobberFlags: true, + }, + + // population count + // + // Counts the number of ones in each byte of arg0 + // and places the result into the corresponding byte + // of the result. + { + name: "POPCNT", + argLength: 1, + reg: gp11, + asm: "POPCNT", + typ: "UInt64", + clobberFlags: true, + }, + + // unsigned multiplication (64x64 → 128) + // + // Multiply the two 64-bit input operands together and place the 128-bit result into + // an even-odd register pair. The second register in the target pair also contains + // one of the input operands. Since we don't currently have a way to specify an + // even-odd register pair we hardcode this register pair as R2:R3. 
+ { + name: "MLGR", + argLength: 2, + reg: regInfo{inputs: []regMask{gp, r3}, outputs: []regMask{r2, r3}}, + asm: "MLGR", + }, + + // pseudo operations to sum the output of the POPCNT instruction + {name: "SumBytes2", argLength: 1, typ: "UInt8"}, // sum the rightmost 2 bytes in arg0 ignoring overflow + {name: "SumBytes4", argLength: 1, typ: "UInt8"}, // sum the rightmost 4 bytes in arg0 ignoring overflow + {name: "SumBytes8", argLength: 1, typ: "UInt8"}, // sum all the bytes in arg0 ignoring overflow + + // store multiple + { + name: "STMG2", + argLength: 4, + reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), 0}}, + aux: "SymOff", + typ: "Mem", + asm: "STMG", + faultOnNilArg0: true, + symEffect: "Write", + clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets + }, + { + name: "STMG3", + argLength: 5, + reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), buildReg("R3"), 0}}, + aux: "SymOff", + typ: "Mem", + asm: "STMG", + faultOnNilArg0: true, + symEffect: "Write", + clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets + }, + { + name: "STMG4", + argLength: 6, + reg: regInfo{inputs: []regMask{ + ptrsp, + buildReg("R1"), + buildReg("R2"), + buildReg("R3"), + buildReg("R4"), + 0, + }}, + aux: "SymOff", + typ: "Mem", + asm: "STMG", + faultOnNilArg0: true, + symEffect: "Write", + clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets + }, + { + name: "STM2", + argLength: 4, + reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), 0}}, + aux: "SymOff", + typ: "Mem", + asm: "STMY", + faultOnNilArg0: true, + symEffect: "Write", + clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets + }, + { + name: "STM3", + argLength: 5, + reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), buildReg("R3"), 0}}, + aux: "SymOff", + typ: "Mem", + asm: "STMY", + faultOnNilArg0: true, + symEffect: "Write", + 
clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets + }, + { + name: "STM4", + argLength: 6, + reg: regInfo{inputs: []regMask{ + ptrsp, + buildReg("R1"), + buildReg("R2"), + buildReg("R3"), + buildReg("R4"), + 0, + }}, + aux: "SymOff", + typ: "Mem", + asm: "STMY", + faultOnNilArg0: true, + symEffect: "Write", + clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets + }, + + // large move + // auxint = remaining bytes after loop (rem) + // arg0 = address of dst memory (in R1, changed as a side effect) + // arg1 = address of src memory (in R2, changed as a side effect) + // arg2 = pointer to last address to move in loop + 256 + // arg3 = mem + // returns mem + // + // mvc: MVC $256, 0(R2), 0(R1) + // MOVD $256(R1), R1 + // MOVD $256(R2), R2 + // CMP R2, Rarg2 + // BNE mvc + // MVC $rem, 0(R2), 0(R1) // if rem > 0 + { + name: "LoweredMove", + aux: "Int64", + argLength: 4, + reg: regInfo{ + inputs: []regMask{buildReg("R1"), buildReg("R2"), gpsp}, + clobbers: buildReg("R1 R2"), + }, + clobberFlags: true, + typ: "Mem", + faultOnNilArg0: true, + faultOnNilArg1: true, + }, + + // large clear + // auxint = remaining bytes after loop (rem) + // arg0 = address of dst memory (in R1, changed as a side effect) + // arg1 = pointer to last address to zero in loop + 256 + // arg2 = mem + // returns mem + // + // clear: CLEAR $256, 0(R1) + // MOVD $256(R1), R1 + // CMP R1, Rarg2 + // BNE clear + // CLEAR $rem, 0(R1) // if rem > 0 + { + name: "LoweredZero", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{buildReg("R1"), gpsp}, + clobbers: buildReg("R1"), + }, + clobberFlags: true, + typ: "Mem", + faultOnNilArg0: true, + }, + } + + // All blocks on s390x have their condition code mask (s390x.CCMask) as the Aux value. + // The condition code mask is a 4-bit mask where each bit corresponds to a condition + // code value. 
If the value of the condition code matches a bit set in the condition + // code mask then the first successor is executed. Otherwise the second successor is + // executed. + // + // | condition code value | mask bit | + // +----------------------+------------+ + // | 0 (equal) | 0b1000 (8) | + // | 1 (less than) | 0b0100 (4) | + // | 2 (greater than) | 0b0010 (2) | + // | 3 (unordered) | 0b0001 (1) | + // + // Note: that compare-and-branch instructions must not have bit 3 (0b0001) set. + var S390Xblocks = []blockData{ + // branch on condition + {name: "BRC", controls: 1, aux: "S390XCCMask"}, // condition code value (flags) is Controls[0] + + // compare-and-branch (register-register) + // - integrates comparison of Controls[0] with Controls[1] + // - both control values must be in general purpose registers + {name: "CRJ", controls: 2, aux: "S390XCCMask"}, // signed 32-bit integer comparison + {name: "CGRJ", controls: 2, aux: "S390XCCMask"}, // signed 64-bit integer comparison + {name: "CLRJ", controls: 2, aux: "S390XCCMask"}, // unsigned 32-bit integer comparison + {name: "CLGRJ", controls: 2, aux: "S390XCCMask"}, // unsigned 64-bit integer comparison + + // compare-and-branch (register-immediate) + // - integrates comparison of Controls[0] with AuxInt + // - control value must be in a general purpose register + // - the AuxInt value is sign-extended for signed comparisons + // and zero-extended for unsigned comparisons + {name: "CIJ", controls: 1, aux: "S390XCCMaskInt8"}, // signed 32-bit integer comparison + {name: "CGIJ", controls: 1, aux: "S390XCCMaskInt8"}, // signed 64-bit integer comparison + {name: "CLIJ", controls: 1, aux: "S390XCCMaskUint8"}, // unsigned 32-bit integer comparison + {name: "CLGIJ", controls: 1, aux: "S390XCCMaskUint8"}, // unsigned 64-bit integer comparison + } + + archs = append(archs, arch{ + name: "S390X", + pkg: "cmd/internal/obj/s390x", + genfile: "../../s390x/ssa.go", + ops: S390Xops, + blocks: S390Xblocks, + regnames: regNamesS390X, 
+ gpregmask: gp, + fpregmask: fp, + framepointerreg: -1, // not used + linkreg: int8(num["R14"]), + imports: []string{ + "cmd/internal/obj/s390x", + }, + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/Wasm.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/Wasm.rules new file mode 100644 index 0000000000000000000000000000000000000000..91a9fc5e4a9772b3e5e337c5c1078170700a1dd7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/Wasm.rules @@ -0,0 +1,397 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Lowering arithmetic +(Add(64|32|16|8|Ptr) ...) => (I64Add ...) +(Add(64|32)F ...) => (F(64|32)Add ...) + +(Sub(64|32|16|8|Ptr) ...) => (I64Sub ...) +(Sub(64|32)F ...) => (F(64|32)Sub ...) + +(Mul(64|32|16|8) ...) => (I64Mul ...) +(Mul(64|32)F ...) => (F(64|32)Mul ...) + +(Div64 [false] x y) => (I64DivS x y) +(Div32 [false] x y) => (I64DivS (SignExt32to64 x) (SignExt32to64 y)) +(Div16 [false] x y) => (I64DivS (SignExt16to64 x) (SignExt16to64 y)) +(Div8 x y) => (I64DivS (SignExt8to64 x) (SignExt8to64 y)) +(Div64u ...) => (I64DivU ...) +(Div32u x y) => (I64DivU (ZeroExt32to64 x) (ZeroExt32to64 y)) +(Div16u x y) => (I64DivU (ZeroExt16to64 x) (ZeroExt16to64 y)) +(Div8u x y) => (I64DivU (ZeroExt8to64 x) (ZeroExt8to64 y)) +(Div(64|32)F ...) => (F(64|32)Div ...) + +(Mod64 [false] x y) => (I64RemS x y) +(Mod32 [false] x y) => (I64RemS (SignExt32to64 x) (SignExt32to64 y)) +(Mod16 [false] x y) => (I64RemS (SignExt16to64 x) (SignExt16to64 y)) +(Mod8 x y) => (I64RemS (SignExt8to64 x) (SignExt8to64 y)) +(Mod64u ...) => (I64RemU ...) +(Mod32u x y) => (I64RemU (ZeroExt32to64 x) (ZeroExt32to64 y)) +(Mod16u x y) => (I64RemU (ZeroExt16to64 x) (ZeroExt16to64 y)) +(Mod8u x y) => (I64RemU (ZeroExt8to64 x) (ZeroExt8to64 y)) + +(And(64|32|16|8|B) ...) => (I64And ...) 
+ +(Or(64|32|16|8|B) ...) => (I64Or ...) + +(Xor(64|32|16|8) ...) => (I64Xor ...) + +(Neg(64|32|16|8) x) => (I64Sub (I64Const [0]) x) +(Neg(64|32)F ...) => (F(64|32)Neg ...) + +(Com(64|32|16|8) x) => (I64Xor x (I64Const [-1])) + +(Not ...) => (I64Eqz ...) + +// Lowering pointer arithmetic +(OffPtr ...) => (I64AddConst ...) + +// Lowering extension +// It is unnecessary to extend loads +(SignExt32to64 x:(I64Load32S _ _)) => x +(SignExt16to(64|32) x:(I64Load16S _ _)) => x +(SignExt8to(64|32|16) x:(I64Load8S _ _)) => x +(ZeroExt32to64 x:(I64Load32U _ _)) => x +(ZeroExt16to(64|32) x:(I64Load16U _ _)) => x +(ZeroExt8to(64|32|16) x:(I64Load8U _ _)) => x +(SignExt32to64 x) && buildcfg.GOWASM.SignExt => (I64Extend32S x) +(SignExt8to(64|32|16) x) && buildcfg.GOWASM.SignExt => (I64Extend8S x) +(SignExt16to(64|32) x) && buildcfg.GOWASM.SignExt => (I64Extend16S x) +(SignExt32to64 x) => (I64ShrS (I64Shl x (I64Const [32])) (I64Const [32])) +(SignExt16to(64|32) x) => (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48])) +(SignExt8to(64|32|16) x) => (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56])) +(ZeroExt32to64 x) => (I64And x (I64Const [0xffffffff])) +(ZeroExt16to(64|32) x) => (I64And x (I64Const [0xffff])) +(ZeroExt8to(64|32|16) x) => (I64And x (I64Const [0xff])) + +(Slicemask x) => (I64ShrS (I64Sub (I64Const [0]) x) (I64Const [63])) + +// Lowering truncation +// Because we ignore the high parts, truncates are just copies. +(Trunc64to(32|16|8) ...) => (Copy ...) +(Trunc32to(16|8) ...) => (Copy ...) +(Trunc16to8 ...) => (Copy ...) + +// Lowering float <=> int +(Cvt32to(64|32)F x) => (F(64|32)ConvertI64S (SignExt32to64 x)) +(Cvt64to(64|32)F ...) => (F(64|32)ConvertI64S ...) +(Cvt32Uto(64|32)F x) => (F(64|32)ConvertI64U (ZeroExt32to64 x)) +(Cvt64Uto(64|32)F ...) => (F(64|32)ConvertI64U ...) + +(Cvt32Fto32 ...) => (I64TruncSatF32S ...) +(Cvt32Fto64 ...) => (I64TruncSatF32S ...) +(Cvt64Fto32 ...) => (I64TruncSatF64S ...) +(Cvt64Fto64 ...) => (I64TruncSatF64S ...) 
+(Cvt32Fto32U ...) => (I64TruncSatF32U ...) +(Cvt32Fto64U ...) => (I64TruncSatF32U ...) +(Cvt64Fto32U ...) => (I64TruncSatF64U ...) +(Cvt64Fto64U ...) => (I64TruncSatF64U ...) + +(Cvt32Fto64F ...) => (F64PromoteF32 ...) +(Cvt64Fto32F ...) => (F32DemoteF64 ...) + +(CvtBoolToUint8 ...) => (Copy ...) + +(Round32F ...) => (Copy ...) +(Round64F ...) => (Copy ...) + +// Lowering shifts +// Unsigned shifts need to return 0 if shift amount is >= width of shifted value. + +(Lsh64x64 x y) && shiftIsBounded(v) => (I64Shl x y) +(Lsh64x64 x (I64Const [c])) && uint64(c) < 64 => (I64Shl x (I64Const [c])) +(Lsh64x64 x (I64Const [c])) && uint64(c) >= 64 => (I64Const [0]) +(Lsh64x64 x y) => (Select (I64Shl x y) (I64Const [0]) (I64LtU y (I64Const [64]))) +(Lsh64x(32|16|8) [c] x y) => (Lsh64x64 [c] x (ZeroExt(32|16|8)to64 y)) + +(Lsh32x64 ...) => (Lsh64x64 ...) +(Lsh32x(32|16|8) [c] x y) => (Lsh64x64 [c] x (ZeroExt(32|16|8)to64 y)) + +(Lsh16x64 ...) => (Lsh64x64 ...) +(Lsh16x(32|16|8) [c] x y) => (Lsh64x64 [c] x (ZeroExt(32|16|8)to64 y)) + +(Lsh8x64 ...) => (Lsh64x64 ...) 
+(Lsh8x(32|16|8) [c] x y) => (Lsh64x64 [c] x (ZeroExt(32|16|8)to64 y)) + +(Rsh64Ux64 x y) && shiftIsBounded(v) => (I64ShrU x y) +(Rsh64Ux64 x (I64Const [c])) && uint64(c) < 64 => (I64ShrU x (I64Const [c])) +(Rsh64Ux64 x (I64Const [c])) && uint64(c) >= 64 => (I64Const [0]) +(Rsh64Ux64 x y) => (Select (I64ShrU x y) (I64Const [0]) (I64LtU y (I64Const [64]))) +(Rsh64Ux(32|16|8) [c] x y) => (Rsh64Ux64 [c] x (ZeroExt(32|16|8)to64 y)) + +(Rsh32Ux64 [c] x y) => (Rsh64Ux64 [c] (ZeroExt32to64 x) y) +(Rsh32Ux(32|16|8) [c] x y) => (Rsh64Ux64 [c] (ZeroExt32to64 x) (ZeroExt(32|16|8)to64 y)) + +(Rsh16Ux64 [c] x y) => (Rsh64Ux64 [c] (ZeroExt16to64 x) y) +(Rsh16Ux(32|16|8) [c] x y) => (Rsh64Ux64 [c] (ZeroExt16to64 x) (ZeroExt(32|16|8)to64 y)) + +(Rsh8Ux64 [c] x y) => (Rsh64Ux64 [c] (ZeroExt8to64 x) y) +(Rsh8Ux(32|16|8) [c] x y) => (Rsh64Ux64 [c] (ZeroExt8to64 x) (ZeroExt(32|16|8)to64 y)) + +// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value. +// We implement this by setting the shift value to (width - 1) if the shift value is >= width. 
+ +(Rsh64x64 x y) && shiftIsBounded(v) => (I64ShrS x y) +(Rsh64x64 x (I64Const [c])) && uint64(c) < 64 => (I64ShrS x (I64Const [c])) +(Rsh64x64 x (I64Const [c])) && uint64(c) >= 64 => (I64ShrS x (I64Const [63])) +(Rsh64x64 x y) => (I64ShrS x (Select y (I64Const [63]) (I64LtU y (I64Const [64])))) +(Rsh64x(32|16|8) [c] x y) => (Rsh64x64 [c] x (ZeroExt(32|16|8)to64 y)) + +(Rsh32x64 [c] x y) => (Rsh64x64 [c] (SignExt32to64 x) y) +(Rsh32x(32|16|8) [c] x y) => (Rsh64x64 [c] (SignExt32to64 x) (ZeroExt(32|16|8)to64 y)) + +(Rsh16x64 [c] x y) => (Rsh64x64 [c] (SignExt16to64 x) y) +(Rsh16x(32|16|8) [c] x y) => (Rsh64x64 [c] (SignExt16to64 x) (ZeroExt(32|16|8)to64 y)) + +(Rsh8x64 [c] x y) => (Rsh64x64 [c] (SignExt8to64 x) y) +(Rsh8x(32|16|8) [c] x y) => (Rsh64x64 [c] (SignExt8to64 x) (ZeroExt(32|16|8)to64 y)) + +// Lowering rotates +(RotateLeft8 x (I64Const [c])) => (Or8 (Lsh8x64 x (I64Const [c&7])) (Rsh8Ux64 x (I64Const [-c&7]))) +(RotateLeft16 x (I64Const [c])) => (Or16 (Lsh16x64 x (I64Const [c&15])) (Rsh16Ux64 x (I64Const [-c&15]))) +(RotateLeft32 ...) => (I32Rotl ...) +(RotateLeft64 ...) => (I64Rotl ...) + +// Lowering comparisons +(Less64 ...) => (I64LtS ...) +(Less32 x y) => (I64LtS (SignExt32to64 x) (SignExt32to64 y)) +(Less16 x y) => (I64LtS (SignExt16to64 x) (SignExt16to64 y)) +(Less8 x y) => (I64LtS (SignExt8to64 x) (SignExt8to64 y)) +(Less64U ...) => (I64LtU ...) +(Less32U x y) => (I64LtU (ZeroExt32to64 x) (ZeroExt32to64 y)) +(Less16U x y) => (I64LtU (ZeroExt16to64 x) (ZeroExt16to64 y)) +(Less8U x y) => (I64LtU (ZeroExt8to64 x) (ZeroExt8to64 y)) +(Less(64|32)F ...) => (F(64|32)Lt ...) + +(Leq64 ...) => (I64LeS ...) +(Leq32 x y) => (I64LeS (SignExt32to64 x) (SignExt32to64 y)) +(Leq16 x y) => (I64LeS (SignExt16to64 x) (SignExt16to64 y)) +(Leq8 x y) => (I64LeS (SignExt8to64 x) (SignExt8to64 y)) +(Leq64U ...) => (I64LeU ...) 
+(Leq32U x y) => (I64LeU (ZeroExt32to64 x) (ZeroExt32to64 y)) +(Leq16U x y) => (I64LeU (ZeroExt16to64 x) (ZeroExt16to64 y)) +(Leq8U x y) => (I64LeU (ZeroExt8to64 x) (ZeroExt8to64 y)) +(Leq(64|32)F ...) => (F(64|32)Le ...) + +(Eq64 ...) => (I64Eq ...) +(Eq32 x y) => (I64Eq (ZeroExt32to64 x) (ZeroExt32to64 y)) +(Eq16 x y) => (I64Eq (ZeroExt16to64 x) (ZeroExt16to64 y)) +(Eq8 x y) => (I64Eq (ZeroExt8to64 x) (ZeroExt8to64 y)) +(EqB ...) => (I64Eq ...) +(EqPtr ...) => (I64Eq ...) +(Eq(64|32)F ...) => (F(64|32)Eq ...) + +(Neq64 ...) => (I64Ne ...) +(Neq32 x y) => (I64Ne (ZeroExt32to64 x) (ZeroExt32to64 y)) +(Neq16 x y) => (I64Ne (ZeroExt16to64 x) (ZeroExt16to64 y)) +(Neq8 x y) => (I64Ne (ZeroExt8to64 x) (ZeroExt8to64 y)) +(NeqB ...) => (I64Ne ...) +(NeqPtr ...) => (I64Ne ...) +(Neq(64|32)F ...) => (F(64|32)Ne ...) + +// Lowering loads +(Load ptr mem) && is32BitFloat(t) => (F32Load ptr mem) +(Load ptr mem) && is64BitFloat(t) => (F64Load ptr mem) +(Load ptr mem) && t.Size() == 8 => (I64Load ptr mem) +(Load ptr mem) && t.Size() == 4 && !t.IsSigned() => (I64Load32U ptr mem) +(Load ptr mem) && t.Size() == 4 && t.IsSigned() => (I64Load32S ptr mem) +(Load ptr mem) && t.Size() == 2 && !t.IsSigned() => (I64Load16U ptr mem) +(Load ptr mem) && t.Size() == 2 && t.IsSigned() => (I64Load16S ptr mem) +(Load ptr mem) && t.Size() == 1 && !t.IsSigned() => (I64Load8U ptr mem) +(Load ptr mem) && t.Size() == 1 && t.IsSigned() => (I64Load8S ptr mem) + +// Lowering stores +(Store {t} ptr val mem) && is64BitFloat(t) => (F64Store ptr val mem) +(Store {t} ptr val mem) && is32BitFloat(t) => (F32Store ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 => (I64Store ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 => (I64Store32 ptr val mem) +(Store {t} ptr val mem) && t.Size() == 2 => (I64Store16 ptr val mem) +(Store {t} ptr val mem) && t.Size() == 1 => (I64Store8 ptr val mem) + +// Lowering moves +(Move [0] _ _ mem) => mem +(Move [1] dst src mem) => (I64Store8 dst (I64Load8U src mem) mem) 
+(Move [2] dst src mem) => (I64Store16 dst (I64Load16U src mem) mem) +(Move [4] dst src mem) => (I64Store32 dst (I64Load32U src mem) mem) +(Move [8] dst src mem) => (I64Store dst (I64Load src mem) mem) +(Move [16] dst src mem) => + (I64Store [8] dst (I64Load [8] src mem) + (I64Store dst (I64Load src mem) mem)) +(Move [3] dst src mem) => + (I64Store8 [2] dst (I64Load8U [2] src mem) + (I64Store16 dst (I64Load16U src mem) mem)) +(Move [5] dst src mem) => + (I64Store8 [4] dst (I64Load8U [4] src mem) + (I64Store32 dst (I64Load32U src mem) mem)) +(Move [6] dst src mem) => + (I64Store16 [4] dst (I64Load16U [4] src mem) + (I64Store32 dst (I64Load32U src mem) mem)) +(Move [7] dst src mem) => + (I64Store32 [3] dst (I64Load32U [3] src mem) + (I64Store32 dst (I64Load32U src mem) mem)) +(Move [s] dst src mem) && s > 8 && s < 16 => + (I64Store [s-8] dst (I64Load [s-8] src mem) + (I64Store dst (I64Load src mem) mem)) + +// Large copying uses helper. +(Move [s] dst src mem) && logLargeCopy(v, s) => + (LoweredMove [s] dst src mem) + +// Lowering Zero instructions +(Zero [0] _ mem) => mem +(Zero [1] destptr mem) => (I64Store8 destptr (I64Const [0]) mem) +(Zero [2] destptr mem) => (I64Store16 destptr (I64Const [0]) mem) +(Zero [4] destptr mem) => (I64Store32 destptr (I64Const [0]) mem) +(Zero [8] destptr mem) => (I64Store destptr (I64Const [0]) mem) + +(Zero [3] destptr mem) => + (I64Store8 [2] destptr (I64Const [0]) + (I64Store16 destptr (I64Const [0]) mem)) +(Zero [5] destptr mem) => + (I64Store8 [4] destptr (I64Const [0]) + (I64Store32 destptr (I64Const [0]) mem)) +(Zero [6] destptr mem) => + (I64Store16 [4] destptr (I64Const [0]) + (I64Store32 destptr (I64Const [0]) mem)) +(Zero [7] destptr mem) => + (I64Store32 [3] destptr (I64Const [0]) + (I64Store32 destptr (I64Const [0]) mem)) + +// Strip off any fractional word zeroing. 
+(Zero [s] destptr mem) && s%8 != 0 && s > 8 && s < 32 => + (Zero [s-s%8] (OffPtr destptr [s%8]) + (I64Store destptr (I64Const [0]) mem)) + +// Zero small numbers of words directly. +(Zero [16] destptr mem) => + (I64Store [8] destptr (I64Const [0]) + (I64Store destptr (I64Const [0]) mem)) +(Zero [24] destptr mem) => + (I64Store [16] destptr (I64Const [0]) + (I64Store [8] destptr (I64Const [0]) + (I64Store destptr (I64Const [0]) mem))) +(Zero [32] destptr mem) => + (I64Store [24] destptr (I64Const [0]) + (I64Store [16] destptr (I64Const [0]) + (I64Store [8] destptr (I64Const [0]) + (I64Store destptr (I64Const [0]) mem)))) + +// Large zeroing uses helper. +(Zero [s] destptr mem) => + (LoweredZero [s] destptr mem) + +// Lowering constants +(Const64 ...) => (I64Const ...) +(Const(32|16|8) [c]) => (I64Const [int64(c)]) +(Const(64|32)F ...) => (F(64|32)Const ...) +(ConstNil) => (I64Const [0]) +(ConstBool [c]) => (I64Const [b2i(c)]) + +// Lowering calls +(StaticCall ...) => (LoweredStaticCall ...) +(ClosureCall ...) => (LoweredClosureCall ...) +(InterCall ...) => (LoweredInterCall ...) +(TailCall ...) => (LoweredTailCall ...) + +// Miscellaneous +(Convert ...) => (LoweredConvert ...) +(IsNonNil p) => (I64Eqz (I64Eqz p)) +(IsInBounds ...) => (I64LtU ...) +(IsSliceInBounds ...) => (I64LeU ...) +(NilCheck ...) => (LoweredNilCheck ...) +(GetClosurePtr ...) => (LoweredGetClosurePtr ...) +(GetCallerPC ...) => (LoweredGetCallerPC ...) +(GetCallerSP ...) => (LoweredGetCallerSP ...) +(Addr {sym} base) => (LoweredAddr {sym} [0] base) +(LocalAddr {sym} base mem) && t.Elem().HasPointers() => (LoweredAddr {sym} (SPanchored base mem)) +(LocalAddr {sym} base _) && !t.Elem().HasPointers() => (LoweredAddr {sym} base) + +// Write barrier. +(WB ...) => (LoweredWB ...) + +// --- Intrinsics --- +(Sqrt ...) => (F64Sqrt ...) +(Trunc ...) => (F64Trunc ...) +(Ceil ...) => (F64Ceil ...) +(Floor ...) => (F64Floor ...) +(RoundToEven ...) => (F64Nearest ...) +(Abs ...) => (F64Abs ...) +(Copysign ...) 
=> (F64Copysign ...) + +(Sqrt32 ...) => (F32Sqrt ...) + +(Ctz64 ...) => (I64Ctz ...) +(Ctz32 x) => (I64Ctz (I64Or x (I64Const [0x100000000]))) +(Ctz16 x) => (I64Ctz (I64Or x (I64Const [0x10000]))) +(Ctz8 x) => (I64Ctz (I64Or x (I64Const [0x100]))) + +(Ctz(64|32|16|8)NonZero ...) => (I64Ctz ...) + +(BitLen64 x) => (I64Sub (I64Const [64]) (I64Clz x)) + +(PopCount64 ...) => (I64Popcnt ...) +(PopCount32 x) => (I64Popcnt (ZeroExt32to64 x)) +(PopCount16 x) => (I64Popcnt (ZeroExt16to64 x)) +(PopCount8 x) => (I64Popcnt (ZeroExt8to64 x)) + +(CondSelect ...) => (Select ...) + +// --- Optimizations --- +(I64Add (I64Const [x]) (I64Const [y])) => (I64Const [x + y]) +(I64Mul (I64Const [x]) (I64Const [y])) => (I64Const [x * y]) +(I64And (I64Const [x]) (I64Const [y])) => (I64Const [x & y]) +(I64Or (I64Const [x]) (I64Const [y])) => (I64Const [x | y]) +(I64Xor (I64Const [x]) (I64Const [y])) => (I64Const [x ^ y]) +(F64Add (F64Const [x]) (F64Const [y])) => (F64Const [x + y]) +(F64Mul (F64Const [x]) (F64Const [y])) && !math.IsNaN(x * y) => (F64Const [x * y]) +(I64Eq (I64Const [x]) (I64Const [y])) && x == y => (I64Const [1]) +(I64Eq (I64Const [x]) (I64Const [y])) && x != y => (I64Const [0]) +(I64Ne (I64Const [x]) (I64Const [y])) && x == y => (I64Const [0]) +(I64Ne (I64Const [x]) (I64Const [y])) && x != y => (I64Const [1]) + +(I64Shl (I64Const [x]) (I64Const [y])) => (I64Const [x << uint64(y)]) +(I64ShrU (I64Const [x]) (I64Const [y])) => (I64Const [int64(uint64(x) >> uint64(y))]) +(I64ShrS (I64Const [x]) (I64Const [y])) => (I64Const [x >> uint64(y)]) + +// TODO: declare these operations as commutative and get rid of these rules? 
+(I64Add (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Add y (I64Const [x])) +(I64Mul (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Mul y (I64Const [x])) +(I64And (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64And y (I64Const [x])) +(I64Or (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Or y (I64Const [x])) +(I64Xor (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Xor y (I64Const [x])) +(F64Add (F64Const [x]) y) && y.Op != OpWasmF64Const => (F64Add y (F64Const [x])) +(F64Mul (F64Const [x]) y) && y.Op != OpWasmF64Const => (F64Mul y (F64Const [x])) +(I64Eq (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Eq y (I64Const [x])) +(I64Ne (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Ne y (I64Const [x])) + +(I64Eq x (I64Const [0])) => (I64Eqz x) +(I64LtU (I64Const [0]) x) => (I64Eqz (I64Eqz x)) +(I64LeU x (I64Const [0])) => (I64Eqz x) +(I64LtU x (I64Const [1])) => (I64Eqz x) +(I64LeU (I64Const [1]) x) => (I64Eqz (I64Eqz x)) +(I64Ne x (I64Const [0])) => (I64Eqz (I64Eqz x)) + +(I64Add x (I64Const [y])) && !t.IsPtr() => (I64AddConst [y] x) +(I64AddConst [0] x) => x +(I64Eqz (I64Eqz (I64Eqz x))) => (I64Eqz x) + +// folding offset into load/store +((I64Load|I64Load32U|I64Load32S|I64Load16U|I64Load16S|I64Load8U|I64Load8S) [off] (I64AddConst [off2] ptr) mem) + && isU32Bit(off+off2) => + ((I64Load|I64Load32U|I64Load32S|I64Load16U|I64Load16S|I64Load8U|I64Load8S) [off+off2] ptr mem) + +((I64Store|I64Store32|I64Store16|I64Store8) [off] (I64AddConst [off2] ptr) val mem) + && isU32Bit(off+off2) => + ((I64Store|I64Store32|I64Store16|I64Store8) [off+off2] ptr val mem) + +// folding offset into address +(I64AddConst [off] (LoweredAddr {sym} [off2] base)) && isU32Bit(off+int64(off2)) => + (LoweredAddr {sym} [int32(off)+off2] base) +(I64AddConst [off] x:(SP)) && isU32Bit(off) => (LoweredAddr [int32(off)] x) // so it is rematerializeable + +// transforming readonly globals into constants +(I64Load [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && 
isU32Bit(off+int64(off2)) => (I64Const [int64(read64(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))]) +(I64Load32U [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(read32(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))]) +(I64Load16U [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(read16(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))]) +(I64Load8U [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(read8(sym, off+int64(off2)))]) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/WasmOps.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/WasmOps.go new file mode 100644 index 0000000000000000000000000000000000000000..45bbed5f520201c2b02e28b5e6ca0640b907e798 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/WasmOps.go @@ -0,0 +1,277 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import "strings" + +var regNamesWasm = []string{ + "R0", + "R1", + "R2", + "R3", + "R4", + "R5", + "R6", + "R7", + "R8", + "R9", + "R10", + "R11", + "R12", + "R13", + "R14", + "R15", + + "F0", + "F1", + "F2", + "F3", + "F4", + "F5", + "F6", + "F7", + "F8", + "F9", + "F10", + "F11", + "F12", + "F13", + "F14", + "F15", + + "F16", + "F17", + "F18", + "F19", + "F20", + "F21", + "F22", + "F23", + "F24", + "F25", + "F26", + "F27", + "F28", + "F29", + "F30", + "F31", + + "SP", + "g", + + // pseudo-registers + "SB", +} + +func init() { + // Make map from reg names to reg integers. 
+ if len(regNamesWasm) > 64 { + panic("too many registers") + } + num := map[string]int{} + for i, name := range regNamesWasm { + num[name] = i + } + buildReg := func(s string) regMask { + m := regMask(0) + for _, r := range strings.Split(s, " ") { + if n, ok := num[r]; ok { + m |= regMask(1) << uint(n) + continue + } + panic("register " + r + " not found") + } + return m + } + + var ( + gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15") + fp32 = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15") + fp64 = buildReg("F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31") + gpsp = gp | buildReg("SP") + gpspsb = gpsp | buildReg("SB") + // The "registers", which are actually local variables, can get clobbered + // if we're switching goroutines, because it unwinds the WebAssembly stack. + callerSave = gp | fp32 | fp64 | buildReg("g") + ) + + // Common regInfo + var ( + gp01 = regInfo{inputs: nil, outputs: []regMask{gp}} + gp11 = regInfo{inputs: []regMask{gpsp}, outputs: []regMask{gp}} + gp21 = regInfo{inputs: []regMask{gpsp, gpsp}, outputs: []regMask{gp}} + gp31 = regInfo{inputs: []regMask{gpsp, gpsp, gpsp}, outputs: []regMask{gp}} + fp32_01 = regInfo{inputs: nil, outputs: []regMask{fp32}} + fp32_11 = regInfo{inputs: []regMask{fp32}, outputs: []regMask{fp32}} + fp32_21 = regInfo{inputs: []regMask{fp32, fp32}, outputs: []regMask{fp32}} + fp32_21gp = regInfo{inputs: []regMask{fp32, fp32}, outputs: []regMask{gp}} + fp64_01 = regInfo{inputs: nil, outputs: []regMask{fp64}} + fp64_11 = regInfo{inputs: []regMask{fp64}, outputs: []regMask{fp64}} + fp64_21 = regInfo{inputs: []regMask{fp64, fp64}, outputs: []regMask{fp64}} + fp64_21gp = regInfo{inputs: []regMask{fp64, fp64}, outputs: []regMask{gp}} + gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: []regMask{gp}} + gpstore = regInfo{inputs: []regMask{gpspsb, gpsp, 0}} + fp32load = regInfo{inputs: []regMask{gpspsb, 0}, outputs: []regMask{fp32}} + fp32store = regInfo{inputs: 
[]regMask{gpspsb, fp32, 0}} + fp64load = regInfo{inputs: []regMask{gpspsb, 0}, outputs: []regMask{fp64}} + fp64store = regInfo{inputs: []regMask{gpspsb, fp64, 0}} + ) + + var WasmOps = []opData{ + {name: "LoweredStaticCall", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "LoweredTailCall", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "LoweredClosureCall", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp, 0}, clobbers: callerSave}, aux: "CallOff", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "LoweredInterCall", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + + {name: "LoweredAddr", argLength: 1, reg: gp11, aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // returns base+aux+auxint, arg0=base + {name: "LoweredMove", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp}}, aux: "Int64"}, // large move. arg0=dst, arg1=src, arg2=mem, auxint=len, returns mem + {name: "LoweredZero", argLength: 2, reg: regInfo{inputs: []regMask{gp}}, aux: "Int64"}, // large zeroing. arg0=start, arg1=mem, auxint=len, returns mem + + {name: "LoweredGetClosurePtr", reg: gp01}, // returns wasm.REG_CTXT, the closure pointer + {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, // returns the PC of the caller of the current function + {name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true}, // returns the SP of the caller of the current function. arg0=mem. 
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gp}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem + {name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: callerSave, outputs: []regMask{gp}}, aux: "Int64"}, // invokes runtime.gcWriteBarrier{auxint}. arg0=mem, auxint=# of buffer entries needed. Returns a pointer to a write barrier buffer. + + // LoweredConvert converts between pointers and integers. + // We have a special op for this so as to not confuse GCCallOff + // (particularly stack maps). It takes a memory arg so it + // gets correctly ordered with respect to GC safepoints. + // arg0=ptr/int arg1=mem, output=int/ptr + // + // TODO(neelance): LoweredConvert should not be necessary any more, since OpConvert does not need to be lowered any more (CL 108496). + {name: "LoweredConvert", argLength: 2, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}}, + + // The following are native WebAssembly instructions, see https://webassembly.github.io/spec/core/syntax/instructions.html + + {name: "Select", asm: "Select", argLength: 3, reg: gp31}, // returns arg0 if arg2 != 0, otherwise returns arg1 + + {name: "I64Load8U", asm: "I64Load8U", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt8"}, // read unsigned 8-bit integer from address arg0+aux, arg1=mem + {name: "I64Load8S", asm: "I64Load8S", argLength: 2, reg: gpload, aux: "Int64", typ: "Int8"}, // read signed 8-bit integer from address arg0+aux, arg1=mem + {name: "I64Load16U", asm: "I64Load16U", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt16"}, // read unsigned 16-bit integer from address arg0+aux, arg1=mem + {name: "I64Load16S", asm: "I64Load16S", argLength: 2, reg: gpload, aux: "Int64", typ: "Int16"}, // read signed 16-bit integer from address arg0+aux, arg1=mem + {name: "I64Load32U", asm: "I64Load32U", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt32"}, // read unsigned 32-bit integer from address arg0+aux, arg1=mem + {name: "I64Load32S", 
asm: "I64Load32S", argLength: 2, reg: gpload, aux: "Int64", typ: "Int32"}, // read signed 32-bit integer from address arg0+aux, arg1=mem + {name: "I64Load", asm: "I64Load", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt64"}, // read 64-bit integer from address arg0+aux, arg1=mem + {name: "I64Store8", asm: "I64Store8", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 8-bit integer arg1 at address arg0+aux, arg2=mem, returns mem + {name: "I64Store16", asm: "I64Store16", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 16-bit integer arg1 at address arg0+aux, arg2=mem, returns mem + {name: "I64Store32", asm: "I64Store32", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 32-bit integer arg1 at address arg0+aux, arg2=mem, returns mem + {name: "I64Store", asm: "I64Store", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 64-bit integer arg1 at address arg0+aux, arg2=mem, returns mem + + {name: "F32Load", asm: "F32Load", argLength: 2, reg: fp32load, aux: "Int64", typ: "Float32"}, // read 32-bit float from address arg0+aux, arg1=mem + {name: "F64Load", asm: "F64Load", argLength: 2, reg: fp64load, aux: "Int64", typ: "Float64"}, // read 64-bit float from address arg0+aux, arg1=mem + {name: "F32Store", asm: "F32Store", argLength: 3, reg: fp32store, aux: "Int64", typ: "Mem"}, // store 32-bit float arg1 at address arg0+aux, arg2=mem, returns mem + {name: "F64Store", asm: "F64Store", argLength: 3, reg: fp64store, aux: "Int64", typ: "Mem"}, // store 64-bit float arg1 at address arg0+aux, arg2=mem, returns mem + + {name: "I64Const", reg: gp01, aux: "Int64", rematerializeable: true, typ: "Int64"}, // returns the constant integer aux + {name: "F32Const", reg: fp32_01, aux: "Float32", rematerializeable: true, typ: "Float32"}, // returns the constant float aux + {name: "F64Const", reg: fp64_01, aux: "Float64", rematerializeable: true, typ: "Float64"}, // returns the constant float aux + + {name: "I64Eqz", asm: "I64Eqz", 
argLength: 1, reg: gp11, typ: "Bool"}, // arg0 == 0 + {name: "I64Eq", asm: "I64Eq", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 == arg1 + {name: "I64Ne", asm: "I64Ne", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 != arg1 + {name: "I64LtS", asm: "I64LtS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 < arg1 (signed) + {name: "I64LtU", asm: "I64LtU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 < arg1 (unsigned) + {name: "I64GtS", asm: "I64GtS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 > arg1 (signed) + {name: "I64GtU", asm: "I64GtU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 > arg1 (unsigned) + {name: "I64LeS", asm: "I64LeS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 <= arg1 (signed) + {name: "I64LeU", asm: "I64LeU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 <= arg1 (unsigned) + {name: "I64GeS", asm: "I64GeS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 >= arg1 (signed) + {name: "I64GeU", asm: "I64GeU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 >= arg1 (unsigned) + + {name: "F32Eq", asm: "F32Eq", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 == arg1 + {name: "F32Ne", asm: "F32Ne", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 != arg1 + {name: "F32Lt", asm: "F32Lt", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 < arg1 + {name: "F32Gt", asm: "F32Gt", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 > arg1 + {name: "F32Le", asm: "F32Le", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 <= arg1 + {name: "F32Ge", asm: "F32Ge", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 >= arg1 + + {name: "F64Eq", asm: "F64Eq", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 == arg1 + {name: "F64Ne", asm: "F64Ne", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 != arg1 + {name: "F64Lt", asm: "F64Lt", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 < arg1 + {name: "F64Gt", asm: "F64Gt", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 > arg1 + {name: "F64Le", asm: "F64Le", argLength: 2, reg: fp64_21gp, 
typ: "Bool"}, // arg0 <= arg1 + {name: "F64Ge", asm: "F64Ge", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 >= arg1 + + {name: "I64Add", asm: "I64Add", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 + arg1 + {name: "I64AddConst", asm: "I64Add", argLength: 1, reg: gp11, aux: "Int64", typ: "Int64"}, // arg0 + aux + {name: "I64Sub", asm: "I64Sub", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 - arg1 + {name: "I64Mul", asm: "I64Mul", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 * arg1 + {name: "I64DivS", asm: "I64DivS", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 / arg1 (signed) + {name: "I64DivU", asm: "I64DivU", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 / arg1 (unsigned) + {name: "I64RemS", asm: "I64RemS", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 % arg1 (signed) + {name: "I64RemU", asm: "I64RemU", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 % arg1 (unsigned) + {name: "I64And", asm: "I64And", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 & arg1 + {name: "I64Or", asm: "I64Or", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 | arg1 + {name: "I64Xor", asm: "I64Xor", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 ^ arg1 + {name: "I64Shl", asm: "I64Shl", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 << (arg1 % 64) + {name: "I64ShrS", asm: "I64ShrS", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 >> (arg1 % 64) (signed) + {name: "I64ShrU", asm: "I64ShrU", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 >> (arg1 % 64) (unsigned) + + {name: "F32Neg", asm: "F32Neg", argLength: 1, reg: fp32_11, typ: "Float32"}, // -arg0 + {name: "F32Add", asm: "F32Add", argLength: 2, reg: fp32_21, typ: "Float32"}, // arg0 + arg1 + {name: "F32Sub", asm: "F32Sub", argLength: 2, reg: fp32_21, typ: "Float32"}, // arg0 - arg1 + {name: "F32Mul", asm: "F32Mul", argLength: 2, reg: fp32_21, typ: "Float32"}, // arg0 * arg1 + {name: "F32Div", asm: "F32Div", argLength: 2, reg: fp32_21, typ: "Float32"}, // arg0 / arg1 + + {name: "F64Neg", asm: "F64Neg", argLength: 1, 
reg: fp64_11, typ: "Float64"}, // -arg0 + {name: "F64Add", asm: "F64Add", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 + arg1 + {name: "F64Sub", asm: "F64Sub", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 - arg1 + {name: "F64Mul", asm: "F64Mul", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 * arg1 + {name: "F64Div", asm: "F64Div", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 / arg1 + + {name: "I64TruncSatF64S", asm: "I64TruncSatF64S", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating) + {name: "I64TruncSatF64U", asm: "I64TruncSatF64U", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to an unsigned integer (saturating) + {name: "I64TruncSatF32S", asm: "I64TruncSatF32S", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating) + {name: "I64TruncSatF32U", asm: "I64TruncSatF32U", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to an unsigned integer (saturating) + {name: "F32ConvertI64S", asm: "F32ConvertI64S", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp32}}, typ: "Float32"}, // converts the signed integer arg0 to a float + {name: "F32ConvertI64U", asm: "F32ConvertI64U", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp32}}, typ: "Float32"}, // converts the unsigned integer arg0 to a float + {name: "F64ConvertI64S", asm: "F64ConvertI64S", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp64}}, typ: "Float64"}, // converts the signed integer arg0 to a float + {name: "F64ConvertI64U", asm: "F64ConvertI64U", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp64}}, typ: "Float64"}, // converts the unsigned integer 
arg0 to a float + {name: "F32DemoteF64", asm: "F32DemoteF64", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{fp32}}, typ: "Float32"}, + {name: "F64PromoteF32", asm: "F64PromoteF32", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{fp64}}, typ: "Float64"}, + + {name: "I64Extend8S", asm: "I64Extend8S", argLength: 1, reg: gp11, typ: "Int64"}, // sign-extend arg0 from 8 to 64 bit + {name: "I64Extend16S", asm: "I64Extend16S", argLength: 1, reg: gp11, typ: "Int64"}, // sign-extend arg0 from 16 to 64 bit + {name: "I64Extend32S", asm: "I64Extend32S", argLength: 1, reg: gp11, typ: "Int64"}, // sign-extend arg0 from 32 to 64 bit + + {name: "F32Sqrt", asm: "F32Sqrt", argLength: 1, reg: fp32_11, typ: "Float32"}, // sqrt(arg0) + {name: "F32Trunc", asm: "F32Trunc", argLength: 1, reg: fp32_11, typ: "Float32"}, // trunc(arg0) + {name: "F32Ceil", asm: "F32Ceil", argLength: 1, reg: fp32_11, typ: "Float32"}, // ceil(arg0) + {name: "F32Floor", asm: "F32Floor", argLength: 1, reg: fp32_11, typ: "Float32"}, // floor(arg0) + {name: "F32Nearest", asm: "F32Nearest", argLength: 1, reg: fp32_11, typ: "Float32"}, // round(arg0) + {name: "F32Abs", asm: "F32Abs", argLength: 1, reg: fp32_11, typ: "Float32"}, // abs(arg0) + {name: "F32Copysign", asm: "F32Copysign", argLength: 2, reg: fp32_21, typ: "Float32"}, // copysign(arg0, arg1) + + {name: "F64Sqrt", asm: "F64Sqrt", argLength: 1, reg: fp64_11, typ: "Float64"}, // sqrt(arg0) + {name: "F64Trunc", asm: "F64Trunc", argLength: 1, reg: fp64_11, typ: "Float64"}, // trunc(arg0) + {name: "F64Ceil", asm: "F64Ceil", argLength: 1, reg: fp64_11, typ: "Float64"}, // ceil(arg0) + {name: "F64Floor", asm: "F64Floor", argLength: 1, reg: fp64_11, typ: "Float64"}, // floor(arg0) + {name: "F64Nearest", asm: "F64Nearest", argLength: 1, reg: fp64_11, typ: "Float64"}, // round(arg0) + {name: "F64Abs", asm: "F64Abs", argLength: 1, reg: fp64_11, typ: "Float64"}, // abs(arg0) + {name: "F64Copysign", asm: "F64Copysign", 
argLength: 2, reg: fp64_21, typ: "Float64"}, // copysign(arg0, arg1) + + {name: "I64Ctz", asm: "I64Ctz", argLength: 1, reg: gp11, typ: "Int64"}, // ctz(arg0) + {name: "I64Clz", asm: "I64Clz", argLength: 1, reg: gp11, typ: "Int64"}, // clz(arg0) + {name: "I32Rotl", asm: "I32Rotl", argLength: 2, reg: gp21, typ: "Int32"}, // rotl(arg0, arg1) + {name: "I64Rotl", asm: "I64Rotl", argLength: 2, reg: gp21, typ: "Int64"}, // rotl(arg0, arg1) + {name: "I64Popcnt", asm: "I64Popcnt", argLength: 1, reg: gp11, typ: "Int64"}, // popcnt(arg0) + } + + archs = append(archs, arch{ + name: "Wasm", + pkg: "cmd/internal/obj/wasm", + genfile: "../../wasm/ssa.go", + ops: WasmOps, + blocks: nil, + regnames: regNamesWasm, + gpregmask: gp, + fpregmask: fp32 | fp64, + fp32regmask: fp32, + fp64regmask: fp64, + framepointerreg: -1, // not used + linkreg: -1, // not used + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/allocators.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/allocators.go new file mode 100644 index 0000000000000000000000000000000000000000..5869a61e8238a17ccb40587825ce00c6a7cecd15 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/allocators.go @@ -0,0 +1,229 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// TODO: should we share backing storage for similarly-shaped types? +// e.g. []*Value and []*Block, or even []int32 and []bool. + +import ( + "bytes" + "fmt" + "go/format" + "io" + "log" + "os" +) + +type allocator struct { + name string // name for alloc/free functions + typ string // the type they return/accept + mak string // code to make a new object (takes power-of-2 size as fmt arg) + capacity string // code to calculate the capacity of an object. Should always report a power of 2. 
+ resize string // code to shrink to sub-power-of-two size (takes size as fmt arg) + clear string // code for clearing object before putting it on the free list + minLog int // log_2 of minimum allocation size + maxLog int // log_2 of maximum allocation size +} + +type derived struct { + name string // name for alloc/free functions + typ string // the type they return/accept + base string // underlying allocator +} + +func genAllocators() { + allocators := []allocator{ + { + name: "ValueSlice", + typ: "[]*Value", + capacity: "cap(%s)", + mak: "make([]*Value, %s)", + resize: "%s[:%s]", + clear: "for i := range %[1]s {\n%[1]s[i] = nil\n}", + minLog: 5, + maxLog: 32, + }, + { + name: "Int64Slice", + typ: "[]int64", + capacity: "cap(%s)", + mak: "make([]int64, %s)", + resize: "%s[:%s]", + clear: "for i := range %[1]s {\n%[1]s[i] = 0\n}", + minLog: 5, + maxLog: 32, + }, + { + name: "SparseSet", + typ: "*sparseSet", + capacity: "%s.cap()", + mak: "newSparseSet(%s)", + resize: "", // larger-sized sparse sets are ok + clear: "%s.clear()", + minLog: 5, + maxLog: 32, + }, + { + name: "SparseMap", + typ: "*sparseMap", + capacity: "%s.cap()", + mak: "newSparseMap(%s)", + resize: "", // larger-sized sparse maps are ok + clear: "%s.clear()", + minLog: 5, + maxLog: 32, + }, + { + name: "SparseMapPos", + typ: "*sparseMapPos", + capacity: "%s.cap()", + mak: "newSparseMapPos(%s)", + resize: "", // larger-sized sparse maps are ok + clear: "%s.clear()", + minLog: 5, + maxLog: 32, + }, + } + deriveds := []derived{ + { + name: "BlockSlice", + typ: "[]*Block", + base: "ValueSlice", + }, + { + name: "IntSlice", + typ: "[]int", + base: "Int64Slice", + }, + { + name: "Int32Slice", + typ: "[]int32", + base: "Int64Slice", + }, + { + name: "Int8Slice", + typ: "[]int8", + base: "Int64Slice", + }, + { + name: "BoolSlice", + typ: "[]bool", + base: "Int64Slice", + }, + { + name: "IDSlice", + typ: "[]ID", + base: "Int64Slice", + }, + } + + w := new(bytes.Buffer) + fmt.Fprintf(w, "// Code generated 
from _gen/allocators.go using 'go generate'; DO NOT EDIT.\n") + fmt.Fprintln(w) + fmt.Fprintln(w, "package ssa") + + fmt.Fprintln(w, "import (") + fmt.Fprintln(w, "\"internal/unsafeheader\"") + fmt.Fprintln(w, "\"math/bits\"") + fmt.Fprintln(w, "\"sync\"") + fmt.Fprintln(w, "\"unsafe\"") + fmt.Fprintln(w, ")") + for _, a := range allocators { + genAllocator(w, a) + } + for _, d := range deriveds { + for _, base := range allocators { + if base.name == d.base { + genDerived(w, d, base) + break + } + } + } + // gofmt result + b := w.Bytes() + var err error + b, err = format.Source(b) + if err != nil { + fmt.Printf("%s\n", w.Bytes()) + panic(err) + } + + if err := os.WriteFile("../allocators.go", b, 0666); err != nil { + log.Fatalf("can't write output: %v\n", err) + } +} +func genAllocator(w io.Writer, a allocator) { + fmt.Fprintf(w, "var poolFree%s [%d]sync.Pool\n", a.name, a.maxLog-a.minLog) + fmt.Fprintf(w, "func (c *Cache) alloc%s(n int) %s {\n", a.name, a.typ) + fmt.Fprintf(w, "var s %s\n", a.typ) + fmt.Fprintf(w, "n2 := n\n") + fmt.Fprintf(w, "if n2 < %d { n2 = %d }\n", 1<main_test.go <<-EOF + //go:build ignore + + package main + + import "testing" + + func TestCoverage(t *testing.T) { main() } +EOF + +go test -run='^TestCoverage$' -coverprofile=cover.out "$@" *.go + +rm -f main_test.go diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/dec.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/dec.rules new file mode 100644 index 0000000000000000000000000000000000000000..7944947e062b9134a320c82ac57327bb95f2ea4e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/dec.rules @@ -0,0 +1,201 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains rules to decompose builtin compound types +// (complex,string,slice,interface) into their constituent +// types. 
These rules work together with the decomposeBuiltIn +// pass which handles phis of these types. + +(Store {t} _ _ mem) && t.Size() == 0 => mem + +// complex ops +(ComplexReal (ComplexMake real _ )) => real +(ComplexImag (ComplexMake _ imag )) => imag + +(Load ptr mem) && t.IsComplex() && t.Size() == 8 => + (ComplexMake + (Load ptr mem) + (Load + (OffPtr [4] ptr) + mem) + ) +(Store {t} dst (ComplexMake real imag) mem) && t.Size() == 8 => + (Store {typ.Float32} + (OffPtr [4] dst) + imag + (Store {typ.Float32} dst real mem)) +(Load ptr mem) && t.IsComplex() && t.Size() == 16 => + (ComplexMake + (Load ptr mem) + (Load + (OffPtr [8] ptr) + mem) + ) +(Store {t} dst (ComplexMake real imag) mem) && t.Size() == 16 => + (Store {typ.Float64} + (OffPtr [8] dst) + imag + (Store {typ.Float64} dst real mem)) + +// string ops +(StringPtr (StringMake ptr _)) => ptr +(StringLen (StringMake _ len)) => len + +(Load ptr mem) && t.IsString() => + (StringMake + (Load ptr mem) + (Load + (OffPtr [config.PtrSize] ptr) + mem)) +(Store dst (StringMake ptr len) mem) => + (Store {typ.Int} + (OffPtr [config.PtrSize] dst) + len + (Store {typ.BytePtr} dst ptr mem)) + +// slice ops +(SlicePtr (SliceMake ptr _ _ )) => ptr +(SliceLen (SliceMake _ len _)) => len +(SliceCap (SliceMake _ _ cap)) => cap +(SlicePtrUnchecked (SliceMake ptr _ _ )) => ptr + +(Load ptr mem) && t.IsSlice() => + (SliceMake + (Load ptr mem) + (Load + (OffPtr [config.PtrSize] ptr) + mem) + (Load + (OffPtr [2*config.PtrSize] ptr) + mem)) +(Store {t} dst (SliceMake ptr len cap) mem) => + (Store {typ.Int} + (OffPtr [2*config.PtrSize] dst) + cap + (Store {typ.Int} + (OffPtr [config.PtrSize] dst) + len + (Store {t.Elem().PtrTo()} dst ptr mem))) + +// interface ops +(ITab (IMake itab _)) => itab +(IData (IMake _ data)) => data + +(Load ptr mem) && t.IsInterface() => + (IMake + (Load ptr mem) + (Load + (OffPtr [config.PtrSize] ptr) + mem)) +(Store dst (IMake itab data) mem) => + (Store {typ.BytePtr} + (OffPtr [config.PtrSize] dst) + 
data + (Store {typ.Uintptr} dst itab mem)) + +// Helpers for expand calls +// Some of these are copied from generic.rules + +(IMake _typ (StructMake1 val)) => (IMake _typ val) +(StructSelect [0] (IData x)) => (IData x) + +(StructSelect (StructMake1 x)) => x +(StructSelect [0] (StructMake2 x _)) => x +(StructSelect [1] (StructMake2 _ x)) => x +(StructSelect [0] (StructMake3 x _ _)) => x +(StructSelect [1] (StructMake3 _ x _)) => x +(StructSelect [2] (StructMake3 _ _ x)) => x +(StructSelect [0] (StructMake4 x _ _ _)) => x +(StructSelect [1] (StructMake4 _ x _ _)) => x +(StructSelect [2] (StructMake4 _ _ x _)) => x +(StructSelect [3] (StructMake4 _ _ _ x)) => x + +// Special case coming from immediate interface rewriting +// Typical case: (StructSelect [0] (IData (IMake typ dat)) rewrites to (StructSelect [0] dat) +// but because the interface is immediate, the type of "IData" is a one-element struct containing +// a pointer that is not the pointer type of dat (can be a *uint8). +// More annoying case: (ArraySelect[0] (StructSelect[0] isAPtr)) +// There, result of the StructSelect is an Array (not a pointer) and +// the pre-rewrite input to the ArraySelect is a struct, not a pointer. +(StructSelect [0] x) && x.Type.IsPtrShaped() => x +(ArraySelect [0] x) && x.Type.IsPtrShaped() => x + +// These, too. Bits is bits. 
+(ArrayMake1 x) && x.Type.IsPtrShaped() => x +(StructMake1 x) && x.Type.IsPtrShaped() => x + +(Store dst (StructMake1 f0) mem) => + (Store {t.FieldType(0)} (OffPtr [0] dst) f0 mem) +(Store dst (StructMake2 f0 f1) mem) => + (Store {t.FieldType(1)} + (OffPtr [t.FieldOff(1)] dst) + f1 + (Store {t.FieldType(0)} + (OffPtr [0] dst) + f0 mem)) +(Store dst (StructMake3 f0 f1 f2) mem) => + (Store {t.FieldType(2)} + (OffPtr [t.FieldOff(2)] dst) + f2 + (Store {t.FieldType(1)} + (OffPtr [t.FieldOff(1)] dst) + f1 + (Store {t.FieldType(0)} + (OffPtr [0] dst) + f0 mem))) +(Store dst (StructMake4 f0 f1 f2 f3) mem) => + (Store {t.FieldType(3)} + (OffPtr [t.FieldOff(3)] dst) + f3 + (Store {t.FieldType(2)} + (OffPtr [t.FieldOff(2)] dst) + f2 + (Store {t.FieldType(1)} + (OffPtr [t.FieldOff(1)] dst) + f1 + (Store {t.FieldType(0)} + (OffPtr [0] dst) + f0 mem)))) + +(ArraySelect (ArrayMake1 x)) => x +(ArraySelect [0] (IData x)) => (IData x) + +(Store dst (ArrayMake1 e) mem) => (Store {e.Type} dst e mem) + +// NOTE removed must-not-be-SSA condition. +(ArraySelect [i] x:(Load ptr mem)) => + @x.Block (Load (OffPtr [t.Elem().Size()*i] ptr) mem) + +(StringPtr x:(Load ptr mem)) && t.IsString() => @x.Block (Load ptr mem) +(StringLen x:(Load ptr mem)) && t.IsString() => @x.Block (Load + (OffPtr [config.PtrSize] ptr) + mem) + +// NOTE removed must-not-be-SSA condition. 
+(StructSelect [i] x:(Load ptr mem)) => + @x.Block (Load (OffPtr [t.FieldOff(int(i))] ptr) mem) + +(ITab x:(Load ptr mem)) && t.IsInterface() => @x.Block (Load ptr mem) + +(IData x:(Load ptr mem)) && t.IsInterface() => @x.Block (Load + (OffPtr [config.PtrSize] ptr) + mem) + +(SlicePtr x:(Load ptr mem)) && t.IsSlice() => @x.Block (Load ptr mem) +(SliceLen x:(Load ptr mem)) && t.IsSlice() => @x.Block (Load + (OffPtr [config.PtrSize] ptr) + mem) +(SliceCap x:(Load ptr mem)) && t.IsSlice() => @x.Block (Load + (OffPtr [2*config.PtrSize] ptr) + mem) + +(ComplexReal x:(Load ptr mem)) && t.IsComplex() && t.Size() == 8 => @x.Block (Load ptr mem) +(ComplexImag x:(Load ptr mem)) && t.IsComplex() && t.Size() == 8 => @x.Block (Load + (OffPtr [4] ptr) + mem) + +(ComplexReal x:(Load ptr mem)) && t.IsComplex() && t.Size() == 16 => @x.Block (Load ptr mem) +(ComplexImag x:(Load ptr mem)) && t.IsComplex() && t.Size() == 16 => @x.Block (Load + (OffPtr [8] ptr) + mem) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/dec64.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/dec64.rules new file mode 100644 index 0000000000000000000000000000000000000000..ba776af1a705c623fca0c16702b34dd09cf158dd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/dec64.rules @@ -0,0 +1,401 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains rules to decompose [u]int64 types on 32-bit +// architectures. These rules work together with the decomposeBuiltIn +// pass which handles phis of these typ. 
+ +(Int64Hi (Int64Make hi _)) => hi +(Int64Lo (Int64Make _ lo)) => lo + +(Load ptr mem) && is64BitInt(t) && !config.BigEndian && t.IsSigned() => + (Int64Make + (Load (OffPtr [4] ptr) mem) + (Load ptr mem)) + +(Load ptr mem) && is64BitInt(t) && !config.BigEndian && !t.IsSigned() => + (Int64Make + (Load (OffPtr [4] ptr) mem) + (Load ptr mem)) + +(Load ptr mem) && is64BitInt(t) && config.BigEndian && t.IsSigned() => + (Int64Make + (Load ptr mem) + (Load (OffPtr [4] ptr) mem)) + +(Load ptr mem) && is64BitInt(t) && config.BigEndian && !t.IsSigned() => + (Int64Make + (Load ptr mem) + (Load (OffPtr [4] ptr) mem)) + +(Store {t} dst (Int64Make hi lo) mem) && t.Size() == 8 && !config.BigEndian => + (Store {hi.Type} + (OffPtr [4] dst) + hi + (Store {lo.Type} dst lo mem)) + +(Store {t} dst (Int64Make hi lo) mem) && t.Size() == 8 && config.BigEndian => + (Store {lo.Type} + (OffPtr [4] dst) + lo + (Store {hi.Type} dst hi mem)) + +// These are not enabled during decomposeBuiltin if late call expansion, but they are always enabled for softFloat +(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") => + (Int64Make + (Arg {n} [off+4]) + (Arg {n} [off])) +(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") => + (Int64Make + (Arg {n} [off+4]) + (Arg {n} [off])) + +(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") => + (Int64Make + (Arg {n} [off]) + (Arg {n} [off+4])) +(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") => + (Int64Make + (Arg {n} [off]) + (Arg {n} [off+4])) + +(Add64 x y) => + (Int64Make + (Add32withcarry + (Int64Hi x) + (Int64Hi y) + (Select1 (Add32carry (Int64Lo x) (Int64Lo y)))) + (Select0 (Add32carry (Int64Lo x) (Int64Lo y)))) + +(Sub64 x y) => + (Int64Make + 
(Sub32withcarry + (Int64Hi x) + (Int64Hi y) + (Select1 (Sub32carry (Int64Lo x) (Int64Lo y)))) + (Select0 (Sub32carry (Int64Lo x) (Int64Lo y)))) + +(Mul64 x y) => + (Int64Make + (Add32 + (Mul32 (Int64Lo x) (Int64Hi y)) + (Add32 + (Mul32 (Int64Hi x) (Int64Lo y)) + (Select0 (Mul32uhilo (Int64Lo x) (Int64Lo y))))) + (Select1 (Mul32uhilo (Int64Lo x) (Int64Lo y)))) + +(And64 x y) => + (Int64Make + (And32 (Int64Hi x) (Int64Hi y)) + (And32 (Int64Lo x) (Int64Lo y))) + +(Or64 x y) => + (Int64Make + (Or32 (Int64Hi x) (Int64Hi y)) + (Or32 (Int64Lo x) (Int64Lo y))) + +(Xor64 x y) => + (Int64Make + (Xor32 (Int64Hi x) (Int64Hi y)) + (Xor32 (Int64Lo x) (Int64Lo y))) + +(Neg64 x) => (Sub64 (Const64 [0]) x) + +(Com64 x) => + (Int64Make + (Com32 (Int64Hi x)) + (Com32 (Int64Lo x))) + +// Sadly, just because we know that x is non-zero, +// we don't know whether either component is, +// so just treat Ctz64NonZero the same as Ctz64. +(Ctz64NonZero ...) => (Ctz64 ...) + +(Ctz64 x) => + (Add32 + (Ctz32 (Int64Lo x)) + (And32 + (Com32 (Zeromask (Int64Lo x))) + (Ctz32 (Int64Hi x)))) + +(BitLen64 x) => + (Add32 + (BitLen32 (Int64Hi x)) + (BitLen32 + (Or32 + (Int64Lo x) + (Zeromask (Int64Hi x))))) + +(Bswap64 x) => + (Int64Make + (Bswap32 (Int64Lo x)) + (Bswap32 (Int64Hi x))) + +(SignExt32to64 x) => (Int64Make (Signmask x) x) +(SignExt16to64 x) => (SignExt32to64 (SignExt16to32 x)) +(SignExt8to64 x) => (SignExt32to64 (SignExt8to32 x)) + +(ZeroExt32to64 x) => (Int64Make (Const32 [0]) x) +(ZeroExt16to64 x) => (ZeroExt32to64 (ZeroExt16to32 x)) +(ZeroExt8to64 x) => (ZeroExt32to64 (ZeroExt8to32 x)) + +(Trunc64to32 (Int64Make _ lo)) => lo +(Trunc64to16 (Int64Make _ lo)) => (Trunc32to16 lo) +(Trunc64to8 (Int64Make _ lo)) => (Trunc32to8 lo) +// Most general +(Trunc64to32 x) => (Int64Lo x) +(Trunc64to16 x) => (Trunc32to16 (Int64Lo x)) +(Trunc64to8 x) => (Trunc32to8 (Int64Lo x)) + +(Lsh32x64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0]) +(Rsh32x64 x (Int64Make (Const32 [c]) _)) && c != 0 => 
(Signmask x) +(Rsh32Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0]) +(Lsh16x64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0]) +(Rsh16x64 x (Int64Make (Const32 [c]) _)) && c != 0 => (Signmask (SignExt16to32 x)) +(Rsh16Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0]) +(Lsh8x64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0]) +(Rsh8x64 x (Int64Make (Const32 [c]) _)) && c != 0 => (Signmask (SignExt8to32 x)) +(Rsh8Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0]) + +(Lsh32x64 [c] x (Int64Make (Const32 [0]) lo)) => (Lsh32x32 [c] x lo) +(Rsh32x64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh32x32 [c] x lo) +(Rsh32Ux64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh32Ux32 [c] x lo) +(Lsh16x64 [c] x (Int64Make (Const32 [0]) lo)) => (Lsh16x32 [c] x lo) +(Rsh16x64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh16x32 [c] x lo) +(Rsh16Ux64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh16Ux32 [c] x lo) +(Lsh8x64 [c] x (Int64Make (Const32 [0]) lo)) => (Lsh8x32 [c] x lo) +(Rsh8x64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh8x32 [c] x lo) +(Rsh8Ux64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh8Ux32 [c] x lo) + +(Lsh64x64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const64 [0]) +(Rsh64x64 x (Int64Make (Const32 [c]) _)) && c != 0 => (Int64Make (Signmask (Int64Hi x)) (Signmask (Int64Hi x))) +(Rsh64Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const64 [0]) + +(Lsh64x64 [c] x (Int64Make (Const32 [0]) lo)) => (Lsh64x32 [c] x lo) +(Rsh64x64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh64x32 [c] x lo) +(Rsh64Ux64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh64Ux32 [c] x lo) + +// turn x64 non-constant shifts to x32 shifts +// if high 32-bit of the shift is nonzero, make a huge shift +(Lsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 => + (Lsh64x32 x (Or32 (Zeromask hi) lo)) +(Rsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 => + (Rsh64x32 x (Or32 (Zeromask hi) lo)) +(Rsh64Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 => + (Rsh64Ux32 
x (Or32 (Zeromask hi) lo)) +(Lsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 => + (Lsh32x32 x (Or32 (Zeromask hi) lo)) +(Rsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 => + (Rsh32x32 x (Or32 (Zeromask hi) lo)) +(Rsh32Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 => + (Rsh32Ux32 x (Or32 (Zeromask hi) lo)) +(Lsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 => + (Lsh16x32 x (Or32 (Zeromask hi) lo)) +(Rsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 => + (Rsh16x32 x (Or32 (Zeromask hi) lo)) +(Rsh16Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 => + (Rsh16Ux32 x (Or32 (Zeromask hi) lo)) +(Lsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 => + (Lsh8x32 x (Or32 (Zeromask hi) lo)) +(Rsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 => + (Rsh8x32 x (Or32 (Zeromask hi) lo)) +(Rsh8Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 => + (Rsh8Ux32 x (Or32 (Zeromask hi) lo)) + +// Most general +(Lsh64x64 x y) => (Lsh64x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) +(Rsh64x64 x y) => (Rsh64x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) +(Rsh64Ux64 x y) => (Rsh64Ux32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) +(Lsh32x64 x y) => (Lsh32x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) +(Rsh32x64 x y) => (Rsh32x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) +(Rsh32Ux64 x y) => (Rsh32Ux32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) +(Lsh16x64 x y) => (Lsh16x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) +(Rsh16x64 x y) => (Rsh16x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) +(Rsh16Ux64 x y) => (Rsh16Ux32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) +(Lsh8x64 x y) => (Lsh8x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) +(Rsh8x64 x y) => (Rsh8x32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) +(Rsh8Ux64 x y) => (Rsh8Ux32 x (Or32 (Zeromask (Int64Hi y)) (Int64Lo y))) + +(RotateLeft64 x (Int64Make hi lo)) => (RotateLeft64 x lo) +(RotateLeft32 x (Int64Make hi lo)) => (RotateLeft32 x lo) +(RotateLeft16 x (Int64Make hi lo)) => (RotateLeft16 x lo) +(RotateLeft8 x 
(Int64Make hi lo)) => (RotateLeft8 x lo) + +// Clean up constants a little +(Or32 (Zeromask (Const32 [c])) y) && c == 0 => y +(Or32 (Zeromask (Const32 [c])) y) && c != 0 => (Const32 [-1]) + +// 64x left shift +// result.hi = hi<>(32-s) | lo<<(s-32) // >> is unsigned, large shifts result 0 +// result.lo = lo< + (Int64Make + (Or32 + (Or32 + (Lsh32x32 (Int64Hi x) s) + (Rsh32Ux32 + (Int64Lo x) + (Sub32 (Const32 [32]) s))) + (Lsh32x32 + (Int64Lo x) + (Sub32 s (Const32 [32])))) + (Lsh32x32 (Int64Lo x) s)) +(Lsh64x16 x s) => + (Int64Make + (Or32 + (Or32 + (Lsh32x16 (Int64Hi x) s) + (Rsh32Ux16 + (Int64Lo x) + (Sub16 (Const16 [32]) s))) + (Lsh32x16 + (Int64Lo x) + (Sub16 s (Const16 [32])))) + (Lsh32x16 (Int64Lo x) s)) +(Lsh64x8 x s) => + (Int64Make + (Or32 + (Or32 + (Lsh32x8 (Int64Hi x) s) + (Rsh32Ux8 + (Int64Lo x) + (Sub8 (Const8 [32]) s))) + (Lsh32x8 + (Int64Lo x) + (Sub8 s (Const8 [32])))) + (Lsh32x8 (Int64Lo x) s)) + +// 64x unsigned right shift +// result.hi = hi>>s +// result.lo = lo>>s | hi<<(32-s) | hi>>(s-32) // >> is unsigned, large shifts result 0 +(Rsh64Ux32 x s) => + (Int64Make + (Rsh32Ux32 (Int64Hi x) s) + (Or32 + (Or32 + (Rsh32Ux32 (Int64Lo x) s) + (Lsh32x32 + (Int64Hi x) + (Sub32 (Const32 [32]) s))) + (Rsh32Ux32 + (Int64Hi x) + (Sub32 s (Const32 [32]))))) +(Rsh64Ux16 x s) => + (Int64Make + (Rsh32Ux16 (Int64Hi x) s) + (Or32 + (Or32 + (Rsh32Ux16 (Int64Lo x) s) + (Lsh32x16 + (Int64Hi x) + (Sub16 (Const16 [32]) s))) + (Rsh32Ux16 + (Int64Hi x) + (Sub16 s (Const16 [32]))))) +(Rsh64Ux8 x s) => + (Int64Make + (Rsh32Ux8 (Int64Hi x) s) + (Or32 + (Or32 + (Rsh32Ux8 (Int64Lo x) s) + (Lsh32x8 + (Int64Hi x) + (Sub8 (Const8 [32]) s))) + (Rsh32Ux8 + (Int64Hi x) + (Sub8 s (Const8 [32]))))) + +// 64x signed right shift +// result.hi = hi>>s +// result.lo = lo>>s | hi<<(32-s) | (hi>>(s-32))&zeromask(s>>5) // hi>>(s-32) is signed, large shifts result 0/-1 +(Rsh64x32 x s) => + (Int64Make + (Rsh32x32 (Int64Hi x) s) + (Or32 + (Or32 + (Rsh32Ux32 (Int64Lo x) s) + (Lsh32x32 + (Int64Hi 
x) + (Sub32 (Const32 [32]) s))) + (And32 + (Rsh32x32 + (Int64Hi x) + (Sub32 s (Const32 [32]))) + (Zeromask + (Rsh32Ux32 s (Const32 [5])))))) +(Rsh64x16 x s) => + (Int64Make + (Rsh32x16 (Int64Hi x) s) + (Or32 + (Or32 + (Rsh32Ux16 (Int64Lo x) s) + (Lsh32x16 + (Int64Hi x) + (Sub16 (Const16 [32]) s))) + (And32 + (Rsh32x16 + (Int64Hi x) + (Sub16 s (Const16 [32]))) + (Zeromask + (ZeroExt16to32 + (Rsh16Ux32 s (Const32 [5]))))))) +(Rsh64x8 x s) => + (Int64Make + (Rsh32x8 (Int64Hi x) s) + (Or32 + (Or32 + (Rsh32Ux8 (Int64Lo x) s) + (Lsh32x8 + (Int64Hi x) + (Sub8 (Const8 [32]) s))) + (And32 + (Rsh32x8 + (Int64Hi x) + (Sub8 s (Const8 [32]))) + (Zeromask + (ZeroExt8to32 + (Rsh8Ux32 s (Const32 [5]))))))) + +(Const64 [c]) && t.IsSigned() => + (Int64Make (Const32 [int32(c>>32)]) (Const32 [int32(c)])) +(Const64 [c]) && !t.IsSigned() => + (Int64Make (Const32 [int32(c>>32)]) (Const32 [int32(c)])) + +(Eq64 x y) => + (AndB + (Eq32 (Int64Hi x) (Int64Hi y)) + (Eq32 (Int64Lo x) (Int64Lo y))) + +(Neq64 x y) => + (OrB + (Neq32 (Int64Hi x) (Int64Hi y)) + (Neq32 (Int64Lo x) (Int64Lo y))) + +(Less64U x y) => + (OrB + (Less32U (Int64Hi x) (Int64Hi y)) + (AndB + (Eq32 (Int64Hi x) (Int64Hi y)) + (Less32U (Int64Lo x) (Int64Lo y)))) + +(Leq64U x y) => + (OrB + (Less32U (Int64Hi x) (Int64Hi y)) + (AndB + (Eq32 (Int64Hi x) (Int64Hi y)) + (Leq32U (Int64Lo x) (Int64Lo y)))) + +(Less64 x y) => + (OrB + (Less32 (Int64Hi x) (Int64Hi y)) + (AndB + (Eq32 (Int64Hi x) (Int64Hi y)) + (Less32U (Int64Lo x) (Int64Lo y)))) + +(Leq64 x y) => + (OrB + (Less32 (Int64Hi x) (Int64Hi y)) + (AndB + (Eq32 (Int64Hi x) (Int64Hi y)) + (Leq32U (Int64Lo x) (Int64Lo y)))) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/dec64Ops.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/dec64Ops.go new file mode 100644 index 0000000000000000000000000000000000000000..bba218ed40903f93f56ef28b7ba3d618f2303f30 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/dec64Ops.go @@ -0,0 +1,18 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +var dec64Ops = []opData{} + +var dec64Blocks = []blockData{} + +func init() { + archs = append(archs, arch{ + name: "dec64", + ops: dec64Ops, + blocks: dec64Blocks, + generic: true, + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/decOps.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/decOps.go new file mode 100644 index 0000000000000000000000000000000000000000..0cc11cb4c0f78c095a46b8e02dd4aa9d05deafde --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/decOps.go @@ -0,0 +1,18 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +var decOps = []opData{} + +var decBlocks = []blockData{} + +func init() { + archs = append(archs, arch{ + name: "dec", + ops: decOps, + blocks: decBlocks, + generic: true, + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/generic.rules b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/generic.rules new file mode 100644 index 0000000000000000000000000000000000000000..aeda62591a7750945aaee1928c3f1a471fa10d5e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/generic.rules @@ -0,0 +1,2756 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Simplifications that apply to all backend architectures. 
As an example, this +// Go source code +// +// y := 0 * x +// +// can be translated into y := 0 without losing any information, which saves a +// pointless multiplication instruction. Other .rules files in this directory +// (for example AMD64.rules) contain rules specific to the architecture in the +// filename. The rules here apply to every architecture. +// +// The code for parsing this file lives in rulegen.go; this file generates +// ssa/rewritegeneric.go. + +// values are specified using the following format: +// (op [auxint] {aux} arg0 arg1 ...) +// the type, aux, and auxint fields are optional +// on the matching side +// - the type, aux, and auxint fields must match if they are specified. +// - the first occurrence of a variable defines that variable. Subsequent +// uses must match (be == to) the first use. +// - v is defined to be the value matched. +// - an additional conditional can be provided after the match pattern with "&&". +// on the generated side +// - the type of the top-level expression is the same as the one on the left-hand side. +// - the type of any subexpressions must be specified explicitly (or +// be specified in the op's type field). +// - auxint will be 0 if not specified. +// - aux will be nil if not specified. + +// blocks are specified using the following format: +// (kind controlvalue succ0 succ1 ...) +// controlvalue must be "nil" or a value expression +// succ* fields must be variables +// For now, the generated successors must be a permutation of the matched successors. 
+ +// constant folding +(Trunc16to8 (Const16 [c])) => (Const8 [int8(c)]) +(Trunc32to8 (Const32 [c])) => (Const8 [int8(c)]) +(Trunc32to16 (Const32 [c])) => (Const16 [int16(c)]) +(Trunc64to8 (Const64 [c])) => (Const8 [int8(c)]) +(Trunc64to16 (Const64 [c])) => (Const16 [int16(c)]) +(Trunc64to32 (Const64 [c])) => (Const32 [int32(c)]) +(Cvt64Fto32F (Const64F [c])) => (Const32F [float32(c)]) +(Cvt32Fto64F (Const32F [c])) => (Const64F [float64(c)]) +(Cvt32to32F (Const32 [c])) => (Const32F [float32(c)]) +(Cvt32to64F (Const32 [c])) => (Const64F [float64(c)]) +(Cvt64to32F (Const64 [c])) => (Const32F [float32(c)]) +(Cvt64to64F (Const64 [c])) => (Const64F [float64(c)]) +(Cvt32Fto32 (Const32F [c])) => (Const32 [int32(c)]) +(Cvt32Fto64 (Const32F [c])) => (Const64 [int64(c)]) +(Cvt64Fto32 (Const64F [c])) => (Const32 [int32(c)]) +(Cvt64Fto64 (Const64F [c])) => (Const64 [int64(c)]) +(Round32F x:(Const32F)) => x +(Round64F x:(Const64F)) => x +(CvtBoolToUint8 (ConstBool [false])) => (Const8 [0]) +(CvtBoolToUint8 (ConstBool [true])) => (Const8 [1]) + +(Trunc16to8 (ZeroExt8to16 x)) => x +(Trunc32to8 (ZeroExt8to32 x)) => x +(Trunc32to16 (ZeroExt8to32 x)) => (ZeroExt8to16 x) +(Trunc32to16 (ZeroExt16to32 x)) => x +(Trunc64to8 (ZeroExt8to64 x)) => x +(Trunc64to16 (ZeroExt8to64 x)) => (ZeroExt8to16 x) +(Trunc64to16 (ZeroExt16to64 x)) => x +(Trunc64to32 (ZeroExt8to64 x)) => (ZeroExt8to32 x) +(Trunc64to32 (ZeroExt16to64 x)) => (ZeroExt16to32 x) +(Trunc64to32 (ZeroExt32to64 x)) => x +(Trunc16to8 (SignExt8to16 x)) => x +(Trunc32to8 (SignExt8to32 x)) => x +(Trunc32to16 (SignExt8to32 x)) => (SignExt8to16 x) +(Trunc32to16 (SignExt16to32 x)) => x +(Trunc64to8 (SignExt8to64 x)) => x +(Trunc64to16 (SignExt8to64 x)) => (SignExt8to16 x) +(Trunc64to16 (SignExt16to64 x)) => x +(Trunc64to32 (SignExt8to64 x)) => (SignExt8to32 x) +(Trunc64to32 (SignExt16to64 x)) => (SignExt16to32 x) +(Trunc64to32 (SignExt32to64 x)) => x + +(ZeroExt8to16 (Const8 [c])) => (Const16 [int16( uint8(c))]) +(ZeroExt8to32 (Const8 
[c])) => (Const32 [int32( uint8(c))]) +(ZeroExt8to64 (Const8 [c])) => (Const64 [int64( uint8(c))]) +(ZeroExt16to32 (Const16 [c])) => (Const32 [int32(uint16(c))]) +(ZeroExt16to64 (Const16 [c])) => (Const64 [int64(uint16(c))]) +(ZeroExt32to64 (Const32 [c])) => (Const64 [int64(uint32(c))]) +(SignExt8to16 (Const8 [c])) => (Const16 [int16(c)]) +(SignExt8to32 (Const8 [c])) => (Const32 [int32(c)]) +(SignExt8to64 (Const8 [c])) => (Const64 [int64(c)]) +(SignExt16to32 (Const16 [c])) => (Const32 [int32(c)]) +(SignExt16to64 (Const16 [c])) => (Const64 [int64(c)]) +(SignExt32to64 (Const32 [c])) => (Const64 [int64(c)]) + +(Neg8 (Const8 [c])) => (Const8 [-c]) +(Neg16 (Const16 [c])) => (Const16 [-c]) +(Neg32 (Const32 [c])) => (Const32 [-c]) +(Neg64 (Const64 [c])) => (Const64 [-c]) +(Neg32F (Const32F [c])) && c != 0 => (Const32F [-c]) +(Neg64F (Const64F [c])) && c != 0 => (Const64F [-c]) + +(Add8 (Const8 [c]) (Const8 [d])) => (Const8 [c+d]) +(Add16 (Const16 [c]) (Const16 [d])) => (Const16 [c+d]) +(Add32 (Const32 [c]) (Const32 [d])) => (Const32 [c+d]) +(Add64 (Const64 [c]) (Const64 [d])) => (Const64 [c+d]) +(Add32F (Const32F [c]) (Const32F [d])) && c+d == c+d => (Const32F [c+d]) +(Add64F (Const64F [c]) (Const64F [d])) && c+d == c+d => (Const64F [c+d]) +(AddPtr x (Const64 [c])) => (OffPtr x [c]) +(AddPtr x (Const32 [c])) => (OffPtr x [int64(c)]) + +(Sub8 (Const8 [c]) (Const8 [d])) => (Const8 [c-d]) +(Sub16 (Const16 [c]) (Const16 [d])) => (Const16 [c-d]) +(Sub32 (Const32 [c]) (Const32 [d])) => (Const32 [c-d]) +(Sub64 (Const64 [c]) (Const64 [d])) => (Const64 [c-d]) +(Sub32F (Const32F [c]) (Const32F [d])) && c-d == c-d => (Const32F [c-d]) +(Sub64F (Const64F [c]) (Const64F [d])) && c-d == c-d => (Const64F [c-d]) + +(Mul8 (Const8 [c]) (Const8 [d])) => (Const8 [c*d]) +(Mul16 (Const16 [c]) (Const16 [d])) => (Const16 [c*d]) +(Mul32 (Const32 [c]) (Const32 [d])) => (Const32 [c*d]) +(Mul64 (Const64 [c]) (Const64 [d])) => (Const64 [c*d]) +(Mul32F (Const32F [c]) (Const32F [d])) && c*d == c*d => 
(Const32F [c*d]) +(Mul64F (Const64F [c]) (Const64F [d])) && c*d == c*d => (Const64F [c*d]) + +(And8 (Const8 [c]) (Const8 [d])) => (Const8 [c&d]) +(And16 (Const16 [c]) (Const16 [d])) => (Const16 [c&d]) +(And32 (Const32 [c]) (Const32 [d])) => (Const32 [c&d]) +(And64 (Const64 [c]) (Const64 [d])) => (Const64 [c&d]) + +(Or8 (Const8 [c]) (Const8 [d])) => (Const8 [c|d]) +(Or16 (Const16 [c]) (Const16 [d])) => (Const16 [c|d]) +(Or32 (Const32 [c]) (Const32 [d])) => (Const32 [c|d]) +(Or64 (Const64 [c]) (Const64 [d])) => (Const64 [c|d]) + +(Xor8 (Const8 [c]) (Const8 [d])) => (Const8 [c^d]) +(Xor16 (Const16 [c]) (Const16 [d])) => (Const16 [c^d]) +(Xor32 (Const32 [c]) (Const32 [d])) => (Const32 [c^d]) +(Xor64 (Const64 [c]) (Const64 [d])) => (Const64 [c^d]) + +(Ctz64 (Const64 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz64(c))]) +(Ctz32 (Const32 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz32(c))]) +(Ctz16 (Const16 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz16(c))]) +(Ctz8 (Const8 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz8(c))]) + +(Ctz64 (Const64 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz64(c))]) +(Ctz32 (Const32 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz32(c))]) +(Ctz16 (Const16 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz16(c))]) +(Ctz8 (Const8 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz8(c))]) + +(Div8 (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [c/d]) +(Div16 (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [c/d]) +(Div32 (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [c/d]) +(Div64 (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [c/d]) +(Div8u (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [int8(uint8(c)/uint8(d))]) +(Div16u (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [int16(uint16(c)/uint16(d))]) +(Div32u (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [int32(uint32(c)/uint32(d))]) +(Div64u (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [int64(uint64(c)/uint64(d))]) +(Div32F 
(Const32F [c]) (Const32F [d])) && c/d == c/d => (Const32F [c/d]) +(Div64F (Const64F [c]) (Const64F [d])) && c/d == c/d => (Const64F [c/d]) +(Select0 (Div128u (Const64 [0]) lo y)) => (Div64u lo y) +(Select1 (Div128u (Const64 [0]) lo y)) => (Mod64u lo y) + +(Not (ConstBool [c])) => (ConstBool [!c]) + +(Floor (Const64F [c])) => (Const64F [math.Floor(c)]) +(Ceil (Const64F [c])) => (Const64F [math.Ceil(c)]) +(Trunc (Const64F [c])) => (Const64F [math.Trunc(c)]) +(RoundToEven (Const64F [c])) => (Const64F [math.RoundToEven(c)]) + +// Convert x * 1 to x. +(Mul(8|16|32|64) (Const(8|16|32|64) [1]) x) => x +(Select0 (Mul(32|64)uover (Const(32|64) [1]) x)) => x +(Select1 (Mul(32|64)uover (Const(32|64) [1]) x)) => (ConstBool [false]) + +// Convert x * -1 to -x. +(Mul(8|16|32|64) (Const(8|16|32|64) [-1]) x) => (Neg(8|16|32|64) x) + +// DeMorgan's Laws +(And(8|16|32|64) (Com(8|16|32|64) x) (Com(8|16|32|64) y)) => (Com(8|16|32|64) (Or(8|16|32|64) x y)) +(Or(8|16|32|64) (Com(8|16|32|64) x) (Com(8|16|32|64) y)) => (Com(8|16|32|64) (And(8|16|32|64) x y)) + +// Convert multiplication by a power of two to a shift. 
+(Mul8 n (Const8 [c])) && isPowerOfTwo8(c) => (Lsh8x64 n (Const64 [log8(c)])) +(Mul16 n (Const16 [c])) && isPowerOfTwo16(c) => (Lsh16x64 n (Const64 [log16(c)])) +(Mul32 n (Const32 [c])) && isPowerOfTwo32(c) => (Lsh32x64 n (Const64 [log32(c)])) +(Mul64 n (Const64 [c])) && isPowerOfTwo64(c) => (Lsh64x64 n (Const64 [log64(c)])) +(Mul8 n (Const8 [c])) && t.IsSigned() && isPowerOfTwo8(-c) => (Neg8 (Lsh8x64 n (Const64 [log8(-c)]))) +(Mul16 n (Const16 [c])) && t.IsSigned() && isPowerOfTwo16(-c) => (Neg16 (Lsh16x64 n (Const64 [log16(-c)]))) +(Mul32 n (Const32 [c])) && t.IsSigned() && isPowerOfTwo32(-c) => (Neg32 (Lsh32x64 n (Const64 [log32(-c)]))) +(Mul64 n (Const64 [c])) && t.IsSigned() && isPowerOfTwo64(-c) => (Neg64 (Lsh64x64 n (Const64 [log64(-c)]))) + +(Mod8 (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [c % d]) +(Mod16 (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [c % d]) +(Mod32 (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [c % d]) +(Mod64 (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [c % d]) + +(Mod8u (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [int8(uint8(c) % uint8(d))]) +(Mod16u (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [int16(uint16(c) % uint16(d))]) +(Mod32u (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [int32(uint32(c) % uint32(d))]) +(Mod64u (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [int64(uint64(c) % uint64(d))]) + +(Lsh64x64 (Const64 [c]) (Const64 [d])) => (Const64 [c << uint64(d)]) +(Rsh64x64 (Const64 [c]) (Const64 [d])) => (Const64 [c >> uint64(d)]) +(Rsh64Ux64 (Const64 [c]) (Const64 [d])) => (Const64 [int64(uint64(c) >> uint64(d))]) +(Lsh32x64 (Const32 [c]) (Const64 [d])) => (Const32 [c << uint64(d)]) +(Rsh32x64 (Const32 [c]) (Const64 [d])) => (Const32 [c >> uint64(d)]) +(Rsh32Ux64 (Const32 [c]) (Const64 [d])) => (Const32 [int32(uint32(c) >> uint64(d))]) +(Lsh16x64 (Const16 [c]) (Const64 [d])) => (Const16 [c << uint64(d)]) +(Rsh16x64 (Const16 [c]) (Const64 [d])) => (Const16 [c >> uint64(d)]) +(Rsh16Ux64 
(Const16 [c]) (Const64 [d])) => (Const16 [int16(uint16(c) >> uint64(d))]) +(Lsh8x64 (Const8 [c]) (Const64 [d])) => (Const8 [c << uint64(d)]) +(Rsh8x64 (Const8 [c]) (Const64 [d])) => (Const8 [c >> uint64(d)]) +(Rsh8Ux64 (Const8 [c]) (Const64 [d])) => (Const8 [int8(uint8(c) >> uint64(d))]) + +// Fold IsInBounds when the range of the index cannot exceed the limit. +(IsInBounds (ZeroExt8to32 _) (Const32 [c])) && (1 << 8) <= c => (ConstBool [true]) +(IsInBounds (ZeroExt8to64 _) (Const64 [c])) && (1 << 8) <= c => (ConstBool [true]) +(IsInBounds (ZeroExt16to32 _) (Const32 [c])) && (1 << 16) <= c => (ConstBool [true]) +(IsInBounds (ZeroExt16to64 _) (Const64 [c])) && (1 << 16) <= c => (ConstBool [true]) +(IsInBounds x x) => (ConstBool [false]) +(IsInBounds (And8 (Const8 [c]) _) (Const8 [d])) && 0 <= c && c < d => (ConstBool [true]) +(IsInBounds (ZeroExt8to16 (And8 (Const8 [c]) _)) (Const16 [d])) && 0 <= c && int16(c) < d => (ConstBool [true]) +(IsInBounds (ZeroExt8to32 (And8 (Const8 [c]) _)) (Const32 [d])) && 0 <= c && int32(c) < d => (ConstBool [true]) +(IsInBounds (ZeroExt8to64 (And8 (Const8 [c]) _)) (Const64 [d])) && 0 <= c && int64(c) < d => (ConstBool [true]) +(IsInBounds (And16 (Const16 [c]) _) (Const16 [d])) && 0 <= c && c < d => (ConstBool [true]) +(IsInBounds (ZeroExt16to32 (And16 (Const16 [c]) _)) (Const32 [d])) && 0 <= c && int32(c) < d => (ConstBool [true]) +(IsInBounds (ZeroExt16to64 (And16 (Const16 [c]) _)) (Const64 [d])) && 0 <= c && int64(c) < d => (ConstBool [true]) +(IsInBounds (And32 (Const32 [c]) _) (Const32 [d])) && 0 <= c && c < d => (ConstBool [true]) +(IsInBounds (ZeroExt32to64 (And32 (Const32 [c]) _)) (Const64 [d])) && 0 <= c && int64(c) < d => (ConstBool [true]) +(IsInBounds (And64 (Const64 [c]) _) (Const64 [d])) && 0 <= c && c < d => (ConstBool [true]) +(IsInBounds (Const32 [c]) (Const32 [d])) => (ConstBool [0 <= c && c < d]) +(IsInBounds (Const64 [c]) (Const64 [d])) => (ConstBool [0 <= c && c < d]) +// (Mod64u x y) is always between 0 (inclusive) 
and y (exclusive). +(IsInBounds (Mod32u _ y) y) => (ConstBool [true]) +(IsInBounds (Mod64u _ y) y) => (ConstBool [true]) +// Right shifting an unsigned number limits its value. +(IsInBounds (ZeroExt8to64 (Rsh8Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 8 && 1< (ConstBool [true]) +(IsInBounds (ZeroExt8to32 (Rsh8Ux64 _ (Const64 [c]))) (Const32 [d])) && 0 < c && c < 8 && 1< (ConstBool [true]) +(IsInBounds (ZeroExt8to16 (Rsh8Ux64 _ (Const64 [c]))) (Const16 [d])) && 0 < c && c < 8 && 1< (ConstBool [true]) +(IsInBounds (Rsh8Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 8 && 1< (ConstBool [true]) +(IsInBounds (ZeroExt16to64 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 16 && 1< (ConstBool [true]) +(IsInBounds (ZeroExt16to32 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 16 && 1< (ConstBool [true]) +(IsInBounds (Rsh16Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 16 && 1< (ConstBool [true]) +(IsInBounds (ZeroExt32to64 (Rsh32Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 32 && 1< (ConstBool [true]) +(IsInBounds (Rsh32Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 32 && 1< (ConstBool [true]) +(IsInBounds (Rsh64Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 64 && 1< (ConstBool [true]) + +(IsSliceInBounds x x) => (ConstBool [true]) +(IsSliceInBounds (And32 (Const32 [c]) _) (Const32 [d])) && 0 <= c && c <= d => (ConstBool [true]) +(IsSliceInBounds (And64 (Const64 [c]) _) (Const64 [d])) && 0 <= c && c <= d => (ConstBool [true]) +(IsSliceInBounds (Const32 [0]) _) => (ConstBool [true]) +(IsSliceInBounds (Const64 [0]) _) => (ConstBool [true]) +(IsSliceInBounds (Const32 [c]) (Const32 [d])) => (ConstBool [0 <= c && c <= d]) +(IsSliceInBounds (Const64 [c]) (Const64 [d])) => (ConstBool [0 <= c && c <= d]) +(IsSliceInBounds (SliceLen x) (SliceCap x)) => (ConstBool [true]) + +(Eq(64|32|16|8) x x) => (ConstBool [true]) +(EqB (ConstBool [c]) (ConstBool [d])) => (ConstBool [c == d]) +(EqB (ConstBool [false]) x) => (Not x) 
+(EqB (ConstBool [true]) x) => x + +(Neq(64|32|16|8) x x) => (ConstBool [false]) +(NeqB (ConstBool [c]) (ConstBool [d])) => (ConstBool [c != d]) +(NeqB (ConstBool [false]) x) => x +(NeqB (ConstBool [true]) x) => (Not x) +(NeqB (Not x) (Not y)) => (NeqB x y) + +(Eq64 (Const64 [c]) (Add64 (Const64 [d]) x)) => (Eq64 (Const64 [c-d]) x) +(Eq32 (Const32 [c]) (Add32 (Const32 [d]) x)) => (Eq32 (Const32 [c-d]) x) +(Eq16 (Const16 [c]) (Add16 (Const16 [d]) x)) => (Eq16 (Const16 [c-d]) x) +(Eq8 (Const8 [c]) (Add8 (Const8 [d]) x)) => (Eq8 (Const8 [c-d]) x) + +(Neq64 (Const64 [c]) (Add64 (Const64 [d]) x)) => (Neq64 (Const64 [c-d]) x) +(Neq32 (Const32 [c]) (Add32 (Const32 [d]) x)) => (Neq32 (Const32 [c-d]) x) +(Neq16 (Const16 [c]) (Add16 (Const16 [d]) x)) => (Neq16 (Const16 [c-d]) x) +(Neq8 (Const8 [c]) (Add8 (Const8 [d]) x)) => (Neq8 (Const8 [c-d]) x) + +// signed integer range: ( c <= x && x (<|<=) d ) -> ( unsigned(x-c) (<|<=) unsigned(d-c) ) +(AndB (Leq64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c => ((Less|Leq)64U (Sub64 x (Const64 [c])) (Const64 [d-c])) +(AndB (Leq32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c => ((Less|Leq)32U (Sub32 x (Const32 [c])) (Const32 [d-c])) +(AndB (Leq16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c => ((Less|Leq)16U (Sub16 x (Const16 [c])) (Const16 [d-c])) +(AndB (Leq8 (Const8 [c]) x) ((Less|Leq)8 x (Const8 [d]))) && d >= c => ((Less|Leq)8U (Sub8 x (Const8 [c])) (Const8 [d-c])) + +// signed integer range: ( c < x && x (<|<=) d ) -> ( unsigned(x-(c+1)) (<|<=) unsigned(d-(c+1)) ) +(AndB (Less64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)64U (Sub64 x (Const64 [c+1])) (Const64 [d-c-1])) +(AndB (Less32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)32U (Sub32 x (Const32 [c+1])) (Const32 [d-c-1])) +(AndB (Less16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)16U (Sub16 x (Const16 [c+1])) 
(Const16 [d-c-1])) +(AndB (Less8 (Const8 [c]) x) ((Less|Leq)8 x (Const8 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)8U (Sub8 x (Const8 [c+1])) (Const8 [d-c-1])) + +// unsigned integer range: ( c <= x && x (<|<=) d ) -> ( x-c (<|<=) d-c ) +(AndB (Leq64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c) => ((Less|Leq)64U (Sub64 x (Const64 [c])) (Const64 [d-c])) +(AndB (Leq32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c) => ((Less|Leq)32U (Sub32 x (Const32 [c])) (Const32 [d-c])) +(AndB (Leq16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c) => ((Less|Leq)16U (Sub16 x (Const16 [c])) (Const16 [d-c])) +(AndB (Leq8U (Const8 [c]) x) ((Less|Leq)8U x (Const8 [d]))) && uint8(d) >= uint8(c) => ((Less|Leq)8U (Sub8 x (Const8 [c])) (Const8 [d-c])) + +// unsigned integer range: ( c < x && x (<|<=) d ) -> ( x-(c+1) (<|<=) d-(c+1) ) +(AndB (Less64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c) => ((Less|Leq)64U (Sub64 x (Const64 [c+1])) (Const64 [d-c-1])) +(AndB (Less32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c) => ((Less|Leq)32U (Sub32 x (Const32 [c+1])) (Const32 [d-c-1])) +(AndB (Less16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c) => ((Less|Leq)16U (Sub16 x (Const16 [c+1])) (Const16 [d-c-1])) +(AndB (Less8U (Const8 [c]) x) ((Less|Leq)8U x (Const8 [d]))) && uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c) => ((Less|Leq)8U (Sub8 x (Const8 [c+1])) (Const8 [d-c-1])) + +// signed integer range: ( c (<|<=) x || x < d ) -> ( unsigned(c-d) (<|<=) unsigned(x-d) ) +(OrB ((Less|Leq)64 (Const64 [c]) x) (Less64 x (Const64 [d]))) && c >= d => ((Less|Leq)64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) +(OrB ((Less|Leq)32 (Const32 [c]) x) (Less32 x (Const32 [d]))) && c >= d => ((Less|Leq)32U (Const32 [c-d]) (Sub32 x (Const32 [d]))) +(OrB 
((Less|Leq)16 (Const16 [c]) x) (Less16 x (Const16 [d]))) && c >= d => ((Less|Leq)16U (Const16 [c-d]) (Sub16 x (Const16 [d]))) +(OrB ((Less|Leq)8 (Const8 [c]) x) (Less8 x (Const8 [d]))) && c >= d => ((Less|Leq)8U (Const8 [c-d]) (Sub8 x (Const8 [d]))) + +// signed integer range: ( c (<|<=) x || x <= d ) -> ( unsigned(c-(d+1)) (<|<=) unsigned(x-(d+1)) ) +(OrB ((Less|Leq)64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)64U (Const64 [c-d-1]) (Sub64 x (Const64 [d+1]))) +(OrB ((Less|Leq)32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)32U (Const32 [c-d-1]) (Sub32 x (Const32 [d+1]))) +(OrB ((Less|Leq)16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1]))) +(OrB ((Less|Leq)8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1]))) + +// unsigned integer range: ( c (<|<=) x || x < d ) -> ( c-d (<|<=) x-d ) +(OrB ((Less|Leq)64U (Const64 [c]) x) (Less64U x (Const64 [d]))) && uint64(c) >= uint64(d) => ((Less|Leq)64U (Const64 [c-d]) (Sub64 x (Const64 [d]))) +(OrB ((Less|Leq)32U (Const32 [c]) x) (Less32U x (Const32 [d]))) && uint32(c) >= uint32(d) => ((Less|Leq)32U (Const32 [c-d]) (Sub32 x (Const32 [d]))) +(OrB ((Less|Leq)16U (Const16 [c]) x) (Less16U x (Const16 [d]))) && uint16(c) >= uint16(d) => ((Less|Leq)16U (Const16 [c-d]) (Sub16 x (Const16 [d]))) +(OrB ((Less|Leq)8U (Const8 [c]) x) (Less8U x (Const8 [d]))) && uint8(c) >= uint8(d) => ((Less|Leq)8U (Const8 [c-d]) (Sub8 x (Const8 [d]))) + +// unsigned integer range: ( c (<|<=) x || x <= d ) -> ( c-(d+1) (<|<=) x-(d+1) ) +(OrB ((Less|Leq)64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) && uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d) => ((Less|Leq)64U (Const64 [c-d-1]) (Sub64 x (Const64 [d+1]))) +(OrB ((Less|Leq)32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) && uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d) => 
((Less|Leq)32U (Const32 [c-d-1]) (Sub32 x (Const32 [d+1]))) +(OrB ((Less|Leq)16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) && uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) => ((Less|Leq)16U (Const16 [c-d-1]) (Sub16 x (Const16 [d+1]))) +(OrB ((Less|Leq)8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) && uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) => ((Less|Leq)8U (Const8 [c-d-1]) (Sub8 x (Const8 [d+1]))) + +// Canonicalize x-const to x+(-const) +(Sub64 x (Const64 [c])) && x.Op != OpConst64 => (Add64 (Const64 [-c]) x) +(Sub32 x (Const32 [c])) && x.Op != OpConst32 => (Add32 (Const32 [-c]) x) +(Sub16 x (Const16 [c])) && x.Op != OpConst16 => (Add16 (Const16 [-c]) x) +(Sub8 x (Const8 [c])) && x.Op != OpConst8 => (Add8 (Const8 [-c]) x) + +// fold negation into comparison operators +(Not (Eq(64|32|16|8|B|Ptr|64F|32F) x y)) => (Neq(64|32|16|8|B|Ptr|64F|32F) x y) +(Not (Neq(64|32|16|8|B|Ptr|64F|32F) x y)) => (Eq(64|32|16|8|B|Ptr|64F|32F) x y) + +(Not (Less(64|32|16|8) x y)) => (Leq(64|32|16|8) y x) +(Not (Less(64|32|16|8)U x y)) => (Leq(64|32|16|8)U y x) +(Not (Leq(64|32|16|8) x y)) => (Less(64|32|16|8) y x) +(Not (Leq(64|32|16|8)U x y)) => (Less(64|32|16|8)U y x) + +// Distribute multiplication c * (d+x) -> c*d + c*x. Useful for: +// a[i].b = ...; a[i+1].b = ... 
+(Mul64 (Const64 [c]) (Add64 (Const64 [d]) x)) => + (Add64 (Const64 [c*d]) (Mul64 (Const64 [c]) x)) +(Mul32 (Const32 [c]) (Add32 (Const32 [d]) x)) => + (Add32 (Const32 [c*d]) (Mul32 (Const32 [c]) x)) + +// Rewrite x*y ± x*z to x*(y±z) +(Add(64|32|16|8) (Mul(64|32|16|8) x y) (Mul(64|32|16|8) x z)) + => (Mul(64|32|16|8) x (Add(64|32|16|8) y z)) +(Sub(64|32|16|8) (Mul(64|32|16|8) x y) (Mul(64|32|16|8) x z)) + => (Mul(64|32|16|8) x (Sub(64|32|16|8) y z)) + +// rewrite shifts of 8/16/32 bit consts into 64 bit consts to reduce +// the number of the other rewrite rules for const shifts +(Lsh64x32 x (Const32 [c])) => (Lsh64x64 x (Const64 [int64(uint32(c))])) +(Lsh64x16 x (Const16 [c])) => (Lsh64x64 x (Const64 [int64(uint16(c))])) +(Lsh64x8 x (Const8 [c])) => (Lsh64x64 x (Const64 [int64(uint8(c))])) +(Rsh64x32 x (Const32 [c])) => (Rsh64x64 x (Const64 [int64(uint32(c))])) +(Rsh64x16 x (Const16 [c])) => (Rsh64x64 x (Const64 [int64(uint16(c))])) +(Rsh64x8 x (Const8 [c])) => (Rsh64x64 x (Const64 [int64(uint8(c))])) +(Rsh64Ux32 x (Const32 [c])) => (Rsh64Ux64 x (Const64 [int64(uint32(c))])) +(Rsh64Ux16 x (Const16 [c])) => (Rsh64Ux64 x (Const64 [int64(uint16(c))])) +(Rsh64Ux8 x (Const8 [c])) => (Rsh64Ux64 x (Const64 [int64(uint8(c))])) + +(Lsh32x32 x (Const32 [c])) => (Lsh32x64 x (Const64 [int64(uint32(c))])) +(Lsh32x16 x (Const16 [c])) => (Lsh32x64 x (Const64 [int64(uint16(c))])) +(Lsh32x8 x (Const8 [c])) => (Lsh32x64 x (Const64 [int64(uint8(c))])) +(Rsh32x32 x (Const32 [c])) => (Rsh32x64 x (Const64 [int64(uint32(c))])) +(Rsh32x16 x (Const16 [c])) => (Rsh32x64 x (Const64 [int64(uint16(c))])) +(Rsh32x8 x (Const8 [c])) => (Rsh32x64 x (Const64 [int64(uint8(c))])) +(Rsh32Ux32 x (Const32 [c])) => (Rsh32Ux64 x (Const64 [int64(uint32(c))])) +(Rsh32Ux16 x (Const16 [c])) => (Rsh32Ux64 x (Const64 [int64(uint16(c))])) +(Rsh32Ux8 x (Const8 [c])) => (Rsh32Ux64 x (Const64 [int64(uint8(c))])) + +(Lsh16x32 x (Const32 [c])) => (Lsh16x64 x (Const64 [int64(uint32(c))])) +(Lsh16x16 x (Const16 [c])) 
=> (Lsh16x64 x (Const64 [int64(uint16(c))])) +(Lsh16x8 x (Const8 [c])) => (Lsh16x64 x (Const64 [int64(uint8(c))])) +(Rsh16x32 x (Const32 [c])) => (Rsh16x64 x (Const64 [int64(uint32(c))])) +(Rsh16x16 x (Const16 [c])) => (Rsh16x64 x (Const64 [int64(uint16(c))])) +(Rsh16x8 x (Const8 [c])) => (Rsh16x64 x (Const64 [int64(uint8(c))])) +(Rsh16Ux32 x (Const32 [c])) => (Rsh16Ux64 x (Const64 [int64(uint32(c))])) +(Rsh16Ux16 x (Const16 [c])) => (Rsh16Ux64 x (Const64 [int64(uint16(c))])) +(Rsh16Ux8 x (Const8 [c])) => (Rsh16Ux64 x (Const64 [int64(uint8(c))])) + +(Lsh8x32 x (Const32 [c])) => (Lsh8x64 x (Const64 [int64(uint32(c))])) +(Lsh8x16 x (Const16 [c])) => (Lsh8x64 x (Const64 [int64(uint16(c))])) +(Lsh8x8 x (Const8 [c])) => (Lsh8x64 x (Const64 [int64(uint8(c))])) +(Rsh8x32 x (Const32 [c])) => (Rsh8x64 x (Const64 [int64(uint32(c))])) +(Rsh8x16 x (Const16 [c])) => (Rsh8x64 x (Const64 [int64(uint16(c))])) +(Rsh8x8 x (Const8 [c])) => (Rsh8x64 x (Const64 [int64(uint8(c))])) +(Rsh8Ux32 x (Const32 [c])) => (Rsh8Ux64 x (Const64 [int64(uint32(c))])) +(Rsh8Ux16 x (Const16 [c])) => (Rsh8Ux64 x (Const64 [int64(uint16(c))])) +(Rsh8Ux8 x (Const8 [c])) => (Rsh8Ux64 x (Const64 [int64(uint8(c))])) + +// shifts by zero +(Lsh(64|32|16|8)x64 x (Const64 [0])) => x +(Rsh(64|32|16|8)x64 x (Const64 [0])) => x +(Rsh(64|32|16|8)Ux64 x (Const64 [0])) => x + +// rotates by multiples of register width +(RotateLeft64 x (Const64 [c])) && c%64 == 0 => x +(RotateLeft32 x (Const32 [c])) && c%32 == 0 => x +(RotateLeft16 x (Const16 [c])) && c%16 == 0 => x +(RotateLeft8 x (Const8 [c])) && c%8 == 0 => x + +// zero shifted +(Lsh64x(64|32|16|8) (Const64 [0]) _) => (Const64 [0]) +(Rsh64x(64|32|16|8) (Const64 [0]) _) => (Const64 [0]) +(Rsh64Ux(64|32|16|8) (Const64 [0]) _) => (Const64 [0]) +(Lsh32x(64|32|16|8) (Const32 [0]) _) => (Const32 [0]) +(Rsh32x(64|32|16|8) (Const32 [0]) _) => (Const32 [0]) +(Rsh32Ux(64|32|16|8) (Const32 [0]) _) => (Const32 [0]) +(Lsh16x(64|32|16|8) (Const16 [0]) _) => (Const16 [0]) 
+(Rsh16x(64|32|16|8) (Const16 [0]) _) => (Const16 [0]) +(Rsh16Ux(64|32|16|8) (Const16 [0]) _) => (Const16 [0]) +(Lsh8x(64|32|16|8) (Const8 [0]) _) => (Const8 [0]) +(Rsh8x(64|32|16|8) (Const8 [0]) _) => (Const8 [0]) +(Rsh8Ux(64|32|16|8) (Const8 [0]) _) => (Const8 [0]) + +// large left shifts of all values, and right shifts of unsigned values +((Lsh64|Rsh64U)x64 _ (Const64 [c])) && uint64(c) >= 64 => (Const64 [0]) +((Lsh32|Rsh32U)x64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0]) +((Lsh16|Rsh16U)x64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0]) +((Lsh8|Rsh8U)x64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0]) + +// combine const shifts +(Lsh64x64 (Lsh64x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh64x64 x (Const64 [c+d])) +(Lsh32x64 (Lsh32x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh32x64 x (Const64 [c+d])) +(Lsh16x64 (Lsh16x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh16x64 x (Const64 [c+d])) +(Lsh8x64 (Lsh8x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh8x64 x (Const64 [c+d])) + +(Rsh64x64 (Rsh64x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh64x64 x (Const64 [c+d])) +(Rsh32x64 (Rsh32x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh32x64 x (Const64 [c+d])) +(Rsh16x64 (Rsh16x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh16x64 x (Const64 [c+d])) +(Rsh8x64 (Rsh8x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh8x64 x (Const64 [c+d])) + +(Rsh64Ux64 (Rsh64Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh64Ux64 x (Const64 [c+d])) +(Rsh32Ux64 (Rsh32Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh32Ux64 x (Const64 [c+d])) +(Rsh16Ux64 (Rsh16Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh16Ux64 x (Const64 [c+d])) +(Rsh8Ux64 (Rsh8Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh8Ux64 x (Const64 [c+d])) + +// Remove signed right shift before an unsigned right shift that extracts the sign bit. 
+(Rsh8Ux64 (Rsh8x64 x _) (Const64 [7] )) => (Rsh8Ux64 x (Const64 [7] )) +(Rsh16Ux64 (Rsh16x64 x _) (Const64 [15])) => (Rsh16Ux64 x (Const64 [15])) +(Rsh32Ux64 (Rsh32x64 x _) (Const64 [31])) => (Rsh32Ux64 x (Const64 [31])) +(Rsh64Ux64 (Rsh64x64 x _) (Const64 [63])) => (Rsh64Ux64 x (Const64 [63])) + +// Convert x>>c<= 0 && c < 64 && i.Uses == 1 => (And64 x (Const64 [int64(-1) << c])) +(Lsh32x64 i:(Rsh(32|32U)x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 32 && i.Uses == 1 => (And32 x (Const32 [int32(-1) << c])) +(Lsh16x64 i:(Rsh(16|16U)x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 16 && i.Uses == 1 => (And16 x (Const16 [int16(-1) << c])) +(Lsh8x64 i:(Rsh(8|8U)x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 8 && i.Uses == 1 => (And8 x (Const8 [int8(-1) << c])) +// similarly for x<>c +(Rsh64Ux64 i:(Lsh64x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 64 && i.Uses == 1 => (And64 x (Const64 [int64(^uint64(0)>>c)])) +(Rsh32Ux64 i:(Lsh32x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 32 && i.Uses == 1 => (And32 x (Const32 [int32(^uint32(0)>>c)])) +(Rsh16Ux64 i:(Lsh16x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 16 && i.Uses == 1 => (And16 x (Const16 [int16(^uint16(0)>>c)])) +(Rsh8Ux64 i:(Lsh8x64 x (Const64 [c])) (Const64 [c])) && c >= 0 && c < 8 && i.Uses == 1 => (And8 x (Const8 [int8 (^uint8 (0)>>c)])) + +// ((x >> c1) << c2) >> c3 +(Rsh(64|32|16|8)Ux64 (Lsh(64|32|16|8)x64 (Rsh(64|32|16|8)Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) + && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) + => (Rsh(64|32|16|8)Ux64 x (Const64 [c1-c2+c3])) + +// ((x << c1) >> c2) << c3 +(Lsh(64|32|16|8)x64 (Rsh(64|32|16|8)Ux64 (Lsh(64|32|16|8)x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3])) + && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3) + => (Lsh(64|32|16|8)x64 x (Const64 [c1-c2+c3])) + +// (x >> c) & uppermask = 0 +(And64 (Const64 [m]) (Rsh64Ux64 _ (Const64 [c]))) && c >= 
int64(64-ntz64(m)) => (Const64 [0]) +(And32 (Const32 [m]) (Rsh32Ux64 _ (Const64 [c]))) && c >= int64(32-ntz32(m)) => (Const32 [0]) +(And16 (Const16 [m]) (Rsh16Ux64 _ (Const64 [c]))) && c >= int64(16-ntz16(m)) => (Const16 [0]) +(And8 (Const8 [m]) (Rsh8Ux64 _ (Const64 [c]))) && c >= int64(8-ntz8(m)) => (Const8 [0]) + +// (x << c) & lowermask = 0 +(And64 (Const64 [m]) (Lsh64x64 _ (Const64 [c]))) && c >= int64(64-nlz64(m)) => (Const64 [0]) +(And32 (Const32 [m]) (Lsh32x64 _ (Const64 [c]))) && c >= int64(32-nlz32(m)) => (Const32 [0]) +(And16 (Const16 [m]) (Lsh16x64 _ (Const64 [c]))) && c >= int64(16-nlz16(m)) => (Const16 [0]) +(And8 (Const8 [m]) (Lsh8x64 _ (Const64 [c]))) && c >= int64(8-nlz8(m)) => (Const8 [0]) + +// replace shifts with zero extensions +(Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) => (ZeroExt8to16 (Trunc16to8 x)) +(Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) => (ZeroExt8to32 (Trunc32to8 x)) +(Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) => (ZeroExt8to64 (Trunc64to8 x)) +(Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) => (ZeroExt16to32 (Trunc32to16 x)) +(Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) => (ZeroExt16to64 (Trunc64to16 x)) +(Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) => (ZeroExt32to64 (Trunc64to32 x)) + +// replace shifts with sign extensions +(Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) => (SignExt8to16 (Trunc16to8 x)) +(Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) => (SignExt8to32 (Trunc32to8 x)) +(Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) => (SignExt8to64 (Trunc64to8 x)) +(Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) => (SignExt16to32 (Trunc32to16 x)) +(Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) => (SignExt16to64 (Trunc64to16 x)) +(Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) => (SignExt32to64 (Trunc64to32 x)) + +// constant comparisons +(Eq(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c == d]) 
+(Neq(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c != d]) +(Less(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c < d]) +(Leq(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c <= d]) + +(Less64U (Const64 [c]) (Const64 [d])) => (ConstBool [uint64(c) < uint64(d)]) +(Less32U (Const32 [c]) (Const32 [d])) => (ConstBool [uint32(c) < uint32(d)]) +(Less16U (Const16 [c]) (Const16 [d])) => (ConstBool [uint16(c) < uint16(d)]) +(Less8U (Const8 [c]) (Const8 [d])) => (ConstBool [ uint8(c) < uint8(d)]) + +(Leq64U (Const64 [c]) (Const64 [d])) => (ConstBool [uint64(c) <= uint64(d)]) +(Leq32U (Const32 [c]) (Const32 [d])) => (ConstBool [uint32(c) <= uint32(d)]) +(Leq16U (Const16 [c]) (Const16 [d])) => (ConstBool [uint16(c) <= uint16(d)]) +(Leq8U (Const8 [c]) (Const8 [d])) => (ConstBool [ uint8(c) <= uint8(d)]) + +(Leq8 (Const8 [0]) (And8 _ (Const8 [c]))) && c >= 0 => (ConstBool [true]) +(Leq16 (Const16 [0]) (And16 _ (Const16 [c]))) && c >= 0 => (ConstBool [true]) +(Leq32 (Const32 [0]) (And32 _ (Const32 [c]))) && c >= 0 => (ConstBool [true]) +(Leq64 (Const64 [0]) (And64 _ (Const64 [c]))) && c >= 0 => (ConstBool [true]) + +(Leq8 (Const8 [0]) (Rsh8Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true]) +(Leq16 (Const16 [0]) (Rsh16Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true]) +(Leq32 (Const32 [0]) (Rsh32Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true]) +(Leq64 (Const64 [0]) (Rsh64Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true]) + +// prefer equalities with zero +(Less(64|32|16|8) (Const(64|32|16|8) [0]) x) && isNonNegative(x) => (Neq(64|32|16|8) (Const(64|32|16|8) [0]) x) +(Less(64|32|16|8) x (Const(64|32|16|8) [1])) && isNonNegative(x) => (Eq(64|32|16|8) (Const(64|32|16|8) [0]) x) +(Less(64|32|16|8)U x (Const(64|32|16|8) [1])) => (Eq(64|32|16|8) (Const(64|32|16|8) [0]) x) +(Leq(64|32|16|8)U (Const(64|32|16|8) [1]) x) => (Neq(64|32|16|8) (Const(64|32|16|8) [0]) x) + +// prefer 
comparisons with zero +(Less(64|32|16|8) x (Const(64|32|16|8) [1])) => (Leq(64|32|16|8) x (Const(64|32|16|8) [0])) +(Leq(64|32|16|8) x (Const(64|32|16|8) [-1])) => (Less(64|32|16|8) x (Const(64|32|16|8) [0])) +(Leq(64|32|16|8) (Const(64|32|16|8) [1]) x) => (Less(64|32|16|8) (Const(64|32|16|8) [0]) x) +(Less(64|32|16|8) (Const(64|32|16|8) [-1]) x) => (Leq(64|32|16|8) (Const(64|32|16|8) [0]) x) + +// constant floating point comparisons +(Eq32F (Const32F [c]) (Const32F [d])) => (ConstBool [c == d]) +(Eq64F (Const64F [c]) (Const64F [d])) => (ConstBool [c == d]) +(Neq32F (Const32F [c]) (Const32F [d])) => (ConstBool [c != d]) +(Neq64F (Const64F [c]) (Const64F [d])) => (ConstBool [c != d]) +(Less32F (Const32F [c]) (Const32F [d])) => (ConstBool [c < d]) +(Less64F (Const64F [c]) (Const64F [d])) => (ConstBool [c < d]) +(Leq32F (Const32F [c]) (Const32F [d])) => (ConstBool [c <= d]) +(Leq64F (Const64F [c]) (Const64F [d])) => (ConstBool [c <= d]) + +// simplifications +(Or(64|32|16|8) x x) => x +(Or(64|32|16|8) (Const(64|32|16|8) [0]) x) => x +(Or(64|32|16|8) (Const(64|32|16|8) [-1]) _) => (Const(64|32|16|8) [-1]) +(Or(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [-1]) + +(And(64|32|16|8) x x) => x +(And(64|32|16|8) (Const(64|32|16|8) [-1]) x) => x +(And(64|32|16|8) (Const(64|32|16|8) [0]) _) => (Const(64|32|16|8) [0]) +(And(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [0]) + +(Xor(64|32|16|8) x x) => (Const(64|32|16|8) [0]) +(Xor(64|32|16|8) (Const(64|32|16|8) [0]) x) => x +(Xor(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [-1]) + +(Add(64|32|16|8) (Const(64|32|16|8) [0]) x) => x +(Sub(64|32|16|8) x x) => (Const(64|32|16|8) [0]) +(Mul(64|32|16|8) (Const(64|32|16|8) [0]) _) => (Const(64|32|16|8) [0]) +(Select0 (Mul(64|32)uover (Const(64|32) [0]) x)) => (Const(64|32) [0]) +(Select1 (Mul(64|32)uover (Const(64|32) [0]) x)) => (ConstBool [false]) + +(Com(64|32|16|8) (Com(64|32|16|8) x)) => x +(Com(64|32|16|8) (Const(64|32|16|8) [c])) => 
(Const(64|32|16|8) [^c]) + +(Neg(64|32|16|8) (Sub(64|32|16|8) x y)) => (Sub(64|32|16|8) y x) +(Add(64|32|16|8) x (Neg(64|32|16|8) y)) => (Sub(64|32|16|8) x y) + +(Xor(64|32|16|8) (Const(64|32|16|8) [-1]) x) => (Com(64|32|16|8) x) + +(Sub(64|32|16|8) (Neg(64|32|16|8) x) (Com(64|32|16|8) x)) => (Const(64|32|16|8) [1]) +(Sub(64|32|16|8) (Com(64|32|16|8) x) (Neg(64|32|16|8) x)) => (Const(64|32|16|8) [-1]) +(Add(64|32|16|8) (Com(64|32|16|8) x) x) => (Const(64|32|16|8) [-1]) + +// Simplification when involving common integer +// (t + x) - (t + y) == x - y +// (t + x) - (y + t) == x - y +// (x + t) - (y + t) == x - y +// (x + t) - (t + y) == x - y +// (x - t) + (t + y) == x + y +// (x - t) + (y + t) == x + y +(Sub(64|32|16|8) (Add(64|32|16|8) t x) (Add(64|32|16|8) t y)) => (Sub(64|32|16|8) x y) +(Add(64|32|16|8) (Sub(64|32|16|8) x t) (Add(64|32|16|8) t y)) => (Add(64|32|16|8) x y) + +// ^(x-1) == ^x+1 == -x +(Add(64|32|16|8) (Const(64|32|16|8) [1]) (Com(64|32|16|8) x)) => (Neg(64|32|16|8) x) +(Com(64|32|16|8) (Add(64|32|16|8) (Const(64|32|16|8) [-1]) x)) => (Neg(64|32|16|8) x) + +// -(-x) == x +(Neg(64|32|16|8) (Neg(64|32|16|8) x)) => x + +// -^x == x+1 +(Neg(64|32|16|8) (Com(64|32|16|8) x)) => (Add(64|32|16|8) (Const(64|32|16|8) [1]) x) + +(And(64|32|16|8) x (And(64|32|16|8) x y)) => (And(64|32|16|8) x y) +(Or(64|32|16|8) x (Or(64|32|16|8) x y)) => (Or(64|32|16|8) x y) +(Xor(64|32|16|8) x (Xor(64|32|16|8) x y)) => y + +// Unsigned comparisons to zero. +(Less(64U|32U|16U|8U) _ (Const(64|32|16|8) [0])) => (ConstBool [false]) +(Leq(64U|32U|16U|8U) (Const(64|32|16|8) [0]) _) => (ConstBool [true]) + +// Ands clear bits. Ors set bits. +// If a subsequent Or will set all the bits +// that an And cleared, we can skip the And. +// This happens in bitmasking code like: +// x &^= 3 << shift // clear two old bits +// x |= v << shift // set two new bits +// when shift is a small constant and v ends up a constant 3. 
+(Or8 (And8 x (Const8 [c2])) (Const8 [c1])) && ^(c1 | c2) == 0 => (Or8 (Const8 [c1]) x) +(Or16 (And16 x (Const16 [c2])) (Const16 [c1])) && ^(c1 | c2) == 0 => (Or16 (Const16 [c1]) x) +(Or32 (And32 x (Const32 [c2])) (Const32 [c1])) && ^(c1 | c2) == 0 => (Or32 (Const32 [c1]) x) +(Or64 (And64 x (Const64 [c2])) (Const64 [c1])) && ^(c1 | c2) == 0 => (Or64 (Const64 [c1]) x) + +(Trunc64to8 (And64 (Const64 [y]) x)) && y&0xFF == 0xFF => (Trunc64to8 x) +(Trunc64to16 (And64 (Const64 [y]) x)) && y&0xFFFF == 0xFFFF => (Trunc64to16 x) +(Trunc64to32 (And64 (Const64 [y]) x)) && y&0xFFFFFFFF == 0xFFFFFFFF => (Trunc64to32 x) +(Trunc32to8 (And32 (Const32 [y]) x)) && y&0xFF == 0xFF => (Trunc32to8 x) +(Trunc32to16 (And32 (Const32 [y]) x)) && y&0xFFFF == 0xFFFF => (Trunc32to16 x) +(Trunc16to8 (And16 (Const16 [y]) x)) && y&0xFF == 0xFF => (Trunc16to8 x) + +(ZeroExt8to64 (Trunc64to8 x:(Rsh64Ux64 _ (Const64 [s])))) && s >= 56 => x +(ZeroExt16to64 (Trunc64to16 x:(Rsh64Ux64 _ (Const64 [s])))) && s >= 48 => x +(ZeroExt32to64 (Trunc64to32 x:(Rsh64Ux64 _ (Const64 [s])))) && s >= 32 => x +(ZeroExt8to32 (Trunc32to8 x:(Rsh32Ux64 _ (Const64 [s])))) && s >= 24 => x +(ZeroExt16to32 (Trunc32to16 x:(Rsh32Ux64 _ (Const64 [s])))) && s >= 16 => x +(ZeroExt8to16 (Trunc16to8 x:(Rsh16Ux64 _ (Const64 [s])))) && s >= 8 => x + +(SignExt8to64 (Trunc64to8 x:(Rsh64x64 _ (Const64 [s])))) && s >= 56 => x +(SignExt16to64 (Trunc64to16 x:(Rsh64x64 _ (Const64 [s])))) && s >= 48 => x +(SignExt32to64 (Trunc64to32 x:(Rsh64x64 _ (Const64 [s])))) && s >= 32 => x +(SignExt8to32 (Trunc32to8 x:(Rsh32x64 _ (Const64 [s])))) && s >= 24 => x +(SignExt16to32 (Trunc32to16 x:(Rsh32x64 _ (Const64 [s])))) && s >= 16 => x +(SignExt8to16 (Trunc16to8 x:(Rsh16x64 _ (Const64 [s])))) && s >= 8 => x + +(Slicemask (Const32 [x])) && x > 0 => (Const32 [-1]) +(Slicemask (Const32 [0])) => (Const32 [0]) +(Slicemask (Const64 [x])) && x > 0 => (Const64 [-1]) +(Slicemask (Const64 [0])) => (Const64 [0]) + +// simplifications often used for lengths. e.g. 
len(s[i:i+5])==5 +(Sub(64|32|16|8) (Add(64|32|16|8) x y) x) => y +(Sub(64|32|16|8) (Add(64|32|16|8) x y) y) => x +(Sub(64|32|16|8) (Sub(64|32|16|8) x y) x) => (Neg(64|32|16|8) y) +(Sub(64|32|16|8) x (Add(64|32|16|8) x y)) => (Neg(64|32|16|8) y) +(Add(64|32|16|8) x (Sub(64|32|16|8) y x)) => y +(Add(64|32|16|8) x (Add(64|32|16|8) y (Sub(64|32|16|8) z x))) => (Add(64|32|16|8) y z) + +// basic phi simplifications +(Phi (Const8 [c]) (Const8 [c])) => (Const8 [c]) +(Phi (Const16 [c]) (Const16 [c])) => (Const16 [c]) +(Phi (Const32 [c]) (Const32 [c])) => (Const32 [c]) +(Phi (Const64 [c]) (Const64 [c])) => (Const64 [c]) + +// slice and interface comparisons +// The frontend ensures that we can only compare against nil, +// so we need only compare the first word (interface type or slice ptr). +(EqInter x y) => (EqPtr (ITab x) (ITab y)) +(NeqInter x y) => (NeqPtr (ITab x) (ITab y)) +(EqSlice x y) => (EqPtr (SlicePtr x) (SlicePtr y)) +(NeqSlice x y) => (NeqPtr (SlicePtr x) (SlicePtr y)) + +// Load of store of same address, with compatibly typed value and same size +(Load p1 (Store {t2} p2 x _)) + && isSamePtr(p1, p2) + && t1.Compare(x.Type) == types.CMPeq + && t1.Size() == t2.Size() + => x +(Load p1 (Store {t2} p2 _ (Store {t3} p3 x _))) + && isSamePtr(p1, p3) + && t1.Compare(x.Type) == types.CMPeq + && t1.Size() == t2.Size() + && disjoint(p3, t3.Size(), p2, t2.Size()) + => x +(Load p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 x _)))) + && isSamePtr(p1, p4) + && t1.Compare(x.Type) == types.CMPeq + && t1.Size() == t2.Size() + && disjoint(p4, t4.Size(), p2, t2.Size()) + && disjoint(p4, t4.Size(), p3, t3.Size()) + => x +(Load p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 x _))))) + && isSamePtr(p1, p5) + && t1.Compare(x.Type) == types.CMPeq + && t1.Size() == t2.Size() + && disjoint(p5, t5.Size(), p2, t2.Size()) + && disjoint(p5, t5.Size(), p3, t3.Size()) + && disjoint(p5, t5.Size(), p4, t4.Size()) + => x + +// Pass constants through 
math.Float{32,64}bits and math.Float{32,64}frombits + (Load p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) => (Const64F [math.Float64frombits(uint64(x))]) + (Load p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) => (Const32F [math.Float32frombits(uint32(x))]) +(Load p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1) => (Const64 [int64(math.Float64bits(x))]) +(Load p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) => (Const32 [int32(math.Float32bits(x))]) + +// Float Loads up to Zeros so they can be constant folded. +(Load op:(OffPtr [o1] p1) + (Store {t2} p2 _ + mem:(Zero [n] p3 _))) + && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) + && CanSSA(t1) + && disjoint(op, t1.Size(), p2, t2.Size()) + => @mem.Block (Load (OffPtr [o1] p3) mem) +(Load op:(OffPtr [o1] p1) + (Store {t2} p2 _ + (Store {t3} p3 _ + mem:(Zero [n] p4 _)))) + && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) + && CanSSA(t1) + && disjoint(op, t1.Size(), p2, t2.Size()) + && disjoint(op, t1.Size(), p3, t3.Size()) + => @mem.Block (Load (OffPtr [o1] p4) mem) +(Load op:(OffPtr [o1] p1) + (Store {t2} p2 _ + (Store {t3} p3 _ + (Store {t4} p4 _ + mem:(Zero [n] p5 _))))) + && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) + && CanSSA(t1) + && disjoint(op, t1.Size(), p2, t2.Size()) + && disjoint(op, t1.Size(), p3, t3.Size()) + && disjoint(op, t1.Size(), p4, t4.Size()) + => @mem.Block (Load (OffPtr [o1] p5) mem) +(Load op:(OffPtr [o1] p1) + (Store {t2} p2 _ + (Store {t3} p3 _ + (Store {t4} p4 _ + (Store {t5} p5 _ + mem:(Zero [n] p6 _)))))) + && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) + && CanSSA(t1) + && disjoint(op, t1.Size(), p2, t2.Size()) + && disjoint(op, t1.Size(), p3, t3.Size()) + && disjoint(op, 
t1.Size(), p4, t4.Size()) + && disjoint(op, t1.Size(), p5, t5.Size()) + => @mem.Block (Load (OffPtr [o1] p6) mem) + +// Zero to Load forwarding. +(Load (OffPtr [o] p1) (Zero [n] p2 _)) + && t1.IsBoolean() + && isSamePtr(p1, p2) + && n >= o + 1 + => (ConstBool [false]) +(Load (OffPtr [o] p1) (Zero [n] p2 _)) + && is8BitInt(t1) + && isSamePtr(p1, p2) + && n >= o + 1 + => (Const8 [0]) +(Load (OffPtr [o] p1) (Zero [n] p2 _)) + && is16BitInt(t1) + && isSamePtr(p1, p2) + && n >= o + 2 + => (Const16 [0]) +(Load (OffPtr [o] p1) (Zero [n] p2 _)) + && is32BitInt(t1) + && isSamePtr(p1, p2) + && n >= o + 4 + => (Const32 [0]) +(Load (OffPtr [o] p1) (Zero [n] p2 _)) + && is64BitInt(t1) + && isSamePtr(p1, p2) + && n >= o + 8 + => (Const64 [0]) +(Load (OffPtr [o] p1) (Zero [n] p2 _)) + && is32BitFloat(t1) + && isSamePtr(p1, p2) + && n >= o + 4 + => (Const32F [0]) +(Load (OffPtr [o] p1) (Zero [n] p2 _)) + && is64BitFloat(t1) + && isSamePtr(p1, p2) + && n >= o + 8 + => (Const64F [0]) + +// Eliminate stores of values that have just been loaded from the same location. +// We also handle the common case where there are some intermediate stores. +(Store {t1} p1 (Load p2 mem) mem) + && isSamePtr(p1, p2) + && t2.Size() == t1.Size() + => mem +(Store {t1} p1 (Load p2 oldmem) mem:(Store {t3} p3 _ oldmem)) + && isSamePtr(p1, p2) + && t2.Size() == t1.Size() + && disjoint(p1, t1.Size(), p3, t3.Size()) + => mem +(Store {t1} p1 (Load p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ oldmem))) + && isSamePtr(p1, p2) + && t2.Size() == t1.Size() + && disjoint(p1, t1.Size(), p3, t3.Size()) + && disjoint(p1, t1.Size(), p4, t4.Size()) + => mem +(Store {t1} p1 (Load p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ oldmem)))) + && isSamePtr(p1, p2) + && t2.Size() == t1.Size() + && disjoint(p1, t1.Size(), p3, t3.Size()) + && disjoint(p1, t1.Size(), p4, t4.Size()) + && disjoint(p1, t1.Size(), p5, t5.Size()) + => mem + +// Don't Store zeros to cleared variables. 
+(Store {t} (OffPtr [o] p1) x mem:(Zero [n] p2 _)) + && isConstZero(x) + && o >= 0 && t.Size() + o <= n && isSamePtr(p1, p2) + => mem +(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Zero [n] p3 _))) + && isConstZero(x) + && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p3) + && disjoint(op, t1.Size(), p2, t2.Size()) + => mem +(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Zero [n] p4 _)))) + && isConstZero(x) + && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p4) + && disjoint(op, t1.Size(), p2, t2.Size()) + && disjoint(op, t1.Size(), p3, t3.Size()) + => mem +(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Zero [n] p5 _))))) + && isConstZero(x) + && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p5) + && disjoint(op, t1.Size(), p2, t2.Size()) + && disjoint(op, t1.Size(), p3, t3.Size()) + && disjoint(op, t1.Size(), p4, t4.Size()) + => mem + +// Collapse OffPtr +(OffPtr (OffPtr p [y]) [x]) => (OffPtr p [x+y]) +(OffPtr p [0]) && v.Type.Compare(p.Type) == types.CMPeq => p + +// indexing operations +// Note: bounds check has already been done +(PtrIndex ptr idx) && config.PtrSize == 4 && is32Bit(t.Elem().Size()) => (AddPtr ptr (Mul32 idx (Const32 [int32(t.Elem().Size())]))) +(PtrIndex ptr idx) && config.PtrSize == 8 => (AddPtr ptr (Mul64 idx (Const64 [t.Elem().Size()]))) + +// struct operations +(StructSelect (StructMake1 x)) => x +(StructSelect [0] (StructMake2 x _)) => x +(StructSelect [1] (StructMake2 _ x)) => x +(StructSelect [0] (StructMake3 x _ _)) => x +(StructSelect [1] (StructMake3 _ x _)) => x +(StructSelect [2] (StructMake3 _ _ x)) => x +(StructSelect [0] (StructMake4 x _ _ _)) => x +(StructSelect [1] (StructMake4 _ x _ _)) => x +(StructSelect [2] (StructMake4 _ _ x _)) => x +(StructSelect [3] (StructMake4 _ _ _ x)) => x + +(Load _ _) && t.IsStruct() && t.NumFields() == 0 && CanSSA(t) => + (StructMake0) +(Load ptr mem) && t.IsStruct() && t.NumFields() == 1 && CanSSA(t) => + 
(StructMake1 + (Load (OffPtr [0] ptr) mem)) +(Load ptr mem) && t.IsStruct() && t.NumFields() == 2 && CanSSA(t) => + (StructMake2 + (Load (OffPtr [0] ptr) mem) + (Load (OffPtr [t.FieldOff(1)] ptr) mem)) +(Load ptr mem) && t.IsStruct() && t.NumFields() == 3 && CanSSA(t) => + (StructMake3 + (Load (OffPtr [0] ptr) mem) + (Load (OffPtr [t.FieldOff(1)] ptr) mem) + (Load (OffPtr [t.FieldOff(2)] ptr) mem)) +(Load ptr mem) && t.IsStruct() && t.NumFields() == 4 && CanSSA(t) => + (StructMake4 + (Load (OffPtr [0] ptr) mem) + (Load (OffPtr [t.FieldOff(1)] ptr) mem) + (Load (OffPtr [t.FieldOff(2)] ptr) mem) + (Load (OffPtr [t.FieldOff(3)] ptr) mem)) + +(StructSelect [i] x:(Load ptr mem)) && !CanSSA(t) => + @x.Block (Load (OffPtr [t.FieldOff(int(i))] ptr) mem) + +(Store _ (StructMake0) mem) => mem +(Store dst (StructMake1 f0) mem) => + (Store {t.FieldType(0)} (OffPtr [0] dst) f0 mem) +(Store dst (StructMake2 f0 f1) mem) => + (Store {t.FieldType(1)} + (OffPtr [t.FieldOff(1)] dst) + f1 + (Store {t.FieldType(0)} + (OffPtr [0] dst) + f0 mem)) +(Store dst (StructMake3 f0 f1 f2) mem) => + (Store {t.FieldType(2)} + (OffPtr [t.FieldOff(2)] dst) + f2 + (Store {t.FieldType(1)} + (OffPtr [t.FieldOff(1)] dst) + f1 + (Store {t.FieldType(0)} + (OffPtr [0] dst) + f0 mem))) +(Store dst (StructMake4 f0 f1 f2 f3) mem) => + (Store {t.FieldType(3)} + (OffPtr [t.FieldOff(3)] dst) + f3 + (Store {t.FieldType(2)} + (OffPtr [t.FieldOff(2)] dst) + f2 + (Store {t.FieldType(1)} + (OffPtr [t.FieldOff(1)] dst) + f1 + (Store {t.FieldType(0)} + (OffPtr [0] dst) + f0 mem)))) + +// Putting struct{*byte} and similar into direct interfaces. 
+(IMake _typ (StructMake1 val)) => (IMake _typ val) +(StructSelect [0] (IData x)) => (IData x) + +// un-SSAable values use mem->mem copies +(Store {t} dst (Load src mem) mem) && !CanSSA(t) => + (Move {t} [t.Size()] dst src mem) +(Store {t} dst (Load src mem) (VarDef {x} mem)) && !CanSSA(t) => + (Move {t} [t.Size()] dst src (VarDef {x} mem)) + +// array ops +(ArraySelect (ArrayMake1 x)) => x + +(Load _ _) && t.IsArray() && t.NumElem() == 0 => + (ArrayMake0) + +(Load ptr mem) && t.IsArray() && t.NumElem() == 1 && CanSSA(t) => + (ArrayMake1 (Load ptr mem)) + +(Store _ (ArrayMake0) mem) => mem +(Store dst (ArrayMake1 e) mem) => (Store {e.Type} dst e mem) + +// Putting [1]*byte and similar into direct interfaces. +(IMake _typ (ArrayMake1 val)) => (IMake _typ val) +(ArraySelect [0] (IData x)) => (IData x) + +// string ops +// Decomposing StringMake and lowering of StringPtr and StringLen +// happens in a later pass, dec, so that these operations are available +// to other passes for optimizations. +(StringPtr (StringMake (Addr {s} base) _)) => (Addr {s} base) +(StringLen (StringMake _ (Const64 [c]))) => (Const64 [c]) +(ConstString {str}) && config.PtrSize == 4 && str == "" => + (StringMake (ConstNil) (Const32 [0])) +(ConstString {str}) && config.PtrSize == 8 && str == "" => + (StringMake (ConstNil) (Const64 [0])) +(ConstString {str}) && config.PtrSize == 4 && str != "" => + (StringMake + (Addr {fe.StringData(str)} + (SB)) + (Const32 [int32(len(str))])) +(ConstString {str}) && config.PtrSize == 8 && str != "" => + (StringMake + (Addr {fe.StringData(str)} + (SB)) + (Const64 [int64(len(str))])) + +// slice ops +// Only a few slice rules are provided here. See dec.rules for +// a more comprehensive set. 
+(SliceLen (SliceMake _ (Const64 [c]) _)) => (Const64 [c]) +(SliceCap (SliceMake _ _ (Const64 [c]))) => (Const64 [c]) +(SliceLen (SliceMake _ (Const32 [c]) _)) => (Const32 [c]) +(SliceCap (SliceMake _ _ (Const32 [c]))) => (Const32 [c]) +(SlicePtr (SliceMake (SlicePtr x) _ _)) => (SlicePtr x) +(SliceLen (SliceMake _ (SliceLen x) _)) => (SliceLen x) +(SliceCap (SliceMake _ _ (SliceCap x))) => (SliceCap x) +(SliceCap (SliceMake _ _ (SliceLen x))) => (SliceLen x) +(ConstSlice) && config.PtrSize == 4 => + (SliceMake + (ConstNil ) + (Const32 [0]) + (Const32 [0])) +(ConstSlice) && config.PtrSize == 8 => + (SliceMake + (ConstNil ) + (Const64 [0]) + (Const64 [0])) + +// interface ops +(ConstInterface) => + (IMake + (ConstNil ) + (ConstNil )) + +(NilCheck ptr:(GetG mem) mem) => ptr + +(If (Not cond) yes no) => (If cond no yes) +(If (ConstBool [c]) yes no) && c => (First yes no) +(If (ConstBool [c]) yes no) && !c => (First no yes) + +(Phi nx:(Not x) ny:(Not y)) && nx.Uses == 1 && ny.Uses == 1 => (Not (Phi x y)) + +// Get rid of Convert ops for pointer arithmetic on unsafe.Pointer. +(Convert (Add(64|32) (Convert ptr mem) off) mem) => (AddPtr ptr off) +(Convert (Convert ptr mem) mem) => ptr + +// strength reduction of divide by a constant. +// See ../magic.go for a detailed description of these algorithms. + +// Unsigned divide by power of 2. Strength reduce to a shift. +(Div8u n (Const8 [c])) && isPowerOfTwo8(c) => (Rsh8Ux64 n (Const64 [log8(c)])) +(Div16u n (Const16 [c])) && isPowerOfTwo16(c) => (Rsh16Ux64 n (Const64 [log16(c)])) +(Div32u n (Const32 [c])) && isPowerOfTwo32(c) => (Rsh32Ux64 n (Const64 [log32(c)])) +(Div64u n (Const64 [c])) && isPowerOfTwo64(c) => (Rsh64Ux64 n (Const64 [log64(c)])) +(Div64u n (Const64 [-1<<63])) => (Rsh64Ux64 n (Const64 [63])) + +// Signed non-negative divide by power of 2. 
+(Div8 n (Const8 [c])) && isNonNegative(n) && isPowerOfTwo8(c) => (Rsh8Ux64 n (Const64 [log8(c)])) +(Div16 n (Const16 [c])) && isNonNegative(n) && isPowerOfTwo16(c) => (Rsh16Ux64 n (Const64 [log16(c)])) +(Div32 n (Const32 [c])) && isNonNegative(n) && isPowerOfTwo32(c) => (Rsh32Ux64 n (Const64 [log32(c)])) +(Div64 n (Const64 [c])) && isNonNegative(n) && isPowerOfTwo64(c) => (Rsh64Ux64 n (Const64 [log64(c)])) +(Div64 n (Const64 [-1<<63])) && isNonNegative(n) => (Const64 [0]) + +// Unsigned divide, not a power of 2. Strength reduce to a multiply. +// For 8-bit divides, we just do a direct 9-bit by 8-bit multiply. +(Div8u x (Const8 [c])) && umagicOK8(c) => + (Trunc32to8 + (Rsh32Ux64 + (Mul32 + (Const32 [int32(1<<8+umagic8(c).m)]) + (ZeroExt8to32 x)) + (Const64 [8+umagic8(c).s]))) + +// For 16-bit divides on 64-bit machines, we do a direct 17-bit by 16-bit multiply. +(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 8 => + (Trunc64to16 + (Rsh64Ux64 + (Mul64 + (Const64 [int64(1<<16+umagic16(c).m)]) + (ZeroExt16to64 x)) + (Const64 [16+umagic16(c).s]))) + +// For 16-bit divides on 32-bit machines +(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0 => + (Trunc32to16 + (Rsh32Ux64 + (Mul32 + (Const32 [int32(1<<15+umagic16(c).m/2)]) + (ZeroExt16to32 x)) + (Const64 [16+umagic16(c).s-1]))) +(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && c&1 == 0 => + (Trunc32to16 + (Rsh32Ux64 + (Mul32 + (Const32 [int32(1<<15+(umagic16(c).m+1)/2)]) + (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) + (Const64 [16+umagic16(c).s-2]))) +(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && config.useAvg => + (Trunc32to16 + (Rsh32Ux64 + (Avg32u + (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) + (Mul32 + (Const32 [int32(umagic16(c).m)]) + (ZeroExt16to32 x))) + (Const64 [16+umagic16(c).s-1]))) + +// For 32-bit divides on 32-bit machines +(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && umagic32(c).m&1 == 0 
&& config.useHmul => + (Rsh32Ux64 + (Hmul32u + (Const32 [int32(1<<31+umagic32(c).m/2)]) + x) + (Const64 [umagic32(c).s-1])) +(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul => + (Rsh32Ux64 + (Hmul32u + (Const32 [int32(1<<31+(umagic32(c).m+1)/2)]) + (Rsh32Ux64 x (Const64 [1]))) + (Const64 [umagic32(c).s-2])) +(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul => + (Rsh32Ux64 + (Avg32u + x + (Hmul32u + (Const32 [int32(umagic32(c).m)]) + x)) + (Const64 [umagic32(c).s-1])) + +// For 32-bit divides on 64-bit machines +// We'll use a regular (non-hi) multiply for this case. +(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0 => + (Trunc64to32 + (Rsh64Ux64 + (Mul64 + (Const64 [int64(1<<31+umagic32(c).m/2)]) + (ZeroExt32to64 x)) + (Const64 [32+umagic32(c).s-1]))) +(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && c&1 == 0 => + (Trunc64to32 + (Rsh64Ux64 + (Mul64 + (Const64 [int64(1<<31+(umagic32(c).m+1)/2)]) + (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) + (Const64 [32+umagic32(c).s-2]))) +(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && config.useAvg => + (Trunc64to32 + (Rsh64Ux64 + (Avg64u + (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) + (Mul64 + (Const64 [int64(umagic32(c).m)]) + (ZeroExt32to64 x))) + (Const64 [32+umagic32(c).s-1]))) + +// For unsigned 64-bit divides on 32-bit machines, +// if the constant fits in 16 bits (so that the last term +// fits in 32 bits), convert to three 32-bit divides by a constant. 
+// +// If 1<<32 = Q * c + R +// and x = hi << 32 + lo +// +// Then x = (hi/c*c + hi%c) << 32 + lo +// = hi/c*c<<32 + hi%c<<32 + lo +// = hi/c*c<<32 + (hi%c)*(Q*c+R) + lo/c*c + lo%c +// = hi/c*c<<32 + (hi%c)*Q*c + lo/c*c + (hi%c*R+lo%c) +// and x / c = (hi/c)<<32 + (hi%c)*Q + lo/c + (hi%c*R+lo%c)/c +(Div64u x (Const64 [c])) && c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul => + (Add64 + (Add64 + (Add64 + (Lsh64x64 + (ZeroExt32to64 + (Div32u + (Trunc64to32 (Rsh64Ux64 x (Const64 [32]))) + (Const32 [int32(c)]))) + (Const64 [32])) + (ZeroExt32to64 (Div32u (Trunc64to32 x) (Const32 [int32(c)])))) + (Mul64 + (ZeroExt32to64 + (Mod32u + (Trunc64to32 (Rsh64Ux64 x (Const64 [32]))) + (Const32 [int32(c)]))) + (Const64 [int64((1<<32)/c)]))) + (ZeroExt32to64 + (Div32u + (Add32 + (Mod32u (Trunc64to32 x) (Const32 [int32(c)])) + (Mul32 + (Mod32u + (Trunc64to32 (Rsh64Ux64 x (Const64 [32]))) + (Const32 [int32(c)])) + (Const32 [int32((1<<32)%c)]))) + (Const32 [int32(c)])))) + +// For 64-bit divides on 64-bit machines +// (64-bit divides on 32-bit machines are lowered to a runtime call by the walk pass.) +(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul => + (Rsh64Ux64 + (Hmul64u + (Const64 [int64(1<<63+umagic64(c).m/2)]) + x) + (Const64 [umagic64(c).s-1])) +(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul => + (Rsh64Ux64 + (Hmul64u + (Const64 [int64(1<<63+(umagic64(c).m+1)/2)]) + (Rsh64Ux64 x (Const64 [1]))) + (Const64 [umagic64(c).s-2])) +(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul => + (Rsh64Ux64 + (Avg64u + x + (Hmul64u + (Const64 [int64(umagic64(c).m)]) + x)) + (Const64 [umagic64(c).s-1])) + +// Signed divide by a negative constant. Rewrite to divide by a positive constant. 
+(Div8 n (Const8 [c])) && c < 0 && c != -1<<7 => (Neg8 (Div8 n (Const8 [-c]))) +(Div16 n (Const16 [c])) && c < 0 && c != -1<<15 => (Neg16 (Div16 n (Const16 [-c]))) +(Div32 n (Const32 [c])) && c < 0 && c != -1<<31 => (Neg32 (Div32 n (Const32 [-c]))) +(Div64 n (Const64 [c])) && c < 0 && c != -1<<63 => (Neg64 (Div64 n (Const64 [-c]))) + +// Dividing by the most-negative number. Result is always 0 except +// if the input is also the most-negative number. +// We can detect that using the sign bit of x & -x. +(Div8 x (Const8 [-1<<7 ])) => (Rsh8Ux64 (And8 x (Neg8 x)) (Const64 [7 ])) +(Div16 x (Const16 [-1<<15])) => (Rsh16Ux64 (And16 x (Neg16 x)) (Const64 [15])) +(Div32 x (Const32 [-1<<31])) => (Rsh32Ux64 (And32 x (Neg32 x)) (Const64 [31])) +(Div64 x (Const64 [-1<<63])) => (Rsh64Ux64 (And64 x (Neg64 x)) (Const64 [63])) + +// Signed divide by power of 2. +// n / c = n >> log(c) if n >= 0 +// = (n+c-1) >> log(c) if n < 0 +// We conditionally add c-1 by adding n>>63>>(64-log(c)) (first shift signed, second shift unsigned). +(Div8 n (Const8 [c])) && isPowerOfTwo8(c) => + (Rsh8x64 + (Add8 n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [int64( 8-log8(c))]))) + (Const64 [int64(log8(c))])) +(Div16 n (Const16 [c])) && isPowerOfTwo16(c) => + (Rsh16x64 + (Add16 n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [int64(16-log16(c))]))) + (Const64 [int64(log16(c))])) +(Div32 n (Const32 [c])) && isPowerOfTwo32(c) => + (Rsh32x64 + (Add32 n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [int64(32-log32(c))]))) + (Const64 [int64(log32(c))])) +(Div64 n (Const64 [c])) && isPowerOfTwo64(c) => + (Rsh64x64 + (Add64 n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [int64(64-log64(c))]))) + (Const64 [int64(log64(c))])) + +// Signed divide, not a power of 2. Strength reduce to a multiply. 
+(Div8 x (Const8 [c])) && smagicOK8(c) => + (Sub8 + (Rsh32x64 + (Mul32 + (Const32 [int32(smagic8(c).m)]) + (SignExt8to32 x)) + (Const64 [8+smagic8(c).s])) + (Rsh32x64 + (SignExt8to32 x) + (Const64 [31]))) +(Div16 x (Const16 [c])) && smagicOK16(c) => + (Sub16 + (Rsh32x64 + (Mul32 + (Const32 [int32(smagic16(c).m)]) + (SignExt16to32 x)) + (Const64 [16+smagic16(c).s])) + (Rsh32x64 + (SignExt16to32 x) + (Const64 [31]))) +(Div32 x (Const32 [c])) && smagicOK32(c) && config.RegSize == 8 => + (Sub32 + (Rsh64x64 + (Mul64 + (Const64 [int64(smagic32(c).m)]) + (SignExt32to64 x)) + (Const64 [32+smagic32(c).s])) + (Rsh64x64 + (SignExt32to64 x) + (Const64 [63]))) +(Div32 x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul => + (Sub32 + (Rsh32x64 + (Hmul32 + (Const32 [int32(smagic32(c).m/2)]) + x) + (Const64 [smagic32(c).s-1])) + (Rsh32x64 + x + (Const64 [31]))) +(Div32 x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul => + (Sub32 + (Rsh32x64 + (Add32 + (Hmul32 + (Const32 [int32(smagic32(c).m)]) + x) + x) + (Const64 [smagic32(c).s])) + (Rsh32x64 + x + (Const64 [31]))) +(Div64 x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul => + (Sub64 + (Rsh64x64 + (Hmul64 + (Const64 [int64(smagic64(c).m/2)]) + x) + (Const64 [smagic64(c).s-1])) + (Rsh64x64 + x + (Const64 [63]))) +(Div64 x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul => + (Sub64 + (Rsh64x64 + (Add64 + (Hmul64 + (Const64 [int64(smagic64(c).m)]) + x) + x) + (Const64 [smagic64(c).s])) + (Rsh64x64 + x + (Const64 [63]))) + +// Unsigned mod by power of 2 constant. 
+(Mod8u n (Const8 [c])) && isPowerOfTwo8(c) => (And8 n (Const8 [c-1])) +(Mod16u n (Const16 [c])) && isPowerOfTwo16(c) => (And16 n (Const16 [c-1])) +(Mod32u n (Const32 [c])) && isPowerOfTwo32(c) => (And32 n (Const32 [c-1])) +(Mod64u n (Const64 [c])) && isPowerOfTwo64(c) => (And64 n (Const64 [c-1])) +(Mod64u n (Const64 [-1<<63])) => (And64 n (Const64 [1<<63-1])) + +// Signed non-negative mod by power of 2 constant. +(Mod8 n (Const8 [c])) && isNonNegative(n) && isPowerOfTwo8(c) => (And8 n (Const8 [c-1])) +(Mod16 n (Const16 [c])) && isNonNegative(n) && isPowerOfTwo16(c) => (And16 n (Const16 [c-1])) +(Mod32 n (Const32 [c])) && isNonNegative(n) && isPowerOfTwo32(c) => (And32 n (Const32 [c-1])) +(Mod64 n (Const64 [c])) && isNonNegative(n) && isPowerOfTwo64(c) => (And64 n (Const64 [c-1])) +(Mod64 n (Const64 [-1<<63])) && isNonNegative(n) => n + +// Signed mod by negative constant. +(Mod8 n (Const8 [c])) && c < 0 && c != -1<<7 => (Mod8 n (Const8 [-c])) +(Mod16 n (Const16 [c])) && c < 0 && c != -1<<15 => (Mod16 n (Const16 [-c])) +(Mod32 n (Const32 [c])) && c < 0 && c != -1<<31 => (Mod32 n (Const32 [-c])) +(Mod64 n (Const64 [c])) && c < 0 && c != -1<<63 => (Mod64 n (Const64 [-c])) + +// All other mods by constants, do A%B = A-(A/B*B). +// This implements % with two * and a bunch of ancillary ops. +// One of the * is free if the user's code also computes A/B. 
+(Mod8 x (Const8 [c])) && x.Op != OpConst8 && (c > 0 || c == -1<<7) + => (Sub8 x (Mul8 (Div8 x (Const8 [c])) (Const8 [c]))) +(Mod16 x (Const16 [c])) && x.Op != OpConst16 && (c > 0 || c == -1<<15) + => (Sub16 x (Mul16 (Div16 x (Const16 [c])) (Const16 [c]))) +(Mod32 x (Const32 [c])) && x.Op != OpConst32 && (c > 0 || c == -1<<31) + => (Sub32 x (Mul32 (Div32 x (Const32 [c])) (Const32 [c]))) +(Mod64 x (Const64 [c])) && x.Op != OpConst64 && (c > 0 || c == -1<<63) + => (Sub64 x (Mul64 (Div64 x (Const64 [c])) (Const64 [c]))) +(Mod8u x (Const8 [c])) && x.Op != OpConst8 && c > 0 && umagicOK8( c) + => (Sub8 x (Mul8 (Div8u x (Const8 [c])) (Const8 [c]))) +(Mod16u x (Const16 [c])) && x.Op != OpConst16 && c > 0 && umagicOK16(c) + => (Sub16 x (Mul16 (Div16u x (Const16 [c])) (Const16 [c]))) +(Mod32u x (Const32 [c])) && x.Op != OpConst32 && c > 0 && umagicOK32(c) + => (Sub32 x (Mul32 (Div32u x (Const32 [c])) (Const32 [c]))) +(Mod64u x (Const64 [c])) && x.Op != OpConst64 && c > 0 && umagicOK64(c) + => (Sub64 x (Mul64 (Div64u x (Const64 [c])) (Const64 [c]))) + +// For architectures without rotates on less than 32-bits, promote these checks to 32-bit. +(Eq8 (Mod8u x (Const8 [c])) (Const8 [0])) && x.Op != OpConst8 && udivisibleOK8(c) && !hasSmallRotate(config) => + (Eq32 (Mod32u (ZeroExt8to32 x) (Const32 [int32(uint8(c))])) (Const32 [0])) +(Eq16 (Mod16u x (Const16 [c])) (Const16 [0])) && x.Op != OpConst16 && udivisibleOK16(c) && !hasSmallRotate(config) => + (Eq32 (Mod32u (ZeroExt16to32 x) (Const32 [int32(uint16(c))])) (Const32 [0])) +(Eq8 (Mod8 x (Const8 [c])) (Const8 [0])) && x.Op != OpConst8 && sdivisibleOK8(c) && !hasSmallRotate(config) => + (Eq32 (Mod32 (SignExt8to32 x) (Const32 [int32(c)])) (Const32 [0])) +(Eq16 (Mod16 x (Const16 [c])) (Const16 [0])) && x.Op != OpConst16 && sdivisibleOK16(c) && !hasSmallRotate(config) => + (Eq32 (Mod32 (SignExt16to32 x) (Const32 [int32(c)])) (Const32 [0])) + +// Divisibility checks x%c == 0 convert to multiply and rotate. 
+// Note, x%c == 0 is rewritten as x == c*(x/c) during the opt pass +// where (x/c) is performed using multiplication with magic constants. +// To rewrite x%c == 0 requires pattern matching the rewritten expression +// and checking that the division by the same constant wasn't already calculated. +// This check is made by counting uses of the magic constant multiplication. +// Note that if there were an intermediate opt pass, this rule could be applied +// directly on the Div op and magic division rewrites could be delayed to late opt. + +// Unsigned divisibility checks convert to multiply and rotate. +(Eq8 x (Mul8 (Const8 [c]) + (Trunc32to8 + (Rsh32Ux64 + mul:(Mul32 + (Const32 [m]) + (ZeroExt8to32 x)) + (Const64 [s]))) + ) +) + && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int32(1<<8+umagic8(c).m) && s == 8+umagic8(c).s + && x.Op != OpConst8 && udivisibleOK8(c) + => (Leq8U + (RotateLeft8 + (Mul8 + (Const8 [int8(udivisible8(c).m)]) + x) + (Const8 [int8(8-udivisible8(c).k)]) + ) + (Const8 [int8(udivisible8(c).max)]) + ) + +(Eq16 x (Mul16 (Const16 [c]) + (Trunc64to16 + (Rsh64Ux64 + mul:(Mul64 + (Const64 [m]) + (ZeroExt16to64 x)) + (Const64 [s]))) + ) +) + && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s + && x.Op != OpConst16 && udivisibleOK16(c) + => (Leq16U + (RotateLeft16 + (Mul16 + (Const16 [int16(udivisible16(c).m)]) + x) + (Const16 [int16(16-udivisible16(c).k)]) + ) + (Const16 [int16(udivisible16(c).max)]) + ) + +(Eq16 x (Mul16 (Const16 [c]) + (Trunc32to16 + (Rsh32Ux64 + mul:(Mul32 + (Const32 [m]) + (ZeroExt16to32 x)) + (Const64 [s]))) + ) +) + && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1 + && x.Op != OpConst16 && udivisibleOK16(c) + => (Leq16U + (RotateLeft16 + (Mul16 + (Const16 [int16(udivisible16(c).m)]) + x) + (Const16 [int16(16-udivisible16(c).k)]) + ) + (Const16 [int16(udivisible16(c).max)]) + ) + +(Eq16 
x (Mul16 (Const16 [c]) + (Trunc32to16 + (Rsh32Ux64 + mul:(Mul32 + (Const32 [m]) + (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) + (Const64 [s]))) + ) +) + && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2 + && x.Op != OpConst16 && udivisibleOK16(c) + => (Leq16U + (RotateLeft16 + (Mul16 + (Const16 [int16(udivisible16(c).m)]) + x) + (Const16 [int16(16-udivisible16(c).k)]) + ) + (Const16 [int16(udivisible16(c).max)]) + ) + +(Eq16 x (Mul16 (Const16 [c]) + (Trunc32to16 + (Rsh32Ux64 + (Avg32u + (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) + mul:(Mul32 + (Const32 [m]) + (ZeroExt16to32 x))) + (Const64 [s]))) + ) +) + && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1 + && x.Op != OpConst16 && udivisibleOK16(c) + => (Leq16U + (RotateLeft16 + (Mul16 + (Const16 [int16(udivisible16(c).m)]) + x) + (Const16 [int16(16-udivisible16(c).k)]) + ) + (Const16 [int16(udivisible16(c).max)]) + ) + +(Eq32 x (Mul32 (Const32 [c]) + (Rsh32Ux64 + mul:(Hmul32u + (Const32 [m]) + x) + (Const64 [s])) + ) +) + && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1 + && x.Op != OpConst32 && udivisibleOK32(c) + => (Leq32U + (RotateLeft32 + (Mul32 + (Const32 [int32(udivisible32(c).m)]) + x) + (Const32 [int32(32-udivisible32(c).k)]) + ) + (Const32 [int32(udivisible32(c).max)]) + ) + +(Eq32 x (Mul32 (Const32 [c]) + (Rsh32Ux64 + mul:(Hmul32u + (Const32 [m]) + (Rsh32Ux64 x (Const64 [1]))) + (Const64 [s])) + ) +) + && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2 + && x.Op != OpConst32 && udivisibleOK32(c) + => (Leq32U + (RotateLeft32 + (Mul32 + (Const32 [int32(udivisible32(c).m)]) + x) + (Const32 [int32(32-udivisible32(c).k)]) + ) + (Const32 [int32(udivisible32(c).max)]) + ) + +(Eq32 x (Mul32 (Const32 [c]) + (Rsh32Ux64 + (Avg32u + x + mul:(Hmul32u + 
(Const32 [m]) + x)) + (Const64 [s])) + ) +) + && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int32(umagic32(c).m) && s == umagic32(c).s-1 + && x.Op != OpConst32 && udivisibleOK32(c) + => (Leq32U + (RotateLeft32 + (Mul32 + (Const32 [int32(udivisible32(c).m)]) + x) + (Const32 [int32(32-udivisible32(c).k)]) + ) + (Const32 [int32(udivisible32(c).max)]) + ) + +(Eq32 x (Mul32 (Const32 [c]) + (Trunc64to32 + (Rsh64Ux64 + mul:(Mul64 + (Const64 [m]) + (ZeroExt32to64 x)) + (Const64 [s]))) + ) +) + && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1 + && x.Op != OpConst32 && udivisibleOK32(c) + => (Leq32U + (RotateLeft32 + (Mul32 + (Const32 [int32(udivisible32(c).m)]) + x) + (Const32 [int32(32-udivisible32(c).k)]) + ) + (Const32 [int32(udivisible32(c).max)]) + ) + +(Eq32 x (Mul32 (Const32 [c]) + (Trunc64to32 + (Rsh64Ux64 + mul:(Mul64 + (Const64 [m]) + (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) + (Const64 [s]))) + ) +) + && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2 + && x.Op != OpConst32 && udivisibleOK32(c) + => (Leq32U + (RotateLeft32 + (Mul32 + (Const32 [int32(udivisible32(c).m)]) + x) + (Const32 [int32(32-udivisible32(c).k)]) + ) + (Const32 [int32(udivisible32(c).max)]) + ) + +(Eq32 x (Mul32 (Const32 [c]) + (Trunc64to32 + (Rsh64Ux64 + (Avg64u + (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) + mul:(Mul64 + (Const64 [m]) + (ZeroExt32to64 x))) + (Const64 [s]))) + ) +) + && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1 + && x.Op != OpConst32 && udivisibleOK32(c) + => (Leq32U + (RotateLeft32 + (Mul32 + (Const32 [int32(udivisible32(c).m)]) + x) + (Const32 [int32(32-udivisible32(c).k)]) + ) + (Const32 [int32(udivisible32(c).max)]) + ) + +(Eq64 x (Mul64 (Const64 [c]) + (Rsh64Ux64 + mul:(Hmul64u + (Const64 [m]) + x) + (Const64 [s])) + ) +) && v.Block.Func.pass.name != 
"opt" && mul.Uses == 1 + && m == int64(1<<63+umagic64(c).m/2) && s == umagic64(c).s-1 + && x.Op != OpConst64 && udivisibleOK64(c) + => (Leq64U + (RotateLeft64 + (Mul64 + (Const64 [int64(udivisible64(c).m)]) + x) + (Const64 [64-udivisible64(c).k]) + ) + (Const64 [int64(udivisible64(c).max)]) + ) +(Eq64 x (Mul64 (Const64 [c]) + (Rsh64Ux64 + mul:(Hmul64u + (Const64 [m]) + (Rsh64Ux64 x (Const64 [1]))) + (Const64 [s])) + ) +) && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int64(1<<63+(umagic64(c).m+1)/2) && s == umagic64(c).s-2 + && x.Op != OpConst64 && udivisibleOK64(c) + => (Leq64U + (RotateLeft64 + (Mul64 + (Const64 [int64(udivisible64(c).m)]) + x) + (Const64 [64-udivisible64(c).k]) + ) + (Const64 [int64(udivisible64(c).max)]) + ) +(Eq64 x (Mul64 (Const64 [c]) + (Rsh64Ux64 + (Avg64u + x + mul:(Hmul64u + (Const64 [m]) + x)) + (Const64 [s])) + ) +) && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int64(umagic64(c).m) && s == umagic64(c).s-1 + && x.Op != OpConst64 && udivisibleOK64(c) + => (Leq64U + (RotateLeft64 + (Mul64 + (Const64 [int64(udivisible64(c).m)]) + x) + (Const64 [64-udivisible64(c).k]) + ) + (Const64 [int64(udivisible64(c).max)]) + ) + +// Signed divisibility checks convert to multiply, add and rotate. 
+(Eq8 x (Mul8 (Const8 [c]) + (Sub8 + (Rsh32x64 + mul:(Mul32 + (Const32 [m]) + (SignExt8to32 x)) + (Const64 [s])) + (Rsh32x64 + (SignExt8to32 x) + (Const64 [31]))) + ) +) + && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int32(smagic8(c).m) && s == 8+smagic8(c).s + && x.Op != OpConst8 && sdivisibleOK8(c) + => (Leq8U + (RotateLeft8 + (Add8 + (Mul8 + (Const8 [int8(sdivisible8(c).m)]) + x) + (Const8 [int8(sdivisible8(c).a)]) + ) + (Const8 [int8(8-sdivisible8(c).k)]) + ) + (Const8 [int8(sdivisible8(c).max)]) + ) + +(Eq16 x (Mul16 (Const16 [c]) + (Sub16 + (Rsh32x64 + mul:(Mul32 + (Const32 [m]) + (SignExt16to32 x)) + (Const64 [s])) + (Rsh32x64 + (SignExt16to32 x) + (Const64 [31]))) + ) +) + && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int32(smagic16(c).m) && s == 16+smagic16(c).s + && x.Op != OpConst16 && sdivisibleOK16(c) + => (Leq16U + (RotateLeft16 + (Add16 + (Mul16 + (Const16 [int16(sdivisible16(c).m)]) + x) + (Const16 [int16(sdivisible16(c).a)]) + ) + (Const16 [int16(16-sdivisible16(c).k)]) + ) + (Const16 [int16(sdivisible16(c).max)]) + ) + +(Eq32 x (Mul32 (Const32 [c]) + (Sub32 + (Rsh64x64 + mul:(Mul64 + (Const64 [m]) + (SignExt32to64 x)) + (Const64 [s])) + (Rsh64x64 + (SignExt32to64 x) + (Const64 [63]))) + ) +) + && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int64(smagic32(c).m) && s == 32+smagic32(c).s + && x.Op != OpConst32 && sdivisibleOK32(c) + => (Leq32U + (RotateLeft32 + (Add32 + (Mul32 + (Const32 [int32(sdivisible32(c).m)]) + x) + (Const32 [int32(sdivisible32(c).a)]) + ) + (Const32 [int32(32-sdivisible32(c).k)]) + ) + (Const32 [int32(sdivisible32(c).max)]) + ) + +(Eq32 x (Mul32 (Const32 [c]) + (Sub32 + (Rsh32x64 + mul:(Hmul32 + (Const32 [m]) + x) + (Const64 [s])) + (Rsh32x64 + x + (Const64 [31]))) + ) +) + && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1 + && x.Op != OpConst32 && sdivisibleOK32(c) + => (Leq32U + (RotateLeft32 + (Add32 + (Mul32 + 
(Const32 [int32(sdivisible32(c).m)]) + x) + (Const32 [int32(sdivisible32(c).a)]) + ) + (Const32 [int32(32-sdivisible32(c).k)]) + ) + (Const32 [int32(sdivisible32(c).max)]) + ) + +(Eq32 x (Mul32 (Const32 [c]) + (Sub32 + (Rsh32x64 + (Add32 + mul:(Hmul32 + (Const32 [m]) + x) + x) + (Const64 [s])) + (Rsh32x64 + x + (Const64 [31]))) + ) +) + && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int32(smagic32(c).m) && s == smagic32(c).s + && x.Op != OpConst32 && sdivisibleOK32(c) + => (Leq32U + (RotateLeft32 + (Add32 + (Mul32 + (Const32 [int32(sdivisible32(c).m)]) + x) + (Const32 [int32(sdivisible32(c).a)]) + ) + (Const32 [int32(32-sdivisible32(c).k)]) + ) + (Const32 [int32(sdivisible32(c).max)]) + ) + +(Eq64 x (Mul64 (Const64 [c]) + (Sub64 + (Rsh64x64 + mul:(Hmul64 + (Const64 [m]) + x) + (Const64 [s])) + (Rsh64x64 + x + (Const64 [63]))) + ) +) + && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1 + && x.Op != OpConst64 && sdivisibleOK64(c) + => (Leq64U + (RotateLeft64 + (Add64 + (Mul64 + (Const64 [int64(sdivisible64(c).m)]) + x) + (Const64 [int64(sdivisible64(c).a)]) + ) + (Const64 [64-sdivisible64(c).k]) + ) + (Const64 [int64(sdivisible64(c).max)]) + ) + +(Eq64 x (Mul64 (Const64 [c]) + (Sub64 + (Rsh64x64 + (Add64 + mul:(Hmul64 + (Const64 [m]) + x) + x) + (Const64 [s])) + (Rsh64x64 + x + (Const64 [63]))) + ) +) + && v.Block.Func.pass.name != "opt" && mul.Uses == 1 + && m == int64(smagic64(c).m) && s == smagic64(c).s + && x.Op != OpConst64 && sdivisibleOK64(c) + => (Leq64U + (RotateLeft64 + (Add64 + (Mul64 + (Const64 [int64(sdivisible64(c).m)]) + x) + (Const64 [int64(sdivisible64(c).a)]) + ) + (Const64 [64-sdivisible64(c).k]) + ) + (Const64 [int64(sdivisible64(c).max)]) + ) + +// Divisibility check for signed integers for power of two constant are simple mask. +// However, we must match against the rewritten n%c == 0 -> n - c*(n/c) == 0 -> n == c*(n/c) +// where n/c contains fixup code to handle signed n. 
+((Eq8|Neq8) n (Lsh8x64 + (Rsh8x64 + (Add8 n (Rsh8Ux64 (Rsh8x64 n (Const64 [ 7])) (Const64 [kbar]))) + (Const64 [k])) + (Const64 [k])) +) && k > 0 && k < 7 && kbar == 8 - k + => ((Eq8|Neq8) (And8 n (Const8 [1< [0])) + +((Eq16|Neq16) n (Lsh16x64 + (Rsh16x64 + (Add16 n (Rsh16Ux64 (Rsh16x64 n (Const64 [15])) (Const64 [kbar]))) + (Const64 [k])) + (Const64 [k])) +) && k > 0 && k < 15 && kbar == 16 - k + => ((Eq16|Neq16) (And16 n (Const16 [1< [0])) + +((Eq32|Neq32) n (Lsh32x64 + (Rsh32x64 + (Add32 n (Rsh32Ux64 (Rsh32x64 n (Const64 [31])) (Const64 [kbar]))) + (Const64 [k])) + (Const64 [k])) +) && k > 0 && k < 31 && kbar == 32 - k + => ((Eq32|Neq32) (And32 n (Const32 [1< [0])) + +((Eq64|Neq64) n (Lsh64x64 + (Rsh64x64 + (Add64 n (Rsh64Ux64 (Rsh64x64 n (Const64 [63])) (Const64 [kbar]))) + (Const64 [k])) + (Const64 [k])) +) && k > 0 && k < 63 && kbar == 64 - k + => ((Eq64|Neq64) (And64 n (Const64 [1< [0])) + +(Eq(8|16|32|64) s:(Sub(8|16|32|64) x y) (Const(8|16|32|64) [0])) && s.Uses == 1 => (Eq(8|16|32|64) x y) +(Neq(8|16|32|64) s:(Sub(8|16|32|64) x y) (Const(8|16|32|64) [0])) && s.Uses == 1 => (Neq(8|16|32|64) x y) + +// Optimize bitsets +(Eq8 (And8 x (Const8 [y])) (Const8 [y])) && oneBit8(y) + => (Neq8 (And8 x (Const8 [y])) (Const8 [0])) +(Eq16 (And16 x (Const16 [y])) (Const16 [y])) && oneBit16(y) + => (Neq16 (And16 x (Const16 [y])) (Const16 [0])) +(Eq32 (And32 x (Const32 [y])) (Const32 [y])) && oneBit32(y) + => (Neq32 (And32 x (Const32 [y])) (Const32 [0])) +(Eq64 (And64 x (Const64 [y])) (Const64 [y])) && oneBit64(y) + => (Neq64 (And64 x (Const64 [y])) (Const64 [0])) +(Neq8 (And8 x (Const8 [y])) (Const8 [y])) && oneBit8(y) + => (Eq8 (And8 x (Const8 [y])) (Const8 [0])) +(Neq16 (And16 x (Const16 [y])) (Const16 [y])) && oneBit16(y) + => (Eq16 (And16 x (Const16 [y])) (Const16 [0])) +(Neq32 (And32 x (Const32 [y])) (Const32 [y])) && oneBit32(y) + => (Eq32 (And32 x (Const32 [y])) (Const32 [0])) +(Neq64 (And64 x (Const64 [y])) (Const64 [y])) && oneBit64(y) + => (Eq64 (And64 x 
(Const64 [y])) (Const64 [0])) + +// Reassociate expressions involving +// constants such that constants come first, +// exposing obvious constant-folding opportunities. +// Reassociate (op (op y C) x) to (op C (op x y)) or similar, where C +// is constant, which pushes constants to the outside +// of the expression. At that point, any constant-folding +// opportunities should be obvious. +// Note: don't include AddPtr here! In order to maintain the +// invariant that pointers must stay within the pointed-to object, +// we can't pull part of a pointer computation above the AddPtr. +// See issue 37881. +// Note: we don't need to handle any (x-C) cases because we already rewrite +// (x-C) to (x+(-C)). + +// x + (C + z) -> C + (x + z) +(Add64 (Add64 i:(Const64 ) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Add64 z x)) +(Add32 (Add32 i:(Const32 ) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Add32 z x)) +(Add16 (Add16 i:(Const16 ) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Add16 z x)) +(Add8 (Add8 i:(Const8 ) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Add8 z x)) + +// x + (C - z) -> C + (x - z) +(Add64 (Sub64 i:(Const64 ) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Sub64 x z)) +(Add32 (Sub32 i:(Const32 ) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Sub32 x z)) +(Add16 (Sub16 i:(Const16 ) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Sub16 x z)) +(Add8 (Sub8 i:(Const8 ) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Sub8 x z)) + +// x - (C - z) -> x + (z - C) -> (x + z) - C +(Sub64 x (Sub64 i:(Const64 ) z)) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 (Add64 x z) i) +(Sub32 x (Sub32 i:(Const32 ) z)) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 (Add32 x z) i) +(Sub16 x (Sub16 i:(Const16 ) z)) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 (Add16 x z) i) +(Sub8 x (Sub8 i:(Const8 ) z)) && (z.Op != OpConst8 && 
x.Op != OpConst8) => (Sub8 (Add8 x z) i) + +// x - (z + C) -> x + (-z - C) -> (x - z) - C +(Sub64 x (Add64 z i:(Const64 ))) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 (Sub64 x z) i) +(Sub32 x (Add32 z i:(Const32 ))) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 (Sub32 x z) i) +(Sub16 x (Add16 z i:(Const16 ))) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 (Sub16 x z) i) +(Sub8 x (Add8 z i:(Const8 ))) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 (Sub8 x z) i) + +// (C - z) - x -> C - (z + x) +(Sub64 (Sub64 i:(Const64 ) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 i (Add64 z x)) +(Sub32 (Sub32 i:(Const32 ) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 i (Add32 z x)) +(Sub16 (Sub16 i:(Const16 ) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 i (Add16 z x)) +(Sub8 (Sub8 i:(Const8 ) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 i (Add8 z x)) + +// (z + C) -x -> C + (z - x) +(Sub64 (Add64 z i:(Const64 )) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Sub64 z x)) +(Sub32 (Add32 z i:(Const32 )) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Sub32 z x)) +(Sub16 (Add16 z i:(Const16 )) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Sub16 z x)) +(Sub8 (Add8 z i:(Const8 )) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Sub8 z x)) + +// x & (C & z) -> C & (x & z) +(And64 (And64 i:(Const64 ) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (And64 i (And64 z x)) +(And32 (And32 i:(Const32 ) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (And32 i (And32 z x)) +(And16 (And16 i:(Const16 ) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (And16 i (And16 z x)) +(And8 (And8 i:(Const8 ) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (And8 i (And8 z x)) + +// x | (C | z) -> C | (x | z) +(Or64 (Or64 i:(Const64 ) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Or64 i (Or64 z x)) +(Or32 (Or32 i:(Const32 ) z) x) && (z.Op != OpConst32 && 
x.Op != OpConst32) => (Or32 i (Or32 z x)) +(Or16 (Or16 i:(Const16 ) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Or16 i (Or16 z x)) +(Or8 (Or8 i:(Const8 ) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Or8 i (Or8 z x)) + +// x ^ (C ^ z) -> C ^ (x ^ z) +(Xor64 (Xor64 i:(Const64 ) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Xor64 i (Xor64 z x)) +(Xor32 (Xor32 i:(Const32 ) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Xor32 i (Xor32 z x)) +(Xor16 (Xor16 i:(Const16 ) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Xor16 i (Xor16 z x)) +(Xor8 (Xor8 i:(Const8 ) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Xor8 i (Xor8 z x)) + +// x * (D * z) = D * (x * z) +(Mul64 (Mul64 i:(Const64 ) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Mul64 i (Mul64 x z)) +(Mul32 (Mul32 i:(Const32 ) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Mul32 i (Mul32 x z)) +(Mul16 (Mul16 i:(Const16 ) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Mul16 i (Mul16 x z)) +(Mul8 (Mul8 i:(Const8 ) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Mul8 i (Mul8 x z)) + +// C + (D + x) -> (C + D) + x +(Add64 (Const64 [c]) (Add64 (Const64 [d]) x)) => (Add64 (Const64 [c+d]) x) +(Add32 (Const32 [c]) (Add32 (Const32 [d]) x)) => (Add32 (Const32 [c+d]) x) +(Add16 (Const16 [c]) (Add16 (Const16 [d]) x)) => (Add16 (Const16 [c+d]) x) +(Add8 (Const8 [c]) (Add8 (Const8 [d]) x)) => (Add8 (Const8 [c+d]) x) + +// C + (D - x) -> (C + D) - x +(Add64 (Const64 [c]) (Sub64 (Const64 [d]) x)) => (Sub64 (Const64 [c+d]) x) +(Add32 (Const32 [c]) (Sub32 (Const32 [d]) x)) => (Sub32 (Const32 [c+d]) x) +(Add16 (Const16 [c]) (Sub16 (Const16 [d]) x)) => (Sub16 (Const16 [c+d]) x) +(Add8 (Const8 [c]) (Sub8 (Const8 [d]) x)) => (Sub8 (Const8 [c+d]) x) + +// C - (D - x) -> (C - D) + x +(Sub64 (Const64 [c]) (Sub64 (Const64 [d]) x)) => (Add64 (Const64 [c-d]) x) +(Sub32 (Const32 [c]) (Sub32 (Const32 [d]) x)) => (Add32 (Const32 [c-d]) x) +(Sub16 (Const16 [c]) (Sub16 (Const16 [d]) 
x)) => (Add16 (Const16 [c-d]) x) +(Sub8 (Const8 [c]) (Sub8 (Const8 [d]) x)) => (Add8 (Const8 [c-d]) x) + +// C - (D + x) -> (C - D) - x +(Sub64 (Const64 [c]) (Add64 (Const64 [d]) x)) => (Sub64 (Const64 [c-d]) x) +(Sub32 (Const32 [c]) (Add32 (Const32 [d]) x)) => (Sub32 (Const32 [c-d]) x) +(Sub16 (Const16 [c]) (Add16 (Const16 [d]) x)) => (Sub16 (Const16 [c-d]) x) +(Sub8 (Const8 [c]) (Add8 (Const8 [d]) x)) => (Sub8 (Const8 [c-d]) x) + +// C & (D & x) -> (C & D) & x +(And64 (Const64 [c]) (And64 (Const64 [d]) x)) => (And64 (Const64 [c&d]) x) +(And32 (Const32 [c]) (And32 (Const32 [d]) x)) => (And32 (Const32 [c&d]) x) +(And16 (Const16 [c]) (And16 (Const16 [d]) x)) => (And16 (Const16 [c&d]) x) +(And8 (Const8 [c]) (And8 (Const8 [d]) x)) => (And8 (Const8 [c&d]) x) + +// C | (D | x) -> (C | D) | x +(Or64 (Const64 [c]) (Or64 (Const64 [d]) x)) => (Or64 (Const64 [c|d]) x) +(Or32 (Const32 [c]) (Or32 (Const32 [d]) x)) => (Or32 (Const32 [c|d]) x) +(Or16 (Const16 [c]) (Or16 (Const16 [d]) x)) => (Or16 (Const16 [c|d]) x) +(Or8 (Const8 [c]) (Or8 (Const8 [d]) x)) => (Or8 (Const8 [c|d]) x) + +// C ^ (D ^ x) -> (C ^ D) ^ x +(Xor64 (Const64 [c]) (Xor64 (Const64 [d]) x)) => (Xor64 (Const64 [c^d]) x) +(Xor32 (Const32 [c]) (Xor32 (Const32 [d]) x)) => (Xor32 (Const32 [c^d]) x) +(Xor16 (Const16 [c]) (Xor16 (Const16 [d]) x)) => (Xor16 (Const16 [c^d]) x) +(Xor8 (Const8 [c]) (Xor8 (Const8 [d]) x)) => (Xor8 (Const8 [c^d]) x) + +// C * (D * x) = (C * D) * x +(Mul64 (Const64 [c]) (Mul64 (Const64 [d]) x)) => (Mul64 (Const64 [c*d]) x) +(Mul32 (Const32 [c]) (Mul32 (Const32 [d]) x)) => (Mul32 (Const32 [c*d]) x) +(Mul16 (Const16 [c]) (Mul16 (Const16 [d]) x)) => (Mul16 (Const16 [c*d]) x) +(Mul8 (Const8 [c]) (Mul8 (Const8 [d]) x)) => (Mul8 (Const8 [c*d]) x) + +// floating point optimizations +(Mul(32|64)F x (Const(32|64)F [1])) => x +(Mul32F x (Const32F [-1])) => (Neg32F x) +(Mul64F x (Const64F [-1])) => (Neg64F x) +(Mul32F x (Const32F [2])) => (Add32F x x) +(Mul64F x (Const64F [2])) => (Add64F x x) + 
+(Div32F x (Const32F [c])) && reciprocalExact32(c) => (Mul32F x (Const32F [1/c])) +(Div64F x (Const64F [c])) && reciprocalExact64(c) => (Mul64F x (Const64F [1/c])) + +// rewrite single-precision sqrt expression "float32(math.Sqrt(float64(x)))" +(Cvt64Fto32F sqrt0:(Sqrt (Cvt32Fto64F x))) && sqrt0.Uses==1 => (Sqrt32 x) + +(Sqrt (Const64F [c])) && !math.IsNaN(math.Sqrt(c)) => (Const64F [math.Sqrt(c)]) + +// for rewriting results of some late-expanded rewrites (below) +(SelectN [0] (MakeResult x ___)) => x +(SelectN [1] (MakeResult x y ___)) => y +(SelectN [2] (MakeResult x y z ___)) => z + +// for late-expanded calls, recognize newobject and remove zeroing and nilchecks +(Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call)) + && isSameCall(call.Aux, "runtime.newobject") + => mem + +(Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call)) + && isConstZero(x) + && isSameCall(call.Aux, "runtime.newobject") + => mem + +(Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call)) + && isConstZero(x) + && isSameCall(call.Aux, "runtime.newobject") + => mem + +(NilCheck ptr:(SelectN [0] call:(StaticLECall _ _)) _) + && isSameCall(call.Aux, "runtime.newobject") + && warnRule(fe.Debug_checknil(), v, "removed nil check") + => ptr + +(NilCheck ptr:(OffPtr (SelectN [0] call:(StaticLECall _ _))) _) + && isSameCall(call.Aux, "runtime.newobject") + && warnRule(fe.Debug_checknil(), v, "removed nil check") + => ptr + +// Addresses of globals are always non-nil. 
+(NilCheck ptr:(Addr {_} (SB)) _) => ptr +(NilCheck ptr:(Convert (Addr {_} (SB)) _) _) => ptr + +// for late-expanded calls, recognize memequal applied to a single constant byte +// Support is limited by 1, 2, 4, 8 byte sizes +(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [1]) mem) + && isSameCall(callAux, "runtime.memequal") + && symIsRO(scon) + => (MakeResult (Eq8 (Load sptr mem) (Const8 [int8(read8(scon,0))])) mem) + +(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [1]) mem) + && isSameCall(callAux, "runtime.memequal") + && symIsRO(scon) + => (MakeResult (Eq8 (Load sptr mem) (Const8 [int8(read8(scon,0))])) mem) + +(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [2]) mem) + && isSameCall(callAux, "runtime.memequal") + && symIsRO(scon) + && canLoadUnaligned(config) + => (MakeResult (Eq16 (Load sptr mem) (Const16 [int16(read16(scon,0,config.ctxt.Arch.ByteOrder))])) mem) + +(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [2]) mem) + && isSameCall(callAux, "runtime.memequal") + && symIsRO(scon) + && canLoadUnaligned(config) + => (MakeResult (Eq16 (Load sptr mem) (Const16 [int16(read16(scon,0,config.ctxt.Arch.ByteOrder))])) mem) + +(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [4]) mem) + && isSameCall(callAux, "runtime.memequal") + && symIsRO(scon) + && canLoadUnaligned(config) + => (MakeResult (Eq32 (Load sptr mem) (Const32 [int32(read32(scon,0,config.ctxt.Arch.ByteOrder))])) mem) + +(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [4]) mem) + && isSameCall(callAux, "runtime.memequal") + && symIsRO(scon) + && canLoadUnaligned(config) + => (MakeResult (Eq32 (Load sptr mem) (Const32 [int32(read32(scon,0,config.ctxt.Arch.ByteOrder))])) mem) + +(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [8]) mem) + && isSameCall(callAux, "runtime.memequal") + && symIsRO(scon) + && canLoadUnaligned(config) && config.PtrSize == 8 + => (MakeResult (Eq64 (Load sptr mem) (Const64 
[int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])) mem) + +(StaticLECall {callAux} (Addr {scon} (SB)) sptr (Const64 [8]) mem) + && isSameCall(callAux, "runtime.memequal") + && symIsRO(scon) + && canLoadUnaligned(config) && config.PtrSize == 8 + => (MakeResult (Eq64 (Load sptr mem) (Const64 [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])) mem) + +(StaticLECall {callAux} _ _ (Const64 [0]) mem) + && isSameCall(callAux, "runtime.memequal") + => (MakeResult (ConstBool [true]) mem) + +(Static(Call|LECall) {callAux} p q _ mem) + && isSameCall(callAux, "runtime.memequal") + && isSamePtr(p, q) + => (MakeResult (ConstBool [true]) mem) + +// Turn known-size calls to memclrNoHeapPointers into a Zero. +// Note that we are using types.Types[types.TUINT8] instead of sptr.Type.Elem() - see issue 55122 and CL 431496 for more details. +(SelectN [0] call:(StaticCall {sym} sptr (Const(64|32) [c]) mem)) + && isInlinableMemclr(config, int64(c)) + && isSameCall(sym, "runtime.memclrNoHeapPointers") + && call.Uses == 1 + && clobber(call) + => (Zero {types.Types[types.TUINT8]} [int64(c)] sptr mem) + +// Recognise make([]T, 0) and replace it with a pointer to the zerobase +(StaticLECall {callAux} _ (Const(64|32) [0]) (Const(64|32) [0]) mem) + && isSameCall(callAux, "runtime.makeslice") + => (MakeResult (Addr {ir.Syms.Zerobase} (SB)) mem) + +// Evaluate constant address comparisons. 
+(EqPtr x x) => (ConstBool [true]) +(NeqPtr x x) => (ConstBool [false]) +(EqPtr (Addr {x} _) (Addr {y} _)) => (ConstBool [x == y]) +(EqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _))) => (ConstBool [x == y && o == 0]) +(EqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _))) => (ConstBool [x == y && o1 == o2]) +(NeqPtr (Addr {x} _) (Addr {y} _)) => (ConstBool [x != y]) +(NeqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _))) => (ConstBool [x != y || o != 0]) +(NeqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _))) => (ConstBool [x != y || o1 != o2]) +(EqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _)) => (ConstBool [x == y]) +(EqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _))) => (ConstBool [x == y && o == 0]) +(EqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _))) => (ConstBool [x == y && o1 == o2]) +(NeqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _)) => (ConstBool [x != y]) +(NeqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _))) => (ConstBool [x != y || o != 0]) +(NeqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _))) => (ConstBool [x != y || o1 != o2]) +(EqPtr (OffPtr [o1] p1) p2) && isSamePtr(p1, p2) => (ConstBool [o1 == 0]) +(NeqPtr (OffPtr [o1] p1) p2) && isSamePtr(p1, p2) => (ConstBool [o1 != 0]) +(EqPtr (OffPtr [o1] p1) (OffPtr [o2] p2)) && isSamePtr(p1, p2) => (ConstBool [o1 == o2]) +(NeqPtr (OffPtr [o1] p1) (OffPtr [o2] p2)) && isSamePtr(p1, p2) => (ConstBool [o1 != o2]) +(EqPtr (Const(32|64) [c]) (Const(32|64) [d])) => (ConstBool [c == d]) +(NeqPtr (Const(32|64) [c]) (Const(32|64) [d])) => (ConstBool [c != d]) +(EqPtr (Convert (Addr {x} _) _) (Addr {y} _)) => (ConstBool [x==y]) +(NeqPtr (Convert (Addr {x} _) _) (Addr {y} _)) => (ConstBool [x!=y]) + +(EqPtr (LocalAddr _ _) (Addr _)) => (ConstBool [false]) +(EqPtr (OffPtr (LocalAddr _ _)) (Addr _)) => (ConstBool [false]) +(EqPtr (LocalAddr _ _) (OffPtr (Addr _))) => (ConstBool [false]) +(EqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _))) => (ConstBool 
[false]) +(NeqPtr (LocalAddr _ _) (Addr _)) => (ConstBool [true]) +(NeqPtr (OffPtr (LocalAddr _ _)) (Addr _)) => (ConstBool [true]) +(NeqPtr (LocalAddr _ _) (OffPtr (Addr _))) => (ConstBool [true]) +(NeqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _))) => (ConstBool [true]) + +// Simplify address comparisons. +(EqPtr (AddPtr p1 o1) p2) && isSamePtr(p1, p2) => (Not (IsNonNil o1)) +(NeqPtr (AddPtr p1 o1) p2) && isSamePtr(p1, p2) => (IsNonNil o1) +(EqPtr (Const(32|64) [0]) p) => (Not (IsNonNil p)) +(NeqPtr (Const(32|64) [0]) p) => (IsNonNil p) +(EqPtr (ConstNil) p) => (Not (IsNonNil p)) +(NeqPtr (ConstNil) p) => (IsNonNil p) + +// Evaluate constant user nil checks. +(IsNonNil (ConstNil)) => (ConstBool [false]) +(IsNonNil (Const(32|64) [c])) => (ConstBool [c != 0]) +(IsNonNil (Addr _) ) => (ConstBool [true]) +(IsNonNil (Convert (Addr _) _)) => (ConstBool [true]) +(IsNonNil (LocalAddr _ _)) => (ConstBool [true]) + +// Inline small or disjoint runtime.memmove calls with constant length. +// See the comment in op Move in genericOps.go for discussion of the type. +// +// Note that we've lost any knowledge of the type and alignment requirements +// of the source and destination. We only know the size, and that the type +// contains no pointers. +// The type of the move is not necessarily v.Args[0].Type().Elem()! +// See issue 55122 for details. +// +// Because expand calls runs after prove, constants useful to this pattern may not appear. +// Both versions need to exist; the memory and register variants. +// +// Match post-expansion calls, memory version. +(SelectN [0] call:(StaticCall {sym} s1:(Store _ (Const(64|32) [sz]) s2:(Store _ src s3:(Store {t} _ dst mem))))) + && sz >= 0 + && isSameCall(sym, "runtime.memmove") + && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 + && isInlinableMemmove(dst, src, int64(sz), config) + && clobber(s1, s2, s3, call) + => (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem) + +// Match post-expansion calls, register version. 
+(SelectN [0] call:(StaticCall {sym} dst src (Const(64|32) [sz]) mem)) + && sz >= 0 + && call.Uses == 1 // this will exclude all calls with results + && isSameCall(sym, "runtime.memmove") + && isInlinableMemmove(dst, src, int64(sz), config) + && clobber(call) + => (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem) + +// Match pre-expansion calls. +(SelectN [0] call:(StaticLECall {sym} dst src (Const(64|32) [sz]) mem)) + && sz >= 0 + && call.Uses == 1 // this will exclude all calls with results + && isSameCall(sym, "runtime.memmove") + && isInlinableMemmove(dst, src, int64(sz), config) + && clobber(call) + => (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem) + +// De-virtualize late-expanded interface calls into late-expanded static calls. +(InterLECall [argsize] {auxCall} (Addr {fn} (SB)) ___) => devirtLECall(v, fn.(*obj.LSym)) + +// Move and Zero optimizations. +// Move source and destination may overlap. + +// Convert Moves into Zeros when the source is known to be zeros. +(Move {t} [n] dst1 src mem:(Zero {t} [n] dst2 _)) && isSamePtr(src, dst2) + => (Zero {t} [n] dst1 mem) +(Move {t} [n] dst1 src mem:(VarDef (Zero {t} [n] dst0 _))) && isSamePtr(src, dst0) + => (Zero {t} [n] dst1 mem) +(Move {t} [n] dst (Addr {sym} (SB)) mem) && symIsROZero(sym) => (Zero {t} [n] dst mem) + +// Don't Store to variables that are about to be overwritten by Move/Zero. +(Zero {t1} [n] p1 store:(Store {t2} (OffPtr [o2] p2) _ mem)) + && isSamePtr(p1, p2) && store.Uses == 1 + && n >= o2 + t2.Size() + && clobber(store) + => (Zero {t1} [n] p1 mem) +(Move {t1} [n] dst1 src1 store:(Store {t2} op:(OffPtr [o2] dst2) _ mem)) + && isSamePtr(dst1, dst2) && store.Uses == 1 + && n >= o2 + t2.Size() + && disjoint(src1, n, op, t2.Size()) + && clobber(store) + => (Move {t1} [n] dst1 src1 mem) + +// Don't Move to variables that are immediately completely overwritten. 
+(Zero {t} [n] dst1 move:(Move {t} [n] dst2 _ mem)) + && move.Uses == 1 + && isSamePtr(dst1, dst2) + && clobber(move) + => (Zero {t} [n] dst1 mem) +(Move {t} [n] dst1 src1 move:(Move {t} [n] dst2 _ mem)) + && move.Uses == 1 + && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) + && clobber(move) + => (Move {t} [n] dst1 src1 mem) +(Zero {t} [n] dst1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem))) + && move.Uses == 1 && vardef.Uses == 1 + && isSamePtr(dst1, dst2) + && clobber(move, vardef) + => (Zero {t} [n] dst1 (VarDef {x} mem)) +(Move {t} [n] dst1 src1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem))) + && move.Uses == 1 && vardef.Uses == 1 + && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) + && clobber(move, vardef) + => (Move {t} [n] dst1 src1 (VarDef {x} mem)) +(Store {t1} op1:(OffPtr [o1] p1) d1 + m2:(Store {t2} op2:(OffPtr [0] p2) d2 + m3:(Move [n] p3 _ mem))) + && m2.Uses == 1 && m3.Uses == 1 + && o1 == t2.Size() + && n == t2.Size() + t1.Size() + && isSamePtr(p1, p2) && isSamePtr(p2, p3) + && clobber(m2, m3) + => (Store {t1} op1 d1 (Store {t2} op2 d2 mem)) +(Store {t1} op1:(OffPtr [o1] p1) d1 + m2:(Store {t2} op2:(OffPtr [o2] p2) d2 + m3:(Store {t3} op3:(OffPtr [0] p3) d3 + m4:(Move [n] p4 _ mem)))) + && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 + && o2 == t3.Size() + && o1-o2 == t2.Size() + && n == t3.Size() + t2.Size() + t1.Size() + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) + && clobber(m2, m3, m4) + => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem))) +(Store {t1} op1:(OffPtr [o1] p1) d1 + m2:(Store {t2} op2:(OffPtr [o2] p2) d2 + m3:(Store {t3} op3:(OffPtr [o3] p3) d3 + m4:(Store {t4} op4:(OffPtr [0] p4) d4 + m5:(Move [n] p5 _ mem))))) + && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 + && o3 == t4.Size() + && o2-o3 == t3.Size() + && o1-o2 == t2.Size() + && n == t4.Size() + t3.Size() + t2.Size() + t1.Size() + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, 
p5) + && clobber(m2, m3, m4, m5) + => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem)))) + +// Don't Zero variables that are immediately completely overwritten +// before being accessed. +(Move {t} [n] dst1 src1 zero:(Zero {t} [n] dst2 mem)) + && zero.Uses == 1 + && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) + && clobber(zero) + => (Move {t} [n] dst1 src1 mem) +(Move {t} [n] dst1 src1 vardef:(VarDef {x} zero:(Zero {t} [n] dst2 mem))) + && zero.Uses == 1 && vardef.Uses == 1 + && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) + && clobber(zero, vardef) + => (Move {t} [n] dst1 src1 (VarDef {x} mem)) +(Store {t1} op1:(OffPtr [o1] p1) d1 + m2:(Store {t2} op2:(OffPtr [0] p2) d2 + m3:(Zero [n] p3 mem))) + && m2.Uses == 1 && m3.Uses == 1 + && o1 == t2.Size() + && n == t2.Size() + t1.Size() + && isSamePtr(p1, p2) && isSamePtr(p2, p3) + && clobber(m2, m3) + => (Store {t1} op1 d1 (Store {t2} op2 d2 mem)) +(Store {t1} op1:(OffPtr [o1] p1) d1 + m2:(Store {t2} op2:(OffPtr [o2] p2) d2 + m3:(Store {t3} op3:(OffPtr [0] p3) d3 + m4:(Zero [n] p4 mem)))) + && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 + && o2 == t3.Size() + && o1-o2 == t2.Size() + && n == t3.Size() + t2.Size() + t1.Size() + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) + && clobber(m2, m3, m4) + => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem))) +(Store {t1} op1:(OffPtr [o1] p1) d1 + m2:(Store {t2} op2:(OffPtr [o2] p2) d2 + m3:(Store {t3} op3:(OffPtr [o3] p3) d3 + m4:(Store {t4} op4:(OffPtr [0] p4) d4 + m5:(Zero [n] p5 mem))))) + && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 + && o3 == t4.Size() + && o2-o3 == t3.Size() + && o1-o2 == t2.Size() + && n == t4.Size() + t3.Size() + t2.Size() + t1.Size() + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) + && clobber(m2, m3, m4, m5) + => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem)))) + +// Don't Move from 
memory if the values are likely to already be +// in registers. +(Move {t1} [n] dst p1 + mem:(Store {t2} op2:(OffPtr [o2] p2) d1 + (Store {t3} op3:(OffPtr [0] p3) d2 _))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) + && t2.Alignment() <= t1.Alignment() + && t3.Alignment() <= t1.Alignment() + && registerizable(b, t2) + && registerizable(b, t3) + && o2 == t3.Size() + && n == t2.Size() + t3.Size() + => (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [0] dst) d2 mem)) +(Move {t1} [n] dst p1 + mem:(Store {t2} op2:(OffPtr [o2] p2) d1 + (Store {t3} op3:(OffPtr [o3] p3) d2 + (Store {t4} op4:(OffPtr [0] p4) d3 _)))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) + && t2.Alignment() <= t1.Alignment() + && t3.Alignment() <= t1.Alignment() + && t4.Alignment() <= t1.Alignment() + && registerizable(b, t2) + && registerizable(b, t3) + && registerizable(b, t4) + && o3 == t4.Size() + && o2-o3 == t3.Size() + && n == t2.Size() + t3.Size() + t4.Size() + => (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Store {t4} (OffPtr [0] dst) d3 mem))) +(Move {t1} [n] dst p1 + mem:(Store {t2} op2:(OffPtr [o2] p2) d1 + (Store {t3} op3:(OffPtr [o3] p3) d2 + (Store {t4} op4:(OffPtr [o4] p4) d3 + (Store {t5} op5:(OffPtr [0] p5) d4 _))))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) + && t2.Alignment() <= t1.Alignment() + && t3.Alignment() <= t1.Alignment() + && t4.Alignment() <= t1.Alignment() + && t5.Alignment() <= t1.Alignment() + && registerizable(b, t2) + && registerizable(b, t3) + && registerizable(b, t4) + && registerizable(b, t5) + && o4 == t5.Size() + && o3-o4 == t4.Size() + && o2-o3 == t3.Size() + && n == t2.Size() + t3.Size() + t4.Size() + t5.Size() + => (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Store {t4} (OffPtr [o4] dst) d3 + (Store {t5} (OffPtr [0] dst) d4 mem)))) + +// Same thing but with VarDef in the middle. 
+(Move {t1} [n] dst p1 + mem:(VarDef + (Store {t2} op2:(OffPtr [o2] p2) d1 + (Store {t3} op3:(OffPtr [0] p3) d2 _)))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) + && t2.Alignment() <= t1.Alignment() + && t3.Alignment() <= t1.Alignment() + && registerizable(b, t2) + && registerizable(b, t3) + && o2 == t3.Size() + && n == t2.Size() + t3.Size() + => (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [0] dst) d2 mem)) +(Move {t1} [n] dst p1 + mem:(VarDef + (Store {t2} op2:(OffPtr [o2] p2) d1 + (Store {t3} op3:(OffPtr [o3] p3) d2 + (Store {t4} op4:(OffPtr [0] p4) d3 _))))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) + && t2.Alignment() <= t1.Alignment() + && t3.Alignment() <= t1.Alignment() + && t4.Alignment() <= t1.Alignment() + && registerizable(b, t2) + && registerizable(b, t3) + && registerizable(b, t4) + && o3 == t4.Size() + && o2-o3 == t3.Size() + && n == t2.Size() + t3.Size() + t4.Size() + => (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Store {t4} (OffPtr [0] dst) d3 mem))) +(Move {t1} [n] dst p1 + mem:(VarDef + (Store {t2} op2:(OffPtr [o2] p2) d1 + (Store {t3} op3:(OffPtr [o3] p3) d2 + (Store {t4} op4:(OffPtr [o4] p4) d3 + (Store {t5} op5:(OffPtr [0] p5) d4 _)))))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) + && t2.Alignment() <= t1.Alignment() + && t3.Alignment() <= t1.Alignment() + && t4.Alignment() <= t1.Alignment() + && t5.Alignment() <= t1.Alignment() + && registerizable(b, t2) + && registerizable(b, t3) + && registerizable(b, t4) + && registerizable(b, t5) + && o4 == t5.Size() + && o3-o4 == t4.Size() + && o2-o3 == t3.Size() + && n == t2.Size() + t3.Size() + t4.Size() + t5.Size() + => (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Store {t4} (OffPtr [o4] dst) d3 + (Store {t5} (OffPtr [0] dst) d4 mem)))) + +// Prefer to Zero and Store than to Move. 
+(Move {t1} [n] dst p1 + mem:(Store {t2} op2:(OffPtr [o2] p2) d1 + (Zero {t3} [n] p3 _))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) + && t2.Alignment() <= t1.Alignment() + && t3.Alignment() <= t1.Alignment() + && registerizable(b, t2) + && n >= o2 + t2.Size() + => (Store {t2} (OffPtr [o2] dst) d1 + (Zero {t1} [n] dst mem)) +(Move {t1} [n] dst p1 + mem:(Store {t2} (OffPtr [o2] p2) d1 + (Store {t3} (OffPtr [o3] p3) d2 + (Zero {t4} [n] p4 _)))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) + && t2.Alignment() <= t1.Alignment() + && t3.Alignment() <= t1.Alignment() + && t4.Alignment() <= t1.Alignment() + && registerizable(b, t2) + && registerizable(b, t3) + && n >= o2 + t2.Size() + && n >= o3 + t3.Size() + => (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Zero {t1} [n] dst mem))) +(Move {t1} [n] dst p1 + mem:(Store {t2} (OffPtr [o2] p2) d1 + (Store {t3} (OffPtr [o3] p3) d2 + (Store {t4} (OffPtr [o4] p4) d3 + (Zero {t5} [n] p5 _))))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) + && t2.Alignment() <= t1.Alignment() + && t3.Alignment() <= t1.Alignment() + && t4.Alignment() <= t1.Alignment() + && t5.Alignment() <= t1.Alignment() + && registerizable(b, t2) + && registerizable(b, t3) + && registerizable(b, t4) + && n >= o2 + t2.Size() + && n >= o3 + t3.Size() + && n >= o4 + t4.Size() + => (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Store {t4} (OffPtr [o4] dst) d3 + (Zero {t1} [n] dst mem)))) +(Move {t1} [n] dst p1 + mem:(Store {t2} (OffPtr [o2] p2) d1 + (Store {t3} (OffPtr [o3] p3) d2 + (Store {t4} (OffPtr [o4] p4) d3 + (Store {t5} (OffPtr [o5] p5) d4 + (Zero {t6} [n] p6 _)))))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) + && t2.Alignment() <= t1.Alignment() + && t3.Alignment() <= t1.Alignment() + && t4.Alignment() <= t1.Alignment() + && t5.Alignment() <= t1.Alignment() + && t6.Alignment() <= 
t1.Alignment() + && registerizable(b, t2) + && registerizable(b, t3) + && registerizable(b, t4) + && registerizable(b, t5) + && n >= o2 + t2.Size() + && n >= o3 + t3.Size() + && n >= o4 + t4.Size() + && n >= o5 + t5.Size() + => (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Store {t4} (OffPtr [o4] dst) d3 + (Store {t5} (OffPtr [o5] dst) d4 + (Zero {t1} [n] dst mem))))) +(Move {t1} [n] dst p1 + mem:(VarDef + (Store {t2} op2:(OffPtr [o2] p2) d1 + (Zero {t3} [n] p3 _)))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) + && t2.Alignment() <= t1.Alignment() + && t3.Alignment() <= t1.Alignment() + && registerizable(b, t2) + && n >= o2 + t2.Size() + => (Store {t2} (OffPtr [o2] dst) d1 + (Zero {t1} [n] dst mem)) +(Move {t1} [n] dst p1 + mem:(VarDef + (Store {t2} (OffPtr [o2] p2) d1 + (Store {t3} (OffPtr [o3] p3) d2 + (Zero {t4} [n] p4 _))))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) + && t2.Alignment() <= t1.Alignment() + && t3.Alignment() <= t1.Alignment() + && t4.Alignment() <= t1.Alignment() + && registerizable(b, t2) + && registerizable(b, t3) + && n >= o2 + t2.Size() + && n >= o3 + t3.Size() + => (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Zero {t1} [n] dst mem))) +(Move {t1} [n] dst p1 + mem:(VarDef + (Store {t2} (OffPtr [o2] p2) d1 + (Store {t3} (OffPtr [o3] p3) d2 + (Store {t4} (OffPtr [o4] p4) d3 + (Zero {t5} [n] p5 _)))))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) + && t2.Alignment() <= t1.Alignment() + && t3.Alignment() <= t1.Alignment() + && t4.Alignment() <= t1.Alignment() + && t5.Alignment() <= t1.Alignment() + && registerizable(b, t2) + && registerizable(b, t3) + && registerizable(b, t4) + && n >= o2 + t2.Size() + && n >= o3 + t3.Size() + && n >= o4 + t4.Size() + => (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Store {t4} (OffPtr [o4] dst) d3 + (Zero {t1} [n] dst mem)))) +(Move {t1} [n] dst p1 + mem:(VarDef + (Store {t2} 
(OffPtr [o2] p2) d1 + (Store {t3} (OffPtr [o3] p3) d2 + (Store {t4} (OffPtr [o4] p4) d3 + (Store {t5} (OffPtr [o5] p5) d4 + (Zero {t6} [n] p6 _))))))) + && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) + && t2.Alignment() <= t1.Alignment() + && t3.Alignment() <= t1.Alignment() + && t4.Alignment() <= t1.Alignment() + && t5.Alignment() <= t1.Alignment() + && t6.Alignment() <= t1.Alignment() + && registerizable(b, t2) + && registerizable(b, t3) + && registerizable(b, t4) + && registerizable(b, t5) + && n >= o2 + t2.Size() + && n >= o3 + t3.Size() + && n >= o4 + t4.Size() + && n >= o5 + t5.Size() + => (Store {t2} (OffPtr [o2] dst) d1 + (Store {t3} (OffPtr [o3] dst) d2 + (Store {t4} (OffPtr [o4] dst) d3 + (Store {t5} (OffPtr [o5] dst) d4 + (Zero {t1} [n] dst mem))))) + +(SelectN [0] call:(StaticLECall {sym} a x)) && needRaceCleanup(sym, call) && clobber(call) => x +(SelectN [0] call:(StaticLECall {sym} x)) && needRaceCleanup(sym, call) && clobber(call) => x + +// When rewriting append to growslice, we use as the new length the result of +// growslice so that we don't have to spill/restore the new length around the growslice call. +// The exception here is that if the new length is a constant, avoiding spilling it +// is pointless and its constantness is sometimes useful for subsequent optimizations. +// See issue 56440. +// Note there are 2 rules here, one for the pre-decomposed []T result and one for +// the post-decomposed (*T,int,int) result. (The latter is generated after call expansion.) +(SliceLen (SelectN [0] (StaticLECall {sym} _ newLen:(Const(64|32)) _ _ _ _))) && isSameCall(sym, "runtime.growslice") => newLen +(SelectN [1] (StaticCall {sym} _ newLen:(Const(64|32)) _ _ _ _)) && v.Type.IsInteger() && isSameCall(sym, "runtime.growslice") => newLen + +// Collapse moving A -> B -> C into just A -> C. +// Later passes (deadstore, elim unread auto) will remove the A -> B move, if possible. 
+// This happens most commonly when B is an autotmp inserted earlier +// during compilation to ensure correctness. +// Take care that overlapping moves are preserved. +// Restrict this optimization to the stack, to avoid duplicating loads from the heap; +// see CL 145208 for discussion. +(Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _)) + && t1.Compare(t2) == types.CMPeq + && isSamePtr(tmp1, tmp2) + && isStackPtr(src) && !isVolatile(src) + && disjoint(src, s, tmp2, s) + && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config)) + => (Move {t1} [s] dst src midmem) + +// Same, but for large types that require VarDefs. +(Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _))) + && t1.Compare(t2) == types.CMPeq + && isSamePtr(tmp1, tmp2) + && isStackPtr(src) && !isVolatile(src) + && disjoint(src, s, tmp2, s) + && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config)) + => (Move {t1} [s] dst src midmem) + +// Don't zero the same bits twice. +(Zero {t} [s] dst1 zero:(Zero {t} [s] dst2 _)) && isSamePtr(dst1, dst2) => zero +(Zero {t} [s] dst1 vardef:(VarDef (Zero {t} [s] dst2 _))) && isSamePtr(dst1, dst2) => vardef + +// Elide self-moves. This only happens rarely (e.g test/fixedbugs/bug277.go). +// However, this rule is needed to prevent the previous rule from looping forever in such cases. +(Move dst src mem) && isSamePtr(dst, src) => mem + +// Constant rotate detection. 
+((Add64|Or64|Xor64) (Lsh64x64 x z:(Const64 [c])) (Rsh64Ux64 x (Const64 [d]))) && c < 64 && d == 64-c && canRotate(config, 64) => (RotateLeft64 x z) +((Add32|Or32|Xor32) (Lsh32x64 x z:(Const64 [c])) (Rsh32Ux64 x (Const64 [d]))) && c < 32 && d == 32-c && canRotate(config, 32) => (RotateLeft32 x z) +((Add16|Or16|Xor16) (Lsh16x64 x z:(Const64 [c])) (Rsh16Ux64 x (Const64 [d]))) && c < 16 && d == 16-c && canRotate(config, 16) => (RotateLeft16 x z) +((Add8|Or8|Xor8) (Lsh8x64 x z:(Const64 [c])) (Rsh8Ux64 x (Const64 [d]))) && c < 8 && d == 8-c && canRotate(config, 8) => (RotateLeft8 x z) + +// Non-constant rotate detection. +// We use shiftIsBounded to make sure that neither of the shifts are >64. +// Note: these rules are subtle when the shift amounts are 0/64, as Go shifts +// are different from most native shifts. But it works out. +((Add64|Or64|Xor64) left:(Lsh64x64 x y) right:(Rsh64Ux64 x (Sub64 (Const64 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y) +((Add64|Or64|Xor64) left:(Lsh64x32 x y) right:(Rsh64Ux32 x (Sub32 (Const32 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y) +((Add64|Or64|Xor64) left:(Lsh64x16 x y) right:(Rsh64Ux16 x (Sub16 (Const16 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y) +((Add64|Or64|Xor64) left:(Lsh64x8 x y) right:(Rsh64Ux8 x (Sub8 (Const8 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x y) + +((Add64|Or64|Xor64) right:(Rsh64Ux64 x y) left:(Lsh64x64 x z:(Sub64 (Const64 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z) +((Add64|Or64|Xor64) right:(Rsh64Ux32 x y) left:(Lsh64x32 x z:(Sub32 (Const32 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z) +((Add64|Or64|Xor64) right:(Rsh64Ux16 x y) 
left:(Lsh64x16 x z:(Sub16 (Const16 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z) +((Add64|Or64|Xor64) right:(Rsh64Ux8 x y) left:(Lsh64x8 x z:(Sub8 (Const8 [64]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 64) => (RotateLeft64 x z) + +((Add32|Or32|Xor32) left:(Lsh32x64 x y) right:(Rsh32Ux64 x (Sub64 (Const64 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y) +((Add32|Or32|Xor32) left:(Lsh32x32 x y) right:(Rsh32Ux32 x (Sub32 (Const32 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y) +((Add32|Or32|Xor32) left:(Lsh32x16 x y) right:(Rsh32Ux16 x (Sub16 (Const16 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y) +((Add32|Or32|Xor32) left:(Lsh32x8 x y) right:(Rsh32Ux8 x (Sub8 (Const8 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x y) + +((Add32|Or32|Xor32) right:(Rsh32Ux64 x y) left:(Lsh32x64 x z:(Sub64 (Const64 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z) +((Add32|Or32|Xor32) right:(Rsh32Ux32 x y) left:(Lsh32x32 x z:(Sub32 (Const32 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z) +((Add32|Or32|Xor32) right:(Rsh32Ux16 x y) left:(Lsh32x16 x z:(Sub16 (Const16 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z) +((Add32|Or32|Xor32) right:(Rsh32Ux8 x y) left:(Lsh32x8 x z:(Sub8 (Const8 [32]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 32) => (RotateLeft32 x z) + +((Add16|Or16|Xor16) left:(Lsh16x64 x y) right:(Rsh16Ux64 x (Sub64 (Const64 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y) 
+((Add16|Or16|Xor16) left:(Lsh16x32 x y) right:(Rsh16Ux32 x (Sub32 (Const32 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y) +((Add16|Or16|Xor16) left:(Lsh16x16 x y) right:(Rsh16Ux16 x (Sub16 (Const16 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y) +((Add16|Or16|Xor16) left:(Lsh16x8 x y) right:(Rsh16Ux8 x (Sub8 (Const8 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x y) + +((Add16|Or16|Xor16) right:(Rsh16Ux64 x y) left:(Lsh16x64 x z:(Sub64 (Const64 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z) +((Add16|Or16|Xor16) right:(Rsh16Ux32 x y) left:(Lsh16x32 x z:(Sub32 (Const32 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z) +((Add16|Or16|Xor16) right:(Rsh16Ux16 x y) left:(Lsh16x16 x z:(Sub16 (Const16 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z) +((Add16|Or16|Xor16) right:(Rsh16Ux8 x y) left:(Lsh16x8 x z:(Sub8 (Const8 [16]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 16) => (RotateLeft16 x z) + +((Add8|Or8|Xor8) left:(Lsh8x64 x y) right:(Rsh8Ux64 x (Sub64 (Const64 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y) +((Add8|Or8|Xor8) left:(Lsh8x32 x y) right:(Rsh8Ux32 x (Sub32 (Const32 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y) +((Add8|Or8|Xor8) left:(Lsh8x16 x y) right:(Rsh8Ux16 x (Sub16 (Const16 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x y) +((Add8|Or8|Xor8) left:(Lsh8x8 x y) right:(Rsh8Ux8 x (Sub8 (Const8 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x 
y) + +((Add8|Or8|Xor8) right:(Rsh8Ux64 x y) left:(Lsh8x64 x z:(Sub64 (Const64 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z) +((Add8|Or8|Xor8) right:(Rsh8Ux32 x y) left:(Lsh8x32 x z:(Sub32 (Const32 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z) +((Add8|Or8|Xor8) right:(Rsh8Ux16 x y) left:(Lsh8x16 x z:(Sub16 (Const16 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z) +((Add8|Or8|Xor8) right:(Rsh8Ux8 x y) left:(Lsh8x8 x z:(Sub8 (Const8 [8]) y))) && (shiftIsBounded(left) || shiftIsBounded(right)) && canRotate(config, 8) => (RotateLeft8 x z) + +// Rotating by y&c, with c a mask that doesn't change the bottom bits, is the same as rotating by y. +(RotateLeft64 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&63 == 63 => (RotateLeft64 x y) +(RotateLeft32 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&31 == 31 => (RotateLeft32 x y) +(RotateLeft16 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&15 == 15 => (RotateLeft16 x y) +(RotateLeft8 x (And(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&7 == 7 => (RotateLeft8 x y) + +// Rotating by -(y&c), with c a mask that doesn't change the bottom bits, is the same as rotating by -y. +(RotateLeft64 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&63 == 63 => (RotateLeft64 x (Neg(64|32|16|8) y)) +(RotateLeft32 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&31 == 31 => (RotateLeft32 x (Neg(64|32|16|8) y)) +(RotateLeft16 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&15 == 15 => (RotateLeft16 x (Neg(64|32|16|8) y)) +(RotateLeft8 x (Neg(64|32|16|8) (And(64|32|16|8) y (Const(64|32|16|8) [c])))) && c&7 == 7 => (RotateLeft8 x (Neg(64|32|16|8) y)) + +// Rotating by y+c, with c a multiple of the value width, is the same as rotating by y. 
+(RotateLeft64 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&63 == 0 => (RotateLeft64 x y) +(RotateLeft32 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&31 == 0 => (RotateLeft32 x y) +(RotateLeft16 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&15 == 0 => (RotateLeft16 x y) +(RotateLeft8 x (Add(64|32|16|8) y (Const(64|32|16|8) [c]))) && c&7 == 0 => (RotateLeft8 x y) + +// Rotating by c-y, with c a multiple of the value width, is the same as rotating by -y. +(RotateLeft64 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&63 == 0 => (RotateLeft64 x (Neg(64|32|16|8) y)) +(RotateLeft32 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&31 == 0 => (RotateLeft32 x (Neg(64|32|16|8) y)) +(RotateLeft16 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&15 == 0 => (RotateLeft16 x (Neg(64|32|16|8) y)) +(RotateLeft8 x (Sub(64|32|16|8) (Const(64|32|16|8) [c]) y)) && c&7 == 0 => (RotateLeft8 x (Neg(64|32|16|8) y)) + +// Ensure we don't do Const64 rotates in a 32-bit system. +(RotateLeft64 x (Const64 [c])) && config.PtrSize == 4 => (RotateLeft64 x (Const32 [int32(c)])) +(RotateLeft32 x (Const64 [c])) && config.PtrSize == 4 => (RotateLeft32 x (Const32 [int32(c)])) +(RotateLeft16 x (Const64 [c])) && config.PtrSize == 4 => (RotateLeft16 x (Const32 [int32(c)])) +(RotateLeft8 x (Const64 [c])) && config.PtrSize == 4 => (RotateLeft8 x (Const32 [int32(c)])) + +// Rotating by c, then by d, is the same as rotating by c+d. +// We're trading a rotate for an add, which seems generally a good choice. It is especially good when c and d are constants. +// This rule is a bit tricky as c and d might be different widths. We handle only cases where they are the same width. 
+(RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 8 && d.Type.Size() == 8 => (RotateLeft(64|32|16|8) x (Add64 c d)) +(RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 4 && d.Type.Size() == 4 => (RotateLeft(64|32|16|8) x (Add32 c d)) +(RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 2 && d.Type.Size() == 2 => (RotateLeft(64|32|16|8) x (Add16 c d)) +(RotateLeft(64|32|16|8) (RotateLeft(64|32|16|8) x c) d) && c.Type.Size() == 1 && d.Type.Size() == 1 => (RotateLeft(64|32|16|8) x (Add8 c d)) + +// Loading constant values from dictionaries and itabs. +(Load (OffPtr [off] (Addr {s} sb) ) _) && t.IsUintptr() && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb) +(Load (OffPtr [off] (Convert (Addr {s} sb) _) ) _) && t.IsUintptr() && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb) +(Load (OffPtr [off] (ITab (IMake (Addr {s} sb) _))) _) && t.IsUintptr() && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb) +(Load (OffPtr [off] (ITab (IMake (Convert (Addr {s} sb) _) _))) _) && t.IsUintptr() && isFixedSym(s, off) => (Addr {fixedSym(b.Func, s, off)} sb) + +// Loading constant values from runtime._type.hash. 
+(Load (OffPtr [off] (Addr {sym} _) ) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)]) +(Load (OffPtr [off] (Convert (Addr {sym} _) _) ) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)]) +(Load (OffPtr [off] (ITab (IMake (Addr {sym} _) _))) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)]) +(Load (OffPtr [off] (ITab (IMake (Convert (Addr {sym} _) _) _))) _) && t.IsInteger() && t.Size() == 4 && isFixed32(config, sym, off) => (Const32 [fixed32(config, sym, off)]) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/genericOps.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/genericOps.go new file mode 100644 index 0000000000000000000000000000000000000000..69eb48ce44028f834caae09453599d8f98928f29 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/genericOps.go @@ -0,0 +1,675 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// Generic opcodes typically specify a width. The inputs and outputs +// of that op are the given number of bits wide. There is no notion of +// "sign", so Add32 can be used both for signed and unsigned 32-bit +// addition. + +// Signed/unsigned is explicit with the extension ops +// (SignExt*/ZeroExt*) and implicit as the arg to some opcodes +// (e.g. the second argument to shifts is unsigned). If not mentioned, +// all args take signed inputs, or don't care whether their inputs +// are signed or unsigned. + +var genericOps = []opData{ + // 2-input arithmetic + // Types must be consistent with Go typing. Add, for example, must take two values + // of the same type and produces that same type. 
+ {name: "Add8", argLength: 2, commutative: true}, // arg0 + arg1 + {name: "Add16", argLength: 2, commutative: true}, + {name: "Add32", argLength: 2, commutative: true}, + {name: "Add64", argLength: 2, commutative: true}, + {name: "AddPtr", argLength: 2}, // For address calculations. arg0 is a pointer and arg1 is an int. + {name: "Add32F", argLength: 2, commutative: true}, + {name: "Add64F", argLength: 2, commutative: true}, + + {name: "Sub8", argLength: 2}, // arg0 - arg1 + {name: "Sub16", argLength: 2}, + {name: "Sub32", argLength: 2}, + {name: "Sub64", argLength: 2}, + {name: "SubPtr", argLength: 2}, + {name: "Sub32F", argLength: 2}, + {name: "Sub64F", argLength: 2}, + + {name: "Mul8", argLength: 2, commutative: true}, // arg0 * arg1 + {name: "Mul16", argLength: 2, commutative: true}, + {name: "Mul32", argLength: 2, commutative: true}, + {name: "Mul64", argLength: 2, commutative: true}, + {name: "Mul32F", argLength: 2, commutative: true}, + {name: "Mul64F", argLength: 2, commutative: true}, + + {name: "Div32F", argLength: 2}, // arg0 / arg1 + {name: "Div64F", argLength: 2}, + + {name: "Hmul32", argLength: 2, commutative: true}, + {name: "Hmul32u", argLength: 2, commutative: true}, + {name: "Hmul64", argLength: 2, commutative: true}, + {name: "Hmul64u", argLength: 2, commutative: true}, + + {name: "Mul32uhilo", argLength: 2, typ: "(UInt32,UInt32)", commutative: true}, // arg0 * arg1, returns (hi, lo) + {name: "Mul64uhilo", argLength: 2, typ: "(UInt64,UInt64)", commutative: true}, // arg0 * arg1, returns (hi, lo) + + {name: "Mul32uover", argLength: 2, typ: "(UInt32,Bool)", commutative: true}, // Let x = arg0*arg1 (full 32x32-> 64 unsigned multiply), returns (uint32(x), (uint32(x) != x)) + {name: "Mul64uover", argLength: 2, typ: "(UInt64,Bool)", commutative: true}, // Let x = arg0*arg1 (full 64x64->128 unsigned multiply), returns (uint64(x), (uint64(x) != x)) + + // Weird special instructions for use in the strength reduction of divides. 
+ // These ops compute unsigned (arg0 + arg1) / 2, correct to all + // 32/64 bits, even when the intermediate result of the add has 33/65 bits. + // These ops can assume arg0 >= arg1. + // Note: these ops aren't commutative! + {name: "Avg32u", argLength: 2, typ: "UInt32"}, // 32-bit platforms only + {name: "Avg64u", argLength: 2, typ: "UInt64"}, // 64-bit platforms only + + // For Div16, Div32 and Div64, AuxInt non-zero means that the divisor has been proved to be not -1 + // or that the dividend is not the most negative value. + {name: "Div8", argLength: 2}, // arg0 / arg1, signed + {name: "Div8u", argLength: 2}, // arg0 / arg1, unsigned + {name: "Div16", argLength: 2, aux: "Bool"}, + {name: "Div16u", argLength: 2}, + {name: "Div32", argLength: 2, aux: "Bool"}, + {name: "Div32u", argLength: 2}, + {name: "Div64", argLength: 2, aux: "Bool"}, + {name: "Div64u", argLength: 2}, + {name: "Div128u", argLength: 3}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r) + + // For Mod16, Mod32 and Mod64, AuxInt non-zero means that the divisor has been proved to be not -1. 
+ {name: "Mod8", argLength: 2}, // arg0 % arg1, signed + {name: "Mod8u", argLength: 2}, // arg0 % arg1, unsigned + {name: "Mod16", argLength: 2, aux: "Bool"}, + {name: "Mod16u", argLength: 2}, + {name: "Mod32", argLength: 2, aux: "Bool"}, + {name: "Mod32u", argLength: 2}, + {name: "Mod64", argLength: 2, aux: "Bool"}, + {name: "Mod64u", argLength: 2}, + + {name: "And8", argLength: 2, commutative: true}, // arg0 & arg1 + {name: "And16", argLength: 2, commutative: true}, + {name: "And32", argLength: 2, commutative: true}, + {name: "And64", argLength: 2, commutative: true}, + + {name: "Or8", argLength: 2, commutative: true}, // arg0 | arg1 + {name: "Or16", argLength: 2, commutative: true}, + {name: "Or32", argLength: 2, commutative: true}, + {name: "Or64", argLength: 2, commutative: true}, + + {name: "Xor8", argLength: 2, commutative: true}, // arg0 ^ arg1 + {name: "Xor16", argLength: 2, commutative: true}, + {name: "Xor32", argLength: 2, commutative: true}, + {name: "Xor64", argLength: 2, commutative: true}, + + // For shifts, AxB means the shifted value has A bits and the shift amount has B bits. + // Shift amounts are considered unsigned. + // If arg1 is known to be nonnegative and less than the number of bits in arg0, + // then auxInt may be set to 1. + // This enables better code generation on some platforms. 
+ {name: "Lsh8x8", argLength: 2, aux: "Bool"}, // arg0 << arg1 + {name: "Lsh8x16", argLength: 2, aux: "Bool"}, + {name: "Lsh8x32", argLength: 2, aux: "Bool"}, + {name: "Lsh8x64", argLength: 2, aux: "Bool"}, + {name: "Lsh16x8", argLength: 2, aux: "Bool"}, + {name: "Lsh16x16", argLength: 2, aux: "Bool"}, + {name: "Lsh16x32", argLength: 2, aux: "Bool"}, + {name: "Lsh16x64", argLength: 2, aux: "Bool"}, + {name: "Lsh32x8", argLength: 2, aux: "Bool"}, + {name: "Lsh32x16", argLength: 2, aux: "Bool"}, + {name: "Lsh32x32", argLength: 2, aux: "Bool"}, + {name: "Lsh32x64", argLength: 2, aux: "Bool"}, + {name: "Lsh64x8", argLength: 2, aux: "Bool"}, + {name: "Lsh64x16", argLength: 2, aux: "Bool"}, + {name: "Lsh64x32", argLength: 2, aux: "Bool"}, + {name: "Lsh64x64", argLength: 2, aux: "Bool"}, + + {name: "Rsh8x8", argLength: 2, aux: "Bool"}, // arg0 >> arg1, signed + {name: "Rsh8x16", argLength: 2, aux: "Bool"}, + {name: "Rsh8x32", argLength: 2, aux: "Bool"}, + {name: "Rsh8x64", argLength: 2, aux: "Bool"}, + {name: "Rsh16x8", argLength: 2, aux: "Bool"}, + {name: "Rsh16x16", argLength: 2, aux: "Bool"}, + {name: "Rsh16x32", argLength: 2, aux: "Bool"}, + {name: "Rsh16x64", argLength: 2, aux: "Bool"}, + {name: "Rsh32x8", argLength: 2, aux: "Bool"}, + {name: "Rsh32x16", argLength: 2, aux: "Bool"}, + {name: "Rsh32x32", argLength: 2, aux: "Bool"}, + {name: "Rsh32x64", argLength: 2, aux: "Bool"}, + {name: "Rsh64x8", argLength: 2, aux: "Bool"}, + {name: "Rsh64x16", argLength: 2, aux: "Bool"}, + {name: "Rsh64x32", argLength: 2, aux: "Bool"}, + {name: "Rsh64x64", argLength: 2, aux: "Bool"}, + + {name: "Rsh8Ux8", argLength: 2, aux: "Bool"}, // arg0 >> arg1, unsigned + {name: "Rsh8Ux16", argLength: 2, aux: "Bool"}, + {name: "Rsh8Ux32", argLength: 2, aux: "Bool"}, + {name: "Rsh8Ux64", argLength: 2, aux: "Bool"}, + {name: "Rsh16Ux8", argLength: 2, aux: "Bool"}, + {name: "Rsh16Ux16", argLength: 2, aux: "Bool"}, + {name: "Rsh16Ux32", argLength: 2, aux: "Bool"}, + {name: "Rsh16Ux64", argLength: 
2, aux: "Bool"}, + {name: "Rsh32Ux8", argLength: 2, aux: "Bool"}, + {name: "Rsh32Ux16", argLength: 2, aux: "Bool"}, + {name: "Rsh32Ux32", argLength: 2, aux: "Bool"}, + {name: "Rsh32Ux64", argLength: 2, aux: "Bool"}, + {name: "Rsh64Ux8", argLength: 2, aux: "Bool"}, + {name: "Rsh64Ux16", argLength: 2, aux: "Bool"}, + {name: "Rsh64Ux32", argLength: 2, aux: "Bool"}, + {name: "Rsh64Ux64", argLength: 2, aux: "Bool"}, + + // 2-input comparisons + {name: "Eq8", argLength: 2, commutative: true, typ: "Bool"}, // arg0 == arg1 + {name: "Eq16", argLength: 2, commutative: true, typ: "Bool"}, + {name: "Eq32", argLength: 2, commutative: true, typ: "Bool"}, + {name: "Eq64", argLength: 2, commutative: true, typ: "Bool"}, + {name: "EqPtr", argLength: 2, commutative: true, typ: "Bool"}, + {name: "EqInter", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend + {name: "EqSlice", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend + {name: "Eq32F", argLength: 2, commutative: true, typ: "Bool"}, + {name: "Eq64F", argLength: 2, commutative: true, typ: "Bool"}, + + {name: "Neq8", argLength: 2, commutative: true, typ: "Bool"}, // arg0 != arg1 + {name: "Neq16", argLength: 2, commutative: true, typ: "Bool"}, + {name: "Neq32", argLength: 2, commutative: true, typ: "Bool"}, + {name: "Neq64", argLength: 2, commutative: true, typ: "Bool"}, + {name: "NeqPtr", argLength: 2, commutative: true, typ: "Bool"}, + {name: "NeqInter", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend + {name: "NeqSlice", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend + {name: "Neq32F", argLength: 2, commutative: true, typ: "Bool"}, + {name: "Neq64F", argLength: 2, commutative: true, typ: "Bool"}, + + {name: "Less8", argLength: 2, typ: "Bool"}, // arg0 < arg1, signed + {name: "Less8U", argLength: 2, typ: "Bool"}, // arg0 < arg1, unsigned + {name: "Less16", argLength: 2, typ: "Bool"}, + {name: 
"Less16U", argLength: 2, typ: "Bool"}, + {name: "Less32", argLength: 2, typ: "Bool"}, + {name: "Less32U", argLength: 2, typ: "Bool"}, + {name: "Less64", argLength: 2, typ: "Bool"}, + {name: "Less64U", argLength: 2, typ: "Bool"}, + {name: "Less32F", argLength: 2, typ: "Bool"}, + {name: "Less64F", argLength: 2, typ: "Bool"}, + + {name: "Leq8", argLength: 2, typ: "Bool"}, // arg0 <= arg1, signed + {name: "Leq8U", argLength: 2, typ: "Bool"}, // arg0 <= arg1, unsigned + {name: "Leq16", argLength: 2, typ: "Bool"}, + {name: "Leq16U", argLength: 2, typ: "Bool"}, + {name: "Leq32", argLength: 2, typ: "Bool"}, + {name: "Leq32U", argLength: 2, typ: "Bool"}, + {name: "Leq64", argLength: 2, typ: "Bool"}, + {name: "Leq64U", argLength: 2, typ: "Bool"}, + {name: "Leq32F", argLength: 2, typ: "Bool"}, + {name: "Leq64F", argLength: 2, typ: "Bool"}, + + // the type of a CondSelect is the same as the type of its first + // two arguments, which should be register-width scalars; the third + // argument should be a boolean + {name: "CondSelect", argLength: 3}, // arg2 ? 
arg0 : arg1 + + // boolean ops + {name: "AndB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 && arg1 (not shortcircuited) + {name: "OrB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 || arg1 (not shortcircuited) + {name: "EqB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 == arg1 + {name: "NeqB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 != arg1 + {name: "Not", argLength: 1, typ: "Bool"}, // !arg0, boolean + + // 1-input ops + {name: "Neg8", argLength: 1}, // -arg0 + {name: "Neg16", argLength: 1}, + {name: "Neg32", argLength: 1}, + {name: "Neg64", argLength: 1}, + {name: "Neg32F", argLength: 1}, + {name: "Neg64F", argLength: 1}, + + {name: "Com8", argLength: 1}, // ^arg0 + {name: "Com16", argLength: 1}, + {name: "Com32", argLength: 1}, + {name: "Com64", argLength: 1}, + + {name: "Ctz8", argLength: 1}, // Count trailing (low order) zeroes (returns 0-8) + {name: "Ctz16", argLength: 1}, // Count trailing (low order) zeroes (returns 0-16) + {name: "Ctz32", argLength: 1}, // Count trailing (low order) zeroes (returns 0-32) + {name: "Ctz64", argLength: 1}, // Count trailing (low order) zeroes (returns 0-64) + {name: "Ctz8NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-7 + {name: "Ctz16NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-15 + {name: "Ctz32NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-31 + {name: "Ctz64NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-63 + {name: "BitLen8", argLength: 1}, // Number of bits in arg[0] (returns 0-8) + {name: "BitLen16", argLength: 1}, // Number of bits in arg[0] (returns 0-16) + {name: "BitLen32", argLength: 1}, // Number of bits in arg[0] (returns 0-32) + {name: "BitLen64", argLength: 1}, // Number of bits in arg[0] (returns 0-64) + + {name: "Bswap16", argLength: 1}, // Swap bytes + {name: "Bswap32", argLength: 1}, // Swap bytes + {name: 
"Bswap64", argLength: 1}, // Swap bytes + + {name: "BitRev8", argLength: 1}, // Reverse the bits in arg[0] + {name: "BitRev16", argLength: 1}, // Reverse the bits in arg[0] + {name: "BitRev32", argLength: 1}, // Reverse the bits in arg[0] + {name: "BitRev64", argLength: 1}, // Reverse the bits in arg[0] + + {name: "PopCount8", argLength: 1}, // Count bits in arg[0] + {name: "PopCount16", argLength: 1}, // Count bits in arg[0] + {name: "PopCount32", argLength: 1}, // Count bits in arg[0] + {name: "PopCount64", argLength: 1}, // Count bits in arg[0] + + // RotateLeftX instructions rotate the X bits of arg[0] to the left + // by the low lg_2(X) bits of arg[1], interpreted as an unsigned value. + // Note that this works out regardless of the bit width or signedness of + // arg[1]. In particular, RotateLeft by x is the same as RotateRight by -x. + {name: "RotateLeft64", argLength: 2}, + {name: "RotateLeft32", argLength: 2}, + {name: "RotateLeft16", argLength: 2}, + {name: "RotateLeft8", argLength: 2}, + + // Square root. + // Special cases: + // +∞ → +∞ + // ±0 → ±0 (sign preserved) + // x<0 → NaN + // NaN → NaN + {name: "Sqrt", argLength: 1}, // √arg0 (floating point, double precision) + {name: "Sqrt32", argLength: 1}, // √arg0 (floating point, single precision) + + // Round to integer, float64 only. + // Special cases: + // ±∞ → ±∞ (sign preserved) + // ±0 → ±0 (sign preserved) + // NaN → NaN + {name: "Floor", argLength: 1}, // round arg0 toward -∞ + {name: "Ceil", argLength: 1}, // round arg0 toward +∞ + {name: "Trunc", argLength: 1}, // round arg0 toward 0 + {name: "Round", argLength: 1}, // round arg0 to nearest, ties away from 0 + {name: "RoundToEven", argLength: 1}, // round arg0 to nearest, ties to even + + // Modify the sign bit + {name: "Abs", argLength: 1}, // absolute value arg0 + {name: "Copysign", argLength: 2}, // copy sign from arg0 to arg1 + + // Float min/max implementation, if hardware is available. 
+ {name: "Min64F", argLength: 2}, // min(arg0,arg1) + {name: "Min32F", argLength: 2}, // min(arg0,arg1) + {name: "Max64F", argLength: 2}, // max(arg0,arg1) + {name: "Max32F", argLength: 2}, // max(arg0,arg1) + + // 3-input opcode. + // Fused-multiply-add, float64 only. + // When a*b+c is exactly zero (before rounding), then the result is +0 or -0. + // The 0's sign is determined according to the standard rules for the + // addition (-0 if both a*b and c are -0, +0 otherwise). + // + // Otherwise, when a*b+c rounds to zero, then the resulting 0's sign is + // determined by the sign of the exact result a*b+c. + // See section 6.3 in ieee754. + // + // When the multiply is an infinity times a zero, the result is NaN. + // See section 7.2 in ieee754. + {name: "FMA", argLength: 3}, // compute (a*b)+c without intermediate rounding + + // Data movement. Max argument length for Phi is indefinite. + {name: "Phi", argLength: -1, zeroWidth: true}, // select an argument based on which predecessor block we came from + {name: "Copy", argLength: 1}, // output = arg0 + // Convert converts between pointers and integers. + // We have a special op for this so as to not confuse GC + // (particularly stack maps). It takes a memory arg so it + // gets correctly ordered with respect to GC safepoints. + // It gets compiled to nothing, so its result must in the same + // register as its argument. regalloc knows it can use any + // allocatable integer register for OpConvert. + // arg0=ptr/int arg1=mem, output=int/ptr + {name: "Convert", argLength: 2, zeroWidth: true, resultInArg0: true}, + + // constants. Constant values are stored in the aux or + // auxint fields. 
+ {name: "ConstBool", aux: "Bool"}, // auxint is 0 for false and 1 for true + {name: "ConstString", aux: "String"}, // value is aux.(string) + {name: "ConstNil", typ: "BytePtr"}, // nil pointer + {name: "Const8", aux: "Int8"}, // auxint is sign-extended 8 bits + {name: "Const16", aux: "Int16"}, // auxint is sign-extended 16 bits + {name: "Const32", aux: "Int32"}, // auxint is sign-extended 32 bits + // Note: ConstX are sign-extended even when the type of the value is unsigned. + // For instance, uint8(0xaa) is stored as auxint=0xffffffffffffffaa. + {name: "Const64", aux: "Int64"}, // value is auxint + // Note: for both Const32F and Const64F, we disallow encoding NaNs. + // Signaling NaNs are tricky because if you do anything with them, they become quiet. + // Particularly, converting a 32 bit sNaN to 64 bit and back converts it to a qNaN. + // See issue 36399 and 36400. + // Encodings of +inf, -inf, and -0 are fine. + {name: "Const32F", aux: "Float32"}, // value is math.Float64frombits(uint64(auxint)) and is exactly representable as float 32 + {name: "Const64F", aux: "Float64"}, // value is math.Float64frombits(uint64(auxint)) + {name: "ConstInterface"}, // nil interface + {name: "ConstSlice"}, // nil slice + + // Constant-like things + {name: "InitMem", zeroWidth: true}, // memory input to the function. + {name: "Arg", aux: "SymOff", symEffect: "Read", zeroWidth: true}, // argument to the function. aux=GCNode of arg, off = offset in that arg. + + // Like Arg, these are generic ops that survive lowering. AuxInt is a register index, and the actual output register for each index is defined by the architecture. + // AuxInt = integer argument index (not a register number). ABI-specified spill loc obtained from function + {name: "ArgIntReg", aux: "NameOffsetInt8", zeroWidth: true}, // argument to the function in an int reg. + {name: "ArgFloatReg", aux: "NameOffsetInt8", zeroWidth: true}, // argument to the function in a float reg. + + // The address of a variable. 
arg0 is the base pointer. + // If the variable is a global, the base pointer will be SB and + // the Aux field will be a *obj.LSym. + // If the variable is a local, the base pointer will be SP and + // the Aux field will be a *gc.Node. + {name: "Addr", argLength: 1, aux: "Sym", symEffect: "Addr"}, // Address of a variable. Arg0=SB. Aux identifies the variable. + {name: "LocalAddr", argLength: 2, aux: "Sym", symEffect: "Addr"}, // Address of a variable. Arg0=SP. Arg1=mem. Aux identifies the variable. + + {name: "SP", zeroWidth: true}, // stack pointer + {name: "SB", typ: "Uintptr", zeroWidth: true}, // static base pointer (a.k.a. globals pointer) + {name: "Invalid"}, // unused value + {name: "SPanchored", typ: "Uintptr", argLength: 2, zeroWidth: true}, // arg0 = SP, arg1 = mem. Result is identical to arg0, but cannot be scheduled before memory state arg1. + + // Memory operations + {name: "Load", argLength: 2}, // Load from arg0. arg1=memory + {name: "Dereference", argLength: 2}, // Load from arg0. arg1=memory. Helper op for arg/result passing, result is an otherwise not-SSA-able "value". + {name: "Store", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + // Normally we require that the source and destination of Move do not overlap. + // There is an exception when we know all the loads will happen before all + // the stores. In that case, overlap is ok. See + // memmove inlining in generic.rules. When inlineablememmovesize (in ../rewrite.go) + // returns true, we must do all loads before all stores, when lowering Move. + // The type of Move is used for the write barrier pass to insert write barriers + // and for alignment on some architectures. + // For pointerless types, it is possible for the type to be inaccurate. + // For type alignment and pointer information, use the type in Aux; + // for type size, use the size in AuxInt. 
+ // The "inline runtime.memmove" rewrite rule generates Moves with inaccurate types, + // such as type byte instead of the more accurate type [8]byte. + {name: "Move", argLength: 3, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size, aux=type. Returns memory. + {name: "Zero", argLength: 2, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=mem, auxint=size, aux=type. Returns memory. + + // Memory operations with write barriers. + // Expand to runtime calls. Write barrier will be removed if write on stack. + {name: "StoreWB", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory. + {name: "MoveWB", argLength: 3, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size, aux=type. Returns memory. + {name: "ZeroWB", argLength: 2, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=mem, auxint=size, aux=type. Returns memory. + {name: "WBend", argLength: 1, typ: "Mem"}, // Write barrier code is done, interrupting is now allowed. + + // WB invokes runtime.gcWriteBarrier. This is not a normal + // call: it takes arguments in registers, doesn't clobber + // general-purpose registers (the exact clobber set is + // arch-dependent), and is not a safe-point. + {name: "WB", argLength: 1, typ: "(BytePtr,Mem)", aux: "Int64"}, // arg0=mem, auxint=# of buffer entries needed. Returns buffer pointer and memory. + + {name: "HasCPUFeature", argLength: 0, typ: "bool", aux: "Sym", symEffect: "None"}, // aux=place that this feature flag can be loaded from + + // PanicBounds and PanicExtend generate a runtime panic. + // Their arguments provide index values to use in panic messages. + // Both PanicBounds and PanicExtend have an AuxInt value from the BoundsKind type (in ../op.go). + // PanicBounds' index is int sized. + // PanicExtend's index is int64 sized. (PanicExtend is only used on 32-bit archs.) 
+ {name: "PanicBounds", argLength: 3, aux: "Int64", typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. + {name: "PanicExtend", argLength: 4, aux: "Int64", typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. + + // Function calls. Arguments to the call have already been written to the stack. + // Return values appear on the stack. The method receiver, if any, is treated + // as a phantom first argument. + // TODO(josharian): ClosureCall and InterCall should have Int32 aux + // to match StaticCall's 32 bit arg size limit. + // TODO(drchase,josharian): could the arg size limit be bundled into the rules for CallOff? + + // Before lowering, LECalls receive their fixed inputs (first), memory (last), + // and a variable number of input values in the middle. + // They produce a variable number of result values. + // These values are not necessarily "SSA-able"; they can be too large, + // but in that case inputs are loaded immediately before with OpDereference, + // and outputs are stored immediately with OpStore. + // + // After call expansion, Calls have the same fixed-middle-memory arrangement of inputs, + // with the difference that the "middle" is only the register-resident inputs, + // and the non-register inputs are instead stored at ABI-defined offsets from SP + // (and the stores thread through the memory that is ultimately an input to the call). + // Outputs follow a similar pattern; register-resident outputs are the leading elements + // of a Result-typed output, with memory last, and any memory-resident outputs have been + // stored to ABI-defined locations. Each non-memory input or output fits in a register. + // + // Subsequent architecture-specific lowering only changes the opcode. + + {name: "ClosureCall", argLength: -1, aux: "CallOff", call: true}, // arg0=code pointer, arg1=context ptr, arg2..argN-1 are register inputs, argN=memory. auxint=arg size. Returns Result of register results, plus memory. 
+ {name: "StaticCall", argLength: -1, aux: "CallOff", call: true}, // call function aux.(*obj.LSym), arg0..argN-1 are register inputs, argN=memory. auxint=arg size. Returns Result of register results, plus memory. + {name: "InterCall", argLength: -1, aux: "CallOff", call: true}, // interface call. arg0=code pointer, arg1..argN-1 are register inputs, argN=memory, auxint=arg size. Returns Result of register results, plus memory. + {name: "TailCall", argLength: -1, aux: "CallOff", call: true}, // tail call function aux.(*obj.LSym), arg0..argN-1 are register inputs, argN=memory. auxint=arg size. Returns Result of register results, plus memory. + + {name: "ClosureLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded closure call. arg0=code pointer, arg1=context ptr, arg2..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem. + {name: "StaticLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded static call function aux.(*ssa.AuxCall.Fn). arg0..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem. + {name: "InterLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded interface call. arg0=code pointer, arg1..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem. + {name: "TailLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded static tail call function aux.(*ssa.AuxCall.Fn). arg0..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem. 
+ + // Conversions: signed extensions, zero (unsigned) extensions, truncations + {name: "SignExt8to16", argLength: 1, typ: "Int16"}, + {name: "SignExt8to32", argLength: 1, typ: "Int32"}, + {name: "SignExt8to64", argLength: 1, typ: "Int64"}, + {name: "SignExt16to32", argLength: 1, typ: "Int32"}, + {name: "SignExt16to64", argLength: 1, typ: "Int64"}, + {name: "SignExt32to64", argLength: 1, typ: "Int64"}, + {name: "ZeroExt8to16", argLength: 1, typ: "UInt16"}, + {name: "ZeroExt8to32", argLength: 1, typ: "UInt32"}, + {name: "ZeroExt8to64", argLength: 1, typ: "UInt64"}, + {name: "ZeroExt16to32", argLength: 1, typ: "UInt32"}, + {name: "ZeroExt16to64", argLength: 1, typ: "UInt64"}, + {name: "ZeroExt32to64", argLength: 1, typ: "UInt64"}, + {name: "Trunc16to8", argLength: 1}, + {name: "Trunc32to8", argLength: 1}, + {name: "Trunc32to16", argLength: 1}, + {name: "Trunc64to8", argLength: 1}, + {name: "Trunc64to16", argLength: 1}, + {name: "Trunc64to32", argLength: 1}, + + {name: "Cvt32to32F", argLength: 1}, + {name: "Cvt32to64F", argLength: 1}, + {name: "Cvt64to32F", argLength: 1}, + {name: "Cvt64to64F", argLength: 1}, + {name: "Cvt32Fto32", argLength: 1}, + {name: "Cvt32Fto64", argLength: 1}, + {name: "Cvt64Fto32", argLength: 1}, + {name: "Cvt64Fto64", argLength: 1}, + {name: "Cvt32Fto64F", argLength: 1}, + {name: "Cvt64Fto32F", argLength: 1}, + {name: "CvtBoolToUint8", argLength: 1}, + + // Force rounding to precision of type. + {name: "Round32F", argLength: 1}, + {name: "Round64F", argLength: 1}, + + // Automatically inserted safety checks + {name: "IsNonNil", argLength: 1, typ: "Bool"}, // arg0 != nil + {name: "IsInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 < arg1. arg1 is guaranteed >= 0. + {name: "IsSliceInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 <= arg1. arg1 is guaranteed >= 0. + {name: "NilCheck", argLength: 2, nilCheck: true}, // arg0=ptr, arg1=mem. Panics if arg0 is nil. Returns the ptr unmodified. 
+ + // Pseudo-ops + {name: "GetG", argLength: 1, zeroWidth: true}, // runtime.getg() (read g pointer). arg0=mem + {name: "GetClosurePtr"}, // get closure pointer from dedicated register + {name: "GetCallerPC"}, // for getcallerpc intrinsic + {name: "GetCallerSP", argLength: 1}, // for getcallersp intrinsic. arg0=mem. + + // Indexing operations + {name: "PtrIndex", argLength: 2}, // arg0=ptr, arg1=index. Computes ptr+sizeof(*v.type)*index, where index is extended to ptrwidth type + {name: "OffPtr", argLength: 1, aux: "Int64"}, // arg0 + auxint (arg0 and result are pointers) + + // Slices + {name: "SliceMake", argLength: 3}, // arg0=ptr, arg1=len, arg2=cap + {name: "SlicePtr", argLength: 1, typ: "BytePtr"}, // ptr(arg0) + {name: "SliceLen", argLength: 1}, // len(arg0) + {name: "SliceCap", argLength: 1}, // cap(arg0) + // SlicePtrUnchecked, like SlicePtr, extracts the pointer from a slice. + // SlicePtr values are assumed non-nil, because they are guarded by bounds checks. + // SlicePtrUnchecked values can be nil. + {name: "SlicePtrUnchecked", argLength: 1}, + + // Complex (part/whole) + {name: "ComplexMake", argLength: 2}, // arg0=real, arg1=imag + {name: "ComplexReal", argLength: 1}, // real(arg0) + {name: "ComplexImag", argLength: 1}, // imag(arg0) + + // Strings + {name: "StringMake", argLength: 2}, // arg0=ptr, arg1=len + {name: "StringPtr", argLength: 1, typ: "BytePtr"}, // ptr(arg0) + {name: "StringLen", argLength: 1, typ: "Int"}, // len(arg0) + + // Interfaces + {name: "IMake", argLength: 2}, // arg0=itab, arg1=data + {name: "ITab", argLength: 1, typ: "Uintptr"}, // arg0=interface, returns itable field + {name: "IData", argLength: 1}, // arg0=interface, returns data field + + // Structs + {name: "StructMake0"}, // Returns struct with 0 fields. + {name: "StructMake1", argLength: 1}, // arg0=field0. Returns struct. + {name: "StructMake2", argLength: 2}, // arg0,arg1=field0,field1. Returns struct. + {name: "StructMake3", argLength: 3}, // arg0..2=field0..2. 
Returns struct. + {name: "StructMake4", argLength: 4}, // arg0..3=field0..3. Returns struct. + {name: "StructSelect", argLength: 1, aux: "Int64"}, // arg0=struct, auxint=field index. Returns the auxint'th field. + + // Arrays + {name: "ArrayMake0"}, // Returns array with 0 elements + {name: "ArrayMake1", argLength: 1}, // Returns array with 1 element + {name: "ArraySelect", argLength: 1, aux: "Int64"}, // arg0=array, auxint=index. Returns a[i]. + + // Spill&restore ops for the register allocator. These are + // semantically identical to OpCopy; they do not take/return + // stores like regular memory ops do. We can get away without memory + // args because we know there is no aliasing of spill slots on the stack. + {name: "StoreReg", argLength: 1}, + {name: "LoadReg", argLength: 1}, + + // Used during ssa construction. Like Copy, but the arg has not been specified yet. + {name: "FwdRef", aux: "Sym", symEffect: "None"}, + + // Unknown value. Used for Values whose values don't matter because they are dead code. + {name: "Unknown"}, + + {name: "VarDef", argLength: 1, aux: "Sym", typ: "Mem", symEffect: "None", zeroWidth: true}, // aux is a *gc.Node of a variable that is about to be initialized. arg0=mem, returns mem + // TODO: what's the difference between VarLive and KeepAlive? + {name: "VarLive", argLength: 1, aux: "Sym", symEffect: "Read", zeroWidth: true}, // aux is a *gc.Node of a variable that must be kept live. arg0=mem, returns mem + {name: "KeepAlive", argLength: 2, typ: "Mem", zeroWidth: true}, // arg[0] is a value that must be kept alive until this mark. arg[1]=mem, returns mem + + // InlMark marks the start of an inlined function body. Its AuxInt field + // distinguishes which entry in the local inline tree it is marking. + {name: "InlMark", argLength: 1, aux: "Int32", typ: "Void"}, // arg[0]=mem, returns void. 
+ + // Ops for breaking 64-bit operations on 32-bit architectures + {name: "Int64Make", argLength: 2, typ: "UInt64"}, // arg0=hi, arg1=lo + {name: "Int64Hi", argLength: 1, typ: "UInt32"}, // high 32-bit of arg0 + {name: "Int64Lo", argLength: 1, typ: "UInt32"}, // low 32-bit of arg0 + + {name: "Add32carry", argLength: 2, commutative: true, typ: "(UInt32,Flags)"}, // arg0 + arg1, returns (value, carry) + {name: "Add32withcarry", argLength: 3, commutative: true}, // arg0 + arg1 + arg2, arg2=carry (0 or 1) + + {name: "Sub32carry", argLength: 2, typ: "(UInt32,Flags)"}, // arg0 - arg1, returns (value, carry) + {name: "Sub32withcarry", argLength: 3}, // arg0 - arg1 - arg2, arg2=carry (0 or 1) + + {name: "Add64carry", argLength: 3, commutative: true, typ: "(UInt64,UInt64)"}, // arg0 + arg1 + arg2, arg2 must be 0 or 1. returns (value, value>>64) + {name: "Sub64borrow", argLength: 3, typ: "(UInt64,UInt64)"}, // arg0 - (arg1 + arg2), arg2 must be 0 or 1. returns (value, value>>64&1) + + {name: "Signmask", argLength: 1, typ: "Int32"}, // 0 if arg0 >= 0, -1 if arg0 < 0 + {name: "Zeromask", argLength: 1, typ: "UInt32"}, // 0 if arg0 == 0, 0xffffffff if arg0 != 0 + {name: "Slicemask", argLength: 1}, // 0 if arg0 == 0, -1 if arg0 > 0, undef if arg0<0. Type is native int size. + + {name: "SpectreIndex", argLength: 2}, // arg0 if 0 <= arg0 < arg1, 0 otherwise. Type is native int size. + {name: "SpectreSliceIndex", argLength: 2}, // arg0 if 0 <= arg0 <= arg1, 0 otherwise. Type is native int size. 
+ + {name: "Cvt32Uto32F", argLength: 1}, // uint32 -> float32, only used on 32-bit arch + {name: "Cvt32Uto64F", argLength: 1}, // uint32 -> float64, only used on 32-bit arch + {name: "Cvt32Fto32U", argLength: 1}, // float32 -> uint32, only used on 32-bit arch + {name: "Cvt64Fto32U", argLength: 1}, // float64 -> uint32, only used on 32-bit arch + {name: "Cvt64Uto32F", argLength: 1}, // uint64 -> float32, only used on archs that has the instruction + {name: "Cvt64Uto64F", argLength: 1}, // uint64 -> float64, only used on archs that has the instruction + {name: "Cvt32Fto64U", argLength: 1}, // float32 -> uint64, only used on archs that has the instruction + {name: "Cvt64Fto64U", argLength: 1}, // float64 -> uint64, only used on archs that has the instruction + + // pseudo-ops for breaking Tuple + {name: "Select0", argLength: 1, zeroWidth: true}, // the first component of a tuple + {name: "Select1", argLength: 1, zeroWidth: true}, // the second component of a tuple + {name: "SelectN", argLength: 1, aux: "Int64"}, // arg0=result, auxint=field index. Returns the auxint'th member. + {name: "SelectNAddr", argLength: 1, aux: "Int64"}, // arg0=result, auxint=field index. Returns the address of auxint'th member. Used for un-SSA-able result types. + {name: "MakeResult", argLength: -1}, // arg0 .. are components of a "Result" (like the result from a Call). The last arg should be memory (like the result from a call). + + // Atomic operations used for semantically inlining sync/atomic and + // runtime/internal/atomic. Atomic loads return a new memory so that + // the loads are properly ordered with respect to other loads and + // stores. + {name: "AtomicLoad8", argLength: 2, typ: "(UInt8,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory. + {name: "AtomicLoad32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory. + {name: "AtomicLoad64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. 
arg1=memory. Returns loaded value and new memory. + {name: "AtomicLoadPtr", argLength: 2, typ: "(BytePtr,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory. + {name: "AtomicLoadAcq32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Lock acquisition, returns loaded value and new memory. + {name: "AtomicLoadAcq64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. arg1=memory. Lock acquisition, returns loaded value and new memory. + {name: "AtomicStore8", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory. + {name: "AtomicStore32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory. + {name: "AtomicStore64", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory. + {name: "AtomicStorePtrNoWB", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory. + {name: "AtomicStoreRel32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Lock release, returns memory. + {name: "AtomicStoreRel64", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Lock release, returns memory. + {name: "AtomicExchange32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory. + {name: "AtomicExchange64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory. + {name: "AtomicAdd32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory. + {name: "AtomicAdd64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory. 
+ {name: "AtomicCompareAndSwap32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory. + {name: "AtomicCompareAndSwap64", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory. + {name: "AtomicCompareAndSwapRel32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Lock release, reports whether store happens and new memory. + {name: "AtomicAnd8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory. + {name: "AtomicAnd32", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory. + {name: "AtomicOr8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory. + {name: "AtomicOr32", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory. + + // Atomic operation variants + // These variants have the same semantics as above atomic operations. + // But they are used for generating more efficient code on certain modern machines, with run-time CPU feature detection. + // Currently, they are used on ARM64 only. + {name: "AtomicAdd32Variant", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory. + {name: "AtomicAdd64Variant", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory. + {name: "AtomicExchange32Variant", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory. + {name: "AtomicExchange64Variant", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory. 
+ {name: "AtomicCompareAndSwap32Variant", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory. + {name: "AtomicCompareAndSwap64Variant", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory. + {name: "AtomicAnd8Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory. + {name: "AtomicAnd32Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory. + {name: "AtomicOr8Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory. + {name: "AtomicOr32Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory. + + // Publication barrier + {name: "PubBarrier", argLength: 1, hasSideEffects: true}, // Do data barrier. arg0=memory. + + // Clobber experiment op + {name: "Clobber", argLength: 0, typ: "Void", aux: "SymOff", symEffect: "None"}, // write an invalid pointer value to the given pointer slot of a stack variable + {name: "ClobberReg", argLength: 0, typ: "Void"}, // clobber a register + + // Prefetch instruction + {name: "PrefetchCache", argLength: 2, hasSideEffects: true}, // Do prefetch arg0 to cache. arg0=addr, arg1=memory. + {name: "PrefetchCacheStreamed", argLength: 2, hasSideEffects: true}, // Do non-temporal or streamed prefetch arg0 to cache. arg0=addr, arg1=memory. +} + +// kind controls successors implicit exit +// ---------------------------------------------------------- +// Exit [return mem] [] yes +// Ret [return mem] [] yes +// RetJmp [return mem] [] yes +// Plain [] [next] +// If [boolean Value] [then, else] +// First [] [always, never] +// Defer [mem] [nopanic, panic] (control opcode should be OpStaticCall to runtime.deferproc) +//JumpTable [integer Value] [succ1,succ2,..] 
+
+// genericBlocks lists the architecture-independent block kinds (control-flow
+// terminators); see the kind / successors table in the comment above for the
+// control values and successor conventions each kind requires.
+var genericBlocks = []blockData{
+	{name: "Plain"},                  // a single successor
+	{name: "If", controls: 1},        // if Controls[0] goto Succs[0] else goto Succs[1]
+	{name: "Defer", controls: 1},     // Succs[0]=defer queued, Succs[1]=defer recovered. Controls[0] is call op (of memory type)
+	{name: "Ret", controls: 1},       // no successors, Controls[0] value is memory result
+	{name: "RetJmp", controls: 1},    // no successors, Controls[0] value is a tail call
+	{name: "Exit", controls: 1},      // no successors, Controls[0] value generates a panic
+	{name: "JumpTable", controls: 1}, // multiple successors, the integer Controls[0] selects which one
+
+	// transient block state used for dead code removal
+	{name: "First"}, // 2 successors, always takes the first one (second is dead)
+}
+
+// init registers the "generic" (architecture-independent) ops and blocks in
+// the global archs list, alongside the machine-specific architectures.
+func init() {
+	archs = append(archs, arch{
+		name: "generic",
+		ops: genericOps,
+		blocks: genericBlocks,
+		generic: true,
+	})
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/main.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/main.go
new file mode 100644
index 0000000000000000000000000000000000000000..086418c7cb2e52f346cbe9a4eedb4715276d9f8e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/main.go
@@ -0,0 +1,571 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The gen command generates Go code (in the parent directory) for all
+// the architecture-specific opcodes, blocks, and rewrites.
+package main
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"go/format"
+	"log"
+	"math/bits"
+	"os"
+	"path"
+	"regexp"
+	"runtime"
+	"runtime/pprof"
+	"runtime/trace"
+	"sort"
+	"strings"
+	"sync"
+)
+
+// TODO: capitalize these types, so that we can more easily tell variable names
+// apart from type names, and avoid awkward func parameters like "arch arch".
+ +type arch struct { + name string + pkg string // obj package to import for this arch. + genfile string // source file containing opcode code generation. + ops []opData + blocks []blockData + regnames []string + ParamIntRegNames string + ParamFloatRegNames string + gpregmask regMask + fpregmask regMask + fp32regmask regMask + fp64regmask regMask + specialregmask regMask + framepointerreg int8 + linkreg int8 + generic bool + imports []string +} + +type opData struct { + name string + reg regInfo + asm string + typ string // default result type + aux string + rematerializeable bool + argLength int32 // number of arguments, if -1, then this operation has a variable number of arguments + commutative bool // this operation is commutative on its first 2 arguments (e.g. addition) + resultInArg0 bool // (first, if a tuple) output of v and v.Args[0] must be allocated to the same register + resultNotInArgs bool // outputs must not be allocated to the same registers as inputs + clobberFlags bool // this op clobbers flags register + needIntTemp bool // need a temporary free integer register + call bool // is a function call + tailCall bool // is a tail call + nilCheck bool // this op is a nil check on arg0 + faultOnNilArg0 bool // this op will fault if arg0 is nil (and aux encodes a small offset) + faultOnNilArg1 bool // this op will fault if arg1 is nil (and aux encodes a small offset) + hasSideEffects bool // for "reasons", not to be eliminated. E.g., atomic store, #19182. + zeroWidth bool // op never translates into any machine code. example: copy, which may sometimes translate to machine code, is not zero-width. + unsafePoint bool // this op is an unsafe point, i.e. not safe for async preemption + symEffect string // effect this op has on symbol in aux + scale uint8 // amd64/386 indexed load scale +} + +type blockData struct { + name string // the suffix for this block ("EQ", "LT", etc.) 
+ controls int // the number of control values this type of block requires + aux string // the type of the Aux/AuxInt value, if any +} + +type regInfo struct { + // inputs[i] encodes the set of registers allowed for the i'th input. + // Inputs that don't use registers (flags, memory, etc.) should be 0. + inputs []regMask + // clobbers encodes the set of registers that are overwritten by + // the instruction (other than the output registers). + clobbers regMask + // outputs[i] encodes the set of registers allowed for the i'th output. + outputs []regMask +} + +type regMask uint64 + +func (a arch) regMaskComment(r regMask) string { + var buf strings.Builder + for i := uint64(0); r != 0; i++ { + if r&1 != 0 { + if buf.Len() == 0 { + buf.WriteString(" //") + } + buf.WriteString(" ") + buf.WriteString(a.regnames[i]) + } + r >>= 1 + } + return buf.String() +} + +var archs []arch + +var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`") +var memprofile = flag.String("memprofile", "", "write memory profile to `file`") +var tracefile = flag.String("trace", "", "write trace to `file`") + +func main() { + flag.Parse() + if *cpuprofile != "" { + f, err := os.Create(*cpuprofile) + if err != nil { + log.Fatal("could not create CPU profile: ", err) + } + defer f.Close() + if err := pprof.StartCPUProfile(f); err != nil { + log.Fatal("could not start CPU profile: ", err) + } + defer pprof.StopCPUProfile() + } + if *tracefile != "" { + f, err := os.Create(*tracefile) + if err != nil { + log.Fatalf("failed to create trace output file: %v", err) + } + defer func() { + if err := f.Close(); err != nil { + log.Fatalf("failed to close trace file: %v", err) + } + }() + + if err := trace.Start(f); err != nil { + log.Fatalf("failed to start trace: %v", err) + } + defer trace.Stop() + } + + sort.Sort(ArchsByName(archs)) + + // The generate tasks are run concurrently, since they are CPU-intensive + // that can easily make use of many cores on a machine. 
+ // + // Note that there is no limit on the concurrency at the moment. On a + // four-core laptop at the time of writing, peak RSS usually reaches + // ~200MiB, which seems doable by practically any machine nowadays. If + // that stops being the case, we can cap this func to a fixed number of + // architectures being generated at once. + + tasks := []func(){ + genOp, + genAllocators, + } + for _, a := range archs { + a := a // the funcs are ran concurrently at a later time + tasks = append(tasks, func() { + genRules(a) + genSplitLoadRules(a) + genLateLowerRules(a) + }) + } + var wg sync.WaitGroup + for _, task := range tasks { + task := task + wg.Add(1) + go func() { + task() + wg.Done() + }() + } + wg.Wait() + + if *memprofile != "" { + f, err := os.Create(*memprofile) + if err != nil { + log.Fatal("could not create memory profile: ", err) + } + defer f.Close() + runtime.GC() // get up-to-date statistics + if err := pprof.WriteHeapProfile(f); err != nil { + log.Fatal("could not write memory profile: ", err) + } + } +} + +func genOp() { + w := new(bytes.Buffer) + fmt.Fprintf(w, "// Code generated from _gen/*Ops.go using 'go generate'; DO NOT EDIT.\n") + fmt.Fprintln(w) + fmt.Fprintln(w, "package ssa") + + fmt.Fprintln(w, "import (") + fmt.Fprintln(w, "\"cmd/internal/obj\"") + for _, a := range archs { + if a.pkg != "" { + fmt.Fprintf(w, "%q\n", a.pkg) + } + } + fmt.Fprintln(w, ")") + + // generate Block* declarations + fmt.Fprintln(w, "const (") + fmt.Fprintln(w, "BlockInvalid BlockKind = iota") + for _, a := range archs { + fmt.Fprintln(w) + for _, d := range a.blocks { + fmt.Fprintf(w, "Block%s%s\n", a.Name(), d.name) + } + } + fmt.Fprintln(w, ")") + + // generate block kind string method + fmt.Fprintln(w, "var blockString = [...]string{") + fmt.Fprintln(w, "BlockInvalid:\"BlockInvalid\",") + for _, a := range archs { + fmt.Fprintln(w) + for _, b := range a.blocks { + fmt.Fprintf(w, "Block%s%s:\"%s\",\n", a.Name(), b.name, b.name) + } + } + fmt.Fprintln(w, "}") 
+ fmt.Fprintln(w, "func (k BlockKind) String() string {return blockString[k]}") + + // generate block kind auxint method + fmt.Fprintln(w, "func (k BlockKind) AuxIntType() string {") + fmt.Fprintln(w, "switch k {") + for _, a := range archs { + for _, b := range a.blocks { + if b.auxIntType() == "invalid" { + continue + } + fmt.Fprintf(w, "case Block%s%s: return \"%s\"\n", a.Name(), b.name, b.auxIntType()) + } + } + fmt.Fprintln(w, "}") + fmt.Fprintln(w, "return \"\"") + fmt.Fprintln(w, "}") + + // generate Op* declarations + fmt.Fprintln(w, "const (") + fmt.Fprintln(w, "OpInvalid Op = iota") // make sure OpInvalid is 0. + for _, a := range archs { + fmt.Fprintln(w) + for _, v := range a.ops { + if v.name == "Invalid" { + continue + } + fmt.Fprintf(w, "Op%s%s\n", a.Name(), v.name) + } + } + fmt.Fprintln(w, ")") + + // generate OpInfo table + fmt.Fprintln(w, "var opcodeTable = [...]opInfo{") + fmt.Fprintln(w, " { name: \"OpInvalid\" },") + for _, a := range archs { + fmt.Fprintln(w) + + pkg := path.Base(a.pkg) + for _, v := range a.ops { + if v.name == "Invalid" { + continue + } + fmt.Fprintln(w, "{") + fmt.Fprintf(w, "name:\"%s\",\n", v.name) + + // flags + if v.aux != "" { + fmt.Fprintf(w, "auxType: aux%s,\n", v.aux) + } + fmt.Fprintf(w, "argLen: %d,\n", v.argLength) + + if v.rematerializeable { + if v.reg.clobbers != 0 { + log.Fatalf("%s is rematerializeable and clobbers registers", v.name) + } + if v.clobberFlags { + log.Fatalf("%s is rematerializeable and clobbers flags", v.name) + } + fmt.Fprintln(w, "rematerializeable: true,") + } + if v.commutative { + fmt.Fprintln(w, "commutative: true,") + } + if v.resultInArg0 { + fmt.Fprintln(w, "resultInArg0: true,") + // OpConvert's register mask is selected dynamically, + // so don't try to check it in the static table. 
+ if v.name != "Convert" && v.reg.inputs[0] != v.reg.outputs[0] { + log.Fatalf("%s: input[0] and output[0] must use the same registers for %s", a.name, v.name) + } + if v.name != "Convert" && v.commutative && v.reg.inputs[1] != v.reg.outputs[0] { + log.Fatalf("%s: input[1] and output[0] must use the same registers for %s", a.name, v.name) + } + } + if v.resultNotInArgs { + fmt.Fprintln(w, "resultNotInArgs: true,") + } + if v.clobberFlags { + fmt.Fprintln(w, "clobberFlags: true,") + } + if v.needIntTemp { + fmt.Fprintln(w, "needIntTemp: true,") + } + if v.call { + fmt.Fprintln(w, "call: true,") + } + if v.tailCall { + fmt.Fprintln(w, "tailCall: true,") + } + if v.nilCheck { + fmt.Fprintln(w, "nilCheck: true,") + } + if v.faultOnNilArg0 { + fmt.Fprintln(w, "faultOnNilArg0: true,") + if v.aux != "Sym" && v.aux != "SymOff" && v.aux != "SymValAndOff" && v.aux != "Int64" && v.aux != "Int32" && v.aux != "" { + log.Fatalf("faultOnNilArg0 with aux %s not allowed", v.aux) + } + } + if v.faultOnNilArg1 { + fmt.Fprintln(w, "faultOnNilArg1: true,") + if v.aux != "Sym" && v.aux != "SymOff" && v.aux != "SymValAndOff" && v.aux != "Int64" && v.aux != "Int32" && v.aux != "" { + log.Fatalf("faultOnNilArg1 with aux %s not allowed", v.aux) + } + } + if v.hasSideEffects { + fmt.Fprintln(w, "hasSideEffects: true,") + } + if v.zeroWidth { + fmt.Fprintln(w, "zeroWidth: true,") + } + if v.unsafePoint { + fmt.Fprintln(w, "unsafePoint: true,") + } + needEffect := strings.HasPrefix(v.aux, "Sym") + if v.symEffect != "" { + if !needEffect { + log.Fatalf("symEffect with aux %s not allowed", v.aux) + } + fmt.Fprintf(w, "symEffect: Sym%s,\n", strings.Replace(v.symEffect, ",", "|Sym", -1)) + } else if needEffect { + log.Fatalf("symEffect needed for aux %s", v.aux) + } + if a.name == "generic" { + fmt.Fprintln(w, "generic:true,") + fmt.Fprintln(w, "},") // close op + // generic ops have no reg info or asm + continue + } + if v.asm != "" { + fmt.Fprintf(w, "asm: %s.A%s,\n", pkg, v.asm) + } + if 
v.scale != 0 { + fmt.Fprintf(w, "scale: %d,\n", v.scale) + } + fmt.Fprintln(w, "reg:regInfo{") + + // Compute input allocation order. We allocate from the + // most to the least constrained input. This order guarantees + // that we will always be able to find a register. + var s []intPair + for i, r := range v.reg.inputs { + if r != 0 { + s = append(s, intPair{countRegs(r), i}) + } + } + if len(s) > 0 { + sort.Sort(byKey(s)) + fmt.Fprintln(w, "inputs: []inputInfo{") + for _, p := range s { + r := v.reg.inputs[p.val] + fmt.Fprintf(w, "{%d,%d},%s\n", p.val, r, a.regMaskComment(r)) + } + fmt.Fprintln(w, "},") + } + + if v.reg.clobbers > 0 { + fmt.Fprintf(w, "clobbers: %d,%s\n", v.reg.clobbers, a.regMaskComment(v.reg.clobbers)) + } + + // reg outputs + s = s[:0] + for i, r := range v.reg.outputs { + s = append(s, intPair{countRegs(r), i}) + } + if len(s) > 0 { + sort.Sort(byKey(s)) + fmt.Fprintln(w, "outputs: []outputInfo{") + for _, p := range s { + r := v.reg.outputs[p.val] + fmt.Fprintf(w, "{%d,%d},%s\n", p.val, r, a.regMaskComment(r)) + } + fmt.Fprintln(w, "},") + } + fmt.Fprintln(w, "},") // close reg info + fmt.Fprintln(w, "},") // close op + } + } + fmt.Fprintln(w, "}") + + fmt.Fprintln(w, "func (o Op) Asm() obj.As {return opcodeTable[o].asm}") + fmt.Fprintln(w, "func (o Op) Scale() int16 {return int16(opcodeTable[o].scale)}") + + // generate op string method + fmt.Fprintln(w, "func (o Op) String() string {return opcodeTable[o].name }") + + fmt.Fprintln(w, "func (o Op) SymEffect() SymEffect { return opcodeTable[o].symEffect }") + fmt.Fprintln(w, "func (o Op) IsCall() bool { return opcodeTable[o].call }") + fmt.Fprintln(w, "func (o Op) IsTailCall() bool { return opcodeTable[o].tailCall }") + fmt.Fprintln(w, "func (o Op) HasSideEffects() bool { return opcodeTable[o].hasSideEffects }") + fmt.Fprintln(w, "func (o Op) UnsafePoint() bool { return opcodeTable[o].unsafePoint }") + fmt.Fprintln(w, "func (o Op) ResultInArg0() bool { return opcodeTable[o].resultInArg0 }") 
+ + // generate registers + for _, a := range archs { + if a.generic { + continue + } + fmt.Fprintf(w, "var registers%s = [...]Register {\n", a.name) + var gcRegN int + num := map[string]int8{} + for i, r := range a.regnames { + num[r] = int8(i) + pkg := a.pkg[len("cmd/internal/obj/"):] + var objname string // name in cmd/internal/obj/$ARCH + switch r { + case "SB": + // SB isn't a real register. cmd/internal/obj expects 0 in this case. + objname = "0" + case "SP": + objname = pkg + ".REGSP" + case "g": + objname = pkg + ".REGG" + default: + objname = pkg + ".REG_" + r + } + // Assign a GC register map index to registers + // that may contain pointers. + gcRegIdx := -1 + if a.gpregmask&(1< 32 { + // Won't fit in a uint32 mask. + log.Fatalf("too many GC registers (%d > 32) on %s", gcRegN, a.name) + } + fmt.Fprintln(w, "}") + fmt.Fprintf(w, "var paramIntReg%s = %#v\n", a.name, paramIntRegs) + fmt.Fprintf(w, "var paramFloatReg%s = %#v\n", a.name, paramFloatRegs) + fmt.Fprintf(w, "var gpRegMask%s = regMask(%d)\n", a.name, a.gpregmask) + fmt.Fprintf(w, "var fpRegMask%s = regMask(%d)\n", a.name, a.fpregmask) + if a.fp32regmask != 0 { + fmt.Fprintf(w, "var fp32RegMask%s = regMask(%d)\n", a.name, a.fp32regmask) + } + if a.fp64regmask != 0 { + fmt.Fprintf(w, "var fp64RegMask%s = regMask(%d)\n", a.name, a.fp64regmask) + } + fmt.Fprintf(w, "var specialRegMask%s = regMask(%d)\n", a.name, a.specialregmask) + fmt.Fprintf(w, "var framepointerReg%s = int8(%d)\n", a.name, a.framepointerreg) + fmt.Fprintf(w, "var linkReg%s = int8(%d)\n", a.name, a.linkreg) + } + + // gofmt result + b := w.Bytes() + var err error + b, err = format.Source(b) + if err != nil { + fmt.Printf("%s\n", w.Bytes()) + panic(err) + } + + if err := os.WriteFile("../opGen.go", b, 0666); err != nil { + log.Fatalf("can't write output: %v\n", err) + } + + // Check that the arch genfile handles all the arch-specific opcodes. + // This is very much a hack, but it is better than nothing. 
+ // + // Do a single regexp pass to record all ops being handled in a map, and + // then compare that with the ops list. This is much faster than one + // regexp pass per opcode. + for _, a := range archs { + if a.genfile == "" { + continue + } + + pattern := fmt.Sprintf(`\Wssa\.Op%s([a-zA-Z0-9_]+)\W`, a.name) + rxOp, err := regexp.Compile(pattern) + if err != nil { + log.Fatalf("bad opcode regexp %s: %v", pattern, err) + } + + src, err := os.ReadFile(a.genfile) + if err != nil { + log.Fatalf("can't read %s: %v", a.genfile, err) + } + seen := make(map[string]bool, len(a.ops)) + for _, m := range rxOp.FindAllSubmatch(src, -1) { + seen[string(m[1])] = true + } + for _, op := range a.ops { + if !seen[op.name] { + log.Fatalf("Op%s%s has no code generation in %s", a.name, op.name, a.genfile) + } + } + } +} + +// Name returns the name of the architecture for use in Op* and Block* enumerations. +func (a arch) Name() string { + s := a.name + if s == "generic" { + s = "" + } + return s +} + +// countRegs returns the number of set bits in the register mask. 
+func countRegs(r regMask) int { + return bits.OnesCount64(uint64(r)) +} + +// for sorting a pair of integers by key +type intPair struct { + key, val int +} +type byKey []intPair + +func (a byKey) Len() int { return len(a) } +func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byKey) Less(i, j int) bool { return a[i].key < a[j].key } + +type ArchsByName []arch + +func (x ArchsByName) Len() int { return len(x) } +func (x ArchsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x ArchsByName) Less(i, j int) bool { return x[i].name < x[j].name } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/rulegen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/rulegen.go new file mode 100644 index 0000000000000000000000000000000000000000..072df298f370d8c27a427e037963526924ba24c7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/_gen/rulegen.go @@ -0,0 +1,1885 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This program generates Go code that applies rewrite rules to a Value. +// The generated code implements a function of type func (v *Value) bool +// which reports whether if did something. 
+// Ideas stolen from Swift: http://www.hpl.hp.com/techreports/Compaq-DEC/WRL-2000-2.html + +package main + +import ( + "bufio" + "bytes" + "flag" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/printer" + "go/token" + "io" + "log" + "os" + "path" + "regexp" + "sort" + "strconv" + "strings" + + "golang.org/x/tools/go/ast/astutil" +) + +// rule syntax: +// sexpr [&& extra conditions] => [@block] sexpr +// +// sexpr are s-expressions (lisp-like parenthesized groupings) +// sexpr ::= [variable:](opcode sexpr*) +// | variable +// | +// | [auxint] +// | {aux} +// +// aux ::= variable | {code} +// type ::= variable | {code} +// variable ::= some token +// opcode ::= one of the opcodes from the *Ops.go files + +// special rules: trailing ellipsis "..." (in the outermost sexpr?) must match on both sides of a rule. +// trailing three underscore "___" in the outermost match sexpr indicate the presence of +// extra ignored args that need not appear in the replacement + +// extra conditions is just a chunk of Go that evaluates to a boolean. It may use +// variables declared in the matching tsexpr. The variable "v" is predefined to be +// the value matched by the entire rule. + +// If multiple rules match, the first one in file order is selected. + +var ( + genLog = flag.Bool("log", false, "generate code that logs; for debugging only") + addLine = flag.Bool("line", false, "add line number comment to generated rules; for debugging only") +) + +type Rule struct { + Rule string + Loc string // file name & line number +} + +func (r Rule) String() string { + return fmt.Sprintf("rule %q at %s", r.Rule, r.Loc) +} + +func normalizeSpaces(s string) string { + return strings.Join(strings.Fields(strings.TrimSpace(s)), " ") +} + +// parse returns the matching part of the rule, additional conditions, and the result. 
+func (r Rule) parse() (match, cond, result string) { + s := strings.Split(r.Rule, "=>") + match = normalizeSpaces(s[0]) + result = normalizeSpaces(s[1]) + cond = "" + if i := strings.Index(match, "&&"); i >= 0 { + cond = normalizeSpaces(match[i+2:]) + match = normalizeSpaces(match[:i]) + } + return match, cond, result +} + +func genRules(arch arch) { genRulesSuffix(arch, "") } +func genSplitLoadRules(arch arch) { genRulesSuffix(arch, "splitload") } +func genLateLowerRules(arch arch) { genRulesSuffix(arch, "latelower") } + +func genRulesSuffix(arch arch, suff string) { + // Open input file. + text, err := os.Open(arch.name + suff + ".rules") + if err != nil { + if suff == "" { + // All architectures must have a plain rules file. + log.Fatalf("can't read rule file: %v", err) + } + // Some architectures have bonus rules files that others don't share. That's fine. + return + } + + // oprules contains a list of rules for each block and opcode + blockrules := map[string][]Rule{} + oprules := map[string][]Rule{} + + // read rule file + scanner := bufio.NewScanner(text) + rule := "" + var lineno int + var ruleLineno int // line number of "=>" + for scanner.Scan() { + lineno++ + line := scanner.Text() + if i := strings.Index(line, "//"); i >= 0 { + // Remove comments. Note that this isn't string safe, so + // it will truncate lines with // inside strings. Oh well. 
+ line = line[:i] + } + rule += " " + line + rule = strings.TrimSpace(rule) + if rule == "" { + continue + } + if !strings.Contains(rule, "=>") { + continue + } + if ruleLineno == 0 { + ruleLineno = lineno + } + if strings.HasSuffix(rule, "=>") { + continue // continue on the next line + } + if n := balance(rule); n > 0 { + continue // open parentheses remain, continue on the next line + } else if n < 0 { + break // continuing the line can't help, and it will only make errors worse + } + + loc := fmt.Sprintf("%s%s.rules:%d", arch.name, suff, ruleLineno) + for _, rule2 := range expandOr(rule) { + r := Rule{Rule: rule2, Loc: loc} + if rawop := strings.Split(rule2, " ")[0][1:]; isBlock(rawop, arch) { + blockrules[rawop] = append(blockrules[rawop], r) + continue + } + // Do fancier value op matching. + match, _, _ := r.parse() + op, oparch, _, _, _, _ := parseValue(match, arch, loc) + opname := fmt.Sprintf("Op%s%s", oparch, op.name) + oprules[opname] = append(oprules[opname], r) + } + rule = "" + ruleLineno = 0 + } + if err := scanner.Err(); err != nil { + log.Fatalf("scanner failed: %v\n", err) + } + if balance(rule) != 0 { + log.Fatalf("%s.rules:%d: unbalanced rule: %v\n", arch.name, lineno, rule) + } + + // Order all the ops. + var ops []string + for op := range oprules { + ops = append(ops, op) + } + sort.Strings(ops) + + genFile := &File{Arch: arch, Suffix: suff} + // Main rewrite routine is a switch on v.Op. + fn := &Func{Kind: "Value", ArgLen: -1} + + sw := &Switch{Expr: exprf("v.Op")} + for _, op := range ops { + eop, ok := parseEllipsisRules(oprules[op], arch) + if ok { + if strings.Contains(oprules[op][0].Rule, "=>") && opByName(arch, op).aux != opByName(arch, eop).aux { + panic(fmt.Sprintf("can't use ... 
for ops that have different aux types: %s and %s", op, eop)) + } + swc := &Case{Expr: exprf("%s", op)} + swc.add(stmtf("v.Op = %s", eop)) + swc.add(stmtf("return true")) + sw.add(swc) + continue + } + + swc := &Case{Expr: exprf("%s", op)} + swc.add(stmtf("return rewriteValue%s%s_%s(v)", arch.name, suff, op)) + sw.add(swc) + } + if len(sw.List) > 0 { // skip if empty + fn.add(sw) + } + fn.add(stmtf("return false")) + genFile.add(fn) + + // Generate a routine per op. Note that we don't make one giant routine + // because it is too big for some compilers. + for _, op := range ops { + rules := oprules[op] + _, ok := parseEllipsisRules(oprules[op], arch) + if ok { + continue + } + + // rr is kept between iterations, so that each rule can check + // that the previous rule wasn't unconditional. + var rr *RuleRewrite + fn := &Func{ + Kind: "Value", + Suffix: fmt.Sprintf("_%s", op), + ArgLen: opByName(arch, op).argLength, + } + fn.add(declReserved("b", "v.Block")) + fn.add(declReserved("config", "b.Func.Config")) + fn.add(declReserved("fe", "b.Func.fe")) + fn.add(declReserved("typ", "&b.Func.Config.Types")) + for _, rule := range rules { + if rr != nil && !rr.CanFail { + log.Fatalf("unconditional rule %s is followed by other rules", rr.Match) + } + rr = &RuleRewrite{Loc: rule.Loc} + rr.Match, rr.Cond, rr.Result = rule.parse() + pos, _ := genMatch(rr, arch, rr.Match, fn.ArgLen >= 0) + if pos == "" { + pos = "v.Pos" + } + if rr.Cond != "" { + rr.add(breakf("!(%s)", rr.Cond)) + } + genResult(rr, arch, rr.Result, pos) + if *genLog { + rr.add(stmtf("logRule(%q)", rule.Loc)) + } + fn.add(rr) + } + if rr.CanFail { + fn.add(stmtf("return false")) + } + genFile.add(fn) + } + + // Generate block rewrite function. There are only a few block types + // so we can make this one function with a switch. 
+ fn = &Func{Kind: "Block"} + fn.add(declReserved("config", "b.Func.Config")) + fn.add(declReserved("typ", "&b.Func.Config.Types")) + + sw = &Switch{Expr: exprf("b.Kind")} + ops = ops[:0] + for op := range blockrules { + ops = append(ops, op) + } + sort.Strings(ops) + for _, op := range ops { + name, data := getBlockInfo(op, arch) + swc := &Case{Expr: exprf("%s", name)} + for _, rule := range blockrules[op] { + swc.add(genBlockRewrite(rule, arch, data)) + } + sw.add(swc) + } + if len(sw.List) > 0 { // skip if empty + fn.add(sw) + } + fn.add(stmtf("return false")) + genFile.add(fn) + + // Remove unused imports and variables. + buf := new(bytes.Buffer) + fprint(buf, genFile) + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, "", buf, parser.ParseComments) + if err != nil { + filename := fmt.Sprintf("%s_broken.go", arch.name) + if err := os.WriteFile(filename, buf.Bytes(), 0644); err != nil { + log.Printf("failed to dump broken code to %s: %v", filename, err) + } else { + log.Printf("dumped broken code to %s", filename) + } + log.Fatalf("failed to parse generated code for arch %s: %v", arch.name, err) + } + tfile := fset.File(file.Pos()) + + // First, use unusedInspector to find the unused declarations by their + // start position. + u := unusedInspector{unused: make(map[token.Pos]bool)} + u.node(file) + + // Then, delete said nodes via astutil.Apply. + pre := func(c *astutil.Cursor) bool { + node := c.Node() + if node == nil { + return true + } + if u.unused[node.Pos()] { + c.Delete() + // Unused imports and declarations use exactly + // one line. Prevent leaving an empty line. + tfile.MergeLine(tfile.Position(node.Pos()).Line) + return false + } + return true + } + post := func(c *astutil.Cursor) bool { + switch node := c.Node().(type) { + case *ast.GenDecl: + if len(node.Specs) == 0 { + // Don't leave a broken or empty GenDecl behind, + // such as "import ()". 
+ c.Delete() + } + } + return true + } + file = astutil.Apply(file, pre, post).(*ast.File) + + // Write the well-formatted source to file + f, err := os.Create("../rewrite" + arch.name + suff + ".go") + if err != nil { + log.Fatalf("can't write output: %v", err) + } + defer f.Close() + // gofmt result; use a buffered writer, as otherwise go/format spends + // far too much time in syscalls. + bw := bufio.NewWriter(f) + if err := format.Node(bw, fset, file); err != nil { + log.Fatalf("can't format output: %v", err) + } + if err := bw.Flush(); err != nil { + log.Fatalf("can't write output: %v", err) + } + if err := f.Close(); err != nil { + log.Fatalf("can't write output: %v", err) + } +} + +// unusedInspector can be used to detect unused variables and imports in an +// ast.Node via its node method. The result is available in the "unused" map. +// +// note that unusedInspector is lazy and best-effort; it only supports the node +// types and patterns used by the rulegen program. +type unusedInspector struct { + // scope is the current scope, which can never be nil when a declaration + // is encountered. That is, the unusedInspector.node entrypoint should + // generally be an entire file or block. + scope *scope + + // unused is the resulting set of unused declared names, indexed by the + // starting position of the node that declared the name. + unused map[token.Pos]bool + + // defining is the object currently being defined; this is useful so + // that if "foo := bar" is unused and removed, we can then detect if + // "bar" becomes unused as well. + defining *object +} + +// scoped opens a new scope when called, and returns a function which closes +// that same scope. When a scope is closed, unused variables are recorded. 
+func (u *unusedInspector) scoped() func() { + outer := u.scope + u.scope = &scope{outer: outer, objects: map[string]*object{}} + return func() { + for anyUnused := true; anyUnused; { + anyUnused = false + for _, obj := range u.scope.objects { + if obj.numUses > 0 { + continue + } + u.unused[obj.pos] = true + for _, used := range obj.used { + if used.numUses--; used.numUses == 0 { + anyUnused = true + } + } + // We've decremented numUses for each of the + // objects in used. Zero this slice too, to keep + // everything consistent. + obj.used = nil + } + } + u.scope = outer + } +} + +func (u *unusedInspector) exprs(list []ast.Expr) { + for _, x := range list { + u.node(x) + } +} + +func (u *unusedInspector) node(node ast.Node) { + switch node := node.(type) { + case *ast.File: + defer u.scoped()() + for _, decl := range node.Decls { + u.node(decl) + } + case *ast.GenDecl: + for _, spec := range node.Specs { + u.node(spec) + } + case *ast.ImportSpec: + impPath, _ := strconv.Unquote(node.Path.Value) + name := path.Base(impPath) + u.scope.objects[name] = &object{ + name: name, + pos: node.Pos(), + } + case *ast.FuncDecl: + u.node(node.Type) + if node.Body != nil { + u.node(node.Body) + } + case *ast.FuncType: + if node.Params != nil { + u.node(node.Params) + } + if node.Results != nil { + u.node(node.Results) + } + case *ast.FieldList: + for _, field := range node.List { + u.node(field) + } + case *ast.Field: + u.node(node.Type) + + // statements + + case *ast.BlockStmt: + defer u.scoped()() + for _, stmt := range node.List { + u.node(stmt) + } + case *ast.DeclStmt: + u.node(node.Decl) + case *ast.IfStmt: + if node.Init != nil { + u.node(node.Init) + } + u.node(node.Cond) + u.node(node.Body) + if node.Else != nil { + u.node(node.Else) + } + case *ast.ForStmt: + if node.Init != nil { + u.node(node.Init) + } + if node.Cond != nil { + u.node(node.Cond) + } + if node.Post != nil { + u.node(node.Post) + } + u.node(node.Body) + case *ast.SwitchStmt: + if node.Init != nil { + 
u.node(node.Init) + } + if node.Tag != nil { + u.node(node.Tag) + } + u.node(node.Body) + case *ast.CaseClause: + u.exprs(node.List) + defer u.scoped()() + for _, stmt := range node.Body { + u.node(stmt) + } + case *ast.BranchStmt: + case *ast.ExprStmt: + u.node(node.X) + case *ast.AssignStmt: + if node.Tok != token.DEFINE { + u.exprs(node.Rhs) + u.exprs(node.Lhs) + break + } + lhs := node.Lhs + if len(lhs) == 2 && lhs[1].(*ast.Ident).Name == "_" { + lhs = lhs[:1] + } + if len(lhs) != 1 { + panic("no support for := with multiple names") + } + + name := lhs[0].(*ast.Ident) + obj := &object{ + name: name.Name, + pos: name.NamePos, + } + + old := u.defining + u.defining = obj + u.exprs(node.Rhs) + u.defining = old + + u.scope.objects[name.Name] = obj + case *ast.ReturnStmt: + u.exprs(node.Results) + case *ast.IncDecStmt: + u.node(node.X) + + // expressions + + case *ast.CallExpr: + u.node(node.Fun) + u.exprs(node.Args) + case *ast.SelectorExpr: + u.node(node.X) + case *ast.UnaryExpr: + u.node(node.X) + case *ast.BinaryExpr: + u.node(node.X) + u.node(node.Y) + case *ast.StarExpr: + u.node(node.X) + case *ast.ParenExpr: + u.node(node.X) + case *ast.IndexExpr: + u.node(node.X) + u.node(node.Index) + case *ast.TypeAssertExpr: + u.node(node.X) + u.node(node.Type) + case *ast.Ident: + if obj := u.scope.Lookup(node.Name); obj != nil { + obj.numUses++ + if u.defining != nil { + u.defining.used = append(u.defining.used, obj) + } + } + case *ast.BasicLit: + case *ast.ValueSpec: + u.exprs(node.Values) + default: + panic(fmt.Sprintf("unhandled node: %T", node)) + } +} + +// scope keeps track of a certain scope and its declared names, as well as the +// outer (parent) scope. 
+type scope struct { + outer *scope // can be nil, if this is the top-level scope + objects map[string]*object // indexed by each declared name +} + +func (s *scope) Lookup(name string) *object { + if obj := s.objects[name]; obj != nil { + return obj + } + if s.outer == nil { + return nil + } + return s.outer.Lookup(name) +} + +// object keeps track of a declared name, such as a variable or import. +type object struct { + name string + pos token.Pos // start position of the node declaring the object + + numUses int // number of times this object is used + used []*object // objects that its declaration makes use of +} + +func fprint(w io.Writer, n Node) { + switch n := n.(type) { + case *File: + file := n + seenRewrite := make(map[[3]string]string) + fmt.Fprintf(w, "// Code generated from _gen/%s%s.rules using 'go generate'; DO NOT EDIT.\n", n.Arch.name, n.Suffix) + fmt.Fprintf(w, "\npackage ssa\n") + for _, path := range append([]string{ + "fmt", + "internal/buildcfg", + "math", + "cmd/internal/obj", + "cmd/compile/internal/base", + "cmd/compile/internal/types", + "cmd/compile/internal/ir", + }, n.Arch.imports...) 
{ + fmt.Fprintf(w, "import %q\n", path) + } + for _, f := range n.List { + f := f.(*Func) + fmt.Fprintf(w, "func rewrite%s%s%s%s(", f.Kind, n.Arch.name, n.Suffix, f.Suffix) + fmt.Fprintf(w, "%c *%s) bool {\n", strings.ToLower(f.Kind)[0], f.Kind) + if f.Kind == "Value" && f.ArgLen > 0 { + for i := f.ArgLen - 1; i >= 0; i-- { + fmt.Fprintf(w, "v_%d := v.Args[%d]\n", i, i) + } + } + for _, n := range f.List { + fprint(w, n) + + if rr, ok := n.(*RuleRewrite); ok { + k := [3]string{ + normalizeMatch(rr.Match, file.Arch), + normalizeWhitespace(rr.Cond), + normalizeWhitespace(rr.Result), + } + if prev, ok := seenRewrite[k]; ok { + log.Fatalf("duplicate rule %s, previously seen at %s\n", rr.Loc, prev) + } + seenRewrite[k] = rr.Loc + } + } + fmt.Fprintf(w, "}\n") + } + case *Switch: + fmt.Fprintf(w, "switch ") + fprint(w, n.Expr) + fmt.Fprintf(w, " {\n") + for _, n := range n.List { + fprint(w, n) + } + fmt.Fprintf(w, "}\n") + case *Case: + fmt.Fprintf(w, "case ") + fprint(w, n.Expr) + fmt.Fprintf(w, ":\n") + for _, n := range n.List { + fprint(w, n) + } + case *RuleRewrite: + if *addLine { + fmt.Fprintf(w, "// %s\n", n.Loc) + } + fmt.Fprintf(w, "// match: %s\n", n.Match) + if n.Cond != "" { + fmt.Fprintf(w, "// cond: %s\n", n.Cond) + } + fmt.Fprintf(w, "// result: %s\n", n.Result) + fmt.Fprintf(w, "for %s {\n", n.Check) + nCommutative := 0 + for _, n := range n.List { + if b, ok := n.(*CondBreak); ok { + b.InsideCommuteLoop = nCommutative > 0 + } + fprint(w, n) + if loop, ok := n.(StartCommuteLoop); ok { + if nCommutative != loop.Depth { + panic("mismatch commute loop depth") + } + nCommutative++ + } + } + fmt.Fprintf(w, "return true\n") + for i := 0; i < nCommutative; i++ { + fmt.Fprintln(w, "}") + } + if n.CommuteDepth > 0 && n.CanFail { + fmt.Fprint(w, "break\n") + } + fmt.Fprintf(w, "}\n") + case *Declare: + fmt.Fprintf(w, "%s := ", n.Name) + fprint(w, n.Value) + fmt.Fprintln(w) + case *CondBreak: + fmt.Fprintf(w, "if ") + fprint(w, n.Cond) + fmt.Fprintf(w, " {\n") + 
if n.InsideCommuteLoop { + fmt.Fprintf(w, "continue") + } else { + fmt.Fprintf(w, "break") + } + fmt.Fprintf(w, "\n}\n") + case ast.Node: + printConfig.Fprint(w, emptyFset, n) + if _, ok := n.(ast.Stmt); ok { + fmt.Fprintln(w) + } + case StartCommuteLoop: + fmt.Fprintf(w, "for _i%[1]d := 0; _i%[1]d <= 1; _i%[1]d, %[2]s_0, %[2]s_1 = _i%[1]d + 1, %[2]s_1, %[2]s_0 {\n", n.Depth, n.V) + default: + log.Fatalf("cannot print %T", n) + } +} + +var printConfig = printer.Config{ + Mode: printer.RawFormat, // we use go/format later, so skip work here +} + +var emptyFset = token.NewFileSet() + +// Node can be a Statement or an ast.Expr. +type Node interface{} + +// Statement can be one of our high-level statement struct types, or an +// ast.Stmt under some limited circumstances. +type Statement interface{} + +// BodyBase is shared by all of our statement pseudo-node types which can +// contain other statements. +type BodyBase struct { + List []Statement + CanFail bool +} + +func (w *BodyBase) add(node Statement) { + var last Statement + if len(w.List) > 0 { + last = w.List[len(w.List)-1] + } + if node, ok := node.(*CondBreak); ok { + w.CanFail = true + if last, ok := last.(*CondBreak); ok { + // Add to the previous "if { break }" via a + // logical OR, which will save verbosity. + last.Cond = &ast.BinaryExpr{ + Op: token.LOR, + X: last.Cond, + Y: node.Cond, + } + return + } + } + + w.List = append(w.List, node) +} + +// predeclared contains globally known tokens that should not be redefined. +var predeclared = map[string]bool{ + "nil": true, + "false": true, + "true": true, +} + +// declared reports if the body contains a Declare with the given name. +func (w *BodyBase) declared(name string) bool { + if predeclared[name] { + // Treat predeclared names as having already been declared. + // This lets us use nil to match an aux field or + // true and false to match an auxint field. 
+ return true + } + for _, s := range w.List { + if decl, ok := s.(*Declare); ok && decl.Name == name { + return true + } + } + return false +} + +// These types define some high-level statement struct types, which can be used +// as a Statement. This allows us to keep some node structs simpler, and have +// higher-level nodes such as an entire rule rewrite. +// +// Note that ast.Expr is always used as-is; we don't declare our own expression +// nodes. +type ( + File struct { + BodyBase // []*Func + Arch arch + Suffix string + } + Func struct { + BodyBase + Kind string // "Value" or "Block" + Suffix string + ArgLen int32 // if kind == "Value", number of args for this op + } + Switch struct { + BodyBase // []*Case + Expr ast.Expr + } + Case struct { + BodyBase + Expr ast.Expr + } + RuleRewrite struct { + BodyBase + Match, Cond, Result string // top comments + Check string // top-level boolean expression + + Alloc int // for unique var names + Loc string // file name & line number of the original rule + CommuteDepth int // used to track depth of commute loops + } + Declare struct { + Name string + Value ast.Expr + } + CondBreak struct { + Cond ast.Expr + InsideCommuteLoop bool + } + StartCommuteLoop struct { + Depth int + V string + } +) + +// exprf parses a Go expression generated from fmt.Sprintf, panicking if an +// error occurs. +func exprf(format string, a ...interface{}) ast.Expr { + src := fmt.Sprintf(format, a...) + expr, err := parser.ParseExpr(src) + if err != nil { + log.Fatalf("expr parse error on %q: %v", src, err) + } + return expr +} + +// stmtf parses a Go statement generated from fmt.Sprintf. This function is only +// meant for simple statements that don't have a custom Statement node declared +// in this package, such as ast.ReturnStmt or ast.ExprStmt. +func stmtf(format string, a ...interface{}) Statement { + src := fmt.Sprintf(format, a...) 
+ fsrc := "package p\nfunc _() {\n" + src + "\n}\n" + file, err := parser.ParseFile(token.NewFileSet(), "", fsrc, 0) + if err != nil { + log.Fatalf("stmt parse error on %q: %v", src, err) + } + return file.Decls[0].(*ast.FuncDecl).Body.List[0] +} + +var reservedNames = map[string]bool{ + "v": true, // Values[i], etc + "b": true, // v.Block + "config": true, // b.Func.Config + "fe": true, // b.Func.fe + "typ": true, // &b.Func.Config.Types +} + +// declf constructs a simple "name := value" declaration, +// using exprf for its value. +// +// name must not be one of reservedNames. +// This helps prevent unintended shadowing and name clashes. +// To declare a reserved name, use declReserved. +func declf(loc, name, format string, a ...interface{}) *Declare { + if reservedNames[name] { + log.Fatalf("rule %s uses the reserved name %s", loc, name) + } + return &Declare{name, exprf(format, a...)} +} + +// declReserved is like declf, but the name must be one of reservedNames. +// Calls to declReserved should generally be static and top-level. +func declReserved(name, value string) *Declare { + if !reservedNames[name] { + panic(fmt.Sprintf("declReserved call does not use a reserved name: %q", name)) + } + return &Declare{name, exprf(value)} +} + +// breakf constructs a simple "if cond { break }" statement, using exprf for its +// condition. 
+func breakf(format string, a ...interface{}) *CondBreak { + return &CondBreak{Cond: exprf(format, a...)} +} + +func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite { + rr := &RuleRewrite{Loc: rule.Loc} + rr.Match, rr.Cond, rr.Result = rule.parse() + _, _, auxint, aux, s := extract(rr.Match) // remove parens, then split + + // check match of control values + if len(s) < data.controls { + log.Fatalf("incorrect number of arguments in %s, got %v wanted at least %v", rule, len(s), data.controls) + } + controls := s[:data.controls] + pos := make([]string, data.controls) + for i, arg := range controls { + cname := fmt.Sprintf("b.Controls[%v]", i) + if strings.Contains(arg, "(") { + vname, expr := splitNameExpr(arg) + if vname == "" { + vname = fmt.Sprintf("v_%v", i) + } + rr.add(declf(rr.Loc, vname, cname)) + p, op := genMatch0(rr, arch, expr, vname, nil, false) // TODO: pass non-nil cnt? + if op != "" { + check := fmt.Sprintf("%s.Op == %s", cname, op) + if rr.Check == "" { + rr.Check = check + } else { + rr.Check += " && " + check + } + } + if p == "" { + p = vname + ".Pos" + } + pos[i] = p + } else { + rr.add(declf(rr.Loc, arg, cname)) + pos[i] = arg + ".Pos" + } + } + for _, e := range []struct { + name, field, dclType string + }{ + {auxint, "AuxInt", data.auxIntType()}, + {aux, "Aux", data.auxType()}, + } { + if e.name == "" { + continue + } + + if e.dclType == "" { + log.Fatalf("op %s has no declared type for %s", data.name, e.field) + } + if !token.IsIdentifier(e.name) || rr.declared(e.name) { + rr.add(breakf("%sTo%s(b.%s) != %s", unTitle(e.field), title(e.dclType), e.field, e.name)) + } else { + rr.add(declf(rr.Loc, e.name, "%sTo%s(b.%s)", unTitle(e.field), title(e.dclType), e.field)) + } + } + if rr.Cond != "" { + rr.add(breakf("!(%s)", rr.Cond)) + } + + // Rule matches. Generate result. 
+ outop, _, auxint, aux, t := extract(rr.Result) // remove parens, then split + blockName, outdata := getBlockInfo(outop, arch) + if len(t) < outdata.controls { + log.Fatalf("incorrect number of output arguments in %s, got %v wanted at least %v", rule, len(s), outdata.controls) + } + + // Check if newsuccs is the same set as succs. + succs := s[data.controls:] + newsuccs := t[outdata.controls:] + m := map[string]bool{} + for _, succ := range succs { + if m[succ] { + log.Fatalf("can't have a repeat successor name %s in %s", succ, rule) + } + m[succ] = true + } + for _, succ := range newsuccs { + if !m[succ] { + log.Fatalf("unknown successor %s in %s", succ, rule) + } + delete(m, succ) + } + if len(m) != 0 { + log.Fatalf("unmatched successors %v in %s", m, rule) + } + + var genControls [2]string + for i, control := range t[:outdata.controls] { + // Select a source position for any new control values. + // TODO: does it always make sense to use the source position + // of the original control values or should we be using the + // block's source position in some cases? + newpos := "b.Pos" // default to block's source position + if i < len(pos) && pos[i] != "" { + // Use the previous control value's source position. + newpos = pos[i] + } + + // Generate a new control value (or copy an existing value). + genControls[i] = genResult0(rr, arch, control, false, false, newpos, nil) + } + switch outdata.controls { + case 0: + rr.add(stmtf("b.Reset(%s)", blockName)) + case 1: + rr.add(stmtf("b.resetWithControl(%s, %s)", blockName, genControls[0])) + case 2: + rr.add(stmtf("b.resetWithControl2(%s, %s, %s)", blockName, genControls[0], genControls[1])) + default: + log.Fatalf("too many controls: %d", outdata.controls) + } + + if auxint != "" { + // Make sure auxint value has the right type. + rr.add(stmtf("b.AuxInt = %sToAuxInt(%s)", unTitle(outdata.auxIntType()), auxint)) + } + if aux != "" { + // Make sure aux value has the right type. 
+ rr.add(stmtf("b.Aux = %sToAux(%s)", unTitle(outdata.auxType()), aux)) + } + + succChanged := false + for i := 0; i < len(succs); i++ { + if succs[i] != newsuccs[i] { + succChanged = true + } + } + if succChanged { + if len(succs) != 2 { + log.Fatalf("changed successors, len!=2 in %s", rule) + } + if succs[0] != newsuccs[1] || succs[1] != newsuccs[0] { + log.Fatalf("can only handle swapped successors in %s", rule) + } + rr.add(stmtf("b.swapSuccessors()")) + } + + if *genLog { + rr.add(stmtf("logRule(%q)", rule.Loc)) + } + return rr +} + +// genMatch returns the variable whose source position should be used for the +// result (or "" if no opinion), and a boolean that reports whether the match can fail. +func genMatch(rr *RuleRewrite, arch arch, match string, pregenTop bool) (pos, checkOp string) { + cnt := varCount(rr) + return genMatch0(rr, arch, match, "v", cnt, pregenTop) +} + +func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int, pregenTop bool) (pos, checkOp string) { + if match[0] != '(' || match[len(match)-1] != ')' { + log.Fatalf("%s: non-compound expr in genMatch0: %q", rr.Loc, match) + } + op, oparch, typ, auxint, aux, args := parseValue(match, arch, rr.Loc) + + checkOp = fmt.Sprintf("Op%s%s", oparch, op.name) + + if op.faultOnNilArg0 || op.faultOnNilArg1 { + // Prefer the position of an instruction which could fault. + pos = v + ".Pos" + } + + // If the last argument is ___, it means "don't care about trailing arguments, really" + // The likely/intended use is for rewrites that are too tricky to express in the existing pattern language + // Do a length check early because long patterns fed short (ultimately not-matching) inputs will + // do an indexing error in pattern-matching. 
+ if op.argLength == -1 { + l := len(args) + if l == 0 || args[l-1] != "___" { + rr.add(breakf("len(%s.Args) != %d", v, l)) + } else if l > 1 && args[l-1] == "___" { + rr.add(breakf("len(%s.Args) < %d", v, l-1)) + } + } + + for _, e := range []struct { + name, field, dclType string + }{ + {typ, "Type", "*types.Type"}, + {auxint, "AuxInt", op.auxIntType()}, + {aux, "Aux", op.auxType()}, + } { + if e.name == "" { + continue + } + + if e.dclType == "" { + log.Fatalf("op %s has no declared type for %s", op.name, e.field) + } + if !token.IsIdentifier(e.name) || rr.declared(e.name) { + switch e.field { + case "Aux": + rr.add(breakf("auxTo%s(%s.%s) != %s", title(e.dclType), v, e.field, e.name)) + case "AuxInt": + rr.add(breakf("auxIntTo%s(%s.%s) != %s", title(e.dclType), v, e.field, e.name)) + case "Type": + rr.add(breakf("%s.%s != %s", v, e.field, e.name)) + } + } else { + switch e.field { + case "Aux": + rr.add(declf(rr.Loc, e.name, "auxTo%s(%s.%s)", title(e.dclType), v, e.field)) + case "AuxInt": + rr.add(declf(rr.Loc, e.name, "auxIntTo%s(%s.%s)", title(e.dclType), v, e.field)) + case "Type": + rr.add(declf(rr.Loc, e.name, "%s.%s", v, e.field)) + } + } + } + + commutative := op.commutative + if commutative { + if args[0] == args[1] { + // When we have (Add x x), for any x, + // even if there are other uses of x besides these two, + // and even if x is not a variable, + // we can skip the commutative match. + commutative = false + } + if cnt[args[0]] == 1 && cnt[args[1]] == 1 { + // When we have (Add x y) with no other uses + // of x and y in the matching rule and condition, + // then we can skip the commutative match (Add y x). + commutative = false + } + } + + if !pregenTop { + // Access last argument first to minimize bounds checks. 
+ for n := len(args) - 1; n > 0; n-- { + a := args[n] + if a == "_" { + continue + } + if !rr.declared(a) && token.IsIdentifier(a) && !(commutative && len(args) == 2) { + rr.add(declf(rr.Loc, a, "%s.Args[%d]", v, n)) + // delete the last argument so it is not reprocessed + args = args[:n] + } else { + rr.add(stmtf("_ = %s.Args[%d]", v, n)) + } + break + } + } + if commutative && !pregenTop { + for i := 0; i <= 1; i++ { + vname := fmt.Sprintf("%s_%d", v, i) + rr.add(declf(rr.Loc, vname, "%s.Args[%d]", v, i)) + } + } + if commutative { + rr.add(StartCommuteLoop{rr.CommuteDepth, v}) + rr.CommuteDepth++ + } + for i, arg := range args { + if arg == "_" { + continue + } + var rhs string + if (commutative && i < 2) || pregenTop { + rhs = fmt.Sprintf("%s_%d", v, i) + } else { + rhs = fmt.Sprintf("%s.Args[%d]", v, i) + } + if !strings.Contains(arg, "(") { + // leaf variable + if rr.declared(arg) { + // variable already has a definition. Check whether + // the old definition and the new definition match. + // For example, (add x x). Equality is just pointer equality + // on Values (so cse is important to do before lowering). + rr.add(breakf("%s != %s", arg, rhs)) + } else { + if arg != rhs { + rr.add(declf(rr.Loc, arg, "%s", rhs)) + } + } + continue + } + // compound sexpr + argname, expr := splitNameExpr(arg) + if argname == "" { + argname = fmt.Sprintf("%s_%d", v, i) + } + if argname == "b" { + log.Fatalf("don't name args 'b', it is ambiguous with blocks") + } + + if argname != rhs { + rr.add(declf(rr.Loc, argname, "%s", rhs)) + } + bexpr := exprf("%s.Op != addLater", argname) + rr.add(&CondBreak{Cond: bexpr}) + argPos, argCheckOp := genMatch0(rr, arch, expr, argname, cnt, false) + bexpr.(*ast.BinaryExpr).Y.(*ast.Ident).Name = argCheckOp + + if argPos != "" { + // Keep the argument in preference to the parent, as the + // argument is normally earlier in program flow. 
+ // Keep the argument in preference to an earlier argument, + // as that prefers the memory argument which is also earlier + // in the program flow. + pos = argPos + } + } + + return pos, checkOp +} + +func genResult(rr *RuleRewrite, arch arch, result, pos string) { + move := result[0] == '@' + if move { + // parse @block directive + s := strings.SplitN(result[1:], " ", 2) + rr.add(stmtf("b = %s", s[0])) + result = s[1] + } + cse := make(map[string]string) + genResult0(rr, arch, result, true, move, pos, cse) +} + +func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos string, cse map[string]string) string { + resname, expr := splitNameExpr(result) + result = expr + // TODO: when generating a constant result, use f.constVal to avoid + // introducing copies just to clean them up again. + if result[0] != '(' { + // variable + if top { + // It in not safe in general to move a variable between blocks + // (and particularly not a phi node). + // Introduce a copy. + rr.add(stmtf("v.copyOf(%s)", result)) + } + return result + } + + w := normalizeWhitespace(result) + if prev := cse[w]; prev != "" { + return prev + } + + op, oparch, typ, auxint, aux, args := parseValue(result, arch, rr.Loc) + + // Find the type of the variable. + typeOverride := typ != "" + if typ == "" && op.typ != "" { + typ = typeName(op.typ) + } + + v := "v" + if top && !move { + rr.add(stmtf("v.reset(Op%s%s)", oparch, op.name)) + if typeOverride { + rr.add(stmtf("v.Type = %s", typ)) + } + } else { + if typ == "" { + log.Fatalf("sub-expression %s (op=Op%s%s) at %s must have a type", result, oparch, op.name, rr.Loc) + } + if resname == "" { + v = fmt.Sprintf("v%d", rr.Alloc) + } else { + v = resname + } + rr.Alloc++ + rr.add(declf(rr.Loc, v, "b.NewValue0(%s, Op%s%s, %s)", pos, oparch, op.name, typ)) + if move && top { + // Rewrite original into a copy + rr.add(stmtf("v.copyOf(%s)", v)) + } + } + + if auxint != "" { + // Make sure auxint value has the right type. 
+ rr.add(stmtf("%s.AuxInt = %sToAuxInt(%s)", v, unTitle(op.auxIntType()), auxint)) + } + if aux != "" { + // Make sure aux value has the right type. + rr.add(stmtf("%s.Aux = %sToAux(%s)", v, unTitle(op.auxType()), aux)) + } + all := new(strings.Builder) + for i, arg := range args { + x := genResult0(rr, arch, arg, false, move, pos, cse) + if i > 0 { + all.WriteString(", ") + } + all.WriteString(x) + } + switch len(args) { + case 0: + case 1: + rr.add(stmtf("%s.AddArg(%s)", v, all.String())) + default: + rr.add(stmtf("%s.AddArg%d(%s)", v, len(args), all.String())) + } + + if cse != nil { + cse[w] = v + } + return v +} + +func split(s string) []string { + var r []string + +outer: + for s != "" { + d := 0 // depth of ({[< + var open, close byte // opening and closing markers ({[< or )}]> + nonsp := false // found a non-space char so far + for i := 0; i < len(s); i++ { + switch { + case d == 0 && s[i] == '(': + open, close = '(', ')' + d++ + case d == 0 && s[i] == '<': + open, close = '<', '>' + d++ + case d == 0 && s[i] == '[': + open, close = '[', ']' + d++ + case d == 0 && s[i] == '{': + open, close = '{', '}' + d++ + case d == 0 && (s[i] == ' ' || s[i] == '\t'): + if nonsp { + r = append(r, strings.TrimSpace(s[:i])) + s = s[i:] + continue outer + } + case d > 0 && s[i] == open: + d++ + case d > 0 && s[i] == close: + d-- + default: + nonsp = true + } + } + if d != 0 { + log.Fatalf("imbalanced expression: %q", s) + } + if nonsp { + r = append(r, strings.TrimSpace(s)) + } + break + } + return r +} + +// isBlock reports whether this op is a block opcode. +func isBlock(name string, arch arch) bool { + for _, b := range genericBlocks { + if b.name == name { + return true + } + } + for _, b := range arch.blocks { + if b.name == name { + return true + } + } + return false +} + +func extract(val string) (op, typ, auxint, aux string, args []string) { + val = val[1 : len(val)-1] // remove () + + // Split val up into regions. 
+ // Split by spaces/tabs, except those contained in (), {}, [], or <>. + s := split(val) + + // Extract restrictions and args. + op = s[0] + for _, a := range s[1:] { + switch a[0] { + case '<': + typ = a[1 : len(a)-1] // remove <> + case '[': + auxint = a[1 : len(a)-1] // remove [] + case '{': + aux = a[1 : len(a)-1] // remove {} + default: + args = append(args, a) + } + } + return +} + +// parseValue parses a parenthesized value from a rule. +// The value can be from the match or the result side. +// It returns the op and unparsed strings for typ, auxint, and aux restrictions and for all args. +// oparch is the architecture that op is located in, or "" for generic. +func parseValue(val string, arch arch, loc string) (op opData, oparch, typ, auxint, aux string, args []string) { + // Resolve the op. + var s string + s, typ, auxint, aux, args = extract(val) + + // match reports whether x is a good op to select. + // If strict is true, rule generation might succeed. + // If strict is false, rule generation has failed, + // but we're trying to generate a useful error. + // Doing strict=true then strict=false allows + // precise op matching while retaining good error messages. + match := func(x opData, strict bool, archname string) bool { + if x.name != s { + return false + } + if x.argLength != -1 && int(x.argLength) != len(args) && (len(args) != 1 || args[0] != "...") { + if strict { + return false + } + log.Printf("%s: op %s (%s) should have %d args, has %d", loc, s, archname, x.argLength, len(args)) + } + return true + } + + for _, x := range genericOps { + if match(x, true, "generic") { + op = x + break + } + } + for _, x := range arch.ops { + if arch.name != "generic" && match(x, true, arch.name) { + if op.name != "" { + log.Fatalf("%s: matches for op %s found in both generic and %s", loc, op.name, arch.name) + } + op = x + oparch = arch.name + break + } + } + + if op.name == "" { + // Failed to find the op. 
+ // Run through everything again with strict=false + // to generate useful diagnostic messages before failing. + for _, x := range genericOps { + match(x, false, "generic") + } + for _, x := range arch.ops { + match(x, false, arch.name) + } + log.Fatalf("%s: unknown op %s", loc, s) + } + + // Sanity check aux, auxint. + if auxint != "" && !opHasAuxInt(op) { + log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux) + } + if aux != "" && !opHasAux(op) { + log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux) + } + return +} + +func opHasAuxInt(op opData) bool { + switch op.aux { + case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "UInt8", "Float32", "Float64", + "SymOff", "CallOff", "SymValAndOff", "TypSize", "ARM64BitField", "FlagConstant", "CCop": + return true + } + return false +} + +func opHasAux(op opData) bool { + switch op.aux { + case "String", "Sym", "SymOff", "Call", "CallOff", "SymValAndOff", "Typ", "TypSize", + "S390XCCMask", "S390XRotateParams": + return true + } + return false +} + +// splitNameExpr splits s-expr arg, possibly prefixed by "name:", +// into name and the unprefixed expression. +// For example, "x:(Foo)" yields "x", "(Foo)", +// and "(Foo)" yields "", "(Foo)". +func splitNameExpr(arg string) (name, expr string) { + colon := strings.Index(arg, ":") + if colon < 0 { + return "", arg + } + openparen := strings.Index(arg, "(") + if openparen < 0 { + log.Fatalf("splitNameExpr(%q): colon but no open parens", arg) + } + if colon > openparen { + // colon is inside the parens, such as in "(Foo x:(Bar))". 
+ return "", arg + } + return arg[:colon], arg[colon+1:] +} + +func getBlockInfo(op string, arch arch) (name string, data blockData) { + for _, b := range genericBlocks { + if b.name == op { + return "Block" + op, b + } + } + for _, b := range arch.blocks { + if b.name == op { + return "Block" + arch.name + op, b + } + } + log.Fatalf("could not find block data for %s", op) + panic("unreachable") +} + +// typeName returns the string to use to generate a type. +func typeName(typ string) string { + if typ[0] == '(' { + ts := strings.Split(typ[1:len(typ)-1], ",") + if len(ts) != 2 { + log.Fatalf("Tuple expect 2 arguments") + } + return "types.NewTuple(" + typeName(ts[0]) + ", " + typeName(ts[1]) + ")" + } + switch typ { + case "Flags", "Mem", "Void", "Int128": + return "types.Type" + typ + default: + return "typ." + typ + } +} + +// balance returns the number of unclosed '(' characters in s. +// If a ')' appears without a corresponding '(', balance returns -1. +func balance(s string) int { + balance := 0 + for _, c := range s { + switch c { + case '(': + balance++ + case ')': + balance-- + if balance < 0 { + // don't allow ")(" to return 0 + return -1 + } + } + } + return balance +} + +// findAllOpcode is a function to find the opcode portion of s-expressions. +var findAllOpcode = regexp.MustCompile(`[(](\w+[|])+\w+[)]`).FindAllStringIndex + +// excludeFromExpansion reports whether the substring s[idx[0]:idx[1]] in a rule +// should be disregarded as a candidate for | expansion. +// It uses simple syntactic checks to see whether the substring +// is inside an AuxInt expression or inside the && conditions. +func excludeFromExpansion(s string, idx []int) bool { + left := s[:idx[0]] + if strings.LastIndexByte(left, '[') > strings.LastIndexByte(left, ']') { + // Inside an AuxInt expression. + return true + } + right := s[idx[1]:] + if strings.Contains(left, "&&") && strings.Contains(right, "=>") { + // Inside && conditions. 
+ return true + } + return false +} + +// expandOr converts a rule into multiple rules by expanding | ops. +func expandOr(r string) []string { + // Find every occurrence of |-separated things. + // They look like MOV(B|W|L|Q|SS|SD)load or MOV(Q|L)loadidx(1|8). + // Generate rules selecting one case from each |-form. + + // Count width of |-forms. They must match. + n := 1 + for _, idx := range findAllOpcode(r, -1) { + if excludeFromExpansion(r, idx) { + continue + } + s := r[idx[0]:idx[1]] + c := strings.Count(s, "|") + 1 + if c == 1 { + continue + } + if n > 1 && n != c { + log.Fatalf("'|' count doesn't match in %s: both %d and %d\n", r, n, c) + } + n = c + } + if n == 1 { + // No |-form in this rule. + return []string{r} + } + // Build each new rule. + res := make([]string, n) + for i := 0; i < n; i++ { + buf := new(strings.Builder) + x := 0 + for _, idx := range findAllOpcode(r, -1) { + if excludeFromExpansion(r, idx) { + continue + } + buf.WriteString(r[x:idx[0]]) // write bytes we've skipped over so far + s := r[idx[0]+1 : idx[1]-1] // remove leading "(" and trailing ")" + buf.WriteString(strings.Split(s, "|")[i]) // write the op component for this rule + x = idx[1] // note that we've written more bytes + } + buf.WriteString(r[x:]) + res[i] = buf.String() + } + return res +} + +// varCount returns a map which counts the number of occurrences of +// Value variables in the s-expression rr.Match and the Go expression rr.Cond. 
+func varCount(rr *RuleRewrite) map[string]int { + cnt := map[string]int{} + varCount1(rr.Loc, rr.Match, cnt) + if rr.Cond != "" { + expr, err := parser.ParseExpr(rr.Cond) + if err != nil { + log.Fatalf("%s: failed to parse cond %q: %v", rr.Loc, rr.Cond, err) + } + ast.Inspect(expr, func(n ast.Node) bool { + if id, ok := n.(*ast.Ident); ok { + cnt[id.Name]++ + } + return true + }) + } + return cnt +} + +func varCount1(loc, m string, cnt map[string]int) { + if m[0] == '<' || m[0] == '[' || m[0] == '{' { + return + } + if token.IsIdentifier(m) { + cnt[m]++ + return + } + // Split up input. + name, expr := splitNameExpr(m) + if name != "" { + cnt[name]++ + } + if expr[0] != '(' || expr[len(expr)-1] != ')' { + log.Fatalf("%s: non-compound expr in varCount1: %q", loc, expr) + } + s := split(expr[1 : len(expr)-1]) + for _, arg := range s[1:] { + varCount1(loc, arg, cnt) + } +} + +// normalizeWhitespace replaces 2+ whitespace sequences with a single space. +func normalizeWhitespace(x string) string { + x = strings.Join(strings.Fields(x), " ") + x = strings.Replace(x, "( ", "(", -1) + x = strings.Replace(x, " )", ")", -1) + x = strings.Replace(x, "[ ", "[", -1) + x = strings.Replace(x, " ]", "]", -1) + x = strings.Replace(x, ")=>", ") =>", -1) + return x +} + +// opIsCommutative reports whether op s is commutative. 
+func opIsCommutative(op string, arch arch) bool { + for _, x := range genericOps { + if op == x.name { + if x.commutative { + return true + } + break + } + } + if arch.name != "generic" { + for _, x := range arch.ops { + if op == x.name { + if x.commutative { + return true + } + break + } + } + } + return false +} + +func normalizeMatch(m string, arch arch) string { + if token.IsIdentifier(m) { + return m + } + op, typ, auxint, aux, args := extract(m) + if opIsCommutative(op, arch) { + if args[1] < args[0] { + args[0], args[1] = args[1], args[0] + } + } + s := new(strings.Builder) + fmt.Fprintf(s, "%s <%s> [%s] {%s}", op, typ, auxint, aux) + for _, arg := range args { + prefix, expr := splitNameExpr(arg) + fmt.Fprint(s, " ", prefix, normalizeMatch(expr, arch)) + } + return s.String() +} + +func parseEllipsisRules(rules []Rule, arch arch) (newop string, ok bool) { + if len(rules) != 1 { + for _, r := range rules { + if strings.Contains(r.Rule, "...") { + log.Fatalf("%s: found ellipsis in rule, but there are other rules with the same op", r.Loc) + } + } + return "", false + } + rule := rules[0] + match, cond, result := rule.parse() + if cond != "" || !isEllipsisValue(match) || !isEllipsisValue(result) { + if strings.Contains(rule.Rule, "...") { + log.Fatalf("%s: found ellipsis in non-ellipsis rule", rule.Loc) + } + checkEllipsisRuleCandidate(rule, arch) + return "", false + } + op, oparch, _, _, _, _ := parseValue(result, arch, rule.Loc) + return fmt.Sprintf("Op%s%s", oparch, op.name), true +} + +// isEllipsisValue reports whether s is of the form (OpX ...). +func isEllipsisValue(s string) bool { + if len(s) < 2 || s[0] != '(' || s[len(s)-1] != ')' { + return false + } + c := split(s[1 : len(s)-1]) + if len(c) != 2 || c[1] != "..." 
{ + return false + } + return true +} + +func checkEllipsisRuleCandidate(rule Rule, arch arch) { + match, cond, result := rule.parse() + if cond != "" { + return + } + op, _, _, auxint, aux, args := parseValue(match, arch, rule.Loc) + var auxint2, aux2 string + var args2 []string + var usingCopy string + var eop opData + if result[0] != '(' { + // Check for (Foo x) => x, which can be converted to (Foo ...) => (Copy ...). + args2 = []string{result} + usingCopy = " using Copy" + } else { + eop, _, _, auxint2, aux2, args2 = parseValue(result, arch, rule.Loc) + } + // Check that all restrictions in match are reproduced exactly in result. + if aux != aux2 || auxint != auxint2 || len(args) != len(args2) { + return + } + if strings.Contains(rule.Rule, "=>") && op.aux != eop.aux { + return + } + for i := range args { + if args[i] != args2[i] { + return + } + } + switch { + case opHasAux(op) && aux == "" && aux2 == "": + fmt.Printf("%s: rule silently zeros aux, either copy aux or explicitly zero\n", rule.Loc) + case opHasAuxInt(op) && auxint == "" && auxint2 == "": + fmt.Printf("%s: rule silently zeros auxint, either copy auxint or explicitly zero\n", rule.Loc) + default: + fmt.Printf("%s: possible ellipsis rule candidate%s: %q\n", rule.Loc, usingCopy, rule.Rule) + } +} + +func opByName(arch arch, name string) opData { + name = name[2:] + for _, x := range genericOps { + if name == x.name { + return x + } + } + if arch.name != "generic" { + name = name[len(arch.name):] + for _, x := range arch.ops { + if name == x.name { + return x + } + } + } + log.Fatalf("failed to find op named %s in arch %s", name, arch.name) + panic("unreachable") +} + +// auxType returns the Go type that this operation should store in its aux field. +func (op opData) auxType() string { + switch op.aux { + case "String": + return "string" + case "Sym": + // Note: a Sym can be an *obj.LSym, a *gc.Node, or nil. 
+ return "Sym" + case "SymOff": + return "Sym" + case "Call": + return "Call" + case "CallOff": + return "Call" + case "SymValAndOff": + return "Sym" + case "Typ": + return "*types.Type" + case "TypSize": + return "*types.Type" + case "S390XCCMask": + return "s390x.CCMask" + case "S390XRotateParams": + return "s390x.RotateParams" + default: + return "invalid" + } +} + +// auxIntType returns the Go type that this operation should store in its auxInt field. +func (op opData) auxIntType() string { + switch op.aux { + case "Bool": + return "bool" + case "Int8": + return "int8" + case "Int16": + return "int16" + case "Int32": + return "int32" + case "Int64": + return "int64" + case "Int128": + return "int128" + case "UInt8": + return "uint8" + case "Float32": + return "float32" + case "Float64": + return "float64" + case "CallOff": + return "int32" + case "SymOff": + return "int32" + case "SymValAndOff": + return "ValAndOff" + case "TypSize": + return "int64" + case "CCop": + return "Op" + case "FlagConstant": + return "flagConstant" + case "ARM64BitField": + return "arm64BitField" + default: + return "invalid" + } +} + +// auxType returns the Go type that this block should store in its aux field. +func (b blockData) auxType() string { + switch b.aux { + case "Sym": + return "Sym" + case "S390XCCMask", "S390XCCMaskInt8", "S390XCCMaskUint8": + return "s390x.CCMask" + case "S390XRotateParams": + return "s390x.RotateParams" + default: + return "invalid" + } +} + +// auxIntType returns the Go type that this block should store in its auxInt field. 
+func (b blockData) auxIntType() string { + switch b.aux { + case "S390XCCMaskInt8": + return "int8" + case "S390XCCMaskUint8": + return "uint8" + case "Int64": + return "int64" + default: + return "invalid" + } +} + +func title(s string) string { + if i := strings.Index(s, "."); i >= 0 { + switch strings.ToLower(s[:i]) { + case "s390x": // keep arch prefix for clarity + s = s[:i] + s[i+1:] + default: + s = s[i+1:] + } + } + return strings.Title(s) +} + +func unTitle(s string) string { + if i := strings.Index(s, "."); i >= 0 { + switch strings.ToLower(s[:i]) { + case "s390x": // keep arch prefix for clarity + s = s[:i] + s[i+1:] + default: + s = s[i+1:] + } + } + return strings.ToLower(s[:1]) + s[1:] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/b53456.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/b53456.go new file mode 100644 index 0000000000000000000000000000000000000000..8104d3ed473862adb6564a129b57801b3905d611 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/b53456.go @@ -0,0 +1,19 @@ +package main + +type T struct { + m map[int]int +} + +func main() { + t := T{ + m: make(map[int]int), + } + t.Inc(5) + t.Inc(7) +} + +func (s *T) Inc(key int) { + v := s.m[key] // break, line 16 + v++ + s.m[key] = v // also here +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/convertline.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/convertline.go new file mode 100644 index 0000000000000000000000000000000000000000..08f3ae8a35be3fcbaaf9574a08ad665287e80aa1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/convertline.go @@ -0,0 +1,16 @@ +package main + +import "fmt" + +func F[T any](n T) { + fmt.Printf("called\n") +} + +func G[T any](n T) { + F(n) + fmt.Printf("after\n") +} + +func main() { + G(3) +} diff --git 
a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/fma.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/fma.go new file mode 100644 index 0000000000000000000000000000000000000000..13a7ff1e1cb2a1aa886d74022d3bf323304d1fa8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/fma.go @@ -0,0 +1,37 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "os" +) + +//go:noinline +func f(x float64) float64 { + return x +} + +func inlineFma(x, y, z float64) float64 { + return x + y*z +} + +func main() { + w, x, y := 1.0, 1.0, 1.0 + x = f(x + x/(1<<52)) + w = f(w / (1 << 27)) + y = f(y + y/(1<<52)) + w0 := f(2 * w * (1 - w)) + w1 := f(w * (1 + w)) + x = x + w0*w1 + x = inlineFma(x, w0, w1) + y = y + f(w0*w1) + y = y + f(w0*w1) + fmt.Println(x, y, x-y) + + if x != y { + os.Exit(1) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/hist.dlv-dbg.nexts b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/hist.dlv-dbg.nexts new file mode 100644 index 0000000000000000000000000000000000000000..a0404e4786b5cfedfdf422116ca001326d8fba50 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/hist.dlv-dbg.nexts @@ -0,0 +1,99 @@ + ./testdata/hist.go +55: func test() { +57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}} +58: tinycall() // this forces l etc to stack +59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O) +60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O) +61: sink = dx + dy //gdb-opt=(dx,dy) +63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main' +64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if 
this code is in 'test' instead of 'main' +65: if len(os.Args) > 1 { +73: scanner := bufio.NewScanner(reader) +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() 
+76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +84: t := 0 +85: n := 0 +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +88: continue +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +90: t += i * a +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +90: t += i * a +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +88: continue +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +90: t += i * a +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +90: t += i * a +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +88: continue +86: for i, a := range hist { +99: } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts new file mode 100644 index 0000000000000000000000000000000000000000..2be83ce9369343ba3b9ca03ac75f65edbc875610 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts @@ -0,0 +1,94 @@ + ./testdata/hist.go +55: func test() { +57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}} +58: tinycall() // this forces l etc to stack +59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O) +60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O) +61: sink = dx + dy //gdb-opt=(dx,dy) +63: hist := make([]int, 7) 
//gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main' +64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main' +65: if len(os.Args) > 1 { +73: scanner := bufio.NewScanner(reader) +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := 
strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +91: n += a +90: t += i * a +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +91: n += a +90: t += i * a +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +91: n += a +90: t += i * a +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +91: n += a +90: t += i * a +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +86: for i, a := range hist { +99: } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/hist.gdb-dbg.nexts b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/hist.gdb-dbg.nexts new file mode 100644 index 0000000000000000000000000000000000000000..72df60c76f49076e9a9378f9ef1a2608c9e4b712 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/hist.gdb-dbg.nexts @@ -0,0 +1,123 @@ + src/cmd/compile/internal/ssa/testdata/hist.go +55: func test() { +57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}} +58: tinycall() // this forces l etc to stack +59: dx := l.end.x - 
l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O) +l.begin.x = 1 +l.end.y = 4 +60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O) +61: sink = dx + dy //gdb-opt=(dx,dy) +63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main' +64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main' +hist = {array = , len = 7, cap = 7} +65: if len(os.Args) > 1 { +73: scanner := bufio.NewScanner(reader) +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +i = 1 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +i = 1 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +i = 1 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +i = 2 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +i = 2 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +i = 2 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { 
//gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +i = 4 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +i = 4 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +i = 5 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +74: for scanner.Scan() { //gdb-opt=(scanner/A) +84: t := 0 +85: n := 0 +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +88: continue +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +90: t += i * a +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +n = 3 +i = 1 +t = 3 +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +90: t += i * a +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +n = 6 +i = 2 +t = 9 +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +88: continue +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +90: t += i * a +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +n = 8 +i = 4 +t = 17 +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +90: t += i * a +91: n += a +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +n = 9 +i = 5 +t = 22 +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +88: continue +86: for i, a := range hist { +99: } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts new file mode 100644 index 0000000000000000000000000000000000000000..d3a34acf691e7b4ba418bd5562d8d678d7828865 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts @@ -0,0 +1,143 @@ + src/cmd/compile/internal/ssa/testdata/hist.go +55: func test() { +57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}} +58: tinycall() // this forces l etc to stack +59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O) +l = {begin = {x = 1, y = 2}, end = {x = 3, y = 4}} +dx = +dy = +60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O) +dx = 2 +dy = +61: sink = dx + dy //gdb-opt=(dx,dy) +dx = 2 +dy = 2 +63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main' +dx = 2 +dy = +64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main' +65: if len(os.Args) > 1 { +73: scanner := bufio.NewScanner(reader) +74: for scanner.Scan() { //gdb-opt=(scanner/A) +scanner = (bufio.Scanner *) +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +err = {tab = 0x0, data = 0x0} +hist = {array = 0xc00005ae50, len = 7, cap = 7} +i = 1 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +err = {tab = 0x0, data = 0x0} +hist = {array = 0xc00005ae50, len = 7, cap = 7} +i = 1 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +err = {tab = 0x0, data = 0x0} +hist = {array = 0xc00005ae50, len = 7, cap = 7} +i = 1 +81: hist = ensure(int(i), hist) +82: 
hist[int(i)]++ +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +err = {tab = 0x0, data = 0x0} +hist = {array = 0xc00005ae50, len = 7, cap = 7} +i = 2 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +err = {tab = 0x0, data = 0x0} +hist = {array = 0xc00005ae50, len = 7, cap = 7} +i = 2 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +err = {tab = 0x0, data = 0x0} +hist = {array = 0xc00005ae50, len = 7, cap = 7} +i = 2 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +err = {tab = 0x0, data = 0x0} +hist = {array = 0xc00005ae50, len = 7, cap = 7} +i = 4 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +err = {tab = 0x0, data = 0x0} +hist = {array = 0xc00005ae50, len = 7, cap = 7} +i = 4 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +75: s := scanner.Text() +76: i, err := strconv.ParseInt(s, 10, 64) +77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) +err = {tab = 0x0, data = 0x0} +hist = {array = 0xc00005ae50, len = 7, cap = 7} +i = 5 +81: hist = ensure(int(i), hist) +82: hist[int(i)]++ +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +a = 0 +n = 0 +t = 0 +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +a = 3 +n = 0 +t = 0 +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +91: n += a +90: t += i * a +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +a = 3 +n = 3 +t = 3 +92: 
fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +91: n += a +90: t += i * a +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +a = 0 +n = 6 +t = 9 +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +a = 2 +n = 6 +t = 9 +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +91: n += a +90: t += i * a +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +a = 1 +n = 8 +t = 17 +92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) +91: n += a +90: t += i * a +86: for i, a := range hist { +87: if a == 0 { //gdb-opt=(a,n,t) +a = 0 +n = 9 +t = 22 +86: for i, a := range hist { +99: } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/hist.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/hist.go new file mode 100644 index 0000000000000000000000000000000000000000..f8fa6e670eebe17109d2e7575f301e7395a43558 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/hist.go @@ -0,0 +1,106 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This is the input program for an end-to-end test of the DWARF produced +// by the compiler. It is compiled with various flags, then the resulting +// binary is "debugged" under the control of a harness. Because the compile+debug +// step is time-consuming, the tests for different bugs are all accumulated here +// so that their cost is only the time to "n" through the additional code. 
+ +package main + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +type point struct { + x, y int +} + +type line struct { + begin, end point +} + +var zero int +var sink int + +//go:noinline +func tinycall() { +} + +func ensure(n int, sl []int) []int { + for len(sl) <= n { + sl = append(sl, 0) + } + return sl +} + +var cannedInput string = `1 +1 +1 +2 +2 +2 +4 +4 +5 +` + +func test() { + // For #19868 + l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}} + tinycall() // this forces l etc to stack + dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O) + dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O) + sink = dx + dy //gdb-opt=(dx,dy) + // For #21098 + hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main' + var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main' + if len(os.Args) > 1 { + var err error + reader, err = os.Open(os.Args[1]) + if err != nil { + fmt.Fprintf(os.Stderr, "There was an error opening %s: %v\n", os.Args[1], err) + return + } + } + scanner := bufio.NewScanner(reader) + for scanner.Scan() { //gdb-opt=(scanner/A) + s := scanner.Text() + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i) + fmt.Fprintf(os.Stderr, "There was an error: %v\n", err) + return + } + hist = ensure(int(i), hist) + hist[int(i)]++ + } + t := 0 + n := 0 + for i, a := range hist { + if a == 0 { //gdb-opt=(a,n,t) + continue + } + t += i * a + n += a + fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t) + } +} + +func main() { + growstack() // Use stack early to prevent growth during test, which confuses gdb + test() +} + +var snk string + +//go:noinline +func growstack() { + snk = fmt.Sprintf("%#v,%#v,%#v", 1, true, "cat") +} diff --git 
a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/i22558.dlv-dbg.nexts b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/i22558.dlv-dbg.nexts new file mode 100644 index 0000000000000000000000000000000000000000..a00934be18e0335e3c85f33c5e68e281420180cb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/i22558.dlv-dbg.nexts @@ -0,0 +1,11 @@ + ./testdata/i22558.go +19: func test(t *thing, u *thing) { +20: if t.next != nil { +23: fmt.Fprintf(os.Stderr, "%s\n", t.name) +24: u.self = u +25: t.self = t +26: t.next = u +27: for _, p := range t.stuff { +28: if isFoo(t, p) { +29: return +44: } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/i22558.gdb-dbg.nexts b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/i22558.gdb-dbg.nexts new file mode 100644 index 0000000000000000000000000000000000000000..70dfa07b87fc9b0640571267b4f54b0889036e9e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/i22558.gdb-dbg.nexts @@ -0,0 +1,11 @@ + src/cmd/compile/internal/ssa/testdata/i22558.go +19: func test(t *thing, u *thing) { +20: if t.next != nil { +23: fmt.Fprintf(os.Stderr, "%s\n", t.name) +24: u.self = u +25: t.self = t +26: t.next = u +27: for _, p := range t.stuff { +28: if isFoo(t, p) { +29: return +44: } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/i22558.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/i22558.go new file mode 100644 index 0000000000000000000000000000000000000000..8aea76c3c05224cf909a8ef6ab772df8d596a500 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/i22558.go @@ -0,0 +1,51 @@ +package main + +import ( + "fmt" + "os" +) + +type big struct { + pile [768]int8 +} + +type thing struct { + name string + next *thing + self *thing + stuff []big +} + +func test(t *thing, u *thing) { + if t.next != nil { + 
return + } + fmt.Fprintf(os.Stderr, "%s\n", t.name) + u.self = u + t.self = t + t.next = u + for _, p := range t.stuff { + if isFoo(t, p) { + return + } + } +} + +//go:noinline +func isFoo(t *thing, b big) bool { + return true +} + +func main() { + growstack() // Use stack early to prevent growth during test, which confuses gdb + t := &thing{name: "t", self: nil, next: nil, stuff: make([]big, 1)} + u := thing{name: "u", self: t, next: t, stuff: make([]big, 1)} + test(t, &u) +} + +var snk string + +//go:noinline +func growstack() { + snk = fmt.Sprintf("%#v,%#v,%#v", 1, true, "cat") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/i22600.dlv-dbg-race.nexts b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/i22600.dlv-dbg-race.nexts new file mode 100644 index 0000000000000000000000000000000000000000..18a5ff9247410a4d699e724525fd1d67d0be8d01 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/i22600.dlv-dbg-race.nexts @@ -0,0 +1,7 @@ + ./testdata/i22600.go +8: func test() { +9: pwd, err := os.Getwd() +10: if err != nil { +14: fmt.Println(pwd) +15: } +20: } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/i22600.gdb-dbg-race.nexts b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/i22600.gdb-dbg-race.nexts new file mode 100644 index 0000000000000000000000000000000000000000..46285e2078becf90baa674f750aa1298f3f9e7f1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/i22600.gdb-dbg-race.nexts @@ -0,0 +1,7 @@ + src/cmd/compile/internal/ssa/testdata/i22600.go +8: func test() { +9: pwd, err := os.Getwd() +10: if err != nil { +14: fmt.Println(pwd) +15: } +20: } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/i22600.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/i22600.go new file mode 100644 index 
0000000000000000000000000000000000000000..27f0d3d565f8694821b7054d9cd4ac6d03a2726a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/i22600.go @@ -0,0 +1,27 @@ +package main + +import ( + "fmt" + "os" +) + +func test() { + pwd, err := os.Getwd() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + fmt.Println(pwd) +} + +func main() { + growstack() // Use stack early to prevent growth during test, which confuses gdb + test() +} + +var snk string + +//go:noinline +func growstack() { + snk = fmt.Sprintf("%#v,%#v,%#v", 1, true, "cat") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts new file mode 100644 index 0000000000000000000000000000000000000000..0b9f06f85dad54b280512c6474d94954f139714f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts @@ -0,0 +1,12 @@ + ./testdata/infloop.go +6: func test() { +8: go func() {}() +10: for { +1: package main +10: for { +1: package main +10: for { +1: package main +10: for { +1: package main +10: for { diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/infloop.gdb-opt.nexts b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/infloop.gdb-opt.nexts new file mode 100644 index 0000000000000000000000000000000000000000..d465ad1396b39c57c742f6fcaae00f2dc2ce4e86 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/infloop.gdb-opt.nexts @@ -0,0 +1,4 @@ + src/cmd/compile/internal/ssa/testdata/infloop.go +6: func test() { +8: go func() {}() +10: for { diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/infloop.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/infloop.go new file mode 100644 index 
0000000000000000000000000000000000000000..cdb374fb577382c52ff5c963ed78c1930374a5bc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/infloop.go @@ -0,0 +1,16 @@ +package main + +var sink int + +//go:noinline +func test() { + // This is for #30167, incorrect line numbers in an infinite loop + go func() {}() + + for { + } +} + +func main() { + test() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/inline-dump.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/inline-dump.go new file mode 100644 index 0000000000000000000000000000000000000000..97893b6f212f986b9931fd5f678759b6e0ded95d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/inline-dump.go @@ -0,0 +1,17 @@ +package foo + +func f(m, n int) int { + a := g(n) + b := g(m) + return a + b +} + +func g(x int) int { + y := h(x + 1) + z := h(x - 1) + return y + z +} + +func h(x int) int { + return x * x +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/pushback.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/pushback.go new file mode 100644 index 0000000000000000000000000000000000000000..754e6cbb2307876bb4bed202c968f64c88658acd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/pushback.go @@ -0,0 +1,30 @@ +package main + +type Node struct { + Circular bool +} + +type ExtNode[V any] struct { + v V + Node +} + +type List[V any] struct { + root *ExtNode[V] + len int +} + +func (list *List[V]) PushBack(arg V) { + if list.len == 0 { + list.root = &ExtNode[V]{v: arg} + list.root.Circular = true + list.len++ + return + } + list.len++ +} + +func main() { + var v List[int] + v.PushBack(1) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/sayhi.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/sayhi.go new file mode 100644 index 
0000000000000000000000000000000000000000..680e1eb3a18f35ee0190a5673416cd292df5d72d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/sayhi.go @@ -0,0 +1,12 @@ +package foo + +import ( + "fmt" + "sync" +) + +func sayhi(n int, wg *sync.WaitGroup) { + fmt.Println("hi", n) + fmt.Println("hi", n) + wg.Done() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/scopes.dlv-dbg.nexts b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/scopes.dlv-dbg.nexts new file mode 100644 index 0000000000000000000000000000000000000000..f182ff45d45dfae92bdbe344a53b7dcb6dcf11e9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/scopes.dlv-dbg.nexts @@ -0,0 +1,56 @@ + ./testdata/scopes.go +22: func test() { +23: x := id(0) +24: y := id(0) +25: fmt.Println(x) +26: for i := x; i < 3; i++ { +27: x := i * i +28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y) +26: for i := x; i < 3; i++ { +27: x := i * i +28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y) +26: for i := x; i < 3; i++ { +27: x := i * i +28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y) +26: for i := x; i < 3; i++ { +30: y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y) +31: fmt.Println(x, y) +33: for x := 0; x <= 1; x++ { // From delve scopetest.go +34: a := y +35: f1(a) +37: b := 0 +38: f2(b) +39: if gretbool() { +40: c := 0 +41: f3(c) +46: f5(b) +48: f6(a) +33: for x := 0; x <= 1; x++ { // From delve scopetest.go +34: a := y +35: f1(a) +37: b := 0 +38: f2(b) +39: if gretbool() { +43: c := 1.1 +44: f4(int(c)) +46: f5(b) +48: f6(a) +33: for x := 0; x <= 1; x++ { // From delve scopetest.go +53: j = id(1) +54: f = id(2) +56: for i := 0; i <= 5; i++ { +57: j += j * (j ^ 3) / 100 +58: if i == f { +62: sleepytime() +56: for i := 0; i <= 5; i++ { +57: j += j * (j ^ 3) / 100 +58: if i == f { +62: sleepytime() +56: for i := 0; i <= 5; i++ { +57: j += j * (j ^ 3) / 100 +58: if i == f { +59: fmt.Println("foo") +60: break +64: helloworld() 
+66: } +15: } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts new file mode 100644 index 0000000000000000000000000000000000000000..b5e41aa906e8c131ee887a5351cbd14152a41ab6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts @@ -0,0 +1,46 @@ + ./testdata/scopes.go +22: func test() { +23: x := id(0) +24: y := id(0) +25: fmt.Println(x) +26: for i := x; i < 3; i++ { +27: x := i * i +28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y) +26: for i := x; i < 3; i++ { +27: x := i * i +28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y) +26: for i := x; i < 3; i++ { +27: x := i * i +28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y) +26: for i := x; i < 3; i++ { +31: fmt.Println(x, y) +30: y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y) +31: fmt.Println(x, y) +33: for x := 0; x <= 1; x++ { // From delve scopetest.go +35: f1(a) +38: f2(b) +39: if gretbool() { +41: f3(c) +46: f5(b) +48: f6(a) +33: for x := 0; x <= 1; x++ { // From delve scopetest.go +35: f1(a) +38: f2(b) +39: if gretbool() { +44: f4(int(c)) +46: f5(b) +48: f6(a) +33: for x := 0; x <= 1; x++ { // From delve scopetest.go +53: j = id(1) +54: f = id(2) +56: for i := 0; i <= 5; i++ { +58: if i == f { +62: sleepytime() +56: for i := 0; i <= 5; i++ { +58: if i == f { +62: sleepytime() +56: for i := 0; i <= 5; i++ { +58: if i == f { +59: fmt.Println("foo") +64: helloworld() +15: } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/scopes.gdb-dbg.nexts b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/scopes.gdb-dbg.nexts new file mode 100644 index 0000000000000000000000000000000000000000..6eb490391113a7d7f5df008a9ab07da6133550ce --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/scopes.gdb-dbg.nexts @@ -0,0 +1,64 @@ + 
src/cmd/compile/internal/ssa/testdata/scopes.go +22: func test() { +23: x := id(0) +24: y := id(0) +25: fmt.Println(x) +0: +27: x := i * i +28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y) +x = 0 +y = 0 +26: for i := x; i < 3; i++ { +27: x := i * i +28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y) +x = 1 +y = 0 +26: for i := x; i < 3; i++ { +27: x := i * i +28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y) +x = 4 +y = 1 +26: for i := x; i < 3; i++ { +30: y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y) +x = 0 +y = 5 +31: fmt.Println(x, y) +0: 5 +34: a := y +35: f1(a) +37: b := 0 +38: f2(b) +39: if gretbool() { +40: c := 0 +41: f3(c) +46: f5(b) +48: f6(a) +33: for x := 0; x <= 1; x++ { // From delve scopetest.go +34: a := y +35: f1(a) +37: b := 0 +38: f2(b) +39: if gretbool() { +43: c := 1.1 +44: f4(int(c)) +46: f5(b) +48: f6(a) +33: for x := 0; x <= 1; x++ { // From delve scopetest.go +53: j = id(1) +54: f = id(2) +56: for i := 0; i <= 5; i++ { +57: j += j * (j ^ 3) / 100 +58: if i == f { +62: sleepytime() +56: for i := 0; i <= 5; i++ { +57: j += j * (j ^ 3) / 100 +58: if i == f { +62: sleepytime() +56: for i := 0; i <= 5; i++ { +57: j += j * (j ^ 3) / 100 +58: if i == f { +59: fmt.Println("foo") +60: break +64: helloworld() +66: } +15: } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts new file mode 100644 index 0000000000000000000000000000000000000000..5a186b5440b512fd4d7137b38e570917ff88a045 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts @@ -0,0 +1,55 @@ + src/cmd/compile/internal/ssa/testdata/scopes.go +22: func test() { +23: x := id(0) +24: y := id(0) +25: fmt.Println(x) +0: +27: x := i * i +28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y) +x = 0 +y = 0 +26: for i := x; i < 3; i++ { +27: x := i * i +28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y) +x = 1 +y = 0 +26: for i := x; i < 
3; i++ { +27: x := i * i +28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y) +x = 4 +y = 1 +26: for i := x; i < 3; i++ { +31: fmt.Println(x, y) +30: y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y) +x = 0 +y = 5 +31: fmt.Println(x, y) +0: 5 +35: f1(a) +38: f2(b) +39: if gretbool() { +41: f3(c) +46: f5(b) +48: f6(a) +33: for x := 0; x <= 1; x++ { // From delve scopetest.go +35: f1(a) +38: f2(b) +39: if gretbool() { +44: f4(int(c)) +46: f5(b) +48: f6(a) +33: for x := 0; x <= 1; x++ { // From delve scopetest.go +53: j = id(1) +54: f = id(2) +56: for i := 0; i <= 5; i++ { +58: if i == f { +62: sleepytime() +56: for i := 0; i <= 5; i++ { +58: if i == f { +62: sleepytime() +56: for i := 0; i <= 5; i++ { +58: if i == f { +59: fmt.Println("foo") +64: helloworld() +66: } +15: } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/scopes.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/scopes.go new file mode 100644 index 0000000000000000000000000000000000000000..e93d69936fb2d3120d5a002ee3b37d7ae4039527 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/ssa/testdata/scopes.go @@ -0,0 +1,107 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" + "time" +) + +func main() { + growstack() // Use stack early to prevent growth during test, which confuses gdb + test() +} + +//go:noinline +func id(x int) int { + return x +} + +func test() { + x := id(0) + y := id(0) + fmt.Println(x) + for i := x; i < 3; i++ { + x := i * i + y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y) + } + y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y) + fmt.Println(x, y) + + for x := 0; x <= 1; x++ { // From delve scopetest.go + a := y + f1(a) + { + b := 0 + f2(b) + if gretbool() { + c := 0 + f3(c) + } else { + c := 1.1 + f4(int(c)) + } + f5(b) + } + f6(a) + } + + { // From delve testnextprog.go + var ( + j = id(1) + f = id(2) + ) + for i := 0; i <= 5; i++ { + j += j * (j ^ 3) / 100 + if i == f { + fmt.Println("foo") + break + } + sleepytime() + } + helloworld() + } +} + +func sleepytime() { + time.Sleep(5 * time.Millisecond) +} + +func helloworld() { + fmt.Println("Hello, World!") +} + +//go:noinline +func f1(x int) {} + +//go:noinline +func f2(x int) {} + +//go:noinline +func f3(x int) {} + +//go:noinline +func f4(x int) {} + +//go:noinline +func f5(x int) {} + +//go:noinline +func f6(x int) {} + +var boolvar = true + +func gretbool() bool { + x := boolvar + boolvar = !boolvar + return x +} + +var sink string + +//go:noinline +func growstack() { + sink = fmt.Sprintf("%#v,%#v,%#v", 1, true, "cat") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/chans.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/chans.go new file mode 100644 index 0000000000000000000000000000000000000000..d4c4207a4caaf4a407202acf5dad0c50a5fa80a3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/chans.go @@ -0,0 +1,66 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package chans + +import "runtime" + +// Ranger returns a Sender and a Receiver. The Receiver provides a +// Next method to retrieve values. The Sender provides a Send method +// to send values and a Close method to stop sending values. The Next +// method indicates when the Sender has been closed, and the Send +// method indicates when the Receiver has been freed. +// +// This is a convenient way to exit a goroutine sending values when +// the receiver stops reading them. +func Ranger[T any]() (*Sender[T], *Receiver[T]) { + c := make(chan T) + d := make(chan bool) + s := &Sender[T]{values: c, done: d} + r := &Receiver[T]{values: c, done: d} + runtime.SetFinalizer(r, r.finalize) + return s, r +} + +// A sender is used to send values to a Receiver. +type Sender[T any] struct { + values chan<- T + done <-chan bool +} + +// Send sends a value to the receiver. It returns whether any more +// values may be sent; if it returns false the value was not sent. +func (s *Sender[T]) Send(v T) bool { + select { + case s.values <- v: + return true + case <-s.done: + return false + } +} + +// Close tells the receiver that no more values will arrive. +// After Close is called, the Sender may no longer be used. +func (s *Sender[T]) Close() { + close(s.values) +} + +// A Receiver receives values from a Sender. +type Receiver[T any] struct { + values <-chan T + done chan<- bool +} + +// Next returns the next value from the channel. The bool result +// indicates whether the value is valid, or whether the Sender has +// been closed and no more values will be received. +func (r *Receiver[T]) Next() (T, bool) { + v, ok := <-r.values + return v, ok +} + +// finalize is a finalizer for the receiver. 
+func (r *Receiver[T]) finalize() { + close(r.done) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/fallthrough.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/fallthrough.go new file mode 100644 index 0000000000000000000000000000000000000000..851da81ea04584891a03cc593da9f4d52fbdb694 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/fallthrough.go @@ -0,0 +1,55 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fallthroughs + +func _() { + var x int + switch x { + case 0: + fallthrough + + case 1: + fallthrough // ERROR fallthrough statement out of place + { + } + + case 2: + { + fallthrough // ERROR fallthrough statement out of place + } + + case 3: + for { + fallthrough // ERROR fallthrough statement out of place + } + + case 4: + fallthrough // trailing empty statements are ok + ; + ; + + case 5: + fallthrough + + default: + fallthrough // ERROR cannot fallthrough final case in switch + } + + fallthrough // ERROR fallthrough statement out of place + + if true { + fallthrough // ERROR fallthrough statement out of place + } + + for { + fallthrough // ERROR fallthrough statement out of place + } + + var t any + switch t.(type) { + case int: + fallthrough // ERROR cannot fallthrough in type switch + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/interface.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/interface.go new file mode 100644 index 0000000000000000000000000000000000000000..dbc41879896920495d064f92cb1c16cc0c8d71fb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/interface.go @@ -0,0 +1,74 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains test cases for interfaces containing +// constraint elements. + +package p + +type _ interface { + m() + E +} + +type _ interface { + m() + ~int + int | string + int | ~string + ~int | ~string +} + +type _ interface { + m() + ~int + T[int, string] | string + int | ~T[string, struct{}] + ~int | ~string +} + +type _ interface { + int + []byte + [10]int + struct{} + *int + func() + interface{} + map[string]int + chan T + chan<- T + <-chan T + T[int] +} + +type _ interface { + int | string + []byte | string + [10]int | string + struct{} | string + *int | string + func() | string + interface{} | string + map[string]int | string + chan T | string + chan<- T | string + <-chan T | string + T[int] | string +} + +type _ interface { + ~int | string + ~[]byte | string + ~[10]int | string + ~struct{} | string + ~*int | string + ~func() | string + ~interface{} | string + ~map[string]int | string + ~chan T | string + ~chan<- T | string + ~<-chan T | string + ~T[int] | string +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue20789.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue20789.go new file mode 100644 index 0000000000000000000000000000000000000000..0d5988b9a608c21316be48ba481f5f81a4994985 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue20789.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Make sure this doesn't crash the compiler. +// Line 9 must end in EOF for this test (no newline). 
+ +package e +func([<-chan<-[func /* ERROR unexpected u */ u){go \ No newline at end of file diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue23385.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue23385.go new file mode 100644 index 0000000000000000000000000000000000000000..2459a7369bc1e53c288899b1d35241916d7a1088 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue23385.go @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Check error message for use of = instead of == . + +package p + +func _() { + if true || 0 /* ERROR cannot use assignment .* as value */ = 1 { + } +} + +func _(a, b string) { + if a == "a" && b /* ERROR cannot use assignment .* as value */ = "b" { + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue23434.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue23434.go new file mode 100644 index 0000000000000000000000000000000000000000..e436abfecbb2ad8a173638fdfc4c6e4f24279b55 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue23434.go @@ -0,0 +1,31 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test case for go.dev/issue/23434: Better synchronization of +// parser after missing type. There should be exactly +// one error each time, with now follow errors. + +package p + +type T /* ERROR unexpected newline */ + +type Map map[int] /* ERROR unexpected newline */ + +// Examples from go.dev/issue/23434: + +func g() { + m := make(map[string] /* ERROR unexpected ! */ !) 
+ for { + x := 1 + print(x) + } +} + +func f() { + m := make(map[string] /* ERROR unexpected \) */ ) + for { + x := 1 + print(x) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue31092.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue31092.go new file mode 100644 index 0000000000000000000000000000000000000000..0bd40bd7cd629ef4ef516b51f1c793778aac776a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue31092.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test cases for go.dev/issue/31092: Better synchronization of +// parser after seeing an := rather than an = in a const, +// type, or variable declaration. + +package p + +const _ /* ERROR unexpected := */ := 0 +type _ /* ERROR unexpected := */ := int +var _ /* ERROR unexpected := */ := 0 + +const _ int /* ERROR unexpected := */ := 0 +var _ int /* ERROR unexpected := */ := 0 diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue43527.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue43527.go new file mode 100644 index 0000000000000000000000000000000000000000..99a8c0965df1cc0557d416ee0c13b0dbf739b173 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue43527.go @@ -0,0 +1,23 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +type ( + // 0 and 1-element []-lists are syntactically valid + _[A, B /* ERROR missing type constraint */ ] int + _[A, /* ERROR missing type parameter name */ interface{}] int + _[A, B, C /* ERROR missing type constraint */ ] int + _[A B, C /* ERROR missing type constraint */ ] int + _[A B, /* ERROR missing type parameter name */ interface{}] int + _[A B, /* ERROR missing type parameter name */ interface{}, C D] int + _[A B, /* ERROR missing type parameter name */ interface{}, C, D] int + _[A B, /* ERROR missing type parameter name */ interface{}, C, interface{}] int + _[A B, C interface{}, D, /* ERROR missing type parameter name */ interface{}] int +) + +// function type parameters use the same parsing routine - just have a couple of tests + +func _[A, B /* ERROR missing type constraint */ ]() {} +func _[A, /* ERROR missing type parameter name */ interface{}]() {} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue43674.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue43674.go new file mode 100644 index 0000000000000000000000000000000000000000..51c692ae69fd251a676ee6f0d20cbf424881e26f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue43674.go @@ -0,0 +1,13 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +func _(... /* ERROR [.][.][.] is missing type */ ) +func _(... /* ERROR [.][.][.] is missing type */ , int) + +func _(a, b ... /* ERROR [.][.][.] is missing type */ ) +func _(a, b ... /* ERROR [.][.][.] is missing type */ , x int) + +func _()(... /* ERROR [.][.][.] 
is missing type */ ) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue46558.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue46558.go new file mode 100644 index 0000000000000000000000000000000000000000..a22b6008258464503dd41b037a3094dccec10ebb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue46558.go @@ -0,0 +1,14 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +func F(s string) { + switch s[0] { + case 'a': + case s[2] { // ERROR unexpected { + case 'b': + } + } +} // ERROR non-declaration statement diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue47704.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue47704.go new file mode 100644 index 0000000000000000000000000000000000000000..e4cdad148f62c01c8767b8c3c96cd0851f3ac6c4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue47704.go @@ -0,0 +1,17 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +func _() { + _ = m[] // ERROR expected operand + _ = m[x,] + _ = m[x /* ERROR unexpected a */ a b c d] +} + +// test case from the issue +func f(m map[int]int) int { + return m[0 // ERROR expected comma, \: or \] + ] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue48382.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue48382.go new file mode 100644 index 0000000000000000000000000000000000000000..7c024a051f6cf6fccbfb48023fba123f4391cd61 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue48382.go @@ -0,0 +1,16 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +type _ func /* ERROR function type must have no type parameters */ [ /* ERROR empty type parameter list */ ]() +type _ func /* ERROR function type must have no type parameters */ [ x /* ERROR missing type constraint */ ]() +type _ func /* ERROR function type must have no type parameters */ [P any]() + +var _ = (func /* ERROR function type must have no type parameters */ [P any]())(nil) +var _ = func /* ERROR function type must have no type parameters */ [P any]() {} + +type _ interface{ + m /* ERROR interface method must have no type parameters */ [P any]() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue49205.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue49205.go new file mode 100644 index 0000000000000000000000000000000000000000..bbcc950c5c790548e9991f303690f58e0e4eb360 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue49205.go @@ -0,0 +1,27 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +// test case from issue + +type _ interface{ + m /* ERROR unexpected int in interface type; possibly missing semicolon or newline or } */ int +} + +// other cases where the fix for this issue affects the error message + +const ( + x int = 10 /* ERROR unexpected literal "foo" in grouped declaration; possibly missing semicolon or newline or \) */ "foo" +) + +var _ = []int{1, 2, 3 /* ERROR unexpected int in composite literal; possibly missing comma or } */ int } + +type _ struct { + x y /* ERROR syntax error: unexpected comma in struct type; possibly missing semicolon or newline or } */ , +} + +func f(a, b c /* ERROR unexpected d in parameter list; possibly missing comma or \) */ d) { + f(a, b, c /* ERROR unexpected d in argument list; possibly missing comma or \) */ d) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue49482.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue49482.go new file mode 100644 index 0000000000000000000000000000000000000000..1fc303d169bd97f0f2771a7fc506bc984e196a42 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue49482.go @@ -0,0 +1,31 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +type ( + // these need a comma to disambiguate + _[P *T,] struct{} + _[P *T, _ any] struct{} + _[P (*T),] struct{} + _[P (*T), _ any] struct{} + _[P (T),] struct{} + _[P (T), _ any] struct{} + + // these parse as name followed by type + _[P *struct{}] struct{} + _[P (*struct{})] struct{} + _[P ([]int)] struct{} + + // array declarations + _ [P(T)]struct{} + _ [P((T))]struct{} + _ [P * *T] struct{} // this could be a name followed by a type but it makes the rules more complicated + _ [P * T]struct{} + _ [P(*T)]struct{} + _ [P(**T)]struct{} + _ [P * T - T]struct{} + _ [P*T-T /* ERROR unexpected comma */ ,]struct{} + _ [10 /* ERROR unexpected comma */ ,]struct{} +) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue52391.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue52391.go new file mode 100644 index 0000000000000000000000000000000000000000..f2098ceadb2c467f21afcfbbebf6af1c849ac700 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue52391.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +type _ interface { + int + (int) + (*int) + *([]byte) + ~(int) + (int) | (string) + (int) | ~(string) + (/* ERROR unexpected ~ */ ~int) + (int /* ERROR unexpected \| */ | /* ERROR unexpected string */ string /* ERROR unexpected \) */ ) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue56022.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue56022.go new file mode 100644 index 0000000000000000000000000000000000000000..d28d35cd8e3a8bf215ed5e21114f465b08e1ff3c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue56022.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +func /* ERROR unexpected {, expected name or \($ */ {} +func (T) /* ERROR unexpected {, expected name$ */ {} +func (T) /* ERROR unexpected \(, expected name$ */ () {} +func (T) /* ERROR unexpected \(, expected name$ */ () diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue60599.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue60599.go new file mode 100644 index 0000000000000000000000000000000000000000..711d97bde0a1a75cb62131bdbf75e53ee1cf3a26 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue60599.go @@ -0,0 +1,11 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +func _(x, y, z int) { + if x /* ERROR cannot use assignment x = y as value */ = y {} + if x || y /* ERROR cannot use assignment \(x || y\) = z as value */ = z {} + if x /* ERROR cannot use assignment x = \(y || z\) as value */ = y || z {} +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue63835.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue63835.go new file mode 100644 index 0000000000000000000000000000000000000000..3d165c016ea38a377652fed962e5079500b8859e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/issue63835.go @@ -0,0 +1,9 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +func (x string) /* ERROR syntax error: unexpected \[, expected name */ []byte { + return []byte(x) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/linalg.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/linalg.go new file mode 100644 index 0000000000000000000000000000000000000000..822d0287e7490ada0681ce74a10de3ab0122c765 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/linalg.go @@ -0,0 +1,83 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package linalg + +import "math" + +// Numeric is type bound that matches any numeric type. +// It would likely be in a constraints package in the standard library. +type Numeric interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | + uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | + float32 | ~float64 | + complex64 | ~complex128 +} + +func DotProduct[T Numeric](s1, s2 []T) T { + if len(s1) != len(s2) { + panic("DotProduct: slices of unequal length") + } + var r T + for i := range s1 { + r += s1[i] * s2[i] + } + return r +} + +// NumericAbs matches numeric types with an Abs method. +type NumericAbs[T any] interface { + Numeric + + Abs() T +} + +// AbsDifference computes the absolute value of the difference of +// a and b, where the absolute value is determined by the Abs method. +func AbsDifference[T NumericAbs[T]](a, b T) T { + d := a - b + return d.Abs() +} + +// OrderedNumeric is a type bound that matches numeric types that support the < operator. +type OrderedNumeric interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | + uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | + float32 | ~float64 +} + +// Complex is a type bound that matches the two complex types, which do not have a < operator. 
+type Complex interface { + ~complex64 | ~complex128 +} + +// OrderedAbs is a helper type that defines an Abs method for +// ordered numeric types. +type OrderedAbs[T OrderedNumeric] T + +func (a OrderedAbs[T]) Abs() OrderedAbs[T] { + if a < 0 { + return -a + } + return a +} + +// ComplexAbs is a helper type that defines an Abs method for +// complex types. +type ComplexAbs[T Complex] T + +func (a ComplexAbs[T]) Abs() ComplexAbs[T] { + r := float64(real(a)) + i := float64(imag(a)) + d := math.Sqrt(r * r + i * i) + return ComplexAbs[T](complex(d, 0)) +} + +func OrderedAbsDifference[T OrderedNumeric](a, b T) T { + return T(AbsDifference(OrderedAbs[T](a), OrderedAbs[T](b))) +} + +func ComplexAbsDifference[T Complex](a, b T) T { + return T(AbsDifference(ComplexAbs[T](a), ComplexAbs[T](b))) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/map.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/map.go new file mode 100644 index 0000000000000000000000000000000000000000..a508d214b8a4a0bd17ce7bf6e41a92d9fa35f174 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/map.go @@ -0,0 +1,112 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package orderedmap provides an ordered map, implemented as a binary tree. +package orderedmap + +import "chans" + +// Map is an ordered map. +type Map[K, V any] struct { + root *node[K, V] + compare func(K, K) int +} + +// node is the type of a node in the binary tree. +type node[K, V any] struct { + key K + val V + left, right *node[K, V] +} + +// New returns a new map. +func New[K, V any](compare func(K, K) int) *Map[K, V] { + return &Map[K, V]{compare: compare} +} + +// find looks up key in the map, and returns either a pointer +// to the node holding key, or a pointer to the location where +// such a node would go. 
+func (m *Map[K, V]) find(key K) **node[K, V] { + pn := &m.root + for *pn != nil { + switch cmp := m.compare(key, (*pn).key); { + case cmp < 0: + pn = &(*pn).left + case cmp > 0: + pn = &(*pn).right + default: + return pn + } + } + return pn +} + +// Insert inserts a new key/value into the map. +// If the key is already present, the value is replaced. +// Returns true if this is a new key, false if already present. +func (m *Map[K, V]) Insert(key K, val V) bool { + pn := m.find(key) + if *pn != nil { + (*pn).val = val + return false + } + *pn = &node[K, V]{key: key, val: val} + return true +} + +// Find returns the value associated with a key, or zero if not present. +// The found result reports whether the key was found. +func (m *Map[K, V]) Find(key K) (V, bool) { + pn := m.find(key) + if *pn == nil { + var zero V // see the discussion of zero values, above + return zero, false + } + return (*pn).val, true +} + +// keyValue is a pair of key and value used when iterating. +type keyValue[K, V any] struct { + key K + val V +} + +// InOrder returns an iterator that does an in-order traversal of the map. +func (m *Map[K, V]) InOrder() *Iterator[K, V] { + sender, receiver := chans.Ranger[keyValue[K, V]]() + var f func(*node[K, V]) bool + f = func(n *node[K, V]) bool { + if n == nil { + return true + } + // Stop sending values if sender.Send returns false, + // meaning that nothing is listening at the receiver end. + return f(n.left) && + sender.Send(keyValue[K, V]{n.key, n.val}) && + f(n.right) + } + go func() { + f(m.root) + sender.Close() + }() + return &Iterator[K, V]{receiver} +} + +// Iterator is used to iterate over the map. +type Iterator[K, V any] struct { + r *chans.Receiver[keyValue[K, V]] +} + +// Next returns the next key and value pair, and a boolean indicating +// whether they are valid or whether we have reached the end. 
+func (it *Iterator[K, V]) Next() (K, V, bool) { + keyval, ok := it.r.Next() + if !ok { + var zerok K + var zerov V + return zerok, zerov, false + } + return keyval.key, keyval.val, true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/map2.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/map2.go new file mode 100644 index 0000000000000000000000000000000000000000..3d1cbfbd22d4f287e95a6b50c218c84f449ef3a6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/map2.go @@ -0,0 +1,146 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is like map.go, but instead of importing chans, it contains +// the necessary functionality at the end of the file. + +// Package orderedmap provides an ordered map, implemented as a binary tree. +package orderedmap + +// Map is an ordered map. +type Map[K, V any] struct { + root *node[K, V] + compare func(K, K) int +} + +// node is the type of a node in the binary tree. +type node[K, V any] struct { + key K + val V + left, right *node[K, V] +} + +// New returns a new map. +func New[K, V any](compare func(K, K) int) *Map[K, V] { + return &Map[K, V]{compare: compare} +} + +// find looks up key in the map, and returns either a pointer +// to the node holding key, or a pointer to the location where +// such a node would go. +func (m *Map[K, V]) find(key K) **node[K, V] { + pn := &m.root + for *pn != nil { + switch cmp := m.compare(key, (*pn).key); { + case cmp < 0: + pn = &(*pn).left + case cmp > 0: + pn = &(*pn).right + default: + return pn + } + } + return pn +} + +// Insert inserts a new key/value into the map. +// If the key is already present, the value is replaced. +// Returns true if this is a new key, false if already present. 
+func (m *Map[K, V]) Insert(key K, val V) bool { + pn := m.find(key) + if *pn != nil { + (*pn).val = val + return false + } + *pn = &node[K, V]{key: key, val: val} + return true +} + +// Find returns the value associated with a key, or zero if not present. +// The found result reports whether the key was found. +func (m *Map[K, V]) Find(key K) (V, bool) { + pn := m.find(key) + if *pn == nil { + var zero V // see the discussion of zero values, above + return zero, false + } + return (*pn).val, true +} + +// keyValue is a pair of key and value used when iterating. +type keyValue[K, V any] struct { + key K + val V +} + +// InOrder returns an iterator that does an in-order traversal of the map. +func (m *Map[K, V]) InOrder() *Iterator[K, V] { + sender, receiver := chans_Ranger[keyValue[K, V]]() + var f func(*node[K, V]) bool + f = func(n *node[K, V]) bool { + if n == nil { + return true + } + // Stop sending values if sender.Send returns false, + // meaning that nothing is listening at the receiver end. + return f(n.left) && + sender.Send(keyValue[K, V]{n.key, n.val}) && + f(n.right) + } + go func() { + f(m.root) + sender.Close() + }() + return &Iterator[K, V]{receiver} +} + +// Iterator is used to iterate over the map. +type Iterator[K, V any] struct { + r *chans_Receiver[keyValue[K, V]] +} + +// Next returns the next key and value pair, and a boolean indicating +// whether they are valid or whether we have reached the end. +func (it *Iterator[K, V]) Next() (K, V, bool) { + keyval, ok := it.r.Next() + if !ok { + var zerok K + var zerov V + return zerok, zerov, false + } + return keyval.key, keyval.val, true +} + +// chans + +func chans_Ranger[T any]() (*chans_Sender[T], *chans_Receiver[T]) + +// A sender is used to send values to a Receiver. 
+type chans_Sender[T any] struct { + values chan<- T + done <-chan bool +} + +func (s *chans_Sender[T]) Send(v T) bool { + select { + case s.values <- v: + return true + case <-s.done: + return false + } +} + +func (s *chans_Sender[T]) Close() { + close(s.values) +} + +type chans_Receiver[T any] struct { + values <-chan T + done chan<- bool +} + +func (r *chans_Receiver[T]) Next() (T, bool) { + v, ok := <-r.values + return v, ok +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/sample.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/sample.go new file mode 100644 index 0000000000000000000000000000000000000000..5a2b4bf0c46cb2cdf2845624afa18400ee94e59d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/sample.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This is a sample test file illustrating the use +// of error comments with the error test harness. + +package p + +// The following are invalid error comments; they are +// silently ignored. The prefix must be exactly one of +// "/* ERROR " or "// ERROR ". +// +/*ERROR*/ +/*ERROR foo*/ +/* ERRORfoo */ +/* ERROR foo */ +//ERROR +// ERROR +// ERRORfoo +// ERROR foo + +// This is a valid error comment; it applies to the +// immediately following token. +import "math" /* ERROR unexpected comma */ , + +// If there are multiple /*-style error comments before +// the next token, only the last one is considered. +type x = /* ERROR ignored */ /* ERROR literal 0 in type declaration */ 0 + +// A //-style error comment matches any error position +// on the same line. 
+func () foo() // ERROR method has no receiver diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/slices.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/slices.go new file mode 100644 index 0000000000000000000000000000000000000000..92651095568d7ee80bf83e52f511287e5b1772cd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/slices.go @@ -0,0 +1,68 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slices implements various slice algorithms. +package slices + +// Map turns a []T1 to a []T2 using a mapping function. +func Map[T1, T2 any](s []T1, f func(T1) T2) []T2 { + r := make([]T2, len(s)) + for i, v := range s { + r[i] = f(v) + } + return r +} + +// Reduce reduces a []T1 to a single value using a reduction function. +func Reduce[T1, T2 any](s []T1, initializer T2, f func(T2, T1) T2) T2 { + r := initializer + for _, v := range s { + r = f(r, v) + } + return r +} + +// Filter filters values from a slice using a filter function. 
+func Filter[T any](s []T, f func(T) bool) []T { + var r []T + for _, v := range s { + if f(v) { + r = append(r, v) + } + } + return r +} + +// Example uses + +func limiter(x int) byte { + switch { + case x < 0: + return 0 + default: + return byte(x) + case x > 255: + return 255 + } +} + +var input = []int{-4, 68954, 7, 44, 0, -555, 6945} +var limited1 = Map[int, byte](input, limiter) +var limited2 = Map(input, limiter) // using type inference + +func reducer(x float64, y int) float64 { + return x + float64(y) +} + +var reduced1 = Reduce[int, float64](input, 0, reducer) +var reduced2 = Reduce(input, 1i, reducer) // using type inference +var reduced3 = Reduce(input, 1, reducer) // using type inference + +func filter(x int) bool { + return x&1 != 0 +} + +var filtered1 = Filter[int](input, filter) +var filtered2 = Filter(input, filter) // using type inference + diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/smoketest.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/smoketest.go new file mode 100644 index 0000000000000000000000000000000000000000..6b3593ac7ad1226299c73e84c131cd8c23999f1f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/smoketest.go @@ -0,0 +1,73 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains basic generic code snippets. 
+ +package p + +// type parameter lists +type B[P any] struct{} +type _[P interface{}] struct{} +type _[P B] struct{} +type _[P B[P]] struct{} + +type _[A, B, C any] struct{} +type _[A, B, C B] struct{} +type _[A, B, C B[A, B, C]] struct{} +type _[A1, A2 B1, A3 B2, A4, A5, A6 B3] struct{} + +type _[A interface{}] struct{} +type _[A, B interface{ m() }] struct{} + +type _[A, B, C any] struct{} + +// in functions +func _[P any]() +func _[P interface{}]() +func _[P B]() +func _[P B[P]]() + +// type instantiations +type _ T[int] + +// in expressions +var _ = T[int]{} + +// in embedded types +type _ struct{ T[int] } + +// interfaces +type _ interface { + m() + ~int +} + +type _ interface { + ~int | ~float | ~string + ~complex128 + underlying(underlying underlying) underlying +} + +type _ interface { + T + T[int] +} + +// tricky cases +func _(T[P], T[P1, P2]) +func _(a [N]T) + +type _ struct { + T[P] + T[P1, P2] + f[N] +} +type _ interface { + m() + + // instantiated types + T[ /* ERROR empty type argument list */ ] + T[P] + T[P1, P2] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/tparams.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/tparams.go new file mode 100644 index 0000000000000000000000000000000000000000..4b68a1585f9635861a19d3cc4f2f8af2f8d79e94 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/tparams.go @@ -0,0 +1,57 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +type t[a, b /* ERROR missing type constraint */ ] struct{} +type t[a t, b t, c /* ERROR missing type constraint */ ] struct{} +type t struct { + t [n]byte + t[a] + t[a, b] +} +type t interface { + t[a] + m /* ERROR method must have no type parameters */ [_ _, /* ERROR mixed */ _]() + t[a, b] +} + +func f[ /* ERROR empty type parameter list */ ]() +func f[a, b /* ERROR missing type constraint */ ]() +func f[a t, b t, c /* ERROR missing type constraint */ ]() + +func f[a b, /* ERROR expected ] */ 0] () + +// go.dev/issue/49482 +type ( + t[a *[]int] struct{} + t[a *t,] struct{} + t[a *t|[]int] struct{} + t[a *t|t,] struct{} + t[a *t|~t,] struct{} + t[a *struct{}|t] struct{} + t[a *t|struct{}] struct{} + t[a *struct{}|~t] struct{} +) + +// go.dev/issue/51488 +type ( + t[a *t|t,] struct{} + t[a *t|t, b t] struct{} + t[a *t|t] struct{} + t[a *[]t|t] struct{} + t[a ([]t)] struct{} + t[a ([]t)|t] struct{} +) + +// go.dev/issue/60812 +type ( + t [t]struct{} + t [[]t]struct{} + t [[t]t]struct{} + t [/* ERROR missing type parameter name or invalid array length */ t[t]]struct{} + t [t t[t], /* ERROR missing type parameter name */ t[t]]struct{} + t [/* ERROR missing type parameter name */ t[t], t t[t]]struct{} + t [/* ERROR missing type parameter name */ t[t], t[t]]struct{} // report only first error +) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/typeset.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/typeset.go new file mode 100644 index 0000000000000000000000000000000000000000..819025c1aa291f9a13ff93a3194b0947285dd045 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/syntax/testdata/typeset.go @@ -0,0 +1,91 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains test cases for typeset-only constraint elements. 
+ +package p + +type ( + _[_ t] t + _[_ ~t] t + _[_ t|t] t + _[_ ~t|t] t + _[_ t|~t] t + _[_ ~t|~t] t + + _[_ t, _, _ t|t] t + _[_ t, _, _ ~t|t] t + _[_ t, _, _ t|~t] t + _[_ t, _, _ ~t|~t] t + + _[_ t.t] t + _[_ ~t.t] t + _[_ t.t|t.t] t + _[_ ~t.t|t.t] t + _[_ t.t|~t.t] t + _[_ ~t.t|~t.t] t + + _[_ t, _, _ t.t|t.t] t + _[_ t, _, _ ~t.t|t.t] t + _[_ t, _, _ t.t|~t.t] t + _[_ t, _, _ ~t.t|~t.t] t + + _[_ struct{}] t + _[_ ~struct{}] t + + _[_ struct{}|t] t + _[_ ~struct{}|t] t + _[_ struct{}|~t] t + _[_ ~struct{}|~t] t + + _[_ t|struct{}] t + _[_ ~t|struct{}] t + _[_ t|~struct{}] t + _[_ ~t|~struct{}] t + + // test cases for go.dev/issue/49175 + _[_ []t]t + _[_ [1]t]t + _[_ ~[]t]t + _[_ ~[1]t]t + t [ /* ERROR missing type parameter name */ t[0]]t +) + +// test cases for go.dev/issue/49174 +func _[_ t]() {} +func _[_ []t]() {} +func _[_ [1]t]() {} +func _[_ []t | t]() {} +func _[_ [1]t | t]() {} +func _[_ t | []t]() {} +func _[_ []t | []t]() {} +func _[_ [1]t | [1]t]() {} +func _[_ t[t] | t[t]]() {} + +// Single-expression type parameter lists and those that don't start +// with a (type parameter) name are considered array sizes. +// The term must be a valid expression (it could be a type incl. a +// tilde term) but the type-checker will complain. +type ( + _[t] t + _[t|t] t + + // These are invalid and the type-checker will complain. 
+ _[~t] t + _[~t|t] t + _[t|~t] t + _[~t|~t] t +) + +type ( + _[_ t, t /* ERROR missing type constraint */ ] t + _[_ ~t, t /* ERROR missing type constraint */ ] t + _[_ t, /* ERROR missing type parameter name */ ~t] t + _[_ ~t, /* ERROR missing type parameter name */ ~t] t + + _[_ t|t, /* ERROR missing type parameter name */ t|t] t + _[_ ~t|t, /* ERROR missing type parameter name */ t|t] t + _[_ t|t, /* ERROR missing type parameter name */ ~t|t] t + _[_ ~t|t, /* ERROR missing type parameter name */ ~t|t] t +) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/fixedbugs_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/fixedbugs_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8ff7a60aae66dc92c34c10acb902652d04eee481 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/fixedbugs_test.go @@ -0,0 +1,86 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "internal/testenv" + "os" + "path/filepath" + "strings" + "testing" +) + +type T struct { + x [2]int64 // field that will be clobbered. Also makes type not SSAable. + p *byte // has a pointer +} + +//go:noinline +func makeT() T { + return T{} +} + +var g T + +var sink interface{} + +func TestIssue15854(t *testing.T) { + for i := 0; i < 10000; i++ { + if g.x[0] != 0 { + t.Fatalf("g.x[0] clobbered with %x\n", g.x[0]) + } + // The bug was in the following assignment. The return + // value of makeT() is not copied out of the args area of + // stack frame in a timely fashion. So when write barriers + // are enabled, the marshaling of the args for the write + // barrier call clobbers the result of makeT() before it is + // read by the write barrier code. 
+ g = makeT() + sink = make([]byte, 1000) // force write barriers to eventually happen + } +} +func TestIssue15854b(t *testing.T) { + const N = 10000 + a := make([]T, N) + for i := 0; i < N; i++ { + a = append(a, makeT()) + sink = make([]byte, 1000) // force write barriers to eventually happen + } + for i, v := range a { + if v.x[0] != 0 { + t.Fatalf("a[%d].x[0] clobbered with %x\n", i, v.x[0]) + } + } +} + +// Test that the generated assembly has line numbers (Issue #16214). +func TestIssue16214(t *testing.T) { + testenv.MustHaveGoBuild(t) + dir := t.TempDir() + + src := filepath.Join(dir, "x.go") + err := os.WriteFile(src, []byte(issue16214src), 0644) + if err != nil { + t.Fatalf("could not write file: %v", err) + } + + cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "compile", "-p=main", "-S", "-o", filepath.Join(dir, "out.o"), src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("go tool compile: %v\n%s", err, out) + } + + if strings.Contains(string(out), "unknown line number") { + t.Errorf("line number missing in assembly:\n%s", out) + } +} + +var issue16214src = ` +package main + +func Mod32(x uint32) uint32 { + return x % 3 // frontend rewrites it as HMUL with 2863311531, the LITERAL node has unknown Pos +} +` diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/float_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/float_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c736f970f9941705d001d8e9c008b66e13fdec2a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/float_test.go @@ -0,0 +1,545 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package test + +import ( + "math" + "testing" +) + +//go:noinline +func compare1(a, b float64) bool { + return a < b +} + +//go:noinline +func compare2(a, b float32) bool { + return a < b +} + +func TestFloatCompare(t *testing.T) { + if !compare1(3, 5) { + t.Errorf("compare1 returned false") + } + if !compare2(3, 5) { + t.Errorf("compare2 returned false") + } +} + +func TestFloatCompareFolded(t *testing.T) { + // float64 comparisons + d1, d3, d5, d9 := float64(1), float64(3), float64(5), float64(9) + if d3 == d5 { + t.Errorf("d3 == d5 returned true") + } + if d3 != d3 { + t.Errorf("d3 != d3 returned true") + } + if d3 > d5 { + t.Errorf("d3 > d5 returned true") + } + if d3 >= d9 { + t.Errorf("d3 >= d9 returned true") + } + if d5 < d1 { + t.Errorf("d5 < d1 returned true") + } + if d9 <= d1 { + t.Errorf("d9 <= d1 returned true") + } + if math.NaN() == math.NaN() { + t.Errorf("math.NaN() == math.NaN() returned true") + } + if math.NaN() >= math.NaN() { + t.Errorf("math.NaN() >= math.NaN() returned true") + } + if math.NaN() <= math.NaN() { + t.Errorf("math.NaN() <= math.NaN() returned true") + } + if math.Copysign(math.NaN(), -1) < math.NaN() { + t.Errorf("math.Copysign(math.NaN(), -1) < math.NaN() returned true") + } + if math.Inf(1) != math.Inf(1) { + t.Errorf("math.Inf(1) != math.Inf(1) returned true") + } + if math.Inf(-1) != math.Inf(-1) { + t.Errorf("math.Inf(-1) != math.Inf(-1) returned true") + } + if math.Copysign(0, -1) != 0 { + t.Errorf("math.Copysign(0, -1) != 0 returned true") + } + if math.Copysign(0, -1) < 0 { + t.Errorf("math.Copysign(0, -1) < 0 returned true") + } + if 0 > math.Copysign(0, -1) { + t.Errorf("0 > math.Copysign(0, -1) returned true") + } + + // float32 comparisons + s1, s3, s5, s9 := float32(1), float32(3), float32(5), float32(9) + if s3 == s5 { + t.Errorf("s3 == s5 returned true") + } + if s3 != s3 { + t.Errorf("s3 != s3 returned true") + } + if s3 > s5 { + t.Errorf("s3 > s5 returned true") + } + if s3 >= s9 { + t.Errorf("s3 >= s9 
returned true") + } + if s5 < s1 { + t.Errorf("s5 < s1 returned true") + } + if s9 <= s1 { + t.Errorf("s9 <= s1 returned true") + } + sPosNaN, sNegNaN := float32(math.NaN()), float32(math.Copysign(math.NaN(), -1)) + if sPosNaN == sPosNaN { + t.Errorf("sPosNaN == sPosNaN returned true") + } + if sPosNaN >= sPosNaN { + t.Errorf("sPosNaN >= sPosNaN returned true") + } + if sPosNaN <= sPosNaN { + t.Errorf("sPosNaN <= sPosNaN returned true") + } + if sNegNaN < sPosNaN { + t.Errorf("sNegNaN < sPosNaN returned true") + } + sPosInf, sNegInf := float32(math.Inf(1)), float32(math.Inf(-1)) + if sPosInf != sPosInf { + t.Errorf("sPosInf != sPosInf returned true") + } + if sNegInf != sNegInf { + t.Errorf("sNegInf != sNegInf returned true") + } + sNegZero := float32(math.Copysign(0, -1)) + if sNegZero != 0 { + t.Errorf("sNegZero != 0 returned true") + } + if sNegZero < 0 { + t.Errorf("sNegZero < 0 returned true") + } + if 0 > sNegZero { + t.Errorf("0 > sNegZero returned true") + } +} + +//go:noinline +func cvt1(a float64) uint64 { + return uint64(a) +} + +//go:noinline +func cvt2(a float64) uint32 { + return uint32(a) +} + +//go:noinline +func cvt3(a float32) uint64 { + return uint64(a) +} + +//go:noinline +func cvt4(a float32) uint32 { + return uint32(a) +} + +//go:noinline +func cvt5(a float64) int64 { + return int64(a) +} + +//go:noinline +func cvt6(a float64) int32 { + return int32(a) +} + +//go:noinline +func cvt7(a float32) int64 { + return int64(a) +} + +//go:noinline +func cvt8(a float32) int32 { + return int32(a) +} + +// make sure to cover int, uint cases (issue #16738) +// +//go:noinline +func cvt9(a float64) int { + return int(a) +} + +//go:noinline +func cvt10(a float64) uint { + return uint(a) +} + +//go:noinline +func cvt11(a float32) int { + return int(a) +} + +//go:noinline +func cvt12(a float32) uint { + return uint(a) +} + +//go:noinline +func f2i64p(v float64) *int64 { + return ip64(int64(v / 0.1)) +} + +//go:noinline +func ip64(v int64) *int64 { + return &v 
+} + +func TestFloatConvert(t *testing.T) { + if got := cvt1(3.5); got != 3 { + t.Errorf("cvt1 got %d, wanted 3", got) + } + if got := cvt2(3.5); got != 3 { + t.Errorf("cvt2 got %d, wanted 3", got) + } + if got := cvt3(3.5); got != 3 { + t.Errorf("cvt3 got %d, wanted 3", got) + } + if got := cvt4(3.5); got != 3 { + t.Errorf("cvt4 got %d, wanted 3", got) + } + if got := cvt5(3.5); got != 3 { + t.Errorf("cvt5 got %d, wanted 3", got) + } + if got := cvt6(3.5); got != 3 { + t.Errorf("cvt6 got %d, wanted 3", got) + } + if got := cvt7(3.5); got != 3 { + t.Errorf("cvt7 got %d, wanted 3", got) + } + if got := cvt8(3.5); got != 3 { + t.Errorf("cvt8 got %d, wanted 3", got) + } + if got := cvt9(3.5); got != 3 { + t.Errorf("cvt9 got %d, wanted 3", got) + } + if got := cvt10(3.5); got != 3 { + t.Errorf("cvt10 got %d, wanted 3", got) + } + if got := cvt11(3.5); got != 3 { + t.Errorf("cvt11 got %d, wanted 3", got) + } + if got := cvt12(3.5); got != 3 { + t.Errorf("cvt12 got %d, wanted 3", got) + } + if got := *f2i64p(10); got != 100 { + t.Errorf("f2i64p got %d, wanted 100", got) + } +} + +func TestFloatConvertFolded(t *testing.T) { + // Assign constants to variables so that they are (hopefully) constant folded + // by the SSA backend rather than the frontend. 
+ u64, u32, u16, u8 := uint64(1<<63), uint32(1<<31), uint16(1<<15), uint8(1<<7) + i64, i32, i16, i8 := int64(-1<<63), int32(-1<<31), int16(-1<<15), int8(-1<<7) + du64, du32, du16, du8 := float64(1<<63), float64(1<<31), float64(1<<15), float64(1<<7) + di64, di32, di16, di8 := float64(-1<<63), float64(-1<<31), float64(-1<<15), float64(-1<<7) + su64, su32, su16, su8 := float32(1<<63), float32(1<<31), float32(1<<15), float32(1<<7) + si64, si32, si16, si8 := float32(-1<<63), float32(-1<<31), float32(-1<<15), float32(-1<<7) + + // integer to float + if float64(u64) != du64 { + t.Errorf("float64(u64) != du64") + } + if float64(u32) != du32 { + t.Errorf("float64(u32) != du32") + } + if float64(u16) != du16 { + t.Errorf("float64(u16) != du16") + } + if float64(u8) != du8 { + t.Errorf("float64(u8) != du8") + } + if float64(i64) != di64 { + t.Errorf("float64(i64) != di64") + } + if float64(i32) != di32 { + t.Errorf("float64(i32) != di32") + } + if float64(i16) != di16 { + t.Errorf("float64(i16) != di16") + } + if float64(i8) != di8 { + t.Errorf("float64(i8) != di8") + } + if float32(u64) != su64 { + t.Errorf("float32(u64) != su64") + } + if float32(u32) != su32 { + t.Errorf("float32(u32) != su32") + } + if float32(u16) != su16 { + t.Errorf("float32(u16) != su16") + } + if float32(u8) != su8 { + t.Errorf("float32(u8) != su8") + } + if float32(i64) != si64 { + t.Errorf("float32(i64) != si64") + } + if float32(i32) != si32 { + t.Errorf("float32(i32) != si32") + } + if float32(i16) != si16 { + t.Errorf("float32(i16) != si16") + } + if float32(i8) != si8 { + t.Errorf("float32(i8) != si8") + } + + // float to integer + if uint64(du64) != u64 { + t.Errorf("uint64(du64) != u64") + } + if uint32(du32) != u32 { + t.Errorf("uint32(du32) != u32") + } + if uint16(du16) != u16 { + t.Errorf("uint16(du16) != u16") + } + if uint8(du8) != u8 { + t.Errorf("uint8(du8) != u8") + } + if int64(di64) != i64 { + t.Errorf("int64(di64) != i64") + } + if int32(di32) != i32 { + t.Errorf("int32(di32) != 
i32") + } + if int16(di16) != i16 { + t.Errorf("int16(di16) != i16") + } + if int8(di8) != i8 { + t.Errorf("int8(di8) != i8") + } + if uint64(su64) != u64 { + t.Errorf("uint64(su64) != u64") + } + if uint32(su32) != u32 { + t.Errorf("uint32(su32) != u32") + } + if uint16(su16) != u16 { + t.Errorf("uint16(su16) != u16") + } + if uint8(su8) != u8 { + t.Errorf("uint8(su8) != u8") + } + if int64(si64) != i64 { + t.Errorf("int64(si64) != i64") + } + if int32(si32) != i32 { + t.Errorf("int32(si32) != i32") + } + if int16(si16) != i16 { + t.Errorf("int16(si16) != i16") + } + if int8(si8) != i8 { + t.Errorf("int8(si8) != i8") + } +} + +func TestFloat32StoreToLoadConstantFold(t *testing.T) { + // Test that math.Float32{,from}bits constant fold correctly. + // In particular we need to be careful that signaling NaN (sNaN) values + // are not converted to quiet NaN (qNaN) values during compilation. + // See issue #27193 for more information. + + // signaling NaNs + { + const nan = uint32(0x7f800001) // sNaN + if x := math.Float32bits(math.Float32frombits(nan)); x != nan { + t.Errorf("got %#x, want %#x", x, nan) + } + } + { + const nan = uint32(0x7fbfffff) // sNaN + if x := math.Float32bits(math.Float32frombits(nan)); x != nan { + t.Errorf("got %#x, want %#x", x, nan) + } + } + { + const nan = uint32(0xff800001) // sNaN + if x := math.Float32bits(math.Float32frombits(nan)); x != nan { + t.Errorf("got %#x, want %#x", x, nan) + } + } + { + const nan = uint32(0xffbfffff) // sNaN + if x := math.Float32bits(math.Float32frombits(nan)); x != nan { + t.Errorf("got %#x, want %#x", x, nan) + } + } + + // quiet NaNs + { + const nan = uint32(0x7fc00000) // qNaN + if x := math.Float32bits(math.Float32frombits(nan)); x != nan { + t.Errorf("got %#x, want %#x", x, nan) + } + } + { + const nan = uint32(0x7fffffff) // qNaN + if x := math.Float32bits(math.Float32frombits(nan)); x != nan { + t.Errorf("got %#x, want %#x", x, nan) + } + } + { + const nan = uint32(0x8fc00000) // qNaN + if x := 
math.Float32bits(math.Float32frombits(nan)); x != nan { + t.Errorf("got %#x, want %#x", x, nan) + } + } + { + const nan = uint32(0x8fffffff) // qNaN + if x := math.Float32bits(math.Float32frombits(nan)); x != nan { + t.Errorf("got %#x, want %#x", x, nan) + } + } + + // infinities + { + const inf = uint32(0x7f800000) // +∞ + if x := math.Float32bits(math.Float32frombits(inf)); x != inf { + t.Errorf("got %#x, want %#x", x, inf) + } + } + { + const negInf = uint32(0xff800000) // -∞ + if x := math.Float32bits(math.Float32frombits(negInf)); x != negInf { + t.Errorf("got %#x, want %#x", x, negInf) + } + } + + // numbers + { + const zero = uint32(0) // +0.0 + if x := math.Float32bits(math.Float32frombits(zero)); x != zero { + t.Errorf("got %#x, want %#x", x, zero) + } + } + { + const negZero = uint32(1 << 31) // -0.0 + if x := math.Float32bits(math.Float32frombits(negZero)); x != negZero { + t.Errorf("got %#x, want %#x", x, negZero) + } + } + { + const one = uint32(0x3f800000) // 1.0 + if x := math.Float32bits(math.Float32frombits(one)); x != one { + t.Errorf("got %#x, want %#x", x, one) + } + } + { + const negOne = uint32(0xbf800000) // -1.0 + if x := math.Float32bits(math.Float32frombits(negOne)); x != negOne { + t.Errorf("got %#x, want %#x", x, negOne) + } + } + { + const frac = uint32(0x3fc00000) // +1.5 + if x := math.Float32bits(math.Float32frombits(frac)); x != frac { + t.Errorf("got %#x, want %#x", x, frac) + } + } + { + const negFrac = uint32(0xbfc00000) // -1.5 + if x := math.Float32bits(math.Float32frombits(negFrac)); x != negFrac { + t.Errorf("got %#x, want %#x", x, negFrac) + } + } +} + +// Signaling NaN values as constants. +const ( + snan32bits uint32 = 0x7f800001 + snan64bits uint64 = 0x7ff0000000000001 +) + +// Signaling NaNs as variables. +var snan32bitsVar uint32 = snan32bits +var snan64bitsVar uint64 = snan64bits + +func TestFloatSignalingNaN(t *testing.T) { + // Make sure we generate a signaling NaN from a constant properly. + // See issue 36400. 
+ f32 := math.Float32frombits(snan32bits) + g32 := math.Float32frombits(snan32bitsVar) + x32 := math.Float32bits(f32) + y32 := math.Float32bits(g32) + if x32 != y32 { + t.Errorf("got %x, want %x (diff=%x)", x32, y32, x32^y32) + } + + f64 := math.Float64frombits(snan64bits) + g64 := math.Float64frombits(snan64bitsVar) + x64 := math.Float64bits(f64) + y64 := math.Float64bits(g64) + if x64 != y64 { + t.Errorf("got %x, want %x (diff=%x)", x64, y64, x64^y64) + } +} + +func TestFloatSignalingNaNConversion(t *testing.T) { + // Test to make sure when we convert a signaling NaN, we get a NaN. + // (Ideally we want a quiet NaN, but some platforms don't agree.) + // See issue 36399. + s32 := math.Float32frombits(snan32bitsVar) + if s32 == s32 { + t.Errorf("converting a NaN did not result in a NaN") + } + s64 := math.Float64frombits(snan64bitsVar) + if s64 == s64 { + t.Errorf("converting a NaN did not result in a NaN") + } +} + +func TestFloatSignalingNaNConversionConst(t *testing.T) { + // Test to make sure when we convert a signaling NaN, it converts to a NaN. + // (Ideally we want a quiet NaN, but some platforms don't agree.) + // See issue 36399 and 36400. 
+ s32 := math.Float32frombits(snan32bits) + if s32 == s32 { + t.Errorf("converting a NaN did not result in a NaN") + } + s64 := math.Float64frombits(snan64bits) + if s64 == s64 { + t.Errorf("converting a NaN did not result in a NaN") + } +} + +var sinkFloat float64 + +func BenchmarkMul2(b *testing.B) { + for i := 0; i < b.N; i++ { + var m float64 = 1 + for j := 0; j < 500; j++ { + m *= 2 + } + sinkFloat = m + } +} +func BenchmarkMulNeg2(b *testing.B) { + for i := 0; i < b.N; i++ { + var m float64 = 1 + for j := 0; j < 500; j++ { + m *= -2 + } + sinkFloat = m + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/global_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/global_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c8b3370e9c6cb293b573f4171062fee524a9a75b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/global_test.go @@ -0,0 +1,106 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "bytes" + "internal/testenv" + "os" + "path/filepath" + "strings" + "testing" +) + +// Make sure "hello world" does not link in all the +// fmt.scanf routines. See issue 6853. +func TestScanfRemoval(t *testing.T) { + testenv.MustHaveGoBuild(t) + t.Parallel() + + // Make a directory to work in. + dir := t.TempDir() + + // Create source. + src := filepath.Join(dir, "test.go") + f, err := os.Create(src) + if err != nil { + t.Fatalf("could not create source file: %v", err) + } + f.Write([]byte(` +package main +import "fmt" +func main() { + fmt.Println("hello world") +} +`)) + f.Close() + + // Name of destination. + dst := filepath.Join(dir, "test") + + // Compile source. 
+ cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-o", dst, src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("could not build target: %v\n%s", err, out) + } + + // Check destination to see if scanf code was included. + cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "nm", dst) + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("could not read target: %v", err) + } + if bytes.Contains(out, []byte("scanInt")) { + t.Fatalf("scanf code not removed from helloworld") + } +} + +// Make sure -S prints assembly code. See issue 14515. +func TestDashS(t *testing.T) { + testenv.MustHaveGoBuild(t) + t.Parallel() + + // Make a directory to work in. + dir := t.TempDir() + + // Create source. + src := filepath.Join(dir, "test.go") + f, err := os.Create(src) + if err != nil { + t.Fatalf("could not create source file: %v", err) + } + f.Write([]byte(` +package main +import "fmt" +func main() { + fmt.Println("hello world") +} +`)) + f.Close() + + // Compile source. + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-gcflags", "-S", "-o", filepath.Join(dir, "test"), src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("could not build target: %v\n%s", err, out) + } + + patterns := []string{ + // It is hard to look for actual instructions in an + // arch-independent way. So we'll just look for + // pseudo-ops that are arch-independent. 
+ "\tTEXT\t", + "\tFUNCDATA\t", + "\tPCDATA\t", + } + outstr := string(out) + for _, p := range patterns { + if !strings.Contains(outstr, p) { + println(outstr) + panic("can't find pattern " + p) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/iface_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/iface_test.go new file mode 100644 index 0000000000000000000000000000000000000000..db41eb8e55c740c36652ae1c739779bac928168a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/iface_test.go @@ -0,0 +1,138 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import "testing" + +// Test to make sure we make copies of the values we +// put in interfaces. + +var x int + +func TestEfaceConv1(t *testing.T) { + a := 5 + i := interface{}(a) + a += 2 + if got := i.(int); got != 5 { + t.Errorf("wanted 5, got %d\n", got) + } +} + +func TestEfaceConv2(t *testing.T) { + a := 5 + sink = &a + i := interface{}(a) + a += 2 + if got := i.(int); got != 5 { + t.Errorf("wanted 5, got %d\n", got) + } +} + +func TestEfaceConv3(t *testing.T) { + x = 5 + if got := e2int3(x); got != 5 { + t.Errorf("wanted 5, got %d\n", got) + } +} + +//go:noinline +func e2int3(i interface{}) int { + x = 7 + return i.(int) +} + +func TestEfaceConv4(t *testing.T) { + a := 5 + if got := e2int4(a, &a); got != 5 { + t.Errorf("wanted 5, got %d\n", got) + } +} + +//go:noinline +func e2int4(i interface{}, p *int) int { + *p = 7 + return i.(int) +} + +type Int int + +var y Int + +type I interface { + foo() +} + +func (i Int) foo() { +} + +func TestIfaceConv1(t *testing.T) { + a := Int(5) + i := interface{}(a) + a += 2 + if got := i.(Int); got != 5 { + t.Errorf("wanted 5, got %d\n", int(got)) + } +} + +func TestIfaceConv2(t *testing.T) { + a := Int(5) + sink = &a + i := interface{}(a) + a += 2 + if got := 
i.(Int); got != 5 { + t.Errorf("wanted 5, got %d\n", int(got)) + } +} + +func TestIfaceConv3(t *testing.T) { + y = 5 + if got := i2Int3(y); got != 5 { + t.Errorf("wanted 5, got %d\n", int(got)) + } +} + +//go:noinline +func i2Int3(i I) Int { + y = 7 + return i.(Int) +} + +func TestIfaceConv4(t *testing.T) { + a := Int(5) + if got := i2Int4(a, &a); got != 5 { + t.Errorf("wanted 5, got %d\n", int(got)) + } +} + +//go:noinline +func i2Int4(i I, p *Int) Int { + *p = 7 + return i.(Int) +} + +func BenchmarkEfaceInteger(b *testing.B) { + sum := 0 + for i := 0; i < b.N; i++ { + sum += i2int(i) + } + sink = sum +} + +//go:noinline +func i2int(i interface{}) int { + return i.(int) +} + +func BenchmarkTypeAssert(b *testing.B) { + e := any(Int(0)) + r := true + for i := 0; i < b.N; i++ { + _, ok := e.(I) + if !ok { + r = false + } + } + sink = r +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/inl_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/inl_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0ccc7b3761995d0f80d03796692ad4ee8fcf49bc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/inl_test.go @@ -0,0 +1,411 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "bufio" + "internal/goexperiment" + "internal/testenv" + "io" + "math/bits" + "regexp" + "runtime" + "strings" + "testing" +) + +// TestIntendedInlining tests that specific functions are inlined. +// This allows refactoring for code clarity and re-use without fear that +// changes to the compiler will cause silent performance regressions. 
+func TestIntendedInlining(t *testing.T) { + if testing.Short() && testenv.Builder() == "" { + t.Skip("skipping in short mode") + } + testenv.MustHaveGoRun(t) + t.Parallel() + + // want is the list of function names (by package) that should + // be inlinable. If they have no callers in their packages, they + // might not actually be inlined anywhere. + want := map[string][]string{ + "runtime": { + "add", + "acquirem", + "add1", + "addb", + "adjustpanics", + "adjustpointer", + "alignDown", + "alignUp", + "bucketMask", + "bucketShift", + "chanbuf", + "evacuated", + "fastlog2", + "float64bits", + "funcspdelta", + "getm", + "getMCache", + "isDirectIface", + "itabHashFunc", + "nextslicecap", + "noescape", + "pcvalueCacheKey", + "rand32", + "readUnaligned32", + "readUnaligned64", + "releasem", + "roundupsize", + "stackmapdata", + "stringStructOf", + "subtract1", + "subtractb", + "tophash", + "(*bmap).keys", + "(*bmap).overflow", + "(*waitq).enqueue", + "funcInfo.entry", + + // GC-related ones + "cgoInRange", + "gclinkptr.ptr", + "guintptr.ptr", + "writeHeapBitsForAddr", + "heapBitsSlice", + "markBits.isMarked", + "muintptr.ptr", + "puintptr.ptr", + "spanOf", + "spanOfUnchecked", + "typePointers.nextFast", + "(*gcWork).putFast", + "(*gcWork).tryGetFast", + "(*guintptr).set", + "(*markBits).advance", + "(*mspan).allocBitsForIndex", + "(*mspan).base", + "(*mspan).markBitsForBase", + "(*mspan).markBitsForIndex", + "(*mspan).writeUserArenaHeapBits", + "(*muintptr).set", + "(*puintptr).set", + "(*wbBuf).get1", + "(*wbBuf).get2", + + // Trace-related ones. 
+ "traceLocker.ok", + "traceEnabled", + }, + "runtime/internal/sys": {}, + "runtime/internal/math": { + "MulUintptr", + }, + "bytes": { + "(*Buffer).Bytes", + "(*Buffer).Cap", + "(*Buffer).Len", + "(*Buffer).Grow", + "(*Buffer).Next", + "(*Buffer).Read", + "(*Buffer).ReadByte", + "(*Buffer).Reset", + "(*Buffer).String", + "(*Buffer).UnreadByte", + "(*Buffer).tryGrowByReslice", + }, + "internal/abi": { + "UseInterfaceSwitchCache", + }, + "compress/flate": { + "byLiteral.Len", + "byLiteral.Less", + "byLiteral.Swap", + "(*dictDecoder).tryWriteCopy", + }, + "encoding/base64": { + "assemble32", + "assemble64", + }, + "unicode/utf8": { + "FullRune", + "FullRuneInString", + "RuneLen", + "AppendRune", + "ValidRune", + }, + "unicode/utf16": { + "Decode", + }, + "reflect": { + "Value.Bool", + "Value.Bytes", + "Value.CanAddr", + "Value.CanComplex", + "Value.CanFloat", + "Value.CanInt", + "Value.CanInterface", + "Value.CanSet", + "Value.CanUint", + "Value.Cap", + "Value.Complex", + "Value.Float", + "Value.Int", + "Value.Interface", + "Value.IsNil", + "Value.IsValid", + "Value.Kind", + "Value.Len", + "Value.MapRange", + "Value.OverflowComplex", + "Value.OverflowFloat", + "Value.OverflowInt", + "Value.OverflowUint", + "Value.String", + "Value.Type", + "Value.Uint", + "Value.UnsafeAddr", + "Value.pointer", + "add", + "align", + "flag.mustBe", + "flag.mustBeAssignable", + "flag.mustBeExported", + "flag.kind", + "flag.ro", + }, + "regexp": { + "(*bitState).push", + }, + "math/big": { + "bigEndianWord", + // The following functions require the math_big_pure_go build tag. + "addVW", + "subVW", + }, + "math/rand": { + "(*rngSource).Int63", + "(*rngSource).Uint64", + }, + "net": { + "(*UDPConn).ReadFromUDP", + }, + "sync": { + // Both OnceFunc and its returned closure need to be inlinable so + // that the returned closure can be inlined into the caller of OnceFunc. + "OnceFunc", + "OnceFunc.func2", // The returned closure. 
+ // TODO(austin): It would be good to check OnceValue and OnceValues, + // too, but currently they aren't reported because they have type + // parameters and aren't instantiated in sync. + }, + "sync/atomic": { + // (*Bool).CompareAndSwap handled below. + "(*Bool).Load", + "(*Bool).Store", + "(*Bool).Swap", + "(*Int32).Add", + "(*Int32).CompareAndSwap", + "(*Int32).Load", + "(*Int32).Store", + "(*Int32).Swap", + "(*Int64).Add", + "(*Int64).CompareAndSwap", + "(*Int64).Load", + "(*Int64).Store", + "(*Int64).Swap", + "(*Uint32).Add", + "(*Uint32).CompareAndSwap", + "(*Uint32).Load", + "(*Uint32).Store", + "(*Uint32).Swap", + "(*Uint64).Add", + "(*Uint64).CompareAndSwap", + "(*Uint64).Load", + "(*Uint64).Store", + "(*Uint64).Swap", + "(*Uintptr).Add", + "(*Uintptr).CompareAndSwap", + "(*Uintptr).Load", + "(*Uintptr).Store", + "(*Uintptr).Swap", + "(*Pointer[go.shape.int]).CompareAndSwap", + "(*Pointer[go.shape.int]).Load", + "(*Pointer[go.shape.int]).Store", + "(*Pointer[go.shape.int]).Swap", + }, + } + + if runtime.GOARCH != "386" && runtime.GOARCH != "loong64" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" && runtime.GOARCH != "riscv64" { + // nextFreeFast calls sys.TrailingZeros64, which on 386 is implemented in asm and is not inlinable. + // We currently don't have midstack inlining so nextFreeFast is also not inlinable on 386. + // On loong64, mips64x and riscv64, TrailingZeros64 is not intrinsified and causes nextFreeFast + // too expensive to inline (Issue 22239). + want["runtime"] = append(want["runtime"], "nextFreeFast") + // Same behavior for heapBits.nextFast. + want["runtime"] = append(want["runtime"], "heapBits.nextFast") + } + if runtime.GOARCH != "386" { + // As explained above, TrailingZeros64 and TrailingZeros32 are not Go code on 386. + // The same applies to Bswap32. 
+ want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "TrailingZeros64") + want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "TrailingZeros32") + want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Bswap32") + } + if runtime.GOARCH == "amd64" || runtime.GOARCH == "arm64" || runtime.GOARCH == "loong64" || runtime.GOARCH == "mips" || runtime.GOARCH == "mips64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "riscv64" || runtime.GOARCH == "s390x" { + // runtime/internal/atomic.Loaduintptr is only intrinsified on these platforms. + want["runtime"] = append(want["runtime"], "traceAcquire") + } + if bits.UintSize == 64 { + // mix is only defined on 64-bit architectures + want["runtime"] = append(want["runtime"], "mix") + // (*Bool).CompareAndSwap is just over budget on 32-bit systems (386, arm). + want["sync/atomic"] = append(want["sync/atomic"], "(*Bool).CompareAndSwap") + } + + switch runtime.GOARCH { + case "386", "wasm", "arm": + default: + // TODO(mvdan): As explained in /test/inline_sync.go, some + // architectures don't have atomic intrinsics, so these go over + // the inlining budget. Move back to the main table once that + // problem is solved. + want["sync"] = []string{ + "(*Mutex).Lock", + "(*Mutex).Unlock", + "(*RWMutex).RLock", + "(*RWMutex).RUnlock", + "(*Once).Do", + } + } + + // Functions that must actually be inlined; they must have actual callers. + must := map[string]bool{ + "compress/flate.byLiteral.Len": true, + "compress/flate.byLiteral.Less": true, + "compress/flate.byLiteral.Swap": true, + } + + notInlinedReason := make(map[string]string) + pkgs := make([]string, 0, len(want)) + for pname, fnames := range want { + pkgs = append(pkgs, pname) + for _, fname := range fnames { + fullName := pname + "." 
+ fname + if _, ok := notInlinedReason[fullName]; ok { + t.Errorf("duplicate func: %s", fullName) + } + notInlinedReason[fullName] = "unknown reason" + } + } + + args := append([]string{"build", "-gcflags=-m -m", "-tags=math_big_pure_go"}, pkgs...) + cmd := testenv.CleanCmdEnv(testenv.Command(t, testenv.GoToolPath(t), args...)) + pr, pw := io.Pipe() + cmd.Stdout = pw + cmd.Stderr = pw + cmdErr := make(chan error, 1) + go func() { + cmdErr <- cmd.Run() + pw.Close() + }() + scanner := bufio.NewScanner(pr) + curPkg := "" + canInline := regexp.MustCompile(`: can inline ([^ ]*)`) + haveInlined := regexp.MustCompile(`: inlining call to ([^ ]*)`) + cannotInline := regexp.MustCompile(`: cannot inline ([^ ]*): (.*)`) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, "# ") { + curPkg = line[2:] + continue + } + if m := haveInlined.FindStringSubmatch(line); m != nil { + fname := m[1] + delete(notInlinedReason, curPkg+"."+fname) + continue + } + if m := canInline.FindStringSubmatch(line); m != nil { + fname := m[1] + fullname := curPkg + "." + fname + // If function must be inlined somewhere, being inlinable is not enough + if _, ok := must[fullname]; !ok { + delete(notInlinedReason, fullname) + continue + } + } + if m := cannotInline.FindStringSubmatch(line); m != nil { + fname, reason := m[1], m[2] + fullName := curPkg + "." 
+ fname + if _, ok := notInlinedReason[fullName]; ok { + // cmd/compile gave us a reason why + notInlinedReason[fullName] = reason + } + continue + } + } + if err := <-cmdErr; err != nil { + t.Fatal(err) + } + if err := scanner.Err(); err != nil { + t.Fatal(err) + } + for fullName, reason := range notInlinedReason { + t.Errorf("%s was not inlined: %s", fullName, reason) + } +} + +func collectInlCands(msgs string) map[string]struct{} { + rv := make(map[string]struct{}) + lines := strings.Split(msgs, "\n") + re := regexp.MustCompile(`^\S+\s+can\s+inline\s+(\S+)`) + for _, line := range lines { + m := re.FindStringSubmatch(line) + if m != nil { + rv[m[1]] = struct{}{} + } + } + return rv +} + +func TestIssue56044(t *testing.T) { + if testing.Short() { + t.Skipf("skipping test: too long for short mode") + } + if !goexperiment.CoverageRedesign { + t.Skipf("skipping new coverage tests (experiment not enabled)") + } + + testenv.MustHaveGoBuild(t) + + modes := []string{"-covermode=set", "-covermode=atomic"} + + for _, mode := range modes { + // Build the Go runtime with "-m", capturing output. + args := []string{"build", "-gcflags=runtime=-m", "runtime"} + cmd := testenv.Command(t, testenv.GoToolPath(t), args...) + b, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("build failed (%v): %s", err, b) + } + mbase := collectInlCands(string(b)) + + // Redo the build with -cover, also with "-m". + args = []string{"build", "-gcflags=runtime=-m", mode, "runtime"} + cmd = testenv.Command(t, testenv.GoToolPath(t), args...) + b, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("build failed (%v): %s", err, b) + } + mcov := collectInlCands(string(b)) + + // Make sure that there aren't any functions that are marked + // as inline candidates at base but not with coverage. 
+ for k := range mbase { + if _, ok := mcov[k]; !ok { + t.Errorf("error: did not find %s in coverage -m output", k) + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/inst_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/inst_test.go new file mode 100644 index 0000000000000000000000000000000000000000..069e2ffaf5b8df663e74e0b8b7480609992a10c7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/inst_test.go @@ -0,0 +1,60 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "internal/testenv" + "os" + "path/filepath" + "regexp" + "testing" +) + +// TestInst tests that only one instantiation of Sort is created, even though generic +// Sort is used for multiple pointer types across two packages. +func TestInst(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustHaveGoRun(t) + + // Build ptrsort.go, which uses package mysort. + var output []byte + var err error + filename := "ptrsort.go" + exename := "ptrsort" + outname := "ptrsort.out" + gotool := testenv.GoToolPath(t) + dest := filepath.Join(t.TempDir(), exename) + cmd := testenv.Command(t, gotool, "build", "-o", dest, filepath.Join("testdata", filename)) + if output, err = cmd.CombinedOutput(); err != nil { + t.Fatalf("Failed: %v:\nOutput: %s\n", err, output) + } + + // Test that there is exactly one shape-based instantiation of Sort in + // the executable. + cmd = testenv.Command(t, gotool, "tool", "nm", dest) + if output, err = cmd.CombinedOutput(); err != nil { + t.Fatalf("Failed: %v:\nOut: %s\n", err, output) + } + // Look for shape-based instantiation of Sort, but ignore any extra wrapper + // ending in "-tramp" (which are created on riscv). 
+ re := regexp.MustCompile(`\bSort\[.*shape.*\][^-]`) + r := re.FindAllIndex(output, -1) + if len(r) != 1 { + t.Fatalf("Wanted 1 instantiations of Sort function, got %d\n", len(r)) + } + + // Actually run the test and make sure output is correct. + cmd = testenv.Command(t, gotool, "run", filepath.Join("testdata", filename)) + if output, err = cmd.CombinedOutput(); err != nil { + t.Fatalf("Failed: %v:\nOut: %s\n", err, output) + } + out, err := os.ReadFile(filepath.Join("testdata", outname)) + if err != nil { + t.Fatalf("Could not find %s\n", outname) + } + if string(out) != string(output) { + t.Fatalf("Wanted output %v, got %v\n", string(out), string(output)) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/intrinsics_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/intrinsics_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b89198c5058d8538772311207054fe65c622f051 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/intrinsics_test.go @@ -0,0 +1,62 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package test + +import ( + "math/bits" + "testing" +) + +func TestBitLen64(t *testing.T) { + for i := 0; i <= 64; i++ { + got := bits.Len64(1 << i) + want := i + 1 + if want == 65 { + want = 0 + } + if got != want { + t.Errorf("Len64(1<<%d) = %d, want %d", i, got, want) + } + } +} + +func TestBitLen32(t *testing.T) { + for i := 0; i <= 32; i++ { + got := bits.Len32(1 << i) + want := i + 1 + if want == 33 { + want = 0 + } + if got != want { + t.Errorf("Len32(1<<%d) = %d, want %d", i, got, want) + } + } +} + +func TestBitLen16(t *testing.T) { + for i := 0; i <= 16; i++ { + got := bits.Len16(1 << i) + want := i + 1 + if want == 17 { + want = 0 + } + if got != want { + t.Errorf("Len16(1<<%d) = %d, want %d", i, got, want) + } + } +} + +func TestBitLen8(t *testing.T) { + for i := 0; i <= 8; i++ { + got := bits.Len8(1 << i) + want := i + 1 + if want == 9 { + want = 0 + } + if got != want { + t.Errorf("Len8(1<<%d) = %d, want %d", i, got, want) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/issue50182_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/issue50182_test.go new file mode 100644 index 0000000000000000000000000000000000000000..cd277fa285eab5c98fd4e091c538387b49766290 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/issue50182_test.go @@ -0,0 +1,62 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "fmt" + "sort" + "testing" +) + +// Test that calling methods on generic types doesn't cause allocations. 
+func genericSorted[T sort.Interface](data T) bool { + n := data.Len() + for i := n - 1; i > 0; i-- { + if data.Less(i, i-1) { + return false + } + } + return true +} +func TestGenericSorted(t *testing.T) { + var data = sort.IntSlice{-10, -5, 0, 1, 2, 3, 5, 7, 11, 100, 100, 100, 1000, 10000} + f := func() { + genericSorted(data) + } + if n := testing.AllocsPerRun(10, f); n > 0 { + t.Errorf("got %f allocs, want 0", n) + } +} + +// Test that escape analysis correctly tracks escaping inside of methods +// called on generic types. +type fooer interface { + foo() +} +type P struct { + p *int + q int +} + +var esc []*int + +func (p P) foo() { + esc = append(esc, p.p) // foo escapes the pointer from inside of p +} +func f[T fooer](t T) { + t.foo() +} +func TestGenericEscape(t *testing.T) { + for i := 0; i < 4; i++ { + var x int = 77 + i + var p P = P{p: &x} + f(p) + } + for i, p := range esc { + if got, want := *p, 77+i; got != want { + panic(fmt.Sprintf("entry %d: got %d, want %d", i, got, want)) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/issue53888_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/issue53888_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0d5b13b5c877b25097c6d02079a14d91744c5c16 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/issue53888_test.go @@ -0,0 +1,46 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !race + +package test + +import ( + "internal/testenv" + "testing" +) + +func TestAppendOfMake(t *testing.T) { + testenv.SkipIfOptimizationOff(t) + for n := 32; n < 33; n++ { // avoid stack allocation of make() + b := make([]byte, n) + f := func() { + b = append(b[:0], make([]byte, n)...) 
+ } + if n := testing.AllocsPerRun(10, f); n > 0 { + t.Errorf("got %f allocs, want 0", n) + } + type S []byte + + s := make(S, n) + g := func() { + s = append(s[:0], make(S, n)...) + } + if n := testing.AllocsPerRun(10, g); n > 0 { + t.Errorf("got %f allocs, want 0", n) + } + h := func() { + s = append(s[:0], make([]byte, n)...) + } + if n := testing.AllocsPerRun(10, h); n > 0 { + t.Errorf("got %f allocs, want 0", n) + } + i := func() { + b = append(b[:0], make(S, n)...) + } + if n := testing.AllocsPerRun(10, i); n > 0 { + t.Errorf("got %f allocs, want 0", n) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/issue57434_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/issue57434_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6a34b54a0721a4d3bc749939b3f1596ce0eaa0a5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/issue57434_test.go @@ -0,0 +1,38 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package test + +import ( + "testing" +) + +var output int + +type Object struct { + Val int +} + +func (o *Object) Initialize() *Object { + o.Val = 5 + return o +} + +func (o *Object) Update() *Object { + o.Val = o.Val + 1 + return o +} + +func TestAutotmpLoopDepth(t *testing.T) { + f := func() { + for i := 0; i < 10; i++ { + var obj Object + obj.Initialize().Update() + output = obj.Val + } + } + if n := testing.AllocsPerRun(10, f); n > 0 { + t.Error("obj moved to heap") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/lang_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/lang_test.go new file mode 100644 index 0000000000000000000000000000000000000000..34ed378cd8eaa1cbbe96e24d0219a0b36b69f2a7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/lang_test.go @@ -0,0 +1,58 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "internal/testenv" + "os" + "path/filepath" + "testing" +) + +const aliasSrc = ` +package x + +type T = int +` + +func TestInvalidLang(t *testing.T) { + t.Parallel() + + testenv.MustHaveGoBuild(t) + + dir := t.TempDir() + + src := filepath.Join(dir, "alias.go") + if err := os.WriteFile(src, []byte(aliasSrc), 0644); err != nil { + t.Fatal(err) + } + + outfile := filepath.Join(dir, "alias.o") + + if testLang(t, "go9.99", src, outfile) == nil { + t.Error("compilation with -lang=go9.99 succeeded unexpectedly") + } + + // This test will have to be adjusted if we ever reach 1.99 or 2.0. 
+ if testLang(t, "go1.99", src, outfile) == nil { + t.Error("compilation with -lang=go1.99 succeeded unexpectedly") + } + + if testLang(t, "go1.8", src, outfile) == nil { + t.Error("compilation with -lang=go1.8 succeeded unexpectedly") + } + + if err := testLang(t, "go1.9", src, outfile); err != nil { + t.Errorf("compilation with -lang=go1.9 failed unexpectedly: %v", err) + } +} + +func testLang(t *testing.T, lang, src, outfile string) error { + run := []string{testenv.GoToolPath(t), "tool", "compile", "-p=p", "-lang", lang, "-o", outfile, src} + t.Log(run) + out, err := testenv.Command(t, run[0], run[1:]...).CombinedOutput() + t.Logf("%s", out) + return err +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/logic_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/logic_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0e46b5faef8924a6ec28d8b39fe7791951b9acb5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/logic_test.go @@ -0,0 +1,293 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import "testing" + +// Tests to make sure logic simplification rules are correct. 
+ +func TestLogic64(t *testing.T) { + // test values to determine function equality + values := [...]int64{-1 << 63, 1<<63 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4} + + // golden functions we use repeatedly + zero := func(x int64) int64 { return 0 } + id := func(x int64) int64 { return x } + or := func(x, y int64) int64 { return x | y } + and := func(x, y int64) int64 { return x & y } + y := func(x, y int64) int64 { return y } + + for _, test := range [...]struct { + name string + f func(int64) int64 + golden func(int64) int64 + }{ + {"x|x", func(x int64) int64 { return x | x }, id}, + {"x|0", func(x int64) int64 { return x | 0 }, id}, + {"x|-1", func(x int64) int64 { return x | -1 }, func(x int64) int64 { return -1 }}, + {"x&x", func(x int64) int64 { return x & x }, id}, + {"x&0", func(x int64) int64 { return x & 0 }, zero}, + {"x&-1", func(x int64) int64 { return x & -1 }, id}, + {"x^x", func(x int64) int64 { return x ^ x }, zero}, + {"x^0", func(x int64) int64 { return x ^ 0 }, id}, + {"x^-1", func(x int64) int64 { return x ^ -1 }, func(x int64) int64 { return ^x }}, + {"x+0", func(x int64) int64 { return x + 0 }, id}, + {"x-x", func(x int64) int64 { return x - x }, zero}, + {"x*0", func(x int64) int64 { return x * 0 }, zero}, + {"^^x", func(x int64) int64 { return ^^x }, id}, + } { + for _, v := range values { + got := test.f(v) + want := test.golden(v) + if want != got { + t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want) + } + } + } + for _, test := range [...]struct { + name string + f func(int64, int64) int64 + golden func(int64, int64) int64 + }{ + {"x|(x|y)", func(x, y int64) int64 { return x | (x | y) }, or}, + {"x|(y|x)", func(x, y int64) int64 { return x | (y | x) }, or}, + {"(x|y)|x", func(x, y int64) int64 { return (x | y) | x }, or}, + {"(y|x)|x", func(x, y int64) int64 { return (y | x) | x }, or}, + {"x&(x&y)", func(x, y int64) int64 { return x & (x & y) }, and}, + {"x&(y&x)", func(x, y int64) int64 { return x & (y & x) }, and}, + {"(x&y)&x", 
func(x, y int64) int64 { return (x & y) & x }, and}, + {"(y&x)&x", func(x, y int64) int64 { return (y & x) & x }, and}, + {"x^(x^y)", func(x, y int64) int64 { return x ^ (x ^ y) }, y}, + {"x^(y^x)", func(x, y int64) int64 { return x ^ (y ^ x) }, y}, + {"(x^y)^x", func(x, y int64) int64 { return (x ^ y) ^ x }, y}, + {"(y^x)^x", func(x, y int64) int64 { return (y ^ x) ^ x }, y}, + {"-(y-x)", func(x, y int64) int64 { return -(y - x) }, func(x, y int64) int64 { return x - y }}, + {"(x+y)-x", func(x, y int64) int64 { return (x + y) - x }, y}, + {"(y+x)-x", func(x, y int64) int64 { return (y + x) - x }, y}, + } { + for _, v := range values { + for _, w := range values { + got := test.f(v, w) + want := test.golden(v, w) + if want != got { + t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want) + } + } + } + } +} + +func TestLogic32(t *testing.T) { + // test values to determine function equality + values := [...]int32{-1 << 31, 1<<31 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4} + + // golden functions we use repeatedly + zero := func(x int32) int32 { return 0 } + id := func(x int32) int32 { return x } + or := func(x, y int32) int32 { return x | y } + and := func(x, y int32) int32 { return x & y } + y := func(x, y int32) int32 { return y } + + for _, test := range [...]struct { + name string + f func(int32) int32 + golden func(int32) int32 + }{ + {"x|x", func(x int32) int32 { return x | x }, id}, + {"x|0", func(x int32) int32 { return x | 0 }, id}, + {"x|-1", func(x int32) int32 { return x | -1 }, func(x int32) int32 { return -1 }}, + {"x&x", func(x int32) int32 { return x & x }, id}, + {"x&0", func(x int32) int32 { return x & 0 }, zero}, + {"x&-1", func(x int32) int32 { return x & -1 }, id}, + {"x^x", func(x int32) int32 { return x ^ x }, zero}, + {"x^0", func(x int32) int32 { return x ^ 0 }, id}, + {"x^-1", func(x int32) int32 { return x ^ -1 }, func(x int32) int32 { return ^x }}, + {"x+0", func(x int32) int32 { return x + 0 }, id}, + {"x-x", func(x int32) int32 { return 
x - x }, zero}, + {"x*0", func(x int32) int32 { return x * 0 }, zero}, + {"^^x", func(x int32) int32 { return ^^x }, id}, + } { + for _, v := range values { + got := test.f(v) + want := test.golden(v) + if want != got { + t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want) + } + } + } + for _, test := range [...]struct { + name string + f func(int32, int32) int32 + golden func(int32, int32) int32 + }{ + {"x|(x|y)", func(x, y int32) int32 { return x | (x | y) }, or}, + {"x|(y|x)", func(x, y int32) int32 { return x | (y | x) }, or}, + {"(x|y)|x", func(x, y int32) int32 { return (x | y) | x }, or}, + {"(y|x)|x", func(x, y int32) int32 { return (y | x) | x }, or}, + {"x&(x&y)", func(x, y int32) int32 { return x & (x & y) }, and}, + {"x&(y&x)", func(x, y int32) int32 { return x & (y & x) }, and}, + {"(x&y)&x", func(x, y int32) int32 { return (x & y) & x }, and}, + {"(y&x)&x", func(x, y int32) int32 { return (y & x) & x }, and}, + {"x^(x^y)", func(x, y int32) int32 { return x ^ (x ^ y) }, y}, + {"x^(y^x)", func(x, y int32) int32 { return x ^ (y ^ x) }, y}, + {"(x^y)^x", func(x, y int32) int32 { return (x ^ y) ^ x }, y}, + {"(y^x)^x", func(x, y int32) int32 { return (y ^ x) ^ x }, y}, + {"-(y-x)", func(x, y int32) int32 { return -(y - x) }, func(x, y int32) int32 { return x - y }}, + {"(x+y)-x", func(x, y int32) int32 { return (x + y) - x }, y}, + {"(y+x)-x", func(x, y int32) int32 { return (y + x) - x }, y}, + } { + for _, v := range values { + for _, w := range values { + got := test.f(v, w) + want := test.golden(v, w) + if want != got { + t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want) + } + } + } + } +} + +func TestLogic16(t *testing.T) { + // test values to determine function equality + values := [...]int16{-1 << 15, 1<<15 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4} + + // golden functions we use repeatedly + zero := func(x int16) int16 { return 0 } + id := func(x int16) int16 { return x } + or := func(x, y int16) int16 { return x | y } + and := func(x, 
y int16) int16 { return x & y } + y := func(x, y int16) int16 { return y } + + for _, test := range [...]struct { + name string + f func(int16) int16 + golden func(int16) int16 + }{ + {"x|x", func(x int16) int16 { return x | x }, id}, + {"x|0", func(x int16) int16 { return x | 0 }, id}, + {"x|-1", func(x int16) int16 { return x | -1 }, func(x int16) int16 { return -1 }}, + {"x&x", func(x int16) int16 { return x & x }, id}, + {"x&0", func(x int16) int16 { return x & 0 }, zero}, + {"x&-1", func(x int16) int16 { return x & -1 }, id}, + {"x^x", func(x int16) int16 { return x ^ x }, zero}, + {"x^0", func(x int16) int16 { return x ^ 0 }, id}, + {"x^-1", func(x int16) int16 { return x ^ -1 }, func(x int16) int16 { return ^x }}, + {"x+0", func(x int16) int16 { return x + 0 }, id}, + {"x-x", func(x int16) int16 { return x - x }, zero}, + {"x*0", func(x int16) int16 { return x * 0 }, zero}, + {"^^x", func(x int16) int16 { return ^^x }, id}, + } { + for _, v := range values { + got := test.f(v) + want := test.golden(v) + if want != got { + t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want) + } + } + } + for _, test := range [...]struct { + name string + f func(int16, int16) int16 + golden func(int16, int16) int16 + }{ + {"x|(x|y)", func(x, y int16) int16 { return x | (x | y) }, or}, + {"x|(y|x)", func(x, y int16) int16 { return x | (y | x) }, or}, + {"(x|y)|x", func(x, y int16) int16 { return (x | y) | x }, or}, + {"(y|x)|x", func(x, y int16) int16 { return (y | x) | x }, or}, + {"x&(x&y)", func(x, y int16) int16 { return x & (x & y) }, and}, + {"x&(y&x)", func(x, y int16) int16 { return x & (y & x) }, and}, + {"(x&y)&x", func(x, y int16) int16 { return (x & y) & x }, and}, + {"(y&x)&x", func(x, y int16) int16 { return (y & x) & x }, and}, + {"x^(x^y)", func(x, y int16) int16 { return x ^ (x ^ y) }, y}, + {"x^(y^x)", func(x, y int16) int16 { return x ^ (y ^ x) }, y}, + {"(x^y)^x", func(x, y int16) int16 { return (x ^ y) ^ x }, y}, + {"(y^x)^x", func(x, y int16) int16 { 
return (y ^ x) ^ x }, y}, + {"-(y-x)", func(x, y int16) int16 { return -(y - x) }, func(x, y int16) int16 { return x - y }}, + {"(x+y)-x", func(x, y int16) int16 { return (x + y) - x }, y}, + {"(y+x)-x", func(x, y int16) int16 { return (y + x) - x }, y}, + } { + for _, v := range values { + for _, w := range values { + got := test.f(v, w) + want := test.golden(v, w) + if want != got { + t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want) + } + } + } + } +} + +func TestLogic8(t *testing.T) { + // test values to determine function equality + values := [...]int8{-1 << 7, 1<<7 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4} + + // golden functions we use repeatedly + zero := func(x int8) int8 { return 0 } + id := func(x int8) int8 { return x } + or := func(x, y int8) int8 { return x | y } + and := func(x, y int8) int8 { return x & y } + y := func(x, y int8) int8 { return y } + + for _, test := range [...]struct { + name string + f func(int8) int8 + golden func(int8) int8 + }{ + {"x|x", func(x int8) int8 { return x | x }, id}, + {"x|0", func(x int8) int8 { return x | 0 }, id}, + {"x|-1", func(x int8) int8 { return x | -1 }, func(x int8) int8 { return -1 }}, + {"x&x", func(x int8) int8 { return x & x }, id}, + {"x&0", func(x int8) int8 { return x & 0 }, zero}, + {"x&-1", func(x int8) int8 { return x & -1 }, id}, + {"x^x", func(x int8) int8 { return x ^ x }, zero}, + {"x^0", func(x int8) int8 { return x ^ 0 }, id}, + {"x^-1", func(x int8) int8 { return x ^ -1 }, func(x int8) int8 { return ^x }}, + {"x+0", func(x int8) int8 { return x + 0 }, id}, + {"x-x", func(x int8) int8 { return x - x }, zero}, + {"x*0", func(x int8) int8 { return x * 0 }, zero}, + {"^^x", func(x int8) int8 { return ^^x }, id}, + } { + for _, v := range values { + got := test.f(v) + want := test.golden(v) + if want != got { + t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want) + } + } + } + for _, test := range [...]struct { + name string + f func(int8, int8) int8 + golden func(int8, int8) int8 + 
}{ + {"x|(x|y)", func(x, y int8) int8 { return x | (x | y) }, or}, + {"x|(y|x)", func(x, y int8) int8 { return x | (y | x) }, or}, + {"(x|y)|x", func(x, y int8) int8 { return (x | y) | x }, or}, + {"(y|x)|x", func(x, y int8) int8 { return (y | x) | x }, or}, + {"x&(x&y)", func(x, y int8) int8 { return x & (x & y) }, and}, + {"x&(y&x)", func(x, y int8) int8 { return x & (y & x) }, and}, + {"(x&y)&x", func(x, y int8) int8 { return (x & y) & x }, and}, + {"(y&x)&x", func(x, y int8) int8 { return (y & x) & x }, and}, + {"x^(x^y)", func(x, y int8) int8 { return x ^ (x ^ y) }, y}, + {"x^(y^x)", func(x, y int8) int8 { return x ^ (y ^ x) }, y}, + {"(x^y)^x", func(x, y int8) int8 { return (x ^ y) ^ x }, y}, + {"(y^x)^x", func(x, y int8) int8 { return (y ^ x) ^ x }, y}, + {"-(y-x)", func(x, y int8) int8 { return -(y - x) }, func(x, y int8) int8 { return x - y }}, + {"(x+y)-x", func(x, y int8) int8 { return (x + y) - x }, y}, + {"(y+x)-x", func(x, y int8) int8 { return (y + x) - x }, y}, + } { + for _, v := range values { + for _, w := range values { + got := test.f(v, w) + want := test.golden(v, w) + if want != got { + t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want) + } + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/math_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/math_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1febe9d42be96c3a486c9ed9aed09ab2e6afb6cc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/math_test.go @@ -0,0 +1,171 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package test + +import ( + "testing" +) + +var Output int + +func BenchmarkDiv64UnsignedSmall(b *testing.B) { + q := uint64(1) + for i := 1; i <= b.N; i++ { + q = (q + uint64(i)) / uint64(i) + } + Output = int(q) +} + +func BenchmarkDiv64Small(b *testing.B) { + q := int64(1) + for i := 1; i <= b.N; i++ { + q = (q + int64(i)) / int64(i) + } + Output = int(q) +} + +func BenchmarkDiv64SmallNegDivisor(b *testing.B) { + q := int64(-1) + for i := 1; i <= b.N; i++ { + q = (int64(i) - q) / -int64(i) + } + Output = int(q) +} + +func BenchmarkDiv64SmallNegDividend(b *testing.B) { + q := int64(-1) + for i := 1; i <= b.N; i++ { + q = -(int64(i) - q) / int64(i) + } + Output = int(q) +} + +func BenchmarkDiv64SmallNegBoth(b *testing.B) { + q := int64(1) + for i := 1; i <= b.N; i++ { + q = -(int64(i) + q) / -int64(i) + } + Output = int(q) +} + +func BenchmarkDiv64Unsigned(b *testing.B) { + q := uint64(1) + for i := 1; i <= b.N; i++ { + q = (uint64(0x7fffffffffffffff) - uint64(i) - (q & 1)) / uint64(i) + } + Output = int(q) +} + +func BenchmarkDiv64(b *testing.B) { + q := int64(1) + for i := 1; i <= b.N; i++ { + q = (int64(0x7fffffffffffffff) - int64(i) - (q & 1)) / int64(i) + } + Output = int(q) +} + +func BenchmarkDiv64NegDivisor(b *testing.B) { + q := int64(-1) + for i := 1; i <= b.N; i++ { + q = (int64(0x7fffffffffffffff) - int64(i) - (q & 1)) / -int64(i) + } + Output = int(q) +} + +func BenchmarkDiv64NegDividend(b *testing.B) { + q := int64(-1) + for i := 1; i <= b.N; i++ { + q = -(int64(0x7fffffffffffffff) - int64(i) - (q & 1)) / int64(i) + } + Output = int(q) +} + +func BenchmarkDiv64NegBoth(b *testing.B) { + q := int64(-1) + for i := 1; i <= b.N; i++ { + q = -(int64(0x7fffffffffffffff) - int64(i) - (q & 1)) / -int64(i) + } + Output = int(q) +} + +func BenchmarkMod64UnsignedSmall(b *testing.B) { + r := uint64(1) + for i := 1; i <= b.N; i++ { + r = (uint64(i) + r) % uint64(i) + } + Output = int(r) +} + +func BenchmarkMod64Small(b *testing.B) { + r := int64(1) + for i := 1; 
i <= b.N; i++ { + r = (int64(i) + r) % int64(i) + } + Output = int(r) +} + +func BenchmarkMod64SmallNegDivisor(b *testing.B) { + r := int64(-1) + for i := 1; i <= b.N; i++ { + r = (int64(i) - r) % -int64(i) + } + Output = int(r) +} + +func BenchmarkMod64SmallNegDividend(b *testing.B) { + r := int64(-1) + for i := 1; i <= b.N; i++ { + r = -(int64(i) - r) % int64(i) + } + Output = int(r) +} + +func BenchmarkMod64SmallNegBoth(b *testing.B) { + r := int64(1) + for i := 1; i <= b.N; i++ { + r = -(int64(i) + r) % -int64(i) + } + Output = int(r) +} + +func BenchmarkMod64Unsigned(b *testing.B) { + r := uint64(1) + for i := 1; i <= b.N; i++ { + r = (uint64(0x7fffffffffffffff) - uint64(i) - (r & 1)) % uint64(i) + } + Output = int(r) +} + +func BenchmarkMod64(b *testing.B) { + r := int64(1) + for i := 1; i <= b.N; i++ { + r = (int64(0x7fffffffffffffff) - int64(i) - (r & 1)) % int64(i) + } + Output = int(r) +} + +func BenchmarkMod64NegDivisor(b *testing.B) { + r := int64(-1) + for i := 1; i <= b.N; i++ { + r = (int64(0x7fffffffffffffff) - int64(i) - (r & 1)) % -int64(i) + } + Output = int(r) +} + +func BenchmarkMod64NegDividend(b *testing.B) { + r := int64(-1) + for i := 1; i <= b.N; i++ { + r = -(int64(0x7fffffffffffffff) - int64(i) - (r & 1)) % int64(i) + } + Output = int(r) +} + +func BenchmarkMod64NegBoth(b *testing.B) { + r := int64(1) + for i := 1; i <= b.N; i++ { + r = -(int64(0x7fffffffffffffff) - int64(i) - (r & 1)) % -int64(i) + } + Output = int(r) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/memcombine_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/memcombine_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3fc4a004a3d583e4a61ca0f18714f7fd2a225d2e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/memcombine_test.go @@ -0,0 +1,199 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "encoding/binary" + "testing" +) + +var gv = [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8} + +//go:noinline +func readGlobalUnaligned() uint64 { + return binary.LittleEndian.Uint64(gv[1:]) +} + +func TestUnalignedGlobal(t *testing.T) { + // Note: this is a test not so much of the result of the read, but of + // the correct compilation of that read. On s390x unaligned global + // accesses fail to compile. + if got, want := readGlobalUnaligned(), uint64(0x0807060504030201); got != want { + t.Errorf("read global %x, want %x", got, want) + } +} + +func TestSpillOfExtendedEndianLoads(t *testing.T) { + b := []byte{0xaa, 0xbb, 0xcc, 0xdd} + + var testCases = []struct { + fn func([]byte) uint64 + want uint64 + }{ + {readUint16le, 0xbbaa}, + {readUint16be, 0xaabb}, + {readUint32le, 0xddccbbaa}, + {readUint32be, 0xaabbccdd}, + } + for _, test := range testCases { + if got := test.fn(b); got != test.want { + t.Errorf("got %x, want %x", got, test.want) + } + } +} + +func readUint16le(b []byte) uint64 { + y := uint64(binary.LittleEndian.Uint16(b)) + nop() // force spill + return y +} + +func readUint16be(b []byte) uint64 { + y := uint64(binary.BigEndian.Uint16(b)) + nop() // force spill + return y +} + +func readUint32le(b []byte) uint64 { + y := uint64(binary.LittleEndian.Uint32(b)) + nop() // force spill + return y +} + +func readUint32be(b []byte) uint64 { + y := uint64(binary.BigEndian.Uint32(b)) + nop() // force spill + return y +} + +//go:noinline +func nop() { +} + +type T32 struct { + a, b uint32 +} + +//go:noinline +func (t *T32) bigEndianLoad() uint64 { + return uint64(t.a)<<32 | uint64(t.b) +} + +//go:noinline +func (t *T32) littleEndianLoad() uint64 { + return uint64(t.a) | (uint64(t.b) << 32) +} + +//go:noinline +func (t *T32) bigEndianStore(x uint64) { + t.a = uint32(x >> 32) + t.b = uint32(x) +} + +//go:noinline +func (t *T32) 
littleEndianStore(x uint64) { + t.a = uint32(x) + t.b = uint32(x >> 32) +} + +type T16 struct { + a, b uint16 +} + +//go:noinline +func (t *T16) bigEndianLoad() uint32 { + return uint32(t.a)<<16 | uint32(t.b) +} + +//go:noinline +func (t *T16) littleEndianLoad() uint32 { + return uint32(t.a) | (uint32(t.b) << 16) +} + +//go:noinline +func (t *T16) bigEndianStore(x uint32) { + t.a = uint16(x >> 16) + t.b = uint16(x) +} + +//go:noinline +func (t *T16) littleEndianStore(x uint32) { + t.a = uint16(x) + t.b = uint16(x >> 16) +} + +type T8 struct { + a, b uint8 +} + +//go:noinline +func (t *T8) bigEndianLoad() uint16 { + return uint16(t.a)<<8 | uint16(t.b) +} + +//go:noinline +func (t *T8) littleEndianLoad() uint16 { + return uint16(t.a) | (uint16(t.b) << 8) +} + +//go:noinline +func (t *T8) bigEndianStore(x uint16) { + t.a = uint8(x >> 8) + t.b = uint8(x) +} + +//go:noinline +func (t *T8) littleEndianStore(x uint16) { + t.a = uint8(x) + t.b = uint8(x >> 8) +} + +func TestIssue64468(t *testing.T) { + t32 := T32{1, 2} + if got, want := t32.bigEndianLoad(), uint64(1<<32+2); got != want { + t.Errorf("T32.bigEndianLoad got %x want %x\n", got, want) + } + if got, want := t32.littleEndianLoad(), uint64(1+2<<32); got != want { + t.Errorf("T32.littleEndianLoad got %x want %x\n", got, want) + } + t16 := T16{1, 2} + if got, want := t16.bigEndianLoad(), uint32(1<<16+2); got != want { + t.Errorf("T16.bigEndianLoad got %x want %x\n", got, want) + } + if got, want := t16.littleEndianLoad(), uint32(1+2<<16); got != want { + t.Errorf("T16.littleEndianLoad got %x want %x\n", got, want) + } + t8 := T8{1, 2} + if got, want := t8.bigEndianLoad(), uint16(1<<8+2); got != want { + t.Errorf("T8.bigEndianLoad got %x want %x\n", got, want) + } + if got, want := t8.littleEndianLoad(), uint16(1+2<<8); got != want { + t.Errorf("T8.littleEndianLoad got %x want %x\n", got, want) + } + t32.bigEndianStore(1<<32 + 2) + if got, want := t32, (T32{1, 2}); got != want { + t.Errorf("T32.bigEndianStore got %x 
want %x\n", got, want) + } + t32.littleEndianStore(1<<32 + 2) + if got, want := t32, (T32{2, 1}); got != want { + t.Errorf("T32.littleEndianStore got %x want %x\n", got, want) + } + t16.bigEndianStore(1<<16 + 2) + if got, want := t16, (T16{1, 2}); got != want { + t.Errorf("T16.bigEndianStore got %x want %x\n", got, want) + } + t16.littleEndianStore(1<<16 + 2) + if got, want := t16, (T16{2, 1}); got != want { + t.Errorf("T16.littleEndianStore got %x want %x\n", got, want) + } + t8.bigEndianStore(1<<8 + 2) + if got, want := t8, (T8{1, 2}); got != want { + t.Errorf("T8.bigEndianStore got %x want %x\n", got, want) + } + t8.littleEndianStore(1<<8 + 2) + if got, want := t8, (T8{2, 1}); got != want { + t.Errorf("T8.littleEndianStore got %x want %x\n", got, want) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/mulconst_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/mulconst_test.go new file mode 100644 index 0000000000000000000000000000000000000000..314cab32de4da761c7961dd4b6ff9546dc76f32d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/mulconst_test.go @@ -0,0 +1,242 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import "testing" + +// Benchmark multiplication of an integer by various constants. +// +// The comment above each sub-benchmark provides an example of how the +// target multiplication operation might be implemented using shift +// (multiplication by a power of 2), addition and subtraction +// operations. It is platform-dependent whether these transformations +// are actually applied. 
+ +var ( + mulSinkI32 int32 + mulSinkI64 int64 + mulSinkU32 uint32 + mulSinkU64 uint64 +) + +func BenchmarkMulconstI32(b *testing.B) { + // 3x = 2x + x + b.Run("3", func(b *testing.B) { + x := int32(1) + for i := 0; i < b.N; i++ { + x *= 3 + } + mulSinkI32 = x + }) + // 5x = 4x + x + b.Run("5", func(b *testing.B) { + x := int32(1) + for i := 0; i < b.N; i++ { + x *= 5 + } + mulSinkI32 = x + }) + // 12x = 8x + 4x + b.Run("12", func(b *testing.B) { + x := int32(1) + for i := 0; i < b.N; i++ { + x *= 12 + } + mulSinkI32 = x + }) + // 120x = 128x - 8x + b.Run("120", func(b *testing.B) { + x := int32(1) + for i := 0; i < b.N; i++ { + x *= 120 + } + mulSinkI32 = x + }) + // -120x = 8x - 120x + b.Run("-120", func(b *testing.B) { + x := int32(1) + for i := 0; i < b.N; i++ { + x *= -120 + } + mulSinkI32 = x + }) + // 65537x = 65536x + x + b.Run("65537", func(b *testing.B) { + x := int32(1) + for i := 0; i < b.N; i++ { + x *= 65537 + } + mulSinkI32 = x + }) + // 65538x = 65536x + 2x + b.Run("65538", func(b *testing.B) { + x := int32(1) + for i := 0; i < b.N; i++ { + x *= 65538 + } + mulSinkI32 = x + }) +} + +func BenchmarkMulconstI64(b *testing.B) { + // 3x = 2x + x + b.Run("3", func(b *testing.B) { + x := int64(1) + for i := 0; i < b.N; i++ { + x *= 3 + } + mulSinkI64 = x + }) + // 5x = 4x + x + b.Run("5", func(b *testing.B) { + x := int64(1) + for i := 0; i < b.N; i++ { + x *= 5 + } + mulSinkI64 = x + }) + // 12x = 8x + 4x + b.Run("12", func(b *testing.B) { + x := int64(1) + for i := 0; i < b.N; i++ { + x *= 12 + } + mulSinkI64 = x + }) + // 120x = 128x - 8x + b.Run("120", func(b *testing.B) { + x := int64(1) + for i := 0; i < b.N; i++ { + x *= 120 + } + mulSinkI64 = x + }) + // -120x = 8x - 120x + b.Run("-120", func(b *testing.B) { + x := int64(1) + for i := 0; i < b.N; i++ { + x *= -120 + } + mulSinkI64 = x + }) + // 65537x = 65536x + x + b.Run("65537", func(b *testing.B) { + x := int64(1) + for i := 0; i < b.N; i++ { + x *= 65537 + } + mulSinkI64 = x + }) + // 65538x = 
65536x + 2x + b.Run("65538", func(b *testing.B) { + x := int64(1) + for i := 0; i < b.N; i++ { + x *= 65538 + } + mulSinkI64 = x + }) +} + +func BenchmarkMulconstU32(b *testing.B) { + // 3x = 2x + x + b.Run("3", func(b *testing.B) { + x := uint32(1) + for i := 0; i < b.N; i++ { + x *= 3 + } + mulSinkU32 = x + }) + // 5x = 4x + x + b.Run("5", func(b *testing.B) { + x := uint32(1) + for i := 0; i < b.N; i++ { + x *= 5 + } + mulSinkU32 = x + }) + // 12x = 8x + 4x + b.Run("12", func(b *testing.B) { + x := uint32(1) + for i := 0; i < b.N; i++ { + x *= 12 + } + mulSinkU32 = x + }) + // 120x = 128x - 8x + b.Run("120", func(b *testing.B) { + x := uint32(1) + for i := 0; i < b.N; i++ { + x *= 120 + } + mulSinkU32 = x + }) + // 65537x = 65536x + x + b.Run("65537", func(b *testing.B) { + x := uint32(1) + for i := 0; i < b.N; i++ { + x *= 65537 + } + mulSinkU32 = x + }) + // 65538x = 65536x + 2x + b.Run("65538", func(b *testing.B) { + x := uint32(1) + for i := 0; i < b.N; i++ { + x *= 65538 + } + mulSinkU32 = x + }) +} + +func BenchmarkMulconstU64(b *testing.B) { + // 3x = 2x + x + b.Run("3", func(b *testing.B) { + x := uint64(1) + for i := 0; i < b.N; i++ { + x *= 3 + } + mulSinkU64 = x + }) + // 5x = 4x + x + b.Run("5", func(b *testing.B) { + x := uint64(1) + for i := 0; i < b.N; i++ { + x *= 5 + } + mulSinkU64 = x + }) + // 12x = 8x + 4x + b.Run("12", func(b *testing.B) { + x := uint64(1) + for i := 0; i < b.N; i++ { + x *= 12 + } + mulSinkU64 = x + }) + // 120x = 128x - 8x + b.Run("120", func(b *testing.B) { + x := uint64(1) + for i := 0; i < b.N; i++ { + x *= 120 + } + mulSinkU64 = x + }) + // 65537x = 65536x + x + b.Run("65537", func(b *testing.B) { + x := uint64(1) + for i := 0; i < b.N; i++ { + x *= 65537 + } + mulSinkU64 = x + }) + // 65538x = 65536x + 2x + b.Run("65538", func(b *testing.B) { + x := uint64(1) + for i := 0; i < b.N; i++ { + x *= 65538 + } + mulSinkU64 = x + }) +} diff --git 
a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/pgo_devirtualize_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/pgo_devirtualize_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f4512436834fc56ef5de13ad97a297942534a0b0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/pgo_devirtualize_test.go @@ -0,0 +1,261 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "bufio" + "fmt" + "internal/testenv" + "os" + "path/filepath" + "regexp" + "testing" +) + +type devirtualization struct { + pos string + callee string +} + +// testPGODevirtualize tests that specific PGO devirtualize rewrites are performed. +func testPGODevirtualize(t *testing.T, dir string, want []devirtualization) { + testenv.MustHaveGoRun(t) + t.Parallel() + + const pkg = "example.com/pgo/devirtualize" + + // Add a go.mod so we have a consistent symbol names in this temp dir. + goMod := fmt.Sprintf(`module %s +go 1.21 +`, pkg) + if err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644); err != nil { + t.Fatalf("error writing go.mod: %v", err) + } + + // Run the test without PGO to ensure that the test assertions are + // correct even in the non-optimized version. + cmd := testenv.CleanCmdEnv(testenv.Command(t, testenv.GoToolPath(t), "test", ".")) + cmd.Dir = dir + b, err := cmd.CombinedOutput() + t.Logf("Test without PGO:\n%s", b) + if err != nil { + t.Fatalf("Test failed without PGO: %v", err) + } + + // Build the test with the profile. 
+ pprof := filepath.Join(dir, "devirt.pprof") + gcflag := fmt.Sprintf("-gcflags=-m=2 -pgoprofile=%s -d=pgodebug=3", pprof) + out := filepath.Join(dir, "test.exe") + cmd = testenv.CleanCmdEnv(testenv.Command(t, testenv.GoToolPath(t), "test", "-o", out, gcflag, ".")) + cmd.Dir = dir + + pr, pw, err := os.Pipe() + if err != nil { + t.Fatalf("error creating pipe: %v", err) + } + defer pr.Close() + cmd.Stdout = pw + cmd.Stderr = pw + + err = cmd.Start() + pw.Close() + if err != nil { + t.Fatalf("error starting go test: %v", err) + } + + got := make(map[devirtualization]struct{}) + + devirtualizedLine := regexp.MustCompile(`(.*): PGO devirtualizing \w+ call .* to (.*)`) + + scanner := bufio.NewScanner(pr) + for scanner.Scan() { + line := scanner.Text() + t.Logf("child: %s", line) + + m := devirtualizedLine.FindStringSubmatch(line) + if m == nil { + continue + } + + d := devirtualization{ + pos: m[1], + callee: m[2], + } + got[d] = struct{}{} + } + if err := cmd.Wait(); err != nil { + t.Fatalf("error running go test: %v", err) + } + if err := scanner.Err(); err != nil { + t.Fatalf("error reading go test output: %v", err) + } + + if len(got) != len(want) { + t.Errorf("mismatched devirtualization count; got %v want %v", got, want) + } + for _, w := range want { + if _, ok := got[w]; ok { + continue + } + t.Errorf("devirtualization %v missing; got %v", w, got) + } + + // Run test with PGO to ensure the assertions are still true. + cmd = testenv.CleanCmdEnv(testenv.Command(t, out)) + cmd.Dir = dir + b, err = cmd.CombinedOutput() + t.Logf("Test with PGO:\n%s", b) + if err != nil { + t.Fatalf("Test failed with PGO: %v", err) + } +} + +// TestPGODevirtualize tests that specific functions are devirtualized when PGO +// is applied to the exact source that was profiled. 
+func TestPGODevirtualize(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("error getting wd: %v", err) + } + srcDir := filepath.Join(wd, "testdata", "pgo", "devirtualize") + + // Copy the module to a scratch location so we can add a go.mod. + dir := t.TempDir() + if err := os.Mkdir(filepath.Join(dir, "mult.pkg"), 0755); err != nil { + t.Fatalf("error creating dir: %v", err) + } + for _, file := range []string{"devirt.go", "devirt_test.go", "devirt.pprof", filepath.Join("mult.pkg", "mult.go")} { + if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil { + t.Fatalf("error copying %s: %v", file, err) + } + } + + want := []devirtualization{ + // ExerciseIface + { + pos: "./devirt.go:101:20", + callee: "mult.Mult.Multiply", + }, + { + pos: "./devirt.go:101:39", + callee: "Add.Add", + }, + // ExerciseFuncConcrete + { + pos: "./devirt.go:173:36", + callee: "AddFn", + }, + { + pos: "./devirt.go:173:15", + callee: "mult.MultFn", + }, + // ExerciseFuncField + { + pos: "./devirt.go:207:35", + callee: "AddFn", + }, + { + pos: "./devirt.go:207:19", + callee: "mult.MultFn", + }, + // ExerciseFuncClosure + // TODO(prattmic): Closure callees not implemented. + //{ + // pos: "./devirt.go:249:27", + // callee: "AddClosure.func1", + //}, + //{ + // pos: "./devirt.go:249:15", + // callee: "mult.MultClosure.func1", + //}, + } + + testPGODevirtualize(t, dir, want) +} + +// Regression test for https://go.dev/issue/65615. If a target function changes +// from non-generic to generic we can't devirtualize it (don't know the type +// parameters), but the compiler should not crash. +func TestLookupFuncGeneric(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("error getting wd: %v", err) + } + srcDir := filepath.Join(wd, "testdata", "pgo", "devirtualize") + + // Copy the module to a scratch location so we can add a go.mod. 
+ dir := t.TempDir() + if err := os.Mkdir(filepath.Join(dir, "mult.pkg"), 0755); err != nil { + t.Fatalf("error creating dir: %v", err) + } + for _, file := range []string{"devirt.go", "devirt_test.go", "devirt.pprof", filepath.Join("mult.pkg", "mult.go")} { + if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil { + t.Fatalf("error copying %s: %v", file, err) + } + } + + // Change MultFn from a concrete function to a parameterized function. + if err := convertMultToGeneric(filepath.Join(dir, "mult.pkg", "mult.go")); err != nil { + t.Fatalf("error editing mult.go: %v", err) + } + + // Same as TestPGODevirtualize except for MultFn, which we cannot + // devirtualize to because it has become generic. + // + // Note that the important part of this test is that the build is + // successful, not the specific devirtualizations. + want := []devirtualization{ + // ExerciseIface + { + pos: "./devirt.go:101:20", + callee: "mult.Mult.Multiply", + }, + { + pos: "./devirt.go:101:39", + callee: "Add.Add", + }, + // ExerciseFuncConcrete + { + pos: "./devirt.go:173:36", + callee: "AddFn", + }, + // ExerciseFuncField + { + pos: "./devirt.go:207:35", + callee: "AddFn", + }, + // ExerciseFuncClosure + // TODO(prattmic): Closure callees not implemented. + //{ + // pos: "./devirt.go:249:27", + // callee: "AddClosure.func1", + //}, + //{ + // pos: "./devirt.go:249:15", + // callee: "mult.MultClosure.func1", + //}, + } + + testPGODevirtualize(t, dir, want) +} + +var multFnRe = regexp.MustCompile(`func MultFn\(a, b int64\) int64`) + +func convertMultToGeneric(path string) error { + content, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("error opening: %w", err) + } + + if !multFnRe.Match(content) { + return fmt.Errorf("MultFn not found; update regexp?") + } + + // Users of MultFn shouldn't need adjustment, type inference should + // work OK. 
+ content = multFnRe.ReplaceAll(content, []byte(`func MultFn[T int32|int64](a, b T) T`)) + + return os.WriteFile(path, content, 0644) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/pgo_inl_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/pgo_inl_test.go new file mode 100644 index 0000000000000000000000000000000000000000..da6c4a53d362288bb3b1b4f7f919d92b5e00b9b3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/pgo_inl_test.go @@ -0,0 +1,344 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "bufio" + "bytes" + "fmt" + "internal/profile" + "internal/testenv" + "io" + "os" + "path/filepath" + "regexp" + "strings" + "testing" +) + +func buildPGOInliningTest(t *testing.T, dir string, gcflag string) []byte { + const pkg = "example.com/pgo/inline" + + // Add a go.mod so we have a consistent symbol names in this temp dir. + goMod := fmt.Sprintf(`module %s +go 1.19 +`, pkg) + if err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644); err != nil { + t.Fatalf("error writing go.mod: %v", err) + } + + exe := filepath.Join(dir, "test.exe") + args := []string{"test", "-c", "-o", exe, "-gcflags=" + gcflag} + cmd := testenv.Command(t, testenv.GoToolPath(t), args...) + cmd.Dir = dir + cmd = testenv.CleanCmdEnv(cmd) + t.Log(cmd) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("build failed: %v, output:\n%s", err, out) + } + return out +} + +// testPGOIntendedInlining tests that specific functions are inlined. +func testPGOIntendedInlining(t *testing.T, dir string) { + testenv.MustHaveGoRun(t) + t.Parallel() + + const pkg = "example.com/pgo/inline" + + want := []string{ + "(*BS).NS", + } + + // The functions which are not expected to be inlined are as follows. 
+ wantNot := []string{ + // The calling edge main->A is hot and the cost of A is larger + // than inlineHotCalleeMaxBudget. + "A", + // The calling edge BenchmarkA -> benchmarkB is cold and the + // cost of A is larger than inlineMaxBudget. + "benchmarkB", + } + + must := map[string]bool{ + "(*BS).NS": true, + } + + notInlinedReason := make(map[string]string) + for _, fname := range want { + fullName := pkg + "." + fname + if _, ok := notInlinedReason[fullName]; ok { + t.Errorf("duplicate func: %s", fullName) + } + notInlinedReason[fullName] = "unknown reason" + } + + // If the compiler emits "cannot inline for function A", the entry A + // in expectedNotInlinedList will be removed. + expectedNotInlinedList := make(map[string]struct{}) + for _, fname := range wantNot { + fullName := pkg + "." + fname + expectedNotInlinedList[fullName] = struct{}{} + } + + // Build the test with the profile. Use a smaller threshold to test. + // TODO: maybe adjust the test to work with default threshold. + pprof := filepath.Join(dir, "inline_hot.pprof") + gcflag := fmt.Sprintf("-m -m -pgoprofile=%s -d=pgoinlinebudget=160,pgoinlinecdfthreshold=90", pprof) + out := buildPGOInliningTest(t, dir, gcflag) + + scanner := bufio.NewScanner(bytes.NewReader(out)) + curPkg := "" + canInline := regexp.MustCompile(`: can inline ([^ ]*)`) + haveInlined := regexp.MustCompile(`: inlining call to ([^ ]*)`) + cannotInline := regexp.MustCompile(`: cannot inline ([^ ]*): (.*)`) + for scanner.Scan() { + line := scanner.Text() + t.Logf("child: %s", line) + if strings.HasPrefix(line, "# ") { + curPkg = line[2:] + splits := strings.Split(curPkg, " ") + curPkg = splits[0] + continue + } + if m := haveInlined.FindStringSubmatch(line); m != nil { + fname := m[1] + delete(notInlinedReason, curPkg+"."+fname) + continue + } + if m := canInline.FindStringSubmatch(line); m != nil { + fname := m[1] + fullname := curPkg + "." 
+ fname + // If function must be inlined somewhere, being inlinable is not enough + if _, ok := must[fullname]; !ok { + delete(notInlinedReason, fullname) + continue + } + } + if m := cannotInline.FindStringSubmatch(line); m != nil { + fname, reason := m[1], m[2] + fullName := curPkg + "." + fname + if _, ok := notInlinedReason[fullName]; ok { + // cmd/compile gave us a reason why + notInlinedReason[fullName] = reason + } + delete(expectedNotInlinedList, fullName) + continue + } + } + if err := scanner.Err(); err != nil { + t.Fatalf("error reading output: %v", err) + } + for fullName, reason := range notInlinedReason { + t.Errorf("%s was not inlined: %s", fullName, reason) + } + + // If the list expectedNotInlinedList is not empty, it indicates + // the functions in the expectedNotInlinedList are marked with caninline. + for fullName, _ := range expectedNotInlinedList { + t.Errorf("%s was expected not inlined", fullName) + } +} + +// TestPGOIntendedInlining tests that specific functions are inlined when PGO +// is applied to the exact source that was profiled. +func TestPGOIntendedInlining(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("error getting wd: %v", err) + } + srcDir := filepath.Join(wd, "testdata/pgo/inline") + + // Copy the module to a scratch location so we can add a go.mod. + dir := t.TempDir() + + for _, file := range []string{"inline_hot.go", "inline_hot_test.go", "inline_hot.pprof"} { + if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil { + t.Fatalf("error copying %s: %v", file, err) + } + } + + testPGOIntendedInlining(t, dir) +} + +// TestPGOIntendedInlining tests that specific functions are inlined when PGO +// is applied to the modified source. 
+func TestPGOIntendedInliningShiftedLines(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("error getting wd: %v", err) + } + srcDir := filepath.Join(wd, "testdata/pgo/inline") + + // Copy the module to a scratch location so we can modify the source. + dir := t.TempDir() + + // Copy most of the files unmodified. + for _, file := range []string{"inline_hot_test.go", "inline_hot.pprof"} { + if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil { + t.Fatalf("error copying %s : %v", file, err) + } + } + + // Add some comments to the top of inline_hot.go. This adjusts the line + // numbers of all of the functions without changing the semantics. + src, err := os.Open(filepath.Join(srcDir, "inline_hot.go")) + if err != nil { + t.Fatalf("error opening src inline_hot.go: %v", err) + } + defer src.Close() + + dst, err := os.Create(filepath.Join(dir, "inline_hot.go")) + if err != nil { + t.Fatalf("error creating dst inline_hot.go: %v", err) + } + defer dst.Close() + + if _, err := io.WriteString(dst, `// Autogenerated +// Lines +`); err != nil { + t.Fatalf("error writing comments to dst: %v", err) + } + + if _, err := io.Copy(dst, src); err != nil { + t.Fatalf("error copying inline_hot.go: %v", err) + } + + dst.Close() + + testPGOIntendedInlining(t, dir) +} + +// TestPGOSingleIndex tests that the sample index can not be 1 and compilation +// will not fail. All it should care about is that the sample type is either +// CPU nanoseconds or samples count, whichever it finds first. +func TestPGOSingleIndex(t *testing.T) { + for _, tc := range []struct { + originalIndex int + }{{ + // The `testdata/pgo/inline/inline_hot.pprof` file is a standard CPU + // profile as the runtime would generate. The 0 index contains the + // value-type samples and value-unit count. The 1 index contains the + // value-type cpu and value-unit nanoseconds. 
These tests ensure that + // the compiler can work with profiles that only have a single index, + // but are either samples count or CPU nanoseconds. + originalIndex: 0, + }, { + originalIndex: 1, + }} { + t.Run(fmt.Sprintf("originalIndex=%d", tc.originalIndex), func(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("error getting wd: %v", err) + } + srcDir := filepath.Join(wd, "testdata/pgo/inline") + + // Copy the module to a scratch location so we can add a go.mod. + dir := t.TempDir() + + originalPprofFile, err := os.Open(filepath.Join(srcDir, "inline_hot.pprof")) + if err != nil { + t.Fatalf("error opening inline_hot.pprof: %v", err) + } + defer originalPprofFile.Close() + + p, err := profile.Parse(originalPprofFile) + if err != nil { + t.Fatalf("error parsing inline_hot.pprof: %v", err) + } + + // Move the samples count value-type to the 0 index. + p.SampleType = []*profile.ValueType{p.SampleType[tc.originalIndex]} + + // Ensure we only have a single set of sample values. + for _, s := range p.Sample { + s.Value = []int64{s.Value[tc.originalIndex]} + } + + modifiedPprofFile, err := os.Create(filepath.Join(dir, "inline_hot.pprof")) + if err != nil { + t.Fatalf("error creating inline_hot.pprof: %v", err) + } + defer modifiedPprofFile.Close() + + if err := p.Write(modifiedPprofFile); err != nil { + t.Fatalf("error writing inline_hot.pprof: %v", err) + } + + for _, file := range []string{"inline_hot.go", "inline_hot_test.go"} { + if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil { + t.Fatalf("error copying %s: %v", file, err) + } + } + + testPGOIntendedInlining(t, dir) + }) + } +} + +func copyFile(dst, src string) error { + s, err := os.Open(src) + if err != nil { + return err + } + defer s.Close() + + d, err := os.Create(dst) + if err != nil { + return err + } + defer d.Close() + + _, err = io.Copy(d, s) + return err +} + +// TestPGOHash tests that PGO optimization decisions can be selected by pgohash. 
+func TestPGOHash(t *testing.T) { + testenv.MustHaveGoRun(t) + t.Parallel() + + const pkg = "example.com/pgo/inline" + + wd, err := os.Getwd() + if err != nil { + t.Fatalf("error getting wd: %v", err) + } + srcDir := filepath.Join(wd, "testdata/pgo/inline") + + // Copy the module to a scratch location so we can add a go.mod. + dir := t.TempDir() + + for _, file := range []string{"inline_hot.go", "inline_hot_test.go", "inline_hot.pprof"} { + if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil { + t.Fatalf("error copying %s: %v", file, err) + } + } + + pprof := filepath.Join(dir, "inline_hot.pprof") + // build with -trimpath so the source location (thus the hash) + // does not depend on the temporary directory path. + gcflag0 := fmt.Sprintf("-pgoprofile=%s -trimpath %s=>%s -d=pgoinlinebudget=160,pgoinlinecdfthreshold=90,pgodebug=1", pprof, dir, pkg) + + // Check that a hash match allows PGO inlining. + const srcPos = "example.com/pgo/inline/inline_hot.go:81:19" + const hashMatch = "pgohash triggered " + srcPos + " (inline)" + pgoDebugRE := regexp.MustCompile(`hot-budget check allows inlining for call .* at ` + strings.ReplaceAll(srcPos, ".", "\\.")) + hash := "v1" // 1 matches srcPos, v for verbose (print source location) + gcflag := gcflag0 + ",pgohash=" + hash + out := buildPGOInliningTest(t, dir, gcflag) + if !bytes.Contains(out, []byte(hashMatch)) || !pgoDebugRE.Match(out) { + t.Errorf("output does not contain expected source line, out:\n%s", out) + } + + // Check that a hash mismatch turns off PGO inlining. 
+ hash = "v0" // 0 should not match srcPos + gcflag = gcflag0 + ",pgohash=" + hash + out = buildPGOInliningTest(t, dir, gcflag) + if bytes.Contains(out, []byte(hashMatch)) || pgoDebugRE.Match(out) { + t.Errorf("output contains unexpected source line, out:\n%s", out) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/race.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/race.go new file mode 100644 index 0000000000000000000000000000000000000000..b7215382eb2a21a56b5143c85c9d0a37c559de9d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/race.go @@ -0,0 +1,64 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !compiler_bootstrap + +package test + +// The racecompile builder only builds packages, but does not build +// or run tests. This is a non-test file to hold cases that (used +// to) trigger compiler data races, so they will be exercised on +// the racecompile builder. +// +// This package is not imported so functions here are not included +// in the actual compiler. + +// Issue 55357: data race when building multiple instantiations of +// generic closures with _ parameters. 
+func Issue55357() { + type U struct { + A int + B string + C string + } + var q T55357[U] + q.Count() + q.List() + + type M struct { + A int64 + B uint32 + C uint32 + } + var q2 T55357[M] + q2.Count() + q2.List() +} + +type T55357[T any] struct{} + +//go:noinline +func (q *T55357[T]) do(w, v bool, fn func(bk []byte, v T) error) error { + return nil +} + +func (q *T55357[T]) Count() (n int, rerr error) { + err := q.do(false, false, func(kb []byte, _ T) error { + n++ + return nil + }) + return n, err +} + +func (q *T55357[T]) List() (list []T, rerr error) { + var l []T + err := q.do(false, true, func(_ []byte, v T) error { + l = append(l, v) + return nil + }) + if err != nil { + return nil, err + } + return l, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/reproduciblebuilds_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/reproduciblebuilds_test.go new file mode 100644 index 0000000000000000000000000000000000000000..466e0c3a38ae1b31236330d4220b3c18aa17bafd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/reproduciblebuilds_test.go @@ -0,0 +1,106 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package test + +import ( + "bytes" + "internal/testenv" + "os" + "path/filepath" + "testing" +) + +func TestReproducibleBuilds(t *testing.T) { + tests := []string{ + "issue20272.go", + "issue27013.go", + "issue30202.go", + } + + testenv.MustHaveGoBuild(t) + iters := 10 + if testing.Short() { + iters = 4 + } + t.Parallel() + for _, test := range tests { + test := test + t.Run(test, func(t *testing.T) { + t.Parallel() + var want []byte + tmp, err := os.CreateTemp("", "") + if err != nil { + t.Fatalf("temp file creation failed: %v", err) + } + defer os.Remove(tmp.Name()) + defer tmp.Close() + for i := 0; i < iters; i++ { + // Note: use -c 2 to expose any nondeterminism which is the result + // of the runtime scheduler. + out, err := testenv.Command(t, testenv.GoToolPath(t), "tool", "compile", "-p=p", "-c", "2", "-o", tmp.Name(), filepath.Join("testdata", "reproducible", test)).CombinedOutput() + if err != nil { + t.Fatalf("failed to compile: %v\n%s", err, out) + } + obj, err := os.ReadFile(tmp.Name()) + if err != nil { + t.Fatalf("failed to read object file: %v", err) + } + if i == 0 { + want = obj + } else { + if !bytes.Equal(want, obj) { + t.Fatalf("builds produced different output after %d iters (%d bytes vs %d bytes)", i, len(want), len(obj)) + } + } + } + }) + } +} + +func TestIssue38068(t *testing.T) { + testenv.MustHaveGoBuild(t) + t.Parallel() + + // Compile a small package with and without the concurrent + // backend, then check to make sure that the resulting archives + // are identical. Note: this uses "go tool compile" instead of + // "go build" since the latter will generate different build IDs + // if it sees different command line flags. 
+ scenarios := []struct { + tag string + args string + libpath string + }{ + {tag: "serial", args: "-c=1"}, + {tag: "concurrent", args: "-c=2"}} + + tmpdir := t.TempDir() + + src := filepath.Join("testdata", "reproducible", "issue38068.go") + for i := range scenarios { + s := &scenarios[i] + s.libpath = filepath.Join(tmpdir, s.tag+".a") + // Note: use of "-p" required in order for DWARF to be generated. + cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "compile", "-p=issue38068", "-buildid=", s.args, "-o", s.libpath, src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("%v: %v:\n%s", cmd.Args, err, out) + } + } + + readBytes := func(fn string) []byte { + payload, err := os.ReadFile(fn) + if err != nil { + t.Fatalf("failed to read executable '%s': %v", fn, err) + } + return payload + } + + b1 := readBytes(scenarios[0].libpath) + b2 := readBytes(scenarios[1].libpath) + if !bytes.Equal(b1, b2) { + t.Fatalf("concurrent and serial builds produced different output") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/shift_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/shift_test.go new file mode 100644 index 0000000000000000000000000000000000000000..dd893a1121c5ef4588e36f821f93fcb98951b542 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/shift_test.go @@ -0,0 +1,1152 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "reflect" + "testing" +) + +// Tests shifts of zero. 
+ +//go:noinline +func ofz64l64(n uint64) int64 { + var x int64 + return x << n +} + +//go:noinline +func ofz64l32(n uint32) int64 { + var x int64 + return x << n +} + +//go:noinline +func ofz64l16(n uint16) int64 { + var x int64 + return x << n +} + +//go:noinline +func ofz64l8(n uint8) int64 { + var x int64 + return x << n +} + +//go:noinline +func ofz64r64(n uint64) int64 { + var x int64 + return x >> n +} + +//go:noinline +func ofz64r32(n uint32) int64 { + var x int64 + return x >> n +} + +//go:noinline +func ofz64r16(n uint16) int64 { + var x int64 + return x >> n +} + +//go:noinline +func ofz64r8(n uint8) int64 { + var x int64 + return x >> n +} + +//go:noinline +func ofz64ur64(n uint64) uint64 { + var x uint64 + return x >> n +} + +//go:noinline +func ofz64ur32(n uint32) uint64 { + var x uint64 + return x >> n +} + +//go:noinline +func ofz64ur16(n uint16) uint64 { + var x uint64 + return x >> n +} + +//go:noinline +func ofz64ur8(n uint8) uint64 { + var x uint64 + return x >> n +} + +//go:noinline +func ofz32l64(n uint64) int32 { + var x int32 + return x << n +} + +//go:noinline +func ofz32l32(n uint32) int32 { + var x int32 + return x << n +} + +//go:noinline +func ofz32l16(n uint16) int32 { + var x int32 + return x << n +} + +//go:noinline +func ofz32l8(n uint8) int32 { + var x int32 + return x << n +} + +//go:noinline +func ofz32r64(n uint64) int32 { + var x int32 + return x >> n +} + +//go:noinline +func ofz32r32(n uint32) int32 { + var x int32 + return x >> n +} + +//go:noinline +func ofz32r16(n uint16) int32 { + var x int32 + return x >> n +} + +//go:noinline +func ofz32r8(n uint8) int32 { + var x int32 + return x >> n +} + +//go:noinline +func ofz32ur64(n uint64) uint32 { + var x uint32 + return x >> n +} + +//go:noinline +func ofz32ur32(n uint32) uint32 { + var x uint32 + return x >> n +} + +//go:noinline +func ofz32ur16(n uint16) uint32 { + var x uint32 + return x >> n +} + +//go:noinline +func ofz32ur8(n uint8) uint32 { + var x uint32 + return x >> 
n +} + +//go:noinline +func ofz16l64(n uint64) int16 { + var x int16 + return x << n +} + +//go:noinline +func ofz16l32(n uint32) int16 { + var x int16 + return x << n +} + +//go:noinline +func ofz16l16(n uint16) int16 { + var x int16 + return x << n +} + +//go:noinline +func ofz16l8(n uint8) int16 { + var x int16 + return x << n +} + +//go:noinline +func ofz16r64(n uint64) int16 { + var x int16 + return x >> n +} + +//go:noinline +func ofz16r32(n uint32) int16 { + var x int16 + return x >> n +} + +//go:noinline +func ofz16r16(n uint16) int16 { + var x int16 + return x >> n +} + +//go:noinline +func ofz16r8(n uint8) int16 { + var x int16 + return x >> n +} + +//go:noinline +func ofz16ur64(n uint64) uint16 { + var x uint16 + return x >> n +} + +//go:noinline +func ofz16ur32(n uint32) uint16 { + var x uint16 + return x >> n +} + +//go:noinline +func ofz16ur16(n uint16) uint16 { + var x uint16 + return x >> n +} + +//go:noinline +func ofz16ur8(n uint8) uint16 { + var x uint16 + return x >> n +} + +//go:noinline +func ofz8l64(n uint64) int8 { + var x int8 + return x << n +} + +//go:noinline +func ofz8l32(n uint32) int8 { + var x int8 + return x << n +} + +//go:noinline +func ofz8l16(n uint16) int8 { + var x int8 + return x << n +} + +//go:noinline +func ofz8l8(n uint8) int8 { + var x int8 + return x << n +} + +//go:noinline +func ofz8r64(n uint64) int8 { + var x int8 + return x >> n +} + +//go:noinline +func ofz8r32(n uint32) int8 { + var x int8 + return x >> n +} + +//go:noinline +func ofz8r16(n uint16) int8 { + var x int8 + return x >> n +} + +//go:noinline +func ofz8r8(n uint8) int8 { + var x int8 + return x >> n +} + +//go:noinline +func ofz8ur64(n uint64) uint8 { + var x uint8 + return x >> n +} + +//go:noinline +func ofz8ur32(n uint32) uint8 { + var x uint8 + return x >> n +} + +//go:noinline +func ofz8ur16(n uint16) uint8 { + var x uint8 + return x >> n +} + +//go:noinline +func ofz8ur8(n uint8) uint8 { + var x uint8 + return x >> n +} + +func TestShiftOfZero(t 
*testing.T) { + if got := ofz64l64(5); got != 0 { + t.Errorf("0<<5 == %d, want 0", got) + } + if got := ofz64l32(5); got != 0 { + t.Errorf("0<<5 == %d, want 0", got) + } + if got := ofz64l16(5); got != 0 { + t.Errorf("0<<5 == %d, want 0", got) + } + if got := ofz64l8(5); got != 0 { + t.Errorf("0<<5 == %d, want 0", got) + } + if got := ofz64r64(5); got != 0 { + t.Errorf("0>>5 == %d, want 0", got) + } + if got := ofz64r32(5); got != 0 { + t.Errorf("0>>5 == %d, want 0", got) + } + if got := ofz64r16(5); got != 0 { + t.Errorf("0>>5 == %d, want 0", got) + } + if got := ofz64r8(5); got != 0 { + t.Errorf("0>>5 == %d, want 0", got) + } + if got := ofz64ur64(5); got != 0 { + t.Errorf("0>>>5 == %d, want 0", got) + } + if got := ofz64ur32(5); got != 0 { + t.Errorf("0>>>5 == %d, want 0", got) + } + if got := ofz64ur16(5); got != 0 { + t.Errorf("0>>>5 == %d, want 0", got) + } + if got := ofz64ur8(5); got != 0 { + t.Errorf("0>>>5 == %d, want 0", got) + } + + if got := ofz32l64(5); got != 0 { + t.Errorf("0<<5 == %d, want 0", got) + } + if got := ofz32l32(5); got != 0 { + t.Errorf("0<<5 == %d, want 0", got) + } + if got := ofz32l16(5); got != 0 { + t.Errorf("0<<5 == %d, want 0", got) + } + if got := ofz32l8(5); got != 0 { + t.Errorf("0<<5 == %d, want 0", got) + } + if got := ofz32r64(5); got != 0 { + t.Errorf("0>>5 == %d, want 0", got) + } + if got := ofz32r32(5); got != 0 { + t.Errorf("0>>5 == %d, want 0", got) + } + if got := ofz32r16(5); got != 0 { + t.Errorf("0>>5 == %d, want 0", got) + } + if got := ofz32r8(5); got != 0 { + t.Errorf("0>>5 == %d, want 0", got) + } + if got := ofz32ur64(5); got != 0 { + t.Errorf("0>>>5 == %d, want 0", got) + } + if got := ofz32ur32(5); got != 0 { + t.Errorf("0>>>5 == %d, want 0", got) + } + if got := ofz32ur16(5); got != 0 { + t.Errorf("0>>>5 == %d, want 0", got) + } + if got := ofz32ur8(5); got != 0 { + t.Errorf("0>>>5 == %d, want 0", got) + } + + if got := ofz16l64(5); got != 0 { + t.Errorf("0<<5 == %d, want 0", got) + } + if got := 
ofz16l32(5); got != 0 { + t.Errorf("0<<5 == %d, want 0", got) + } + if got := ofz16l16(5); got != 0 { + t.Errorf("0<<5 == %d, want 0", got) + } + if got := ofz16l8(5); got != 0 { + t.Errorf("0<<5 == %d, want 0", got) + } + if got := ofz16r64(5); got != 0 { + t.Errorf("0>>5 == %d, want 0", got) + } + if got := ofz16r32(5); got != 0 { + t.Errorf("0>>5 == %d, want 0", got) + } + if got := ofz16r16(5); got != 0 { + t.Errorf("0>>5 == %d, want 0", got) + } + if got := ofz16r8(5); got != 0 { + t.Errorf("0>>5 == %d, want 0", got) + } + if got := ofz16ur64(5); got != 0 { + t.Errorf("0>>>5 == %d, want 0", got) + } + if got := ofz16ur32(5); got != 0 { + t.Errorf("0>>>5 == %d, want 0", got) + } + if got := ofz16ur16(5); got != 0 { + t.Errorf("0>>>5 == %d, want 0", got) + } + if got := ofz16ur8(5); got != 0 { + t.Errorf("0>>>5 == %d, want 0", got) + } + + if got := ofz8l64(5); got != 0 { + t.Errorf("0<<5 == %d, want 0", got) + } + if got := ofz8l32(5); got != 0 { + t.Errorf("0<<5 == %d, want 0", got) + } + if got := ofz8l16(5); got != 0 { + t.Errorf("0<<5 == %d, want 0", got) + } + if got := ofz8l8(5); got != 0 { + t.Errorf("0<<5 == %d, want 0", got) + } + if got := ofz8r64(5); got != 0 { + t.Errorf("0>>5 == %d, want 0", got) + } + if got := ofz8r32(5); got != 0 { + t.Errorf("0>>5 == %d, want 0", got) + } + if got := ofz8r16(5); got != 0 { + t.Errorf("0>>5 == %d, want 0", got) + } + if got := ofz8r8(5); got != 0 { + t.Errorf("0>>5 == %d, want 0", got) + } + if got := ofz8ur64(5); got != 0 { + t.Errorf("0>>>5 == %d, want 0", got) + } + if got := ofz8ur32(5); got != 0 { + t.Errorf("0>>>5 == %d, want 0", got) + } + if got := ofz8ur16(5); got != 0 { + t.Errorf("0>>>5 == %d, want 0", got) + } + if got := ofz8ur8(5); got != 0 { + t.Errorf("0>>>5 == %d, want 0", got) + } +} + +//go:noinline +func byz64l(n int64) int64 { + return n << 0 +} + +//go:noinline +func byz64r(n int64) int64 { + return n >> 0 +} + +//go:noinline +func byz64ur(n uint64) uint64 { + return n >> 0 +} + 
+//go:noinline +func byz32l(n int32) int32 { + return n << 0 +} + +//go:noinline +func byz32r(n int32) int32 { + return n >> 0 +} + +//go:noinline +func byz32ur(n uint32) uint32 { + return n >> 0 +} + +//go:noinline +func byz16l(n int16) int16 { + return n << 0 +} + +//go:noinline +func byz16r(n int16) int16 { + return n >> 0 +} + +//go:noinline +func byz16ur(n uint16) uint16 { + return n >> 0 +} + +//go:noinline +func byz8l(n int8) int8 { + return n << 0 +} + +//go:noinline +func byz8r(n int8) int8 { + return n >> 0 +} + +//go:noinline +func byz8ur(n uint8) uint8 { + return n >> 0 +} + +func TestShiftByZero(t *testing.T) { + { + var n int64 = 0x5555555555555555 + if got := byz64l(n); got != n { + t.Errorf("%x<<0 == %x, want %x", n, got, n) + } + if got := byz64r(n); got != n { + t.Errorf("%x>>0 == %x, want %x", n, got, n) + } + } + { + var n uint64 = 0xaaaaaaaaaaaaaaaa + if got := byz64ur(n); got != n { + t.Errorf("%x>>>0 == %x, want %x", n, got, n) + } + } + + { + var n int32 = 0x55555555 + if got := byz32l(n); got != n { + t.Errorf("%x<<0 == %x, want %x", n, got, n) + } + if got := byz32r(n); got != n { + t.Errorf("%x>>0 == %x, want %x", n, got, n) + } + } + { + var n uint32 = 0xaaaaaaaa + if got := byz32ur(n); got != n { + t.Errorf("%x>>>0 == %x, want %x", n, got, n) + } + } + + { + var n int16 = 0x5555 + if got := byz16l(n); got != n { + t.Errorf("%x<<0 == %x, want %x", n, got, n) + } + if got := byz16r(n); got != n { + t.Errorf("%x>>0 == %x, want %x", n, got, n) + } + } + { + var n uint16 = 0xaaaa + if got := byz16ur(n); got != n { + t.Errorf("%x>>>0 == %x, want %x", n, got, n) + } + } + + { + var n int8 = 0x55 + if got := byz8l(n); got != n { + t.Errorf("%x<<0 == %x, want %x", n, got, n) + } + if got := byz8r(n); got != n { + t.Errorf("%x>>0 == %x, want %x", n, got, n) + } + } + { + var n uint8 = 0x55 + if got := byz8ur(n); got != n { + t.Errorf("%x>>>0 == %x, want %x", n, got, n) + } + } +} + +//go:noinline +func two64l(x int64) int64 { + return x << 1 << 1 
+} + +//go:noinline +func two64r(x int64) int64 { + return x >> 1 >> 1 +} + +//go:noinline +func two64ur(x uint64) uint64 { + return x >> 1 >> 1 +} + +//go:noinline +func two32l(x int32) int32 { + return x << 1 << 1 +} + +//go:noinline +func two32r(x int32) int32 { + return x >> 1 >> 1 +} + +//go:noinline +func two32ur(x uint32) uint32 { + return x >> 1 >> 1 +} + +//go:noinline +func two16l(x int16) int16 { + return x << 1 << 1 +} + +//go:noinline +func two16r(x int16) int16 { + return x >> 1 >> 1 +} + +//go:noinline +func two16ur(x uint16) uint16 { + return x >> 1 >> 1 +} + +//go:noinline +func two8l(x int8) int8 { + return x << 1 << 1 +} + +//go:noinline +func two8r(x int8) int8 { + return x >> 1 >> 1 +} + +//go:noinline +func two8ur(x uint8) uint8 { + return x >> 1 >> 1 +} + +func TestShiftCombine(t *testing.T) { + if got, want := two64l(4), int64(16); want != got { + t.Errorf("4<<1<<1 == %d, want %d", got, want) + } + if got, want := two64r(64), int64(16); want != got { + t.Errorf("64>>1>>1 == %d, want %d", got, want) + } + if got, want := two64ur(64), uint64(16); want != got { + t.Errorf("64>>1>>1 == %d, want %d", got, want) + } + if got, want := two32l(4), int32(16); want != got { + t.Errorf("4<<1<<1 == %d, want %d", got, want) + } + if got, want := two32r(64), int32(16); want != got { + t.Errorf("64>>1>>1 == %d, want %d", got, want) + } + if got, want := two32ur(64), uint32(16); want != got { + t.Errorf("64>>1>>1 == %d, want %d", got, want) + } + if got, want := two16l(4), int16(16); want != got { + t.Errorf("4<<1<<1 == %d, want %d", got, want) + } + if got, want := two16r(64), int16(16); want != got { + t.Errorf("64>>1>>1 == %d, want %d", got, want) + } + if got, want := two16ur(64), uint16(16); want != got { + t.Errorf("64>>1>>1 == %d, want %d", got, want) + } + if got, want := two8l(4), int8(16); want != got { + t.Errorf("4<<1<<1 == %d, want %d", got, want) + } + if got, want := two8r(64), int8(16); want != got { + t.Errorf("64>>1>>1 == %d, want %d", got, 
want) + } + if got, want := two8ur(64), uint8(16); want != got { + t.Errorf("64>>1>>1 == %d, want %d", got, want) + } + +} + +//go:noinline +func three64l(x int64) int64 { + return x << 3 >> 1 << 2 +} + +//go:noinline +func three64ul(x uint64) uint64 { + return x << 3 >> 1 << 2 +} + +//go:noinline +func three64r(x int64) int64 { + return x >> 3 << 1 >> 2 +} + +//go:noinline +func three64ur(x uint64) uint64 { + return x >> 3 << 1 >> 2 +} + +//go:noinline +func three32l(x int32) int32 { + return x << 3 >> 1 << 2 +} + +//go:noinline +func three32ul(x uint32) uint32 { + return x << 3 >> 1 << 2 +} + +//go:noinline +func three32r(x int32) int32 { + return x >> 3 << 1 >> 2 +} + +//go:noinline +func three32ur(x uint32) uint32 { + return x >> 3 << 1 >> 2 +} + +//go:noinline +func three16l(x int16) int16 { + return x << 3 >> 1 << 2 +} + +//go:noinline +func three16ul(x uint16) uint16 { + return x << 3 >> 1 << 2 +} + +//go:noinline +func three16r(x int16) int16 { + return x >> 3 << 1 >> 2 +} + +//go:noinline +func three16ur(x uint16) uint16 { + return x >> 3 << 1 >> 2 +} + +//go:noinline +func three8l(x int8) int8 { + return x << 3 >> 1 << 2 +} + +//go:noinline +func three8ul(x uint8) uint8 { + return x << 3 >> 1 << 2 +} + +//go:noinline +func three8r(x int8) int8 { + return x >> 3 << 1 >> 2 +} + +//go:noinline +func three8ur(x uint8) uint8 { + return x >> 3 << 1 >> 2 +} + +func TestShiftCombine3(t *testing.T) { + if got, want := three64l(4), int64(64); want != got { + t.Errorf("4<<1<<1 == %d, want %d", got, want) + } + if got, want := three64ul(4), uint64(64); want != got { + t.Errorf("4<<1<<1 == %d, want %d", got, want) + } + if got, want := three64r(64), int64(4); want != got { + t.Errorf("64>>1>>1 == %d, want %d", got, want) + } + if got, want := three64ur(64), uint64(4); want != got { + t.Errorf("64>>1>>1 == %d, want %d", got, want) + } + if got, want := three32l(4), int32(64); want != got { + t.Errorf("4<<1<<1 == %d, want %d", got, want) + } + if got, want := 
three32ul(4), uint32(64); want != got { + t.Errorf("4<<1<<1 == %d, want %d", got, want) + } + if got, want := three32r(64), int32(4); want != got { + t.Errorf("64>>1>>1 == %d, want %d", got, want) + } + if got, want := three32ur(64), uint32(4); want != got { + t.Errorf("64>>1>>1 == %d, want %d", got, want) + } + if got, want := three16l(4), int16(64); want != got { + t.Errorf("4<<1<<1 == %d, want %d", got, want) + } + if got, want := three16ul(4), uint16(64); want != got { + t.Errorf("4<<1<<1 == %d, want %d", got, want) + } + if got, want := three16r(64), int16(4); want != got { + t.Errorf("64>>1>>1 == %d, want %d", got, want) + } + if got, want := three16ur(64), uint16(4); want != got { + t.Errorf("64>>1>>1 == %d, want %d", got, want) + } + if got, want := three8l(4), int8(64); want != got { + t.Errorf("4<<1<<1 == %d, want %d", got, want) + } + if got, want := three8ul(4), uint8(64); want != got { + t.Errorf("4<<1<<1 == %d, want %d", got, want) + } + if got, want := three8r(64), int8(4); want != got { + t.Errorf("64>>1>>1 == %d, want %d", got, want) + } + if got, want := three8ur(64), uint8(4); want != got { + t.Errorf("64>>1>>1 == %d, want %d", got, want) + } +} + +var ( + one64 int64 = 1 + one64u uint64 = 1 + one32 int32 = 1 + one32u uint32 = 1 + one16 int16 = 1 + one16u uint16 = 1 + one8 int8 = 1 + one8u uint8 = 1 +) + +func TestShiftLargeCombine(t *testing.T) { + var N uint64 = 0x8000000000000000 + if one64<>N>>N == 1 { + t.Errorf("shift overflow mishandled") + } + if one64u>>N>>N == 1 { + t.Errorf("shift overflow mishandled") + } + if one32<>N>>N == 1 { + t.Errorf("shift overflow mishandled") + } + if one32u>>N>>N == 1 { + t.Errorf("shift overflow mishandled") + } + if one16<>N>>N == 1 { + t.Errorf("shift overflow mishandled") + } + if one16u>>N>>N == 1 { + t.Errorf("shift overflow mishandled") + } + if one8<>N>>N == 1 { + t.Errorf("shift overflow mishandled") + } + if one8u>>N>>N == 1 { + t.Errorf("shift overflow mishandled") + } +} + +func 
TestShiftLargeCombine3(t *testing.T) { + var N uint64 = 0x8000000000000001 + if one64<>2<>2<>N<<2>>N == 1 { + t.Errorf("shift overflow mishandled") + } + if one64u>>N<<2>>N == 1 { + t.Errorf("shift overflow mishandled") + } + if one32<>2<>2<>N<<2>>N == 1 { + t.Errorf("shift overflow mishandled") + } + if one32u>>N<<2>>N == 1 { + t.Errorf("shift overflow mishandled") + } + if one16<>2<>2<>N<<2>>N == 1 { + t.Errorf("shift overflow mishandled") + } + if one16u>>N<<2>>N == 1 { + t.Errorf("shift overflow mishandled") + } + if one8<>2<>2<>N<<2>>N == 1 { + t.Errorf("shift overflow mishandled") + } + if one8u>>N<<2>>N == 1 { + t.Errorf("shift overflow mishandled") + } +} + +func TestShiftGeneric(t *testing.T) { + for _, test := range [...]struct { + valueWidth int + signed bool + shiftWidth int + left bool + f interface{} + }{ + {64, true, 64, true, func(n int64, s uint64) int64 { return n << s }}, + {64, true, 64, false, func(n int64, s uint64) int64 { return n >> s }}, + {64, false, 64, false, func(n uint64, s uint64) uint64 { return n >> s }}, + {64, true, 32, true, func(n int64, s uint32) int64 { return n << s }}, + {64, true, 32, false, func(n int64, s uint32) int64 { return n >> s }}, + {64, false, 32, false, func(n uint64, s uint32) uint64 { return n >> s }}, + {64, true, 16, true, func(n int64, s uint16) int64 { return n << s }}, + {64, true, 16, false, func(n int64, s uint16) int64 { return n >> s }}, + {64, false, 16, false, func(n uint64, s uint16) uint64 { return n >> s }}, + {64, true, 8, true, func(n int64, s uint8) int64 { return n << s }}, + {64, true, 8, false, func(n int64, s uint8) int64 { return n >> s }}, + {64, false, 8, false, func(n uint64, s uint8) uint64 { return n >> s }}, + + {32, true, 64, true, func(n int32, s uint64) int32 { return n << s }}, + {32, true, 64, false, func(n int32, s uint64) int32 { return n >> s }}, + {32, false, 64, false, func(n uint32, s uint64) uint32 { return n >> s }}, + {32, true, 32, true, func(n int32, s uint32) int32 
{ return n << s }}, + {32, true, 32, false, func(n int32, s uint32) int32 { return n >> s }}, + {32, false, 32, false, func(n uint32, s uint32) uint32 { return n >> s }}, + {32, true, 16, true, func(n int32, s uint16) int32 { return n << s }}, + {32, true, 16, false, func(n int32, s uint16) int32 { return n >> s }}, + {32, false, 16, false, func(n uint32, s uint16) uint32 { return n >> s }}, + {32, true, 8, true, func(n int32, s uint8) int32 { return n << s }}, + {32, true, 8, false, func(n int32, s uint8) int32 { return n >> s }}, + {32, false, 8, false, func(n uint32, s uint8) uint32 { return n >> s }}, + + {16, true, 64, true, func(n int16, s uint64) int16 { return n << s }}, + {16, true, 64, false, func(n int16, s uint64) int16 { return n >> s }}, + {16, false, 64, false, func(n uint16, s uint64) uint16 { return n >> s }}, + {16, true, 32, true, func(n int16, s uint32) int16 { return n << s }}, + {16, true, 32, false, func(n int16, s uint32) int16 { return n >> s }}, + {16, false, 32, false, func(n uint16, s uint32) uint16 { return n >> s }}, + {16, true, 16, true, func(n int16, s uint16) int16 { return n << s }}, + {16, true, 16, false, func(n int16, s uint16) int16 { return n >> s }}, + {16, false, 16, false, func(n uint16, s uint16) uint16 { return n >> s }}, + {16, true, 8, true, func(n int16, s uint8) int16 { return n << s }}, + {16, true, 8, false, func(n int16, s uint8) int16 { return n >> s }}, + {16, false, 8, false, func(n uint16, s uint8) uint16 { return n >> s }}, + + {8, true, 64, true, func(n int8, s uint64) int8 { return n << s }}, + {8, true, 64, false, func(n int8, s uint64) int8 { return n >> s }}, + {8, false, 64, false, func(n uint8, s uint64) uint8 { return n >> s }}, + {8, true, 32, true, func(n int8, s uint32) int8 { return n << s }}, + {8, true, 32, false, func(n int8, s uint32) int8 { return n >> s }}, + {8, false, 32, false, func(n uint8, s uint32) uint8 { return n >> s }}, + {8, true, 16, true, func(n int8, s uint16) int8 { return n 
<< s }}, + {8, true, 16, false, func(n int8, s uint16) int8 { return n >> s }}, + {8, false, 16, false, func(n uint8, s uint16) uint8 { return n >> s }}, + {8, true, 8, true, func(n int8, s uint8) int8 { return n << s }}, + {8, true, 8, false, func(n int8, s uint8) int8 { return n >> s }}, + {8, false, 8, false, func(n uint8, s uint8) uint8 { return n >> s }}, + } { + fv := reflect.ValueOf(test.f) + var args [2]reflect.Value + for i := 0; i < test.valueWidth; i++ { + // Build value to be shifted. + var n int64 = 1 + for j := 0; j < i; j++ { + n <<= 1 + } + args[0] = reflect.ValueOf(n).Convert(fv.Type().In(0)) + for s := 0; s <= test.shiftWidth; s++ { + args[1] = reflect.ValueOf(s).Convert(fv.Type().In(1)) + + // Compute desired result. We're testing variable shifts + // assuming constant shifts are correct. + r := n + var op string + switch { + case test.left: + op = "<<" + for j := 0; j < s; j++ { + r <<= 1 + } + switch test.valueWidth { + case 32: + r = int64(int32(r)) + case 16: + r = int64(int16(r)) + case 8: + r = int64(int8(r)) + } + case test.signed: + op = ">>" + switch test.valueWidth { + case 32: + r = int64(int32(r)) + case 16: + r = int64(int16(r)) + case 8: + r = int64(int8(r)) + } + for j := 0; j < s; j++ { + r >>= 1 + } + default: + op = ">>>" + for j := 0; j < s; j++ { + r = int64(uint64(r) >> 1) + } + } + + // Call function. + res := fv.Call(args[:])[0].Convert(reflect.ValueOf(r).Type()) + + if res.Int() != r { + t.Errorf("%s%dx%d(%x,%x)=%x, want %x", op, test.valueWidth, test.shiftWidth, n, s, res.Int(), r) + } + } + } + } +} + +var shiftSink64 int64 + +func BenchmarkShiftArithmeticRight(b *testing.B) { + x := shiftSink64 + for i := 0; i < b.N; i++ { + x = x >> (i & 63) + } + shiftSink64 = x +} + +//go:noinline +func incorrectRotate1(x, c uint64) uint64 { + // This should not compile to a rotate instruction. 
+ return x<>(64-c) +} + +//go:noinline +func incorrectRotate2(x uint64) uint64 { + var c uint64 = 66 + // This should not compile to a rotate instruction. + return x<>(64-c) +} + +func TestIncorrectRotate(t *testing.T) { + if got := incorrectRotate1(1, 66); got != 0 { + t.Errorf("got %x want 0", got) + } + if got := incorrectRotate2(1); got != 0 { + t.Errorf("got %x want 0", got) + } +} + +//go:noinline +func variableShiftOverflow64x8(x int64, y, z uint8) (a, b, c int64) { + // Verify junk bits are ignored when doing a variable shift. + return x >> (y + z), x << (y + z), int64(uint64(x) >> (y + z)) +} + +//go:noinline +func variableShiftOverflow32x8(x int32, y, z uint8) (a, b, c int32) { + // Verify junk bits are ignored when doing a variable shift. + return x >> (y + z), x << (y + z), int32(uint32(x) >> (y + z)) +} + +//go:noinline +func variableShiftOverflow16x8(x int16, y, z uint8) (a, b, c int16) { + // Verify junk bits are ignored when doing a variable shift. + return x >> (y + z), x << (y + z), int16(uint16(x) >> (y + z)) +} + +//go:noinline +func variableShiftOverflow8x8(x int8, y, z uint8) (a, b, c int8) { + // Verify junk bits are ignored when doing a variable shift. + return x >> (y + z), x << (y + z), int8(uint8(x) >> (y + z)) +} + +//go:noinline +func variableShiftOverflow64x16(x int64, y, z uint16) (a, b, c int64) { + // Verify junk bits are ignored when doing a variable shift. + return x >> (y + z), x << (y + z), int64(uint64(x) >> (y + z)) +} + +//go:noinline +func variableShiftOverflow32x16(x int32, y, z uint16) (a, b, c int32) { + // Verify junk bits are ignored when doing a variable shift. + return x >> (y + z), x << (y + z), int32(uint32(x) >> (y + z)) +} + +//go:noinline +func variableShiftOverflow16x16(x int16, y, z uint16) (a, b, c int16) { + // Verify junk bits are ignored when doing a variable shift. 
+ return x >> (y + z), x << (y + z), int16(uint16(x) >> (y + z)) +} + +//go:noinline +func variableShiftOverflow8x16(x int8, y, z uint16) (a, b, c int8) { + // Verify junk bits are ignored when doing a variable shift. + return x >> (y + z), x << (y + z), int8(uint8(x) >> (y + z)) +} + +//go:noinline +func makeU8(x uint64) uint8 { + // Ensure the upper portions of the register are clear before testing large shift values + // using non-native types (e.g uint8 on PPC64). + return uint8(x) +} + +//go:noinline +func makeU16(x uint64) uint16 { + // Ensure the upper portions of the register are clear before testing large shift values + // using non-native types (e.g uint8 on PPC64). + return uint16(x) +} + +func TestShiftOverflow(t *testing.T) { + if v, w, z := variableShiftOverflow64x8(-64, makeU8(255), 2); v != -32 || w != -128 || z != 0x7fffffffffffffe0 { + t.Errorf("got %d %d 0x%x, expected -32 -128 0x7fffffffffffffe0", v, w, z) + } + if v, w, z := variableShiftOverflow32x8(-64, makeU8(255), 2); v != -32 || w != -128 || z != 0x7fffffe0 { + t.Errorf("got %d %d 0x%x, expected -32 -128 0x7fffffe0", v, w, z) + } + if v, w, z := variableShiftOverflow16x8(-64, makeU8(255), 2); v != -32 || w != -128 || z != 0x7fe0 { + t.Errorf("got %d %d 0x%x, expected -32 -128 0x7fe0", v, w, z) + } + if v, w, z := variableShiftOverflow8x8(-64, makeU8(255), 2); v != -32 || w != -128 || z != 0x60 { + t.Errorf("got %d %d 0x%x, expected -32 -128 0x60", v, w, z) + } + if v, w, z := variableShiftOverflow64x16(-64, makeU16(0xffff), 2); v != -32 || w != -128 || z != 0x7fffffffffffffe0 { + t.Errorf("got %d %d 0x%x, expected -32 -128 0x7fffffffffffffe0", v, w, z) + } + if v, w, z := variableShiftOverflow32x16(-64, makeU16(0xffff), 2); v != -32 || w != -128 || z != 0x7fffffe0 { + t.Errorf("got %d %d 0x%x, expected -32 -128 0x7fffffe0,", v, w, z) + } + if v, w, z := variableShiftOverflow16x16(-64, makeU16(0xffff), 2); v != -32 || w != -128 || z != 0x7fe0 { + t.Errorf("got %d %d 0x%x, expected -32 -128 
0x7fe0", v, w, z) + } + if v, w, z := variableShiftOverflow8x16(-64, makeU16(0xffff), 2); v != -32 || w != -128 || z != 0x60 { + t.Errorf("got %d %d 0x%x, expected -32 -128 0x60", v, w, z) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/ssa_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/ssa_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7f2faa1140d445438c5ee2fb447a6ab50b7756b4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/ssa_test.go @@ -0,0 +1,179 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "internal/testenv" + "os" + "path/filepath" + "runtime" + "strings" + "testing" +) + +// runGenTest runs a test-generator, then runs the generated test. +// Generated test can either fail in compilation or execution. +// The environment variable parameter(s) is passed to the run +// of the generated test. +func runGenTest(t *testing.T, filename, tmpname string, ev ...string) { + testenv.MustHaveGoRun(t) + gotool := testenv.GoToolPath(t) + var stdout, stderr bytes.Buffer + cmd := testenv.Command(t, gotool, "run", filepath.Join("testdata", filename)) + cmd.Stdout = &stdout + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + t.Fatalf("Failed: %v:\nOut: %s\nStderr: %s\n", err, &stdout, &stderr) + } + // Write stdout into a temporary file + rungo := filepath.Join(t.TempDir(), "run.go") + ok := os.WriteFile(rungo, stdout.Bytes(), 0600) + if ok != nil { + t.Fatalf("Failed to create temporary file " + rungo) + } + + stdout.Reset() + stderr.Reset() + cmd = testenv.Command(t, gotool, "run", "-gcflags=-d=ssa/check/on", rungo) + cmd.Stdout = &stdout + cmd.Stderr = &stderr + cmd.Env = append(cmd.Env, ev...) 
+ err := cmd.Run() + if err != nil { + t.Fatalf("Failed: %v:\nOut: %s\nStderr: %s\n", err, &stdout, &stderr) + } + if s := stderr.String(); s != "" { + t.Errorf("Stderr = %s\nWant empty", s) + } + if s := stdout.String(); s != "" { + t.Errorf("Stdout = %s\nWant empty", s) + } +} + +func TestGenFlowGraph(t *testing.T) { + if testing.Short() { + t.Skip("not run in short mode.") + } + runGenTest(t, "flowgraph_generator1.go", "ssa_fg_tmp1") +} + +// TestCode runs all the tests in the testdata directory as subtests. +// These tests are special because we want to run them with different +// compiler flags set (and thus they can't just be _test.go files in +// this directory). +func TestCode(t *testing.T) { + testenv.MustHaveGoBuild(t) + gotool := testenv.GoToolPath(t) + + // Make a temporary directory to work in. + tmpdir := t.TempDir() + + // Find all the test functions (and the files containing them). + var srcs []string // files containing Test functions + type test struct { + name string // TestFoo + usesFloat bool // might use float operations + } + var tests []test + files, err := os.ReadDir("testdata") + if err != nil { + t.Fatalf("can't read testdata directory: %v", err) + } + for _, f := range files { + if !strings.HasSuffix(f.Name(), "_test.go") { + continue + } + text, err := os.ReadFile(filepath.Join("testdata", f.Name())) + if err != nil { + t.Fatalf("can't read testdata/%s: %v", f.Name(), err) + } + fset := token.NewFileSet() + code, err := parser.ParseFile(fset, f.Name(), text, 0) + if err != nil { + t.Fatalf("can't parse testdata/%s: %v", f.Name(), err) + } + srcs = append(srcs, filepath.Join("testdata", f.Name())) + foundTest := false + for _, d := range code.Decls { + fd, ok := d.(*ast.FuncDecl) + if !ok { + continue + } + if !strings.HasPrefix(fd.Name.Name, "Test") { + continue + } + if fd.Recv != nil { + continue + } + if fd.Type.Results != nil { + continue + } + if len(fd.Type.Params.List) != 1 { + continue + } + p := fd.Type.Params.List[0] + if 
len(p.Names) != 1 { + continue + } + s, ok := p.Type.(*ast.StarExpr) + if !ok { + continue + } + sel, ok := s.X.(*ast.SelectorExpr) + if !ok { + continue + } + base, ok := sel.X.(*ast.Ident) + if !ok { + continue + } + if base.Name != "testing" { + continue + } + if sel.Sel.Name != "T" { + continue + } + // Found a testing function. + tests = append(tests, test{name: fd.Name.Name, usesFloat: bytes.Contains(text, []byte("float"))}) + foundTest = true + } + if !foundTest { + t.Fatalf("test file testdata/%s has no tests in it", f.Name()) + } + } + + flags := []string{""} + if runtime.GOARCH == "arm" || runtime.GOARCH == "mips" || runtime.GOARCH == "mips64" || runtime.GOARCH == "386" { + flags = append(flags, ",softfloat") + } + for _, flag := range flags { + args := []string{"test", "-c", "-gcflags=-d=ssa/check/on" + flag, "-o", filepath.Join(tmpdir, "code.test")} + args = append(args, srcs...) + out, err := testenv.Command(t, gotool, args...).CombinedOutput() + if err != nil || len(out) != 0 { + t.Fatalf("Build failed: %v\n%s\n", err, out) + } + + // Now we have a test binary. Run it with all the tests as subtests of this one. + for _, test := range tests { + test := test + if flag == ",softfloat" && !test.usesFloat { + // No point in running the soft float version if the test doesn't use floats. 
+ continue + } + t.Run(fmt.Sprintf("%s%s", test.name[4:], flag), func(t *testing.T) { + out, err := testenv.Command(t, filepath.Join(tmpdir, "code.test"), "-test.run=^"+test.name+"$").CombinedOutput() + if err != nil || string(out) != "PASS\n" { + t.Errorf("Failed:\n%s\n", out) + } + }) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/switch_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/switch_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1d12361cbb69545f32a7f8cf5f3c90d72c658baf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/switch_test.go @@ -0,0 +1,296 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "math/bits" + "testing" +) + +func BenchmarkSwitch8Predictable(b *testing.B) { + benchmarkSwitch8(b, true) +} +func BenchmarkSwitch8Unpredictable(b *testing.B) { + benchmarkSwitch8(b, false) +} +func benchmarkSwitch8(b *testing.B, predictable bool) { + n := 0 + rng := newRNG() + for i := 0; i < b.N; i++ { + rng = rng.next(predictable) + switch rng.value() & 7 { + case 0: + n += 1 + case 1: + n += 2 + case 2: + n += 3 + case 3: + n += 4 + case 4: + n += 5 + case 5: + n += 6 + case 6: + n += 7 + case 7: + n += 8 + } + } + sink = n +} + +func BenchmarkSwitch32Predictable(b *testing.B) { + benchmarkSwitch32(b, true) +} +func BenchmarkSwitch32Unpredictable(b *testing.B) { + benchmarkSwitch32(b, false) +} +func benchmarkSwitch32(b *testing.B, predictable bool) { + n := 0 + rng := newRNG() + for i := 0; i < b.N; i++ { + rng = rng.next(predictable) + switch rng.value() & 31 { + case 0, 1, 2: + n += 1 + case 4, 5, 6: + n += 2 + case 8, 9, 10: + n += 3 + case 12, 13, 14: + n += 4 + case 16, 17, 18: + n += 5 + case 20, 21, 22: + n += 6 + case 24, 25, 26: + n += 7 + case 28, 29, 30: + n += 8 + default: + n += 9 + } 
+ } + sink = n +} + +func BenchmarkSwitchStringPredictable(b *testing.B) { + benchmarkSwitchString(b, true) +} +func BenchmarkSwitchStringUnpredictable(b *testing.B) { + benchmarkSwitchString(b, false) +} +func benchmarkSwitchString(b *testing.B, predictable bool) { + a := []string{ + "foo", + "foo1", + "foo22", + "foo333", + "foo4444", + "foo55555", + "foo666666", + "foo7777777", + } + n := 0 + rng := newRNG() + for i := 0; i < b.N; i++ { + rng = rng.next(predictable) + switch a[rng.value()&7] { + case "foo": + n += 1 + case "foo1": + n += 2 + case "foo22": + n += 3 + case "foo333": + n += 4 + case "foo4444": + n += 5 + case "foo55555": + n += 6 + case "foo666666": + n += 7 + case "foo7777777": + n += 8 + } + } + sink = n +} + +func BenchmarkSwitchTypePredictable(b *testing.B) { + benchmarkSwitchType(b, true) +} +func BenchmarkSwitchTypeUnpredictable(b *testing.B) { + benchmarkSwitchType(b, false) +} +func benchmarkSwitchType(b *testing.B, predictable bool) { + a := []any{ + int8(1), + int16(2), + int32(3), + int64(4), + uint8(5), + uint16(6), + uint32(7), + uint64(8), + } + n := 0 + rng := newRNG() + for i := 0; i < b.N; i++ { + rng = rng.next(predictable) + switch a[rng.value()&7].(type) { + case int8: + n += 1 + case int16: + n += 2 + case int32: + n += 3 + case int64: + n += 4 + case uint8: + n += 5 + case uint16: + n += 6 + case uint32: + n += 7 + case uint64: + n += 8 + } + } + sink = n +} + +func BenchmarkSwitchInterfaceTypePredictable(b *testing.B) { + benchmarkSwitchInterfaceType(b, true) +} +func BenchmarkSwitchInterfaceTypeUnpredictable(b *testing.B) { + benchmarkSwitchInterfaceType(b, false) +} + +type SI0 interface { + si0() +} +type ST0 struct { +} + +func (ST0) si0() { +} + +type SI1 interface { + si1() +} +type ST1 struct { +} + +func (ST1) si1() { +} + +type SI2 interface { + si2() +} +type ST2 struct { +} + +func (ST2) si2() { +} + +type SI3 interface { + si3() +} +type ST3 struct { +} + +func (ST3) si3() { +} + +type SI4 interface { + si4() +} 
+type ST4 struct { +} + +func (ST4) si4() { +} + +type SI5 interface { + si5() +} +type ST5 struct { +} + +func (ST5) si5() { +} + +type SI6 interface { + si6() +} +type ST6 struct { +} + +func (ST6) si6() { +} + +type SI7 interface { + si7() +} +type ST7 struct { +} + +func (ST7) si7() { +} + +func benchmarkSwitchInterfaceType(b *testing.B, predictable bool) { + a := []any{ + ST0{}, + ST1{}, + ST2{}, + ST3{}, + ST4{}, + ST5{}, + ST6{}, + ST7{}, + } + n := 0 + rng := newRNG() + for i := 0; i < b.N; i++ { + rng = rng.next(predictable) + switch a[rng.value()&7].(type) { + case SI0: + n += 1 + case SI1: + n += 2 + case SI2: + n += 3 + case SI3: + n += 4 + case SI4: + n += 5 + case SI5: + n += 6 + case SI6: + n += 7 + case SI7: + n += 8 + } + } + sink = n +} + +// A simple random number generator used to make switches conditionally predictable. +type rng uint64 + +func newRNG() rng { + return 1 +} +func (r rng) next(predictable bool) rng { + if predictable { + return r + 1 + } + return rng(bits.RotateLeft64(uint64(r), 13) * 0x3c374d) +} +func (r rng) value() uint64 { + return uint64(r) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/test.go new file mode 100644 index 0000000000000000000000000000000000000000..195c65a9ea0312ced6f505372ab486bb06aba220 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/test.go @@ -0,0 +1,5 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package test diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/addressed_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/addressed_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4cc9ac4d5b27cd1afd512bb2a173f4bbb99f3337 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/addressed_test.go @@ -0,0 +1,214 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "testing" +) + +var output string + +func mypanic(t *testing.T, s string) { + t.Fatalf(s + "\n" + output) + +} + +func assertEqual(t *testing.T, x, y int) { + if x != y { + mypanic(t, fmt.Sprintf("assertEqual failed got %d, want %d", x, y)) + } +} + +func TestAddressed(t *testing.T) { + x := f1_ssa(2, 3) + output += fmt.Sprintln("*x is", *x) + output += fmt.Sprintln("Gratuitously use some stack") + output += fmt.Sprintln("*x is", *x) + assertEqual(t, *x, 9) + + w := f3a_ssa(6) + output += fmt.Sprintln("*w is", *w) + output += fmt.Sprintln("Gratuitously use some stack") + output += fmt.Sprintln("*w is", *w) + assertEqual(t, *w, 6) + + y := f3b_ssa(12) + output += fmt.Sprintln("*y.(*int) is", *y.(*int)) + output += fmt.Sprintln("Gratuitously use some stack") + output += fmt.Sprintln("*y.(*int) is", *y.(*int)) + assertEqual(t, *y.(*int), 12) + + z := f3c_ssa(8) + output += fmt.Sprintln("*z.(*int) is", *z.(*int)) + output += fmt.Sprintln("Gratuitously use some stack") + output += fmt.Sprintln("*z.(*int) is", *z.(*int)) + assertEqual(t, *z.(*int), 8) + + args(t) + test_autos(t) +} + +//go:noinline +func f1_ssa(x, y int) *int { + x = x*y + y + return &x +} + +//go:noinline +func f3a_ssa(x int) *int { + return &x +} + +//go:noinline +func f3b_ssa(x int) interface{} { // ./foo.go:15: internal error: f3b_ssa ~r1 (type interface {}) recorded as 
live on entry + return &x +} + +//go:noinline +func f3c_ssa(y int) interface{} { + x := y + return &x +} + +type V struct { + p *V + w, x int64 +} + +func args(t *testing.T) { + v := V{p: nil, w: 1, x: 1} + a := V{p: &v, w: 2, x: 2} + b := V{p: &v, w: 0, x: 0} + i := v.args_ssa(a, b) + output += fmt.Sprintln("i=", i) + assertEqual(t, int(i), 2) +} + +//go:noinline +func (v V) args_ssa(a, b V) int64 { + if v.w == 0 { + return v.x + } + if v.w == 1 { + return a.x + } + if v.w == 2 { + return b.x + } + b.p.p = &a // v.p in caller = &a + + return -1 +} + +func test_autos(t *testing.T) { + test(t, 11) + test(t, 12) + test(t, 13) + test(t, 21) + test(t, 22) + test(t, 23) + test(t, 31) + test(t, 32) +} + +func test(t *testing.T, which int64) { + output += fmt.Sprintln("test", which) + v1 := V{w: 30, x: 3, p: nil} + v2, v3 := v1.autos_ssa(which, 10, 1, 20, 2) + if which != v2.val() { + output += fmt.Sprintln("Expected which=", which, "got v2.val()=", v2.val()) + mypanic(t, "Failure of expected V value") + } + if v2.p.val() != v3.val() { + output += fmt.Sprintln("Expected v2.p.val()=", v2.p.val(), "got v3.val()=", v3.val()) + mypanic(t, "Failure of expected V.p value") + } + if which != v3.p.p.p.p.p.p.p.val() { + output += fmt.Sprintln("Expected which=", which, "got v3.p.p.p.p.p.p.p.val()=", v3.p.p.p.p.p.p.p.val()) + mypanic(t, "Failure of expected V.p value") + } +} + +func (v V) val() int64 { + return v.w + v.x +} + +// autos_ssa uses contents of v and parameters w1, w2, x1, x2 +// to initialize a bunch of locals, all of which have their +// address taken to force heap allocation, and then based on +// the value of which a pair of those locals are copied in +// various ways to the two results y, and z, which are also +// addressed. Which is expected to be one of 11-13, 21-23, 31, 32, +// and y.val() should be equal to which and y.p.val() should +// be equal to z.val(). Also, x(.p)**8 == x; that is, the +// autos are all linked into a ring. 
+// +//go:noinline +func (v V) autos_ssa(which, w1, x1, w2, x2 int64) (y, z V) { + fill_ssa(v.w, v.x, &v, v.p) // gratuitous no-op to force addressing + var a, b, c, d, e, f, g, h V + fill_ssa(w1, x1, &a, &b) + fill_ssa(w1, x2, &b, &c) + fill_ssa(w1, v.x, &c, &d) + fill_ssa(w2, x1, &d, &e) + fill_ssa(w2, x2, &e, &f) + fill_ssa(w2, v.x, &f, &g) + fill_ssa(v.w, x1, &g, &h) + fill_ssa(v.w, x2, &h, &a) + switch which { + case 11: + y = a + z.getsI(&b) + case 12: + y.gets(&b) + z = c + case 13: + y.gets(&c) + z = d + case 21: + y.getsI(&d) + z.gets(&e) + case 22: + y = e + z = f + case 23: + y.gets(&f) + z.getsI(&g) + case 31: + y = g + z.gets(&h) + case 32: + y.getsI(&h) + z = a + default: + + panic("") + } + return +} + +// gets is an address-mentioning way of implementing +// structure assignment. +// +//go:noinline +func (to *V) gets(from *V) { + *to = *from +} + +// gets is an address-and-interface-mentioning way of +// implementing structure assignment. +// +//go:noinline +func (to *V) getsI(from interface{}) { + *to = *from.(*V) +} + +// fill_ssa initializes r with V{w:w, x:x, p:p} +// +//go:noinline +func fill_ssa(w, x int64, r, p *V) { + *r = V{w: w, x: x, p: p} +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/append_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/append_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6663ce75fa49305d776a3daa3789e1cde70cafae --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/append_test.go @@ -0,0 +1,61 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// append_ssa.go tests append operations. 
+package main + +import "testing" + +//go:noinline +func appendOne_ssa(a []int, x int) []int { + return append(a, x) +} + +//go:noinline +func appendThree_ssa(a []int, x, y, z int) []int { + return append(a, x, y, z) +} + +func eqBytes(a, b []int) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +func expect(t *testing.T, got, want []int) { + if eqBytes(got, want) { + return + } + t.Errorf("expected %v, got %v\n", want, got) +} + +func testAppend(t *testing.T) { + var store [7]int + a := store[:0] + + a = appendOne_ssa(a, 1) + expect(t, a, []int{1}) + a = appendThree_ssa(a, 2, 3, 4) + expect(t, a, []int{1, 2, 3, 4}) + a = appendThree_ssa(a, 5, 6, 7) + expect(t, a, []int{1, 2, 3, 4, 5, 6, 7}) + if &a[0] != &store[0] { + t.Errorf("unnecessary grow") + } + a = appendOne_ssa(a, 8) + expect(t, a, []int{1, 2, 3, 4, 5, 6, 7, 8}) + if &a[0] == &store[0] { + t.Errorf("didn't grow") + } +} + +func TestAppend(t *testing.T) { + testAppend(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/arithBoundary_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/arithBoundary_test.go new file mode 100644 index 0000000000000000000000000000000000000000..777b7cdd601fc94467d43aaaed9dca08b6055815 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/arithBoundary_test.go @@ -0,0 +1,694 @@ +// Code generated by gen/arithBoundaryGen.go. DO NOT EDIT. 
+ +package main + +import "testing" + +type utd64 struct { + a, b uint64 + add, sub, mul, div, mod uint64 +} +type itd64 struct { + a, b int64 + add, sub, mul, div, mod int64 +} +type utd32 struct { + a, b uint32 + add, sub, mul, div, mod uint32 +} +type itd32 struct { + a, b int32 + add, sub, mul, div, mod int32 +} +type utd16 struct { + a, b uint16 + add, sub, mul, div, mod uint16 +} +type itd16 struct { + a, b int16 + add, sub, mul, div, mod int16 +} +type utd8 struct { + a, b uint8 + add, sub, mul, div, mod uint8 +} +type itd8 struct { + a, b int8 + add, sub, mul, div, mod int8 +} + +//go:noinline +func add_uint64_ssa(a, b uint64) uint64 { + return a + b +} + +//go:noinline +func sub_uint64_ssa(a, b uint64) uint64 { + return a - b +} + +//go:noinline +func div_uint64_ssa(a, b uint64) uint64 { + return a / b +} + +//go:noinline +func mod_uint64_ssa(a, b uint64) uint64 { + return a % b +} + +//go:noinline +func mul_uint64_ssa(a, b uint64) uint64 { + return a * b +} + +//go:noinline +func add_int64_ssa(a, b int64) int64 { + return a + b +} + +//go:noinline +func sub_int64_ssa(a, b int64) int64 { + return a - b +} + +//go:noinline +func div_int64_ssa(a, b int64) int64 { + return a / b +} + +//go:noinline +func mod_int64_ssa(a, b int64) int64 { + return a % b +} + +//go:noinline +func mul_int64_ssa(a, b int64) int64 { + return a * b +} + +//go:noinline +func add_uint32_ssa(a, b uint32) uint32 { + return a + b +} + +//go:noinline +func sub_uint32_ssa(a, b uint32) uint32 { + return a - b +} + +//go:noinline +func div_uint32_ssa(a, b uint32) uint32 { + return a / b +} + +//go:noinline +func mod_uint32_ssa(a, b uint32) uint32 { + return a % b +} + +//go:noinline +func mul_uint32_ssa(a, b uint32) uint32 { + return a * b +} + +//go:noinline +func add_int32_ssa(a, b int32) int32 { + return a + b +} + +//go:noinline +func sub_int32_ssa(a, b int32) int32 { + return a - b +} + +//go:noinline +func div_int32_ssa(a, b int32) int32 { + return a / b +} + +//go:noinline +func 
mod_int32_ssa(a, b int32) int32 { + return a % b +} + +//go:noinline +func mul_int32_ssa(a, b int32) int32 { + return a * b +} + +//go:noinline +func add_uint16_ssa(a, b uint16) uint16 { + return a + b +} + +//go:noinline +func sub_uint16_ssa(a, b uint16) uint16 { + return a - b +} + +//go:noinline +func div_uint16_ssa(a, b uint16) uint16 { + return a / b +} + +//go:noinline +func mod_uint16_ssa(a, b uint16) uint16 { + return a % b +} + +//go:noinline +func mul_uint16_ssa(a, b uint16) uint16 { + return a * b +} + +//go:noinline +func add_int16_ssa(a, b int16) int16 { + return a + b +} + +//go:noinline +func sub_int16_ssa(a, b int16) int16 { + return a - b +} + +//go:noinline +func div_int16_ssa(a, b int16) int16 { + return a / b +} + +//go:noinline +func mod_int16_ssa(a, b int16) int16 { + return a % b +} + +//go:noinline +func mul_int16_ssa(a, b int16) int16 { + return a * b +} + +//go:noinline +func add_uint8_ssa(a, b uint8) uint8 { + return a + b +} + +//go:noinline +func sub_uint8_ssa(a, b uint8) uint8 { + return a - b +} + +//go:noinline +func div_uint8_ssa(a, b uint8) uint8 { + return a / b +} + +//go:noinline +func mod_uint8_ssa(a, b uint8) uint8 { + return a % b +} + +//go:noinline +func mul_uint8_ssa(a, b uint8) uint8 { + return a * b +} + +//go:noinline +func add_int8_ssa(a, b int8) int8 { + return a + b +} + +//go:noinline +func sub_int8_ssa(a, b int8) int8 { + return a - b +} + +//go:noinline +func div_int8_ssa(a, b int8) int8 { + return a / b +} + +//go:noinline +func mod_int8_ssa(a, b int8) int8 { + return a % b +} + +//go:noinline +func mul_int8_ssa(a, b int8) int8 { + return a * b +} + +var uint64_data []utd64 = []utd64{utd64{a: 0, b: 0, add: 0, sub: 0, mul: 0}, + utd64{a: 0, b: 1, add: 1, sub: 18446744073709551615, mul: 0, div: 0, mod: 0}, + utd64{a: 0, b: 4294967296, add: 4294967296, sub: 18446744069414584320, mul: 0, div: 0, mod: 0}, + utd64{a: 0, b: 18446744073709551615, add: 18446744073709551615, sub: 1, mul: 0, div: 0, mod: 0}, + utd64{a: 1, 
b: 0, add: 1, sub: 1, mul: 0}, + utd64{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, + utd64{a: 1, b: 4294967296, add: 4294967297, sub: 18446744069414584321, mul: 4294967296, div: 0, mod: 1}, + utd64{a: 1, b: 18446744073709551615, add: 0, sub: 2, mul: 18446744073709551615, div: 0, mod: 1}, + utd64{a: 4294967296, b: 0, add: 4294967296, sub: 4294967296, mul: 0}, + utd64{a: 4294967296, b: 1, add: 4294967297, sub: 4294967295, mul: 4294967296, div: 4294967296, mod: 0}, + utd64{a: 4294967296, b: 4294967296, add: 8589934592, sub: 0, mul: 0, div: 1, mod: 0}, + utd64{a: 4294967296, b: 18446744073709551615, add: 4294967295, sub: 4294967297, mul: 18446744069414584320, div: 0, mod: 4294967296}, + utd64{a: 18446744073709551615, b: 0, add: 18446744073709551615, sub: 18446744073709551615, mul: 0}, + utd64{a: 18446744073709551615, b: 1, add: 0, sub: 18446744073709551614, mul: 18446744073709551615, div: 18446744073709551615, mod: 0}, + utd64{a: 18446744073709551615, b: 4294967296, add: 4294967295, sub: 18446744069414584319, mul: 18446744069414584320, div: 4294967295, mod: 4294967295}, + utd64{a: 18446744073709551615, b: 18446744073709551615, add: 18446744073709551614, sub: 0, mul: 1, div: 1, mod: 0}, +} +var int64_data []itd64 = []itd64{itd64{a: -9223372036854775808, b: -9223372036854775808, add: 0, sub: 0, mul: 0, div: 1, mod: 0}, + itd64{a: -9223372036854775808, b: -9223372036854775807, add: 1, sub: -1, mul: -9223372036854775808, div: 1, mod: -1}, + itd64{a: -9223372036854775808, b: -4294967296, add: 9223372032559808512, sub: -9223372032559808512, mul: 0, div: 2147483648, mod: 0}, + itd64{a: -9223372036854775808, b: -1, add: 9223372036854775807, sub: -9223372036854775807, mul: -9223372036854775808, div: -9223372036854775808, mod: 0}, + itd64{a: -9223372036854775808, b: 0, add: -9223372036854775808, sub: -9223372036854775808, mul: 0}, + itd64{a: -9223372036854775808, b: 1, add: -9223372036854775807, sub: 9223372036854775807, mul: -9223372036854775808, div: 
-9223372036854775808, mod: 0}, + itd64{a: -9223372036854775808, b: 4294967296, add: -9223372032559808512, sub: 9223372032559808512, mul: 0, div: -2147483648, mod: 0}, + itd64{a: -9223372036854775808, b: 9223372036854775806, add: -2, sub: 2, mul: 0, div: -1, mod: -2}, + itd64{a: -9223372036854775808, b: 9223372036854775807, add: -1, sub: 1, mul: -9223372036854775808, div: -1, mod: -1}, + itd64{a: -9223372036854775807, b: -9223372036854775808, add: 1, sub: 1, mul: -9223372036854775808, div: 0, mod: -9223372036854775807}, + itd64{a: -9223372036854775807, b: -9223372036854775807, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, + itd64{a: -9223372036854775807, b: -4294967296, add: 9223372032559808513, sub: -9223372032559808511, mul: -4294967296, div: 2147483647, mod: -4294967295}, + itd64{a: -9223372036854775807, b: -1, add: -9223372036854775808, sub: -9223372036854775806, mul: 9223372036854775807, div: 9223372036854775807, mod: 0}, + itd64{a: -9223372036854775807, b: 0, add: -9223372036854775807, sub: -9223372036854775807, mul: 0}, + itd64{a: -9223372036854775807, b: 1, add: -9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: -9223372036854775807, mod: 0}, + itd64{a: -9223372036854775807, b: 4294967296, add: -9223372032559808511, sub: 9223372032559808513, mul: 4294967296, div: -2147483647, mod: -4294967295}, + itd64{a: -9223372036854775807, b: 9223372036854775806, add: -1, sub: 3, mul: 9223372036854775806, div: -1, mod: -1}, + itd64{a: -9223372036854775807, b: 9223372036854775807, add: 0, sub: 2, mul: -1, div: -1, mod: 0}, + itd64{a: -4294967296, b: -9223372036854775808, add: 9223372032559808512, sub: 9223372032559808512, mul: 0, div: 0, mod: -4294967296}, + itd64{a: -4294967296, b: -9223372036854775807, add: 9223372032559808513, sub: 9223372032559808511, mul: -4294967296, div: 0, mod: -4294967296}, + itd64{a: -4294967296, b: -4294967296, add: -8589934592, sub: 0, mul: 0, div: 1, mod: 0}, + itd64{a: -4294967296, b: -1, add: -4294967297, sub: 
-4294967295, mul: 4294967296, div: 4294967296, mod: 0}, + itd64{a: -4294967296, b: 0, add: -4294967296, sub: -4294967296, mul: 0}, + itd64{a: -4294967296, b: 1, add: -4294967295, sub: -4294967297, mul: -4294967296, div: -4294967296, mod: 0}, + itd64{a: -4294967296, b: 4294967296, add: 0, sub: -8589934592, mul: 0, div: -1, mod: 0}, + itd64{a: -4294967296, b: 9223372036854775806, add: 9223372032559808510, sub: 9223372032559808514, mul: 8589934592, div: 0, mod: -4294967296}, + itd64{a: -4294967296, b: 9223372036854775807, add: 9223372032559808511, sub: 9223372032559808513, mul: 4294967296, div: 0, mod: -4294967296}, + itd64{a: -1, b: -9223372036854775808, add: 9223372036854775807, sub: 9223372036854775807, mul: -9223372036854775808, div: 0, mod: -1}, + itd64{a: -1, b: -9223372036854775807, add: -9223372036854775808, sub: 9223372036854775806, mul: 9223372036854775807, div: 0, mod: -1}, + itd64{a: -1, b: -4294967296, add: -4294967297, sub: 4294967295, mul: 4294967296, div: 0, mod: -1}, + itd64{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0}, + itd64{a: -1, b: 0, add: -1, sub: -1, mul: 0}, + itd64{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0}, + itd64{a: -1, b: 4294967296, add: 4294967295, sub: -4294967297, mul: -4294967296, div: 0, mod: -1}, + itd64{a: -1, b: 9223372036854775806, add: 9223372036854775805, sub: -9223372036854775807, mul: -9223372036854775806, div: 0, mod: -1}, + itd64{a: -1, b: 9223372036854775807, add: 9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: 0, mod: -1}, + itd64{a: 0, b: -9223372036854775808, add: -9223372036854775808, sub: -9223372036854775808, mul: 0, div: 0, mod: 0}, + itd64{a: 0, b: -9223372036854775807, add: -9223372036854775807, sub: 9223372036854775807, mul: 0, div: 0, mod: 0}, + itd64{a: 0, b: -4294967296, add: -4294967296, sub: 4294967296, mul: 0, div: 0, mod: 0}, + itd64{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0}, + itd64{a: 0, b: 0, add: 0, sub: 0, mul: 0}, + itd64{a: 0, b: 
1, add: 1, sub: -1, mul: 0, div: 0, mod: 0}, + itd64{a: 0, b: 4294967296, add: 4294967296, sub: -4294967296, mul: 0, div: 0, mod: 0}, + itd64{a: 0, b: 9223372036854775806, add: 9223372036854775806, sub: -9223372036854775806, mul: 0, div: 0, mod: 0}, + itd64{a: 0, b: 9223372036854775807, add: 9223372036854775807, sub: -9223372036854775807, mul: 0, div: 0, mod: 0}, + itd64{a: 1, b: -9223372036854775808, add: -9223372036854775807, sub: -9223372036854775807, mul: -9223372036854775808, div: 0, mod: 1}, + itd64{a: 1, b: -9223372036854775807, add: -9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: 0, mod: 1}, + itd64{a: 1, b: -4294967296, add: -4294967295, sub: 4294967297, mul: -4294967296, div: 0, mod: 1}, + itd64{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0}, + itd64{a: 1, b: 0, add: 1, sub: 1, mul: 0}, + itd64{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, + itd64{a: 1, b: 4294967296, add: 4294967297, sub: -4294967295, mul: 4294967296, div: 0, mod: 1}, + itd64{a: 1, b: 9223372036854775806, add: 9223372036854775807, sub: -9223372036854775805, mul: 9223372036854775806, div: 0, mod: 1}, + itd64{a: 1, b: 9223372036854775807, add: -9223372036854775808, sub: -9223372036854775806, mul: 9223372036854775807, div: 0, mod: 1}, + itd64{a: 4294967296, b: -9223372036854775808, add: -9223372032559808512, sub: -9223372032559808512, mul: 0, div: 0, mod: 4294967296}, + itd64{a: 4294967296, b: -9223372036854775807, add: -9223372032559808511, sub: -9223372032559808513, mul: 4294967296, div: 0, mod: 4294967296}, + itd64{a: 4294967296, b: -4294967296, add: 0, sub: 8589934592, mul: 0, div: -1, mod: 0}, + itd64{a: 4294967296, b: -1, add: 4294967295, sub: 4294967297, mul: -4294967296, div: -4294967296, mod: 0}, + itd64{a: 4294967296, b: 0, add: 4294967296, sub: 4294967296, mul: 0}, + itd64{a: 4294967296, b: 1, add: 4294967297, sub: 4294967295, mul: 4294967296, div: 4294967296, mod: 0}, + itd64{a: 4294967296, b: 4294967296, add: 8589934592, sub: 0, 
mul: 0, div: 1, mod: 0}, + itd64{a: 4294967296, b: 9223372036854775806, add: -9223372032559808514, sub: -9223372032559808510, mul: -8589934592, div: 0, mod: 4294967296}, + itd64{a: 4294967296, b: 9223372036854775807, add: -9223372032559808513, sub: -9223372032559808511, mul: -4294967296, div: 0, mod: 4294967296}, + itd64{a: 9223372036854775806, b: -9223372036854775808, add: -2, sub: -2, mul: 0, div: 0, mod: 9223372036854775806}, + itd64{a: 9223372036854775806, b: -9223372036854775807, add: -1, sub: -3, mul: 9223372036854775806, div: 0, mod: 9223372036854775806}, + itd64{a: 9223372036854775806, b: -4294967296, add: 9223372032559808510, sub: -9223372032559808514, mul: 8589934592, div: -2147483647, mod: 4294967294}, + itd64{a: 9223372036854775806, b: -1, add: 9223372036854775805, sub: 9223372036854775807, mul: -9223372036854775806, div: -9223372036854775806, mod: 0}, + itd64{a: 9223372036854775806, b: 0, add: 9223372036854775806, sub: 9223372036854775806, mul: 0}, + itd64{a: 9223372036854775806, b: 1, add: 9223372036854775807, sub: 9223372036854775805, mul: 9223372036854775806, div: 9223372036854775806, mod: 0}, + itd64{a: 9223372036854775806, b: 4294967296, add: -9223372032559808514, sub: 9223372032559808510, mul: -8589934592, div: 2147483647, mod: 4294967294}, + itd64{a: 9223372036854775806, b: 9223372036854775806, add: -4, sub: 0, mul: 4, div: 1, mod: 0}, + itd64{a: 9223372036854775806, b: 9223372036854775807, add: -3, sub: -1, mul: -9223372036854775806, div: 0, mod: 9223372036854775806}, + itd64{a: 9223372036854775807, b: -9223372036854775808, add: -1, sub: -1, mul: -9223372036854775808, div: 0, mod: 9223372036854775807}, + itd64{a: 9223372036854775807, b: -9223372036854775807, add: 0, sub: -2, mul: -1, div: -1, mod: 0}, + itd64{a: 9223372036854775807, b: -4294967296, add: 9223372032559808511, sub: -9223372032559808513, mul: 4294967296, div: -2147483647, mod: 4294967295}, + itd64{a: 9223372036854775807, b: -1, add: 9223372036854775806, sub: -9223372036854775808, 
mul: -9223372036854775807, div: -9223372036854775807, mod: 0}, + itd64{a: 9223372036854775807, b: 0, add: 9223372036854775807, sub: 9223372036854775807, mul: 0}, + itd64{a: 9223372036854775807, b: 1, add: -9223372036854775808, sub: 9223372036854775806, mul: 9223372036854775807, div: 9223372036854775807, mod: 0}, + itd64{a: 9223372036854775807, b: 4294967296, add: -9223372032559808513, sub: 9223372032559808511, mul: -4294967296, div: 2147483647, mod: 4294967295}, + itd64{a: 9223372036854775807, b: 9223372036854775806, add: -3, sub: 1, mul: -9223372036854775806, div: 1, mod: 1}, + itd64{a: 9223372036854775807, b: 9223372036854775807, add: -2, sub: 0, mul: 1, div: 1, mod: 0}, +} +var uint32_data []utd32 = []utd32{utd32{a: 0, b: 0, add: 0, sub: 0, mul: 0}, + utd32{a: 0, b: 1, add: 1, sub: 4294967295, mul: 0, div: 0, mod: 0}, + utd32{a: 0, b: 4294967295, add: 4294967295, sub: 1, mul: 0, div: 0, mod: 0}, + utd32{a: 1, b: 0, add: 1, sub: 1, mul: 0}, + utd32{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, + utd32{a: 1, b: 4294967295, add: 0, sub: 2, mul: 4294967295, div: 0, mod: 1}, + utd32{a: 4294967295, b: 0, add: 4294967295, sub: 4294967295, mul: 0}, + utd32{a: 4294967295, b: 1, add: 0, sub: 4294967294, mul: 4294967295, div: 4294967295, mod: 0}, + utd32{a: 4294967295, b: 4294967295, add: 4294967294, sub: 0, mul: 1, div: 1, mod: 0}, +} +var int32_data []itd32 = []itd32{itd32{a: -2147483648, b: -2147483648, add: 0, sub: 0, mul: 0, div: 1, mod: 0}, + itd32{a: -2147483648, b: -2147483647, add: 1, sub: -1, mul: -2147483648, div: 1, mod: -1}, + itd32{a: -2147483648, b: -1, add: 2147483647, sub: -2147483647, mul: -2147483648, div: -2147483648, mod: 0}, + itd32{a: -2147483648, b: 0, add: -2147483648, sub: -2147483648, mul: 0}, + itd32{a: -2147483648, b: 1, add: -2147483647, sub: 2147483647, mul: -2147483648, div: -2147483648, mod: 0}, + itd32{a: -2147483648, b: 2147483647, add: -1, sub: 1, mul: -2147483648, div: -1, mod: -1}, + itd32{a: -2147483647, b: -2147483648, add: 1, 
sub: 1, mul: -2147483648, div: 0, mod: -2147483647}, + itd32{a: -2147483647, b: -2147483647, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, + itd32{a: -2147483647, b: -1, add: -2147483648, sub: -2147483646, mul: 2147483647, div: 2147483647, mod: 0}, + itd32{a: -2147483647, b: 0, add: -2147483647, sub: -2147483647, mul: 0}, + itd32{a: -2147483647, b: 1, add: -2147483646, sub: -2147483648, mul: -2147483647, div: -2147483647, mod: 0}, + itd32{a: -2147483647, b: 2147483647, add: 0, sub: 2, mul: -1, div: -1, mod: 0}, + itd32{a: -1, b: -2147483648, add: 2147483647, sub: 2147483647, mul: -2147483648, div: 0, mod: -1}, + itd32{a: -1, b: -2147483647, add: -2147483648, sub: 2147483646, mul: 2147483647, div: 0, mod: -1}, + itd32{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0}, + itd32{a: -1, b: 0, add: -1, sub: -1, mul: 0}, + itd32{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0}, + itd32{a: -1, b: 2147483647, add: 2147483646, sub: -2147483648, mul: -2147483647, div: 0, mod: -1}, + itd32{a: 0, b: -2147483648, add: -2147483648, sub: -2147483648, mul: 0, div: 0, mod: 0}, + itd32{a: 0, b: -2147483647, add: -2147483647, sub: 2147483647, mul: 0, div: 0, mod: 0}, + itd32{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0}, + itd32{a: 0, b: 0, add: 0, sub: 0, mul: 0}, + itd32{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0}, + itd32{a: 0, b: 2147483647, add: 2147483647, sub: -2147483647, mul: 0, div: 0, mod: 0}, + itd32{a: 1, b: -2147483648, add: -2147483647, sub: -2147483647, mul: -2147483648, div: 0, mod: 1}, + itd32{a: 1, b: -2147483647, add: -2147483646, sub: -2147483648, mul: -2147483647, div: 0, mod: 1}, + itd32{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0}, + itd32{a: 1, b: 0, add: 1, sub: 1, mul: 0}, + itd32{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, + itd32{a: 1, b: 2147483647, add: -2147483648, sub: -2147483646, mul: 2147483647, div: 0, mod: 1}, + itd32{a: 2147483647, b: -2147483648, add: -1, sub: -1, mul: -2147483648, div: 0, mod: 
2147483647}, + itd32{a: 2147483647, b: -2147483647, add: 0, sub: -2, mul: -1, div: -1, mod: 0}, + itd32{a: 2147483647, b: -1, add: 2147483646, sub: -2147483648, mul: -2147483647, div: -2147483647, mod: 0}, + itd32{a: 2147483647, b: 0, add: 2147483647, sub: 2147483647, mul: 0}, + itd32{a: 2147483647, b: 1, add: -2147483648, sub: 2147483646, mul: 2147483647, div: 2147483647, mod: 0}, + itd32{a: 2147483647, b: 2147483647, add: -2, sub: 0, mul: 1, div: 1, mod: 0}, +} +var uint16_data []utd16 = []utd16{utd16{a: 0, b: 0, add: 0, sub: 0, mul: 0}, + utd16{a: 0, b: 1, add: 1, sub: 65535, mul: 0, div: 0, mod: 0}, + utd16{a: 0, b: 65535, add: 65535, sub: 1, mul: 0, div: 0, mod: 0}, + utd16{a: 1, b: 0, add: 1, sub: 1, mul: 0}, + utd16{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, + utd16{a: 1, b: 65535, add: 0, sub: 2, mul: 65535, div: 0, mod: 1}, + utd16{a: 65535, b: 0, add: 65535, sub: 65535, mul: 0}, + utd16{a: 65535, b: 1, add: 0, sub: 65534, mul: 65535, div: 65535, mod: 0}, + utd16{a: 65535, b: 65535, add: 65534, sub: 0, mul: 1, div: 1, mod: 0}, +} +var int16_data []itd16 = []itd16{itd16{a: -32768, b: -32768, add: 0, sub: 0, mul: 0, div: 1, mod: 0}, + itd16{a: -32768, b: -32767, add: 1, sub: -1, mul: -32768, div: 1, mod: -1}, + itd16{a: -32768, b: -1, add: 32767, sub: -32767, mul: -32768, div: -32768, mod: 0}, + itd16{a: -32768, b: 0, add: -32768, sub: -32768, mul: 0}, + itd16{a: -32768, b: 1, add: -32767, sub: 32767, mul: -32768, div: -32768, mod: 0}, + itd16{a: -32768, b: 32766, add: -2, sub: 2, mul: 0, div: -1, mod: -2}, + itd16{a: -32768, b: 32767, add: -1, sub: 1, mul: -32768, div: -1, mod: -1}, + itd16{a: -32767, b: -32768, add: 1, sub: 1, mul: -32768, div: 0, mod: -32767}, + itd16{a: -32767, b: -32767, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, + itd16{a: -32767, b: -1, add: -32768, sub: -32766, mul: 32767, div: 32767, mod: 0}, + itd16{a: -32767, b: 0, add: -32767, sub: -32767, mul: 0}, + itd16{a: -32767, b: 1, add: -32766, sub: -32768, mul: -32767, div: 
-32767, mod: 0}, + itd16{a: -32767, b: 32766, add: -1, sub: 3, mul: 32766, div: -1, mod: -1}, + itd16{a: -32767, b: 32767, add: 0, sub: 2, mul: -1, div: -1, mod: 0}, + itd16{a: -1, b: -32768, add: 32767, sub: 32767, mul: -32768, div: 0, mod: -1}, + itd16{a: -1, b: -32767, add: -32768, sub: 32766, mul: 32767, div: 0, mod: -1}, + itd16{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0}, + itd16{a: -1, b: 0, add: -1, sub: -1, mul: 0}, + itd16{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0}, + itd16{a: -1, b: 32766, add: 32765, sub: -32767, mul: -32766, div: 0, mod: -1}, + itd16{a: -1, b: 32767, add: 32766, sub: -32768, mul: -32767, div: 0, mod: -1}, + itd16{a: 0, b: -32768, add: -32768, sub: -32768, mul: 0, div: 0, mod: 0}, + itd16{a: 0, b: -32767, add: -32767, sub: 32767, mul: 0, div: 0, mod: 0}, + itd16{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0}, + itd16{a: 0, b: 0, add: 0, sub: 0, mul: 0}, + itd16{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0}, + itd16{a: 0, b: 32766, add: 32766, sub: -32766, mul: 0, div: 0, mod: 0}, + itd16{a: 0, b: 32767, add: 32767, sub: -32767, mul: 0, div: 0, mod: 0}, + itd16{a: 1, b: -32768, add: -32767, sub: -32767, mul: -32768, div: 0, mod: 1}, + itd16{a: 1, b: -32767, add: -32766, sub: -32768, mul: -32767, div: 0, mod: 1}, + itd16{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0}, + itd16{a: 1, b: 0, add: 1, sub: 1, mul: 0}, + itd16{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, + itd16{a: 1, b: 32766, add: 32767, sub: -32765, mul: 32766, div: 0, mod: 1}, + itd16{a: 1, b: 32767, add: -32768, sub: -32766, mul: 32767, div: 0, mod: 1}, + itd16{a: 32766, b: -32768, add: -2, sub: -2, mul: 0, div: 0, mod: 32766}, + itd16{a: 32766, b: -32767, add: -1, sub: -3, mul: 32766, div: 0, mod: 32766}, + itd16{a: 32766, b: -1, add: 32765, sub: 32767, mul: -32766, div: -32766, mod: 0}, + itd16{a: 32766, b: 0, add: 32766, sub: 32766, mul: 0}, + itd16{a: 32766, b: 1, add: 32767, sub: 32765, mul: 32766, div: 32766, mod: 
0}, + itd16{a: 32766, b: 32766, add: -4, sub: 0, mul: 4, div: 1, mod: 0}, + itd16{a: 32766, b: 32767, add: -3, sub: -1, mul: -32766, div: 0, mod: 32766}, + itd16{a: 32767, b: -32768, add: -1, sub: -1, mul: -32768, div: 0, mod: 32767}, + itd16{a: 32767, b: -32767, add: 0, sub: -2, mul: -1, div: -1, mod: 0}, + itd16{a: 32767, b: -1, add: 32766, sub: -32768, mul: -32767, div: -32767, mod: 0}, + itd16{a: 32767, b: 0, add: 32767, sub: 32767, mul: 0}, + itd16{a: 32767, b: 1, add: -32768, sub: 32766, mul: 32767, div: 32767, mod: 0}, + itd16{a: 32767, b: 32766, add: -3, sub: 1, mul: -32766, div: 1, mod: 1}, + itd16{a: 32767, b: 32767, add: -2, sub: 0, mul: 1, div: 1, mod: 0}, +} +var uint8_data []utd8 = []utd8{utd8{a: 0, b: 0, add: 0, sub: 0, mul: 0}, + utd8{a: 0, b: 1, add: 1, sub: 255, mul: 0, div: 0, mod: 0}, + utd8{a: 0, b: 255, add: 255, sub: 1, mul: 0, div: 0, mod: 0}, + utd8{a: 1, b: 0, add: 1, sub: 1, mul: 0}, + utd8{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, + utd8{a: 1, b: 255, add: 0, sub: 2, mul: 255, div: 0, mod: 1}, + utd8{a: 255, b: 0, add: 255, sub: 255, mul: 0}, + utd8{a: 255, b: 1, add: 0, sub: 254, mul: 255, div: 255, mod: 0}, + utd8{a: 255, b: 255, add: 254, sub: 0, mul: 1, div: 1, mod: 0}, +} +var int8_data []itd8 = []itd8{itd8{a: -128, b: -128, add: 0, sub: 0, mul: 0, div: 1, mod: 0}, + itd8{a: -128, b: -127, add: 1, sub: -1, mul: -128, div: 1, mod: -1}, + itd8{a: -128, b: -1, add: 127, sub: -127, mul: -128, div: -128, mod: 0}, + itd8{a: -128, b: 0, add: -128, sub: -128, mul: 0}, + itd8{a: -128, b: 1, add: -127, sub: 127, mul: -128, div: -128, mod: 0}, + itd8{a: -128, b: 126, add: -2, sub: 2, mul: 0, div: -1, mod: -2}, + itd8{a: -128, b: 127, add: -1, sub: 1, mul: -128, div: -1, mod: -1}, + itd8{a: -127, b: -128, add: 1, sub: 1, mul: -128, div: 0, mod: -127}, + itd8{a: -127, b: -127, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, + itd8{a: -127, b: -1, add: -128, sub: -126, mul: 127, div: 127, mod: 0}, + itd8{a: -127, b: 0, add: -127, sub: -127, 
mul: 0}, + itd8{a: -127, b: 1, add: -126, sub: -128, mul: -127, div: -127, mod: 0}, + itd8{a: -127, b: 126, add: -1, sub: 3, mul: 126, div: -1, mod: -1}, + itd8{a: -127, b: 127, add: 0, sub: 2, mul: -1, div: -1, mod: 0}, + itd8{a: -1, b: -128, add: 127, sub: 127, mul: -128, div: 0, mod: -1}, + itd8{a: -1, b: -127, add: -128, sub: 126, mul: 127, div: 0, mod: -1}, + itd8{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0}, + itd8{a: -1, b: 0, add: -1, sub: -1, mul: 0}, + itd8{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0}, + itd8{a: -1, b: 126, add: 125, sub: -127, mul: -126, div: 0, mod: -1}, + itd8{a: -1, b: 127, add: 126, sub: -128, mul: -127, div: 0, mod: -1}, + itd8{a: 0, b: -128, add: -128, sub: -128, mul: 0, div: 0, mod: 0}, + itd8{a: 0, b: -127, add: -127, sub: 127, mul: 0, div: 0, mod: 0}, + itd8{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0}, + itd8{a: 0, b: 0, add: 0, sub: 0, mul: 0}, + itd8{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0}, + itd8{a: 0, b: 126, add: 126, sub: -126, mul: 0, div: 0, mod: 0}, + itd8{a: 0, b: 127, add: 127, sub: -127, mul: 0, div: 0, mod: 0}, + itd8{a: 1, b: -128, add: -127, sub: -127, mul: -128, div: 0, mod: 1}, + itd8{a: 1, b: -127, add: -126, sub: -128, mul: -127, div: 0, mod: 1}, + itd8{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0}, + itd8{a: 1, b: 0, add: 1, sub: 1, mul: 0}, + itd8{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0}, + itd8{a: 1, b: 126, add: 127, sub: -125, mul: 126, div: 0, mod: 1}, + itd8{a: 1, b: 127, add: -128, sub: -126, mul: 127, div: 0, mod: 1}, + itd8{a: 126, b: -128, add: -2, sub: -2, mul: 0, div: 0, mod: 126}, + itd8{a: 126, b: -127, add: -1, sub: -3, mul: 126, div: 0, mod: 126}, + itd8{a: 126, b: -1, add: 125, sub: 127, mul: -126, div: -126, mod: 0}, + itd8{a: 126, b: 0, add: 126, sub: 126, mul: 0}, + itd8{a: 126, b: 1, add: 127, sub: 125, mul: 126, div: 126, mod: 0}, + itd8{a: 126, b: 126, add: -4, sub: 0, mul: 4, div: 1, mod: 0}, + itd8{a: 126, b: 127, add: 
-3, sub: -1, mul: -126, div: 0, mod: 126}, + itd8{a: 127, b: -128, add: -1, sub: -1, mul: -128, div: 0, mod: 127}, + itd8{a: 127, b: -127, add: 0, sub: -2, mul: -1, div: -1, mod: 0}, + itd8{a: 127, b: -1, add: 126, sub: -128, mul: -127, div: -127, mod: 0}, + itd8{a: 127, b: 0, add: 127, sub: 127, mul: 0}, + itd8{a: 127, b: 1, add: -128, sub: 126, mul: 127, div: 127, mod: 0}, + itd8{a: 127, b: 126, add: -3, sub: 1, mul: -126, div: 1, mod: 1}, + itd8{a: 127, b: 127, add: -2, sub: 0, mul: 1, div: 1, mod: 0}, +} + +//TestArithmeticBoundary tests boundary results for arithmetic operations. +func TestArithmeticBoundary(t *testing.T) { + + for _, v := range uint64_data { + if got := add_uint64_ssa(v.a, v.b); got != v.add { + t.Errorf("add_uint64 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add) + } + if got := sub_uint64_ssa(v.a, v.b); got != v.sub { + t.Errorf("sub_uint64 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub) + } + if v.b != 0 { + if got := div_uint64_ssa(v.a, v.b); got != v.div { + t.Errorf("div_uint64 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div) + } + + } + if v.b != 0 { + if got := mod_uint64_ssa(v.a, v.b); got != v.mod { + t.Errorf("mod_uint64 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod) + } + + } + if got := mul_uint64_ssa(v.a, v.b); got != v.mul { + t.Errorf("mul_uint64 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul) + } + } + for _, v := range int64_data { + if got := add_int64_ssa(v.a, v.b); got != v.add { + t.Errorf("add_int64 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add) + } + if got := sub_int64_ssa(v.a, v.b); got != v.sub { + t.Errorf("sub_int64 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub) + } + if v.b != 0 { + if got := div_int64_ssa(v.a, v.b); got != v.div { + t.Errorf("div_int64 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div) + } + + } + if v.b != 0 { + if got := mod_int64_ssa(v.a, v.b); got != v.mod { + t.Errorf("mod_int64 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod) + } + + } + if got := mul_int64_ssa(v.a, v.b); got != v.mul { + 
t.Errorf("mul_int64 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul) + } + } + for _, v := range uint32_data { + if got := add_uint32_ssa(v.a, v.b); got != v.add { + t.Errorf("add_uint32 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add) + } + if got := sub_uint32_ssa(v.a, v.b); got != v.sub { + t.Errorf("sub_uint32 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub) + } + if v.b != 0 { + if got := div_uint32_ssa(v.a, v.b); got != v.div { + t.Errorf("div_uint32 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div) + } + + } + if v.b != 0 { + if got := mod_uint32_ssa(v.a, v.b); got != v.mod { + t.Errorf("mod_uint32 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod) + } + + } + if got := mul_uint32_ssa(v.a, v.b); got != v.mul { + t.Errorf("mul_uint32 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul) + } + } + for _, v := range int32_data { + if got := add_int32_ssa(v.a, v.b); got != v.add { + t.Errorf("add_int32 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add) + } + if got := sub_int32_ssa(v.a, v.b); got != v.sub { + t.Errorf("sub_int32 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub) + } + if v.b != 0 { + if got := div_int32_ssa(v.a, v.b); got != v.div { + t.Errorf("div_int32 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div) + } + + } + if v.b != 0 { + if got := mod_int32_ssa(v.a, v.b); got != v.mod { + t.Errorf("mod_int32 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod) + } + + } + if got := mul_int32_ssa(v.a, v.b); got != v.mul { + t.Errorf("mul_int32 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul) + } + } + for _, v := range uint16_data { + if got := add_uint16_ssa(v.a, v.b); got != v.add { + t.Errorf("add_uint16 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add) + } + if got := sub_uint16_ssa(v.a, v.b); got != v.sub { + t.Errorf("sub_uint16 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub) + } + if v.b != 0 { + if got := div_uint16_ssa(v.a, v.b); got != v.div { + t.Errorf("div_uint16 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div) + } + + } + if v.b != 0 { + if got := mod_uint16_ssa(v.a, 
v.b); got != v.mod { + t.Errorf("mod_uint16 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod) + } + + } + if got := mul_uint16_ssa(v.a, v.b); got != v.mul { + t.Errorf("mul_uint16 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul) + } + } + for _, v := range int16_data { + if got := add_int16_ssa(v.a, v.b); got != v.add { + t.Errorf("add_int16 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add) + } + if got := sub_int16_ssa(v.a, v.b); got != v.sub { + t.Errorf("sub_int16 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub) + } + if v.b != 0 { + if got := div_int16_ssa(v.a, v.b); got != v.div { + t.Errorf("div_int16 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div) + } + + } + if v.b != 0 { + if got := mod_int16_ssa(v.a, v.b); got != v.mod { + t.Errorf("mod_int16 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod) + } + + } + if got := mul_int16_ssa(v.a, v.b); got != v.mul { + t.Errorf("mul_int16 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul) + } + } + for _, v := range uint8_data { + if got := add_uint8_ssa(v.a, v.b); got != v.add { + t.Errorf("add_uint8 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add) + } + if got := sub_uint8_ssa(v.a, v.b); got != v.sub { + t.Errorf("sub_uint8 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub) + } + if v.b != 0 { + if got := div_uint8_ssa(v.a, v.b); got != v.div { + t.Errorf("div_uint8 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div) + } + + } + if v.b != 0 { + if got := mod_uint8_ssa(v.a, v.b); got != v.mod { + t.Errorf("mod_uint8 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod) + } + + } + if got := mul_uint8_ssa(v.a, v.b); got != v.mul { + t.Errorf("mul_uint8 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul) + } + } + for _, v := range int8_data { + if got := add_int8_ssa(v.a, v.b); got != v.add { + t.Errorf("add_int8 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add) + } + if got := sub_int8_ssa(v.a, v.b); got != v.sub { + t.Errorf("sub_int8 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub) + } + if v.b != 0 { + if got := div_int8_ssa(v.a, v.b); got != 
v.div { + t.Errorf("div_int8 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div) + } + + } + if v.b != 0 { + if got := mod_int8_ssa(v.a, v.b); got != v.mod { + t.Errorf("mod_int8 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod) + } + + } + if got := mul_int8_ssa(v.a, v.b); got != v.mul { + t.Errorf("mul_int8 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/arithConst_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/arithConst_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9f5ac61427c40c3fd273b574b92473da44dd6e12 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/arithConst_test.go @@ -0,0 +1,9570 @@ +// Code generated by gen/arithConstGen.go. DO NOT EDIT. + +package main + +import "testing" + +//go:noinline +func add_uint64_0(a uint64) uint64 { return a + 0 } + +//go:noinline +func add_0_uint64(a uint64) uint64 { return 0 + a } + +//go:noinline +func add_uint64_1(a uint64) uint64 { return a + 1 } + +//go:noinline +func add_1_uint64(a uint64) uint64 { return 1 + a } + +//go:noinline +func add_uint64_4294967296(a uint64) uint64 { return a + 4294967296 } + +//go:noinline +func add_4294967296_uint64(a uint64) uint64 { return 4294967296 + a } + +//go:noinline +func add_uint64_9223372036854775808(a uint64) uint64 { return a + 9223372036854775808 } + +//go:noinline +func add_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 + a } + +//go:noinline +func add_uint64_18446744073709551615(a uint64) uint64 { return a + 18446744073709551615 } + +//go:noinline +func add_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 + a } + +//go:noinline +func sub_uint64_0(a uint64) uint64 { return a - 0 } + +//go:noinline +func sub_0_uint64(a uint64) uint64 { return 0 - a } + +//go:noinline +func sub_uint64_1(a uint64) uint64 { return a - 1 } + +//go:noinline 
+func sub_1_uint64(a uint64) uint64 { return 1 - a } + +//go:noinline +func sub_uint64_4294967296(a uint64) uint64 { return a - 4294967296 } + +//go:noinline +func sub_4294967296_uint64(a uint64) uint64 { return 4294967296 - a } + +//go:noinline +func sub_uint64_9223372036854775808(a uint64) uint64 { return a - 9223372036854775808 } + +//go:noinline +func sub_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 - a } + +//go:noinline +func sub_uint64_18446744073709551615(a uint64) uint64 { return a - 18446744073709551615 } + +//go:noinline +func sub_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 - a } + +//go:noinline +func div_0_uint64(a uint64) uint64 { return 0 / a } + +//go:noinline +func div_uint64_1(a uint64) uint64 { return a / 1 } + +//go:noinline +func div_1_uint64(a uint64) uint64 { return 1 / a } + +//go:noinline +func div_uint64_4294967296(a uint64) uint64 { return a / 4294967296 } + +//go:noinline +func div_4294967296_uint64(a uint64) uint64 { return 4294967296 / a } + +//go:noinline +func div_uint64_9223372036854775808(a uint64) uint64 { return a / 9223372036854775808 } + +//go:noinline +func div_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 / a } + +//go:noinline +func div_uint64_18446744073709551615(a uint64) uint64 { return a / 18446744073709551615 } + +//go:noinline +func div_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 / a } + +//go:noinline +func mul_uint64_0(a uint64) uint64 { return a * 0 } + +//go:noinline +func mul_0_uint64(a uint64) uint64 { return 0 * a } + +//go:noinline +func mul_uint64_1(a uint64) uint64 { return a * 1 } + +//go:noinline +func mul_1_uint64(a uint64) uint64 { return 1 * a } + +//go:noinline +func mul_uint64_4294967296(a uint64) uint64 { return a * 4294967296 } + +//go:noinline +func mul_4294967296_uint64(a uint64) uint64 { return 4294967296 * a } + +//go:noinline +func mul_uint64_9223372036854775808(a uint64) uint64 { 
return a * 9223372036854775808 } + +//go:noinline +func mul_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 * a } + +//go:noinline +func mul_uint64_18446744073709551615(a uint64) uint64 { return a * 18446744073709551615 } + +//go:noinline +func mul_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 * a } + +//go:noinline +func lsh_uint64_0(a uint64) uint64 { return a << 0 } + +//go:noinline +func lsh_0_uint64(a uint64) uint64 { return 0 << a } + +//go:noinline +func lsh_uint64_1(a uint64) uint64 { return a << 1 } + +//go:noinline +func lsh_1_uint64(a uint64) uint64 { return 1 << a } + +//go:noinline +func lsh_uint64_4294967296(a uint64) uint64 { return a << uint64(4294967296) } + +//go:noinline +func lsh_4294967296_uint64(a uint64) uint64 { return 4294967296 << a } + +//go:noinline +func lsh_uint64_9223372036854775808(a uint64) uint64 { return a << uint64(9223372036854775808) } + +//go:noinline +func lsh_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 << a } + +//go:noinline +func lsh_uint64_18446744073709551615(a uint64) uint64 { return a << uint64(18446744073709551615) } + +//go:noinline +func lsh_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 << a } + +//go:noinline +func rsh_uint64_0(a uint64) uint64 { return a >> 0 } + +//go:noinline +func rsh_0_uint64(a uint64) uint64 { return 0 >> a } + +//go:noinline +func rsh_uint64_1(a uint64) uint64 { return a >> 1 } + +//go:noinline +func rsh_1_uint64(a uint64) uint64 { return 1 >> a } + +//go:noinline +func rsh_uint64_4294967296(a uint64) uint64 { return a >> uint64(4294967296) } + +//go:noinline +func rsh_4294967296_uint64(a uint64) uint64 { return 4294967296 >> a } + +//go:noinline +func rsh_uint64_9223372036854775808(a uint64) uint64 { return a >> uint64(9223372036854775808) } + +//go:noinline +func rsh_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 >> a } + +//go:noinline +func 
rsh_uint64_18446744073709551615(a uint64) uint64 { return a >> uint64(18446744073709551615) } + +//go:noinline +func rsh_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 >> a } + +//go:noinline +func mod_0_uint64(a uint64) uint64 { return 0 % a } + +//go:noinline +func mod_uint64_1(a uint64) uint64 { return a % 1 } + +//go:noinline +func mod_1_uint64(a uint64) uint64 { return 1 % a } + +//go:noinline +func mod_uint64_4294967296(a uint64) uint64 { return a % 4294967296 } + +//go:noinline +func mod_4294967296_uint64(a uint64) uint64 { return 4294967296 % a } + +//go:noinline +func mod_uint64_9223372036854775808(a uint64) uint64 { return a % 9223372036854775808 } + +//go:noinline +func mod_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 % a } + +//go:noinline +func mod_uint64_18446744073709551615(a uint64) uint64 { return a % 18446744073709551615 } + +//go:noinline +func mod_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 % a } + +//go:noinline +func and_uint64_0(a uint64) uint64 { return a & 0 } + +//go:noinline +func and_0_uint64(a uint64) uint64 { return 0 & a } + +//go:noinline +func and_uint64_1(a uint64) uint64 { return a & 1 } + +//go:noinline +func and_1_uint64(a uint64) uint64 { return 1 & a } + +//go:noinline +func and_uint64_4294967296(a uint64) uint64 { return a & 4294967296 } + +//go:noinline +func and_4294967296_uint64(a uint64) uint64 { return 4294967296 & a } + +//go:noinline +func and_uint64_9223372036854775808(a uint64) uint64 { return a & 9223372036854775808 } + +//go:noinline +func and_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 & a } + +//go:noinline +func and_uint64_18446744073709551615(a uint64) uint64 { return a & 18446744073709551615 } + +//go:noinline +func and_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 & a } + +//go:noinline +func or_uint64_0(a uint64) uint64 { return a | 0 } + +//go:noinline +func 
or_0_uint64(a uint64) uint64 { return 0 | a } + +//go:noinline +func or_uint64_1(a uint64) uint64 { return a | 1 } + +//go:noinline +func or_1_uint64(a uint64) uint64 { return 1 | a } + +//go:noinline +func or_uint64_4294967296(a uint64) uint64 { return a | 4294967296 } + +//go:noinline +func or_4294967296_uint64(a uint64) uint64 { return 4294967296 | a } + +//go:noinline +func or_uint64_9223372036854775808(a uint64) uint64 { return a | 9223372036854775808 } + +//go:noinline +func or_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 | a } + +//go:noinline +func or_uint64_18446744073709551615(a uint64) uint64 { return a | 18446744073709551615 } + +//go:noinline +func or_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 | a } + +//go:noinline +func xor_uint64_0(a uint64) uint64 { return a ^ 0 } + +//go:noinline +func xor_0_uint64(a uint64) uint64 { return 0 ^ a } + +//go:noinline +func xor_uint64_1(a uint64) uint64 { return a ^ 1 } + +//go:noinline +func xor_1_uint64(a uint64) uint64 { return 1 ^ a } + +//go:noinline +func xor_uint64_4294967296(a uint64) uint64 { return a ^ 4294967296 } + +//go:noinline +func xor_4294967296_uint64(a uint64) uint64 { return 4294967296 ^ a } + +//go:noinline +func xor_uint64_9223372036854775808(a uint64) uint64 { return a ^ 9223372036854775808 } + +//go:noinline +func xor_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 ^ a } + +//go:noinline +func xor_uint64_18446744073709551615(a uint64) uint64 { return a ^ 18446744073709551615 } + +//go:noinline +func xor_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 ^ a } + +//go:noinline +func mul_uint64_3(a uint64) uint64 { return a * 3 } + +//go:noinline +func mul_3_uint64(a uint64) uint64 { return 3 * a } + +//go:noinline +func mul_uint64_5(a uint64) uint64 { return a * 5 } + +//go:noinline +func mul_5_uint64(a uint64) uint64 { return 5 * a } + +//go:noinline +func mul_uint64_7(a uint64) uint64 
{ return a * 7 } + +//go:noinline +func mul_7_uint64(a uint64) uint64 { return 7 * a } + +//go:noinline +func mul_uint64_9(a uint64) uint64 { return a * 9 } + +//go:noinline +func mul_9_uint64(a uint64) uint64 { return 9 * a } + +//go:noinline +func mul_uint64_10(a uint64) uint64 { return a * 10 } + +//go:noinline +func mul_10_uint64(a uint64) uint64 { return 10 * a } + +//go:noinline +func mul_uint64_11(a uint64) uint64 { return a * 11 } + +//go:noinline +func mul_11_uint64(a uint64) uint64 { return 11 * a } + +//go:noinline +func mul_uint64_13(a uint64) uint64 { return a * 13 } + +//go:noinline +func mul_13_uint64(a uint64) uint64 { return 13 * a } + +//go:noinline +func mul_uint64_19(a uint64) uint64 { return a * 19 } + +//go:noinline +func mul_19_uint64(a uint64) uint64 { return 19 * a } + +//go:noinline +func mul_uint64_21(a uint64) uint64 { return a * 21 } + +//go:noinline +func mul_21_uint64(a uint64) uint64 { return 21 * a } + +//go:noinline +func mul_uint64_25(a uint64) uint64 { return a * 25 } + +//go:noinline +func mul_25_uint64(a uint64) uint64 { return 25 * a } + +//go:noinline +func mul_uint64_27(a uint64) uint64 { return a * 27 } + +//go:noinline +func mul_27_uint64(a uint64) uint64 { return 27 * a } + +//go:noinline +func mul_uint64_37(a uint64) uint64 { return a * 37 } + +//go:noinline +func mul_37_uint64(a uint64) uint64 { return 37 * a } + +//go:noinline +func mul_uint64_41(a uint64) uint64 { return a * 41 } + +//go:noinline +func mul_41_uint64(a uint64) uint64 { return 41 * a } + +//go:noinline +func mul_uint64_45(a uint64) uint64 { return a * 45 } + +//go:noinline +func mul_45_uint64(a uint64) uint64 { return 45 * a } + +//go:noinline +func mul_uint64_73(a uint64) uint64 { return a * 73 } + +//go:noinline +func mul_73_uint64(a uint64) uint64 { return 73 * a } + +//go:noinline +func mul_uint64_81(a uint64) uint64 { return a * 81 } + +//go:noinline +func mul_81_uint64(a uint64) uint64 { return 81 * a } + +//go:noinline +func 
add_int64_Neg9223372036854775808(a int64) int64 { return a + -9223372036854775808 } + +//go:noinline +func add_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 + a } + +//go:noinline +func add_int64_Neg9223372036854775807(a int64) int64 { return a + -9223372036854775807 } + +//go:noinline +func add_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 + a } + +//go:noinline +func add_int64_Neg4294967296(a int64) int64 { return a + -4294967296 } + +//go:noinline +func add_Neg4294967296_int64(a int64) int64 { return -4294967296 + a } + +//go:noinline +func add_int64_Neg1(a int64) int64 { return a + -1 } + +//go:noinline +func add_Neg1_int64(a int64) int64 { return -1 + a } + +//go:noinline +func add_int64_0(a int64) int64 { return a + 0 } + +//go:noinline +func add_0_int64(a int64) int64 { return 0 + a } + +//go:noinline +func add_int64_1(a int64) int64 { return a + 1 } + +//go:noinline +func add_1_int64(a int64) int64 { return 1 + a } + +//go:noinline +func add_int64_4294967296(a int64) int64 { return a + 4294967296 } + +//go:noinline +func add_4294967296_int64(a int64) int64 { return 4294967296 + a } + +//go:noinline +func add_int64_9223372036854775806(a int64) int64 { return a + 9223372036854775806 } + +//go:noinline +func add_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 + a } + +//go:noinline +func add_int64_9223372036854775807(a int64) int64 { return a + 9223372036854775807 } + +//go:noinline +func add_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 + a } + +//go:noinline +func sub_int64_Neg9223372036854775808(a int64) int64 { return a - -9223372036854775808 } + +//go:noinline +func sub_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 - a } + +//go:noinline +func sub_int64_Neg9223372036854775807(a int64) int64 { return a - -9223372036854775807 } + +//go:noinline +func sub_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 - 
a } + +//go:noinline +func sub_int64_Neg4294967296(a int64) int64 { return a - -4294967296 } + +//go:noinline +func sub_Neg4294967296_int64(a int64) int64 { return -4294967296 - a } + +//go:noinline +func sub_int64_Neg1(a int64) int64 { return a - -1 } + +//go:noinline +func sub_Neg1_int64(a int64) int64 { return -1 - a } + +//go:noinline +func sub_int64_0(a int64) int64 { return a - 0 } + +//go:noinline +func sub_0_int64(a int64) int64 { return 0 - a } + +//go:noinline +func sub_int64_1(a int64) int64 { return a - 1 } + +//go:noinline +func sub_1_int64(a int64) int64 { return 1 - a } + +//go:noinline +func sub_int64_4294967296(a int64) int64 { return a - 4294967296 } + +//go:noinline +func sub_4294967296_int64(a int64) int64 { return 4294967296 - a } + +//go:noinline +func sub_int64_9223372036854775806(a int64) int64 { return a - 9223372036854775806 } + +//go:noinline +func sub_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 - a } + +//go:noinline +func sub_int64_9223372036854775807(a int64) int64 { return a - 9223372036854775807 } + +//go:noinline +func sub_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 - a } + +//go:noinline +func div_int64_Neg9223372036854775808(a int64) int64 { return a / -9223372036854775808 } + +//go:noinline +func div_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 / a } + +//go:noinline +func div_int64_Neg9223372036854775807(a int64) int64 { return a / -9223372036854775807 } + +//go:noinline +func div_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 / a } + +//go:noinline +func div_int64_Neg4294967296(a int64) int64 { return a / -4294967296 } + +//go:noinline +func div_Neg4294967296_int64(a int64) int64 { return -4294967296 / a } + +//go:noinline +func div_int64_Neg1(a int64) int64 { return a / -1 } + +//go:noinline +func div_Neg1_int64(a int64) int64 { return -1 / a } + +//go:noinline +func div_0_int64(a int64) int64 { return 0 / a } + 
+//go:noinline +func div_int64_1(a int64) int64 { return a / 1 } + +//go:noinline +func div_1_int64(a int64) int64 { return 1 / a } + +//go:noinline +func div_int64_4294967296(a int64) int64 { return a / 4294967296 } + +//go:noinline +func div_4294967296_int64(a int64) int64 { return 4294967296 / a } + +//go:noinline +func div_int64_9223372036854775806(a int64) int64 { return a / 9223372036854775806 } + +//go:noinline +func div_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 / a } + +//go:noinline +func div_int64_9223372036854775807(a int64) int64 { return a / 9223372036854775807 } + +//go:noinline +func div_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 / a } + +//go:noinline +func mul_int64_Neg9223372036854775808(a int64) int64 { return a * -9223372036854775808 } + +//go:noinline +func mul_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 * a } + +//go:noinline +func mul_int64_Neg9223372036854775807(a int64) int64 { return a * -9223372036854775807 } + +//go:noinline +func mul_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 * a } + +//go:noinline +func mul_int64_Neg4294967296(a int64) int64 { return a * -4294967296 } + +//go:noinline +func mul_Neg4294967296_int64(a int64) int64 { return -4294967296 * a } + +//go:noinline +func mul_int64_Neg1(a int64) int64 { return a * -1 } + +//go:noinline +func mul_Neg1_int64(a int64) int64 { return -1 * a } + +//go:noinline +func mul_int64_0(a int64) int64 { return a * 0 } + +//go:noinline +func mul_0_int64(a int64) int64 { return 0 * a } + +//go:noinline +func mul_int64_1(a int64) int64 { return a * 1 } + +//go:noinline +func mul_1_int64(a int64) int64 { return 1 * a } + +//go:noinline +func mul_int64_4294967296(a int64) int64 { return a * 4294967296 } + +//go:noinline +func mul_4294967296_int64(a int64) int64 { return 4294967296 * a } + +//go:noinline +func mul_int64_9223372036854775806(a int64) int64 { return a * 
9223372036854775806 } + +//go:noinline +func mul_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 * a } + +//go:noinline +func mul_int64_9223372036854775807(a int64) int64 { return a * 9223372036854775807 } + +//go:noinline +func mul_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 * a } + +//go:noinline +func mod_int64_Neg9223372036854775808(a int64) int64 { return a % -9223372036854775808 } + +//go:noinline +func mod_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 % a } + +//go:noinline +func mod_int64_Neg9223372036854775807(a int64) int64 { return a % -9223372036854775807 } + +//go:noinline +func mod_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 % a } + +//go:noinline +func mod_int64_Neg4294967296(a int64) int64 { return a % -4294967296 } + +//go:noinline +func mod_Neg4294967296_int64(a int64) int64 { return -4294967296 % a } + +//go:noinline +func mod_int64_Neg1(a int64) int64 { return a % -1 } + +//go:noinline +func mod_Neg1_int64(a int64) int64 { return -1 % a } + +//go:noinline +func mod_0_int64(a int64) int64 { return 0 % a } + +//go:noinline +func mod_int64_1(a int64) int64 { return a % 1 } + +//go:noinline +func mod_1_int64(a int64) int64 { return 1 % a } + +//go:noinline +func mod_int64_4294967296(a int64) int64 { return a % 4294967296 } + +//go:noinline +func mod_4294967296_int64(a int64) int64 { return 4294967296 % a } + +//go:noinline +func mod_int64_9223372036854775806(a int64) int64 { return a % 9223372036854775806 } + +//go:noinline +func mod_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 % a } + +//go:noinline +func mod_int64_9223372036854775807(a int64) int64 { return a % 9223372036854775807 } + +//go:noinline +func mod_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 % a } + +//go:noinline +func and_int64_Neg9223372036854775808(a int64) int64 { return a & -9223372036854775808 } + +//go:noinline +func 
and_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 & a } + +//go:noinline +func and_int64_Neg9223372036854775807(a int64) int64 { return a & -9223372036854775807 } + +//go:noinline +func and_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 & a } + +//go:noinline +func and_int64_Neg4294967296(a int64) int64 { return a & -4294967296 } + +//go:noinline +func and_Neg4294967296_int64(a int64) int64 { return -4294967296 & a } + +//go:noinline +func and_int64_Neg1(a int64) int64 { return a & -1 } + +//go:noinline +func and_Neg1_int64(a int64) int64 { return -1 & a } + +//go:noinline +func and_int64_0(a int64) int64 { return a & 0 } + +//go:noinline +func and_0_int64(a int64) int64 { return 0 & a } + +//go:noinline +func and_int64_1(a int64) int64 { return a & 1 } + +//go:noinline +func and_1_int64(a int64) int64 { return 1 & a } + +//go:noinline +func and_int64_4294967296(a int64) int64 { return a & 4294967296 } + +//go:noinline +func and_4294967296_int64(a int64) int64 { return 4294967296 & a } + +//go:noinline +func and_int64_9223372036854775806(a int64) int64 { return a & 9223372036854775806 } + +//go:noinline +func and_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 & a } + +//go:noinline +func and_int64_9223372036854775807(a int64) int64 { return a & 9223372036854775807 } + +//go:noinline +func and_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 & a } + +//go:noinline +func or_int64_Neg9223372036854775808(a int64) int64 { return a | -9223372036854775808 } + +//go:noinline +func or_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 | a } + +//go:noinline +func or_int64_Neg9223372036854775807(a int64) int64 { return a | -9223372036854775807 } + +//go:noinline +func or_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 | a } + +//go:noinline +func or_int64_Neg4294967296(a int64) int64 { return a | -4294967296 } + +//go:noinline 
+func or_Neg4294967296_int64(a int64) int64 { return -4294967296 | a } + +//go:noinline +func or_int64_Neg1(a int64) int64 { return a | -1 } + +//go:noinline +func or_Neg1_int64(a int64) int64 { return -1 | a } + +//go:noinline +func or_int64_0(a int64) int64 { return a | 0 } + +//go:noinline +func or_0_int64(a int64) int64 { return 0 | a } + +//go:noinline +func or_int64_1(a int64) int64 { return a | 1 } + +//go:noinline +func or_1_int64(a int64) int64 { return 1 | a } + +//go:noinline +func or_int64_4294967296(a int64) int64 { return a | 4294967296 } + +//go:noinline +func or_4294967296_int64(a int64) int64 { return 4294967296 | a } + +//go:noinline +func or_int64_9223372036854775806(a int64) int64 { return a | 9223372036854775806 } + +//go:noinline +func or_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 | a } + +//go:noinline +func or_int64_9223372036854775807(a int64) int64 { return a | 9223372036854775807 } + +//go:noinline +func or_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 | a } + +//go:noinline +func xor_int64_Neg9223372036854775808(a int64) int64 { return a ^ -9223372036854775808 } + +//go:noinline +func xor_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 ^ a } + +//go:noinline +func xor_int64_Neg9223372036854775807(a int64) int64 { return a ^ -9223372036854775807 } + +//go:noinline +func xor_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 ^ a } + +//go:noinline +func xor_int64_Neg4294967296(a int64) int64 { return a ^ -4294967296 } + +//go:noinline +func xor_Neg4294967296_int64(a int64) int64 { return -4294967296 ^ a } + +//go:noinline +func xor_int64_Neg1(a int64) int64 { return a ^ -1 } + +//go:noinline +func xor_Neg1_int64(a int64) int64 { return -1 ^ a } + +//go:noinline +func xor_int64_0(a int64) int64 { return a ^ 0 } + +//go:noinline +func xor_0_int64(a int64) int64 { return 0 ^ a } + +//go:noinline +func xor_int64_1(a int64) int64 { return a ^ 1 } 
+ +//go:noinline +func xor_1_int64(a int64) int64 { return 1 ^ a } + +//go:noinline +func xor_int64_4294967296(a int64) int64 { return a ^ 4294967296 } + +//go:noinline +func xor_4294967296_int64(a int64) int64 { return 4294967296 ^ a } + +//go:noinline +func xor_int64_9223372036854775806(a int64) int64 { return a ^ 9223372036854775806 } + +//go:noinline +func xor_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 ^ a } + +//go:noinline +func xor_int64_9223372036854775807(a int64) int64 { return a ^ 9223372036854775807 } + +//go:noinline +func xor_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 ^ a } + +//go:noinline +func mul_int64_Neg9(a int64) int64 { return a * -9 } + +//go:noinline +func mul_Neg9_int64(a int64) int64 { return -9 * a } + +//go:noinline +func mul_int64_Neg5(a int64) int64 { return a * -5 } + +//go:noinline +func mul_Neg5_int64(a int64) int64 { return -5 * a } + +//go:noinline +func mul_int64_Neg3(a int64) int64 { return a * -3 } + +//go:noinline +func mul_Neg3_int64(a int64) int64 { return -3 * a } + +//go:noinline +func mul_int64_3(a int64) int64 { return a * 3 } + +//go:noinline +func mul_3_int64(a int64) int64 { return 3 * a } + +//go:noinline +func mul_int64_5(a int64) int64 { return a * 5 } + +//go:noinline +func mul_5_int64(a int64) int64 { return 5 * a } + +//go:noinline +func mul_int64_7(a int64) int64 { return a * 7 } + +//go:noinline +func mul_7_int64(a int64) int64 { return 7 * a } + +//go:noinline +func mul_int64_9(a int64) int64 { return a * 9 } + +//go:noinline +func mul_9_int64(a int64) int64 { return 9 * a } + +//go:noinline +func mul_int64_10(a int64) int64 { return a * 10 } + +//go:noinline +func mul_10_int64(a int64) int64 { return 10 * a } + +//go:noinline +func mul_int64_11(a int64) int64 { return a * 11 } + +//go:noinline +func mul_11_int64(a int64) int64 { return 11 * a } + +//go:noinline +func mul_int64_13(a int64) int64 { return a * 13 } + +//go:noinline +func mul_13_int64(a 
int64) int64 { return 13 * a } + +//go:noinline +func mul_int64_19(a int64) int64 { return a * 19 } + +//go:noinline +func mul_19_int64(a int64) int64 { return 19 * a } + +//go:noinline +func mul_int64_21(a int64) int64 { return a * 21 } + +//go:noinline +func mul_21_int64(a int64) int64 { return 21 * a } + +//go:noinline +func mul_int64_25(a int64) int64 { return a * 25 } + +//go:noinline +func mul_25_int64(a int64) int64 { return 25 * a } + +//go:noinline +func mul_int64_27(a int64) int64 { return a * 27 } + +//go:noinline +func mul_27_int64(a int64) int64 { return 27 * a } + +//go:noinline +func mul_int64_37(a int64) int64 { return a * 37 } + +//go:noinline +func mul_37_int64(a int64) int64 { return 37 * a } + +//go:noinline +func mul_int64_41(a int64) int64 { return a * 41 } + +//go:noinline +func mul_41_int64(a int64) int64 { return 41 * a } + +//go:noinline +func mul_int64_45(a int64) int64 { return a * 45 } + +//go:noinline +func mul_45_int64(a int64) int64 { return 45 * a } + +//go:noinline +func mul_int64_73(a int64) int64 { return a * 73 } + +//go:noinline +func mul_73_int64(a int64) int64 { return 73 * a } + +//go:noinline +func mul_int64_81(a int64) int64 { return a * 81 } + +//go:noinline +func mul_81_int64(a int64) int64 { return 81 * a } + +//go:noinline +func add_uint32_0(a uint32) uint32 { return a + 0 } + +//go:noinline +func add_0_uint32(a uint32) uint32 { return 0 + a } + +//go:noinline +func add_uint32_1(a uint32) uint32 { return a + 1 } + +//go:noinline +func add_1_uint32(a uint32) uint32 { return 1 + a } + +//go:noinline +func add_uint32_4294967295(a uint32) uint32 { return a + 4294967295 } + +//go:noinline +func add_4294967295_uint32(a uint32) uint32 { return 4294967295 + a } + +//go:noinline +func sub_uint32_0(a uint32) uint32 { return a - 0 } + +//go:noinline +func sub_0_uint32(a uint32) uint32 { return 0 - a } + +//go:noinline +func sub_uint32_1(a uint32) uint32 { return a - 1 } + +//go:noinline +func sub_1_uint32(a uint32) uint32 { 
return 1 - a } + +//go:noinline +func sub_uint32_4294967295(a uint32) uint32 { return a - 4294967295 } + +//go:noinline +func sub_4294967295_uint32(a uint32) uint32 { return 4294967295 - a } + +//go:noinline +func div_0_uint32(a uint32) uint32 { return 0 / a } + +//go:noinline +func div_uint32_1(a uint32) uint32 { return a / 1 } + +//go:noinline +func div_1_uint32(a uint32) uint32 { return 1 / a } + +//go:noinline +func div_uint32_4294967295(a uint32) uint32 { return a / 4294967295 } + +//go:noinline +func div_4294967295_uint32(a uint32) uint32 { return 4294967295 / a } + +//go:noinline +func mul_uint32_0(a uint32) uint32 { return a * 0 } + +//go:noinline +func mul_0_uint32(a uint32) uint32 { return 0 * a } + +//go:noinline +func mul_uint32_1(a uint32) uint32 { return a * 1 } + +//go:noinline +func mul_1_uint32(a uint32) uint32 { return 1 * a } + +//go:noinline +func mul_uint32_4294967295(a uint32) uint32 { return a * 4294967295 } + +//go:noinline +func mul_4294967295_uint32(a uint32) uint32 { return 4294967295 * a } + +//go:noinline +func lsh_uint32_0(a uint32) uint32 { return a << 0 } + +//go:noinline +func lsh_0_uint32(a uint32) uint32 { return 0 << a } + +//go:noinline +func lsh_uint32_1(a uint32) uint32 { return a << 1 } + +//go:noinline +func lsh_1_uint32(a uint32) uint32 { return 1 << a } + +//go:noinline +func lsh_uint32_4294967295(a uint32) uint32 { return a << 4294967295 } + +//go:noinline +func lsh_4294967295_uint32(a uint32) uint32 { return 4294967295 << a } + +//go:noinline +func rsh_uint32_0(a uint32) uint32 { return a >> 0 } + +//go:noinline +func rsh_0_uint32(a uint32) uint32 { return 0 >> a } + +//go:noinline +func rsh_uint32_1(a uint32) uint32 { return a >> 1 } + +//go:noinline +func rsh_1_uint32(a uint32) uint32 { return 1 >> a } + +//go:noinline +func rsh_uint32_4294967295(a uint32) uint32 { return a >> 4294967295 } + +//go:noinline +func rsh_4294967295_uint32(a uint32) uint32 { return 4294967295 >> a } + +//go:noinline +func mod_0_uint32(a 
uint32) uint32 { return 0 % a } + +//go:noinline +func mod_uint32_1(a uint32) uint32 { return a % 1 } + +//go:noinline +func mod_1_uint32(a uint32) uint32 { return 1 % a } + +//go:noinline +func mod_uint32_4294967295(a uint32) uint32 { return a % 4294967295 } + +//go:noinline +func mod_4294967295_uint32(a uint32) uint32 { return 4294967295 % a } + +//go:noinline +func and_uint32_0(a uint32) uint32 { return a & 0 } + +//go:noinline +func and_0_uint32(a uint32) uint32 { return 0 & a } + +//go:noinline +func and_uint32_1(a uint32) uint32 { return a & 1 } + +//go:noinline +func and_1_uint32(a uint32) uint32 { return 1 & a } + +//go:noinline +func and_uint32_4294967295(a uint32) uint32 { return a & 4294967295 } + +//go:noinline +func and_4294967295_uint32(a uint32) uint32 { return 4294967295 & a } + +//go:noinline +func or_uint32_0(a uint32) uint32 { return a | 0 } + +//go:noinline +func or_0_uint32(a uint32) uint32 { return 0 | a } + +//go:noinline +func or_uint32_1(a uint32) uint32 { return a | 1 } + +//go:noinline +func or_1_uint32(a uint32) uint32 { return 1 | a } + +//go:noinline +func or_uint32_4294967295(a uint32) uint32 { return a | 4294967295 } + +//go:noinline +func or_4294967295_uint32(a uint32) uint32 { return 4294967295 | a } + +//go:noinline +func xor_uint32_0(a uint32) uint32 { return a ^ 0 } + +//go:noinline +func xor_0_uint32(a uint32) uint32 { return 0 ^ a } + +//go:noinline +func xor_uint32_1(a uint32) uint32 { return a ^ 1 } + +//go:noinline +func xor_1_uint32(a uint32) uint32 { return 1 ^ a } + +//go:noinline +func xor_uint32_4294967295(a uint32) uint32 { return a ^ 4294967295 } + +//go:noinline +func xor_4294967295_uint32(a uint32) uint32 { return 4294967295 ^ a } + +//go:noinline +func mul_uint32_3(a uint32) uint32 { return a * 3 } + +//go:noinline +func mul_3_uint32(a uint32) uint32 { return 3 * a } + +//go:noinline +func mul_uint32_5(a uint32) uint32 { return a * 5 } + +//go:noinline +func mul_5_uint32(a uint32) uint32 { return 5 * a } + 
+//go:noinline +func mul_uint32_7(a uint32) uint32 { return a * 7 } + +//go:noinline +func mul_7_uint32(a uint32) uint32 { return 7 * a } + +//go:noinline +func mul_uint32_9(a uint32) uint32 { return a * 9 } + +//go:noinline +func mul_9_uint32(a uint32) uint32 { return 9 * a } + +//go:noinline +func mul_uint32_10(a uint32) uint32 { return a * 10 } + +//go:noinline +func mul_10_uint32(a uint32) uint32 { return 10 * a } + +//go:noinline +func mul_uint32_11(a uint32) uint32 { return a * 11 } + +//go:noinline +func mul_11_uint32(a uint32) uint32 { return 11 * a } + +//go:noinline +func mul_uint32_13(a uint32) uint32 { return a * 13 } + +//go:noinline +func mul_13_uint32(a uint32) uint32 { return 13 * a } + +//go:noinline +func mul_uint32_19(a uint32) uint32 { return a * 19 } + +//go:noinline +func mul_19_uint32(a uint32) uint32 { return 19 * a } + +//go:noinline +func mul_uint32_21(a uint32) uint32 { return a * 21 } + +//go:noinline +func mul_21_uint32(a uint32) uint32 { return 21 * a } + +//go:noinline +func mul_uint32_25(a uint32) uint32 { return a * 25 } + +//go:noinline +func mul_25_uint32(a uint32) uint32 { return 25 * a } + +//go:noinline +func mul_uint32_27(a uint32) uint32 { return a * 27 } + +//go:noinline +func mul_27_uint32(a uint32) uint32 { return 27 * a } + +//go:noinline +func mul_uint32_37(a uint32) uint32 { return a * 37 } + +//go:noinline +func mul_37_uint32(a uint32) uint32 { return 37 * a } + +//go:noinline +func mul_uint32_41(a uint32) uint32 { return a * 41 } + +//go:noinline +func mul_41_uint32(a uint32) uint32 { return 41 * a } + +//go:noinline +func mul_uint32_45(a uint32) uint32 { return a * 45 } + +//go:noinline +func mul_45_uint32(a uint32) uint32 { return 45 * a } + +//go:noinline +func mul_uint32_73(a uint32) uint32 { return a * 73 } + +//go:noinline +func mul_73_uint32(a uint32) uint32 { return 73 * a } + +//go:noinline +func mul_uint32_81(a uint32) uint32 { return a * 81 } + +//go:noinline +func mul_81_uint32(a uint32) uint32 { return 81 
* a } + +//go:noinline +func add_int32_Neg2147483648(a int32) int32 { return a + -2147483648 } + +//go:noinline +func add_Neg2147483648_int32(a int32) int32 { return -2147483648 + a } + +//go:noinline +func add_int32_Neg2147483647(a int32) int32 { return a + -2147483647 } + +//go:noinline +func add_Neg2147483647_int32(a int32) int32 { return -2147483647 + a } + +//go:noinline +func add_int32_Neg1(a int32) int32 { return a + -1 } + +//go:noinline +func add_Neg1_int32(a int32) int32 { return -1 + a } + +//go:noinline +func add_int32_0(a int32) int32 { return a + 0 } + +//go:noinline +func add_0_int32(a int32) int32 { return 0 + a } + +//go:noinline +func add_int32_1(a int32) int32 { return a + 1 } + +//go:noinline +func add_1_int32(a int32) int32 { return 1 + a } + +//go:noinline +func add_int32_2147483647(a int32) int32 { return a + 2147483647 } + +//go:noinline +func add_2147483647_int32(a int32) int32 { return 2147483647 + a } + +//go:noinline +func sub_int32_Neg2147483648(a int32) int32 { return a - -2147483648 } + +//go:noinline +func sub_Neg2147483648_int32(a int32) int32 { return -2147483648 - a } + +//go:noinline +func sub_int32_Neg2147483647(a int32) int32 { return a - -2147483647 } + +//go:noinline +func sub_Neg2147483647_int32(a int32) int32 { return -2147483647 - a } + +//go:noinline +func sub_int32_Neg1(a int32) int32 { return a - -1 } + +//go:noinline +func sub_Neg1_int32(a int32) int32 { return -1 - a } + +//go:noinline +func sub_int32_0(a int32) int32 { return a - 0 } + +//go:noinline +func sub_0_int32(a int32) int32 { return 0 - a } + +//go:noinline +func sub_int32_1(a int32) int32 { return a - 1 } + +//go:noinline +func sub_1_int32(a int32) int32 { return 1 - a } + +//go:noinline +func sub_int32_2147483647(a int32) int32 { return a - 2147483647 } + +//go:noinline +func sub_2147483647_int32(a int32) int32 { return 2147483647 - a } + +//go:noinline +func div_int32_Neg2147483648(a int32) int32 { return a / -2147483648 } + +//go:noinline +func 
div_Neg2147483648_int32(a int32) int32 { return -2147483648 / a } + +//go:noinline +func div_int32_Neg2147483647(a int32) int32 { return a / -2147483647 } + +//go:noinline +func div_Neg2147483647_int32(a int32) int32 { return -2147483647 / a } + +//go:noinline +func div_int32_Neg1(a int32) int32 { return a / -1 } + +//go:noinline +func div_Neg1_int32(a int32) int32 { return -1 / a } + +//go:noinline +func div_0_int32(a int32) int32 { return 0 / a } + +//go:noinline +func div_int32_1(a int32) int32 { return a / 1 } + +//go:noinline +func div_1_int32(a int32) int32 { return 1 / a } + +//go:noinline +func div_int32_2147483647(a int32) int32 { return a / 2147483647 } + +//go:noinline +func div_2147483647_int32(a int32) int32 { return 2147483647 / a } + +//go:noinline +func mul_int32_Neg2147483648(a int32) int32 { return a * -2147483648 } + +//go:noinline +func mul_Neg2147483648_int32(a int32) int32 { return -2147483648 * a } + +//go:noinline +func mul_int32_Neg2147483647(a int32) int32 { return a * -2147483647 } + +//go:noinline +func mul_Neg2147483647_int32(a int32) int32 { return -2147483647 * a } + +//go:noinline +func mul_int32_Neg1(a int32) int32 { return a * -1 } + +//go:noinline +func mul_Neg1_int32(a int32) int32 { return -1 * a } + +//go:noinline +func mul_int32_0(a int32) int32 { return a * 0 } + +//go:noinline +func mul_0_int32(a int32) int32 { return 0 * a } + +//go:noinline +func mul_int32_1(a int32) int32 { return a * 1 } + +//go:noinline +func mul_1_int32(a int32) int32 { return 1 * a } + +//go:noinline +func mul_int32_2147483647(a int32) int32 { return a * 2147483647 } + +//go:noinline +func mul_2147483647_int32(a int32) int32 { return 2147483647 * a } + +//go:noinline +func mod_int32_Neg2147483648(a int32) int32 { return a % -2147483648 } + +//go:noinline +func mod_Neg2147483648_int32(a int32) int32 { return -2147483648 % a } + +//go:noinline +func mod_int32_Neg2147483647(a int32) int32 { return a % -2147483647 } + +//go:noinline +func 
mod_Neg2147483647_int32(a int32) int32 { return -2147483647 % a } + +//go:noinline +func mod_int32_Neg1(a int32) int32 { return a % -1 } + +//go:noinline +func mod_Neg1_int32(a int32) int32 { return -1 % a } + +//go:noinline +func mod_0_int32(a int32) int32 { return 0 % a } + +//go:noinline +func mod_int32_1(a int32) int32 { return a % 1 } + +//go:noinline +func mod_1_int32(a int32) int32 { return 1 % a } + +//go:noinline +func mod_int32_2147483647(a int32) int32 { return a % 2147483647 } + +//go:noinline +func mod_2147483647_int32(a int32) int32 { return 2147483647 % a } + +//go:noinline +func and_int32_Neg2147483648(a int32) int32 { return a & -2147483648 } + +//go:noinline +func and_Neg2147483648_int32(a int32) int32 { return -2147483648 & a } + +//go:noinline +func and_int32_Neg2147483647(a int32) int32 { return a & -2147483647 } + +//go:noinline +func and_Neg2147483647_int32(a int32) int32 { return -2147483647 & a } + +//go:noinline +func and_int32_Neg1(a int32) int32 { return a & -1 } + +//go:noinline +func and_Neg1_int32(a int32) int32 { return -1 & a } + +//go:noinline +func and_int32_0(a int32) int32 { return a & 0 } + +//go:noinline +func and_0_int32(a int32) int32 { return 0 & a } + +//go:noinline +func and_int32_1(a int32) int32 { return a & 1 } + +//go:noinline +func and_1_int32(a int32) int32 { return 1 & a } + +//go:noinline +func and_int32_2147483647(a int32) int32 { return a & 2147483647 } + +//go:noinline +func and_2147483647_int32(a int32) int32 { return 2147483647 & a } + +//go:noinline +func or_int32_Neg2147483648(a int32) int32 { return a | -2147483648 } + +//go:noinline +func or_Neg2147483648_int32(a int32) int32 { return -2147483648 | a } + +//go:noinline +func or_int32_Neg2147483647(a int32) int32 { return a | -2147483647 } + +//go:noinline +func or_Neg2147483647_int32(a int32) int32 { return -2147483647 | a } + +//go:noinline +func or_int32_Neg1(a int32) int32 { return a | -1 } + +//go:noinline +func or_Neg1_int32(a int32) int32 { return 
-1 | a } + +//go:noinline +func or_int32_0(a int32) int32 { return a | 0 } + +//go:noinline +func or_0_int32(a int32) int32 { return 0 | a } + +//go:noinline +func or_int32_1(a int32) int32 { return a | 1 } + +//go:noinline +func or_1_int32(a int32) int32 { return 1 | a } + +//go:noinline +func or_int32_2147483647(a int32) int32 { return a | 2147483647 } + +//go:noinline +func or_2147483647_int32(a int32) int32 { return 2147483647 | a } + +//go:noinline +func xor_int32_Neg2147483648(a int32) int32 { return a ^ -2147483648 } + +//go:noinline +func xor_Neg2147483648_int32(a int32) int32 { return -2147483648 ^ a } + +//go:noinline +func xor_int32_Neg2147483647(a int32) int32 { return a ^ -2147483647 } + +//go:noinline +func xor_Neg2147483647_int32(a int32) int32 { return -2147483647 ^ a } + +//go:noinline +func xor_int32_Neg1(a int32) int32 { return a ^ -1 } + +//go:noinline +func xor_Neg1_int32(a int32) int32 { return -1 ^ a } + +//go:noinline +func xor_int32_0(a int32) int32 { return a ^ 0 } + +//go:noinline +func xor_0_int32(a int32) int32 { return 0 ^ a } + +//go:noinline +func xor_int32_1(a int32) int32 { return a ^ 1 } + +//go:noinline +func xor_1_int32(a int32) int32 { return 1 ^ a } + +//go:noinline +func xor_int32_2147483647(a int32) int32 { return a ^ 2147483647 } + +//go:noinline +func xor_2147483647_int32(a int32) int32 { return 2147483647 ^ a } + +//go:noinline +func mul_int32_Neg9(a int32) int32 { return a * -9 } + +//go:noinline +func mul_Neg9_int32(a int32) int32 { return -9 * a } + +//go:noinline +func mul_int32_Neg5(a int32) int32 { return a * -5 } + +//go:noinline +func mul_Neg5_int32(a int32) int32 { return -5 * a } + +//go:noinline +func mul_int32_Neg3(a int32) int32 { return a * -3 } + +//go:noinline +func mul_Neg3_int32(a int32) int32 { return -3 * a } + +//go:noinline +func mul_int32_3(a int32) int32 { return a * 3 } + +//go:noinline +func mul_3_int32(a int32) int32 { return 3 * a } + +//go:noinline +func mul_int32_5(a int32) int32 { return a * 
5 } + +//go:noinline +func mul_5_int32(a int32) int32 { return 5 * a } + +//go:noinline +func mul_int32_7(a int32) int32 { return a * 7 } + +//go:noinline +func mul_7_int32(a int32) int32 { return 7 * a } + +//go:noinline +func mul_int32_9(a int32) int32 { return a * 9 } + +//go:noinline +func mul_9_int32(a int32) int32 { return 9 * a } + +//go:noinline +func mul_int32_10(a int32) int32 { return a * 10 } + +//go:noinline +func mul_10_int32(a int32) int32 { return 10 * a } + +//go:noinline +func mul_int32_11(a int32) int32 { return a * 11 } + +//go:noinline +func mul_11_int32(a int32) int32 { return 11 * a } + +//go:noinline +func mul_int32_13(a int32) int32 { return a * 13 } + +//go:noinline +func mul_13_int32(a int32) int32 { return 13 * a } + +//go:noinline +func mul_int32_19(a int32) int32 { return a * 19 } + +//go:noinline +func mul_19_int32(a int32) int32 { return 19 * a } + +//go:noinline +func mul_int32_21(a int32) int32 { return a * 21 } + +//go:noinline +func mul_21_int32(a int32) int32 { return 21 * a } + +//go:noinline +func mul_int32_25(a int32) int32 { return a * 25 } + +//go:noinline +func mul_25_int32(a int32) int32 { return 25 * a } + +//go:noinline +func mul_int32_27(a int32) int32 { return a * 27 } + +//go:noinline +func mul_27_int32(a int32) int32 { return 27 * a } + +//go:noinline +func mul_int32_37(a int32) int32 { return a * 37 } + +//go:noinline +func mul_37_int32(a int32) int32 { return 37 * a } + +//go:noinline +func mul_int32_41(a int32) int32 { return a * 41 } + +//go:noinline +func mul_41_int32(a int32) int32 { return 41 * a } + +//go:noinline +func mul_int32_45(a int32) int32 { return a * 45 } + +//go:noinline +func mul_45_int32(a int32) int32 { return 45 * a } + +//go:noinline +func mul_int32_73(a int32) int32 { return a * 73 } + +//go:noinline +func mul_73_int32(a int32) int32 { return 73 * a } + +//go:noinline +func mul_int32_81(a int32) int32 { return a * 81 } + +//go:noinline +func mul_81_int32(a int32) int32 { return 81 * a } + 
+//go:noinline +func add_uint16_0(a uint16) uint16 { return a + 0 } + +//go:noinline +func add_0_uint16(a uint16) uint16 { return 0 + a } + +//go:noinline +func add_uint16_1(a uint16) uint16 { return a + 1 } + +//go:noinline +func add_1_uint16(a uint16) uint16 { return 1 + a } + +//go:noinline +func add_uint16_65535(a uint16) uint16 { return a + 65535 } + +//go:noinline +func add_65535_uint16(a uint16) uint16 { return 65535 + a } + +//go:noinline +func sub_uint16_0(a uint16) uint16 { return a - 0 } + +//go:noinline +func sub_0_uint16(a uint16) uint16 { return 0 - a } + +//go:noinline +func sub_uint16_1(a uint16) uint16 { return a - 1 } + +//go:noinline +func sub_1_uint16(a uint16) uint16 { return 1 - a } + +//go:noinline +func sub_uint16_65535(a uint16) uint16 { return a - 65535 } + +//go:noinline +func sub_65535_uint16(a uint16) uint16 { return 65535 - a } + +//go:noinline +func div_0_uint16(a uint16) uint16 { return 0 / a } + +//go:noinline +func div_uint16_1(a uint16) uint16 { return a / 1 } + +//go:noinline +func div_1_uint16(a uint16) uint16 { return 1 / a } + +//go:noinline +func div_uint16_65535(a uint16) uint16 { return a / 65535 } + +//go:noinline +func div_65535_uint16(a uint16) uint16 { return 65535 / a } + +//go:noinline +func mul_uint16_0(a uint16) uint16 { return a * 0 } + +//go:noinline +func mul_0_uint16(a uint16) uint16 { return 0 * a } + +//go:noinline +func mul_uint16_1(a uint16) uint16 { return a * 1 } + +//go:noinline +func mul_1_uint16(a uint16) uint16 { return 1 * a } + +//go:noinline +func mul_uint16_65535(a uint16) uint16 { return a * 65535 } + +//go:noinline +func mul_65535_uint16(a uint16) uint16 { return 65535 * a } + +//go:noinline +func lsh_uint16_0(a uint16) uint16 { return a << 0 } + +//go:noinline +func lsh_0_uint16(a uint16) uint16 { return 0 << a } + +//go:noinline +func lsh_uint16_1(a uint16) uint16 { return a << 1 } + +//go:noinline +func lsh_1_uint16(a uint16) uint16 { return 1 << a } + +//go:noinline +func lsh_uint16_65535(a 
uint16) uint16 { return a << 65535 } + +//go:noinline +func lsh_65535_uint16(a uint16) uint16 { return 65535 << a } + +//go:noinline +func rsh_uint16_0(a uint16) uint16 { return a >> 0 } + +//go:noinline +func rsh_0_uint16(a uint16) uint16 { return 0 >> a } + +//go:noinline +func rsh_uint16_1(a uint16) uint16 { return a >> 1 } + +//go:noinline +func rsh_1_uint16(a uint16) uint16 { return 1 >> a } + +//go:noinline +func rsh_uint16_65535(a uint16) uint16 { return a >> 65535 } + +//go:noinline +func rsh_65535_uint16(a uint16) uint16 { return 65535 >> a } + +//go:noinline +func mod_0_uint16(a uint16) uint16 { return 0 % a } + +//go:noinline +func mod_uint16_1(a uint16) uint16 { return a % 1 } + +//go:noinline +func mod_1_uint16(a uint16) uint16 { return 1 % a } + +//go:noinline +func mod_uint16_65535(a uint16) uint16 { return a % 65535 } + +//go:noinline +func mod_65535_uint16(a uint16) uint16 { return 65535 % a } + +//go:noinline +func and_uint16_0(a uint16) uint16 { return a & 0 } + +//go:noinline +func and_0_uint16(a uint16) uint16 { return 0 & a } + +//go:noinline +func and_uint16_1(a uint16) uint16 { return a & 1 } + +//go:noinline +func and_1_uint16(a uint16) uint16 { return 1 & a } + +//go:noinline +func and_uint16_65535(a uint16) uint16 { return a & 65535 } + +//go:noinline +func and_65535_uint16(a uint16) uint16 { return 65535 & a } + +//go:noinline +func or_uint16_0(a uint16) uint16 { return a | 0 } + +//go:noinline +func or_0_uint16(a uint16) uint16 { return 0 | a } + +//go:noinline +func or_uint16_1(a uint16) uint16 { return a | 1 } + +//go:noinline +func or_1_uint16(a uint16) uint16 { return 1 | a } + +//go:noinline +func or_uint16_65535(a uint16) uint16 { return a | 65535 } + +//go:noinline +func or_65535_uint16(a uint16) uint16 { return 65535 | a } + +//go:noinline +func xor_uint16_0(a uint16) uint16 { return a ^ 0 } + +//go:noinline +func xor_0_uint16(a uint16) uint16 { return 0 ^ a } + +//go:noinline +func xor_uint16_1(a uint16) uint16 { return a ^ 1 } 
+ +//go:noinline +func xor_1_uint16(a uint16) uint16 { return 1 ^ a } + +//go:noinline +func xor_uint16_65535(a uint16) uint16 { return a ^ 65535 } + +//go:noinline +func xor_65535_uint16(a uint16) uint16 { return 65535 ^ a } + +//go:noinline +func add_int16_Neg32768(a int16) int16 { return a + -32768 } + +//go:noinline +func add_Neg32768_int16(a int16) int16 { return -32768 + a } + +//go:noinline +func add_int16_Neg32767(a int16) int16 { return a + -32767 } + +//go:noinline +func add_Neg32767_int16(a int16) int16 { return -32767 + a } + +//go:noinline +func add_int16_Neg1(a int16) int16 { return a + -1 } + +//go:noinline +func add_Neg1_int16(a int16) int16 { return -1 + a } + +//go:noinline +func add_int16_0(a int16) int16 { return a + 0 } + +//go:noinline +func add_0_int16(a int16) int16 { return 0 + a } + +//go:noinline +func add_int16_1(a int16) int16 { return a + 1 } + +//go:noinline +func add_1_int16(a int16) int16 { return 1 + a } + +//go:noinline +func add_int16_32766(a int16) int16 { return a + 32766 } + +//go:noinline +func add_32766_int16(a int16) int16 { return 32766 + a } + +//go:noinline +func add_int16_32767(a int16) int16 { return a + 32767 } + +//go:noinline +func add_32767_int16(a int16) int16 { return 32767 + a } + +//go:noinline +func sub_int16_Neg32768(a int16) int16 { return a - -32768 } + +//go:noinline +func sub_Neg32768_int16(a int16) int16 { return -32768 - a } + +//go:noinline +func sub_int16_Neg32767(a int16) int16 { return a - -32767 } + +//go:noinline +func sub_Neg32767_int16(a int16) int16 { return -32767 - a } + +//go:noinline +func sub_int16_Neg1(a int16) int16 { return a - -1 } + +//go:noinline +func sub_Neg1_int16(a int16) int16 { return -1 - a } + +//go:noinline +func sub_int16_0(a int16) int16 { return a - 0 } + +//go:noinline +func sub_0_int16(a int16) int16 { return 0 - a } + +//go:noinline +func sub_int16_1(a int16) int16 { return a - 1 } + +//go:noinline +func sub_1_int16(a int16) int16 { return 1 - a } + +//go:noinline 
+func sub_int16_32766(a int16) int16 { return a - 32766 } + +//go:noinline +func sub_32766_int16(a int16) int16 { return 32766 - a } + +//go:noinline +func sub_int16_32767(a int16) int16 { return a - 32767 } + +//go:noinline +func sub_32767_int16(a int16) int16 { return 32767 - a } + +//go:noinline +func div_int16_Neg32768(a int16) int16 { return a / -32768 } + +//go:noinline +func div_Neg32768_int16(a int16) int16 { return -32768 / a } + +//go:noinline +func div_int16_Neg32767(a int16) int16 { return a / -32767 } + +//go:noinline +func div_Neg32767_int16(a int16) int16 { return -32767 / a } + +//go:noinline +func div_int16_Neg1(a int16) int16 { return a / -1 } + +//go:noinline +func div_Neg1_int16(a int16) int16 { return -1 / a } + +//go:noinline +func div_0_int16(a int16) int16 { return 0 / a } + +//go:noinline +func div_int16_1(a int16) int16 { return a / 1 } + +//go:noinline +func div_1_int16(a int16) int16 { return 1 / a } + +//go:noinline +func div_int16_32766(a int16) int16 { return a / 32766 } + +//go:noinline +func div_32766_int16(a int16) int16 { return 32766 / a } + +//go:noinline +func div_int16_32767(a int16) int16 { return a / 32767 } + +//go:noinline +func div_32767_int16(a int16) int16 { return 32767 / a } + +//go:noinline +func mul_int16_Neg32768(a int16) int16 { return a * -32768 } + +//go:noinline +func mul_Neg32768_int16(a int16) int16 { return -32768 * a } + +//go:noinline +func mul_int16_Neg32767(a int16) int16 { return a * -32767 } + +//go:noinline +func mul_Neg32767_int16(a int16) int16 { return -32767 * a } + +//go:noinline +func mul_int16_Neg1(a int16) int16 { return a * -1 } + +//go:noinline +func mul_Neg1_int16(a int16) int16 { return -1 * a } + +//go:noinline +func mul_int16_0(a int16) int16 { return a * 0 } + +//go:noinline +func mul_0_int16(a int16) int16 { return 0 * a } + +//go:noinline +func mul_int16_1(a int16) int16 { return a * 1 } + +//go:noinline +func mul_1_int16(a int16) int16 { return 1 * a } + +//go:noinline +func 
mul_int16_32766(a int16) int16 { return a * 32766 } + +//go:noinline +func mul_32766_int16(a int16) int16 { return 32766 * a } + +//go:noinline +func mul_int16_32767(a int16) int16 { return a * 32767 } + +//go:noinline +func mul_32767_int16(a int16) int16 { return 32767 * a } + +//go:noinline +func mod_int16_Neg32768(a int16) int16 { return a % -32768 } + +//go:noinline +func mod_Neg32768_int16(a int16) int16 { return -32768 % a } + +//go:noinline +func mod_int16_Neg32767(a int16) int16 { return a % -32767 } + +//go:noinline +func mod_Neg32767_int16(a int16) int16 { return -32767 % a } + +//go:noinline +func mod_int16_Neg1(a int16) int16 { return a % -1 } + +//go:noinline +func mod_Neg1_int16(a int16) int16 { return -1 % a } + +//go:noinline +func mod_0_int16(a int16) int16 { return 0 % a } + +//go:noinline +func mod_int16_1(a int16) int16 { return a % 1 } + +//go:noinline +func mod_1_int16(a int16) int16 { return 1 % a } + +//go:noinline +func mod_int16_32766(a int16) int16 { return a % 32766 } + +//go:noinline +func mod_32766_int16(a int16) int16 { return 32766 % a } + +//go:noinline +func mod_int16_32767(a int16) int16 { return a % 32767 } + +//go:noinline +func mod_32767_int16(a int16) int16 { return 32767 % a } + +//go:noinline +func and_int16_Neg32768(a int16) int16 { return a & -32768 } + +//go:noinline +func and_Neg32768_int16(a int16) int16 { return -32768 & a } + +//go:noinline +func and_int16_Neg32767(a int16) int16 { return a & -32767 } + +//go:noinline +func and_Neg32767_int16(a int16) int16 { return -32767 & a } + +//go:noinline +func and_int16_Neg1(a int16) int16 { return a & -1 } + +//go:noinline +func and_Neg1_int16(a int16) int16 { return -1 & a } + +//go:noinline +func and_int16_0(a int16) int16 { return a & 0 } + +//go:noinline +func and_0_int16(a int16) int16 { return 0 & a } + +//go:noinline +func and_int16_1(a int16) int16 { return a & 1 } + +//go:noinline +func and_1_int16(a int16) int16 { return 1 & a } + +//go:noinline +func 
and_int16_32766(a int16) int16 { return a & 32766 } + +//go:noinline +func and_32766_int16(a int16) int16 { return 32766 & a } + +//go:noinline +func and_int16_32767(a int16) int16 { return a & 32767 } + +//go:noinline +func and_32767_int16(a int16) int16 { return 32767 & a } + +//go:noinline +func or_int16_Neg32768(a int16) int16 { return a | -32768 } + +//go:noinline +func or_Neg32768_int16(a int16) int16 { return -32768 | a } + +//go:noinline +func or_int16_Neg32767(a int16) int16 { return a | -32767 } + +//go:noinline +func or_Neg32767_int16(a int16) int16 { return -32767 | a } + +//go:noinline +func or_int16_Neg1(a int16) int16 { return a | -1 } + +//go:noinline +func or_Neg1_int16(a int16) int16 { return -1 | a } + +//go:noinline +func or_int16_0(a int16) int16 { return a | 0 } + +//go:noinline +func or_0_int16(a int16) int16 { return 0 | a } + +//go:noinline +func or_int16_1(a int16) int16 { return a | 1 } + +//go:noinline +func or_1_int16(a int16) int16 { return 1 | a } + +//go:noinline +func or_int16_32766(a int16) int16 { return a | 32766 } + +//go:noinline +func or_32766_int16(a int16) int16 { return 32766 | a } + +//go:noinline +func or_int16_32767(a int16) int16 { return a | 32767 } + +//go:noinline +func or_32767_int16(a int16) int16 { return 32767 | a } + +//go:noinline +func xor_int16_Neg32768(a int16) int16 { return a ^ -32768 } + +//go:noinline +func xor_Neg32768_int16(a int16) int16 { return -32768 ^ a } + +//go:noinline +func xor_int16_Neg32767(a int16) int16 { return a ^ -32767 } + +//go:noinline +func xor_Neg32767_int16(a int16) int16 { return -32767 ^ a } + +//go:noinline +func xor_int16_Neg1(a int16) int16 { return a ^ -1 } + +//go:noinline +func xor_Neg1_int16(a int16) int16 { return -1 ^ a } + +//go:noinline +func xor_int16_0(a int16) int16 { return a ^ 0 } + +//go:noinline +func xor_0_int16(a int16) int16 { return 0 ^ a } + +//go:noinline +func xor_int16_1(a int16) int16 { return a ^ 1 } + +//go:noinline +func xor_1_int16(a int16) int16 { 
return 1 ^ a } + +//go:noinline +func xor_int16_32766(a int16) int16 { return a ^ 32766 } + +//go:noinline +func xor_32766_int16(a int16) int16 { return 32766 ^ a } + +//go:noinline +func xor_int16_32767(a int16) int16 { return a ^ 32767 } + +//go:noinline +func xor_32767_int16(a int16) int16 { return 32767 ^ a } + +//go:noinline +func add_uint8_0(a uint8) uint8 { return a + 0 } + +//go:noinline +func add_0_uint8(a uint8) uint8 { return 0 + a } + +//go:noinline +func add_uint8_1(a uint8) uint8 { return a + 1 } + +//go:noinline +func add_1_uint8(a uint8) uint8 { return 1 + a } + +//go:noinline +func add_uint8_255(a uint8) uint8 { return a + 255 } + +//go:noinline +func add_255_uint8(a uint8) uint8 { return 255 + a } + +//go:noinline +func sub_uint8_0(a uint8) uint8 { return a - 0 } + +//go:noinline +func sub_0_uint8(a uint8) uint8 { return 0 - a } + +//go:noinline +func sub_uint8_1(a uint8) uint8 { return a - 1 } + +//go:noinline +func sub_1_uint8(a uint8) uint8 { return 1 - a } + +//go:noinline +func sub_uint8_255(a uint8) uint8 { return a - 255 } + +//go:noinline +func sub_255_uint8(a uint8) uint8 { return 255 - a } + +//go:noinline +func div_0_uint8(a uint8) uint8 { return 0 / a } + +//go:noinline +func div_uint8_1(a uint8) uint8 { return a / 1 } + +//go:noinline +func div_1_uint8(a uint8) uint8 { return 1 / a } + +//go:noinline +func div_uint8_255(a uint8) uint8 { return a / 255 } + +//go:noinline +func div_255_uint8(a uint8) uint8 { return 255 / a } + +//go:noinline +func mul_uint8_0(a uint8) uint8 { return a * 0 } + +//go:noinline +func mul_0_uint8(a uint8) uint8 { return 0 * a } + +//go:noinline +func mul_uint8_1(a uint8) uint8 { return a * 1 } + +//go:noinline +func mul_1_uint8(a uint8) uint8 { return 1 * a } + +//go:noinline +func mul_uint8_255(a uint8) uint8 { return a * 255 } + +//go:noinline +func mul_255_uint8(a uint8) uint8 { return 255 * a } + +//go:noinline +func lsh_uint8_0(a uint8) uint8 { return a << 0 } + +//go:noinline +func lsh_0_uint8(a uint8) 
uint8 { return 0 << a } + +//go:noinline +func lsh_uint8_1(a uint8) uint8 { return a << 1 } + +//go:noinline +func lsh_1_uint8(a uint8) uint8 { return 1 << a } + +//go:noinline +func lsh_uint8_255(a uint8) uint8 { return a << 255 } + +//go:noinline +func lsh_255_uint8(a uint8) uint8 { return 255 << a } + +//go:noinline +func rsh_uint8_0(a uint8) uint8 { return a >> 0 } + +//go:noinline +func rsh_0_uint8(a uint8) uint8 { return 0 >> a } + +//go:noinline +func rsh_uint8_1(a uint8) uint8 { return a >> 1 } + +//go:noinline +func rsh_1_uint8(a uint8) uint8 { return 1 >> a } + +//go:noinline +func rsh_uint8_255(a uint8) uint8 { return a >> 255 } + +//go:noinline +func rsh_255_uint8(a uint8) uint8 { return 255 >> a } + +//go:noinline +func mod_0_uint8(a uint8) uint8 { return 0 % a } + +//go:noinline +func mod_uint8_1(a uint8) uint8 { return a % 1 } + +//go:noinline +func mod_1_uint8(a uint8) uint8 { return 1 % a } + +//go:noinline +func mod_uint8_255(a uint8) uint8 { return a % 255 } + +//go:noinline +func mod_255_uint8(a uint8) uint8 { return 255 % a } + +//go:noinline +func and_uint8_0(a uint8) uint8 { return a & 0 } + +//go:noinline +func and_0_uint8(a uint8) uint8 { return 0 & a } + +//go:noinline +func and_uint8_1(a uint8) uint8 { return a & 1 } + +//go:noinline +func and_1_uint8(a uint8) uint8 { return 1 & a } + +//go:noinline +func and_uint8_255(a uint8) uint8 { return a & 255 } + +//go:noinline +func and_255_uint8(a uint8) uint8 { return 255 & a } + +//go:noinline +func or_uint8_0(a uint8) uint8 { return a | 0 } + +//go:noinline +func or_0_uint8(a uint8) uint8 { return 0 | a } + +//go:noinline +func or_uint8_1(a uint8) uint8 { return a | 1 } + +//go:noinline +func or_1_uint8(a uint8) uint8 { return 1 | a } + +//go:noinline +func or_uint8_255(a uint8) uint8 { return a | 255 } + +//go:noinline +func or_255_uint8(a uint8) uint8 { return 255 | a } + +//go:noinline +func xor_uint8_0(a uint8) uint8 { return a ^ 0 } + +//go:noinline +func xor_0_uint8(a uint8) uint8 { 
return 0 ^ a } + +//go:noinline +func xor_uint8_1(a uint8) uint8 { return a ^ 1 } + +//go:noinline +func xor_1_uint8(a uint8) uint8 { return 1 ^ a } + +//go:noinline +func xor_uint8_255(a uint8) uint8 { return a ^ 255 } + +//go:noinline +func xor_255_uint8(a uint8) uint8 { return 255 ^ a } + +//go:noinline +func add_int8_Neg128(a int8) int8 { return a + -128 } + +//go:noinline +func add_Neg128_int8(a int8) int8 { return -128 + a } + +//go:noinline +func add_int8_Neg127(a int8) int8 { return a + -127 } + +//go:noinline +func add_Neg127_int8(a int8) int8 { return -127 + a } + +//go:noinline +func add_int8_Neg1(a int8) int8 { return a + -1 } + +//go:noinline +func add_Neg1_int8(a int8) int8 { return -1 + a } + +//go:noinline +func add_int8_0(a int8) int8 { return a + 0 } + +//go:noinline +func add_0_int8(a int8) int8 { return 0 + a } + +//go:noinline +func add_int8_1(a int8) int8 { return a + 1 } + +//go:noinline +func add_1_int8(a int8) int8 { return 1 + a } + +//go:noinline +func add_int8_126(a int8) int8 { return a + 126 } + +//go:noinline +func add_126_int8(a int8) int8 { return 126 + a } + +//go:noinline +func add_int8_127(a int8) int8 { return a + 127 } + +//go:noinline +func add_127_int8(a int8) int8 { return 127 + a } + +//go:noinline +func sub_int8_Neg128(a int8) int8 { return a - -128 } + +//go:noinline +func sub_Neg128_int8(a int8) int8 { return -128 - a } + +//go:noinline +func sub_int8_Neg127(a int8) int8 { return a - -127 } + +//go:noinline +func sub_Neg127_int8(a int8) int8 { return -127 - a } + +//go:noinline +func sub_int8_Neg1(a int8) int8 { return a - -1 } + +//go:noinline +func sub_Neg1_int8(a int8) int8 { return -1 - a } + +//go:noinline +func sub_int8_0(a int8) int8 { return a - 0 } + +//go:noinline +func sub_0_int8(a int8) int8 { return 0 - a } + +//go:noinline +func sub_int8_1(a int8) int8 { return a - 1 } + +//go:noinline +func sub_1_int8(a int8) int8 { return 1 - a } + +//go:noinline +func sub_int8_126(a int8) int8 { return a - 126 } + 
+//go:noinline +func sub_126_int8(a int8) int8 { return 126 - a } + +//go:noinline +func sub_int8_127(a int8) int8 { return a - 127 } + +//go:noinline +func sub_127_int8(a int8) int8 { return 127 - a } + +//go:noinline +func div_int8_Neg128(a int8) int8 { return a / -128 } + +//go:noinline +func div_Neg128_int8(a int8) int8 { return -128 / a } + +//go:noinline +func div_int8_Neg127(a int8) int8 { return a / -127 } + +//go:noinline +func div_Neg127_int8(a int8) int8 { return -127 / a } + +//go:noinline +func div_int8_Neg1(a int8) int8 { return a / -1 } + +//go:noinline +func div_Neg1_int8(a int8) int8 { return -1 / a } + +//go:noinline +func div_0_int8(a int8) int8 { return 0 / a } + +//go:noinline +func div_int8_1(a int8) int8 { return a / 1 } + +//go:noinline +func div_1_int8(a int8) int8 { return 1 / a } + +//go:noinline +func div_int8_126(a int8) int8 { return a / 126 } + +//go:noinline +func div_126_int8(a int8) int8 { return 126 / a } + +//go:noinline +func div_int8_127(a int8) int8 { return a / 127 } + +//go:noinline +func div_127_int8(a int8) int8 { return 127 / a } + +//go:noinline +func mul_int8_Neg128(a int8) int8 { return a * -128 } + +//go:noinline +func mul_Neg128_int8(a int8) int8 { return -128 * a } + +//go:noinline +func mul_int8_Neg127(a int8) int8 { return a * -127 } + +//go:noinline +func mul_Neg127_int8(a int8) int8 { return -127 * a } + +//go:noinline +func mul_int8_Neg1(a int8) int8 { return a * -1 } + +//go:noinline +func mul_Neg1_int8(a int8) int8 { return -1 * a } + +//go:noinline +func mul_int8_0(a int8) int8 { return a * 0 } + +//go:noinline +func mul_0_int8(a int8) int8 { return 0 * a } + +//go:noinline +func mul_int8_1(a int8) int8 { return a * 1 } + +//go:noinline +func mul_1_int8(a int8) int8 { return 1 * a } + +//go:noinline +func mul_int8_126(a int8) int8 { return a * 126 } + +//go:noinline +func mul_126_int8(a int8) int8 { return 126 * a } + +//go:noinline +func mul_int8_127(a int8) int8 { return a * 127 } + +//go:noinline +func 
mul_127_int8(a int8) int8 { return 127 * a } + +//go:noinline +func mod_int8_Neg128(a int8) int8 { return a % -128 } + +//go:noinline +func mod_Neg128_int8(a int8) int8 { return -128 % a } + +//go:noinline +func mod_int8_Neg127(a int8) int8 { return a % -127 } + +//go:noinline +func mod_Neg127_int8(a int8) int8 { return -127 % a } + +//go:noinline +func mod_int8_Neg1(a int8) int8 { return a % -1 } + +//go:noinline +func mod_Neg1_int8(a int8) int8 { return -1 % a } + +//go:noinline +func mod_0_int8(a int8) int8 { return 0 % a } + +//go:noinline +func mod_int8_1(a int8) int8 { return a % 1 } + +//go:noinline +func mod_1_int8(a int8) int8 { return 1 % a } + +//go:noinline +func mod_int8_126(a int8) int8 { return a % 126 } + +//go:noinline +func mod_126_int8(a int8) int8 { return 126 % a } + +//go:noinline +func mod_int8_127(a int8) int8 { return a % 127 } + +//go:noinline +func mod_127_int8(a int8) int8 { return 127 % a } + +//go:noinline +func and_int8_Neg128(a int8) int8 { return a & -128 } + +//go:noinline +func and_Neg128_int8(a int8) int8 { return -128 & a } + +//go:noinline +func and_int8_Neg127(a int8) int8 { return a & -127 } + +//go:noinline +func and_Neg127_int8(a int8) int8 { return -127 & a } + +//go:noinline +func and_int8_Neg1(a int8) int8 { return a & -1 } + +//go:noinline +func and_Neg1_int8(a int8) int8 { return -1 & a } + +//go:noinline +func and_int8_0(a int8) int8 { return a & 0 } + +//go:noinline +func and_0_int8(a int8) int8 { return 0 & a } + +//go:noinline +func and_int8_1(a int8) int8 { return a & 1 } + +//go:noinline +func and_1_int8(a int8) int8 { return 1 & a } + +//go:noinline +func and_int8_126(a int8) int8 { return a & 126 } + +//go:noinline +func and_126_int8(a int8) int8 { return 126 & a } + +//go:noinline +func and_int8_127(a int8) int8 { return a & 127 } + +//go:noinline +func and_127_int8(a int8) int8 { return 127 & a } + +//go:noinline +func or_int8_Neg128(a int8) int8 { return a | -128 } + +//go:noinline +func or_Neg128_int8(a 
int8) int8 { return -128 | a } + +//go:noinline +func or_int8_Neg127(a int8) int8 { return a | -127 } + +//go:noinline +func or_Neg127_int8(a int8) int8 { return -127 | a } + +//go:noinline +func or_int8_Neg1(a int8) int8 { return a | -1 } + +//go:noinline +func or_Neg1_int8(a int8) int8 { return -1 | a } + +//go:noinline +func or_int8_0(a int8) int8 { return a | 0 } + +//go:noinline +func or_0_int8(a int8) int8 { return 0 | a } + +//go:noinline +func or_int8_1(a int8) int8 { return a | 1 } + +//go:noinline +func or_1_int8(a int8) int8 { return 1 | a } + +//go:noinline +func or_int8_126(a int8) int8 { return a | 126 } + +//go:noinline +func or_126_int8(a int8) int8 { return 126 | a } + +//go:noinline +func or_int8_127(a int8) int8 { return a | 127 } + +//go:noinline +func or_127_int8(a int8) int8 { return 127 | a } + +//go:noinline +func xor_int8_Neg128(a int8) int8 { return a ^ -128 } + +//go:noinline +func xor_Neg128_int8(a int8) int8 { return -128 ^ a } + +//go:noinline +func xor_int8_Neg127(a int8) int8 { return a ^ -127 } + +//go:noinline +func xor_Neg127_int8(a int8) int8 { return -127 ^ a } + +//go:noinline +func xor_int8_Neg1(a int8) int8 { return a ^ -1 } + +//go:noinline +func xor_Neg1_int8(a int8) int8 { return -1 ^ a } + +//go:noinline +func xor_int8_0(a int8) int8 { return a ^ 0 } + +//go:noinline +func xor_0_int8(a int8) int8 { return 0 ^ a } + +//go:noinline +func xor_int8_1(a int8) int8 { return a ^ 1 } + +//go:noinline +func xor_1_int8(a int8) int8 { return 1 ^ a } + +//go:noinline +func xor_int8_126(a int8) int8 { return a ^ 126 } + +//go:noinline +func xor_126_int8(a int8) int8 { return 126 ^ a } + +//go:noinline +func xor_int8_127(a int8) int8 { return a ^ 127 } + +//go:noinline +func xor_127_int8(a int8) int8 { return 127 ^ a } + +type test_uint64 struct { + fn func(uint64) uint64 + fnname string + in uint64 + want uint64 +} + +var tests_uint64 = []test_uint64{ + + test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 0, want: 0}, + 
test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 0, want: 0}, + test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 1, want: 1}, + test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 1, want: 1}, + test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 4294967296, want: 4294967296}, + test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 4294967296, want: 4294967296}, + test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 0, want: 1}, + test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 0, want: 1}, + test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 1, want: 2}, + test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 1, want: 2}, + test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 4294967296, want: 4294967297}, + test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 4294967296, want: 4294967297}, + test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 9223372036854775808, want: 9223372036854775809}, + test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 9223372036854775808, want: 9223372036854775809}, + test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 18446744073709551615, want: 0}, + test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 0, want: 4294967296}, + test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 0, want: 4294967296}, + test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 1, 
want: 4294967297}, + test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 1, want: 4294967297}, + test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 4294967296, want: 8589934592}, + test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 4294967296, want: 8589934592}, + test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 9223372036854775808, want: 9223372041149743104}, + test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 9223372036854775808, want: 9223372041149743104}, + test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 18446744073709551615, want: 4294967295}, + test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 18446744073709551615, want: 4294967295}, + test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 0, want: 9223372036854775808}, + test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 0, want: 9223372036854775808}, + test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 1, want: 9223372036854775809}, + test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 1, want: 9223372036854775809}, + test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 4294967296, want: 9223372041149743104}, + test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 4294967296, want: 9223372041149743104}, + test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 9223372036854775808, want: 0}, + test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 18446744073709551615, want: 
9223372036854775807}, + test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775807}, + test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 0, want: 18446744073709551615}, + test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 0, want: 18446744073709551615}, + test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 1, want: 0}, + test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 1, want: 0}, + test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 4294967296, want: 4294967295}, + test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 4294967296, want: 4294967295}, + test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775807}, + test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775807}, + test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 18446744073709551615, want: 18446744073709551614}, + test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 18446744073709551615, want: 18446744073709551614}, + test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 0, want: 0}, + test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 0, want: 0}, + test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 1, want: 18446744073709551615}, + test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 1, want: 1}, + test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 4294967296, want: 18446744069414584320}, + test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 4294967296, want: 
4294967296}, + test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 18446744073709551615, want: 1}, + test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 0, want: 1}, + test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 0, want: 18446744073709551615}, + test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 1, want: 0}, + test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 1, want: 0}, + test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 4294967296, want: 18446744069414584321}, + test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 4294967296, want: 4294967295}, + test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 9223372036854775808, want: 9223372036854775809}, + test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 9223372036854775808, want: 9223372036854775807}, + test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 18446744073709551615, want: 2}, + test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 18446744073709551615, want: 18446744073709551614}, + test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 0, want: 4294967296}, + test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 0, want: 18446744069414584320}, + test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 1, want: 4294967295}, + test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 1, want: 18446744069414584321}, + test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 4294967296, want: 0}, + test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 4294967296, want: 0}, + test_uint64{fn: 
sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 9223372036854775808, want: 9223372041149743104}, + test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 9223372036854775808, want: 9223372032559808512}, + test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 18446744073709551615, want: 4294967297}, + test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 18446744073709551615, want: 18446744069414584319}, + test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 0, want: 9223372036854775808}, + test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 0, want: 9223372036854775808}, + test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 1, want: 9223372036854775807}, + test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 1, want: 9223372036854775809}, + test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 4294967296, want: 9223372032559808512}, + test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 4294967296, want: 9223372041149743104}, + test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 9223372036854775808, want: 0}, + test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775809}, + test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775807}, + test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 0, want: 18446744073709551615}, + test_uint64{fn: sub_uint64_18446744073709551615, fnname: 
"sub_uint64_18446744073709551615", in: 0, want: 1}, + test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 1, want: 18446744073709551614}, + test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 1, want: 2}, + test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 4294967296, want: 18446744069414584319}, + test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 4294967296, want: 4294967297}, + test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775807}, + test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775809}, + test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 18446744073709551615, want: 0}, + test_uint64{fn: div_0_uint64, fnname: "div_0_uint64", in: 1, want: 0}, + test_uint64{fn: div_0_uint64, fnname: "div_0_uint64", in: 4294967296, want: 0}, + test_uint64{fn: div_0_uint64, fnname: "div_0_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: div_0_uint64, fnname: "div_0_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 0, want: 0}, + test_uint64{fn: div_1_uint64, fnname: "div_1_uint64", in: 1, want: 1}, + test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 1, want: 1}, + test_uint64{fn: div_1_uint64, fnname: "div_1_uint64", in: 4294967296, want: 0}, + test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 4294967296, want: 4294967296}, + test_uint64{fn: div_1_uint64, fnname: "div_1_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 
9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: div_1_uint64, fnname: "div_1_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 0, want: 0}, + test_uint64{fn: div_4294967296_uint64, fnname: "div_4294967296_uint64", in: 1, want: 4294967296}, + test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 1, want: 0}, + test_uint64{fn: div_4294967296_uint64, fnname: "div_4294967296_uint64", in: 4294967296, want: 1}, + test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 4294967296, want: 1}, + test_uint64{fn: div_4294967296_uint64, fnname: "div_4294967296_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 9223372036854775808, want: 2147483648}, + test_uint64{fn: div_4294967296_uint64, fnname: "div_4294967296_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 18446744073709551615, want: 4294967295}, + test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 0, want: 0}, + test_uint64{fn: div_9223372036854775808_uint64, fnname: "div_9223372036854775808_uint64", in: 1, want: 9223372036854775808}, + test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 1, want: 0}, + test_uint64{fn: div_9223372036854775808_uint64, fnname: "div_9223372036854775808_uint64", in: 4294967296, want: 2147483648}, + test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 4294967296, want: 0}, + test_uint64{fn: div_9223372036854775808_uint64, fnname: "div_9223372036854775808_uint64", in: 9223372036854775808, want: 1}, + test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", 
in: 9223372036854775808, want: 1}, + test_uint64{fn: div_9223372036854775808_uint64, fnname: "div_9223372036854775808_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 18446744073709551615, want: 1}, + test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 0, want: 0}, + test_uint64{fn: div_18446744073709551615_uint64, fnname: "div_18446744073709551615_uint64", in: 1, want: 18446744073709551615}, + test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 1, want: 0}, + test_uint64{fn: div_18446744073709551615_uint64, fnname: "div_18446744073709551615_uint64", in: 4294967296, want: 4294967295}, + test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 4294967296, want: 0}, + test_uint64{fn: div_18446744073709551615_uint64, fnname: "div_18446744073709551615_uint64", in: 9223372036854775808, want: 1}, + test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 9223372036854775808, want: 0}, + test_uint64{fn: div_18446744073709551615_uint64, fnname: "div_18446744073709551615_uint64", in: 18446744073709551615, want: 1}, + test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 18446744073709551615, want: 1}, + test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 0, want: 0}, + test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 0, want: 0}, + test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 1, want: 0}, + test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 1, want: 0}, + test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 4294967296, want: 0}, + test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 4294967296, want: 0}, + test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: mul_uint64_0, fnname: 
"mul_uint64_0", in: 9223372036854775808, want: 0}, + test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 18446744073709551615, want: 0}, + test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 0, want: 0}, + test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 0, want: 0}, + test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 1, want: 1}, + test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 1, want: 1}, + test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 4294967296, want: 4294967296}, + test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 4294967296, want: 4294967296}, + test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 0, want: 0}, + test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 0, want: 0}, + test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 1, want: 4294967296}, + test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 1, want: 4294967296}, + test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 4294967296, want: 0}, + test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 4294967296, want: 0}, + test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 9223372036854775808, want: 0}, + test_uint64{fn: mul_4294967296_uint64, fnname: 
"mul_4294967296_uint64", in: 18446744073709551615, want: 18446744069414584320}, + test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 18446744073709551615, want: 18446744069414584320}, + test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 0, want: 0}, + test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 0, want: 0}, + test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 1, want: 9223372036854775808}, + test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 1, want: 9223372036854775808}, + test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 4294967296, want: 0}, + test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 4294967296, want: 0}, + test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 9223372036854775808, want: 0}, + test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775808}, + test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775808}, + test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 0, want: 0}, + test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 0, want: 0}, + test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 1, want: 18446744073709551615}, + test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 1, want: 18446744073709551615}, + test_uint64{fn: mul_18446744073709551615_uint64, 
fnname: "mul_18446744073709551615_uint64", in: 4294967296, want: 18446744069414584320}, + test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 4294967296, want: 18446744069414584320}, + test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 18446744073709551615, want: 1}, + test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 18446744073709551615, want: 1}, + test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 0, want: 0}, + test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 0, want: 0}, + test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 1, want: 0}, + test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 1, want: 1}, + test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 4294967296, want: 0}, + test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 4294967296, want: 4294967296}, + test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 0, want: 1}, + test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 0, want: 0}, + test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 1, want: 2}, + test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 1, want: 2}, + test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", 
in: 4294967296, want: 0}, + test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 4294967296, want: 8589934592}, + test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 9223372036854775808, want: 0}, + test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 18446744073709551615, want: 18446744073709551614}, + test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 0, want: 4294967296}, + test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 0, want: 0}, + test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 1, want: 8589934592}, + test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 1, want: 0}, + test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 4294967296, want: 0}, + test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 4294967296, want: 0}, + test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 9223372036854775808, want: 0}, + test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 18446744073709551615, want: 0}, + test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 0, want: 9223372036854775808}, + test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 0, want: 0}, + test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 1, want: 0}, + test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 1, want: 0}, + test_uint64{fn: 
lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 4294967296, want: 0}, + test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 4294967296, want: 0}, + test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 9223372036854775808, want: 0}, + test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 18446744073709551615, want: 0}, + test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 0, want: 18446744073709551615}, + test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 0, want: 0}, + test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 1, want: 18446744073709551614}, + test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 1, want: 0}, + test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 4294967296, want: 0}, + test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 4294967296, want: 0}, + test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 9223372036854775808, want: 0}, + test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 18446744073709551615, want: 0}, + test_uint64{fn: rsh_0_uint64, 
fnname: "rsh_0_uint64", in: 0, want: 0}, + test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 0, want: 0}, + test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 1, want: 0}, + test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 1, want: 1}, + test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 4294967296, want: 0}, + test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 4294967296, want: 4294967296}, + test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 0, want: 1}, + test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 0, want: 0}, + test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 1, want: 0}, + test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 1, want: 0}, + test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 4294967296, want: 0}, + test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 4294967296, want: 2147483648}, + test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 9223372036854775808, want: 4611686018427387904}, + test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 18446744073709551615, want: 9223372036854775807}, + test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 0, want: 4294967296}, + test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 0, want: 0}, + test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 1, want: 2147483648}, + 
test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 1, want: 0}, + test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 4294967296, want: 0}, + test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 4294967296, want: 0}, + test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 9223372036854775808, want: 0}, + test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 18446744073709551615, want: 0}, + test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 0, want: 9223372036854775808}, + test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 0, want: 0}, + test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 1, want: 4611686018427387904}, + test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 1, want: 0}, + test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 4294967296, want: 0}, + test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 4294967296, want: 0}, + test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 9223372036854775808, want: 0}, + test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 18446744073709551615, want: 0}, + test_uint64{fn: rsh_18446744073709551615_uint64, 
fnname: "rsh_18446744073709551615_uint64", in: 0, want: 18446744073709551615}, + test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 0, want: 0}, + test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 1, want: 9223372036854775807}, + test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 1, want: 0}, + test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 4294967296, want: 0}, + test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 4294967296, want: 0}, + test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 9223372036854775808, want: 0}, + test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 18446744073709551615, want: 0}, + test_uint64{fn: mod_0_uint64, fnname: "mod_0_uint64", in: 1, want: 0}, + test_uint64{fn: mod_0_uint64, fnname: "mod_0_uint64", in: 4294967296, want: 0}, + test_uint64{fn: mod_0_uint64, fnname: "mod_0_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: mod_0_uint64, fnname: "mod_0_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 0, want: 0}, + test_uint64{fn: mod_1_uint64, fnname: "mod_1_uint64", in: 1, want: 0}, + test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 1, want: 0}, + test_uint64{fn: mod_1_uint64, fnname: "mod_1_uint64", in: 4294967296, want: 1}, + test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 4294967296, want: 0}, + test_uint64{fn: mod_1_uint64, fnname: "mod_1_uint64", in: 9223372036854775808, want: 1}, 
+ test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 9223372036854775808, want: 0}, + test_uint64{fn: mod_1_uint64, fnname: "mod_1_uint64", in: 18446744073709551615, want: 1}, + test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 18446744073709551615, want: 0}, + test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 0, want: 0}, + test_uint64{fn: mod_4294967296_uint64, fnname: "mod_4294967296_uint64", in: 1, want: 0}, + test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 1, want: 1}, + test_uint64{fn: mod_4294967296_uint64, fnname: "mod_4294967296_uint64", in: 4294967296, want: 0}, + test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 4294967296, want: 0}, + test_uint64{fn: mod_4294967296_uint64, fnname: "mod_4294967296_uint64", in: 9223372036854775808, want: 4294967296}, + test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 9223372036854775808, want: 0}, + test_uint64{fn: mod_4294967296_uint64, fnname: "mod_4294967296_uint64", in: 18446744073709551615, want: 4294967296}, + test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 18446744073709551615, want: 4294967295}, + test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 0, want: 0}, + test_uint64{fn: mod_9223372036854775808_uint64, fnname: "mod_9223372036854775808_uint64", in: 1, want: 0}, + test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 1, want: 1}, + test_uint64{fn: mod_9223372036854775808_uint64, fnname: "mod_9223372036854775808_uint64", in: 4294967296, want: 0}, + test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 4294967296, want: 4294967296}, + test_uint64{fn: mod_9223372036854775808_uint64, fnname: "mod_9223372036854775808_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: mod_uint64_9223372036854775808, fnname: 
"mod_uint64_9223372036854775808", in: 9223372036854775808, want: 0}, + test_uint64{fn: mod_9223372036854775808_uint64, fnname: "mod_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775808}, + test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775807}, + test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 0, want: 0}, + test_uint64{fn: mod_18446744073709551615_uint64, fnname: "mod_18446744073709551615_uint64", in: 1, want: 0}, + test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 1, want: 1}, + test_uint64{fn: mod_18446744073709551615_uint64, fnname: "mod_18446744073709551615_uint64", in: 4294967296, want: 4294967295}, + test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 4294967296, want: 4294967296}, + test_uint64{fn: mod_18446744073709551615_uint64, fnname: "mod_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775807}, + test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: mod_18446744073709551615_uint64, fnname: "mod_18446744073709551615_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 18446744073709551615, want: 0}, + test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 0, want: 0}, + test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 0, want: 0}, + test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 1, want: 0}, + test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 1, want: 0}, + test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 4294967296, want: 0}, + test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 4294967296, want: 0}, + test_uint64{fn: and_0_uint64, 
fnname: "and_0_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 9223372036854775808, want: 0}, + test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 18446744073709551615, want: 0}, + test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 0, want: 0}, + test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 0, want: 0}, + test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 1, want: 1}, + test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 1, want: 1}, + test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 4294967296, want: 0}, + test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 4294967296, want: 0}, + test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 9223372036854775808, want: 0}, + test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 18446744073709551615, want: 1}, + test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 18446744073709551615, want: 1}, + test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 0, want: 0}, + test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 0, want: 0}, + test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 1, want: 0}, + test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 1, want: 0}, + test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 4294967296, want: 4294967296}, + test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 4294967296, want: 4294967296}, + test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 9223372036854775808, want: 0}, + test_uint64{fn: and_4294967296_uint64, 
fnname: "and_4294967296_uint64", in: 18446744073709551615, want: 4294967296}, + test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 18446744073709551615, want: 4294967296}, + test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 0, want: 0}, + test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 0, want: 0}, + test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 1, want: 0}, + test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 1, want: 0}, + test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 4294967296, want: 0}, + test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 4294967296, want: 0}, + test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775808}, + test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775808}, + test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 0, want: 0}, + test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 0, want: 0}, + test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 1, want: 1}, + test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 1, want: 1}, + test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 
4294967296, want: 4294967296}, + test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 4294967296, want: 4294967296}, + test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 0, want: 0}, + test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 0, want: 0}, + test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 1, want: 1}, + test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 1, want: 1}, + test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 4294967296, want: 4294967296}, + test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 4294967296, want: 4294967296}, + test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 0, want: 1}, + test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 0, want: 1}, + test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 1, want: 1}, + test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 1, want: 1}, + test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 4294967296, 
want: 4294967297}, + test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 4294967296, want: 4294967297}, + test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 9223372036854775808, want: 9223372036854775809}, + test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 9223372036854775808, want: 9223372036854775809}, + test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 0, want: 4294967296}, + test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 0, want: 4294967296}, + test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 1, want: 4294967297}, + test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 1, want: 4294967297}, + test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 4294967296, want: 4294967296}, + test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 4294967296, want: 4294967296}, + test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 9223372036854775808, want: 9223372041149743104}, + test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 9223372036854775808, want: 9223372041149743104}, + test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 0, want: 9223372036854775808}, + test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 0, want: 9223372036854775808}, + test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 1, 
want: 9223372036854775809}, + test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 1, want: 9223372036854775809}, + test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 4294967296, want: 9223372041149743104}, + test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 4294967296, want: 9223372041149743104}, + test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 0, want: 18446744073709551615}, + test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 0, want: 18446744073709551615}, + test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 1, want: 18446744073709551615}, + test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 1, want: 18446744073709551615}, + test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 4294967296, want: 18446744073709551615}, + test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 4294967296, want: 18446744073709551615}, + test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 9223372036854775808, want: 18446744073709551615}, + test_uint64{fn: or_uint64_18446744073709551615, fnname: 
"or_uint64_18446744073709551615", in: 9223372036854775808, want: 18446744073709551615}, + test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 0, want: 0}, + test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 0, want: 0}, + test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 1, want: 1}, + test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 1, want: 1}, + test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 4294967296, want: 4294967296}, + test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 4294967296, want: 4294967296}, + test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 9223372036854775808, want: 9223372036854775808}, + test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 18446744073709551615, want: 18446744073709551615}, + test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 0, want: 1}, + test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 0, want: 1}, + test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 1, want: 0}, + test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 1, want: 0}, + test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 4294967296, want: 4294967297}, + test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 4294967296, want: 4294967297}, + test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 9223372036854775808, want: 9223372036854775809}, + test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 9223372036854775808, want: 9223372036854775809}, + 
test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 18446744073709551615, want: 18446744073709551614}, + test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 18446744073709551615, want: 18446744073709551614}, + test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 0, want: 4294967296}, + test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 0, want: 4294967296}, + test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 1, want: 4294967297}, + test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 1, want: 4294967297}, + test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 4294967296, want: 0}, + test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 4294967296, want: 0}, + test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 9223372036854775808, want: 9223372041149743104}, + test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 9223372036854775808, want: 9223372041149743104}, + test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 18446744073709551615, want: 18446744069414584319}, + test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 18446744073709551615, want: 18446744069414584319}, + test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 0, want: 9223372036854775808}, + test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 0, want: 9223372036854775808}, + test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 1, want: 9223372036854775809}, + test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 1, want: 9223372036854775809}, + test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 4294967296, want: 9223372041149743104}, + test_uint64{fn: 
xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 4294967296, want: 9223372041149743104}, + test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 9223372036854775808, want: 0}, + test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 9223372036854775808, want: 0}, + test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775807}, + test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775807}, + test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 0, want: 18446744073709551615}, + test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 0, want: 18446744073709551615}, + test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 1, want: 18446744073709551614}, + test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 1, want: 18446744073709551614}, + test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 4294967296, want: 18446744069414584319}, + test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 4294967296, want: 18446744069414584319}, + test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775807}, + test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775807}, + test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 18446744073709551615, want: 0}, + test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 
18446744073709551615, want: 0}} + +type test_uint64mul struct { + fn func(uint64) uint64 + fnname string + in uint64 + want uint64 +} + +var tests_uint64mul = []test_uint64{ + + test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 3, want: 9}, + test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 3, want: 9}, + test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 5, want: 15}, + test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 5, want: 15}, + test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 7, want: 21}, + test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 7, want: 21}, + test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 9, want: 27}, + test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 9, want: 27}, + test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 10, want: 30}, + test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 10, want: 30}, + test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 11, want: 33}, + test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 11, want: 33}, + test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 13, want: 39}, + test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 13, want: 39}, + test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 19, want: 57}, + test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 19, want: 57}, + test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 21, want: 63}, + test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 21, want: 63}, + test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 25, want: 75}, + test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 25, want: 75}, + test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 27, want: 81}, + test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 27, want: 81}, + test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 37, want: 111}, + test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 37, want: 111}, + test_uint64{fn: mul_3_uint64, 
fnname: "mul_3_uint64", in: 41, want: 123}, + test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 41, want: 123}, + test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 45, want: 135}, + test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 45, want: 135}, + test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 73, want: 219}, + test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 73, want: 219}, + test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 81, want: 243}, + test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 81, want: 243}, + test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 3, want: 15}, + test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 3, want: 15}, + test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 5, want: 25}, + test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 5, want: 25}, + test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 7, want: 35}, + test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 7, want: 35}, + test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 9, want: 45}, + test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 9, want: 45}, + test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 10, want: 50}, + test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 10, want: 50}, + test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 11, want: 55}, + test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 11, want: 55}, + test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 13, want: 65}, + test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 13, want: 65}, + test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 19, want: 95}, + test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 19, want: 95}, + test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 21, want: 105}, + test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 21, want: 105}, + test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 25, want: 125}, + 
test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 25, want: 125}, + test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 27, want: 135}, + test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 27, want: 135}, + test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 37, want: 185}, + test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 37, want: 185}, + test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 41, want: 205}, + test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 41, want: 205}, + test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 45, want: 225}, + test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 45, want: 225}, + test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 73, want: 365}, + test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 73, want: 365}, + test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 81, want: 405}, + test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 81, want: 405}, + test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 3, want: 21}, + test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 3, want: 21}, + test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 5, want: 35}, + test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 5, want: 35}, + test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 7, want: 49}, + test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 7, want: 49}, + test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 9, want: 63}, + test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 9, want: 63}, + test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 10, want: 70}, + test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 10, want: 70}, + test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 11, want: 77}, + test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 11, want: 77}, + test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 13, want: 91}, + test_uint64{fn: mul_uint64_7, fnname: 
"mul_uint64_7", in: 13, want: 91}, + test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 19, want: 133}, + test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 19, want: 133}, + test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 21, want: 147}, + test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 21, want: 147}, + test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 25, want: 175}, + test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 25, want: 175}, + test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 27, want: 189}, + test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 27, want: 189}, + test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 37, want: 259}, + test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 37, want: 259}, + test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 41, want: 287}, + test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 41, want: 287}, + test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 45, want: 315}, + test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 45, want: 315}, + test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 73, want: 511}, + test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 73, want: 511}, + test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 81, want: 567}, + test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 81, want: 567}, + test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 3, want: 27}, + test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 3, want: 27}, + test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 5, want: 45}, + test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 5, want: 45}, + test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 7, want: 63}, + test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 7, want: 63}, + test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 9, want: 81}, + test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 9, want: 81}, + 
test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 10, want: 90}, + test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 10, want: 90}, + test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 11, want: 99}, + test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 11, want: 99}, + test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 13, want: 117}, + test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 13, want: 117}, + test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 19, want: 171}, + test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 19, want: 171}, + test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 21, want: 189}, + test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 21, want: 189}, + test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 25, want: 225}, + test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 25, want: 225}, + test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 27, want: 243}, + test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 27, want: 243}, + test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 37, want: 333}, + test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 37, want: 333}, + test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 41, want: 369}, + test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 41, want: 369}, + test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 45, want: 405}, + test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 45, want: 405}, + test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 73, want: 657}, + test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 73, want: 657}, + test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 81, want: 729}, + test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 81, want: 729}, + test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 3, want: 30}, + test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 3, want: 30}, + test_uint64{fn: 
mul_10_uint64, fnname: "mul_10_uint64", in: 5, want: 50}, + test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 5, want: 50}, + test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 7, want: 70}, + test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 7, want: 70}, + test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 9, want: 90}, + test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 9, want: 90}, + test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 10, want: 100}, + test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 10, want: 100}, + test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 11, want: 110}, + test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 11, want: 110}, + test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 13, want: 130}, + test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 13, want: 130}, + test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 19, want: 190}, + test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 19, want: 190}, + test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 21, want: 210}, + test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 21, want: 210}, + test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 25, want: 250}, + test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 25, want: 250}, + test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 27, want: 270}, + test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 27, want: 270}, + test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 37, want: 370}, + test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 37, want: 370}, + test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 41, want: 410}, + test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 41, want: 410}, + test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 45, want: 450}, + test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 45, want: 450}, + 
test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 73, want: 730}, + test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 73, want: 730}, + test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 81, want: 810}, + test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 81, want: 810}, + test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 3, want: 33}, + test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 3, want: 33}, + test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 5, want: 55}, + test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 5, want: 55}, + test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 7, want: 77}, + test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 7, want: 77}, + test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 9, want: 99}, + test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 9, want: 99}, + test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 10, want: 110}, + test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 10, want: 110}, + test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 11, want: 121}, + test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 11, want: 121}, + test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 13, want: 143}, + test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 13, want: 143}, + test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 19, want: 209}, + test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 19, want: 209}, + test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 21, want: 231}, + test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 21, want: 231}, + test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 25, want: 275}, + test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 25, want: 275}, + test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 27, want: 297}, + test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 27, 
want: 297}, + test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 37, want: 407}, + test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 37, want: 407}, + test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 41, want: 451}, + test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 41, want: 451}, + test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 45, want: 495}, + test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 45, want: 495}, + test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 73, want: 803}, + test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 73, want: 803}, + test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 81, want: 891}, + test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 81, want: 891}, + test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 3, want: 39}, + test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 3, want: 39}, + test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 5, want: 65}, + test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 5, want: 65}, + test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 7, want: 91}, + test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 7, want: 91}, + test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 9, want: 117}, + test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 9, want: 117}, + test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 10, want: 130}, + test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 10, want: 130}, + test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 11, want: 143}, + test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 11, want: 143}, + test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 13, want: 169}, + test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 13, want: 169}, + test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 19, want: 247}, + test_uint64{fn: mul_uint64_13, fnname: 
"mul_uint64_13", in: 19, want: 247}, + test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 21, want: 273}, + test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 21, want: 273}, + test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 25, want: 325}, + test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 25, want: 325}, + test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 27, want: 351}, + test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 27, want: 351}, + test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 37, want: 481}, + test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 37, want: 481}, + test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 41, want: 533}, + test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 41, want: 533}, + test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 45, want: 585}, + test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 45, want: 585}, + test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 73, want: 949}, + test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 73, want: 949}, + test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 81, want: 1053}, + test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 81, want: 1053}, + test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 3, want: 57}, + test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 3, want: 57}, + test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 5, want: 95}, + test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 5, want: 95}, + test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 7, want: 133}, + test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 7, want: 133}, + test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 9, want: 171}, + test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 9, want: 171}, + test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 10, want: 190}, + test_uint64{fn: 
mul_uint64_19, fnname: "mul_uint64_19", in: 10, want: 190}, + test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 11, want: 209}, + test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 11, want: 209}, + test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 13, want: 247}, + test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 13, want: 247}, + test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 19, want: 361}, + test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 19, want: 361}, + test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 21, want: 399}, + test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 21, want: 399}, + test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 25, want: 475}, + test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 25, want: 475}, + test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 27, want: 513}, + test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 27, want: 513}, + test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 37, want: 703}, + test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 37, want: 703}, + test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 41, want: 779}, + test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 41, want: 779}, + test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 45, want: 855}, + test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 45, want: 855}, + test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 73, want: 1387}, + test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 73, want: 1387}, + test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 81, want: 1539}, + test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 81, want: 1539}, + test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 3, want: 63}, + test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 3, want: 63}, + test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 5, 
want: 105}, + test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 5, want: 105}, + test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 7, want: 147}, + test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 7, want: 147}, + test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 9, want: 189}, + test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 9, want: 189}, + test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 10, want: 210}, + test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 10, want: 210}, + test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 11, want: 231}, + test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 11, want: 231}, + test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 13, want: 273}, + test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 13, want: 273}, + test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 19, want: 399}, + test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 19, want: 399}, + test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 21, want: 441}, + test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 21, want: 441}, + test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 25, want: 525}, + test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 25, want: 525}, + test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 27, want: 567}, + test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 27, want: 567}, + test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 37, want: 777}, + test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 37, want: 777}, + test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 41, want: 861}, + test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 41, want: 861}, + test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 45, want: 945}, + test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 45, want: 945}, + test_uint64{fn: mul_21_uint64, fnname: 
"mul_21_uint64", in: 73, want: 1533}, + test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 73, want: 1533}, + test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 81, want: 1701}, + test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 81, want: 1701}, + test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 3, want: 75}, + test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 3, want: 75}, + test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 5, want: 125}, + test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 5, want: 125}, + test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 7, want: 175}, + test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 7, want: 175}, + test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 9, want: 225}, + test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 9, want: 225}, + test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 10, want: 250}, + test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 10, want: 250}, + test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 11, want: 275}, + test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 11, want: 275}, + test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 13, want: 325}, + test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 13, want: 325}, + test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 19, want: 475}, + test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 19, want: 475}, + test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 21, want: 525}, + test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 21, want: 525}, + test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 25, want: 625}, + test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 25, want: 625}, + test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 27, want: 675}, + test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 27, want: 675}, + test_uint64{fn: 
mul_25_uint64, fnname: "mul_25_uint64", in: 37, want: 925}, + test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 37, want: 925}, + test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 41, want: 1025}, + test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 41, want: 1025}, + test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 45, want: 1125}, + test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 45, want: 1125}, + test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 73, want: 1825}, + test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 73, want: 1825}, + test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 81, want: 2025}, + test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 81, want: 2025}, + test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 3, want: 81}, + test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 3, want: 81}, + test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 5, want: 135}, + test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 5, want: 135}, + test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 7, want: 189}, + test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 7, want: 189}, + test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 9, want: 243}, + test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 9, want: 243}, + test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 10, want: 270}, + test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 10, want: 270}, + test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 11, want: 297}, + test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 11, want: 297}, + test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 13, want: 351}, + test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 13, want: 351}, + test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 19, want: 513}, + test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 19, 
want: 513}, + test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 21, want: 567}, + test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 21, want: 567}, + test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 25, want: 675}, + test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 25, want: 675}, + test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 27, want: 729}, + test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 27, want: 729}, + test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 37, want: 999}, + test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 37, want: 999}, + test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 41, want: 1107}, + test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 41, want: 1107}, + test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 45, want: 1215}, + test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 45, want: 1215}, + test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 73, want: 1971}, + test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 73, want: 1971}, + test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 81, want: 2187}, + test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 81, want: 2187}, + test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 3, want: 111}, + test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 3, want: 111}, + test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 5, want: 185}, + test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 5, want: 185}, + test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 7, want: 259}, + test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 7, want: 259}, + test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 9, want: 333}, + test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 9, want: 333}, + test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 10, want: 370}, + test_uint64{fn: mul_uint64_37, 
fnname: "mul_uint64_37", in: 10, want: 370}, + test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 11, want: 407}, + test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 11, want: 407}, + test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 13, want: 481}, + test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 13, want: 481}, + test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 19, want: 703}, + test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 19, want: 703}, + test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 21, want: 777}, + test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 21, want: 777}, + test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 25, want: 925}, + test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 25, want: 925}, + test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 27, want: 999}, + test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 27, want: 999}, + test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 37, want: 1369}, + test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 37, want: 1369}, + test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 41, want: 1517}, + test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 41, want: 1517}, + test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 45, want: 1665}, + test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 45, want: 1665}, + test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 73, want: 2701}, + test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 73, want: 2701}, + test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 81, want: 2997}, + test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 81, want: 2997}, + test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 3, want: 123}, + test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 3, want: 123}, + test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 5, want: 
205}, + test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 5, want: 205}, + test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 7, want: 287}, + test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 7, want: 287}, + test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 9, want: 369}, + test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 9, want: 369}, + test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 10, want: 410}, + test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 10, want: 410}, + test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 11, want: 451}, + test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 11, want: 451}, + test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 13, want: 533}, + test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 13, want: 533}, + test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 19, want: 779}, + test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 19, want: 779}, + test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 21, want: 861}, + test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 21, want: 861}, + test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 25, want: 1025}, + test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 25, want: 1025}, + test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 27, want: 1107}, + test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 27, want: 1107}, + test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 37, want: 1517}, + test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 37, want: 1517}, + test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 41, want: 1681}, + test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 41, want: 1681}, + test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 45, want: 1845}, + test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 45, want: 1845}, + test_uint64{fn: mul_41_uint64, 
fnname: "mul_41_uint64", in: 73, want: 2993}, + test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 73, want: 2993}, + test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 81, want: 3321}, + test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 81, want: 3321}, + test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 3, want: 135}, + test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 3, want: 135}, + test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 5, want: 225}, + test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 5, want: 225}, + test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 7, want: 315}, + test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 7, want: 315}, + test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 9, want: 405}, + test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 9, want: 405}, + test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 10, want: 450}, + test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 10, want: 450}, + test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 11, want: 495}, + test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 11, want: 495}, + test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 13, want: 585}, + test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 13, want: 585}, + test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 19, want: 855}, + test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 19, want: 855}, + test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 21, want: 945}, + test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 21, want: 945}, + test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 25, want: 1125}, + test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 25, want: 1125}, + test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 27, want: 1215}, + test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 27, want: 1215}, + 
test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 37, want: 1665}, + test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 37, want: 1665}, + test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 41, want: 1845}, + test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 41, want: 1845}, + test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 45, want: 2025}, + test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 45, want: 2025}, + test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 73, want: 3285}, + test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 73, want: 3285}, + test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 81, want: 3645}, + test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 81, want: 3645}, + test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 3, want: 219}, + test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 3, want: 219}, + test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 5, want: 365}, + test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 5, want: 365}, + test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 7, want: 511}, + test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 7, want: 511}, + test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 9, want: 657}, + test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 9, want: 657}, + test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 10, want: 730}, + test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 10, want: 730}, + test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 11, want: 803}, + test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 11, want: 803}, + test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 13, want: 949}, + test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 13, want: 949}, + test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 19, want: 1387}, + test_uint64{fn: mul_uint64_73, fnname: 
"mul_uint64_73", in: 19, want: 1387}, + test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 21, want: 1533}, + test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 21, want: 1533}, + test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 25, want: 1825}, + test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 25, want: 1825}, + test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 27, want: 1971}, + test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 27, want: 1971}, + test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 37, want: 2701}, + test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 37, want: 2701}, + test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 41, want: 2993}, + test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 41, want: 2993}, + test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 45, want: 3285}, + test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 45, want: 3285}, + test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 73, want: 5329}, + test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 73, want: 5329}, + test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 81, want: 5913}, + test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 81, want: 5913}, + test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 3, want: 243}, + test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 3, want: 243}, + test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 5, want: 405}, + test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 5, want: 405}, + test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 7, want: 567}, + test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 7, want: 567}, + test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 9, want: 729}, + test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 9, want: 729}, + test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 10, want: 810}, + 
test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 10, want: 810}, + test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 11, want: 891}, + test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 11, want: 891}, + test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 13, want: 1053}, + test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 13, want: 1053}, + test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 19, want: 1539}, + test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 19, want: 1539}, + test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 21, want: 1701}, + test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 21, want: 1701}, + test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 25, want: 2025}, + test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 25, want: 2025}, + test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 27, want: 2187}, + test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 27, want: 2187}, + test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 37, want: 2997}, + test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 37, want: 2997}, + test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 41, want: 3321}, + test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 41, want: 3321}, + test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 45, want: 3645}, + test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 45, want: 3645}, + test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 73, want: 5913}, + test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 73, want: 5913}, + test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 81, want: 6561}, + test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 81, want: 6561}} + +type test_int64 struct { + fn func(int64) int64 + fnname string + in int64 + want int64 +} + +var tests_int64 = []test_int64{ + + test_int64{fn: add_Neg9223372036854775808_int64, 
fnname: "add_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0}, + test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: -9223372036854775807, want: 1}, + test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: -9223372036854775807, want: 1}, + test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: -4294967296, want: 9223372032559808512}, + test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: -4294967296, want: 9223372032559808512}, + test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: -1, want: 9223372036854775807}, + test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: -1, want: 9223372036854775807}, + test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 0, want: -9223372036854775808}, + test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 0, want: -9223372036854775808}, + test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 1, want: -9223372036854775807}, + test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 1, want: -9223372036854775807}, + test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 4294967296, want: -9223372032559808512}, + test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 4294967296, want: -9223372032559808512}, + test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 9223372036854775806, want: -2}, + test_int64{fn: 
add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 9223372036854775806, want: -2}, + test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1}, + test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 9223372036854775807, want: -1}, + test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: -9223372036854775808, want: 1}, + test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: -9223372036854775808, want: 1}, + test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: -9223372036854775807, want: 2}, + test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: -9223372036854775807, want: 2}, + test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: -4294967296, want: 9223372032559808513}, + test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: -4294967296, want: 9223372032559808513}, + test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: -1, want: -9223372036854775808}, + test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: -1, want: -9223372036854775808}, + test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 0, want: -9223372036854775807}, + test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 0, want: -9223372036854775807}, + test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 1, want: -9223372036854775806}, + test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 1, want: -9223372036854775806}, + test_int64{fn: 
add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 4294967296, want: -9223372032559808511}, + test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 4294967296, want: -9223372032559808511}, + test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1}, + test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 9223372036854775806, want: -1}, + test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 9223372036854775807, want: 0}, + test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 9223372036854775807, want: 0}, + test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: -9223372036854775808, want: 9223372032559808512}, + test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: -9223372036854775808, want: 9223372032559808512}, + test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: -9223372036854775807, want: 9223372032559808513}, + test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: -9223372036854775807, want: 9223372032559808513}, + test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: -4294967296, want: -8589934592}, + test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: -4294967296, want: -8589934592}, + test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: -1, want: -4294967297}, + test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: -1, want: -4294967297}, + test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 0, want: -4294967296}, + test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 0, want: -4294967296}, + test_int64{fn: add_Neg4294967296_int64, 
fnname: "add_Neg4294967296_int64", in: 1, want: -4294967295}, + test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 1, want: -4294967295}, + test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 4294967296, want: 0}, + test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 4294967296, want: 0}, + test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 9223372036854775806, want: 9223372032559808510}, + test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 9223372036854775806, want: 9223372032559808510}, + test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 9223372036854775807, want: 9223372032559808511}, + test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 9223372036854775807, want: 9223372032559808511}, + test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: -9223372036854775808, want: 9223372036854775807}, + test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: -9223372036854775808, want: 9223372036854775807}, + test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: -9223372036854775807, want: -9223372036854775808}, + test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: -9223372036854775807, want: -9223372036854775808}, + test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: -4294967296, want: -4294967297}, + test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: -4294967296, want: -4294967297}, + test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: -1, want: -2}, + test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: -1, want: -2}, + test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 0, want: -1}, + test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 0, want: -1}, + test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 1, want: 0}, + test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 1, want: 0}, 
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 4294967296, want: 4294967295}, + test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 4294967296, want: 4294967295}, + test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 9223372036854775806, want: 9223372036854775805}, + test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 9223372036854775806, want: 9223372036854775805}, + test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 9223372036854775807, want: 9223372036854775806}, + test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 9223372036854775807, want: 9223372036854775806}, + test_int64{fn: add_0_int64, fnname: "add_0_int64", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: add_int64_0, fnname: "add_int64_0", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: add_0_int64, fnname: "add_0_int64", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: add_int64_0, fnname: "add_int64_0", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: add_0_int64, fnname: "add_0_int64", in: -4294967296, want: -4294967296}, + test_int64{fn: add_int64_0, fnname: "add_int64_0", in: -4294967296, want: -4294967296}, + test_int64{fn: add_0_int64, fnname: "add_0_int64", in: -1, want: -1}, + test_int64{fn: add_int64_0, fnname: "add_int64_0", in: -1, want: -1}, + test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 0, want: 0}, + test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 0, want: 0}, + test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 1, want: 1}, + test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 1, want: 1}, + test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 4294967296, want: 4294967296}, + test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 4294967296, want: 4294967296}, + test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: add_int64_0, fnname: 
"add_int64_0", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: add_1_int64, fnname: "add_1_int64", in: -9223372036854775808, want: -9223372036854775807}, + test_int64{fn: add_int64_1, fnname: "add_int64_1", in: -9223372036854775808, want: -9223372036854775807}, + test_int64{fn: add_1_int64, fnname: "add_1_int64", in: -9223372036854775807, want: -9223372036854775806}, + test_int64{fn: add_int64_1, fnname: "add_int64_1", in: -9223372036854775807, want: -9223372036854775806}, + test_int64{fn: add_1_int64, fnname: "add_1_int64", in: -4294967296, want: -4294967295}, + test_int64{fn: add_int64_1, fnname: "add_int64_1", in: -4294967296, want: -4294967295}, + test_int64{fn: add_1_int64, fnname: "add_1_int64", in: -1, want: 0}, + test_int64{fn: add_int64_1, fnname: "add_int64_1", in: -1, want: 0}, + test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 0, want: 1}, + test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 0, want: 1}, + test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 1, want: 2}, + test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 1, want: 2}, + test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 4294967296, want: 4294967297}, + test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 4294967296, want: 4294967297}, + test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 9223372036854775806, want: 9223372036854775807}, + test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 9223372036854775806, want: 9223372036854775807}, + test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 9223372036854775807, want: -9223372036854775808}, + test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 9223372036854775807, want: -9223372036854775808}, + test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 
-9223372036854775808, want: -9223372032559808512}, + test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: -9223372036854775808, want: -9223372032559808512}, + test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: -9223372036854775807, want: -9223372032559808511}, + test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: -9223372036854775807, want: -9223372032559808511}, + test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: -4294967296, want: 0}, + test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: -4294967296, want: 0}, + test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: -1, want: 4294967295}, + test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: -1, want: 4294967295}, + test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 0, want: 4294967296}, + test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 0, want: 4294967296}, + test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 1, want: 4294967297}, + test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 1, want: 4294967297}, + test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 4294967296, want: 8589934592}, + test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 4294967296, want: 8589934592}, + test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 9223372036854775806, want: -9223372032559808514}, + test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 9223372036854775806, want: -9223372032559808514}, + test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 9223372036854775807, want: -9223372032559808513}, + test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 9223372036854775807, want: -9223372032559808513}, + test_int64{fn: add_9223372036854775806_int64, fnname: 
"add_9223372036854775806_int64", in: -9223372036854775808, want: -2}, + test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: -9223372036854775808, want: -2}, + test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: -9223372036854775807, want: -1}, + test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: -9223372036854775807, want: -1}, + test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: -4294967296, want: 9223372032559808510}, + test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: -4294967296, want: 9223372032559808510}, + test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: -1, want: 9223372036854775805}, + test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: -1, want: 9223372036854775805}, + test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 0, want: 9223372036854775806}, + test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 0, want: 9223372036854775806}, + test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 1, want: 9223372036854775807}, + test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 1, want: 9223372036854775807}, + test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 4294967296, want: -9223372032559808514}, + test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 4294967296, want: -9223372032559808514}, + test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 9223372036854775806, want: -4}, + test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 9223372036854775806, want: -4}, + test_int64{fn: 
add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 9223372036854775807, want: -3}, + test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 9223372036854775807, want: -3}, + test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: -9223372036854775808, want: -1}, + test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: -9223372036854775808, want: -1}, + test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: -9223372036854775807, want: 0}, + test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: -9223372036854775807, want: 0}, + test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: -4294967296, want: 9223372032559808511}, + test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: -4294967296, want: 9223372032559808511}, + test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: -1, want: 9223372036854775806}, + test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: -1, want: 9223372036854775806}, + test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 0, want: 9223372036854775807}, + test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 0, want: 9223372036854775807}, + test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 1, want: -9223372036854775808}, + test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 1, want: -9223372036854775808}, + test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 4294967296, want: -9223372032559808513}, + test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 4294967296, want: 
-9223372032559808513}, + test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 9223372036854775806, want: -3}, + test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 9223372036854775806, want: -3}, + test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 9223372036854775807, want: -2}, + test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 9223372036854775807, want: -2}, + test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0}, + test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: -9223372036854775807, want: -1}, + test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: -9223372036854775807, want: 1}, + test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: -4294967296, want: -9223372032559808512}, + test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: -4294967296, want: 9223372032559808512}, + test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: -1, want: -9223372036854775807}, + test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: -1, want: 9223372036854775807}, + test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 0, want: -9223372036854775808}, + test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 0, want: -9223372036854775808}, + test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 1, want: 9223372036854775807}, + 
test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 1, want: -9223372036854775807}, + test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 4294967296, want: 9223372032559808512}, + test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 4294967296, want: -9223372032559808512}, + test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 9223372036854775806, want: 2}, + test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 9223372036854775806, want: -2}, + test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 9223372036854775807, want: 1}, + test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 9223372036854775807, want: -1}, + test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: -9223372036854775808, want: 1}, + test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: -9223372036854775808, want: -1}, + test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: -9223372036854775807, want: 0}, + test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: -9223372036854775807, want: 0}, + test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: -4294967296, want: -9223372032559808511}, + test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: -4294967296, want: 9223372032559808511}, + test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: -1, want: -9223372036854775806}, + test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: -1, want: 9223372036854775806}, 
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 0, want: -9223372036854775807}, + test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 0, want: 9223372036854775807}, + test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 1, want: -9223372036854775808}, + test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 1, want: -9223372036854775808}, + test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 4294967296, want: 9223372032559808513}, + test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 4294967296, want: -9223372032559808513}, + test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 9223372036854775806, want: 3}, + test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 9223372036854775806, want: -3}, + test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 9223372036854775807, want: 2}, + test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 9223372036854775807, want: -2}, + test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: -9223372036854775808, want: 9223372032559808512}, + test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: -9223372036854775808, want: -9223372032559808512}, + test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: -9223372036854775807, want: 9223372032559808511}, + test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: -9223372036854775807, want: -9223372032559808511}, + test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: -4294967296, want: 0}, + test_int64{fn: sub_int64_Neg4294967296, 
fnname: "sub_int64_Neg4294967296", in: -4294967296, want: 0}, + test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: -1, want: -4294967295}, + test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: -1, want: 4294967295}, + test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 0, want: -4294967296}, + test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 0, want: 4294967296}, + test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 1, want: -4294967297}, + test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 1, want: 4294967297}, + test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 4294967296, want: -8589934592}, + test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 4294967296, want: 8589934592}, + test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 9223372036854775806, want: 9223372032559808514}, + test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 9223372036854775806, want: -9223372032559808514}, + test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 9223372036854775807, want: 9223372032559808513}, + test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 9223372036854775807, want: -9223372032559808513}, + test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: -9223372036854775808, want: 9223372036854775807}, + test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: -9223372036854775808, want: -9223372036854775807}, + test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: -9223372036854775807, want: 9223372036854775806}, + test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: -9223372036854775807, want: -9223372036854775806}, + test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: -4294967296, want: 4294967295}, + test_int64{fn: 
sub_int64_Neg1, fnname: "sub_int64_Neg1", in: -4294967296, want: -4294967295}, + test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: -1, want: 0}, + test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: -1, want: 0}, + test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 0, want: -1}, + test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 0, want: 1}, + test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 1, want: -2}, + test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 1, want: 2}, + test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 4294967296, want: -4294967297}, + test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 4294967296, want: 4294967297}, + test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 9223372036854775806, want: -9223372036854775807}, + test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 9223372036854775806, want: 9223372036854775807}, + test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 9223372036854775807, want: -9223372036854775808}, + test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 9223372036854775807, want: -9223372036854775808}, + test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: -9223372036854775807, want: 9223372036854775807}, + test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: -4294967296, want: 4294967296}, + test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: -4294967296, want: -4294967296}, + test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: -1, want: 1}, + test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: -1, want: -1}, + test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 0, want: 
0}, + test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 0, want: 0}, + test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 1, want: -1}, + test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 1, want: 1}, + test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 4294967296, want: -4294967296}, + test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 4294967296, want: 4294967296}, + test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 9223372036854775806, want: -9223372036854775806}, + test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 9223372036854775807, want: -9223372036854775807}, + test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: -9223372036854775808, want: -9223372036854775807}, + test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: -9223372036854775808, want: 9223372036854775807}, + test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: -9223372036854775807, want: -9223372036854775808}, + test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: -9223372036854775807, want: -9223372036854775808}, + test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: -4294967296, want: 4294967297}, + test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: -4294967296, want: -4294967297}, + test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: -1, want: 2}, + test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: -1, want: -2}, + test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 0, want: 1}, + test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 0, want: -1}, + test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 1, want: 0}, + test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 1, want: 0}, + test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 4294967296, want: -4294967295}, + test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 
4294967296, want: 4294967295}, + test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 9223372036854775806, want: -9223372036854775805}, + test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 9223372036854775806, want: 9223372036854775805}, + test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 9223372036854775807, want: -9223372036854775806}, + test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 9223372036854775807, want: 9223372036854775806}, + test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: -9223372036854775808, want: -9223372032559808512}, + test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: -9223372036854775808, want: 9223372032559808512}, + test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: -9223372036854775807, want: -9223372032559808513}, + test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: -9223372036854775807, want: 9223372032559808513}, + test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: -4294967296, want: 8589934592}, + test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: -4294967296, want: -8589934592}, + test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: -1, want: 4294967297}, + test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: -1, want: -4294967297}, + test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 0, want: 4294967296}, + test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 0, want: -4294967296}, + test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 1, want: 4294967295}, + test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 1, want: -4294967295}, + test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 4294967296, want: 0}, + test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 4294967296, want: 0}, + test_int64{fn: sub_4294967296_int64, 
fnname: "sub_4294967296_int64", in: 9223372036854775806, want: -9223372032559808510}, + test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 9223372036854775806, want: 9223372032559808510}, + test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 9223372036854775807, want: -9223372032559808511}, + test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 9223372036854775807, want: 9223372032559808511}, + test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: -9223372036854775808, want: -2}, + test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: -9223372036854775808, want: 2}, + test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: -9223372036854775807, want: -3}, + test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: -9223372036854775807, want: 3}, + test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: -4294967296, want: -9223372032559808514}, + test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: -4294967296, want: 9223372032559808514}, + test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: -1, want: 9223372036854775807}, + test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: -1, want: -9223372036854775807}, + test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 0, want: 9223372036854775806}, + test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 0, want: -9223372036854775806}, + test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 1, want: 9223372036854775805}, + test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 1, want: -9223372036854775805}, + test_int64{fn: 
sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 4294967296, want: 9223372032559808510}, + test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 4294967296, want: -9223372032559808510}, + test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 9223372036854775806, want: 0}, + test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 9223372036854775806, want: 0}, + test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 9223372036854775807, want: -1}, + test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 9223372036854775807, want: 1}, + test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: -9223372036854775808, want: -1}, + test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: -9223372036854775808, want: 1}, + test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: -9223372036854775807, want: -2}, + test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: -9223372036854775807, want: 2}, + test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: -4294967296, want: -9223372032559808513}, + test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: -4294967296, want: 9223372032559808513}, + test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: -1, want: -9223372036854775808}, + test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: -1, want: -9223372036854775808}, + test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 0, want: 9223372036854775807}, + test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 0, want: 
-9223372036854775807}, + test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 1, want: 9223372036854775806}, + test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 1, want: -9223372036854775806}, + test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 4294967296, want: 9223372032559808511}, + test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 4294967296, want: -9223372032559808511}, + test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 9223372036854775806, want: 1}, + test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 9223372036854775806, want: -1}, + test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 9223372036854775807, want: 0}, + test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 9223372036854775807, want: 0}, + test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: -9223372036854775808, want: 1}, + test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: -9223372036854775808, want: 1}, + test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: -9223372036854775807, want: 1}, + test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: -9223372036854775807, want: 0}, + test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: -4294967296, want: 2147483648}, + test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: -4294967296, want: 0}, + test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: -1, want: -9223372036854775808}, + test_int64{fn: div_int64_Neg9223372036854775808, 
fnname: "div_int64_Neg9223372036854775808", in: -1, want: 0}, + test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 0, want: 0}, + test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: 1, want: -9223372036854775808}, + test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 1, want: 0}, + test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: 4294967296, want: -2147483648}, + test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 4294967296, want: 0}, + test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: 9223372036854775806, want: -1}, + test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 9223372036854775806, want: 0}, + test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1}, + test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 9223372036854775807, want: 0}, + test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: -9223372036854775808, want: 1}, + test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: -9223372036854775807, want: 1}, + test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: -9223372036854775807, want: 1}, + test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: -4294967296, want: 2147483647}, + test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: -4294967296, want: 0}, + test_int64{fn: 
div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: -1, want: 9223372036854775807}, + test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: -1, want: 0}, + test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 0, want: 0}, + test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: 1, want: -9223372036854775807}, + test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 1, want: 0}, + test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: 4294967296, want: -2147483647}, + test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 4294967296, want: 0}, + test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1}, + test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 9223372036854775806, want: 0}, + test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: 9223372036854775807, want: -1}, + test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 9223372036854775807, want: -1}, + test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: -9223372036854775808, want: 2147483648}, + test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: -9223372036854775807, want: 0}, + test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: -9223372036854775807, want: 2147483647}, + test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: -4294967296, want: 1}, + test_int64{fn: div_int64_Neg4294967296, fnname: 
"div_int64_Neg4294967296", in: -4294967296, want: 1}, + test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: -1, want: 4294967296}, + test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: -1, want: 0}, + test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 0, want: 0}, + test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: 1, want: -4294967296}, + test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 1, want: 0}, + test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: 4294967296, want: -1}, + test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 4294967296, want: -1}, + test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: 9223372036854775806, want: 0}, + test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 9223372036854775806, want: -2147483647}, + test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: 9223372036854775807, want: 0}, + test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 9223372036854775807, want: -2147483647}, + test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: -9223372036854775807, want: 0}, + test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: -9223372036854775807, want: 9223372036854775807}, + test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: -4294967296, want: 0}, + test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: -4294967296, want: 4294967296}, + test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: -1, want: 1}, + test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: -1, want: 1}, + test_int64{fn: 
div_int64_Neg1, fnname: "div_int64_Neg1", in: 0, want: 0}, + test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: 1, want: -1}, + test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 1, want: -1}, + test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: 4294967296, want: 0}, + test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 4294967296, want: -4294967296}, + test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: 9223372036854775806, want: 0}, + test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 9223372036854775806, want: -9223372036854775806}, + test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: 9223372036854775807, want: 0}, + test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 9223372036854775807, want: -9223372036854775807}, + test_int64{fn: div_0_int64, fnname: "div_0_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: div_0_int64, fnname: "div_0_int64", in: -9223372036854775807, want: 0}, + test_int64{fn: div_0_int64, fnname: "div_0_int64", in: -4294967296, want: 0}, + test_int64{fn: div_0_int64, fnname: "div_0_int64", in: -1, want: 0}, + test_int64{fn: div_0_int64, fnname: "div_0_int64", in: 1, want: 0}, + test_int64{fn: div_0_int64, fnname: "div_0_int64", in: 4294967296, want: 0}, + test_int64{fn: div_0_int64, fnname: "div_0_int64", in: 9223372036854775806, want: 0}, + test_int64{fn: div_0_int64, fnname: "div_0_int64", in: 9223372036854775807, want: 0}, + test_int64{fn: div_1_int64, fnname: "div_1_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: div_int64_1, fnname: "div_int64_1", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: div_1_int64, fnname: "div_1_int64", in: -9223372036854775807, want: 0}, + test_int64{fn: div_int64_1, fnname: "div_int64_1", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: div_1_int64, fnname: "div_1_int64", in: -4294967296, want: 0}, + test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 
-4294967296, want: -4294967296}, + test_int64{fn: div_1_int64, fnname: "div_1_int64", in: -1, want: -1}, + test_int64{fn: div_int64_1, fnname: "div_int64_1", in: -1, want: -1}, + test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 0, want: 0}, + test_int64{fn: div_1_int64, fnname: "div_1_int64", in: 1, want: 1}, + test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 1, want: 1}, + test_int64{fn: div_1_int64, fnname: "div_1_int64", in: 4294967296, want: 0}, + test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 4294967296, want: 4294967296}, + test_int64{fn: div_1_int64, fnname: "div_1_int64", in: 9223372036854775806, want: 0}, + test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: div_1_int64, fnname: "div_1_int64", in: 9223372036854775807, want: 0}, + test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: -9223372036854775808, want: -2147483648}, + test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: -9223372036854775807, want: 0}, + test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: -9223372036854775807, want: -2147483647}, + test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: -4294967296, want: -1}, + test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: -4294967296, want: -1}, + test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: -1, want: -4294967296}, + test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: -1, want: 0}, + test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 0, want: 0}, + test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: 1, want: 4294967296}, + test_int64{fn: div_int64_4294967296, 
fnname: "div_int64_4294967296", in: 1, want: 0}, + test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: 4294967296, want: 1}, + test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 4294967296, want: 1}, + test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: 9223372036854775806, want: 0}, + test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 9223372036854775806, want: 2147483647}, + test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: 9223372036854775807, want: 0}, + test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 9223372036854775807, want: 2147483647}, + test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: -9223372036854775808, want: -1}, + test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: -9223372036854775807, want: 0}, + test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: -9223372036854775807, want: -1}, + test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: -4294967296, want: -2147483647}, + test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: -4294967296, want: 0}, + test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: -1, want: -9223372036854775806}, + test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: -1, want: 0}, + test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 0, want: 0}, + test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: 1, want: 9223372036854775806}, + test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 1, want: 0}, + 
test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: 4294967296, want: 2147483647}, + test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 4294967296, want: 0}, + test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: 9223372036854775806, want: 1}, + test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 9223372036854775806, want: 1}, + test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: 9223372036854775807, want: 0}, + test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 9223372036854775807, want: 1}, + test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: -9223372036854775808, want: -1}, + test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: -9223372036854775807, want: -1}, + test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: -9223372036854775807, want: -1}, + test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: -4294967296, want: -2147483647}, + test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: -4294967296, want: 0}, + test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: -1, want: -9223372036854775807}, + test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: -1, want: 0}, + test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 0, want: 0}, + test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: 1, want: 9223372036854775807}, + test_int64{fn: div_int64_9223372036854775807, fnname: 
"div_int64_9223372036854775807", in: 1, want: 0}, + test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: 4294967296, want: 2147483647}, + test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 4294967296, want: 0}, + test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: 9223372036854775806, want: 1}, + test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 9223372036854775806, want: 0}, + test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: 9223372036854775807, want: 1}, + test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 9223372036854775807, want: 1}, + test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0}, + test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: -9223372036854775807, want: -9223372036854775808}, + test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: -9223372036854775807, want: -9223372036854775808}, + test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: -4294967296, want: 0}, + test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: -4294967296, want: 0}, + test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: -1, want: -9223372036854775808}, + test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: -1, want: -9223372036854775808}, + test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 0, want: 0}, + test_int64{fn: 
mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 0, want: 0}, + test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 1, want: -9223372036854775808}, + test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 1, want: -9223372036854775808}, + test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 4294967296, want: 0}, + test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 4294967296, want: 0}, + test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 9223372036854775806, want: 0}, + test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 9223372036854775806, want: 0}, + test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 9223372036854775807, want: -9223372036854775808}, + test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 9223372036854775807, want: -9223372036854775808}, + test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: -9223372036854775807, want: 1}, + test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: -9223372036854775807, want: 1}, + test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: -4294967296, want: -4294967296}, + test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: -4294967296, want: -4294967296}, + test_int64{fn: 
mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: -1, want: 9223372036854775807}, + test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: -1, want: 9223372036854775807}, + test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 0, want: 0}, + test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 0, want: 0}, + test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 1, want: -9223372036854775807}, + test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 1, want: -9223372036854775807}, + test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 4294967296, want: 4294967296}, + test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 4294967296, want: 4294967296}, + test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 9223372036854775807, want: -1}, + test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 9223372036854775807, want: -1}, + test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: -9223372036854775808, want: 0}, + test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: -9223372036854775807, want: -4294967296}, + test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 
-9223372036854775807, want: -4294967296}, + test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: -4294967296, want: 0}, + test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: -4294967296, want: 0}, + test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: -1, want: 4294967296}, + test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: -1, want: 4294967296}, + test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 0, want: 0}, + test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 0, want: 0}, + test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 1, want: -4294967296}, + test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 1, want: -4294967296}, + test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 4294967296, want: 0}, + test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 4294967296, want: 0}, + test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 9223372036854775806, want: 8589934592}, + test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 9223372036854775806, want: 8589934592}, + test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 9223372036854775807, want: 4294967296}, + test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 9223372036854775807, want: 4294967296}, + test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: -9223372036854775807, want: 9223372036854775807}, + test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: -9223372036854775807, want: 9223372036854775807}, + 
test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: -4294967296, want: 4294967296}, + test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: -4294967296, want: 4294967296}, + test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: -1, want: 1}, + test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: -1, want: 1}, + test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 0, want: 0}, + test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 0, want: 0}, + test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 1, want: -1}, + test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 1, want: -1}, + test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 4294967296, want: -4294967296}, + test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 4294967296, want: -4294967296}, + test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 9223372036854775806, want: -9223372036854775806}, + test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 9223372036854775806, want: -9223372036854775806}, + test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 9223372036854775807, want: -9223372036854775807}, + test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 9223372036854775807, want: -9223372036854775807}, + test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: -9223372036854775808, want: 0}, + test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: -9223372036854775807, want: 0}, + test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: -9223372036854775807, want: 0}, + test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: -4294967296, want: 0}, + test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: -4294967296, want: 0}, + test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: -1, want: 0}, + test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: -1, want: 0}, + test_int64{fn: mul_0_int64, fnname: 
"mul_0_int64", in: 0, want: 0}, + test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 0, want: 0}, + test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 1, want: 0}, + test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 1, want: 0}, + test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 4294967296, want: 0}, + test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 4294967296, want: 0}, + test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 9223372036854775806, want: 0}, + test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 9223372036854775806, want: 0}, + test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 9223372036854775807, want: 0}, + test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 9223372036854775807, want: 0}, + test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: -4294967296, want: -4294967296}, + test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: -4294967296, want: -4294967296}, + test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: -1, want: -1}, + test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: -1, want: -1}, + test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 0, want: 0}, + test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 0, want: 0}, + test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 1, want: 1}, + test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 1, want: 1}, + test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 4294967296, want: 4294967296}, + test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 4294967296, want: 4294967296}, + test_int64{fn: mul_1_int64, 
fnname: "mul_1_int64", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: -9223372036854775808, want: 0}, + test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: -9223372036854775807, want: 4294967296}, + test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: -9223372036854775807, want: 4294967296}, + test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: -4294967296, want: 0}, + test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: -4294967296, want: 0}, + test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: -1, want: -4294967296}, + test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: -1, want: -4294967296}, + test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 0, want: 0}, + test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 0, want: 0}, + test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 1, want: 4294967296}, + test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 1, want: 4294967296}, + test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 4294967296, want: 0}, + test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 4294967296, want: 0}, + test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 9223372036854775806, want: -8589934592}, + test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 
9223372036854775806, want: -8589934592}, + test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 9223372036854775807, want: -4294967296}, + test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 9223372036854775807, want: -4294967296}, + test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: -9223372036854775808, want: 0}, + test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: -9223372036854775807, want: 9223372036854775806}, + test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: -9223372036854775807, want: 9223372036854775806}, + test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: -4294967296, want: 8589934592}, + test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: -4294967296, want: 8589934592}, + test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: -1, want: -9223372036854775806}, + test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: -1, want: -9223372036854775806}, + test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 0, want: 0}, + test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 0, want: 0}, + test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 1, want: 9223372036854775806}, + test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 1, want: 9223372036854775806}, + test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 4294967296, want: -8589934592}, + test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 
4294967296, want: -8589934592}, + test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 9223372036854775806, want: 4}, + test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 9223372036854775806, want: 4}, + test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 9223372036854775807, want: -9223372036854775806}, + test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 9223372036854775807, want: -9223372036854775806}, + test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: -9223372036854775807, want: -1}, + test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: -9223372036854775807, want: -1}, + test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: -4294967296, want: 4294967296}, + test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: -4294967296, want: 4294967296}, + test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: -1, want: -9223372036854775807}, + test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: -1, want: -9223372036854775807}, + test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 0, want: 0}, + test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 0, want: 0}, + test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 1, want: 9223372036854775807}, + test_int64{fn: mul_int64_9223372036854775807, fnname: 
"mul_int64_9223372036854775807", in: 1, want: 9223372036854775807}, + test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 4294967296, want: -4294967296}, + test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 4294967296, want: -4294967296}, + test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 9223372036854775806, want: -9223372036854775806}, + test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 9223372036854775806, want: -9223372036854775806}, + test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 9223372036854775807, want: 1}, + test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 9223372036854775807, want: 1}, + test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0}, + test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: -9223372036854775807, want: -1}, + test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: -4294967296, want: 0}, + test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: -4294967296, want: -4294967296}, + test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: -1, want: 0}, + test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: -1, want: -1}, + test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 0, want: 0}, + 
test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: 1, want: 0}, + test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 1, want: 1}, + test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: 4294967296, want: 0}, + test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 4294967296, want: 4294967296}, + test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: 9223372036854775806, want: -2}, + test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1}, + test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775807}, + test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: -9223372036854775808, want: -1}, + test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: -9223372036854775807, want: 0}, + test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: -9223372036854775807, want: 0}, + test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: -4294967296, want: -4294967295}, + test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: -4294967296, want: -4294967296}, + test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: -1, want: 0}, + test_int64{fn: 
mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: -1, want: -1}, + test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 0, want: 0}, + test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: 1, want: 0}, + test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 1, want: 1}, + test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: 4294967296, want: -4294967295}, + test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 4294967296, want: 4294967296}, + test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1}, + test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: 9223372036854775807, want: 0}, + test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 9223372036854775807, want: 0}, + test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: -9223372036854775808, want: -4294967296}, + test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: -9223372036854775808, want: 0}, + test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: -9223372036854775807, want: -4294967296}, + test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: -9223372036854775807, want: -4294967295}, + test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: -4294967296, want: 0}, + test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: -4294967296, want: 0}, + test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", 
in: -1, want: 0}, + test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: -1, want: -1}, + test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 0, want: 0}, + test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: 1, want: 0}, + test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 1, want: 1}, + test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: 4294967296, want: 0}, + test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 4294967296, want: 0}, + test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: 9223372036854775806, want: -4294967296}, + test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 9223372036854775806, want: 4294967294}, + test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: 9223372036854775807, want: -4294967296}, + test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 9223372036854775807, want: 4294967295}, + test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: -9223372036854775808, want: -1}, + test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: -9223372036854775808, want: 0}, + test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: -9223372036854775807, want: -1}, + test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: -9223372036854775807, want: 0}, + test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: -4294967296, want: -1}, + test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: -4294967296, want: 0}, + test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: -1, want: 0}, + test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: -1, want: 0}, + test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 0, want: 0}, + test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: 1, want: 0}, + test_int64{fn: mod_int64_Neg1, fnname: 
"mod_int64_Neg1", in: 1, want: 0}, + test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: 4294967296, want: -1}, + test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 4294967296, want: 0}, + test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: 9223372036854775806, want: -1}, + test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 9223372036854775806, want: 0}, + test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: 9223372036854775807, want: -1}, + test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 9223372036854775807, want: 0}, + test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: -9223372036854775807, want: 0}, + test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: -4294967296, want: 0}, + test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: -1, want: 0}, + test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: 1, want: 0}, + test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: 4294967296, want: 0}, + test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: 9223372036854775806, want: 0}, + test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: 9223372036854775807, want: 0}, + test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: -9223372036854775808, want: 1}, + test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: -9223372036854775808, want: 0}, + test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: -9223372036854775807, want: 1}, + test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: -9223372036854775807, want: 0}, + test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: -4294967296, want: 1}, + test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: -4294967296, want: 0}, + test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: -1, want: 0}, + test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: -1, want: 0}, + test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 0, want: 0}, + test_int64{fn: mod_1_int64, 
fnname: "mod_1_int64", in: 1, want: 0}, + test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 1, want: 0}, + test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: 4294967296, want: 1}, + test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 4294967296, want: 0}, + test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: 9223372036854775806, want: 1}, + test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 9223372036854775806, want: 0}, + test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: 9223372036854775807, want: 1}, + test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 9223372036854775807, want: 0}, + test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: -9223372036854775808, want: 4294967296}, + test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: -9223372036854775808, want: 0}, + test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: -9223372036854775807, want: 4294967296}, + test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: -9223372036854775807, want: -4294967295}, + test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: -4294967296, want: 0}, + test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: -4294967296, want: 0}, + test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: -1, want: 0}, + test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: -1, want: -1}, + test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 0, want: 0}, + test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: 1, want: 0}, + test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 1, want: 1}, + test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: 4294967296, want: 0}, + test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 4294967296, want: 0}, + test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: 
9223372036854775806, want: 4294967296}, + test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 9223372036854775806, want: 4294967294}, + test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: 9223372036854775807, want: 4294967296}, + test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 9223372036854775807, want: 4294967295}, + test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: -9223372036854775808, want: 9223372036854775806}, + test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: -9223372036854775808, want: -2}, + test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: -9223372036854775807, want: 9223372036854775806}, + test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: -9223372036854775807, want: -1}, + test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: -4294967296, want: 4294967294}, + test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: -4294967296, want: -4294967296}, + test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: -1, want: 0}, + test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: -1, want: -1}, + test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 0, want: 0}, + test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: 1, want: 0}, + test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 1, want: 1}, + test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: 4294967296, want: 4294967294}, + test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 4294967296, want: 4294967296}, + test_int64{fn: 
mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: 9223372036854775806, want: 0}, + test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 9223372036854775806, want: 0}, + test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: 9223372036854775807, want: 9223372036854775806}, + test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 9223372036854775807, want: 1}, + test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: -9223372036854775808, want: 9223372036854775807}, + test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: -9223372036854775808, want: -1}, + test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: -9223372036854775807, want: 0}, + test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: -9223372036854775807, want: 0}, + test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: -4294967296, want: 4294967295}, + test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: -4294967296, want: -4294967296}, + test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: -1, want: 0}, + test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: -1, want: -1}, + test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 0, want: 0}, + test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: 1, want: 0}, + test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 1, want: 1}, + test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: 4294967296, want: 4294967295}, + test_int64{fn: mod_int64_9223372036854775807, fnname: 
"mod_int64_9223372036854775807", in: 4294967296, want: 4294967296}, + test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: 9223372036854775806, want: 1}, + test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: 9223372036854775807, want: 0}, + test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 9223372036854775807, want: 0}, + test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: -9223372036854775807, want: -9223372036854775808}, + test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: -9223372036854775807, want: -9223372036854775808}, + test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: -4294967296, want: -9223372036854775808}, + test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: -4294967296, want: -9223372036854775808}, + test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: -1, want: -9223372036854775808}, + test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: -1, want: -9223372036854775808}, + test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 0, want: 0}, + test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 0, want: 0}, + test_int64{fn: 
and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 1, want: 0}, + test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 1, want: 0}, + test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 4294967296, want: 0}, + test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 4294967296, want: 0}, + test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 9223372036854775806, want: 0}, + test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 9223372036854775806, want: 0}, + test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 9223372036854775807, want: 0}, + test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 9223372036854775807, want: 0}, + test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: -4294967296, want: -9223372036854775808}, + test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: -4294967296, want: -9223372036854775808}, + test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: -1, want: -9223372036854775807}, + test_int64{fn: 
and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: -1, want: -9223372036854775807}, + test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 0, want: 0}, + test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 0, want: 0}, + test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 1, want: 1}, + test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 1, want: 1}, + test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 4294967296, want: 0}, + test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 4294967296, want: 0}, + test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 9223372036854775806, want: 0}, + test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 9223372036854775806, want: 0}, + test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 9223372036854775807, want: 1}, + test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 9223372036854775807, want: 1}, + test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: -9223372036854775807, want: -9223372036854775808}, + test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: -9223372036854775807, want: -9223372036854775808}, + test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: -4294967296, want: -4294967296}, + test_int64{fn: 
and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: -4294967296, want: -4294967296}, + test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: -1, want: -4294967296}, + test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: -1, want: -4294967296}, + test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 0, want: 0}, + test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 0, want: 0}, + test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 1, want: 0}, + test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 1, want: 0}, + test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 4294967296, want: 4294967296}, + test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 4294967296, want: 4294967296}, + test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 9223372036854775806, want: 9223372032559808512}, + test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 9223372036854775806, want: 9223372032559808512}, + test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 9223372036854775807, want: 9223372032559808512}, + test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 9223372036854775807, want: 9223372032559808512}, + test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: -4294967296, want: -4294967296}, + test_int64{fn: 
and_int64_Neg1, fnname: "and_int64_Neg1", in: -4294967296, want: -4294967296}, + test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: -1, want: -1}, + test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: -1, want: -1}, + test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 0, want: 0}, + test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 0, want: 0}, + test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 1, want: 1}, + test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 1, want: 1}, + test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 4294967296, want: 4294967296}, + test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 4294967296, want: 4294967296}, + test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: and_0_int64, fnname: "and_0_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: and_int64_0, fnname: "and_int64_0", in: -9223372036854775808, want: 0}, + test_int64{fn: and_0_int64, fnname: "and_0_int64", in: -9223372036854775807, want: 0}, + test_int64{fn: and_int64_0, fnname: "and_int64_0", in: -9223372036854775807, want: 0}, + test_int64{fn: and_0_int64, fnname: "and_0_int64", in: -4294967296, want: 0}, + test_int64{fn: and_int64_0, fnname: "and_int64_0", in: -4294967296, want: 0}, + test_int64{fn: and_0_int64, fnname: "and_0_int64", in: -1, want: 0}, + test_int64{fn: and_int64_0, fnname: "and_int64_0", in: -1, want: 0}, + test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 0, want: 0}, + test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 0, want: 0}, + test_int64{fn: 
and_0_int64, fnname: "and_0_int64", in: 1, want: 0}, + test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 1, want: 0}, + test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 4294967296, want: 0}, + test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 4294967296, want: 0}, + test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 9223372036854775806, want: 0}, + test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 9223372036854775806, want: 0}, + test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 9223372036854775807, want: 0}, + test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 9223372036854775807, want: 0}, + test_int64{fn: and_1_int64, fnname: "and_1_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: and_int64_1, fnname: "and_int64_1", in: -9223372036854775808, want: 0}, + test_int64{fn: and_1_int64, fnname: "and_1_int64", in: -9223372036854775807, want: 1}, + test_int64{fn: and_int64_1, fnname: "and_int64_1", in: -9223372036854775807, want: 1}, + test_int64{fn: and_1_int64, fnname: "and_1_int64", in: -4294967296, want: 0}, + test_int64{fn: and_int64_1, fnname: "and_int64_1", in: -4294967296, want: 0}, + test_int64{fn: and_1_int64, fnname: "and_1_int64", in: -1, want: 1}, + test_int64{fn: and_int64_1, fnname: "and_int64_1", in: -1, want: 1}, + test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 0, want: 0}, + test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 0, want: 0}, + test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 1, want: 1}, + test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 1, want: 1}, + test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 4294967296, want: 0}, + test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 4294967296, want: 0}, + test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 9223372036854775806, want: 0}, + test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 9223372036854775806, want: 0}, + test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 9223372036854775807, want: 1}, + 
test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 9223372036854775807, want: 1}, + test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: -9223372036854775808, want: 0}, + test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: -9223372036854775807, want: 0}, + test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: -9223372036854775807, want: 0}, + test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: -4294967296, want: 4294967296}, + test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: -4294967296, want: 4294967296}, + test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: -1, want: 4294967296}, + test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: -1, want: 4294967296}, + test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 0, want: 0}, + test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 0, want: 0}, + test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 1, want: 0}, + test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 1, want: 0}, + test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 4294967296, want: 4294967296}, + test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 4294967296, want: 4294967296}, + test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 9223372036854775806, want: 4294967296}, + test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 9223372036854775806, want: 4294967296}, + test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 9223372036854775807, want: 4294967296}, + test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 9223372036854775807, want: 4294967296}, + test_int64{fn: and_9223372036854775806_int64, 
fnname: "and_9223372036854775806_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: -9223372036854775808, want: 0}, + test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: -9223372036854775807, want: 0}, + test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: -9223372036854775807, want: 0}, + test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: -4294967296, want: 9223372032559808512}, + test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: -4294967296, want: 9223372032559808512}, + test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: -1, want: 9223372036854775806}, + test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: -1, want: 9223372036854775806}, + test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 0, want: 0}, + test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 0, want: 0}, + test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 1, want: 0}, + test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 1, want: 0}, + test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 4294967296, want: 4294967296}, + test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 4294967296, want: 4294967296}, + test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: and_9223372036854775806_int64, fnname: 
"and_9223372036854775806_int64", in: 9223372036854775807, want: 9223372036854775806}, + test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 9223372036854775807, want: 9223372036854775806}, + test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: -9223372036854775808, want: 0}, + test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: -9223372036854775807, want: 1}, + test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: -9223372036854775807, want: 1}, + test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: -4294967296, want: 9223372032559808512}, + test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: -4294967296, want: 9223372032559808512}, + test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: -1, want: 9223372036854775807}, + test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: -1, want: 9223372036854775807}, + test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 0, want: 0}, + test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 0, want: 0}, + test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 1, want: 1}, + test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 1, want: 1}, + test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 4294967296, want: 4294967296}, + test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 4294967296, want: 4294967296}, + test_int64{fn: and_9223372036854775807_int64, fnname: 
"and_9223372036854775807_int64", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: -4294967296, want: -4294967296}, + test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: -4294967296, want: -4294967296}, + test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: -1, want: -1}, + test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: -1, want: -1}, + test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 0, want: -9223372036854775808}, + test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 0, want: -9223372036854775808}, + test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 1, want: -9223372036854775807}, + test_int64{fn: 
or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 1, want: -9223372036854775807}, + test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 4294967296, want: -9223372032559808512}, + test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 4294967296, want: -9223372032559808512}, + test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 9223372036854775806, want: -2}, + test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 9223372036854775806, want: -2}, + test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1}, + test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 9223372036854775807, want: -1}, + test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775807}, + test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: -9223372036854775808, want: -9223372036854775807}, + test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: -4294967296, want: -4294967295}, + test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: -4294967296, want: -4294967295}, + test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: -1, want: -1}, + test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: -1, want: -1}, + test_int64{fn: 
or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 0, want: -9223372036854775807}, + test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 0, want: -9223372036854775807}, + test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 1, want: -9223372036854775807}, + test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 1, want: -9223372036854775807}, + test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 4294967296, want: -9223372032559808511}, + test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 4294967296, want: -9223372032559808511}, + test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1}, + test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 9223372036854775806, want: -1}, + test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 9223372036854775807, want: -1}, + test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 9223372036854775807, want: -1}, + test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: -9223372036854775808, want: -4294967296}, + test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: -9223372036854775808, want: -4294967296}, + test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: -9223372036854775807, want: -4294967295}, + test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: -9223372036854775807, want: -4294967295}, + test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: -4294967296, want: -4294967296}, + test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: -4294967296, want: -4294967296}, 
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: -1, want: -1}, + test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: -1, want: -1}, + test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 0, want: -4294967296}, + test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 0, want: -4294967296}, + test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 1, want: -4294967295}, + test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 1, want: -4294967295}, + test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 4294967296, want: -4294967296}, + test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 4294967296, want: -4294967296}, + test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 9223372036854775806, want: -2}, + test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 9223372036854775806, want: -2}, + test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 9223372036854775807, want: -1}, + test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 9223372036854775807, want: -1}, + test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: -9223372036854775808, want: -1}, + test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: -9223372036854775808, want: -1}, + test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: -9223372036854775807, want: -1}, + test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: -9223372036854775807, want: -1}, + test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: -4294967296, want: -1}, + test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: -4294967296, want: -1}, + test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: -1, want: -1}, + test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: -1, want: -1}, + test_int64{fn: or_Neg1_int64, fnname: 
"or_Neg1_int64", in: 0, want: -1}, + test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 0, want: -1}, + test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 1, want: -1}, + test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 1, want: -1}, + test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 4294967296, want: -1}, + test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 4294967296, want: -1}, + test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 9223372036854775806, want: -1}, + test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 9223372036854775806, want: -1}, + test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 9223372036854775807, want: -1}, + test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 9223372036854775807, want: -1}, + test_int64{fn: or_0_int64, fnname: "or_0_int64", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: or_int64_0, fnname: "or_int64_0", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: or_0_int64, fnname: "or_0_int64", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: or_int64_0, fnname: "or_int64_0", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: or_0_int64, fnname: "or_0_int64", in: -4294967296, want: -4294967296}, + test_int64{fn: or_int64_0, fnname: "or_int64_0", in: -4294967296, want: -4294967296}, + test_int64{fn: or_0_int64, fnname: "or_0_int64", in: -1, want: -1}, + test_int64{fn: or_int64_0, fnname: "or_int64_0", in: -1, want: -1}, + test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 0, want: 0}, + test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 0, want: 0}, + test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 1, want: 1}, + test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 1, want: 1}, + test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 4294967296, want: 4294967296}, + test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 4294967296, want: 4294967296}, + 
test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: or_1_int64, fnname: "or_1_int64", in: -9223372036854775808, want: -9223372036854775807}, + test_int64{fn: or_int64_1, fnname: "or_int64_1", in: -9223372036854775808, want: -9223372036854775807}, + test_int64{fn: or_1_int64, fnname: "or_1_int64", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: or_int64_1, fnname: "or_int64_1", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: or_1_int64, fnname: "or_1_int64", in: -4294967296, want: -4294967295}, + test_int64{fn: or_int64_1, fnname: "or_int64_1", in: -4294967296, want: -4294967295}, + test_int64{fn: or_1_int64, fnname: "or_1_int64", in: -1, want: -1}, + test_int64{fn: or_int64_1, fnname: "or_int64_1", in: -1, want: -1}, + test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 0, want: 1}, + test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 0, want: 1}, + test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 1, want: 1}, + test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 1, want: 1}, + test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 4294967296, want: 4294967297}, + test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 4294967296, want: 4294967297}, + test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 9223372036854775806, want: 9223372036854775807}, + test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 9223372036854775806, want: 9223372036854775807}, + test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 9223372036854775807, want: 
9223372036854775807}, + test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: -9223372036854775808, want: -9223372032559808512}, + test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: -9223372036854775808, want: -9223372032559808512}, + test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: -9223372036854775807, want: -9223372032559808511}, + test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: -9223372036854775807, want: -9223372032559808511}, + test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: -4294967296, want: -4294967296}, + test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: -4294967296, want: -4294967296}, + test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: -1, want: -1}, + test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: -1, want: -1}, + test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 0, want: 4294967296}, + test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 0, want: 4294967296}, + test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 1, want: 4294967297}, + test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 1, want: 4294967297}, + test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 4294967296, want: 4294967296}, + test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 4294967296, want: 4294967296}, + test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: 
or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: -9223372036854775808, want: -2}, + test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: -9223372036854775808, want: -2}, + test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: -9223372036854775807, want: -1}, + test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: -9223372036854775807, want: -1}, + test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: -4294967296, want: -2}, + test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: -4294967296, want: -2}, + test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: -1, want: -1}, + test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: -1, want: -1}, + test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 0, want: 9223372036854775806}, + test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 0, want: 9223372036854775806}, + test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 1, want: 9223372036854775807}, + test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 1, want: 9223372036854775807}, + test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 4294967296, want: 9223372036854775806}, + test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 4294967296, want: 9223372036854775806}, + test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: or_9223372036854775806_int64, 
fnname: "or_9223372036854775806_int64", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: -9223372036854775808, want: -1}, + test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: -9223372036854775808, want: -1}, + test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: -9223372036854775807, want: -1}, + test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: -9223372036854775807, want: -1}, + test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: -4294967296, want: -1}, + test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: -4294967296, want: -1}, + test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: -1, want: -1}, + test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: -1, want: -1}, + test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 0, want: 9223372036854775807}, + test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 0, want: 9223372036854775807}, + test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 1, want: 9223372036854775807}, + test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 1, want: 9223372036854775807}, + test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 4294967296, want: 9223372036854775807}, + test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 4294967296, want: 9223372036854775807}, + test_int64{fn: or_9223372036854775807_int64, fnname: 
"or_9223372036854775807_int64", in: 9223372036854775806, want: 9223372036854775807}, + test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 9223372036854775806, want: 9223372036854775807}, + test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0}, + test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0}, + test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: -9223372036854775807, want: 1}, + test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: -9223372036854775807, want: 1}, + test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: -4294967296, want: 9223372032559808512}, + test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: -4294967296, want: 9223372032559808512}, + test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: -1, want: 9223372036854775807}, + test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: -1, want: 9223372036854775807}, + test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 0, want: -9223372036854775808}, + test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 0, want: -9223372036854775808}, + test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 1, want: -9223372036854775807}, + test_int64{fn: 
xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 1, want: -9223372036854775807}, + test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 4294967296, want: -9223372032559808512}, + test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 4294967296, want: -9223372032559808512}, + test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 9223372036854775806, want: -2}, + test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 9223372036854775806, want: -2}, + test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1}, + test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 9223372036854775807, want: -1}, + test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: -9223372036854775808, want: 1}, + test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: -9223372036854775808, want: 1}, + test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: -9223372036854775807, want: 0}, + test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: -9223372036854775807, want: 0}, + test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: -4294967296, want: 9223372032559808513}, + test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: -4294967296, want: 9223372032559808513}, + test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: -1, want: 9223372036854775806}, + test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: -1, want: 9223372036854775806}, + 
test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 0, want: -9223372036854775807}, + test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 0, want: -9223372036854775807}, + test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 1, want: -9223372036854775808}, + test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 1, want: -9223372036854775808}, + test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 4294967296, want: -9223372032559808511}, + test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 4294967296, want: -9223372032559808511}, + test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1}, + test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 9223372036854775806, want: -1}, + test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 9223372036854775807, want: -2}, + test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 9223372036854775807, want: -2}, + test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: -9223372036854775808, want: 9223372032559808512}, + test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: -9223372036854775808, want: 9223372032559808512}, + test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: -9223372036854775807, want: 9223372032559808513}, + test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: -9223372036854775807, want: 9223372032559808513}, + test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: -4294967296, want: 0}, + test_int64{fn: xor_int64_Neg4294967296, 
fnname: "xor_int64_Neg4294967296", in: -4294967296, want: 0}, + test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: -1, want: 4294967295}, + test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: -1, want: 4294967295}, + test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 0, want: -4294967296}, + test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 0, want: -4294967296}, + test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 1, want: -4294967295}, + test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 1, want: -4294967295}, + test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 4294967296, want: -8589934592}, + test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 4294967296, want: -8589934592}, + test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 9223372036854775806, want: -9223372032559808514}, + test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 9223372036854775806, want: -9223372032559808514}, + test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 9223372036854775807, want: -9223372032559808513}, + test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 9223372036854775807, want: -9223372032559808513}, + test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: -9223372036854775808, want: 9223372036854775807}, + test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: -9223372036854775808, want: 9223372036854775807}, + test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: -9223372036854775807, want: 9223372036854775806}, + test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: -9223372036854775807, want: 9223372036854775806}, + test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: -4294967296, want: 4294967295}, + test_int64{fn: 
xor_int64_Neg1, fnname: "xor_int64_Neg1", in: -4294967296, want: 4294967295}, + test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: -1, want: 0}, + test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: -1, want: 0}, + test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 0, want: -1}, + test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 0, want: -1}, + test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 1, want: -2}, + test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 1, want: -2}, + test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 4294967296, want: -4294967297}, + test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 4294967296, want: -4294967297}, + test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 9223372036854775806, want: -9223372036854775807}, + test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 9223372036854775806, want: -9223372036854775807}, + test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 9223372036854775807, want: -9223372036854775808}, + test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 9223372036854775807, want: -9223372036854775808}, + test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: -9223372036854775808, want: -9223372036854775808}, + test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: -9223372036854775807, want: -9223372036854775807}, + test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: -4294967296, want: -4294967296}, + test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: -4294967296, want: -4294967296}, + test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: -1, want: -1}, + test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: -1, want: -1}, + test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 0, 
want: 0}, + test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 0, want: 0}, + test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 1, want: 1}, + test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 1, want: 1}, + test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 4294967296, want: 4294967296}, + test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 4294967296, want: 4294967296}, + test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 9223372036854775806, want: 9223372036854775806}, + test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 9223372036854775807, want: 9223372036854775807}, + test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: -9223372036854775808, want: -9223372036854775807}, + test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: -9223372036854775808, want: -9223372036854775807}, + test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: -9223372036854775807, want: -9223372036854775808}, + test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: -9223372036854775807, want: -9223372036854775808}, + test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: -4294967296, want: -4294967295}, + test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: -4294967296, want: -4294967295}, + test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: -1, want: -2}, + test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: -1, want: -2}, + test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 0, want: 1}, + test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 0, want: 1}, + test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 1, want: 0}, + test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 1, want: 0}, + test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 4294967296, want: 4294967297}, + test_int64{fn: xor_int64_1, fnname: "xor_int64_1", 
in: 4294967296, want: 4294967297}, + test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 9223372036854775806, want: 9223372036854775807}, + test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 9223372036854775806, want: 9223372036854775807}, + test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 9223372036854775807, want: 9223372036854775806}, + test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 9223372036854775807, want: 9223372036854775806}, + test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: -9223372036854775808, want: -9223372032559808512}, + test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: -9223372036854775808, want: -9223372032559808512}, + test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: -9223372036854775807, want: -9223372032559808511}, + test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: -9223372036854775807, want: -9223372032559808511}, + test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: -4294967296, want: -8589934592}, + test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: -4294967296, want: -8589934592}, + test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: -1, want: -4294967297}, + test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: -1, want: -4294967297}, + test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 0, want: 4294967296}, + test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 0, want: 4294967296}, + test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 1, want: 4294967297}, + test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 1, want: 4294967297}, + test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 4294967296, want: 0}, + test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 4294967296, want: 0}, + test_int64{fn: xor_4294967296_int64, 
fnname: "xor_4294967296_int64", in: 9223372036854775806, want: 9223372032559808510}, + test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 9223372036854775806, want: 9223372032559808510}, + test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 9223372036854775807, want: 9223372032559808511}, + test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 9223372036854775807, want: 9223372032559808511}, + test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: -9223372036854775808, want: -2}, + test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: -9223372036854775808, want: -2}, + test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: -9223372036854775807, want: -1}, + test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: -9223372036854775807, want: -1}, + test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: -4294967296, want: -9223372032559808514}, + test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: -4294967296, want: -9223372032559808514}, + test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: -1, want: -9223372036854775807}, + test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: -1, want: -9223372036854775807}, + test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 0, want: 9223372036854775806}, + test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 0, want: 9223372036854775806}, + test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 1, want: 9223372036854775807}, + test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 1, want: 9223372036854775807}, + test_int64{fn: 
xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 4294967296, want: 9223372032559808510}, + test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 4294967296, want: 9223372032559808510}, + test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 9223372036854775806, want: 0}, + test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 9223372036854775806, want: 0}, + test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 9223372036854775807, want: 1}, + test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 9223372036854775807, want: 1}, + test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: -9223372036854775808, want: -1}, + test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: -9223372036854775808, want: -1}, + test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: -9223372036854775807, want: -2}, + test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: -9223372036854775807, want: -2}, + test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: -4294967296, want: -9223372032559808513}, + test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: -4294967296, want: -9223372032559808513}, + test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: -1, want: -9223372036854775808}, + test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: -1, want: -9223372036854775808}, + test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 0, want: 9223372036854775807}, + test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 0, want: 
9223372036854775807}, + test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 1, want: 9223372036854775806}, + test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 1, want: 9223372036854775806}, + test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 4294967296, want: 9223372032559808511}, + test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 4294967296, want: 9223372032559808511}, + test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 9223372036854775806, want: 1}, + test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 9223372036854775806, want: 1}, + test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 9223372036854775807, want: 0}, + test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 9223372036854775807, want: 0}} + +type test_int64mul struct { + fn func(int64) int64 + fnname string + in int64 + want int64 +} + +var tests_int64mul = []test_int64{ + + test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: -9, want: 81}, + test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: -9, want: 81}, + test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: -5, want: 45}, + test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: -5, want: 45}, + test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: -3, want: 27}, + test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: -3, want: 27}, + test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 3, want: -27}, + test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 3, want: -27}, + test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 5, want: -45}, + test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 5, want: -45}, + test_int64{fn: mul_Neg9_int64, fnname: 
"mul_Neg9_int64", in: 7, want: -63}, + test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 7, want: -63}, + test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 9, want: -81}, + test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 9, want: -81}, + test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 10, want: -90}, + test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 10, want: -90}, + test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 11, want: -99}, + test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 11, want: -99}, + test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 13, want: -117}, + test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 13, want: -117}, + test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 19, want: -171}, + test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 19, want: -171}, + test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 21, want: -189}, + test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 21, want: -189}, + test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 25, want: -225}, + test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 25, want: -225}, + test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 27, want: -243}, + test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 27, want: -243}, + test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 37, want: -333}, + test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 37, want: -333}, + test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 41, want: -369}, + test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 41, want: -369}, + test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 45, want: -405}, + test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 45, want: -405}, + test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 73, want: -657}, + test_int64{fn: mul_int64_Neg9, fnname: 
"mul_int64_Neg9", in: 73, want: -657}, + test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 81, want: -729}, + test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 81, want: -729}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: -9, want: 45}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: -9, want: 45}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: -5, want: 25}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: -5, want: 25}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: -3, want: 15}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: -3, want: 15}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 3, want: -15}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 3, want: -15}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 5, want: -25}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 5, want: -25}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 7, want: -35}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 7, want: -35}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 9, want: -45}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 9, want: -45}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 10, want: -50}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 10, want: -50}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 11, want: -55}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 11, want: -55}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 13, want: -65}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 13, want: -65}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 19, want: -95}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 19, want: -95}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 21, want: 
-105}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 21, want: -105}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 25, want: -125}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 25, want: -125}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 27, want: -135}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 27, want: -135}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 37, want: -185}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 37, want: -185}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 41, want: -205}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 41, want: -205}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 45, want: -225}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 45, want: -225}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 73, want: -365}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 73, want: -365}, + test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 81, want: -405}, + test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 81, want: -405}, + test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: -9, want: 27}, + test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: -9, want: 27}, + test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: -5, want: 15}, + test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: -5, want: 15}, + test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: -3, want: 9}, + test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: -3, want: 9}, + test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 3, want: -9}, + test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 3, want: -9}, + test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 5, want: -15}, + test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 5, want: -15}, + test_int64{fn: 
mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 7, want: -21}, + test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 7, want: -21}, + test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 9, want: -27}, + test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 9, want: -27}, + test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 10, want: -30}, + test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 10, want: -30}, + test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 11, want: -33}, + test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 11, want: -33}, + test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 13, want: -39}, + test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 13, want: -39}, + test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 19, want: -57}, + test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 19, want: -57}, + test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 21, want: -63}, + test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 21, want: -63}, + test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 25, want: -75}, + test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 25, want: -75}, + test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 27, want: -81}, + test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 27, want: -81}, + test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 37, want: -111}, + test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 37, want: -111}, + test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 41, want: -123}, + test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 41, want: -123}, + test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 45, want: -135}, + test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 45, want: -135}, + test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 73, want: -219}, + test_int64{fn: mul_int64_Neg3, 
fnname: "mul_int64_Neg3", in: 73, want: -219}, + test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 81, want: -243}, + test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 81, want: -243}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: -9, want: -27}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: -9, want: -27}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: -5, want: -15}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: -5, want: -15}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: -3, want: -9}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: -3, want: -9}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 3, want: 9}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 3, want: 9}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 5, want: 15}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 5, want: 15}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 7, want: 21}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 7, want: 21}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 9, want: 27}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 9, want: 27}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 10, want: 30}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 10, want: 30}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 11, want: 33}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 11, want: 33}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 13, want: 39}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 13, want: 39}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 19, want: 57}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 19, want: 57}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 21, want: 63}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 21, want: 63}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 25, want: 
75}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 25, want: 75}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 27, want: 81}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 27, want: 81}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 37, want: 111}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 37, want: 111}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 41, want: 123}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 41, want: 123}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 45, want: 135}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 45, want: 135}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 73, want: 219}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 73, want: 219}, + test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 81, want: 243}, + test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 81, want: 243}, + test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: -9, want: -45}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: -9, want: -45}, + test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: -5, want: -25}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: -5, want: -25}, + test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: -3, want: -15}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: -3, want: -15}, + test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 3, want: 15}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 3, want: 15}, + test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 5, want: 25}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 5, want: 25}, + test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 7, want: 35}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 7, want: 35}, + test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 9, want: 45}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 9, want: 45}, + test_int64{fn: mul_5_int64, fnname: 
"mul_5_int64", in: 10, want: 50}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 10, want: 50}, + test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 11, want: 55}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 11, want: 55}, + test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 13, want: 65}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 13, want: 65}, + test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 19, want: 95}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 19, want: 95}, + test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 21, want: 105}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 21, want: 105}, + test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 25, want: 125}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 25, want: 125}, + test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 27, want: 135}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 27, want: 135}, + test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 37, want: 185}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 37, want: 185}, + test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 41, want: 205}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 41, want: 205}, + test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 45, want: 225}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 45, want: 225}, + test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 73, want: 365}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 73, want: 365}, + test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 81, want: 405}, + test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 81, want: 405}, + test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: -9, want: -63}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: -9, want: -63}, + test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: -5, want: -35}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: -5, want: -35}, + 
test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: -3, want: -21}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: -3, want: -21}, + test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 3, want: 21}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 3, want: 21}, + test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 5, want: 35}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 5, want: 35}, + test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 7, want: 49}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 7, want: 49}, + test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 9, want: 63}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 9, want: 63}, + test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 10, want: 70}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 10, want: 70}, + test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 11, want: 77}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 11, want: 77}, + test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 13, want: 91}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 13, want: 91}, + test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 19, want: 133}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 19, want: 133}, + test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 21, want: 147}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 21, want: 147}, + test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 25, want: 175}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 25, want: 175}, + test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 27, want: 189}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 27, want: 189}, + test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 37, want: 259}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 37, want: 259}, + test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 41, want: 287}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", 
in: 41, want: 287}, + test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 45, want: 315}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 45, want: 315}, + test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 73, want: 511}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 73, want: 511}, + test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 81, want: 567}, + test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 81, want: 567}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: -9, want: -81}, + test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: -9, want: -81}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: -5, want: -45}, + test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: -5, want: -45}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: -3, want: -27}, + test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: -3, want: -27}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 3, want: 27}, + test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 3, want: 27}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 5, want: 45}, + test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 5, want: 45}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 7, want: 63}, + test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 7, want: 63}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 9, want: 81}, + test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 9, want: 81}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 10, want: 90}, + test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 10, want: 90}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 11, want: 99}, + test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 11, want: 99}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 13, want: 117}, + test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 13, want: 117}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 19, want: 171}, + test_int64{fn: 
mul_int64_9, fnname: "mul_int64_9", in: 19, want: 171}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 21, want: 189}, + test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 21, want: 189}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 25, want: 225}, + test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 25, want: 225}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 27, want: 243}, + test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 27, want: 243}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 37, want: 333}, + test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 37, want: 333}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 41, want: 369}, + test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 41, want: 369}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 45, want: 405}, + test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 45, want: 405}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 73, want: 657}, + test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 73, want: 657}, + test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 81, want: 729}, + test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 81, want: 729}, + test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: -9, want: -90}, + test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: -9, want: -90}, + test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: -5, want: -50}, + test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: -5, want: -50}, + test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: -3, want: -30}, + test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: -3, want: -30}, + test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 3, want: 30}, + test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 3, want: 30}, + test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 5, want: 50}, + test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 5, want: 50}, + test_int64{fn: mul_10_int64, 
fnname: "mul_10_int64", in: 7, want: 70}, + test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 7, want: 70}, + test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 9, want: 90}, + test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 9, want: 90}, + test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 10, want: 100}, + test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 10, want: 100}, + test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 11, want: 110}, + test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 11, want: 110}, + test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 13, want: 130}, + test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 13, want: 130}, + test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 19, want: 190}, + test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 19, want: 190}, + test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 21, want: 210}, + test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 21, want: 210}, + test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 25, want: 250}, + test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 25, want: 250}, + test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 27, want: 270}, + test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 27, want: 270}, + test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 37, want: 370}, + test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 37, want: 370}, + test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 41, want: 410}, + test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 41, want: 410}, + test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 45, want: 450}, + test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 45, want: 450}, + test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 73, want: 730}, + test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 73, want: 730}, + test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 81, want: 810}, + 
test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 81, want: 810}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: -9, want: -99}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: -9, want: -99}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: -5, want: -55}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: -5, want: -55}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: -3, want: -33}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: -3, want: -33}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 3, want: 33}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 3, want: 33}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 5, want: 55}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 5, want: 55}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 7, want: 77}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 7, want: 77}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 9, want: 99}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 9, want: 99}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 10, want: 110}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 10, want: 110}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 11, want: 121}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 11, want: 121}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 13, want: 143}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 13, want: 143}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 19, want: 209}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 19, want: 209}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 21, want: 231}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 21, want: 231}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 25, want: 275}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 25, 
want: 275}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 27, want: 297}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 27, want: 297}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 37, want: 407}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 37, want: 407}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 41, want: 451}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 41, want: 451}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 45, want: 495}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 45, want: 495}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 73, want: 803}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 73, want: 803}, + test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 81, want: 891}, + test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 81, want: 891}, + test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: -9, want: -117}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: -9, want: -117}, + test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: -5, want: -65}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: -5, want: -65}, + test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: -3, want: -39}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: -3, want: -39}, + test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 3, want: 39}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 3, want: 39}, + test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 5, want: 65}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 5, want: 65}, + test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 7, want: 91}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 7, want: 91}, + test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 9, want: 117}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 9, want: 117}, + test_int64{fn: mul_13_int64, fnname: 
"mul_13_int64", in: 10, want: 130}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 10, want: 130}, + test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 11, want: 143}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 11, want: 143}, + test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 13, want: 169}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 13, want: 169}, + test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 19, want: 247}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 19, want: 247}, + test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 21, want: 273}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 21, want: 273}, + test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 25, want: 325}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 25, want: 325}, + test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 27, want: 351}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 27, want: 351}, + test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 37, want: 481}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 37, want: 481}, + test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 41, want: 533}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 41, want: 533}, + test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 45, want: 585}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 45, want: 585}, + test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 73, want: 949}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 73, want: 949}, + test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 81, want: 1053}, + test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 81, want: 1053}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: -9, want: -171}, + test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: -9, want: -171}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: -5, want: -95}, + 
test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: -5, want: -95}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: -3, want: -57}, + test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: -3, want: -57}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 3, want: 57}, + test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 3, want: 57}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 5, want: 95}, + test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 5, want: 95}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 7, want: 133}, + test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 7, want: 133}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 9, want: 171}, + test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 9, want: 171}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 10, want: 190}, + test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 10, want: 190}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 11, want: 209}, + test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 11, want: 209}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 13, want: 247}, + test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 13, want: 247}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 19, want: 361}, + test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 19, want: 361}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 21, want: 399}, + test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 21, want: 399}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 25, want: 475}, + test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 25, want: 475}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 27, want: 513}, + test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 27, want: 513}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 37, want: 703}, + test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 37, 
want: 703}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 41, want: 779}, + test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 41, want: 779}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 45, want: 855}, + test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 45, want: 855}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 73, want: 1387}, + test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 73, want: 1387}, + test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 81, want: 1539}, + test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 81, want: 1539}, + test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: -9, want: -189}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: -9, want: -189}, + test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: -5, want: -105}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: -5, want: -105}, + test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: -3, want: -63}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: -3, want: -63}, + test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 3, want: 63}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 3, want: 63}, + test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 5, want: 105}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 5, want: 105}, + test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 7, want: 147}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 7, want: 147}, + test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 9, want: 189}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 9, want: 189}, + test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 10, want: 210}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 10, want: 210}, + test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 11, want: 231}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 11, want: 231}, + test_int64{fn: mul_21_int64, fnname: 
"mul_21_int64", in: 13, want: 273}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 13, want: 273}, + test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 19, want: 399}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 19, want: 399}, + test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 21, want: 441}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 21, want: 441}, + test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 25, want: 525}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 25, want: 525}, + test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 27, want: 567}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 27, want: 567}, + test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 37, want: 777}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 37, want: 777}, + test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 41, want: 861}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 41, want: 861}, + test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 45, want: 945}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 45, want: 945}, + test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 73, want: 1533}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 73, want: 1533}, + test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 81, want: 1701}, + test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 81, want: 1701}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: -9, want: -225}, + test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: -9, want: -225}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: -5, want: -125}, + test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: -5, want: -125}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: -3, want: -75}, + test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: -3, want: -75}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 3, want: 75}, + 
test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 3, want: 75}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 5, want: 125}, + test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 5, want: 125}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 7, want: 175}, + test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 7, want: 175}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 9, want: 225}, + test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 9, want: 225}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 10, want: 250}, + test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 10, want: 250}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 11, want: 275}, + test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 11, want: 275}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 13, want: 325}, + test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 13, want: 325}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 19, want: 475}, + test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 19, want: 475}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 21, want: 525}, + test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 21, want: 525}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 25, want: 625}, + test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 25, want: 625}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 27, want: 675}, + test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 27, want: 675}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 37, want: 925}, + test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 37, want: 925}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 41, want: 1025}, + test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 41, want: 1025}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 45, want: 1125}, + test_int64{fn: mul_int64_25, fnname: "mul_int64_25", 
in: 45, want: 1125}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 73, want: 1825}, + test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 73, want: 1825}, + test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 81, want: 2025}, + test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 81, want: 2025}, + test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: -9, want: -243}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: -9, want: -243}, + test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: -5, want: -135}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: -5, want: -135}, + test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: -3, want: -81}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: -3, want: -81}, + test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 3, want: 81}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 3, want: 81}, + test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 5, want: 135}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 5, want: 135}, + test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 7, want: 189}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 7, want: 189}, + test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 9, want: 243}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 9, want: 243}, + test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 10, want: 270}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 10, want: 270}, + test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 11, want: 297}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 11, want: 297}, + test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 13, want: 351}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 13, want: 351}, + test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 19, want: 513}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 19, want: 513}, + test_int64{fn: mul_27_int64, 
fnname: "mul_27_int64", in: 21, want: 567}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 21, want: 567}, + test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 25, want: 675}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 25, want: 675}, + test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 27, want: 729}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 27, want: 729}, + test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 37, want: 999}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 37, want: 999}, + test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 41, want: 1107}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 41, want: 1107}, + test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 45, want: 1215}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 45, want: 1215}, + test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 73, want: 1971}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 73, want: 1971}, + test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 81, want: 2187}, + test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 81, want: 2187}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: -9, want: -333}, + test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: -9, want: -333}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: -5, want: -185}, + test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: -5, want: -185}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: -3, want: -111}, + test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: -3, want: -111}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 3, want: 111}, + test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 3, want: 111}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 5, want: 185}, + test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 5, want: 185}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 7, want: 
259}, + test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 7, want: 259}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 9, want: 333}, + test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 9, want: 333}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 10, want: 370}, + test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 10, want: 370}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 11, want: 407}, + test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 11, want: 407}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 13, want: 481}, + test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 13, want: 481}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 19, want: 703}, + test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 19, want: 703}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 21, want: 777}, + test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 21, want: 777}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 25, want: 925}, + test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 25, want: 925}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 27, want: 999}, + test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 27, want: 999}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 37, want: 1369}, + test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 37, want: 1369}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 41, want: 1517}, + test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 41, want: 1517}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 45, want: 1665}, + test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 45, want: 1665}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 73, want: 2701}, + test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 73, want: 2701}, + test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 81, want: 2997}, + test_int64{fn: mul_int64_37, 
fnname: "mul_int64_37", in: 81, want: 2997}, + test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: -9, want: -369}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: -9, want: -369}, + test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: -5, want: -205}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: -5, want: -205}, + test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: -3, want: -123}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: -3, want: -123}, + test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 3, want: 123}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 3, want: 123}, + test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 5, want: 205}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 5, want: 205}, + test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 7, want: 287}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 7, want: 287}, + test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 9, want: 369}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 9, want: 369}, + test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 10, want: 410}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 10, want: 410}, + test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 11, want: 451}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 11, want: 451}, + test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 13, want: 533}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 13, want: 533}, + test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 19, want: 779}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 19, want: 779}, + test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 21, want: 861}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 21, want: 861}, + test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 25, want: 1025}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 25, want: 1025}, + 
test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 27, want: 1107}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 27, want: 1107}, + test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 37, want: 1517}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 37, want: 1517}, + test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 41, want: 1681}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 41, want: 1681}, + test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 45, want: 1845}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 45, want: 1845}, + test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 73, want: 2993}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 73, want: 2993}, + test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 81, want: 3321}, + test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 81, want: 3321}, + test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: -9, want: -405}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: -9, want: -405}, + test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: -5, want: -225}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: -5, want: -225}, + test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: -3, want: -135}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: -3, want: -135}, + test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 3, want: 135}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 3, want: 135}, + test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 5, want: 225}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 5, want: 225}, + test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 7, want: 315}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 7, want: 315}, + test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 9, want: 405}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 9, want: 405}, + test_int64{fn: mul_45_int64, fnname: 
"mul_45_int64", in: 10, want: 450}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 10, want: 450}, + test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 11, want: 495}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 11, want: 495}, + test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 13, want: 585}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 13, want: 585}, + test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 19, want: 855}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 19, want: 855}, + test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 21, want: 945}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 21, want: 945}, + test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 25, want: 1125}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 25, want: 1125}, + test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 27, want: 1215}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 27, want: 1215}, + test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 37, want: 1665}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 37, want: 1665}, + test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 41, want: 1845}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 41, want: 1845}, + test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 45, want: 2025}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 45, want: 2025}, + test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 73, want: 3285}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 73, want: 3285}, + test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 81, want: 3645}, + test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 81, want: 3645}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: -9, want: -657}, + test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: -9, want: -657}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: -5, want: 
-365}, + test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: -5, want: -365}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: -3, want: -219}, + test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: -3, want: -219}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 3, want: 219}, + test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 3, want: 219}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 5, want: 365}, + test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 5, want: 365}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 7, want: 511}, + test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 7, want: 511}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 9, want: 657}, + test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 9, want: 657}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 10, want: 730}, + test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 10, want: 730}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 11, want: 803}, + test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 11, want: 803}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 13, want: 949}, + test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 13, want: 949}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 19, want: 1387}, + test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 19, want: 1387}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 21, want: 1533}, + test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 21, want: 1533}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 25, want: 1825}, + test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 25, want: 1825}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 27, want: 1971}, + test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 27, want: 1971}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 37, want: 2701}, + test_int64{fn: mul_int64_73, fnname: 
"mul_int64_73", in: 37, want: 2701}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 41, want: 2993}, + test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 41, want: 2993}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 45, want: 3285}, + test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 45, want: 3285}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 73, want: 5329}, + test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 73, want: 5329}, + test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 81, want: 5913}, + test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 81, want: 5913}, + test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: -9, want: -729}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: -9, want: -729}, + test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: -5, want: -405}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: -5, want: -405}, + test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: -3, want: -243}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: -3, want: -243}, + test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 3, want: 243}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 3, want: 243}, + test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 5, want: 405}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 5, want: 405}, + test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 7, want: 567}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 7, want: 567}, + test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 9, want: 729}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 9, want: 729}, + test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 10, want: 810}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 10, want: 810}, + test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 11, want: 891}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 11, want: 891}, + 
test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 13, want: 1053}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 13, want: 1053}, + test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 19, want: 1539}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 19, want: 1539}, + test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 21, want: 1701}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 21, want: 1701}, + test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 25, want: 2025}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 25, want: 2025}, + test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 27, want: 2187}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 27, want: 2187}, + test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 37, want: 2997}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 37, want: 2997}, + test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 41, want: 3321}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 41, want: 3321}, + test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 45, want: 3645}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 45, want: 3645}, + test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 73, want: 5913}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 73, want: 5913}, + test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 81, want: 6561}, + test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 81, want: 6561}} + +type test_uint32 struct { + fn func(uint32) uint32 + fnname string + in uint32 + want uint32 +} + +var tests_uint32 = []test_uint32{ + + test_uint32{fn: add_0_uint32, fnname: "add_0_uint32", in: 0, want: 0}, + test_uint32{fn: add_uint32_0, fnname: "add_uint32_0", in: 0, want: 0}, + test_uint32{fn: add_0_uint32, fnname: "add_0_uint32", in: 1, want: 1}, + test_uint32{fn: add_uint32_0, fnname: "add_uint32_0", in: 1, want: 1}, + test_uint32{fn: add_0_uint32, fnname: 
"add_0_uint32", in: 4294967295, want: 4294967295}, + test_uint32{fn: add_uint32_0, fnname: "add_uint32_0", in: 4294967295, want: 4294967295}, + test_uint32{fn: add_1_uint32, fnname: "add_1_uint32", in: 0, want: 1}, + test_uint32{fn: add_uint32_1, fnname: "add_uint32_1", in: 0, want: 1}, + test_uint32{fn: add_1_uint32, fnname: "add_1_uint32", in: 1, want: 2}, + test_uint32{fn: add_uint32_1, fnname: "add_uint32_1", in: 1, want: 2}, + test_uint32{fn: add_1_uint32, fnname: "add_1_uint32", in: 4294967295, want: 0}, + test_uint32{fn: add_uint32_1, fnname: "add_uint32_1", in: 4294967295, want: 0}, + test_uint32{fn: add_4294967295_uint32, fnname: "add_4294967295_uint32", in: 0, want: 4294967295}, + test_uint32{fn: add_uint32_4294967295, fnname: "add_uint32_4294967295", in: 0, want: 4294967295}, + test_uint32{fn: add_4294967295_uint32, fnname: "add_4294967295_uint32", in: 1, want: 0}, + test_uint32{fn: add_uint32_4294967295, fnname: "add_uint32_4294967295", in: 1, want: 0}, + test_uint32{fn: add_4294967295_uint32, fnname: "add_4294967295_uint32", in: 4294967295, want: 4294967294}, + test_uint32{fn: add_uint32_4294967295, fnname: "add_uint32_4294967295", in: 4294967295, want: 4294967294}, + test_uint32{fn: sub_0_uint32, fnname: "sub_0_uint32", in: 0, want: 0}, + test_uint32{fn: sub_uint32_0, fnname: "sub_uint32_0", in: 0, want: 0}, + test_uint32{fn: sub_0_uint32, fnname: "sub_0_uint32", in: 1, want: 4294967295}, + test_uint32{fn: sub_uint32_0, fnname: "sub_uint32_0", in: 1, want: 1}, + test_uint32{fn: sub_0_uint32, fnname: "sub_0_uint32", in: 4294967295, want: 1}, + test_uint32{fn: sub_uint32_0, fnname: "sub_uint32_0", in: 4294967295, want: 4294967295}, + test_uint32{fn: sub_1_uint32, fnname: "sub_1_uint32", in: 0, want: 1}, + test_uint32{fn: sub_uint32_1, fnname: "sub_uint32_1", in: 0, want: 4294967295}, + test_uint32{fn: sub_1_uint32, fnname: "sub_1_uint32", in: 1, want: 0}, + test_uint32{fn: sub_uint32_1, fnname: "sub_uint32_1", in: 1, want: 0}, + test_uint32{fn: 
sub_1_uint32, fnname: "sub_1_uint32", in: 4294967295, want: 2}, + test_uint32{fn: sub_uint32_1, fnname: "sub_uint32_1", in: 4294967295, want: 4294967294}, + test_uint32{fn: sub_4294967295_uint32, fnname: "sub_4294967295_uint32", in: 0, want: 4294967295}, + test_uint32{fn: sub_uint32_4294967295, fnname: "sub_uint32_4294967295", in: 0, want: 1}, + test_uint32{fn: sub_4294967295_uint32, fnname: "sub_4294967295_uint32", in: 1, want: 4294967294}, + test_uint32{fn: sub_uint32_4294967295, fnname: "sub_uint32_4294967295", in: 1, want: 2}, + test_uint32{fn: sub_4294967295_uint32, fnname: "sub_4294967295_uint32", in: 4294967295, want: 0}, + test_uint32{fn: sub_uint32_4294967295, fnname: "sub_uint32_4294967295", in: 4294967295, want: 0}, + test_uint32{fn: div_0_uint32, fnname: "div_0_uint32", in: 1, want: 0}, + test_uint32{fn: div_0_uint32, fnname: "div_0_uint32", in: 4294967295, want: 0}, + test_uint32{fn: div_uint32_1, fnname: "div_uint32_1", in: 0, want: 0}, + test_uint32{fn: div_1_uint32, fnname: "div_1_uint32", in: 1, want: 1}, + test_uint32{fn: div_uint32_1, fnname: "div_uint32_1", in: 1, want: 1}, + test_uint32{fn: div_1_uint32, fnname: "div_1_uint32", in: 4294967295, want: 0}, + test_uint32{fn: div_uint32_1, fnname: "div_uint32_1", in: 4294967295, want: 4294967295}, + test_uint32{fn: div_uint32_4294967295, fnname: "div_uint32_4294967295", in: 0, want: 0}, + test_uint32{fn: div_4294967295_uint32, fnname: "div_4294967295_uint32", in: 1, want: 4294967295}, + test_uint32{fn: div_uint32_4294967295, fnname: "div_uint32_4294967295", in: 1, want: 0}, + test_uint32{fn: div_4294967295_uint32, fnname: "div_4294967295_uint32", in: 4294967295, want: 1}, + test_uint32{fn: div_uint32_4294967295, fnname: "div_uint32_4294967295", in: 4294967295, want: 1}, + test_uint32{fn: mul_0_uint32, fnname: "mul_0_uint32", in: 0, want: 0}, + test_uint32{fn: mul_uint32_0, fnname: "mul_uint32_0", in: 0, want: 0}, + test_uint32{fn: mul_0_uint32, fnname: "mul_0_uint32", in: 1, want: 0}, + 
test_uint32{fn: mul_uint32_0, fnname: "mul_uint32_0", in: 1, want: 0}, + test_uint32{fn: mul_0_uint32, fnname: "mul_0_uint32", in: 4294967295, want: 0}, + test_uint32{fn: mul_uint32_0, fnname: "mul_uint32_0", in: 4294967295, want: 0}, + test_uint32{fn: mul_1_uint32, fnname: "mul_1_uint32", in: 0, want: 0}, + test_uint32{fn: mul_uint32_1, fnname: "mul_uint32_1", in: 0, want: 0}, + test_uint32{fn: mul_1_uint32, fnname: "mul_1_uint32", in: 1, want: 1}, + test_uint32{fn: mul_uint32_1, fnname: "mul_uint32_1", in: 1, want: 1}, + test_uint32{fn: mul_1_uint32, fnname: "mul_1_uint32", in: 4294967295, want: 4294967295}, + test_uint32{fn: mul_uint32_1, fnname: "mul_uint32_1", in: 4294967295, want: 4294967295}, + test_uint32{fn: mul_4294967295_uint32, fnname: "mul_4294967295_uint32", in: 0, want: 0}, + test_uint32{fn: mul_uint32_4294967295, fnname: "mul_uint32_4294967295", in: 0, want: 0}, + test_uint32{fn: mul_4294967295_uint32, fnname: "mul_4294967295_uint32", in: 1, want: 4294967295}, + test_uint32{fn: mul_uint32_4294967295, fnname: "mul_uint32_4294967295", in: 1, want: 4294967295}, + test_uint32{fn: mul_4294967295_uint32, fnname: "mul_4294967295_uint32", in: 4294967295, want: 1}, + test_uint32{fn: mul_uint32_4294967295, fnname: "mul_uint32_4294967295", in: 4294967295, want: 1}, + test_uint32{fn: lsh_0_uint32, fnname: "lsh_0_uint32", in: 0, want: 0}, + test_uint32{fn: lsh_uint32_0, fnname: "lsh_uint32_0", in: 0, want: 0}, + test_uint32{fn: lsh_0_uint32, fnname: "lsh_0_uint32", in: 1, want: 0}, + test_uint32{fn: lsh_uint32_0, fnname: "lsh_uint32_0", in: 1, want: 1}, + test_uint32{fn: lsh_0_uint32, fnname: "lsh_0_uint32", in: 4294967295, want: 0}, + test_uint32{fn: lsh_uint32_0, fnname: "lsh_uint32_0", in: 4294967295, want: 4294967295}, + test_uint32{fn: lsh_1_uint32, fnname: "lsh_1_uint32", in: 0, want: 1}, + test_uint32{fn: lsh_uint32_1, fnname: "lsh_uint32_1", in: 0, want: 0}, + test_uint32{fn: lsh_1_uint32, fnname: "lsh_1_uint32", in: 1, want: 2}, + test_uint32{fn: 
lsh_uint32_1, fnname: "lsh_uint32_1", in: 1, want: 2}, + test_uint32{fn: lsh_1_uint32, fnname: "lsh_1_uint32", in: 4294967295, want: 0}, + test_uint32{fn: lsh_uint32_1, fnname: "lsh_uint32_1", in: 4294967295, want: 4294967294}, + test_uint32{fn: lsh_4294967295_uint32, fnname: "lsh_4294967295_uint32", in: 0, want: 4294967295}, + test_uint32{fn: lsh_uint32_4294967295, fnname: "lsh_uint32_4294967295", in: 0, want: 0}, + test_uint32{fn: lsh_4294967295_uint32, fnname: "lsh_4294967295_uint32", in: 1, want: 4294967294}, + test_uint32{fn: lsh_uint32_4294967295, fnname: "lsh_uint32_4294967295", in: 1, want: 0}, + test_uint32{fn: lsh_4294967295_uint32, fnname: "lsh_4294967295_uint32", in: 4294967295, want: 0}, + test_uint32{fn: lsh_uint32_4294967295, fnname: "lsh_uint32_4294967295", in: 4294967295, want: 0}, + test_uint32{fn: rsh_0_uint32, fnname: "rsh_0_uint32", in: 0, want: 0}, + test_uint32{fn: rsh_uint32_0, fnname: "rsh_uint32_0", in: 0, want: 0}, + test_uint32{fn: rsh_0_uint32, fnname: "rsh_0_uint32", in: 1, want: 0}, + test_uint32{fn: rsh_uint32_0, fnname: "rsh_uint32_0", in: 1, want: 1}, + test_uint32{fn: rsh_0_uint32, fnname: "rsh_0_uint32", in: 4294967295, want: 0}, + test_uint32{fn: rsh_uint32_0, fnname: "rsh_uint32_0", in: 4294967295, want: 4294967295}, + test_uint32{fn: rsh_1_uint32, fnname: "rsh_1_uint32", in: 0, want: 1}, + test_uint32{fn: rsh_uint32_1, fnname: "rsh_uint32_1", in: 0, want: 0}, + test_uint32{fn: rsh_1_uint32, fnname: "rsh_1_uint32", in: 1, want: 0}, + test_uint32{fn: rsh_uint32_1, fnname: "rsh_uint32_1", in: 1, want: 0}, + test_uint32{fn: rsh_1_uint32, fnname: "rsh_1_uint32", in: 4294967295, want: 0}, + test_uint32{fn: rsh_uint32_1, fnname: "rsh_uint32_1", in: 4294967295, want: 2147483647}, + test_uint32{fn: rsh_4294967295_uint32, fnname: "rsh_4294967295_uint32", in: 0, want: 4294967295}, + test_uint32{fn: rsh_uint32_4294967295, fnname: "rsh_uint32_4294967295", in: 0, want: 0}, + test_uint32{fn: rsh_4294967295_uint32, fnname: 
"rsh_4294967295_uint32", in: 1, want: 2147483647}, + test_uint32{fn: rsh_uint32_4294967295, fnname: "rsh_uint32_4294967295", in: 1, want: 0}, + test_uint32{fn: rsh_4294967295_uint32, fnname: "rsh_4294967295_uint32", in: 4294967295, want: 0}, + test_uint32{fn: rsh_uint32_4294967295, fnname: "rsh_uint32_4294967295", in: 4294967295, want: 0}, + test_uint32{fn: mod_0_uint32, fnname: "mod_0_uint32", in: 1, want: 0}, + test_uint32{fn: mod_0_uint32, fnname: "mod_0_uint32", in: 4294967295, want: 0}, + test_uint32{fn: mod_uint32_1, fnname: "mod_uint32_1", in: 0, want: 0}, + test_uint32{fn: mod_1_uint32, fnname: "mod_1_uint32", in: 1, want: 0}, + test_uint32{fn: mod_uint32_1, fnname: "mod_uint32_1", in: 1, want: 0}, + test_uint32{fn: mod_1_uint32, fnname: "mod_1_uint32", in: 4294967295, want: 1}, + test_uint32{fn: mod_uint32_1, fnname: "mod_uint32_1", in: 4294967295, want: 0}, + test_uint32{fn: mod_uint32_4294967295, fnname: "mod_uint32_4294967295", in: 0, want: 0}, + test_uint32{fn: mod_4294967295_uint32, fnname: "mod_4294967295_uint32", in: 1, want: 0}, + test_uint32{fn: mod_uint32_4294967295, fnname: "mod_uint32_4294967295", in: 1, want: 1}, + test_uint32{fn: mod_4294967295_uint32, fnname: "mod_4294967295_uint32", in: 4294967295, want: 0}, + test_uint32{fn: mod_uint32_4294967295, fnname: "mod_uint32_4294967295", in: 4294967295, want: 0}, + test_uint32{fn: and_0_uint32, fnname: "and_0_uint32", in: 0, want: 0}, + test_uint32{fn: and_uint32_0, fnname: "and_uint32_0", in: 0, want: 0}, + test_uint32{fn: and_0_uint32, fnname: "and_0_uint32", in: 1, want: 0}, + test_uint32{fn: and_uint32_0, fnname: "and_uint32_0", in: 1, want: 0}, + test_uint32{fn: and_0_uint32, fnname: "and_0_uint32", in: 4294967295, want: 0}, + test_uint32{fn: and_uint32_0, fnname: "and_uint32_0", in: 4294967295, want: 0}, + test_uint32{fn: and_1_uint32, fnname: "and_1_uint32", in: 0, want: 0}, + test_uint32{fn: and_uint32_1, fnname: "and_uint32_1", in: 0, want: 0}, + test_uint32{fn: and_1_uint32, fnname: 
"and_1_uint32", in: 1, want: 1}, + test_uint32{fn: and_uint32_1, fnname: "and_uint32_1", in: 1, want: 1}, + test_uint32{fn: and_1_uint32, fnname: "and_1_uint32", in: 4294967295, want: 1}, + test_uint32{fn: and_uint32_1, fnname: "and_uint32_1", in: 4294967295, want: 1}, + test_uint32{fn: and_4294967295_uint32, fnname: "and_4294967295_uint32", in: 0, want: 0}, + test_uint32{fn: and_uint32_4294967295, fnname: "and_uint32_4294967295", in: 0, want: 0}, + test_uint32{fn: and_4294967295_uint32, fnname: "and_4294967295_uint32", in: 1, want: 1}, + test_uint32{fn: and_uint32_4294967295, fnname: "and_uint32_4294967295", in: 1, want: 1}, + test_uint32{fn: and_4294967295_uint32, fnname: "and_4294967295_uint32", in: 4294967295, want: 4294967295}, + test_uint32{fn: and_uint32_4294967295, fnname: "and_uint32_4294967295", in: 4294967295, want: 4294967295}, + test_uint32{fn: or_0_uint32, fnname: "or_0_uint32", in: 0, want: 0}, + test_uint32{fn: or_uint32_0, fnname: "or_uint32_0", in: 0, want: 0}, + test_uint32{fn: or_0_uint32, fnname: "or_0_uint32", in: 1, want: 1}, + test_uint32{fn: or_uint32_0, fnname: "or_uint32_0", in: 1, want: 1}, + test_uint32{fn: or_0_uint32, fnname: "or_0_uint32", in: 4294967295, want: 4294967295}, + test_uint32{fn: or_uint32_0, fnname: "or_uint32_0", in: 4294967295, want: 4294967295}, + test_uint32{fn: or_1_uint32, fnname: "or_1_uint32", in: 0, want: 1}, + test_uint32{fn: or_uint32_1, fnname: "or_uint32_1", in: 0, want: 1}, + test_uint32{fn: or_1_uint32, fnname: "or_1_uint32", in: 1, want: 1}, + test_uint32{fn: or_uint32_1, fnname: "or_uint32_1", in: 1, want: 1}, + test_uint32{fn: or_1_uint32, fnname: "or_1_uint32", in: 4294967295, want: 4294967295}, + test_uint32{fn: or_uint32_1, fnname: "or_uint32_1", in: 4294967295, want: 4294967295}, + test_uint32{fn: or_4294967295_uint32, fnname: "or_4294967295_uint32", in: 0, want: 4294967295}, + test_uint32{fn: or_uint32_4294967295, fnname: "or_uint32_4294967295", in: 0, want: 4294967295}, + test_uint32{fn: 
or_4294967295_uint32, fnname: "or_4294967295_uint32", in: 1, want: 4294967295}, + test_uint32{fn: or_uint32_4294967295, fnname: "or_uint32_4294967295", in: 1, want: 4294967295}, + test_uint32{fn: or_4294967295_uint32, fnname: "or_4294967295_uint32", in: 4294967295, want: 4294967295}, + test_uint32{fn: or_uint32_4294967295, fnname: "or_uint32_4294967295", in: 4294967295, want: 4294967295}, + test_uint32{fn: xor_0_uint32, fnname: "xor_0_uint32", in: 0, want: 0}, + test_uint32{fn: xor_uint32_0, fnname: "xor_uint32_0", in: 0, want: 0}, + test_uint32{fn: xor_0_uint32, fnname: "xor_0_uint32", in: 1, want: 1}, + test_uint32{fn: xor_uint32_0, fnname: "xor_uint32_0", in: 1, want: 1}, + test_uint32{fn: xor_0_uint32, fnname: "xor_0_uint32", in: 4294967295, want: 4294967295}, + test_uint32{fn: xor_uint32_0, fnname: "xor_uint32_0", in: 4294967295, want: 4294967295}, + test_uint32{fn: xor_1_uint32, fnname: "xor_1_uint32", in: 0, want: 1}, + test_uint32{fn: xor_uint32_1, fnname: "xor_uint32_1", in: 0, want: 1}, + test_uint32{fn: xor_1_uint32, fnname: "xor_1_uint32", in: 1, want: 0}, + test_uint32{fn: xor_uint32_1, fnname: "xor_uint32_1", in: 1, want: 0}, + test_uint32{fn: xor_1_uint32, fnname: "xor_1_uint32", in: 4294967295, want: 4294967294}, + test_uint32{fn: xor_uint32_1, fnname: "xor_uint32_1", in: 4294967295, want: 4294967294}, + test_uint32{fn: xor_4294967295_uint32, fnname: "xor_4294967295_uint32", in: 0, want: 4294967295}, + test_uint32{fn: xor_uint32_4294967295, fnname: "xor_uint32_4294967295", in: 0, want: 4294967295}, + test_uint32{fn: xor_4294967295_uint32, fnname: "xor_4294967295_uint32", in: 1, want: 4294967294}, + test_uint32{fn: xor_uint32_4294967295, fnname: "xor_uint32_4294967295", in: 1, want: 4294967294}, + test_uint32{fn: xor_4294967295_uint32, fnname: "xor_4294967295_uint32", in: 4294967295, want: 0}, + test_uint32{fn: xor_uint32_4294967295, fnname: "xor_uint32_4294967295", in: 4294967295, want: 0}} + +type test_uint32mul struct { + fn func(uint32) uint32 + 
fnname string + in uint32 + want uint32 +} + +var tests_uint32mul = []test_uint32{ + + test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 3, want: 9}, + test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 3, want: 9}, + test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 5, want: 15}, + test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 5, want: 15}, + test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 7, want: 21}, + test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 7, want: 21}, + test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 9, want: 27}, + test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 9, want: 27}, + test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 10, want: 30}, + test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 10, want: 30}, + test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 11, want: 33}, + test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 11, want: 33}, + test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 13, want: 39}, + test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 13, want: 39}, + test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 19, want: 57}, + test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 19, want: 57}, + test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 21, want: 63}, + test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 21, want: 63}, + test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 25, want: 75}, + test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 25, want: 75}, + test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 27, want: 81}, + test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 27, want: 81}, + test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 37, want: 111}, + test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 37, want: 111}, + test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 41, want: 123}, + test_uint32{fn: mul_uint32_3, fnname: 
"mul_uint32_3", in: 41, want: 123}, + test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 45, want: 135}, + test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 45, want: 135}, + test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 73, want: 219}, + test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 73, want: 219}, + test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 81, want: 243}, + test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 81, want: 243}, + test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 3, want: 15}, + test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 3, want: 15}, + test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 5, want: 25}, + test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 5, want: 25}, + test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 7, want: 35}, + test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 7, want: 35}, + test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 9, want: 45}, + test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 9, want: 45}, + test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 10, want: 50}, + test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 10, want: 50}, + test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 11, want: 55}, + test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 11, want: 55}, + test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 13, want: 65}, + test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 13, want: 65}, + test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 19, want: 95}, + test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 19, want: 95}, + test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 21, want: 105}, + test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 21, want: 105}, + test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 25, want: 125}, + test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 25, want: 125}, + 
test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 27, want: 135}, + test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 27, want: 135}, + test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 37, want: 185}, + test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 37, want: 185}, + test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 41, want: 205}, + test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 41, want: 205}, + test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 45, want: 225}, + test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 45, want: 225}, + test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 73, want: 365}, + test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 73, want: 365}, + test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 81, want: 405}, + test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 81, want: 405}, + test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 3, want: 21}, + test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 3, want: 21}, + test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 5, want: 35}, + test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 5, want: 35}, + test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 7, want: 49}, + test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 7, want: 49}, + test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 9, want: 63}, + test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 9, want: 63}, + test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 10, want: 70}, + test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 10, want: 70}, + test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 11, want: 77}, + test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 11, want: 77}, + test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 13, want: 91}, + test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 13, want: 91}, + test_uint32{fn: mul_7_uint32, fnname: 
"mul_7_uint32", in: 19, want: 133}, + test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 19, want: 133}, + test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 21, want: 147}, + test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 21, want: 147}, + test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 25, want: 175}, + test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 25, want: 175}, + test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 27, want: 189}, + test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 27, want: 189}, + test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 37, want: 259}, + test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 37, want: 259}, + test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 41, want: 287}, + test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 41, want: 287}, + test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 45, want: 315}, + test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 45, want: 315}, + test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 73, want: 511}, + test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 73, want: 511}, + test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 81, want: 567}, + test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 81, want: 567}, + test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 3, want: 27}, + test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 3, want: 27}, + test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 5, want: 45}, + test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 5, want: 45}, + test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 7, want: 63}, + test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 7, want: 63}, + test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 9, want: 81}, + test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 9, want: 81}, + test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 10, want: 90}, + 
test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 10, want: 90}, + test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 11, want: 99}, + test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 11, want: 99}, + test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 13, want: 117}, + test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 13, want: 117}, + test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 19, want: 171}, + test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 19, want: 171}, + test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 21, want: 189}, + test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 21, want: 189}, + test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 25, want: 225}, + test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 25, want: 225}, + test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 27, want: 243}, + test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 27, want: 243}, + test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 37, want: 333}, + test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 37, want: 333}, + test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 41, want: 369}, + test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 41, want: 369}, + test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 45, want: 405}, + test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 45, want: 405}, + test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 73, want: 657}, + test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 73, want: 657}, + test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 81, want: 729}, + test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 81, want: 729}, + test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 3, want: 30}, + test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 3, want: 30}, + test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 5, want: 50}, + test_uint32{fn: 
mul_uint32_10, fnname: "mul_uint32_10", in: 5, want: 50}, + test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 7, want: 70}, + test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 7, want: 70}, + test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 9, want: 90}, + test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 9, want: 90}, + test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 10, want: 100}, + test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 10, want: 100}, + test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 11, want: 110}, + test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 11, want: 110}, + test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 13, want: 130}, + test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 13, want: 130}, + test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 19, want: 190}, + test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 19, want: 190}, + test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 21, want: 210}, + test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 21, want: 210}, + test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 25, want: 250}, + test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 25, want: 250}, + test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 27, want: 270}, + test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 27, want: 270}, + test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 37, want: 370}, + test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 37, want: 370}, + test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 41, want: 410}, + test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 41, want: 410}, + test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 45, want: 450}, + test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 45, want: 450}, + test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 73, want: 730}, 
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 73, want: 730}, + test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 81, want: 810}, + test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 81, want: 810}, + test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 3, want: 33}, + test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 3, want: 33}, + test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 5, want: 55}, + test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 5, want: 55}, + test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 7, want: 77}, + test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 7, want: 77}, + test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 9, want: 99}, + test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 9, want: 99}, + test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 10, want: 110}, + test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 10, want: 110}, + test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 11, want: 121}, + test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 11, want: 121}, + test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 13, want: 143}, + test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 13, want: 143}, + test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 19, want: 209}, + test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 19, want: 209}, + test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 21, want: 231}, + test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 21, want: 231}, + test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 25, want: 275}, + test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 25, want: 275}, + test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 27, want: 297}, + test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 27, want: 297}, + test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 37, 
want: 407}, + test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 37, want: 407}, + test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 41, want: 451}, + test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 41, want: 451}, + test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 45, want: 495}, + test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 45, want: 495}, + test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 73, want: 803}, + test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 73, want: 803}, + test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 81, want: 891}, + test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 81, want: 891}, + test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 3, want: 39}, + test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 3, want: 39}, + test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 5, want: 65}, + test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 5, want: 65}, + test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 7, want: 91}, + test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 7, want: 91}, + test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 9, want: 117}, + test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 9, want: 117}, + test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 10, want: 130}, + test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 10, want: 130}, + test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 11, want: 143}, + test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 11, want: 143}, + test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 13, want: 169}, + test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 13, want: 169}, + test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 19, want: 247}, + test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 19, want: 247}, + test_uint32{fn: mul_13_uint32, fnname: 
"mul_13_uint32", in: 21, want: 273}, + test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 21, want: 273}, + test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 25, want: 325}, + test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 25, want: 325}, + test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 27, want: 351}, + test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 27, want: 351}, + test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 37, want: 481}, + test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 37, want: 481}, + test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 41, want: 533}, + test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 41, want: 533}, + test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 45, want: 585}, + test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 45, want: 585}, + test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 73, want: 949}, + test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 73, want: 949}, + test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 81, want: 1053}, + test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 81, want: 1053}, + test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 3, want: 57}, + test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 3, want: 57}, + test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 5, want: 95}, + test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 5, want: 95}, + test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 7, want: 133}, + test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 7, want: 133}, + test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 9, want: 171}, + test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 9, want: 171}, + test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 10, want: 190}, + test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 10, want: 190}, + test_uint32{fn: 
mul_19_uint32, fnname: "mul_19_uint32", in: 11, want: 209}, + test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 11, want: 209}, + test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 13, want: 247}, + test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 13, want: 247}, + test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 19, want: 361}, + test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 19, want: 361}, + test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 21, want: 399}, + test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 21, want: 399}, + test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 25, want: 475}, + test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 25, want: 475}, + test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 27, want: 513}, + test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 27, want: 513}, + test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 37, want: 703}, + test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 37, want: 703}, + test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 41, want: 779}, + test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 41, want: 779}, + test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 45, want: 855}, + test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 45, want: 855}, + test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 73, want: 1387}, + test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 73, want: 1387}, + test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 81, want: 1539}, + test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 81, want: 1539}, + test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 3, want: 63}, + test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 3, want: 63}, + test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 5, want: 105}, + test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 5, 
want: 105}, + test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 7, want: 147}, + test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 7, want: 147}, + test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 9, want: 189}, + test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 9, want: 189}, + test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 10, want: 210}, + test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 10, want: 210}, + test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 11, want: 231}, + test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 11, want: 231}, + test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 13, want: 273}, + test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 13, want: 273}, + test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 19, want: 399}, + test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 19, want: 399}, + test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 21, want: 441}, + test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 21, want: 441}, + test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 25, want: 525}, + test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 25, want: 525}, + test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 27, want: 567}, + test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 27, want: 567}, + test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 37, want: 777}, + test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 37, want: 777}, + test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 41, want: 861}, + test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 41, want: 861}, + test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 45, want: 945}, + test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 45, want: 945}, + test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 73, want: 1533}, + test_uint32{fn: mul_uint32_21, fnname: 
"mul_uint32_21", in: 73, want: 1533}, + test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 81, want: 1701}, + test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 81, want: 1701}, + test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 3, want: 75}, + test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 3, want: 75}, + test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 5, want: 125}, + test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 5, want: 125}, + test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 7, want: 175}, + test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 7, want: 175}, + test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 9, want: 225}, + test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 9, want: 225}, + test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 10, want: 250}, + test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 10, want: 250}, + test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 11, want: 275}, + test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 11, want: 275}, + test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 13, want: 325}, + test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 13, want: 325}, + test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 19, want: 475}, + test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 19, want: 475}, + test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 21, want: 525}, + test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 21, want: 525}, + test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 25, want: 625}, + test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 25, want: 625}, + test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 27, want: 675}, + test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 27, want: 675}, + test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 37, want: 925}, + test_uint32{fn: 
mul_uint32_25, fnname: "mul_uint32_25", in: 37, want: 925}, + test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 41, want: 1025}, + test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 41, want: 1025}, + test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 45, want: 1125}, + test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 45, want: 1125}, + test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 73, want: 1825}, + test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 73, want: 1825}, + test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 81, want: 2025}, + test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 81, want: 2025}, + test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 3, want: 81}, + test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 3, want: 81}, + test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 5, want: 135}, + test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 5, want: 135}, + test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 7, want: 189}, + test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 7, want: 189}, + test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 9, want: 243}, + test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 9, want: 243}, + test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 10, want: 270}, + test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 10, want: 270}, + test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 11, want: 297}, + test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 11, want: 297}, + test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 13, want: 351}, + test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 13, want: 351}, + test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 19, want: 513}, + test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 19, want: 513}, + test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 21, 
want: 567}, + test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 21, want: 567}, + test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 25, want: 675}, + test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 25, want: 675}, + test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 27, want: 729}, + test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 27, want: 729}, + test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 37, want: 999}, + test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 37, want: 999}, + test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 41, want: 1107}, + test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 41, want: 1107}, + test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 45, want: 1215}, + test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 45, want: 1215}, + test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 73, want: 1971}, + test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 73, want: 1971}, + test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 81, want: 2187}, + test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 81, want: 2187}, + test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 3, want: 111}, + test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 3, want: 111}, + test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 5, want: 185}, + test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 5, want: 185}, + test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 7, want: 259}, + test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 7, want: 259}, + test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 9, want: 333}, + test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 9, want: 333}, + test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 10, want: 370}, + test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 10, want: 370}, + test_uint32{fn: mul_37_uint32, 
fnname: "mul_37_uint32", in: 11, want: 407}, + test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 11, want: 407}, + test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 13, want: 481}, + test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 13, want: 481}, + test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 19, want: 703}, + test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 19, want: 703}, + test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 21, want: 777}, + test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 21, want: 777}, + test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 25, want: 925}, + test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 25, want: 925}, + test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 27, want: 999}, + test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 27, want: 999}, + test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 37, want: 1369}, + test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 37, want: 1369}, + test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 41, want: 1517}, + test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 41, want: 1517}, + test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 45, want: 1665}, + test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 45, want: 1665}, + test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 73, want: 2701}, + test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 73, want: 2701}, + test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 81, want: 2997}, + test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 81, want: 2997}, + test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 3, want: 123}, + test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 3, want: 123}, + test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 5, want: 205}, + test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 5, want: 
205}, + test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 7, want: 287}, + test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 7, want: 287}, + test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 9, want: 369}, + test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 9, want: 369}, + test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 10, want: 410}, + test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 10, want: 410}, + test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 11, want: 451}, + test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 11, want: 451}, + test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 13, want: 533}, + test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 13, want: 533}, + test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 19, want: 779}, + test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 19, want: 779}, + test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 21, want: 861}, + test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 21, want: 861}, + test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 25, want: 1025}, + test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 25, want: 1025}, + test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 27, want: 1107}, + test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 27, want: 1107}, + test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 37, want: 1517}, + test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 37, want: 1517}, + test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 41, want: 1681}, + test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 41, want: 1681}, + test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 45, want: 1845}, + test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 45, want: 1845}, + test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 73, want: 2993}, + test_uint32{fn: mul_uint32_41, 
fnname: "mul_uint32_41", in: 73, want: 2993}, + test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 81, want: 3321}, + test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 81, want: 3321}, + test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 3, want: 135}, + test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 3, want: 135}, + test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 5, want: 225}, + test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 5, want: 225}, + test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 7, want: 315}, + test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 7, want: 315}, + test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 9, want: 405}, + test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 9, want: 405}, + test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 10, want: 450}, + test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 10, want: 450}, + test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 11, want: 495}, + test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 11, want: 495}, + test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 13, want: 585}, + test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 13, want: 585}, + test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 19, want: 855}, + test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 19, want: 855}, + test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 21, want: 945}, + test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 21, want: 945}, + test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 25, want: 1125}, + test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 25, want: 1125}, + test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 27, want: 1215}, + test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 27, want: 1215}, + test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 37, want: 1665}, + 
test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 37, want: 1665}, + test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 41, want: 1845}, + test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 41, want: 1845}, + test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 45, want: 2025}, + test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 45, want: 2025}, + test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 73, want: 3285}, + test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 73, want: 3285}, + test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 81, want: 3645}, + test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 81, want: 3645}, + test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 3, want: 219}, + test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 3, want: 219}, + test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 5, want: 365}, + test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 5, want: 365}, + test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 7, want: 511}, + test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 7, want: 511}, + test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 9, want: 657}, + test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 9, want: 657}, + test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 10, want: 730}, + test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 10, want: 730}, + test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 11, want: 803}, + test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 11, want: 803}, + test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 13, want: 949}, + test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 13, want: 949}, + test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 19, want: 1387}, + test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 19, want: 1387}, + test_uint32{fn: mul_73_uint32, fnname: 
"mul_73_uint32", in: 21, want: 1533}, + test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 21, want: 1533}, + test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 25, want: 1825}, + test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 25, want: 1825}, + test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 27, want: 1971}, + test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 27, want: 1971}, + test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 37, want: 2701}, + test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 37, want: 2701}, + test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 41, want: 2993}, + test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 41, want: 2993}, + test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 45, want: 3285}, + test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 45, want: 3285}, + test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 73, want: 5329}, + test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 73, want: 5329}, + test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 81, want: 5913}, + test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 81, want: 5913}, + test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 3, want: 243}, + test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 3, want: 243}, + test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 5, want: 405}, + test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 5, want: 405}, + test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 7, want: 567}, + test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 7, want: 567}, + test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 9, want: 729}, + test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 9, want: 729}, + test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 10, want: 810}, + test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 10, want: 810}, + 
test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 11, want: 891}, + test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 11, want: 891}, + test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 13, want: 1053}, + test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 13, want: 1053}, + test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 19, want: 1539}, + test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 19, want: 1539}, + test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 21, want: 1701}, + test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 21, want: 1701}, + test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 25, want: 2025}, + test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 25, want: 2025}, + test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 27, want: 2187}, + test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 27, want: 2187}, + test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 37, want: 2997}, + test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 37, want: 2997}, + test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 41, want: 3321}, + test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 41, want: 3321}, + test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 45, want: 3645}, + test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 45, want: 3645}, + test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 73, want: 5913}, + test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 73, want: 5913}, + test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 81, want: 6561}, + test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 81, want: 6561}} + +type test_int32 struct { + fn func(int32) int32 + fnname string + in int32 + want int32 +} + +var tests_int32 = []test_int32{ + + test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: -2147483648, want: 0}, + test_int32{fn: 
add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: -2147483648, want: 0}, + test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: -2147483647, want: 1}, + test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: -2147483647, want: 1}, + test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: -1, want: 2147483647}, + test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: -1, want: 2147483647}, + test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: 0, want: -2147483648}, + test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: 0, want: -2147483648}, + test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: 1, want: -2147483647}, + test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: 1, want: -2147483647}, + test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: 2147483647, want: -1}, + test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: 2147483647, want: -1}, + test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: -2147483648, want: 1}, + test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: -2147483648, want: 1}, + test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: -2147483647, want: 2}, + test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: -2147483647, want: 2}, + test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: -1, want: -2147483648}, + test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: -1, want: -2147483648}, + test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: 0, want: -2147483647}, + test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: 0, want: -2147483647}, + test_int32{fn: 
add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: 1, want: -2147483646}, + test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: 1, want: -2147483646}, + test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: 2147483647, want: 0}, + test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: 2147483647, want: 0}, + test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: -2147483648, want: 2147483647}, + test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: -2147483648, want: 2147483647}, + test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: -2147483647, want: -2147483648}, + test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: -2147483647, want: -2147483648}, + test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: -1, want: -2}, + test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: -1, want: -2}, + test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: 0, want: -1}, + test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: 0, want: -1}, + test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: 1, want: 0}, + test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: 1, want: 0}, + test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: 2147483647, want: 2147483646}, + test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: 2147483647, want: 2147483646}, + test_int32{fn: add_0_int32, fnname: "add_0_int32", in: -2147483648, want: -2147483648}, + test_int32{fn: add_int32_0, fnname: "add_int32_0", in: -2147483648, want: -2147483648}, + test_int32{fn: add_0_int32, fnname: "add_0_int32", in: -2147483647, want: -2147483647}, + test_int32{fn: add_int32_0, fnname: "add_int32_0", in: -2147483647, want: -2147483647}, + test_int32{fn: add_0_int32, fnname: "add_0_int32", in: -1, want: -1}, + test_int32{fn: add_int32_0, fnname: "add_int32_0", in: -1, want: -1}, + test_int32{fn: add_0_int32, fnname: "add_0_int32", in: 0, want: 
0}, + test_int32{fn: add_int32_0, fnname: "add_int32_0", in: 0, want: 0}, + test_int32{fn: add_0_int32, fnname: "add_0_int32", in: 1, want: 1}, + test_int32{fn: add_int32_0, fnname: "add_int32_0", in: 1, want: 1}, + test_int32{fn: add_0_int32, fnname: "add_0_int32", in: 2147483647, want: 2147483647}, + test_int32{fn: add_int32_0, fnname: "add_int32_0", in: 2147483647, want: 2147483647}, + test_int32{fn: add_1_int32, fnname: "add_1_int32", in: -2147483648, want: -2147483647}, + test_int32{fn: add_int32_1, fnname: "add_int32_1", in: -2147483648, want: -2147483647}, + test_int32{fn: add_1_int32, fnname: "add_1_int32", in: -2147483647, want: -2147483646}, + test_int32{fn: add_int32_1, fnname: "add_int32_1", in: -2147483647, want: -2147483646}, + test_int32{fn: add_1_int32, fnname: "add_1_int32", in: -1, want: 0}, + test_int32{fn: add_int32_1, fnname: "add_int32_1", in: -1, want: 0}, + test_int32{fn: add_1_int32, fnname: "add_1_int32", in: 0, want: 1}, + test_int32{fn: add_int32_1, fnname: "add_int32_1", in: 0, want: 1}, + test_int32{fn: add_1_int32, fnname: "add_1_int32", in: 1, want: 2}, + test_int32{fn: add_int32_1, fnname: "add_int32_1", in: 1, want: 2}, + test_int32{fn: add_1_int32, fnname: "add_1_int32", in: 2147483647, want: -2147483648}, + test_int32{fn: add_int32_1, fnname: "add_int32_1", in: 2147483647, want: -2147483648}, + test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: -2147483648, want: -1}, + test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: -2147483648, want: -1}, + test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: -2147483647, want: 0}, + test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: -2147483647, want: 0}, + test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: -1, want: 2147483646}, + test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: -1, want: 2147483646}, + test_int32{fn: add_2147483647_int32, fnname: 
"add_2147483647_int32", in: 0, want: 2147483647}, + test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: 0, want: 2147483647}, + test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: 1, want: -2147483648}, + test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: 1, want: -2147483648}, + test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: 2147483647, want: -2}, + test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: 2147483647, want: -2}, + test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: -2147483648, want: 0}, + test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: -2147483648, want: 0}, + test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: -2147483647, want: -1}, + test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: -2147483647, want: 1}, + test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: -1, want: -2147483647}, + test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: -1, want: 2147483647}, + test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: 0, want: -2147483648}, + test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: 0, want: -2147483648}, + test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: 1, want: 2147483647}, + test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: 1, want: -2147483647}, + test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: 2147483647, want: 1}, + test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: 2147483647, want: -1}, + test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: -2147483648, want: 1}, + test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: -2147483648, want: -1}, + 
test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: -2147483647, want: 0}, + test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: -2147483647, want: 0}, + test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: -1, want: -2147483646}, + test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: -1, want: 2147483646}, + test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: 0, want: -2147483647}, + test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: 0, want: 2147483647}, + test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: 1, want: -2147483648}, + test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: 1, want: -2147483648}, + test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: 2147483647, want: 2}, + test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: 2147483647, want: -2}, + test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: -2147483648, want: 2147483647}, + test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: -2147483648, want: -2147483647}, + test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: -2147483647, want: 2147483646}, + test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: -2147483647, want: -2147483646}, + test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: -1, want: 0}, + test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: -1, want: 0}, + test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: 0, want: -1}, + test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: 0, want: 1}, + test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: 1, want: -2}, + test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: 1, want: 2}, + test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: 2147483647, want: -2147483648}, + test_int32{fn: 
sub_int32_Neg1, fnname: "sub_int32_Neg1", in: 2147483647, want: -2147483648}, + test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: -2147483648, want: -2147483648}, + test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: -2147483648, want: -2147483648}, + test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: -2147483647, want: 2147483647}, + test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: -2147483647, want: -2147483647}, + test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: -1, want: 1}, + test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: -1, want: -1}, + test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: 0, want: 0}, + test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: 0, want: 0}, + test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: 1, want: -1}, + test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: 1, want: 1}, + test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: 2147483647, want: -2147483647}, + test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: 2147483647, want: 2147483647}, + test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: -2147483648, want: -2147483647}, + test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: -2147483648, want: 2147483647}, + test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: -2147483647, want: -2147483648}, + test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: -2147483647, want: -2147483648}, + test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: -1, want: 2}, + test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: -1, want: -2}, + test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: 0, want: 1}, + test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: 0, want: -1}, + test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: 1, want: 0}, + test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: 1, want: 0}, + test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: 2147483647, want: -2147483646}, + test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: 2147483647, want: 2147483646}, + 
test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: -2147483648, want: -1}, + test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: -2147483648, want: 1}, + test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: -2147483647, want: -2}, + test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: -2147483647, want: 2}, + test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: -1, want: -2147483648}, + test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: -1, want: -2147483648}, + test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: 0, want: 2147483647}, + test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: 0, want: -2147483647}, + test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: 1, want: 2147483646}, + test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: 1, want: -2147483646}, + test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: 2147483647, want: 0}, + test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: 2147483647, want: 0}, + test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: -2147483648, want: 1}, + test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: -2147483648, want: 1}, + test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: -2147483647, want: 1}, + test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: -2147483647, want: 0}, + test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: -1, want: -2147483648}, + test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: -1, want: 0}, + test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: 0, want: 0}, + test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: 1, want: -2147483648}, + 
test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: 1, want: 0}, + test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: 2147483647, want: -1}, + test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: 2147483647, want: 0}, + test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: -2147483648, want: 0}, + test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: -2147483648, want: 1}, + test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: -2147483647, want: 1}, + test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: -2147483647, want: 1}, + test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: -1, want: 2147483647}, + test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: -1, want: 0}, + test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: 0, want: 0}, + test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: 1, want: -2147483647}, + test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: 1, want: 0}, + test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: 2147483647, want: -1}, + test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: 2147483647, want: -1}, + test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: -2147483648, want: 0}, + test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: -2147483648, want: -2147483648}, + test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: -2147483647, want: 0}, + test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: -2147483647, want: 2147483647}, + test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: -1, want: 1}, + test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: -1, want: 1}, + test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: 0, 
want: 0}, + test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: 1, want: -1}, + test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: 1, want: -1}, + test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: 2147483647, want: 0}, + test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: 2147483647, want: -2147483647}, + test_int32{fn: div_0_int32, fnname: "div_0_int32", in: -2147483648, want: 0}, + test_int32{fn: div_0_int32, fnname: "div_0_int32", in: -2147483647, want: 0}, + test_int32{fn: div_0_int32, fnname: "div_0_int32", in: -1, want: 0}, + test_int32{fn: div_0_int32, fnname: "div_0_int32", in: 1, want: 0}, + test_int32{fn: div_0_int32, fnname: "div_0_int32", in: 2147483647, want: 0}, + test_int32{fn: div_1_int32, fnname: "div_1_int32", in: -2147483648, want: 0}, + test_int32{fn: div_int32_1, fnname: "div_int32_1", in: -2147483648, want: -2147483648}, + test_int32{fn: div_1_int32, fnname: "div_1_int32", in: -2147483647, want: 0}, + test_int32{fn: div_int32_1, fnname: "div_int32_1", in: -2147483647, want: -2147483647}, + test_int32{fn: div_1_int32, fnname: "div_1_int32", in: -1, want: -1}, + test_int32{fn: div_int32_1, fnname: "div_int32_1", in: -1, want: -1}, + test_int32{fn: div_int32_1, fnname: "div_int32_1", in: 0, want: 0}, + test_int32{fn: div_1_int32, fnname: "div_1_int32", in: 1, want: 1}, + test_int32{fn: div_int32_1, fnname: "div_int32_1", in: 1, want: 1}, + test_int32{fn: div_1_int32, fnname: "div_1_int32", in: 2147483647, want: 0}, + test_int32{fn: div_int32_1, fnname: "div_int32_1", in: 2147483647, want: 2147483647}, + test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: -2147483648, want: 0}, + test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: -2147483648, want: -1}, + test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: -2147483647, want: -1}, + test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: -2147483647, want: -1}, + test_int32{fn: 
div_2147483647_int32, fnname: "div_2147483647_int32", in: -1, want: -2147483647}, + test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: -1, want: 0}, + test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: 0, want: 0}, + test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: 1, want: 2147483647}, + test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: 1, want: 0}, + test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: 2147483647, want: 1}, + test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: 2147483647, want: 1}, + test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: -2147483648, want: 0}, + test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: -2147483648, want: 0}, + test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: -2147483647, want: -2147483648}, + test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: -2147483647, want: -2147483648}, + test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: -1, want: -2147483648}, + test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: -1, want: -2147483648}, + test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: 0, want: 0}, + test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: 0, want: 0}, + test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: 1, want: -2147483648}, + test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: 1, want: -2147483648}, + test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: 2147483647, want: -2147483648}, + test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: 2147483647, want: -2147483648}, + test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: -2147483648, 
want: -2147483648}, + test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: -2147483648, want: -2147483648}, + test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: -2147483647, want: 1}, + test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: -2147483647, want: 1}, + test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: -1, want: 2147483647}, + test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: -1, want: 2147483647}, + test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: 0, want: 0}, + test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: 0, want: 0}, + test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: 1, want: -2147483647}, + test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: 1, want: -2147483647}, + test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: 2147483647, want: -1}, + test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: 2147483647, want: -1}, + test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: -2147483648, want: -2147483648}, + test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: -2147483648, want: -2147483648}, + test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: -2147483647, want: 2147483647}, + test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: -2147483647, want: 2147483647}, + test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: -1, want: 1}, + test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: -1, want: 1}, + test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: 0, want: 0}, + test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: 0, want: 0}, + test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: 1, want: -1}, + test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: 1, want: -1}, + 
test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: 2147483647, want: -2147483647}, + test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: 2147483647, want: -2147483647}, + test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: -2147483648, want: 0}, + test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: -2147483648, want: 0}, + test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: -2147483647, want: 0}, + test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: -2147483647, want: 0}, + test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: -1, want: 0}, + test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: -1, want: 0}, + test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: 0, want: 0}, + test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: 0, want: 0}, + test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: 1, want: 0}, + test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: 1, want: 0}, + test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: 2147483647, want: 0}, + test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: 2147483647, want: 0}, + test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: -2147483648, want: -2147483648}, + test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: -2147483648, want: -2147483648}, + test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: -2147483647, want: -2147483647}, + test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: -2147483647, want: -2147483647}, + test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: -1, want: -1}, + test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: -1, want: -1}, + test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: 0, want: 0}, + test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: 0, want: 0}, + test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: 1, want: 1}, + test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: 1, want: 1}, + test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: 2147483647, want: 2147483647}, + test_int32{fn: mul_int32_1, fnname: 
"mul_int32_1", in: 2147483647, want: 2147483647}, + test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: -2147483648, want: -2147483648}, + test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: -2147483648, want: -2147483648}, + test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: -2147483647, want: -1}, + test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: -2147483647, want: -1}, + test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: -1, want: -2147483647}, + test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: -1, want: -2147483647}, + test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: 0, want: 0}, + test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: 0, want: 0}, + test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: 1, want: 2147483647}, + test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: 1, want: 2147483647}, + test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: 2147483647, want: 1}, + test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: 2147483647, want: 1}, + test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: -2147483648, want: 0}, + test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: -2147483648, want: 0}, + test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: -2147483647, want: -1}, + test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: -2147483647, want: -2147483647}, + test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: -1, want: 0}, + test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: -1, want: -1}, + test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: 0, want: 0}, + test_int32{fn: mod_Neg2147483648_int32, fnname: 
"mod_Neg2147483648_int32", in: 1, want: 0}, + test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: 1, want: 1}, + test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: 2147483647, want: -1}, + test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: 2147483647, want: 2147483647}, + test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: -2147483648, want: -2147483647}, + test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: -2147483648, want: -1}, + test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: -2147483647, want: 0}, + test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: -2147483647, want: 0}, + test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: -1, want: 0}, + test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: -1, want: -1}, + test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: 0, want: 0}, + test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: 1, want: 0}, + test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: 1, want: 1}, + test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: 2147483647, want: 0}, + test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: 2147483647, want: 0}, + test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: -2147483648, want: -1}, + test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: -2147483648, want: 0}, + test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: -2147483647, want: -1}, + test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: -2147483647, want: 0}, + test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: -1, want: 0}, + test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: -1, want: 0}, + test_int32{fn: mod_int32_Neg1, 
fnname: "mod_int32_Neg1", in: 0, want: 0}, + test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: 1, want: 0}, + test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: 1, want: 0}, + test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: 2147483647, want: -1}, + test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: 2147483647, want: 0}, + test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: -2147483648, want: 0}, + test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: -2147483647, want: 0}, + test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: -1, want: 0}, + test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: 1, want: 0}, + test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: 2147483647, want: 0}, + test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: -2147483648, want: 1}, + test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: -2147483648, want: 0}, + test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: -2147483647, want: 1}, + test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: -2147483647, want: 0}, + test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: -1, want: 0}, + test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: -1, want: 0}, + test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: 0, want: 0}, + test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: 1, want: 0}, + test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: 1, want: 0}, + test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: 2147483647, want: 1}, + test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: 2147483647, want: 0}, + test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: -2147483648, want: 2147483647}, + test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: -2147483648, want: -1}, + test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: -2147483647, want: 0}, + test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: -2147483647, want: 0}, + test_int32{fn: 
mod_2147483647_int32, fnname: "mod_2147483647_int32", in: -1, want: 0}, + test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: -1, want: -1}, + test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: 0, want: 0}, + test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: 1, want: 0}, + test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: 1, want: 1}, + test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: 2147483647, want: 0}, + test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: 2147483647, want: 0}, + test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: -2147483648, want: -2147483648}, + test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: -2147483648, want: -2147483648}, + test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: -2147483647, want: -2147483648}, + test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: -2147483647, want: -2147483648}, + test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: -1, want: -2147483648}, + test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: -1, want: -2147483648}, + test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: 0, want: 0}, + test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: 0, want: 0}, + test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: 1, want: 0}, + test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: 1, want: 0}, + test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: 2147483647, want: 0}, + test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: 2147483647, want: 0}, + test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: -2147483648, want: -2147483648}, + test_int32{fn: 
and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: -2147483648, want: -2147483648}, + test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: -2147483647, want: -2147483647}, + test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: -2147483647, want: -2147483647}, + test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: -1, want: -2147483647}, + test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: -1, want: -2147483647}, + test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: 0, want: 0}, + test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: 0, want: 0}, + test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: 1, want: 1}, + test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: 1, want: 1}, + test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: 2147483647, want: 1}, + test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: 2147483647, want: 1}, + test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: -2147483648, want: -2147483648}, + test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: -2147483648, want: -2147483648}, + test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: -2147483647, want: -2147483647}, + test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: -2147483647, want: -2147483647}, + test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: -1, want: -1}, + test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: -1, want: -1}, + test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: 0, want: 0}, + test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: 0, want: 0}, + test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: 1, want: 1}, + test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: 1, want: 1}, + test_int32{fn: and_Neg1_int32, 
fnname: "and_Neg1_int32", in: 2147483647, want: 2147483647}, + test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: 2147483647, want: 2147483647}, + test_int32{fn: and_0_int32, fnname: "and_0_int32", in: -2147483648, want: 0}, + test_int32{fn: and_int32_0, fnname: "and_int32_0", in: -2147483648, want: 0}, + test_int32{fn: and_0_int32, fnname: "and_0_int32", in: -2147483647, want: 0}, + test_int32{fn: and_int32_0, fnname: "and_int32_0", in: -2147483647, want: 0}, + test_int32{fn: and_0_int32, fnname: "and_0_int32", in: -1, want: 0}, + test_int32{fn: and_int32_0, fnname: "and_int32_0", in: -1, want: 0}, + test_int32{fn: and_0_int32, fnname: "and_0_int32", in: 0, want: 0}, + test_int32{fn: and_int32_0, fnname: "and_int32_0", in: 0, want: 0}, + test_int32{fn: and_0_int32, fnname: "and_0_int32", in: 1, want: 0}, + test_int32{fn: and_int32_0, fnname: "and_int32_0", in: 1, want: 0}, + test_int32{fn: and_0_int32, fnname: "and_0_int32", in: 2147483647, want: 0}, + test_int32{fn: and_int32_0, fnname: "and_int32_0", in: 2147483647, want: 0}, + test_int32{fn: and_1_int32, fnname: "and_1_int32", in: -2147483648, want: 0}, + test_int32{fn: and_int32_1, fnname: "and_int32_1", in: -2147483648, want: 0}, + test_int32{fn: and_1_int32, fnname: "and_1_int32", in: -2147483647, want: 1}, + test_int32{fn: and_int32_1, fnname: "and_int32_1", in: -2147483647, want: 1}, + test_int32{fn: and_1_int32, fnname: "and_1_int32", in: -1, want: 1}, + test_int32{fn: and_int32_1, fnname: "and_int32_1", in: -1, want: 1}, + test_int32{fn: and_1_int32, fnname: "and_1_int32", in: 0, want: 0}, + test_int32{fn: and_int32_1, fnname: "and_int32_1", in: 0, want: 0}, + test_int32{fn: and_1_int32, fnname: "and_1_int32", in: 1, want: 1}, + test_int32{fn: and_int32_1, fnname: "and_int32_1", in: 1, want: 1}, + test_int32{fn: and_1_int32, fnname: "and_1_int32", in: 2147483647, want: 1}, + test_int32{fn: and_int32_1, fnname: "and_int32_1", in: 2147483647, want: 1}, + test_int32{fn: and_2147483647_int32, 
fnname: "and_2147483647_int32", in: -2147483648, want: 0}, + test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: -2147483648, want: 0}, + test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: -2147483647, want: 1}, + test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: -2147483647, want: 1}, + test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: -1, want: 2147483647}, + test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: -1, want: 2147483647}, + test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: 0, want: 0}, + test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: 0, want: 0}, + test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: 1, want: 1}, + test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: 1, want: 1}, + test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: 2147483647, want: 2147483647}, + test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: 2147483647, want: 2147483647}, + test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: -2147483648, want: -2147483648}, + test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: -2147483648, want: -2147483648}, + test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: -2147483647, want: -2147483647}, + test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: -2147483647, want: -2147483647}, + test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: -1, want: -1}, + test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: -1, want: -1}, + test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: 0, want: -2147483648}, + test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: 0, want: -2147483648}, + test_int32{fn: or_Neg2147483648_int32, 
fnname: "or_Neg2147483648_int32", in: 1, want: -2147483647}, + test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: 1, want: -2147483647}, + test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: 2147483647, want: -1}, + test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: 2147483647, want: -1}, + test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: -2147483648, want: -2147483647}, + test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: -2147483648, want: -2147483647}, + test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: -2147483647, want: -2147483647}, + test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: -2147483647, want: -2147483647}, + test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: -1, want: -1}, + test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: -1, want: -1}, + test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: 0, want: -2147483647}, + test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: 0, want: -2147483647}, + test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: 1, want: -2147483647}, + test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: 1, want: -2147483647}, + test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: 2147483647, want: -1}, + test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: 2147483647, want: -1}, + test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: -2147483648, want: -1}, + test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: -2147483648, want: -1}, + test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: -2147483647, want: -1}, + test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: -2147483647, want: -1}, + test_int32{fn: or_Neg1_int32, fnname: 
"or_Neg1_int32", in: -1, want: -1}, + test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: -1, want: -1}, + test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: 0, want: -1}, + test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: 0, want: -1}, + test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: 1, want: -1}, + test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: 1, want: -1}, + test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: 2147483647, want: -1}, + test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: 2147483647, want: -1}, + test_int32{fn: or_0_int32, fnname: "or_0_int32", in: -2147483648, want: -2147483648}, + test_int32{fn: or_int32_0, fnname: "or_int32_0", in: -2147483648, want: -2147483648}, + test_int32{fn: or_0_int32, fnname: "or_0_int32", in: -2147483647, want: -2147483647}, + test_int32{fn: or_int32_0, fnname: "or_int32_0", in: -2147483647, want: -2147483647}, + test_int32{fn: or_0_int32, fnname: "or_0_int32", in: -1, want: -1}, + test_int32{fn: or_int32_0, fnname: "or_int32_0", in: -1, want: -1}, + test_int32{fn: or_0_int32, fnname: "or_0_int32", in: 0, want: 0}, + test_int32{fn: or_int32_0, fnname: "or_int32_0", in: 0, want: 0}, + test_int32{fn: or_0_int32, fnname: "or_0_int32", in: 1, want: 1}, + test_int32{fn: or_int32_0, fnname: "or_int32_0", in: 1, want: 1}, + test_int32{fn: or_0_int32, fnname: "or_0_int32", in: 2147483647, want: 2147483647}, + test_int32{fn: or_int32_0, fnname: "or_int32_0", in: 2147483647, want: 2147483647}, + test_int32{fn: or_1_int32, fnname: "or_1_int32", in: -2147483648, want: -2147483647}, + test_int32{fn: or_int32_1, fnname: "or_int32_1", in: -2147483648, want: -2147483647}, + test_int32{fn: or_1_int32, fnname: "or_1_int32", in: -2147483647, want: -2147483647}, + test_int32{fn: or_int32_1, fnname: "or_int32_1", in: -2147483647, want: -2147483647}, + test_int32{fn: or_1_int32, fnname: "or_1_int32", in: -1, want: -1}, + test_int32{fn: or_int32_1, fnname: "or_int32_1", in: -1, 
want: -1}, + test_int32{fn: or_1_int32, fnname: "or_1_int32", in: 0, want: 1}, + test_int32{fn: or_int32_1, fnname: "or_int32_1", in: 0, want: 1}, + test_int32{fn: or_1_int32, fnname: "or_1_int32", in: 1, want: 1}, + test_int32{fn: or_int32_1, fnname: "or_int32_1", in: 1, want: 1}, + test_int32{fn: or_1_int32, fnname: "or_1_int32", in: 2147483647, want: 2147483647}, + test_int32{fn: or_int32_1, fnname: "or_int32_1", in: 2147483647, want: 2147483647}, + test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: -2147483648, want: -1}, + test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: -2147483648, want: -1}, + test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: -2147483647, want: -1}, + test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: -2147483647, want: -1}, + test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: -1, want: -1}, + test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: -1, want: -1}, + test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: 0, want: 2147483647}, + test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: 0, want: 2147483647}, + test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: 1, want: 2147483647}, + test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: 1, want: 2147483647}, + test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: 2147483647, want: 2147483647}, + test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: 2147483647, want: 2147483647}, + test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: -2147483648, want: 0}, + test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: -2147483648, want: 0}, + test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: -2147483647, want: 1}, + test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: 
-2147483647, want: 1}, + test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: -1, want: 2147483647}, + test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: -1, want: 2147483647}, + test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: 0, want: -2147483648}, + test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: 0, want: -2147483648}, + test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: 1, want: -2147483647}, + test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: 1, want: -2147483647}, + test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: 2147483647, want: -1}, + test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: 2147483647, want: -1}, + test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: -2147483648, want: 1}, + test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: -2147483648, want: 1}, + test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: -2147483647, want: 0}, + test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: -2147483647, want: 0}, + test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: -1, want: 2147483646}, + test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: -1, want: 2147483646}, + test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: 0, want: -2147483647}, + test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: 0, want: -2147483647}, + test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: 1, want: -2147483648}, + test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: 1, want: -2147483648}, + test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: 2147483647, want: -2}, 
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: 2147483647, want: -2}, + test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: -2147483648, want: 2147483647}, + test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: -2147483648, want: 2147483647}, + test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: -2147483647, want: 2147483646}, + test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: -2147483647, want: 2147483646}, + test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: -1, want: 0}, + test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: -1, want: 0}, + test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: 0, want: -1}, + test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: 0, want: -1}, + test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: 1, want: -2}, + test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: 1, want: -2}, + test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: 2147483647, want: -2147483648}, + test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: 2147483647, want: -2147483648}, + test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: -2147483648, want: -2147483648}, + test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: -2147483648, want: -2147483648}, + test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: -2147483647, want: -2147483647}, + test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: -2147483647, want: -2147483647}, + test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: -1, want: -1}, + test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: -1, want: -1}, + test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: 0, want: 0}, + test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: 0, want: 0}, + test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: 1, want: 1}, + test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: 1, want: 1}, + test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: 2147483647, want: 
2147483647}, + test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: 2147483647, want: 2147483647}, + test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: -2147483648, want: -2147483647}, + test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: -2147483648, want: -2147483647}, + test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: -2147483647, want: -2147483648}, + test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: -2147483647, want: -2147483648}, + test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: -1, want: -2}, + test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: -1, want: -2}, + test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: 0, want: 1}, + test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: 0, want: 1}, + test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: 1, want: 0}, + test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: 1, want: 0}, + test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: 2147483647, want: 2147483646}, + test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: 2147483647, want: 2147483646}, + test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: -2147483648, want: -1}, + test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: -2147483648, want: -1}, + test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: -2147483647, want: -2}, + test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: -2147483647, want: -2}, + test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: -1, want: -2147483648}, + test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: -1, want: -2147483648}, + test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: 0, want: 2147483647}, + test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: 0, want: 2147483647}, + test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: 1, want: 2147483646}, + test_int32{fn: xor_int32_2147483647, fnname: 
"xor_int32_2147483647", in: 1, want: 2147483646}, + test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: 2147483647, want: 0}, + test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: 2147483647, want: 0}} + +type test_int32mul struct { + fn func(int32) int32 + fnname string + in int32 + want int32 +} + +var tests_int32mul = []test_int32{ + + test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: -9, want: 81}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: -9, want: 81}, + test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: -5, want: 45}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: -5, want: 45}, + test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: -3, want: 27}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: -3, want: 27}, + test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 3, want: -27}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 3, want: -27}, + test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 5, want: -45}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 5, want: -45}, + test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 7, want: -63}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 7, want: -63}, + test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 9, want: -81}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 9, want: -81}, + test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 10, want: -90}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 10, want: -90}, + test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 11, want: -99}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 11, want: -99}, + test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 13, want: -117}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 13, want: -117}, + test_int32{fn: mul_Neg9_int32, fnname: 
"mul_Neg9_int32", in: 19, want: -171}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 19, want: -171}, + test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 21, want: -189}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 21, want: -189}, + test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 25, want: -225}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 25, want: -225}, + test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 27, want: -243}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 27, want: -243}, + test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 37, want: -333}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 37, want: -333}, + test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 41, want: -369}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 41, want: -369}, + test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 45, want: -405}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 45, want: -405}, + test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 73, want: -657}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 73, want: -657}, + test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 81, want: -729}, + test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 81, want: -729}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: -9, want: 45}, + test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: -9, want: 45}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: -5, want: 25}, + test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: -5, want: 25}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: -3, want: 15}, + test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: -3, want: 15}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 3, want: -15}, + test_int32{fn: mul_int32_Neg5, fnname: 
"mul_int32_Neg5", in: 3, want: -15}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 5, want: -25}, + test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 5, want: -25}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 7, want: -35}, + test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 7, want: -35}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 9, want: -45}, + test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 9, want: -45}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 10, want: -50}, + test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 10, want: -50}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 11, want: -55}, + test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 11, want: -55}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 13, want: -65}, + test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 13, want: -65}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 19, want: -95}, + test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 19, want: -95}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 21, want: -105}, + test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 21, want: -105}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 25, want: -125}, + test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 25, want: -125}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 27, want: -135}, + test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 27, want: -135}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 37, want: -185}, + test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 37, want: -185}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 41, want: -205}, + test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 41, want: -205}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 
45, want: -225}, + test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 45, want: -225}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 73, want: -365}, + test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 73, want: -365}, + test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 81, want: -405}, + test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 81, want: -405}, + test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: -9, want: 27}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: -9, want: 27}, + test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: -5, want: 15}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: -5, want: 15}, + test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: -3, want: 9}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: -3, want: 9}, + test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 3, want: -9}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 3, want: -9}, + test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 5, want: -15}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 5, want: -15}, + test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 7, want: -21}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 7, want: -21}, + test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 9, want: -27}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 9, want: -27}, + test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 10, want: -30}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 10, want: -30}, + test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 11, want: -33}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 11, want: -33}, + test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 13, want: -39}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 13, want: -39}, + test_int32{fn: 
mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 19, want: -57}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 19, want: -57}, + test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 21, want: -63}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 21, want: -63}, + test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 25, want: -75}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 25, want: -75}, + test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 27, want: -81}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 27, want: -81}, + test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 37, want: -111}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 37, want: -111}, + test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 41, want: -123}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 41, want: -123}, + test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 45, want: -135}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 45, want: -135}, + test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 73, want: -219}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 73, want: -219}, + test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 81, want: -243}, + test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 81, want: -243}, + test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: -9, want: -27}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: -9, want: -27}, + test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: -5, want: -15}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: -5, want: -15}, + test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: -3, want: -9}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: -3, want: -9}, + test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 3, want: 9}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 3, want: 9}, + 
test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 5, want: 15}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 5, want: 15}, + test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 7, want: 21}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 7, want: 21}, + test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 9, want: 27}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 9, want: 27}, + test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 10, want: 30}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 10, want: 30}, + test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 11, want: 33}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 11, want: 33}, + test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 13, want: 39}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 13, want: 39}, + test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 19, want: 57}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 19, want: 57}, + test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 21, want: 63}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 21, want: 63}, + test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 25, want: 75}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 25, want: 75}, + test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 27, want: 81}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 27, want: 81}, + test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 37, want: 111}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 37, want: 111}, + test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 41, want: 123}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 41, want: 123}, + test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 45, want: 135}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 45, want: 135}, + test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 73, want: 219}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 
73, want: 219}, + test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 81, want: 243}, + test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 81, want: 243}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: -9, want: -45}, + test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: -9, want: -45}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: -5, want: -25}, + test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: -5, want: -25}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: -3, want: -15}, + test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: -3, want: -15}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 3, want: 15}, + test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 3, want: 15}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 5, want: 25}, + test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 5, want: 25}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 7, want: 35}, + test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 7, want: 35}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 9, want: 45}, + test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 9, want: 45}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 10, want: 50}, + test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 10, want: 50}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 11, want: 55}, + test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 11, want: 55}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 13, want: 65}, + test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 13, want: 65}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 19, want: 95}, + test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 19, want: 95}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 21, want: 105}, + test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 21, want: 105}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 25, want: 125}, + test_int32{fn: mul_int32_5, 
fnname: "mul_int32_5", in: 25, want: 125}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 27, want: 135}, + test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 27, want: 135}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 37, want: 185}, + test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 37, want: 185}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 41, want: 205}, + test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 41, want: 205}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 45, want: 225}, + test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 45, want: 225}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 73, want: 365}, + test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 73, want: 365}, + test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 81, want: 405}, + test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 81, want: 405}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: -9, want: -63}, + test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: -9, want: -63}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: -5, want: -35}, + test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: -5, want: -35}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: -3, want: -21}, + test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: -3, want: -21}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 3, want: 21}, + test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 3, want: 21}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 5, want: 35}, + test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 5, want: 35}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 7, want: 49}, + test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 7, want: 49}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 9, want: 63}, + test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 9, want: 63}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 10, want: 70}, + 
test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 10, want: 70}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 11, want: 77}, + test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 11, want: 77}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 13, want: 91}, + test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 13, want: 91}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 19, want: 133}, + test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 19, want: 133}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 21, want: 147}, + test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 21, want: 147}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 25, want: 175}, + test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 25, want: 175}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 27, want: 189}, + test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 27, want: 189}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 37, want: 259}, + test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 37, want: 259}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 41, want: 287}, + test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 41, want: 287}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 45, want: 315}, + test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 45, want: 315}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 73, want: 511}, + test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 73, want: 511}, + test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 81, want: 567}, + test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 81, want: 567}, + test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: -9, want: -81}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: -9, want: -81}, + test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: -5, want: -45}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: -5, want: -45}, + test_int32{fn: mul_9_int32, 
fnname: "mul_9_int32", in: -3, want: -27}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: -3, want: -27}, + test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 3, want: 27}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 3, want: 27}, + test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 5, want: 45}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 5, want: 45}, + test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 7, want: 63}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 7, want: 63}, + test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 9, want: 81}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 9, want: 81}, + test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 10, want: 90}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 10, want: 90}, + test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 11, want: 99}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 11, want: 99}, + test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 13, want: 117}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 13, want: 117}, + test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 19, want: 171}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 19, want: 171}, + test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 21, want: 189}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 21, want: 189}, + test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 25, want: 225}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 25, want: 225}, + test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 27, want: 243}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 27, want: 243}, + test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 37, want: 333}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 37, want: 333}, + test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 41, want: 369}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 41, want: 369}, + 
test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 45, want: 405}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 45, want: 405}, + test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 73, want: 657}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 73, want: 657}, + test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 81, want: 729}, + test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 81, want: 729}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: -9, want: -90}, + test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: -9, want: -90}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: -5, want: -50}, + test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: -5, want: -50}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: -3, want: -30}, + test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: -3, want: -30}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 3, want: 30}, + test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 3, want: 30}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 5, want: 50}, + test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 5, want: 50}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 7, want: 70}, + test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 7, want: 70}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 9, want: 90}, + test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 9, want: 90}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 10, want: 100}, + test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 10, want: 100}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 11, want: 110}, + test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 11, want: 110}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 13, want: 130}, + test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 13, want: 130}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 19, want: 190}, + 
test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 19, want: 190}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 21, want: 210}, + test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 21, want: 210}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 25, want: 250}, + test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 25, want: 250}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 27, want: 270}, + test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 27, want: 270}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 37, want: 370}, + test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 37, want: 370}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 41, want: 410}, + test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 41, want: 410}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 45, want: 450}, + test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 45, want: 450}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 73, want: 730}, + test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 73, want: 730}, + test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 81, want: 810}, + test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 81, want: 810}, + test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: -9, want: -99}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: -9, want: -99}, + test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: -5, want: -55}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: -5, want: -55}, + test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: -3, want: -33}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: -3, want: -33}, + test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 3, want: 33}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 3, want: 33}, + test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 5, want: 55}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", 
in: 5, want: 55}, + test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 7, want: 77}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 7, want: 77}, + test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 9, want: 99}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 9, want: 99}, + test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 10, want: 110}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 10, want: 110}, + test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 11, want: 121}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 11, want: 121}, + test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 13, want: 143}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 13, want: 143}, + test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 19, want: 209}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 19, want: 209}, + test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 21, want: 231}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 21, want: 231}, + test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 25, want: 275}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 25, want: 275}, + test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 27, want: 297}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 27, want: 297}, + test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 37, want: 407}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 37, want: 407}, + test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 41, want: 451}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 41, want: 451}, + test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 45, want: 495}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 45, want: 495}, + test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 73, want: 803}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 73, want: 803}, + test_int32{fn: mul_11_int32, fnname: 
"mul_11_int32", in: 81, want: 891}, + test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 81, want: 891}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: -9, want: -117}, + test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: -9, want: -117}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: -5, want: -65}, + test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: -5, want: -65}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: -3, want: -39}, + test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: -3, want: -39}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 3, want: 39}, + test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 3, want: 39}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 5, want: 65}, + test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 5, want: 65}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 7, want: 91}, + test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 7, want: 91}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 9, want: 117}, + test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 9, want: 117}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 10, want: 130}, + test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 10, want: 130}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 11, want: 143}, + test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 11, want: 143}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 13, want: 169}, + test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 13, want: 169}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 19, want: 247}, + test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 19, want: 247}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 21, want: 273}, + test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 21, want: 273}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 25, want: 325}, + test_int32{fn: 
mul_int32_13, fnname: "mul_int32_13", in: 25, want: 325}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 27, want: 351}, + test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 27, want: 351}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 37, want: 481}, + test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 37, want: 481}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 41, want: 533}, + test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 41, want: 533}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 45, want: 585}, + test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 45, want: 585}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 73, want: 949}, + test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 73, want: 949}, + test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 81, want: 1053}, + test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 81, want: 1053}, + test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: -9, want: -171}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: -9, want: -171}, + test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: -5, want: -95}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: -5, want: -95}, + test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: -3, want: -57}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: -3, want: -57}, + test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 3, want: 57}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 3, want: 57}, + test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 5, want: 95}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 5, want: 95}, + test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 7, want: 133}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 7, want: 133}, + test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 9, want: 171}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 9, want: 171}, 
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 10, want: 190}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 10, want: 190}, + test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 11, want: 209}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 11, want: 209}, + test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 13, want: 247}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 13, want: 247}, + test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 19, want: 361}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 19, want: 361}, + test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 21, want: 399}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 21, want: 399}, + test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 25, want: 475}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 25, want: 475}, + test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 27, want: 513}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 27, want: 513}, + test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 37, want: 703}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 37, want: 703}, + test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 41, want: 779}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 41, want: 779}, + test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 45, want: 855}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 45, want: 855}, + test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 73, want: 1387}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 73, want: 1387}, + test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 81, want: 1539}, + test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 81, want: 1539}, + test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: -9, want: -189}, + test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: -9, want: -189}, + test_int32{fn: mul_21_int32, fnname: 
"mul_21_int32", in: -5, want: -105}, + test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: -5, want: -105}, + test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: -3, want: -63}, + test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: -3, want: -63}, + test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 3, want: 63}, + test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 3, want: 63}, + test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 5, want: 105}, + test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 5, want: 105}, + test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 7, want: 147}, + test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 7, want: 147}, + test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 9, want: 189}, + test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 9, want: 189}, + test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 10, want: 210}, + test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 10, want: 210}, + test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 11, want: 231}, + test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 11, want: 231}, + test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 13, want: 273}, + test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 13, want: 273}, + test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 19, want: 399}, + test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 19, want: 399}, + test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 21, want: 441}, + test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 21, want: 441}, + test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 25, want: 525}, + test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 25, want: 525}, + test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 27, want: 567}, + test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 27, want: 567}, + test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 37, want: 777}, + test_int32{fn: 
mul_int32_21, fnname: "mul_int32_21", in: 37, want: 777}, + test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 41, want: 861}, + test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 41, want: 861}, + test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 45, want: 945}, + test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 45, want: 945}, + test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 73, want: 1533}, + test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 73, want: 1533}, + test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 81, want: 1701}, + test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 81, want: 1701}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: -9, want: -225}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: -9, want: -225}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: -5, want: -125}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: -5, want: -125}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: -3, want: -75}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: -3, want: -75}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 3, want: 75}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 3, want: 75}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 5, want: 125}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 5, want: 125}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 7, want: 175}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 7, want: 175}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 9, want: 225}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 9, want: 225}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 10, want: 250}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 10, want: 250}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 11, want: 275}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 11, want: 
275}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 13, want: 325}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 13, want: 325}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 19, want: 475}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 19, want: 475}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 21, want: 525}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 21, want: 525}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 25, want: 625}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 25, want: 625}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 27, want: 675}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 27, want: 675}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 37, want: 925}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 37, want: 925}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 41, want: 1025}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 41, want: 1025}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 45, want: 1125}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 45, want: 1125}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 73, want: 1825}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 73, want: 1825}, + test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 81, want: 2025}, + test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 81, want: 2025}, + test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: -9, want: -243}, + test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: -9, want: -243}, + test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: -5, want: -135}, + test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: -5, want: -135}, + test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: -3, want: -81}, + test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: -3, want: -81}, + test_int32{fn: mul_27_int32, 
fnname: "mul_27_int32", in: 3, want: 81}, + test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 3, want: 81}, + test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 5, want: 135}, + test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 5, want: 135}, + test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 7, want: 189}, + test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 7, want: 189}, + test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 9, want: 243}, + test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 9, want: 243}, + test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 10, want: 270}, + test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 10, want: 270}, + test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 11, want: 297}, + test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 11, want: 297}, + test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 13, want: 351}, + test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 13, want: 351}, + test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 19, want: 513}, + test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 19, want: 513}, + test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 21, want: 567}, + test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 21, want: 567}, + test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 25, want: 675}, + test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 25, want: 675}, + test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 27, want: 729}, + test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 27, want: 729}, + test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 37, want: 999}, + test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 37, want: 999}, + test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 41, want: 1107}, + test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 41, want: 1107}, + test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 45, want: 1215}, + 
test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 45, want: 1215}, + test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 73, want: 1971}, + test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 73, want: 1971}, + test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 81, want: 2187}, + test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 81, want: 2187}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: -9, want: -333}, + test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: -9, want: -333}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: -5, want: -185}, + test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: -5, want: -185}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: -3, want: -111}, + test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: -3, want: -111}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 3, want: 111}, + test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 3, want: 111}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 5, want: 185}, + test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 5, want: 185}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 7, want: 259}, + test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 7, want: 259}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 9, want: 333}, + test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 9, want: 333}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 10, want: 370}, + test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 10, want: 370}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 11, want: 407}, + test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 11, want: 407}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 13, want: 481}, + test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 13, want: 481}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 19, want: 703}, + test_int32{fn: mul_int32_37, fnname: 
"mul_int32_37", in: 19, want: 703}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 21, want: 777}, + test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 21, want: 777}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 25, want: 925}, + test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 25, want: 925}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 27, want: 999}, + test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 27, want: 999}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 37, want: 1369}, + test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 37, want: 1369}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 41, want: 1517}, + test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 41, want: 1517}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 45, want: 1665}, + test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 45, want: 1665}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 73, want: 2701}, + test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 73, want: 2701}, + test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 81, want: 2997}, + test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 81, want: 2997}, + test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: -9, want: -369}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: -9, want: -369}, + test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: -5, want: -205}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: -5, want: -205}, + test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: -3, want: -123}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: -3, want: -123}, + test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 3, want: 123}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 3, want: 123}, + test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 5, want: 205}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 5, want: 205}, + 
test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 7, want: 287}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 7, want: 287}, + test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 9, want: 369}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 9, want: 369}, + test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 10, want: 410}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 10, want: 410}, + test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 11, want: 451}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 11, want: 451}, + test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 13, want: 533}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 13, want: 533}, + test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 19, want: 779}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 19, want: 779}, + test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 21, want: 861}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 21, want: 861}, + test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 25, want: 1025}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 25, want: 1025}, + test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 27, want: 1107}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 27, want: 1107}, + test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 37, want: 1517}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 37, want: 1517}, + test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 41, want: 1681}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 41, want: 1681}, + test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 45, want: 1845}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 45, want: 1845}, + test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 73, want: 2993}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 73, want: 2993}, + test_int32{fn: mul_41_int32, fnname: 
"mul_41_int32", in: 81, want: 3321}, + test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 81, want: 3321}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: -9, want: -405}, + test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: -9, want: -405}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: -5, want: -225}, + test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: -5, want: -225}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: -3, want: -135}, + test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: -3, want: -135}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 3, want: 135}, + test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 3, want: 135}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 5, want: 225}, + test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 5, want: 225}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 7, want: 315}, + test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 7, want: 315}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 9, want: 405}, + test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 9, want: 405}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 10, want: 450}, + test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 10, want: 450}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 11, want: 495}, + test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 11, want: 495}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 13, want: 585}, + test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 13, want: 585}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 19, want: 855}, + test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 19, want: 855}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 21, want: 945}, + test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 21, want: 945}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 25, want: 1125}, + 
test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 25, want: 1125}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 27, want: 1215}, + test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 27, want: 1215}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 37, want: 1665}, + test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 37, want: 1665}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 41, want: 1845}, + test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 41, want: 1845}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 45, want: 2025}, + test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 45, want: 2025}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 73, want: 3285}, + test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 73, want: 3285}, + test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 81, want: 3645}, + test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 81, want: 3645}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: -9, want: -657}, + test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: -9, want: -657}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: -5, want: -365}, + test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: -5, want: -365}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: -3, want: -219}, + test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: -3, want: -219}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 3, want: 219}, + test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 3, want: 219}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 5, want: 365}, + test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 5, want: 365}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 7, want: 511}, + test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 7, want: 511}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 9, want: 657}, + test_int32{fn: mul_int32_73, fnname: 
"mul_int32_73", in: 9, want: 657}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 10, want: 730}, + test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 10, want: 730}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 11, want: 803}, + test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 11, want: 803}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 13, want: 949}, + test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 13, want: 949}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 19, want: 1387}, + test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 19, want: 1387}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 21, want: 1533}, + test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 21, want: 1533}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 25, want: 1825}, + test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 25, want: 1825}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 27, want: 1971}, + test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 27, want: 1971}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 37, want: 2701}, + test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 37, want: 2701}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 41, want: 2993}, + test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 41, want: 2993}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 45, want: 3285}, + test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 45, want: 3285}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 73, want: 5329}, + test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 73, want: 5329}, + test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 81, want: 5913}, + test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 81, want: 5913}, + test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: -9, want: -729}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: -9, want: 
-729}, + test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: -5, want: -405}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: -5, want: -405}, + test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: -3, want: -243}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: -3, want: -243}, + test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 3, want: 243}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 3, want: 243}, + test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 5, want: 405}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 5, want: 405}, + test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 7, want: 567}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 7, want: 567}, + test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 9, want: 729}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 9, want: 729}, + test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 10, want: 810}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 10, want: 810}, + test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 11, want: 891}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 11, want: 891}, + test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 13, want: 1053}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 13, want: 1053}, + test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 19, want: 1539}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 19, want: 1539}, + test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 21, want: 1701}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 21, want: 1701}, + test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 25, want: 2025}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 25, want: 2025}, + test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 27, want: 2187}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 27, want: 2187}, + test_int32{fn: mul_81_int32, 
fnname: "mul_81_int32", in: 37, want: 2997}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 37, want: 2997}, + test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 41, want: 3321}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 41, want: 3321}, + test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 45, want: 3645}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 45, want: 3645}, + test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 73, want: 5913}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 73, want: 5913}, + test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 81, want: 6561}, + test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 81, want: 6561}} + +type test_uint16 struct { + fn func(uint16) uint16 + fnname string + in uint16 + want uint16 +} + +var tests_uint16 = []test_uint16{ + + test_uint16{fn: add_0_uint16, fnname: "add_0_uint16", in: 0, want: 0}, + test_uint16{fn: add_uint16_0, fnname: "add_uint16_0", in: 0, want: 0}, + test_uint16{fn: add_0_uint16, fnname: "add_0_uint16", in: 1, want: 1}, + test_uint16{fn: add_uint16_0, fnname: "add_uint16_0", in: 1, want: 1}, + test_uint16{fn: add_0_uint16, fnname: "add_0_uint16", in: 65535, want: 65535}, + test_uint16{fn: add_uint16_0, fnname: "add_uint16_0", in: 65535, want: 65535}, + test_uint16{fn: add_1_uint16, fnname: "add_1_uint16", in: 0, want: 1}, + test_uint16{fn: add_uint16_1, fnname: "add_uint16_1", in: 0, want: 1}, + test_uint16{fn: add_1_uint16, fnname: "add_1_uint16", in: 1, want: 2}, + test_uint16{fn: add_uint16_1, fnname: "add_uint16_1", in: 1, want: 2}, + test_uint16{fn: add_1_uint16, fnname: "add_1_uint16", in: 65535, want: 0}, + test_uint16{fn: add_uint16_1, fnname: "add_uint16_1", in: 65535, want: 0}, + test_uint16{fn: add_65535_uint16, fnname: "add_65535_uint16", in: 0, want: 65535}, + test_uint16{fn: add_uint16_65535, fnname: "add_uint16_65535", in: 0, want: 65535}, + test_uint16{fn: add_65535_uint16, fnname: 
"add_65535_uint16", in: 1, want: 0}, + test_uint16{fn: add_uint16_65535, fnname: "add_uint16_65535", in: 1, want: 0}, + test_uint16{fn: add_65535_uint16, fnname: "add_65535_uint16", in: 65535, want: 65534}, + test_uint16{fn: add_uint16_65535, fnname: "add_uint16_65535", in: 65535, want: 65534}, + test_uint16{fn: sub_0_uint16, fnname: "sub_0_uint16", in: 0, want: 0}, + test_uint16{fn: sub_uint16_0, fnname: "sub_uint16_0", in: 0, want: 0}, + test_uint16{fn: sub_0_uint16, fnname: "sub_0_uint16", in: 1, want: 65535}, + test_uint16{fn: sub_uint16_0, fnname: "sub_uint16_0", in: 1, want: 1}, + test_uint16{fn: sub_0_uint16, fnname: "sub_0_uint16", in: 65535, want: 1}, + test_uint16{fn: sub_uint16_0, fnname: "sub_uint16_0", in: 65535, want: 65535}, + test_uint16{fn: sub_1_uint16, fnname: "sub_1_uint16", in: 0, want: 1}, + test_uint16{fn: sub_uint16_1, fnname: "sub_uint16_1", in: 0, want: 65535}, + test_uint16{fn: sub_1_uint16, fnname: "sub_1_uint16", in: 1, want: 0}, + test_uint16{fn: sub_uint16_1, fnname: "sub_uint16_1", in: 1, want: 0}, + test_uint16{fn: sub_1_uint16, fnname: "sub_1_uint16", in: 65535, want: 2}, + test_uint16{fn: sub_uint16_1, fnname: "sub_uint16_1", in: 65535, want: 65534}, + test_uint16{fn: sub_65535_uint16, fnname: "sub_65535_uint16", in: 0, want: 65535}, + test_uint16{fn: sub_uint16_65535, fnname: "sub_uint16_65535", in: 0, want: 1}, + test_uint16{fn: sub_65535_uint16, fnname: "sub_65535_uint16", in: 1, want: 65534}, + test_uint16{fn: sub_uint16_65535, fnname: "sub_uint16_65535", in: 1, want: 2}, + test_uint16{fn: sub_65535_uint16, fnname: "sub_65535_uint16", in: 65535, want: 0}, + test_uint16{fn: sub_uint16_65535, fnname: "sub_uint16_65535", in: 65535, want: 0}, + test_uint16{fn: div_0_uint16, fnname: "div_0_uint16", in: 1, want: 0}, + test_uint16{fn: div_0_uint16, fnname: "div_0_uint16", in: 65535, want: 0}, + test_uint16{fn: div_uint16_1, fnname: "div_uint16_1", in: 0, want: 0}, + test_uint16{fn: div_1_uint16, fnname: "div_1_uint16", in: 1, want: 
1}, + test_uint16{fn: div_uint16_1, fnname: "div_uint16_1", in: 1, want: 1}, + test_uint16{fn: div_1_uint16, fnname: "div_1_uint16", in: 65535, want: 0}, + test_uint16{fn: div_uint16_1, fnname: "div_uint16_1", in: 65535, want: 65535}, + test_uint16{fn: div_uint16_65535, fnname: "div_uint16_65535", in: 0, want: 0}, + test_uint16{fn: div_65535_uint16, fnname: "div_65535_uint16", in: 1, want: 65535}, + test_uint16{fn: div_uint16_65535, fnname: "div_uint16_65535", in: 1, want: 0}, + test_uint16{fn: div_65535_uint16, fnname: "div_65535_uint16", in: 65535, want: 1}, + test_uint16{fn: div_uint16_65535, fnname: "div_uint16_65535", in: 65535, want: 1}, + test_uint16{fn: mul_0_uint16, fnname: "mul_0_uint16", in: 0, want: 0}, + test_uint16{fn: mul_uint16_0, fnname: "mul_uint16_0", in: 0, want: 0}, + test_uint16{fn: mul_0_uint16, fnname: "mul_0_uint16", in: 1, want: 0}, + test_uint16{fn: mul_uint16_0, fnname: "mul_uint16_0", in: 1, want: 0}, + test_uint16{fn: mul_0_uint16, fnname: "mul_0_uint16", in: 65535, want: 0}, + test_uint16{fn: mul_uint16_0, fnname: "mul_uint16_0", in: 65535, want: 0}, + test_uint16{fn: mul_1_uint16, fnname: "mul_1_uint16", in: 0, want: 0}, + test_uint16{fn: mul_uint16_1, fnname: "mul_uint16_1", in: 0, want: 0}, + test_uint16{fn: mul_1_uint16, fnname: "mul_1_uint16", in: 1, want: 1}, + test_uint16{fn: mul_uint16_1, fnname: "mul_uint16_1", in: 1, want: 1}, + test_uint16{fn: mul_1_uint16, fnname: "mul_1_uint16", in: 65535, want: 65535}, + test_uint16{fn: mul_uint16_1, fnname: "mul_uint16_1", in: 65535, want: 65535}, + test_uint16{fn: mul_65535_uint16, fnname: "mul_65535_uint16", in: 0, want: 0}, + test_uint16{fn: mul_uint16_65535, fnname: "mul_uint16_65535", in: 0, want: 0}, + test_uint16{fn: mul_65535_uint16, fnname: "mul_65535_uint16", in: 1, want: 65535}, + test_uint16{fn: mul_uint16_65535, fnname: "mul_uint16_65535", in: 1, want: 65535}, + test_uint16{fn: mul_65535_uint16, fnname: "mul_65535_uint16", in: 65535, want: 1}, + test_uint16{fn: 
mul_uint16_65535, fnname: "mul_uint16_65535", in: 65535, want: 1}, + test_uint16{fn: lsh_0_uint16, fnname: "lsh_0_uint16", in: 0, want: 0}, + test_uint16{fn: lsh_uint16_0, fnname: "lsh_uint16_0", in: 0, want: 0}, + test_uint16{fn: lsh_0_uint16, fnname: "lsh_0_uint16", in: 1, want: 0}, + test_uint16{fn: lsh_uint16_0, fnname: "lsh_uint16_0", in: 1, want: 1}, + test_uint16{fn: lsh_0_uint16, fnname: "lsh_0_uint16", in: 65535, want: 0}, + test_uint16{fn: lsh_uint16_0, fnname: "lsh_uint16_0", in: 65535, want: 65535}, + test_uint16{fn: lsh_1_uint16, fnname: "lsh_1_uint16", in: 0, want: 1}, + test_uint16{fn: lsh_uint16_1, fnname: "lsh_uint16_1", in: 0, want: 0}, + test_uint16{fn: lsh_1_uint16, fnname: "lsh_1_uint16", in: 1, want: 2}, + test_uint16{fn: lsh_uint16_1, fnname: "lsh_uint16_1", in: 1, want: 2}, + test_uint16{fn: lsh_1_uint16, fnname: "lsh_1_uint16", in: 65535, want: 0}, + test_uint16{fn: lsh_uint16_1, fnname: "lsh_uint16_1", in: 65535, want: 65534}, + test_uint16{fn: lsh_65535_uint16, fnname: "lsh_65535_uint16", in: 0, want: 65535}, + test_uint16{fn: lsh_uint16_65535, fnname: "lsh_uint16_65535", in: 0, want: 0}, + test_uint16{fn: lsh_65535_uint16, fnname: "lsh_65535_uint16", in: 1, want: 65534}, + test_uint16{fn: lsh_uint16_65535, fnname: "lsh_uint16_65535", in: 1, want: 0}, + test_uint16{fn: lsh_65535_uint16, fnname: "lsh_65535_uint16", in: 65535, want: 0}, + test_uint16{fn: lsh_uint16_65535, fnname: "lsh_uint16_65535", in: 65535, want: 0}, + test_uint16{fn: rsh_0_uint16, fnname: "rsh_0_uint16", in: 0, want: 0}, + test_uint16{fn: rsh_uint16_0, fnname: "rsh_uint16_0", in: 0, want: 0}, + test_uint16{fn: rsh_0_uint16, fnname: "rsh_0_uint16", in: 1, want: 0}, + test_uint16{fn: rsh_uint16_0, fnname: "rsh_uint16_0", in: 1, want: 1}, + test_uint16{fn: rsh_0_uint16, fnname: "rsh_0_uint16", in: 65535, want: 0}, + test_uint16{fn: rsh_uint16_0, fnname: "rsh_uint16_0", in: 65535, want: 65535}, + test_uint16{fn: rsh_1_uint16, fnname: "rsh_1_uint16", in: 0, want: 1}, + 
test_uint16{fn: rsh_uint16_1, fnname: "rsh_uint16_1", in: 0, want: 0}, + test_uint16{fn: rsh_1_uint16, fnname: "rsh_1_uint16", in: 1, want: 0}, + test_uint16{fn: rsh_uint16_1, fnname: "rsh_uint16_1", in: 1, want: 0}, + test_uint16{fn: rsh_1_uint16, fnname: "rsh_1_uint16", in: 65535, want: 0}, + test_uint16{fn: rsh_uint16_1, fnname: "rsh_uint16_1", in: 65535, want: 32767}, + test_uint16{fn: rsh_65535_uint16, fnname: "rsh_65535_uint16", in: 0, want: 65535}, + test_uint16{fn: rsh_uint16_65535, fnname: "rsh_uint16_65535", in: 0, want: 0}, + test_uint16{fn: rsh_65535_uint16, fnname: "rsh_65535_uint16", in: 1, want: 32767}, + test_uint16{fn: rsh_uint16_65535, fnname: "rsh_uint16_65535", in: 1, want: 0}, + test_uint16{fn: rsh_65535_uint16, fnname: "rsh_65535_uint16", in: 65535, want: 0}, + test_uint16{fn: rsh_uint16_65535, fnname: "rsh_uint16_65535", in: 65535, want: 0}, + test_uint16{fn: mod_0_uint16, fnname: "mod_0_uint16", in: 1, want: 0}, + test_uint16{fn: mod_0_uint16, fnname: "mod_0_uint16", in: 65535, want: 0}, + test_uint16{fn: mod_uint16_1, fnname: "mod_uint16_1", in: 0, want: 0}, + test_uint16{fn: mod_1_uint16, fnname: "mod_1_uint16", in: 1, want: 0}, + test_uint16{fn: mod_uint16_1, fnname: "mod_uint16_1", in: 1, want: 0}, + test_uint16{fn: mod_1_uint16, fnname: "mod_1_uint16", in: 65535, want: 1}, + test_uint16{fn: mod_uint16_1, fnname: "mod_uint16_1", in: 65535, want: 0}, + test_uint16{fn: mod_uint16_65535, fnname: "mod_uint16_65535", in: 0, want: 0}, + test_uint16{fn: mod_65535_uint16, fnname: "mod_65535_uint16", in: 1, want: 0}, + test_uint16{fn: mod_uint16_65535, fnname: "mod_uint16_65535", in: 1, want: 1}, + test_uint16{fn: mod_65535_uint16, fnname: "mod_65535_uint16", in: 65535, want: 0}, + test_uint16{fn: mod_uint16_65535, fnname: "mod_uint16_65535", in: 65535, want: 0}, + test_uint16{fn: and_0_uint16, fnname: "and_0_uint16", in: 0, want: 0}, + test_uint16{fn: and_uint16_0, fnname: "and_uint16_0", in: 0, want: 0}, + test_uint16{fn: and_0_uint16, fnname: 
"and_0_uint16", in: 1, want: 0}, + test_uint16{fn: and_uint16_0, fnname: "and_uint16_0", in: 1, want: 0}, + test_uint16{fn: and_0_uint16, fnname: "and_0_uint16", in: 65535, want: 0}, + test_uint16{fn: and_uint16_0, fnname: "and_uint16_0", in: 65535, want: 0}, + test_uint16{fn: and_1_uint16, fnname: "and_1_uint16", in: 0, want: 0}, + test_uint16{fn: and_uint16_1, fnname: "and_uint16_1", in: 0, want: 0}, + test_uint16{fn: and_1_uint16, fnname: "and_1_uint16", in: 1, want: 1}, + test_uint16{fn: and_uint16_1, fnname: "and_uint16_1", in: 1, want: 1}, + test_uint16{fn: and_1_uint16, fnname: "and_1_uint16", in: 65535, want: 1}, + test_uint16{fn: and_uint16_1, fnname: "and_uint16_1", in: 65535, want: 1}, + test_uint16{fn: and_65535_uint16, fnname: "and_65535_uint16", in: 0, want: 0}, + test_uint16{fn: and_uint16_65535, fnname: "and_uint16_65535", in: 0, want: 0}, + test_uint16{fn: and_65535_uint16, fnname: "and_65535_uint16", in: 1, want: 1}, + test_uint16{fn: and_uint16_65535, fnname: "and_uint16_65535", in: 1, want: 1}, + test_uint16{fn: and_65535_uint16, fnname: "and_65535_uint16", in: 65535, want: 65535}, + test_uint16{fn: and_uint16_65535, fnname: "and_uint16_65535", in: 65535, want: 65535}, + test_uint16{fn: or_0_uint16, fnname: "or_0_uint16", in: 0, want: 0}, + test_uint16{fn: or_uint16_0, fnname: "or_uint16_0", in: 0, want: 0}, + test_uint16{fn: or_0_uint16, fnname: "or_0_uint16", in: 1, want: 1}, + test_uint16{fn: or_uint16_0, fnname: "or_uint16_0", in: 1, want: 1}, + test_uint16{fn: or_0_uint16, fnname: "or_0_uint16", in: 65535, want: 65535}, + test_uint16{fn: or_uint16_0, fnname: "or_uint16_0", in: 65535, want: 65535}, + test_uint16{fn: or_1_uint16, fnname: "or_1_uint16", in: 0, want: 1}, + test_uint16{fn: or_uint16_1, fnname: "or_uint16_1", in: 0, want: 1}, + test_uint16{fn: or_1_uint16, fnname: "or_1_uint16", in: 1, want: 1}, + test_uint16{fn: or_uint16_1, fnname: "or_uint16_1", in: 1, want: 1}, + test_uint16{fn: or_1_uint16, fnname: "or_1_uint16", in: 65535, 
want: 65535}, + test_uint16{fn: or_uint16_1, fnname: "or_uint16_1", in: 65535, want: 65535}, + test_uint16{fn: or_65535_uint16, fnname: "or_65535_uint16", in: 0, want: 65535}, + test_uint16{fn: or_uint16_65535, fnname: "or_uint16_65535", in: 0, want: 65535}, + test_uint16{fn: or_65535_uint16, fnname: "or_65535_uint16", in: 1, want: 65535}, + test_uint16{fn: or_uint16_65535, fnname: "or_uint16_65535", in: 1, want: 65535}, + test_uint16{fn: or_65535_uint16, fnname: "or_65535_uint16", in: 65535, want: 65535}, + test_uint16{fn: or_uint16_65535, fnname: "or_uint16_65535", in: 65535, want: 65535}, + test_uint16{fn: xor_0_uint16, fnname: "xor_0_uint16", in: 0, want: 0}, + test_uint16{fn: xor_uint16_0, fnname: "xor_uint16_0", in: 0, want: 0}, + test_uint16{fn: xor_0_uint16, fnname: "xor_0_uint16", in: 1, want: 1}, + test_uint16{fn: xor_uint16_0, fnname: "xor_uint16_0", in: 1, want: 1}, + test_uint16{fn: xor_0_uint16, fnname: "xor_0_uint16", in: 65535, want: 65535}, + test_uint16{fn: xor_uint16_0, fnname: "xor_uint16_0", in: 65535, want: 65535}, + test_uint16{fn: xor_1_uint16, fnname: "xor_1_uint16", in: 0, want: 1}, + test_uint16{fn: xor_uint16_1, fnname: "xor_uint16_1", in: 0, want: 1}, + test_uint16{fn: xor_1_uint16, fnname: "xor_1_uint16", in: 1, want: 0}, + test_uint16{fn: xor_uint16_1, fnname: "xor_uint16_1", in: 1, want: 0}, + test_uint16{fn: xor_1_uint16, fnname: "xor_1_uint16", in: 65535, want: 65534}, + test_uint16{fn: xor_uint16_1, fnname: "xor_uint16_1", in: 65535, want: 65534}, + test_uint16{fn: xor_65535_uint16, fnname: "xor_65535_uint16", in: 0, want: 65535}, + test_uint16{fn: xor_uint16_65535, fnname: "xor_uint16_65535", in: 0, want: 65535}, + test_uint16{fn: xor_65535_uint16, fnname: "xor_65535_uint16", in: 1, want: 65534}, + test_uint16{fn: xor_uint16_65535, fnname: "xor_uint16_65535", in: 1, want: 65534}, + test_uint16{fn: xor_65535_uint16, fnname: "xor_65535_uint16", in: 65535, want: 0}, + test_uint16{fn: xor_uint16_65535, fnname: "xor_uint16_65535", in: 
65535, want: 0}} + +type test_int16 struct { + fn func(int16) int16 + fnname string + in int16 + want int16 +} + +var tests_int16 = []test_int16{ + + test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: -32768, want: 0}, + test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: -32768, want: 0}, + test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: -32767, want: 1}, + test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: -32767, want: 1}, + test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: -1, want: 32767}, + test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: -1, want: 32767}, + test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: 0, want: -32768}, + test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: 0, want: -32768}, + test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: 1, want: -32767}, + test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: 1, want: -32767}, + test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: 32766, want: -2}, + test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: 32766, want: -2}, + test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: 32767, want: -1}, + test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: 32767, want: -1}, + test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: -32768, want: 1}, + test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: -32768, want: 1}, + test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: -32767, want: 2}, + test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: -32767, want: 2}, + test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: -1, want: -32768}, + test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: -1, want: -32768}, + test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", 
in: 0, want: -32767}, + test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: 0, want: -32767}, + test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: 1, want: -32766}, + test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: 1, want: -32766}, + test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: 32766, want: -1}, + test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: 32766, want: -1}, + test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: 32767, want: 0}, + test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: 32767, want: 0}, + test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: -32768, want: 32767}, + test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: -32768, want: 32767}, + test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: -32767, want: -32768}, + test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: -32767, want: -32768}, + test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: -1, want: -2}, + test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: -1, want: -2}, + test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: 0, want: -1}, + test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: 0, want: -1}, + test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: 1, want: 0}, + test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: 1, want: 0}, + test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: 32766, want: 32765}, + test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: 32766, want: 32765}, + test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: 32767, want: 32766}, + test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: 32767, want: 32766}, + test_int16{fn: add_0_int16, fnname: "add_0_int16", in: -32768, want: -32768}, + test_int16{fn: add_int16_0, fnname: "add_int16_0", in: -32768, want: -32768}, + test_int16{fn: add_0_int16, fnname: "add_0_int16", in: 
-32767, want: -32767}, + test_int16{fn: add_int16_0, fnname: "add_int16_0", in: -32767, want: -32767}, + test_int16{fn: add_0_int16, fnname: "add_0_int16", in: -1, want: -1}, + test_int16{fn: add_int16_0, fnname: "add_int16_0", in: -1, want: -1}, + test_int16{fn: add_0_int16, fnname: "add_0_int16", in: 0, want: 0}, + test_int16{fn: add_int16_0, fnname: "add_int16_0", in: 0, want: 0}, + test_int16{fn: add_0_int16, fnname: "add_0_int16", in: 1, want: 1}, + test_int16{fn: add_int16_0, fnname: "add_int16_0", in: 1, want: 1}, + test_int16{fn: add_0_int16, fnname: "add_0_int16", in: 32766, want: 32766}, + test_int16{fn: add_int16_0, fnname: "add_int16_0", in: 32766, want: 32766}, + test_int16{fn: add_0_int16, fnname: "add_0_int16", in: 32767, want: 32767}, + test_int16{fn: add_int16_0, fnname: "add_int16_0", in: 32767, want: 32767}, + test_int16{fn: add_1_int16, fnname: "add_1_int16", in: -32768, want: -32767}, + test_int16{fn: add_int16_1, fnname: "add_int16_1", in: -32768, want: -32767}, + test_int16{fn: add_1_int16, fnname: "add_1_int16", in: -32767, want: -32766}, + test_int16{fn: add_int16_1, fnname: "add_int16_1", in: -32767, want: -32766}, + test_int16{fn: add_1_int16, fnname: "add_1_int16", in: -1, want: 0}, + test_int16{fn: add_int16_1, fnname: "add_int16_1", in: -1, want: 0}, + test_int16{fn: add_1_int16, fnname: "add_1_int16", in: 0, want: 1}, + test_int16{fn: add_int16_1, fnname: "add_int16_1", in: 0, want: 1}, + test_int16{fn: add_1_int16, fnname: "add_1_int16", in: 1, want: 2}, + test_int16{fn: add_int16_1, fnname: "add_int16_1", in: 1, want: 2}, + test_int16{fn: add_1_int16, fnname: "add_1_int16", in: 32766, want: 32767}, + test_int16{fn: add_int16_1, fnname: "add_int16_1", in: 32766, want: 32767}, + test_int16{fn: add_1_int16, fnname: "add_1_int16", in: 32767, want: -32768}, + test_int16{fn: add_int16_1, fnname: "add_int16_1", in: 32767, want: -32768}, + test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: -32768, want: -2}, + test_int16{fn: 
add_int16_32766, fnname: "add_int16_32766", in: -32768, want: -2}, + test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: -32767, want: -1}, + test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: -32767, want: -1}, + test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: -1, want: 32765}, + test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: -1, want: 32765}, + test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: 0, want: 32766}, + test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: 0, want: 32766}, + test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: 1, want: 32767}, + test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: 1, want: 32767}, + test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: 32766, want: -4}, + test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: 32766, want: -4}, + test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: 32767, want: -3}, + test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: 32767, want: -3}, + test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: -32768, want: -1}, + test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: -32768, want: -1}, + test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: -32767, want: 0}, + test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: -32767, want: 0}, + test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: -1, want: 32766}, + test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: -1, want: 32766}, + test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: 0, want: 32767}, + test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: 0, want: 32767}, + test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: 1, want: -32768}, + test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: 1, want: -32768}, + test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: 32766, want: -3}, + test_int16{fn: 
add_int16_32767, fnname: "add_int16_32767", in: 32766, want: -3}, + test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: 32767, want: -2}, + test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: 32767, want: -2}, + test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: -32768, want: 0}, + test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: -32768, want: 0}, + test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: -32767, want: -1}, + test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: -32767, want: 1}, + test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: -1, want: -32767}, + test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: -1, want: 32767}, + test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: 0, want: -32768}, + test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: 0, want: -32768}, + test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: 1, want: 32767}, + test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: 1, want: -32767}, + test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: 32766, want: 2}, + test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: 32766, want: -2}, + test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: 32767, want: 1}, + test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: 32767, want: -1}, + test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: -32768, want: 1}, + test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: -32768, want: -1}, + test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: -32767, want: 0}, + test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: -32767, want: 0}, + test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: -1, want: -32766}, + test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: -1, 
want: 32766}, + test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: 0, want: -32767}, + test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: 0, want: 32767}, + test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: 1, want: -32768}, + test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: 1, want: -32768}, + test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: 32766, want: 3}, + test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: 32766, want: -3}, + test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: 32767, want: 2}, + test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: 32767, want: -2}, + test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: -32768, want: 32767}, + test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: -32768, want: -32767}, + test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: -32767, want: 32766}, + test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: -32767, want: -32766}, + test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: -1, want: 0}, + test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: -1, want: 0}, + test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: 0, want: -1}, + test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: 0, want: 1}, + test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: 1, want: -2}, + test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: 1, want: 2}, + test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: 32766, want: -32767}, + test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: 32766, want: 32767}, + test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: 32767, want: -32768}, + test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: 32767, want: -32768}, + test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: -32768, want: -32768}, + test_int16{fn: sub_int16_0, fnname: "sub_int16_0", 
in: -32768, want: -32768}, + test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: -32767, want: 32767}, + test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: -32767, want: -32767}, + test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: -1, want: 1}, + test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: -1, want: -1}, + test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: 0, want: 0}, + test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: 0, want: 0}, + test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: 1, want: -1}, + test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: 1, want: 1}, + test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: 32766, want: -32766}, + test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: 32766, want: 32766}, + test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: 32767, want: -32767}, + test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: 32767, want: 32767}, + test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: -32768, want: -32767}, + test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: -32768, want: 32767}, + test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: -32767, want: -32768}, + test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: -32767, want: -32768}, + test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: -1, want: 2}, + test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: -1, want: -2}, + test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: 0, want: 1}, + test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: 0, want: -1}, + test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: 1, want: 0}, + test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: 1, want: 0}, + test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: 32766, want: -32765}, + test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: 32766, want: 32765}, + test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: 32767, want: -32766}, + test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: 32767, want: 32766}, + test_int16{fn: 
sub_32766_int16, fnname: "sub_32766_int16", in: -32768, want: -2}, + test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: -32768, want: 2}, + test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: -32767, want: -3}, + test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: -32767, want: 3}, + test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: -1, want: 32767}, + test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: -1, want: -32767}, + test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: 0, want: 32766}, + test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: 0, want: -32766}, + test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: 1, want: 32765}, + test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: 1, want: -32765}, + test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: 32766, want: 0}, + test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: 32766, want: 0}, + test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: 32767, want: -1}, + test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: 32767, want: 1}, + test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: -32768, want: -1}, + test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: -32768, want: 1}, + test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: -32767, want: -2}, + test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: -32767, want: 2}, + test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: -1, want: -32768}, + test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: -1, want: -32768}, + test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: 0, want: 32767}, + test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: 0, want: -32767}, + test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: 1, want: 32766}, + test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: 1, want: -32766}, + test_int16{fn: 
sub_32767_int16, fnname: "sub_32767_int16", in: 32766, want: 1}, + test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: 32766, want: -1}, + test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: 32767, want: 0}, + test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: 32767, want: 0}, + test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: -32768, want: 1}, + test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: -32768, want: 1}, + test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: -32767, want: 1}, + test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: -32767, want: 0}, + test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: -1, want: -32768}, + test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: -1, want: 0}, + test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: 0, want: 0}, + test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: 1, want: -32768}, + test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: 1, want: 0}, + test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: 32766, want: -1}, + test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: 32766, want: 0}, + test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: 32767, want: -1}, + test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: 32767, want: 0}, + test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: -32768, want: 0}, + test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: -32768, want: 1}, + test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: -32767, want: 1}, + test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: -32767, want: 1}, + test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: -1, want: 32767}, + test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: -1, want: 0}, + 
test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: 0, want: 0}, + test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: 1, want: -32767}, + test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: 1, want: 0}, + test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: 32766, want: -1}, + test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: 32766, want: 0}, + test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: 32767, want: -1}, + test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: 32767, want: -1}, + test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: -32768, want: 0}, + test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: -32768, want: -32768}, + test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: -32767, want: 0}, + test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: -32767, want: 32767}, + test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: -1, want: 1}, + test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: -1, want: 1}, + test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: 0, want: 0}, + test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: 1, want: -1}, + test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: 1, want: -1}, + test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: 32766, want: 0}, + test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: 32766, want: -32766}, + test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: 32767, want: 0}, + test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: 32767, want: -32767}, + test_int16{fn: div_0_int16, fnname: "div_0_int16", in: -32768, want: 0}, + test_int16{fn: div_0_int16, fnname: "div_0_int16", in: -32767, want: 0}, + test_int16{fn: div_0_int16, fnname: "div_0_int16", in: -1, want: 0}, + test_int16{fn: div_0_int16, fnname: "div_0_int16", in: 1, want: 0}, + test_int16{fn: div_0_int16, fnname: 
"div_0_int16", in: 32766, want: 0}, + test_int16{fn: div_0_int16, fnname: "div_0_int16", in: 32767, want: 0}, + test_int16{fn: div_1_int16, fnname: "div_1_int16", in: -32768, want: 0}, + test_int16{fn: div_int16_1, fnname: "div_int16_1", in: -32768, want: -32768}, + test_int16{fn: div_1_int16, fnname: "div_1_int16", in: -32767, want: 0}, + test_int16{fn: div_int16_1, fnname: "div_int16_1", in: -32767, want: -32767}, + test_int16{fn: div_1_int16, fnname: "div_1_int16", in: -1, want: -1}, + test_int16{fn: div_int16_1, fnname: "div_int16_1", in: -1, want: -1}, + test_int16{fn: div_int16_1, fnname: "div_int16_1", in: 0, want: 0}, + test_int16{fn: div_1_int16, fnname: "div_1_int16", in: 1, want: 1}, + test_int16{fn: div_int16_1, fnname: "div_int16_1", in: 1, want: 1}, + test_int16{fn: div_1_int16, fnname: "div_1_int16", in: 32766, want: 0}, + test_int16{fn: div_int16_1, fnname: "div_int16_1", in: 32766, want: 32766}, + test_int16{fn: div_1_int16, fnname: "div_1_int16", in: 32767, want: 0}, + test_int16{fn: div_int16_1, fnname: "div_int16_1", in: 32767, want: 32767}, + test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: -32768, want: 0}, + test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: -32768, want: -1}, + test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: -32767, want: 0}, + test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: -32767, want: -1}, + test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: -1, want: -32766}, + test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: -1, want: 0}, + test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: 0, want: 0}, + test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: 1, want: 32766}, + test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: 1, want: 0}, + test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: 32766, want: 1}, + test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: 32766, want: 1}, + test_int16{fn: 
div_32766_int16, fnname: "div_32766_int16", in: 32767, want: 0}, + test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: 32767, want: 1}, + test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: -32768, want: 0}, + test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: -32768, want: -1}, + test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: -32767, want: -1}, + test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: -32767, want: -1}, + test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: -1, want: -32767}, + test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: -1, want: 0}, + test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: 0, want: 0}, + test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: 1, want: 32767}, + test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: 1, want: 0}, + test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: 32766, want: 1}, + test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: 32766, want: 0}, + test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: 32767, want: 1}, + test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: 32767, want: 1}, + test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: -32768, want: 0}, + test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: -32768, want: 0}, + test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: -32767, want: -32768}, + test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: -32767, want: -32768}, + test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: -1, want: -32768}, + test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: -1, want: -32768}, + test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: 0, want: 0}, + test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: 0, want: 0}, + test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: 1, 
want: -32768}, + test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: 1, want: -32768}, + test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: 32766, want: 0}, + test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: 32766, want: 0}, + test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: 32767, want: -32768}, + test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: 32767, want: -32768}, + test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: -32768, want: -32768}, + test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: -32768, want: -32768}, + test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: -32767, want: 1}, + test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: -32767, want: 1}, + test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: -1, want: 32767}, + test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: -1, want: 32767}, + test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: 0, want: 0}, + test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: 0, want: 0}, + test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: 1, want: -32767}, + test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: 1, want: -32767}, + test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: 32766, want: 32766}, + test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: 32766, want: 32766}, + test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: 32767, want: -1}, + test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: 32767, want: -1}, + test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: -32768, want: -32768}, + test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: -32768, want: -32768}, + test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: -32767, want: 32767}, + test_int16{fn: 
mul_int16_Neg1, fnname: "mul_int16_Neg1", in: -32767, want: 32767}, + test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: -1, want: 1}, + test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: -1, want: 1}, + test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: 0, want: 0}, + test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: 0, want: 0}, + test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: 1, want: -1}, + test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: 1, want: -1}, + test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: 32766, want: -32766}, + test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: 32766, want: -32766}, + test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: 32767, want: -32767}, + test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: 32767, want: -32767}, + test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: -32768, want: 0}, + test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: -32768, want: 0}, + test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: -32767, want: 0}, + test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: -32767, want: 0}, + test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: -1, want: 0}, + test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: -1, want: 0}, + test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: 0, want: 0}, + test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: 0, want: 0}, + test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: 1, want: 0}, + test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: 1, want: 0}, + test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: 32766, want: 0}, + test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: 32766, want: 0}, + test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: 32767, want: 0}, + test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: 32767, want: 0}, + test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: -32768, want: -32768}, + test_int16{fn: mul_int16_1, 
fnname: "mul_int16_1", in: -32768, want: -32768}, + test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: -32767, want: -32767}, + test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: -32767, want: -32767}, + test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: -1, want: -1}, + test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: -1, want: -1}, + test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: 0, want: 0}, + test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: 0, want: 0}, + test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: 1, want: 1}, + test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: 1, want: 1}, + test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: 32766, want: 32766}, + test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: 32766, want: 32766}, + test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: 32767, want: 32767}, + test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: 32767, want: 32767}, + test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: -32768, want: 0}, + test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: -32768, want: 0}, + test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: -32767, want: 32766}, + test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: -32767, want: 32766}, + test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: -1, want: -32766}, + test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: -1, want: -32766}, + test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: 0, want: 0}, + test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: 0, want: 0}, + test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: 1, want: 32766}, + test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: 1, want: 32766}, + test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: 32766, want: 4}, + test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: 32766, want: 4}, + test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", 
in: 32767, want: -32766}, + test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: 32767, want: -32766}, + test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: -32768, want: -32768}, + test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: -32768, want: -32768}, + test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: -32767, want: -1}, + test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: -32767, want: -1}, + test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: -1, want: -32767}, + test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: -1, want: -32767}, + test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: 0, want: 0}, + test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: 0, want: 0}, + test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: 1, want: 32767}, + test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: 1, want: 32767}, + test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: 32766, want: -32766}, + test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: 32766, want: -32766}, + test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: 32767, want: 1}, + test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: 32767, want: 1}, + test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: -32768, want: 0}, + test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: -32768, want: 0}, + test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: -32767, want: -1}, + test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: -32767, want: -32767}, + test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: -1, want: 0}, + test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: -1, want: -1}, + test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: 0, want: 0}, + test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: 1, want: 0}, + 
test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: 1, want: 1}, + test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: 32766, want: -2}, + test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: 32766, want: 32766}, + test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: 32767, want: -1}, + test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: 32767, want: 32767}, + test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: -32768, want: -32767}, + test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: -32768, want: -1}, + test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: -32767, want: 0}, + test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: -32767, want: 0}, + test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: -1, want: 0}, + test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: -1, want: -1}, + test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: 0, want: 0}, + test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: 1, want: 0}, + test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: 1, want: 1}, + test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: 32766, want: -1}, + test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: 32766, want: 32766}, + test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: 32767, want: 0}, + test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: 32767, want: 0}, + test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: -32768, want: -1}, + test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: -32768, want: 0}, + test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: -32767, want: -1}, + test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: -32767, want: 0}, + test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: -1, want: 0}, + 
test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: -1, want: 0}, + test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: 0, want: 0}, + test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: 1, want: 0}, + test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: 1, want: 0}, + test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: 32766, want: -1}, + test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: 32766, want: 0}, + test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: 32767, want: -1}, + test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: 32767, want: 0}, + test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: -32768, want: 0}, + test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: -32767, want: 0}, + test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: -1, want: 0}, + test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: 1, want: 0}, + test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: 32766, want: 0}, + test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: 32767, want: 0}, + test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: -32768, want: 1}, + test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: -32768, want: 0}, + test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: -32767, want: 1}, + test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: -32767, want: 0}, + test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: -1, want: 0}, + test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: -1, want: 0}, + test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: 0, want: 0}, + test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: 1, want: 0}, + test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: 1, want: 0}, + test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: 32766, want: 1}, + test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: 32766, want: 0}, + test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: 32767, want: 1}, + test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: 
32767, want: 0}, + test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: -32768, want: 32766}, + test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: -32768, want: -2}, + test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: -32767, want: 32766}, + test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: -32767, want: -1}, + test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: -1, want: 0}, + test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: -1, want: -1}, + test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: 0, want: 0}, + test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: 1, want: 0}, + test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: 1, want: 1}, + test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: 32766, want: 0}, + test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: 32766, want: 0}, + test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: 32767, want: 32766}, + test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: 32767, want: 1}, + test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: -32768, want: 32767}, + test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: -32768, want: -1}, + test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: -32767, want: 0}, + test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: -32767, want: 0}, + test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: -1, want: 0}, + test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: -1, want: -1}, + test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: 0, want: 0}, + test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: 1, want: 0}, + test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: 1, want: 1}, + test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: 32766, want: 1}, + test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: 32766, want: 32766}, + test_int16{fn: 
mod_32767_int16, fnname: "mod_32767_int16", in: 32767, want: 0}, + test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: 32767, want: 0}, + test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: -32768, want: -32768}, + test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: -32768, want: -32768}, + test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: -32767, want: -32768}, + test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: -32767, want: -32768}, + test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: -1, want: -32768}, + test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: -1, want: -32768}, + test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: 0, want: 0}, + test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: 0, want: 0}, + test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: 1, want: 0}, + test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: 1, want: 0}, + test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: 32766, want: 0}, + test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: 32766, want: 0}, + test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: 32767, want: 0}, + test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: 32767, want: 0}, + test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: -32768, want: -32768}, + test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: -32768, want: -32768}, + test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: -32767, want: -32767}, + test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: -32767, want: -32767}, + test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: -1, want: -32767}, + test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: -1, want: -32767}, + test_int16{fn: and_Neg32767_int16, fnname: 
"and_Neg32767_int16", in: 0, want: 0}, + test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: 0, want: 0}, + test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: 1, want: 1}, + test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: 1, want: 1}, + test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: 32766, want: 0}, + test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: 32766, want: 0}, + test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: 32767, want: 1}, + test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: 32767, want: 1}, + test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: -32768, want: -32768}, + test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: -32768, want: -32768}, + test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: -32767, want: -32767}, + test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: -32767, want: -32767}, + test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: -1, want: -1}, + test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: -1, want: -1}, + test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: 0, want: 0}, + test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: 0, want: 0}, + test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: 1, want: 1}, + test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: 1, want: 1}, + test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: 32766, want: 32766}, + test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: 32766, want: 32766}, + test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: 32767, want: 32767}, + test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: 32767, want: 32767}, + test_int16{fn: and_0_int16, fnname: "and_0_int16", in: -32768, want: 0}, + test_int16{fn: and_int16_0, fnname: "and_int16_0", in: -32768, want: 0}, + test_int16{fn: and_0_int16, fnname: "and_0_int16", in: -32767, 
want: 0}, + test_int16{fn: and_int16_0, fnname: "and_int16_0", in: -32767, want: 0}, + test_int16{fn: and_0_int16, fnname: "and_0_int16", in: -1, want: 0}, + test_int16{fn: and_int16_0, fnname: "and_int16_0", in: -1, want: 0}, + test_int16{fn: and_0_int16, fnname: "and_0_int16", in: 0, want: 0}, + test_int16{fn: and_int16_0, fnname: "and_int16_0", in: 0, want: 0}, + test_int16{fn: and_0_int16, fnname: "and_0_int16", in: 1, want: 0}, + test_int16{fn: and_int16_0, fnname: "and_int16_0", in: 1, want: 0}, + test_int16{fn: and_0_int16, fnname: "and_0_int16", in: 32766, want: 0}, + test_int16{fn: and_int16_0, fnname: "and_int16_0", in: 32766, want: 0}, + test_int16{fn: and_0_int16, fnname: "and_0_int16", in: 32767, want: 0}, + test_int16{fn: and_int16_0, fnname: "and_int16_0", in: 32767, want: 0}, + test_int16{fn: and_1_int16, fnname: "and_1_int16", in: -32768, want: 0}, + test_int16{fn: and_int16_1, fnname: "and_int16_1", in: -32768, want: 0}, + test_int16{fn: and_1_int16, fnname: "and_1_int16", in: -32767, want: 1}, + test_int16{fn: and_int16_1, fnname: "and_int16_1", in: -32767, want: 1}, + test_int16{fn: and_1_int16, fnname: "and_1_int16", in: -1, want: 1}, + test_int16{fn: and_int16_1, fnname: "and_int16_1", in: -1, want: 1}, + test_int16{fn: and_1_int16, fnname: "and_1_int16", in: 0, want: 0}, + test_int16{fn: and_int16_1, fnname: "and_int16_1", in: 0, want: 0}, + test_int16{fn: and_1_int16, fnname: "and_1_int16", in: 1, want: 1}, + test_int16{fn: and_int16_1, fnname: "and_int16_1", in: 1, want: 1}, + test_int16{fn: and_1_int16, fnname: "and_1_int16", in: 32766, want: 0}, + test_int16{fn: and_int16_1, fnname: "and_int16_1", in: 32766, want: 0}, + test_int16{fn: and_1_int16, fnname: "and_1_int16", in: 32767, want: 1}, + test_int16{fn: and_int16_1, fnname: "and_int16_1", in: 32767, want: 1}, + test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: -32768, want: 0}, + test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: -32768, want: 0}, + 
test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: -32767, want: 0}, + test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: -32767, want: 0}, + test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: -1, want: 32766}, + test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: -1, want: 32766}, + test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: 0, want: 0}, + test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: 0, want: 0}, + test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: 1, want: 0}, + test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: 1, want: 0}, + test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: 32766, want: 32766}, + test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: 32766, want: 32766}, + test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: 32767, want: 32766}, + test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: 32767, want: 32766}, + test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: -32768, want: 0}, + test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: -32768, want: 0}, + test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: -32767, want: 1}, + test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: -32767, want: 1}, + test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: -1, want: 32767}, + test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: -1, want: 32767}, + test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: 0, want: 0}, + test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: 0, want: 0}, + test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: 1, want: 1}, + test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: 1, want: 1}, + test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: 32766, want: 32766}, + test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: 32766, want: 32766}, + test_int16{fn: 
and_32767_int16, fnname: "and_32767_int16", in: 32767, want: 32767}, + test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: 32767, want: 32767}, + test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: -32768, want: -32768}, + test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: -32768, want: -32768}, + test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: -32767, want: -32767}, + test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: -32767, want: -32767}, + test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: -1, want: -1}, + test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: -1, want: -1}, + test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: 0, want: -32768}, + test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: 0, want: -32768}, + test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: 1, want: -32767}, + test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: 1, want: -32767}, + test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: 32766, want: -2}, + test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: 32766, want: -2}, + test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: 32767, want: -1}, + test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: 32767, want: -1}, + test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: -32768, want: -32767}, + test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: -32768, want: -32767}, + test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: -32767, want: -32767}, + test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: -32767, want: -32767}, + test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: -1, want: -1}, + test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: -1, want: -1}, + test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: 0, 
want: -32767}, + test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: 0, want: -32767}, + test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: 1, want: -32767}, + test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: 1, want: -32767}, + test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: 32766, want: -1}, + test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: 32766, want: -1}, + test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: 32767, want: -1}, + test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: 32767, want: -1}, + test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: -32768, want: -1}, + test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: -32768, want: -1}, + test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: -32767, want: -1}, + test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: -32767, want: -1}, + test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: -1, want: -1}, + test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: -1, want: -1}, + test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: 0, want: -1}, + test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: 0, want: -1}, + test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: 1, want: -1}, + test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: 1, want: -1}, + test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: 32766, want: -1}, + test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: 32766, want: -1}, + test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: 32767, want: -1}, + test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: 32767, want: -1}, + test_int16{fn: or_0_int16, fnname: "or_0_int16", in: -32768, want: -32768}, + test_int16{fn: or_int16_0, fnname: "or_int16_0", in: -32768, want: -32768}, + test_int16{fn: or_0_int16, fnname: "or_0_int16", in: -32767, want: -32767}, + test_int16{fn: or_int16_0, fnname: "or_int16_0", 
in: -32767, want: -32767}, + test_int16{fn: or_0_int16, fnname: "or_0_int16", in: -1, want: -1}, + test_int16{fn: or_int16_0, fnname: "or_int16_0", in: -1, want: -1}, + test_int16{fn: or_0_int16, fnname: "or_0_int16", in: 0, want: 0}, + test_int16{fn: or_int16_0, fnname: "or_int16_0", in: 0, want: 0}, + test_int16{fn: or_0_int16, fnname: "or_0_int16", in: 1, want: 1}, + test_int16{fn: or_int16_0, fnname: "or_int16_0", in: 1, want: 1}, + test_int16{fn: or_0_int16, fnname: "or_0_int16", in: 32766, want: 32766}, + test_int16{fn: or_int16_0, fnname: "or_int16_0", in: 32766, want: 32766}, + test_int16{fn: or_0_int16, fnname: "or_0_int16", in: 32767, want: 32767}, + test_int16{fn: or_int16_0, fnname: "or_int16_0", in: 32767, want: 32767}, + test_int16{fn: or_1_int16, fnname: "or_1_int16", in: -32768, want: -32767}, + test_int16{fn: or_int16_1, fnname: "or_int16_1", in: -32768, want: -32767}, + test_int16{fn: or_1_int16, fnname: "or_1_int16", in: -32767, want: -32767}, + test_int16{fn: or_int16_1, fnname: "or_int16_1", in: -32767, want: -32767}, + test_int16{fn: or_1_int16, fnname: "or_1_int16", in: -1, want: -1}, + test_int16{fn: or_int16_1, fnname: "or_int16_1", in: -1, want: -1}, + test_int16{fn: or_1_int16, fnname: "or_1_int16", in: 0, want: 1}, + test_int16{fn: or_int16_1, fnname: "or_int16_1", in: 0, want: 1}, + test_int16{fn: or_1_int16, fnname: "or_1_int16", in: 1, want: 1}, + test_int16{fn: or_int16_1, fnname: "or_int16_1", in: 1, want: 1}, + test_int16{fn: or_1_int16, fnname: "or_1_int16", in: 32766, want: 32767}, + test_int16{fn: or_int16_1, fnname: "or_int16_1", in: 32766, want: 32767}, + test_int16{fn: or_1_int16, fnname: "or_1_int16", in: 32767, want: 32767}, + test_int16{fn: or_int16_1, fnname: "or_int16_1", in: 32767, want: 32767}, + test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: -32768, want: -2}, + test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: -32768, want: -2}, + test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: 
-32767, want: -1}, + test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: -32767, want: -1}, + test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: -1, want: -1}, + test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: -1, want: -1}, + test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: 0, want: 32766}, + test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: 0, want: 32766}, + test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: 1, want: 32767}, + test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: 1, want: 32767}, + test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: 32766, want: 32766}, + test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: 32766, want: 32766}, + test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: 32767, want: 32767}, + test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: 32767, want: 32767}, + test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: -32768, want: -1}, + test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: -32768, want: -1}, + test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: -32767, want: -1}, + test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: -32767, want: -1}, + test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: -1, want: -1}, + test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: -1, want: -1}, + test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: 0, want: 32767}, + test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: 0, want: 32767}, + test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: 1, want: 32767}, + test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: 1, want: 32767}, + test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: 32766, want: 32767}, + test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: 32766, want: 32767}, + test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: 32767, want: 32767}, + test_int16{fn: 
or_int16_32767, fnname: "or_int16_32767", in: 32767, want: 32767}, + test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: -32768, want: 0}, + test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: -32768, want: 0}, + test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: -32767, want: 1}, + test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: -32767, want: 1}, + test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: -1, want: 32767}, + test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: -1, want: 32767}, + test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: 0, want: -32768}, + test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: 0, want: -32768}, + test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: 1, want: -32767}, + test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: 1, want: -32767}, + test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: 32766, want: -2}, + test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: 32766, want: -2}, + test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: 32767, want: -1}, + test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: 32767, want: -1}, + test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: -32768, want: 1}, + test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: -32768, want: 1}, + test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: -32767, want: 0}, + test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: -32767, want: 0}, + test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: -1, want: 32766}, + test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: -1, want: 32766}, + test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: 0, want: -32767}, + test_int16{fn: xor_int16_Neg32767, fnname: 
"xor_int16_Neg32767", in: 0, want: -32767}, + test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: 1, want: -32768}, + test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: 1, want: -32768}, + test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: 32766, want: -1}, + test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: 32766, want: -1}, + test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: 32767, want: -2}, + test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: 32767, want: -2}, + test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: -32768, want: 32767}, + test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: -32768, want: 32767}, + test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: -32767, want: 32766}, + test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: -32767, want: 32766}, + test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: -1, want: 0}, + test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: -1, want: 0}, + test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: 0, want: -1}, + test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: 0, want: -1}, + test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: 1, want: -2}, + test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: 1, want: -2}, + test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: 32766, want: -32767}, + test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: 32766, want: -32767}, + test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: 32767, want: -32768}, + test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: 32767, want: -32768}, + test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: -32768, want: -32768}, + test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: -32768, want: -32768}, + test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: -32767, want: -32767}, + test_int16{fn: xor_int16_0, fnname: 
"xor_int16_0", in: -32767, want: -32767}, + test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: -1, want: -1}, + test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: -1, want: -1}, + test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: 0, want: 0}, + test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: 0, want: 0}, + test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: 1, want: 1}, + test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: 1, want: 1}, + test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: 32766, want: 32766}, + test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: 32766, want: 32766}, + test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: 32767, want: 32767}, + test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: 32767, want: 32767}, + test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: -32768, want: -32767}, + test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: -32768, want: -32767}, + test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: -32767, want: -32768}, + test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: -32767, want: -32768}, + test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: -1, want: -2}, + test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: -1, want: -2}, + test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: 0, want: 1}, + test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: 0, want: 1}, + test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: 1, want: 0}, + test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: 1, want: 0}, + test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: 32766, want: 32767}, + test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: 32766, want: 32767}, + test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: 32767, want: 32766}, + test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: 32767, want: 32766}, + test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: -32768, want: -2}, + test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: -32768, want: 
-2}, + test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: -32767, want: -1}, + test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: -32767, want: -1}, + test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: -1, want: -32767}, + test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: -1, want: -32767}, + test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: 0, want: 32766}, + test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: 0, want: 32766}, + test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: 1, want: 32767}, + test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: 1, want: 32767}, + test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: 32766, want: 0}, + test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: 32766, want: 0}, + test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: 32767, want: 1}, + test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: 32767, want: 1}, + test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: -32768, want: -1}, + test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: -32768, want: -1}, + test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: -32767, want: -2}, + test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: -32767, want: -2}, + test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: -1, want: -32768}, + test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: -1, want: -32768}, + test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: 0, want: 32767}, + test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: 0, want: 32767}, + test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: 1, want: 32766}, + test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: 1, want: 32766}, + test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: 32766, want: 1}, + test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: 32766, want: 1}, + 
test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: 32767, want: 0}, + test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: 32767, want: 0}} + +type test_uint8 struct { + fn func(uint8) uint8 + fnname string + in uint8 + want uint8 +} + +var tests_uint8 = []test_uint8{ + + test_uint8{fn: add_0_uint8, fnname: "add_0_uint8", in: 0, want: 0}, + test_uint8{fn: add_uint8_0, fnname: "add_uint8_0", in: 0, want: 0}, + test_uint8{fn: add_0_uint8, fnname: "add_0_uint8", in: 1, want: 1}, + test_uint8{fn: add_uint8_0, fnname: "add_uint8_0", in: 1, want: 1}, + test_uint8{fn: add_0_uint8, fnname: "add_0_uint8", in: 255, want: 255}, + test_uint8{fn: add_uint8_0, fnname: "add_uint8_0", in: 255, want: 255}, + test_uint8{fn: add_1_uint8, fnname: "add_1_uint8", in: 0, want: 1}, + test_uint8{fn: add_uint8_1, fnname: "add_uint8_1", in: 0, want: 1}, + test_uint8{fn: add_1_uint8, fnname: "add_1_uint8", in: 1, want: 2}, + test_uint8{fn: add_uint8_1, fnname: "add_uint8_1", in: 1, want: 2}, + test_uint8{fn: add_1_uint8, fnname: "add_1_uint8", in: 255, want: 0}, + test_uint8{fn: add_uint8_1, fnname: "add_uint8_1", in: 255, want: 0}, + test_uint8{fn: add_255_uint8, fnname: "add_255_uint8", in: 0, want: 255}, + test_uint8{fn: add_uint8_255, fnname: "add_uint8_255", in: 0, want: 255}, + test_uint8{fn: add_255_uint8, fnname: "add_255_uint8", in: 1, want: 0}, + test_uint8{fn: add_uint8_255, fnname: "add_uint8_255", in: 1, want: 0}, + test_uint8{fn: add_255_uint8, fnname: "add_255_uint8", in: 255, want: 254}, + test_uint8{fn: add_uint8_255, fnname: "add_uint8_255", in: 255, want: 254}, + test_uint8{fn: sub_0_uint8, fnname: "sub_0_uint8", in: 0, want: 0}, + test_uint8{fn: sub_uint8_0, fnname: "sub_uint8_0", in: 0, want: 0}, + test_uint8{fn: sub_0_uint8, fnname: "sub_0_uint8", in: 1, want: 255}, + test_uint8{fn: sub_uint8_0, fnname: "sub_uint8_0", in: 1, want: 1}, + test_uint8{fn: sub_0_uint8, fnname: "sub_0_uint8", in: 255, want: 1}, + test_uint8{fn: sub_uint8_0, fnname: 
"sub_uint8_0", in: 255, want: 255}, + test_uint8{fn: sub_1_uint8, fnname: "sub_1_uint8", in: 0, want: 1}, + test_uint8{fn: sub_uint8_1, fnname: "sub_uint8_1", in: 0, want: 255}, + test_uint8{fn: sub_1_uint8, fnname: "sub_1_uint8", in: 1, want: 0}, + test_uint8{fn: sub_uint8_1, fnname: "sub_uint8_1", in: 1, want: 0}, + test_uint8{fn: sub_1_uint8, fnname: "sub_1_uint8", in: 255, want: 2}, + test_uint8{fn: sub_uint8_1, fnname: "sub_uint8_1", in: 255, want: 254}, + test_uint8{fn: sub_255_uint8, fnname: "sub_255_uint8", in: 0, want: 255}, + test_uint8{fn: sub_uint8_255, fnname: "sub_uint8_255", in: 0, want: 1}, + test_uint8{fn: sub_255_uint8, fnname: "sub_255_uint8", in: 1, want: 254}, + test_uint8{fn: sub_uint8_255, fnname: "sub_uint8_255", in: 1, want: 2}, + test_uint8{fn: sub_255_uint8, fnname: "sub_255_uint8", in: 255, want: 0}, + test_uint8{fn: sub_uint8_255, fnname: "sub_uint8_255", in: 255, want: 0}, + test_uint8{fn: div_0_uint8, fnname: "div_0_uint8", in: 1, want: 0}, + test_uint8{fn: div_0_uint8, fnname: "div_0_uint8", in: 255, want: 0}, + test_uint8{fn: div_uint8_1, fnname: "div_uint8_1", in: 0, want: 0}, + test_uint8{fn: div_1_uint8, fnname: "div_1_uint8", in: 1, want: 1}, + test_uint8{fn: div_uint8_1, fnname: "div_uint8_1", in: 1, want: 1}, + test_uint8{fn: div_1_uint8, fnname: "div_1_uint8", in: 255, want: 0}, + test_uint8{fn: div_uint8_1, fnname: "div_uint8_1", in: 255, want: 255}, + test_uint8{fn: div_uint8_255, fnname: "div_uint8_255", in: 0, want: 0}, + test_uint8{fn: div_255_uint8, fnname: "div_255_uint8", in: 1, want: 255}, + test_uint8{fn: div_uint8_255, fnname: "div_uint8_255", in: 1, want: 0}, + test_uint8{fn: div_255_uint8, fnname: "div_255_uint8", in: 255, want: 1}, + test_uint8{fn: div_uint8_255, fnname: "div_uint8_255", in: 255, want: 1}, + test_uint8{fn: mul_0_uint8, fnname: "mul_0_uint8", in: 0, want: 0}, + test_uint8{fn: mul_uint8_0, fnname: "mul_uint8_0", in: 0, want: 0}, + test_uint8{fn: mul_0_uint8, fnname: "mul_0_uint8", in: 1, want: 0}, 
+ test_uint8{fn: mul_uint8_0, fnname: "mul_uint8_0", in: 1, want: 0}, + test_uint8{fn: mul_0_uint8, fnname: "mul_0_uint8", in: 255, want: 0}, + test_uint8{fn: mul_uint8_0, fnname: "mul_uint8_0", in: 255, want: 0}, + test_uint8{fn: mul_1_uint8, fnname: "mul_1_uint8", in: 0, want: 0}, + test_uint8{fn: mul_uint8_1, fnname: "mul_uint8_1", in: 0, want: 0}, + test_uint8{fn: mul_1_uint8, fnname: "mul_1_uint8", in: 1, want: 1}, + test_uint8{fn: mul_uint8_1, fnname: "mul_uint8_1", in: 1, want: 1}, + test_uint8{fn: mul_1_uint8, fnname: "mul_1_uint8", in: 255, want: 255}, + test_uint8{fn: mul_uint8_1, fnname: "mul_uint8_1", in: 255, want: 255}, + test_uint8{fn: mul_255_uint8, fnname: "mul_255_uint8", in: 0, want: 0}, + test_uint8{fn: mul_uint8_255, fnname: "mul_uint8_255", in: 0, want: 0}, + test_uint8{fn: mul_255_uint8, fnname: "mul_255_uint8", in: 1, want: 255}, + test_uint8{fn: mul_uint8_255, fnname: "mul_uint8_255", in: 1, want: 255}, + test_uint8{fn: mul_255_uint8, fnname: "mul_255_uint8", in: 255, want: 1}, + test_uint8{fn: mul_uint8_255, fnname: "mul_uint8_255", in: 255, want: 1}, + test_uint8{fn: lsh_0_uint8, fnname: "lsh_0_uint8", in: 0, want: 0}, + test_uint8{fn: lsh_uint8_0, fnname: "lsh_uint8_0", in: 0, want: 0}, + test_uint8{fn: lsh_0_uint8, fnname: "lsh_0_uint8", in: 1, want: 0}, + test_uint8{fn: lsh_uint8_0, fnname: "lsh_uint8_0", in: 1, want: 1}, + test_uint8{fn: lsh_0_uint8, fnname: "lsh_0_uint8", in: 255, want: 0}, + test_uint8{fn: lsh_uint8_0, fnname: "lsh_uint8_0", in: 255, want: 255}, + test_uint8{fn: lsh_1_uint8, fnname: "lsh_1_uint8", in: 0, want: 1}, + test_uint8{fn: lsh_uint8_1, fnname: "lsh_uint8_1", in: 0, want: 0}, + test_uint8{fn: lsh_1_uint8, fnname: "lsh_1_uint8", in: 1, want: 2}, + test_uint8{fn: lsh_uint8_1, fnname: "lsh_uint8_1", in: 1, want: 2}, + test_uint8{fn: lsh_1_uint8, fnname: "lsh_1_uint8", in: 255, want: 0}, + test_uint8{fn: lsh_uint8_1, fnname: "lsh_uint8_1", in: 255, want: 254}, + test_uint8{fn: lsh_255_uint8, fnname: 
"lsh_255_uint8", in: 0, want: 255}, + test_uint8{fn: lsh_uint8_255, fnname: "lsh_uint8_255", in: 0, want: 0}, + test_uint8{fn: lsh_255_uint8, fnname: "lsh_255_uint8", in: 1, want: 254}, + test_uint8{fn: lsh_uint8_255, fnname: "lsh_uint8_255", in: 1, want: 0}, + test_uint8{fn: lsh_255_uint8, fnname: "lsh_255_uint8", in: 255, want: 0}, + test_uint8{fn: lsh_uint8_255, fnname: "lsh_uint8_255", in: 255, want: 0}, + test_uint8{fn: rsh_0_uint8, fnname: "rsh_0_uint8", in: 0, want: 0}, + test_uint8{fn: rsh_uint8_0, fnname: "rsh_uint8_0", in: 0, want: 0}, + test_uint8{fn: rsh_0_uint8, fnname: "rsh_0_uint8", in: 1, want: 0}, + test_uint8{fn: rsh_uint8_0, fnname: "rsh_uint8_0", in: 1, want: 1}, + test_uint8{fn: rsh_0_uint8, fnname: "rsh_0_uint8", in: 255, want: 0}, + test_uint8{fn: rsh_uint8_0, fnname: "rsh_uint8_0", in: 255, want: 255}, + test_uint8{fn: rsh_1_uint8, fnname: "rsh_1_uint8", in: 0, want: 1}, + test_uint8{fn: rsh_uint8_1, fnname: "rsh_uint8_1", in: 0, want: 0}, + test_uint8{fn: rsh_1_uint8, fnname: "rsh_1_uint8", in: 1, want: 0}, + test_uint8{fn: rsh_uint8_1, fnname: "rsh_uint8_1", in: 1, want: 0}, + test_uint8{fn: rsh_1_uint8, fnname: "rsh_1_uint8", in: 255, want: 0}, + test_uint8{fn: rsh_uint8_1, fnname: "rsh_uint8_1", in: 255, want: 127}, + test_uint8{fn: rsh_255_uint8, fnname: "rsh_255_uint8", in: 0, want: 255}, + test_uint8{fn: rsh_uint8_255, fnname: "rsh_uint8_255", in: 0, want: 0}, + test_uint8{fn: rsh_255_uint8, fnname: "rsh_255_uint8", in: 1, want: 127}, + test_uint8{fn: rsh_uint8_255, fnname: "rsh_uint8_255", in: 1, want: 0}, + test_uint8{fn: rsh_255_uint8, fnname: "rsh_255_uint8", in: 255, want: 0}, + test_uint8{fn: rsh_uint8_255, fnname: "rsh_uint8_255", in: 255, want: 0}, + test_uint8{fn: mod_0_uint8, fnname: "mod_0_uint8", in: 1, want: 0}, + test_uint8{fn: mod_0_uint8, fnname: "mod_0_uint8", in: 255, want: 0}, + test_uint8{fn: mod_uint8_1, fnname: "mod_uint8_1", in: 0, want: 0}, + test_uint8{fn: mod_1_uint8, fnname: "mod_1_uint8", in: 1, want: 0}, + 
test_uint8{fn: mod_uint8_1, fnname: "mod_uint8_1", in: 1, want: 0}, + test_uint8{fn: mod_1_uint8, fnname: "mod_1_uint8", in: 255, want: 1}, + test_uint8{fn: mod_uint8_1, fnname: "mod_uint8_1", in: 255, want: 0}, + test_uint8{fn: mod_uint8_255, fnname: "mod_uint8_255", in: 0, want: 0}, + test_uint8{fn: mod_255_uint8, fnname: "mod_255_uint8", in: 1, want: 0}, + test_uint8{fn: mod_uint8_255, fnname: "mod_uint8_255", in: 1, want: 1}, + test_uint8{fn: mod_255_uint8, fnname: "mod_255_uint8", in: 255, want: 0}, + test_uint8{fn: mod_uint8_255, fnname: "mod_uint8_255", in: 255, want: 0}, + test_uint8{fn: and_0_uint8, fnname: "and_0_uint8", in: 0, want: 0}, + test_uint8{fn: and_uint8_0, fnname: "and_uint8_0", in: 0, want: 0}, + test_uint8{fn: and_0_uint8, fnname: "and_0_uint8", in: 1, want: 0}, + test_uint8{fn: and_uint8_0, fnname: "and_uint8_0", in: 1, want: 0}, + test_uint8{fn: and_0_uint8, fnname: "and_0_uint8", in: 255, want: 0}, + test_uint8{fn: and_uint8_0, fnname: "and_uint8_0", in: 255, want: 0}, + test_uint8{fn: and_1_uint8, fnname: "and_1_uint8", in: 0, want: 0}, + test_uint8{fn: and_uint8_1, fnname: "and_uint8_1", in: 0, want: 0}, + test_uint8{fn: and_1_uint8, fnname: "and_1_uint8", in: 1, want: 1}, + test_uint8{fn: and_uint8_1, fnname: "and_uint8_1", in: 1, want: 1}, + test_uint8{fn: and_1_uint8, fnname: "and_1_uint8", in: 255, want: 1}, + test_uint8{fn: and_uint8_1, fnname: "and_uint8_1", in: 255, want: 1}, + test_uint8{fn: and_255_uint8, fnname: "and_255_uint8", in: 0, want: 0}, + test_uint8{fn: and_uint8_255, fnname: "and_uint8_255", in: 0, want: 0}, + test_uint8{fn: and_255_uint8, fnname: "and_255_uint8", in: 1, want: 1}, + test_uint8{fn: and_uint8_255, fnname: "and_uint8_255", in: 1, want: 1}, + test_uint8{fn: and_255_uint8, fnname: "and_255_uint8", in: 255, want: 255}, + test_uint8{fn: and_uint8_255, fnname: "and_uint8_255", in: 255, want: 255}, + test_uint8{fn: or_0_uint8, fnname: "or_0_uint8", in: 0, want: 0}, + test_uint8{fn: or_uint8_0, fnname: 
"or_uint8_0", in: 0, want: 0}, + test_uint8{fn: or_0_uint8, fnname: "or_0_uint8", in: 1, want: 1}, + test_uint8{fn: or_uint8_0, fnname: "or_uint8_0", in: 1, want: 1}, + test_uint8{fn: or_0_uint8, fnname: "or_0_uint8", in: 255, want: 255}, + test_uint8{fn: or_uint8_0, fnname: "or_uint8_0", in: 255, want: 255}, + test_uint8{fn: or_1_uint8, fnname: "or_1_uint8", in: 0, want: 1}, + test_uint8{fn: or_uint8_1, fnname: "or_uint8_1", in: 0, want: 1}, + test_uint8{fn: or_1_uint8, fnname: "or_1_uint8", in: 1, want: 1}, + test_uint8{fn: or_uint8_1, fnname: "or_uint8_1", in: 1, want: 1}, + test_uint8{fn: or_1_uint8, fnname: "or_1_uint8", in: 255, want: 255}, + test_uint8{fn: or_uint8_1, fnname: "or_uint8_1", in: 255, want: 255}, + test_uint8{fn: or_255_uint8, fnname: "or_255_uint8", in: 0, want: 255}, + test_uint8{fn: or_uint8_255, fnname: "or_uint8_255", in: 0, want: 255}, + test_uint8{fn: or_255_uint8, fnname: "or_255_uint8", in: 1, want: 255}, + test_uint8{fn: or_uint8_255, fnname: "or_uint8_255", in: 1, want: 255}, + test_uint8{fn: or_255_uint8, fnname: "or_255_uint8", in: 255, want: 255}, + test_uint8{fn: or_uint8_255, fnname: "or_uint8_255", in: 255, want: 255}, + test_uint8{fn: xor_0_uint8, fnname: "xor_0_uint8", in: 0, want: 0}, + test_uint8{fn: xor_uint8_0, fnname: "xor_uint8_0", in: 0, want: 0}, + test_uint8{fn: xor_0_uint8, fnname: "xor_0_uint8", in: 1, want: 1}, + test_uint8{fn: xor_uint8_0, fnname: "xor_uint8_0", in: 1, want: 1}, + test_uint8{fn: xor_0_uint8, fnname: "xor_0_uint8", in: 255, want: 255}, + test_uint8{fn: xor_uint8_0, fnname: "xor_uint8_0", in: 255, want: 255}, + test_uint8{fn: xor_1_uint8, fnname: "xor_1_uint8", in: 0, want: 1}, + test_uint8{fn: xor_uint8_1, fnname: "xor_uint8_1", in: 0, want: 1}, + test_uint8{fn: xor_1_uint8, fnname: "xor_1_uint8", in: 1, want: 0}, + test_uint8{fn: xor_uint8_1, fnname: "xor_uint8_1", in: 1, want: 0}, + test_uint8{fn: xor_1_uint8, fnname: "xor_1_uint8", in: 255, want: 254}, + test_uint8{fn: xor_uint8_1, fnname: 
"xor_uint8_1", in: 255, want: 254}, + test_uint8{fn: xor_255_uint8, fnname: "xor_255_uint8", in: 0, want: 255}, + test_uint8{fn: xor_uint8_255, fnname: "xor_uint8_255", in: 0, want: 255}, + test_uint8{fn: xor_255_uint8, fnname: "xor_255_uint8", in: 1, want: 254}, + test_uint8{fn: xor_uint8_255, fnname: "xor_uint8_255", in: 1, want: 254}, + test_uint8{fn: xor_255_uint8, fnname: "xor_255_uint8", in: 255, want: 0}, + test_uint8{fn: xor_uint8_255, fnname: "xor_uint8_255", in: 255, want: 0}} + +type test_int8 struct { + fn func(int8) int8 + fnname string + in int8 + want int8 +} + +var tests_int8 = []test_int8{ + + test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: -128, want: 0}, + test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: -128, want: 0}, + test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: -127, want: 1}, + test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: -127, want: 1}, + test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: -1, want: 127}, + test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: -1, want: 127}, + test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: 0, want: -128}, + test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: 0, want: -128}, + test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: 1, want: -127}, + test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: 1, want: -127}, + test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: 126, want: -2}, + test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: 126, want: -2}, + test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: 127, want: -1}, + test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: 127, want: -1}, + test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: -128, want: 1}, + test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: -128, want: 1}, + test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: -127, want: 2}, + test_int8{fn: 
add_int8_Neg127, fnname: "add_int8_Neg127", in: -127, want: 2}, + test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: -1, want: -128}, + test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: -1, want: -128}, + test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: 0, want: -127}, + test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: 0, want: -127}, + test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: 1, want: -126}, + test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: 1, want: -126}, + test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: 126, want: -1}, + test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: 126, want: -1}, + test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: 127, want: 0}, + test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: 127, want: 0}, + test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: -128, want: 127}, + test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: -128, want: 127}, + test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: -127, want: -128}, + test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: -127, want: -128}, + test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: -1, want: -2}, + test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: -1, want: -2}, + test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: 0, want: -1}, + test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: 0, want: -1}, + test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: 1, want: 0}, + test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: 1, want: 0}, + test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: 126, want: 125}, + test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: 126, want: 125}, + test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: 127, want: 126}, + test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: 127, want: 126}, + test_int8{fn: add_0_int8, fnname: "add_0_int8", in: -128, want: 
-128}, + test_int8{fn: add_int8_0, fnname: "add_int8_0", in: -128, want: -128}, + test_int8{fn: add_0_int8, fnname: "add_0_int8", in: -127, want: -127}, + test_int8{fn: add_int8_0, fnname: "add_int8_0", in: -127, want: -127}, + test_int8{fn: add_0_int8, fnname: "add_0_int8", in: -1, want: -1}, + test_int8{fn: add_int8_0, fnname: "add_int8_0", in: -1, want: -1}, + test_int8{fn: add_0_int8, fnname: "add_0_int8", in: 0, want: 0}, + test_int8{fn: add_int8_0, fnname: "add_int8_0", in: 0, want: 0}, + test_int8{fn: add_0_int8, fnname: "add_0_int8", in: 1, want: 1}, + test_int8{fn: add_int8_0, fnname: "add_int8_0", in: 1, want: 1}, + test_int8{fn: add_0_int8, fnname: "add_0_int8", in: 126, want: 126}, + test_int8{fn: add_int8_0, fnname: "add_int8_0", in: 126, want: 126}, + test_int8{fn: add_0_int8, fnname: "add_0_int8", in: 127, want: 127}, + test_int8{fn: add_int8_0, fnname: "add_int8_0", in: 127, want: 127}, + test_int8{fn: add_1_int8, fnname: "add_1_int8", in: -128, want: -127}, + test_int8{fn: add_int8_1, fnname: "add_int8_1", in: -128, want: -127}, + test_int8{fn: add_1_int8, fnname: "add_1_int8", in: -127, want: -126}, + test_int8{fn: add_int8_1, fnname: "add_int8_1", in: -127, want: -126}, + test_int8{fn: add_1_int8, fnname: "add_1_int8", in: -1, want: 0}, + test_int8{fn: add_int8_1, fnname: "add_int8_1", in: -1, want: 0}, + test_int8{fn: add_1_int8, fnname: "add_1_int8", in: 0, want: 1}, + test_int8{fn: add_int8_1, fnname: "add_int8_1", in: 0, want: 1}, + test_int8{fn: add_1_int8, fnname: "add_1_int8", in: 1, want: 2}, + test_int8{fn: add_int8_1, fnname: "add_int8_1", in: 1, want: 2}, + test_int8{fn: add_1_int8, fnname: "add_1_int8", in: 126, want: 127}, + test_int8{fn: add_int8_1, fnname: "add_int8_1", in: 126, want: 127}, + test_int8{fn: add_1_int8, fnname: "add_1_int8", in: 127, want: -128}, + test_int8{fn: add_int8_1, fnname: "add_int8_1", in: 127, want: -128}, + test_int8{fn: add_126_int8, fnname: "add_126_int8", in: -128, want: -2}, + test_int8{fn: 
add_int8_126, fnname: "add_int8_126", in: -128, want: -2}, + test_int8{fn: add_126_int8, fnname: "add_126_int8", in: -127, want: -1}, + test_int8{fn: add_int8_126, fnname: "add_int8_126", in: -127, want: -1}, + test_int8{fn: add_126_int8, fnname: "add_126_int8", in: -1, want: 125}, + test_int8{fn: add_int8_126, fnname: "add_int8_126", in: -1, want: 125}, + test_int8{fn: add_126_int8, fnname: "add_126_int8", in: 0, want: 126}, + test_int8{fn: add_int8_126, fnname: "add_int8_126", in: 0, want: 126}, + test_int8{fn: add_126_int8, fnname: "add_126_int8", in: 1, want: 127}, + test_int8{fn: add_int8_126, fnname: "add_int8_126", in: 1, want: 127}, + test_int8{fn: add_126_int8, fnname: "add_126_int8", in: 126, want: -4}, + test_int8{fn: add_int8_126, fnname: "add_int8_126", in: 126, want: -4}, + test_int8{fn: add_126_int8, fnname: "add_126_int8", in: 127, want: -3}, + test_int8{fn: add_int8_126, fnname: "add_int8_126", in: 127, want: -3}, + test_int8{fn: add_127_int8, fnname: "add_127_int8", in: -128, want: -1}, + test_int8{fn: add_int8_127, fnname: "add_int8_127", in: -128, want: -1}, + test_int8{fn: add_127_int8, fnname: "add_127_int8", in: -127, want: 0}, + test_int8{fn: add_int8_127, fnname: "add_int8_127", in: -127, want: 0}, + test_int8{fn: add_127_int8, fnname: "add_127_int8", in: -1, want: 126}, + test_int8{fn: add_int8_127, fnname: "add_int8_127", in: -1, want: 126}, + test_int8{fn: add_127_int8, fnname: "add_127_int8", in: 0, want: 127}, + test_int8{fn: add_int8_127, fnname: "add_int8_127", in: 0, want: 127}, + test_int8{fn: add_127_int8, fnname: "add_127_int8", in: 1, want: -128}, + test_int8{fn: add_int8_127, fnname: "add_int8_127", in: 1, want: -128}, + test_int8{fn: add_127_int8, fnname: "add_127_int8", in: 126, want: -3}, + test_int8{fn: add_int8_127, fnname: "add_int8_127", in: 126, want: -3}, + test_int8{fn: add_127_int8, fnname: "add_127_int8", in: 127, want: -2}, + test_int8{fn: add_int8_127, fnname: "add_int8_127", in: 127, want: -2}, + test_int8{fn: 
sub_Neg128_int8, fnname: "sub_Neg128_int8", in: -128, want: 0}, + test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: -128, want: 0}, + test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: -127, want: -1}, + test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: -127, want: 1}, + test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: -1, want: -127}, + test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: -1, want: 127}, + test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: 0, want: -128}, + test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: 0, want: -128}, + test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: 1, want: 127}, + test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: 1, want: -127}, + test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: 126, want: 2}, + test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: 126, want: -2}, + test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: 127, want: 1}, + test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: 127, want: -1}, + test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: -128, want: 1}, + test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: -128, want: -1}, + test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: -127, want: 0}, + test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: -127, want: 0}, + test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: -1, want: -126}, + test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: -1, want: 126}, + test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: 0, want: -127}, + test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: 0, want: 127}, + test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: 1, want: -128}, + test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: 1, want: -128}, + test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: 126, want: 3}, + test_int8{fn: 
sub_int8_Neg127, fnname: "sub_int8_Neg127", in: 126, want: -3}, + test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: 127, want: 2}, + test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: 127, want: -2}, + test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: -128, want: 127}, + test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: -128, want: -127}, + test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: -127, want: 126}, + test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: -127, want: -126}, + test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: -1, want: 0}, + test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: -1, want: 0}, + test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: 0, want: -1}, + test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: 0, want: 1}, + test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: 1, want: -2}, + test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: 1, want: 2}, + test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: 126, want: -127}, + test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: 126, want: 127}, + test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: 127, want: -128}, + test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: 127, want: -128}, + test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: -128, want: -128}, + test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: -128, want: -128}, + test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: -127, want: 127}, + test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: -127, want: -127}, + test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: -1, want: 1}, + test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: -1, want: -1}, + test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: 0, want: 0}, + test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: 0, want: 0}, + test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: 1, want: -1}, + test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: 1, want: 1}, + 
test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: 126, want: -126}, + test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: 126, want: 126}, + test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: 127, want: -127}, + test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: 127, want: 127}, + test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: -128, want: -127}, + test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: -128, want: 127}, + test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: -127, want: -128}, + test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: -127, want: -128}, + test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: -1, want: 2}, + test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: -1, want: -2}, + test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: 0, want: 1}, + test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: 0, want: -1}, + test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: 1, want: 0}, + test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: 1, want: 0}, + test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: 126, want: -125}, + test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: 126, want: 125}, + test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: 127, want: -126}, + test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: 127, want: 126}, + test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: -128, want: -2}, + test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: -128, want: 2}, + test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: -127, want: -3}, + test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: -127, want: 3}, + test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: -1, want: 127}, + test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: -1, want: -127}, + test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: 0, want: 126}, + test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: 0, want: -126}, + test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: 1, want: 125}, + test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: 1, 
want: -125}, + test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: 126, want: 0}, + test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: 126, want: 0}, + test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: 127, want: -1}, + test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: 127, want: 1}, + test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: -128, want: -1}, + test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: -128, want: 1}, + test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: -127, want: -2}, + test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: -127, want: 2}, + test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: -1, want: -128}, + test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: -1, want: -128}, + test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: 0, want: 127}, + test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: 0, want: -127}, + test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: 1, want: 126}, + test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: 1, want: -126}, + test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: 126, want: 1}, + test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: 126, want: -1}, + test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: 127, want: 0}, + test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: 127, want: 0}, + test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: -128, want: 1}, + test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: -128, want: 1}, + test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: -127, want: 1}, + test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: -127, want: 0}, + test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: -1, want: -128}, + test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: -1, want: 0}, + test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: 0, want: 0}, + test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: 1, want: -128}, + test_int8{fn: 
div_int8_Neg128, fnname: "div_int8_Neg128", in: 1, want: 0}, + test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: 126, want: -1}, + test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: 126, want: 0}, + test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: 127, want: -1}, + test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: 127, want: 0}, + test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: -128, want: 0}, + test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: -128, want: 1}, + test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: -127, want: 1}, + test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: -127, want: 1}, + test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: -1, want: 127}, + test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: -1, want: 0}, + test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: 0, want: 0}, + test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: 1, want: -127}, + test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: 1, want: 0}, + test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: 126, want: -1}, + test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: 126, want: 0}, + test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: 127, want: -1}, + test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: 127, want: -1}, + test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: -128, want: 0}, + test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: -128, want: -128}, + test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: -127, want: 0}, + test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: -127, want: 127}, + test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: -1, want: 1}, + test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: -1, want: 1}, + test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: 0, want: 0}, + test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: 
1, want: -1}, + test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: 1, want: -1}, + test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: 126, want: 0}, + test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: 126, want: -126}, + test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: 127, want: 0}, + test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: 127, want: -127}, + test_int8{fn: div_0_int8, fnname: "div_0_int8", in: -128, want: 0}, + test_int8{fn: div_0_int8, fnname: "div_0_int8", in: -127, want: 0}, + test_int8{fn: div_0_int8, fnname: "div_0_int8", in: -1, want: 0}, + test_int8{fn: div_0_int8, fnname: "div_0_int8", in: 1, want: 0}, + test_int8{fn: div_0_int8, fnname: "div_0_int8", in: 126, want: 0}, + test_int8{fn: div_0_int8, fnname: "div_0_int8", in: 127, want: 0}, + test_int8{fn: div_1_int8, fnname: "div_1_int8", in: -128, want: 0}, + test_int8{fn: div_int8_1, fnname: "div_int8_1", in: -128, want: -128}, + test_int8{fn: div_1_int8, fnname: "div_1_int8", in: -127, want: 0}, + test_int8{fn: div_int8_1, fnname: "div_int8_1", in: -127, want: -127}, + test_int8{fn: div_1_int8, fnname: "div_1_int8", in: -1, want: -1}, + test_int8{fn: div_int8_1, fnname: "div_int8_1", in: -1, want: -1}, + test_int8{fn: div_int8_1, fnname: "div_int8_1", in: 0, want: 0}, + test_int8{fn: div_1_int8, fnname: "div_1_int8", in: 1, want: 1}, + test_int8{fn: div_int8_1, fnname: "div_int8_1", in: 1, want: 1}, + test_int8{fn: div_1_int8, fnname: "div_1_int8", in: 126, want: 0}, + test_int8{fn: div_int8_1, fnname: "div_int8_1", in: 126, want: 126}, + test_int8{fn: div_1_int8, fnname: "div_1_int8", in: 127, want: 0}, + test_int8{fn: div_int8_1, fnname: "div_int8_1", in: 127, want: 127}, + test_int8{fn: div_126_int8, fnname: "div_126_int8", in: -128, want: 0}, + test_int8{fn: div_int8_126, fnname: "div_int8_126", in: -128, want: -1}, + test_int8{fn: div_126_int8, fnname: "div_126_int8", in: -127, want: 0}, + test_int8{fn: div_int8_126, fnname: "div_int8_126", in: -127, 
want: -1}, + test_int8{fn: div_126_int8, fnname: "div_126_int8", in: -1, want: -126}, + test_int8{fn: div_int8_126, fnname: "div_int8_126", in: -1, want: 0}, + test_int8{fn: div_int8_126, fnname: "div_int8_126", in: 0, want: 0}, + test_int8{fn: div_126_int8, fnname: "div_126_int8", in: 1, want: 126}, + test_int8{fn: div_int8_126, fnname: "div_int8_126", in: 1, want: 0}, + test_int8{fn: div_126_int8, fnname: "div_126_int8", in: 126, want: 1}, + test_int8{fn: div_int8_126, fnname: "div_int8_126", in: 126, want: 1}, + test_int8{fn: div_126_int8, fnname: "div_126_int8", in: 127, want: 0}, + test_int8{fn: div_int8_126, fnname: "div_int8_126", in: 127, want: 1}, + test_int8{fn: div_127_int8, fnname: "div_127_int8", in: -128, want: 0}, + test_int8{fn: div_int8_127, fnname: "div_int8_127", in: -128, want: -1}, + test_int8{fn: div_127_int8, fnname: "div_127_int8", in: -127, want: -1}, + test_int8{fn: div_int8_127, fnname: "div_int8_127", in: -127, want: -1}, + test_int8{fn: div_127_int8, fnname: "div_127_int8", in: -1, want: -127}, + test_int8{fn: div_int8_127, fnname: "div_int8_127", in: -1, want: 0}, + test_int8{fn: div_int8_127, fnname: "div_int8_127", in: 0, want: 0}, + test_int8{fn: div_127_int8, fnname: "div_127_int8", in: 1, want: 127}, + test_int8{fn: div_int8_127, fnname: "div_int8_127", in: 1, want: 0}, + test_int8{fn: div_127_int8, fnname: "div_127_int8", in: 126, want: 1}, + test_int8{fn: div_int8_127, fnname: "div_int8_127", in: 126, want: 0}, + test_int8{fn: div_127_int8, fnname: "div_127_int8", in: 127, want: 1}, + test_int8{fn: div_int8_127, fnname: "div_int8_127", in: 127, want: 1}, + test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: -128, want: 0}, + test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: -128, want: 0}, + test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: -127, want: -128}, + test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: -127, want: -128}, + test_int8{fn: mul_Neg128_int8, fnname: 
"mul_Neg128_int8", in: -1, want: -128}, + test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: -1, want: -128}, + test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: 0, want: 0}, + test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: 0, want: 0}, + test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: 1, want: -128}, + test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: 1, want: -128}, + test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: 126, want: 0}, + test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: 126, want: 0}, + test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: 127, want: -128}, + test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: 127, want: -128}, + test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: -128, want: -128}, + test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: -128, want: -128}, + test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: -127, want: 1}, + test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: -127, want: 1}, + test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: -1, want: 127}, + test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: -1, want: 127}, + test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: 0, want: 0}, + test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: 0, want: 0}, + test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: 1, want: -127}, + test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: 1, want: -127}, + test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: 126, want: 126}, + test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: 126, want: 126}, + test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: 127, want: -1}, + test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: 127, want: -1}, + test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: -128, want: -128}, + test_int8{fn: mul_int8_Neg1, fnname: 
"mul_int8_Neg1", in: -128, want: -128}, + test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: -127, want: 127}, + test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: -127, want: 127}, + test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: -1, want: 1}, + test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: -1, want: 1}, + test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: 0, want: 0}, + test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: 0, want: 0}, + test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: 1, want: -1}, + test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: 1, want: -1}, + test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: 126, want: -126}, + test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: 126, want: -126}, + test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: 127, want: -127}, + test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: 127, want: -127}, + test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: -128, want: 0}, + test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: -128, want: 0}, + test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: -127, want: 0}, + test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: -127, want: 0}, + test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: -1, want: 0}, + test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: -1, want: 0}, + test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: 0, want: 0}, + test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: 0, want: 0}, + test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: 1, want: 0}, + test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: 1, want: 0}, + test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: 126, want: 0}, + test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: 126, want: 0}, + test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: 127, want: 0}, + test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: 127, want: 0}, + test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: -128, want: -128}, + test_int8{fn: 
mul_int8_1, fnname: "mul_int8_1", in: -128, want: -128}, + test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: -127, want: -127}, + test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: -127, want: -127}, + test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: -1, want: -1}, + test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: -1, want: -1}, + test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: 0, want: 0}, + test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: 0, want: 0}, + test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: 1, want: 1}, + test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: 1, want: 1}, + test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: 126, want: 126}, + test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: 126, want: 126}, + test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: 127, want: 127}, + test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: 127, want: 127}, + test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: -128, want: 0}, + test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: -128, want: 0}, + test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: -127, want: 126}, + test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: -127, want: 126}, + test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: -1, want: -126}, + test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: -1, want: -126}, + test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: 0, want: 0}, + test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: 0, want: 0}, + test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: 1, want: 126}, + test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: 1, want: 126}, + test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: 126, want: 4}, + test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: 126, want: 4}, + test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: 127, want: -126}, + test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: 127, want: -126}, + test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: -128, want: 
-128}, + test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: -128, want: -128}, + test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: -127, want: -1}, + test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: -127, want: -1}, + test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: -1, want: -127}, + test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: -1, want: -127}, + test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: 0, want: 0}, + test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: 0, want: 0}, + test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: 1, want: 127}, + test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: 1, want: 127}, + test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: 126, want: -126}, + test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: 126, want: -126}, + test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: 127, want: 1}, + test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: 127, want: 1}, + test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: -128, want: 0}, + test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: -128, want: 0}, + test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: -127, want: -1}, + test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: -127, want: -127}, + test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: -1, want: 0}, + test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: -1, want: -1}, + test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: 0, want: 0}, + test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: 1, want: 0}, + test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: 1, want: 1}, + test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: 126, want: -2}, + test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: 126, want: 126}, + test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: 127, want: -1}, + test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: 127, want: 
127}, + test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: -128, want: -127}, + test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: -128, want: -1}, + test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: -127, want: 0}, + test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: -127, want: 0}, + test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: -1, want: 0}, + test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: -1, want: -1}, + test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: 0, want: 0}, + test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: 1, want: 0}, + test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: 1, want: 1}, + test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: 126, want: -1}, + test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: 126, want: 126}, + test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: 127, want: 0}, + test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: 127, want: 0}, + test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: -128, want: -1}, + test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: -128, want: 0}, + test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: -127, want: -1}, + test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: -127, want: 0}, + test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: -1, want: 0}, + test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: -1, want: 0}, + test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: 0, want: 0}, + test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: 1, want: 0}, + test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: 1, want: 0}, + test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: 126, want: -1}, + test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: 126, want: 0}, + test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: 127, want: -1}, + test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: 
127, want: 0}, + test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: -128, want: 0}, + test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: -127, want: 0}, + test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: -1, want: 0}, + test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: 1, want: 0}, + test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: 126, want: 0}, + test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: 127, want: 0}, + test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: -128, want: 1}, + test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: -128, want: 0}, + test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: -127, want: 1}, + test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: -127, want: 0}, + test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: -1, want: 0}, + test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: -1, want: 0}, + test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: 0, want: 0}, + test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: 1, want: 0}, + test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: 1, want: 0}, + test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: 126, want: 1}, + test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: 126, want: 0}, + test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: 127, want: 1}, + test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: 127, want: 0}, + test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: -128, want: 126}, + test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: -128, want: -2}, + test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: -127, want: 126}, + test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: -127, want: -1}, + test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: -1, want: 0}, + test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: -1, want: -1}, + test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: 0, want: 0}, + test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: 1, want: 0}, + test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: 1, want: 1}, + test_int8{fn: 
mod_126_int8, fnname: "mod_126_int8", in: 126, want: 0}, + test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: 126, want: 0}, + test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: 127, want: 126}, + test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: 127, want: 1}, + test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: -128, want: 127}, + test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: -128, want: -1}, + test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: -127, want: 0}, + test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: -127, want: 0}, + test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: -1, want: 0}, + test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: -1, want: -1}, + test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: 0, want: 0}, + test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: 1, want: 0}, + test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: 1, want: 1}, + test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: 126, want: 1}, + test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: 126, want: 126}, + test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: 127, want: 0}, + test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: 127, want: 0}, + test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: -128, want: -128}, + test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: -128, want: -128}, + test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: -127, want: -128}, + test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: -127, want: -128}, + test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: -1, want: -128}, + test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: -1, want: -128}, + test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: 0, want: 0}, + test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: 0, want: 0}, + test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: 1, want: 0}, + test_int8{fn: and_int8_Neg128, fnname: 
"and_int8_Neg128", in: 1, want: 0}, + test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: 126, want: 0}, + test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: 126, want: 0}, + test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: 127, want: 0}, + test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: 127, want: 0}, + test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: -128, want: -128}, + test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: -128, want: -128}, + test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: -127, want: -127}, + test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: -127, want: -127}, + test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: -1, want: -127}, + test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: -1, want: -127}, + test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: 0, want: 0}, + test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: 0, want: 0}, + test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: 1, want: 1}, + test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: 1, want: 1}, + test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: 126, want: 0}, + test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: 126, want: 0}, + test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: 127, want: 1}, + test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: 127, want: 1}, + test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: -128, want: -128}, + test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: -128, want: -128}, + test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: -127, want: -127}, + test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: -127, want: -127}, + test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: -1, want: -1}, + test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: -1, want: -1}, + test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: 0, 
want: 0}, + test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: 0, want: 0}, + test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: 1, want: 1}, + test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: 1, want: 1}, + test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: 126, want: 126}, + test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: 126, want: 126}, + test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: 127, want: 127}, + test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: 127, want: 127}, + test_int8{fn: and_0_int8, fnname: "and_0_int8", in: -128, want: 0}, + test_int8{fn: and_int8_0, fnname: "and_int8_0", in: -128, want: 0}, + test_int8{fn: and_0_int8, fnname: "and_0_int8", in: -127, want: 0}, + test_int8{fn: and_int8_0, fnname: "and_int8_0", in: -127, want: 0}, + test_int8{fn: and_0_int8, fnname: "and_0_int8", in: -1, want: 0}, + test_int8{fn: and_int8_0, fnname: "and_int8_0", in: -1, want: 0}, + test_int8{fn: and_0_int8, fnname: "and_0_int8", in: 0, want: 0}, + test_int8{fn: and_int8_0, fnname: "and_int8_0", in: 0, want: 0}, + test_int8{fn: and_0_int8, fnname: "and_0_int8", in: 1, want: 0}, + test_int8{fn: and_int8_0, fnname: "and_int8_0", in: 1, want: 0}, + test_int8{fn: and_0_int8, fnname: "and_0_int8", in: 126, want: 0}, + test_int8{fn: and_int8_0, fnname: "and_int8_0", in: 126, want: 0}, + test_int8{fn: and_0_int8, fnname: "and_0_int8", in: 127, want: 0}, + test_int8{fn: and_int8_0, fnname: "and_int8_0", in: 127, want: 0}, + test_int8{fn: and_1_int8, fnname: "and_1_int8", in: -128, want: 0}, + test_int8{fn: and_int8_1, fnname: "and_int8_1", in: -128, want: 0}, + test_int8{fn: and_1_int8, fnname: "and_1_int8", in: -127, want: 1}, + test_int8{fn: and_int8_1, fnname: "and_int8_1", in: -127, want: 1}, + test_int8{fn: and_1_int8, fnname: "and_1_int8", in: -1, want: 1}, + test_int8{fn: and_int8_1, fnname: "and_int8_1", in: -1, want: 1}, + test_int8{fn: and_1_int8, fnname: "and_1_int8", in: 0, want: 0}, + test_int8{fn: 
and_int8_1, fnname: "and_int8_1", in: 0, want: 0}, + test_int8{fn: and_1_int8, fnname: "and_1_int8", in: 1, want: 1}, + test_int8{fn: and_int8_1, fnname: "and_int8_1", in: 1, want: 1}, + test_int8{fn: and_1_int8, fnname: "and_1_int8", in: 126, want: 0}, + test_int8{fn: and_int8_1, fnname: "and_int8_1", in: 126, want: 0}, + test_int8{fn: and_1_int8, fnname: "and_1_int8", in: 127, want: 1}, + test_int8{fn: and_int8_1, fnname: "and_int8_1", in: 127, want: 1}, + test_int8{fn: and_126_int8, fnname: "and_126_int8", in: -128, want: 0}, + test_int8{fn: and_int8_126, fnname: "and_int8_126", in: -128, want: 0}, + test_int8{fn: and_126_int8, fnname: "and_126_int8", in: -127, want: 0}, + test_int8{fn: and_int8_126, fnname: "and_int8_126", in: -127, want: 0}, + test_int8{fn: and_126_int8, fnname: "and_126_int8", in: -1, want: 126}, + test_int8{fn: and_int8_126, fnname: "and_int8_126", in: -1, want: 126}, + test_int8{fn: and_126_int8, fnname: "and_126_int8", in: 0, want: 0}, + test_int8{fn: and_int8_126, fnname: "and_int8_126", in: 0, want: 0}, + test_int8{fn: and_126_int8, fnname: "and_126_int8", in: 1, want: 0}, + test_int8{fn: and_int8_126, fnname: "and_int8_126", in: 1, want: 0}, + test_int8{fn: and_126_int8, fnname: "and_126_int8", in: 126, want: 126}, + test_int8{fn: and_int8_126, fnname: "and_int8_126", in: 126, want: 126}, + test_int8{fn: and_126_int8, fnname: "and_126_int8", in: 127, want: 126}, + test_int8{fn: and_int8_126, fnname: "and_int8_126", in: 127, want: 126}, + test_int8{fn: and_127_int8, fnname: "and_127_int8", in: -128, want: 0}, + test_int8{fn: and_int8_127, fnname: "and_int8_127", in: -128, want: 0}, + test_int8{fn: and_127_int8, fnname: "and_127_int8", in: -127, want: 1}, + test_int8{fn: and_int8_127, fnname: "and_int8_127", in: -127, want: 1}, + test_int8{fn: and_127_int8, fnname: "and_127_int8", in: -1, want: 127}, + test_int8{fn: and_int8_127, fnname: "and_int8_127", in: -1, want: 127}, + test_int8{fn: and_127_int8, fnname: "and_127_int8", in: 0, want: 
0}, + test_int8{fn: and_int8_127, fnname: "and_int8_127", in: 0, want: 0}, + test_int8{fn: and_127_int8, fnname: "and_127_int8", in: 1, want: 1}, + test_int8{fn: and_int8_127, fnname: "and_int8_127", in: 1, want: 1}, + test_int8{fn: and_127_int8, fnname: "and_127_int8", in: 126, want: 126}, + test_int8{fn: and_int8_127, fnname: "and_int8_127", in: 126, want: 126}, + test_int8{fn: and_127_int8, fnname: "and_127_int8", in: 127, want: 127}, + test_int8{fn: and_int8_127, fnname: "and_int8_127", in: 127, want: 127}, + test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: -128, want: -128}, + test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: -128, want: -128}, + test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: -127, want: -127}, + test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: -127, want: -127}, + test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: -1, want: -1}, + test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: -1, want: -1}, + test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: 0, want: -128}, + test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: 0, want: -128}, + test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: 1, want: -127}, + test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: 1, want: -127}, + test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: 126, want: -2}, + test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: 126, want: -2}, + test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: 127, want: -1}, + test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: 127, want: -1}, + test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: -128, want: -127}, + test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: -128, want: -127}, + test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: -127, want: -127}, + test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: -127, want: -127}, + test_int8{fn: or_Neg127_int8, fnname: 
"or_Neg127_int8", in: -1, want: -1}, + test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: -1, want: -1}, + test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: 0, want: -127}, + test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: 0, want: -127}, + test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: 1, want: -127}, + test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: 1, want: -127}, + test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: 126, want: -1}, + test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: 126, want: -1}, + test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: 127, want: -1}, + test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: 127, want: -1}, + test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: -128, want: -1}, + test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: -128, want: -1}, + test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: -127, want: -1}, + test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: -127, want: -1}, + test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: -1, want: -1}, + test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: -1, want: -1}, + test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: 0, want: -1}, + test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: 0, want: -1}, + test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: 1, want: -1}, + test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: 1, want: -1}, + test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: 126, want: -1}, + test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: 126, want: -1}, + test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: 127, want: -1}, + test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: 127, want: -1}, + test_int8{fn: or_0_int8, fnname: "or_0_int8", in: -128, want: -128}, + test_int8{fn: or_int8_0, fnname: "or_int8_0", in: -128, want: -128}, + test_int8{fn: or_0_int8, fnname: "or_0_int8", in: -127, want: -127}, + test_int8{fn: 
or_int8_0, fnname: "or_int8_0", in: -127, want: -127}, + test_int8{fn: or_0_int8, fnname: "or_0_int8", in: -1, want: -1}, + test_int8{fn: or_int8_0, fnname: "or_int8_0", in: -1, want: -1}, + test_int8{fn: or_0_int8, fnname: "or_0_int8", in: 0, want: 0}, + test_int8{fn: or_int8_0, fnname: "or_int8_0", in: 0, want: 0}, + test_int8{fn: or_0_int8, fnname: "or_0_int8", in: 1, want: 1}, + test_int8{fn: or_int8_0, fnname: "or_int8_0", in: 1, want: 1}, + test_int8{fn: or_0_int8, fnname: "or_0_int8", in: 126, want: 126}, + test_int8{fn: or_int8_0, fnname: "or_int8_0", in: 126, want: 126}, + test_int8{fn: or_0_int8, fnname: "or_0_int8", in: 127, want: 127}, + test_int8{fn: or_int8_0, fnname: "or_int8_0", in: 127, want: 127}, + test_int8{fn: or_1_int8, fnname: "or_1_int8", in: -128, want: -127}, + test_int8{fn: or_int8_1, fnname: "or_int8_1", in: -128, want: -127}, + test_int8{fn: or_1_int8, fnname: "or_1_int8", in: -127, want: -127}, + test_int8{fn: or_int8_1, fnname: "or_int8_1", in: -127, want: -127}, + test_int8{fn: or_1_int8, fnname: "or_1_int8", in: -1, want: -1}, + test_int8{fn: or_int8_1, fnname: "or_int8_1", in: -1, want: -1}, + test_int8{fn: or_1_int8, fnname: "or_1_int8", in: 0, want: 1}, + test_int8{fn: or_int8_1, fnname: "or_int8_1", in: 0, want: 1}, + test_int8{fn: or_1_int8, fnname: "or_1_int8", in: 1, want: 1}, + test_int8{fn: or_int8_1, fnname: "or_int8_1", in: 1, want: 1}, + test_int8{fn: or_1_int8, fnname: "or_1_int8", in: 126, want: 127}, + test_int8{fn: or_int8_1, fnname: "or_int8_1", in: 126, want: 127}, + test_int8{fn: or_1_int8, fnname: "or_1_int8", in: 127, want: 127}, + test_int8{fn: or_int8_1, fnname: "or_int8_1", in: 127, want: 127}, + test_int8{fn: or_126_int8, fnname: "or_126_int8", in: -128, want: -2}, + test_int8{fn: or_int8_126, fnname: "or_int8_126", in: -128, want: -2}, + test_int8{fn: or_126_int8, fnname: "or_126_int8", in: -127, want: -1}, + test_int8{fn: or_int8_126, fnname: "or_int8_126", in: -127, want: -1}, + test_int8{fn: or_126_int8, 
fnname: "or_126_int8", in: -1, want: -1}, + test_int8{fn: or_int8_126, fnname: "or_int8_126", in: -1, want: -1}, + test_int8{fn: or_126_int8, fnname: "or_126_int8", in: 0, want: 126}, + test_int8{fn: or_int8_126, fnname: "or_int8_126", in: 0, want: 126}, + test_int8{fn: or_126_int8, fnname: "or_126_int8", in: 1, want: 127}, + test_int8{fn: or_int8_126, fnname: "or_int8_126", in: 1, want: 127}, + test_int8{fn: or_126_int8, fnname: "or_126_int8", in: 126, want: 126}, + test_int8{fn: or_int8_126, fnname: "or_int8_126", in: 126, want: 126}, + test_int8{fn: or_126_int8, fnname: "or_126_int8", in: 127, want: 127}, + test_int8{fn: or_int8_126, fnname: "or_int8_126", in: 127, want: 127}, + test_int8{fn: or_127_int8, fnname: "or_127_int8", in: -128, want: -1}, + test_int8{fn: or_int8_127, fnname: "or_int8_127", in: -128, want: -1}, + test_int8{fn: or_127_int8, fnname: "or_127_int8", in: -127, want: -1}, + test_int8{fn: or_int8_127, fnname: "or_int8_127", in: -127, want: -1}, + test_int8{fn: or_127_int8, fnname: "or_127_int8", in: -1, want: -1}, + test_int8{fn: or_int8_127, fnname: "or_int8_127", in: -1, want: -1}, + test_int8{fn: or_127_int8, fnname: "or_127_int8", in: 0, want: 127}, + test_int8{fn: or_int8_127, fnname: "or_int8_127", in: 0, want: 127}, + test_int8{fn: or_127_int8, fnname: "or_127_int8", in: 1, want: 127}, + test_int8{fn: or_int8_127, fnname: "or_int8_127", in: 1, want: 127}, + test_int8{fn: or_127_int8, fnname: "or_127_int8", in: 126, want: 127}, + test_int8{fn: or_int8_127, fnname: "or_int8_127", in: 126, want: 127}, + test_int8{fn: or_127_int8, fnname: "or_127_int8", in: 127, want: 127}, + test_int8{fn: or_int8_127, fnname: "or_int8_127", in: 127, want: 127}, + test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: -128, want: 0}, + test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: -128, want: 0}, + test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: -127, want: 1}, + test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", 
in: -127, want: 1}, + test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: -1, want: 127}, + test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: -1, want: 127}, + test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: 0, want: -128}, + test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: 0, want: -128}, + test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: 1, want: -127}, + test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: 1, want: -127}, + test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: 126, want: -2}, + test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: 126, want: -2}, + test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: 127, want: -1}, + test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: 127, want: -1}, + test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: -128, want: 1}, + test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: -128, want: 1}, + test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: -127, want: 0}, + test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: -127, want: 0}, + test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: -1, want: 126}, + test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: -1, want: 126}, + test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: 0, want: -127}, + test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: 0, want: -127}, + test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: 1, want: -128}, + test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: 1, want: -128}, + test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: 126, want: -1}, + test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: 126, want: -1}, + test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: 127, want: -2}, + test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: 127, want: -2}, + test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: 
-128, want: 127}, + test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: -128, want: 127}, + test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: -127, want: 126}, + test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: -127, want: 126}, + test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: -1, want: 0}, + test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: -1, want: 0}, + test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: 0, want: -1}, + test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: 0, want: -1}, + test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: 1, want: -2}, + test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: 1, want: -2}, + test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: 126, want: -127}, + test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: 126, want: -127}, + test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: 127, want: -128}, + test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: 127, want: -128}, + test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: -128, want: -128}, + test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: -128, want: -128}, + test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: -127, want: -127}, + test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: -127, want: -127}, + test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: -1, want: -1}, + test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: -1, want: -1}, + test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: 0, want: 0}, + test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: 0, want: 0}, + test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: 1, want: 1}, + test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: 1, want: 1}, + test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: 126, want: 126}, + test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: 126, want: 126}, + test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: 127, want: 127}, + test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: 127, want: 127}, + 
test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: -128, want: -127}, + test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: -128, want: -127}, + test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: -127, want: -128}, + test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: -127, want: -128}, + test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: -1, want: -2}, + test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: -1, want: -2}, + test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: 0, want: 1}, + test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: 0, want: 1}, + test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: 1, want: 0}, + test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: 1, want: 0}, + test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: 126, want: 127}, + test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: 126, want: 127}, + test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: 127, want: 126}, + test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: 127, want: 126}, + test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: -128, want: -2}, + test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: -128, want: -2}, + test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: -127, want: -1}, + test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: -127, want: -1}, + test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: -1, want: -127}, + test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: -1, want: -127}, + test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: 0, want: 126}, + test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: 0, want: 126}, + test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: 1, want: 127}, + test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: 1, want: 127}, + test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: 126, want: 0}, + test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: 126, want: 0}, + test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: 127, want: 1}, + test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: 
127, want: 1}, + test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: -128, want: -1}, + test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: -128, want: -1}, + test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: -127, want: -2}, + test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: -127, want: -2}, + test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: -1, want: -128}, + test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: -1, want: -128}, + test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: 0, want: 127}, + test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: 0, want: 127}, + test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: 1, want: 126}, + test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: 1, want: 126}, + test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: 126, want: 1}, + test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: 126, want: 1}, + test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: 127, want: 0}, + test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: 127, want: 0}} + +// TestArithmeticConst tests results for arithmetic operations against constants. 
+func TestArithmeticConst(t *testing.T) { + for _, test := range tests_uint64 { + if got := test.fn(test.in); got != test.want { + t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want) + } + } + for _, test := range tests_uint64mul { + if got := test.fn(test.in); got != test.want { + t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want) + } + } + for _, test := range tests_int64 { + if got := test.fn(test.in); got != test.want { + t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want) + } + } + for _, test := range tests_int64mul { + if got := test.fn(test.in); got != test.want { + t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want) + } + } + for _, test := range tests_uint32 { + if got := test.fn(test.in); got != test.want { + t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want) + } + } + for _, test := range tests_uint32mul { + if got := test.fn(test.in); got != test.want { + t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want) + } + } + for _, test := range tests_int32 { + if got := test.fn(test.in); got != test.want { + t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want) + } + } + for _, test := range tests_int32mul { + if got := test.fn(test.in); got != test.want { + t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want) + } + } + for _, test := range tests_uint16 { + if got := test.fn(test.in); got != test.want { + t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want) + } + } + for _, test := range tests_int16 { + if got := test.fn(test.in); got != test.want { + t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want) + } + } + for _, test := range tests_uint8 { + if got := test.fn(test.in); got != test.want { + t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want) + } + } + for _, test := range tests_int8 { + if got := test.fn(test.in); got != 
test.want { + t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want) + } + } + +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/arith_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/arith_test.go new file mode 100644 index 0000000000000000000000000000000000000000..cd7b5bc2c4a9858dc1eb79d964789d1a92461827 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/arith_test.go @@ -0,0 +1,1564 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests arithmetic expressions + +package main + +import ( + "math" + "runtime" + "testing" +) + +const ( + y = 0x0fffFFFF +) + +var ( + g8 int8 + g16 int16 + g32 int32 + g64 int64 +) + +//go:noinline +func lshNop1(x uint64) uint64 { + // two outer shifts should be removed + return (((x << 5) >> 2) << 2) +} + +//go:noinline +func lshNop2(x uint64) uint64 { + return (((x << 5) >> 2) << 3) +} + +//go:noinline +func lshNop3(x uint64) uint64 { + return (((x << 5) >> 2) << 6) +} + +//go:noinline +func lshNotNop(x uint64) uint64 { + // outer shift can't be removed + return (((x << 5) >> 2) << 1) +} + +//go:noinline +func rshNop1(x uint64) uint64 { + return (((x >> 5) << 2) >> 2) +} + +//go:noinline +func rshNop2(x uint64) uint64 { + return (((x >> 5) << 2) >> 3) +} + +//go:noinline +func rshNop3(x uint64) uint64 { + return (((x >> 5) << 2) >> 6) +} + +//go:noinline +func rshNotNop(x uint64) uint64 { + return (((x >> 5) << 2) >> 1) +} + +func testShiftRemoval(t *testing.T) { + allSet := ^uint64(0) + if want, got := uint64(0x7ffffffffffffff), rshNop1(allSet); want != got { + t.Errorf("testShiftRemoval rshNop1 failed, wanted %d got %d", want, got) + } + if want, got := uint64(0x3ffffffffffffff), rshNop2(allSet); want != got { + t.Errorf("testShiftRemoval rshNop2 failed, wanted %d got %d", want, got) + } + if 
want, got := uint64(0x7fffffffffffff), rshNop3(allSet); want != got { + t.Errorf("testShiftRemoval rshNop3 failed, wanted %d got %d", want, got) + } + if want, got := uint64(0xffffffffffffffe), rshNotNop(allSet); want != got { + t.Errorf("testShiftRemoval rshNotNop failed, wanted %d got %d", want, got) + } + if want, got := uint64(0xffffffffffffffe0), lshNop1(allSet); want != got { + t.Errorf("testShiftRemoval lshNop1 failed, wanted %d got %d", want, got) + } + if want, got := uint64(0xffffffffffffffc0), lshNop2(allSet); want != got { + t.Errorf("testShiftRemoval lshNop2 failed, wanted %d got %d", want, got) + } + if want, got := uint64(0xfffffffffffffe00), lshNop3(allSet); want != got { + t.Errorf("testShiftRemoval lshNop3 failed, wanted %d got %d", want, got) + } + if want, got := uint64(0x7ffffffffffffff0), lshNotNop(allSet); want != got { + t.Errorf("testShiftRemoval lshNotNop failed, wanted %d got %d", want, got) + } +} + +//go:noinline +func parseLE64(b []byte) uint64 { + // skip the first two bytes, and parse the remaining 8 as a uint64 + return uint64(b[2]) | uint64(b[3])<<8 | uint64(b[4])<<16 | uint64(b[5])<<24 | + uint64(b[6])<<32 | uint64(b[7])<<40 | uint64(b[8])<<48 | uint64(b[9])<<56 +} + +//go:noinline +func parseLE32(b []byte) uint32 { + return uint32(b[2]) | uint32(b[3])<<8 | uint32(b[4])<<16 | uint32(b[5])<<24 +} + +//go:noinline +func parseLE16(b []byte) uint16 { + return uint16(b[2]) | uint16(b[3])<<8 +} + +// testLoadCombine tests for issue #14694 where load combining didn't respect the pointer offset. 
+func testLoadCombine(t *testing.T) { + testData := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09} + if want, got := uint64(0x0908070605040302), parseLE64(testData); want != got { + t.Errorf("testLoadCombine failed, wanted %d got %d", want, got) + } + if want, got := uint32(0x05040302), parseLE32(testData); want != got { + t.Errorf("testLoadCombine failed, wanted %d got %d", want, got) + } + if want, got := uint16(0x0302), parseLE16(testData); want != got { + t.Errorf("testLoadCombine failed, wanted %d got %d", want, got) + } +} + +var loadSymData = [...]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08} + +func testLoadSymCombine(t *testing.T) { + w2 := uint16(0x0201) + g2 := uint16(loadSymData[0]) | uint16(loadSymData[1])<<8 + if g2 != w2 { + t.Errorf("testLoadSymCombine failed, wanted %d got %d", w2, g2) + } + w4 := uint32(0x04030201) + g4 := uint32(loadSymData[0]) | uint32(loadSymData[1])<<8 | + uint32(loadSymData[2])<<16 | uint32(loadSymData[3])<<24 + if g4 != w4 { + t.Errorf("testLoadSymCombine failed, wanted %d got %d", w4, g4) + } + w8 := uint64(0x0807060504030201) + g8 := uint64(loadSymData[0]) | uint64(loadSymData[1])<<8 | + uint64(loadSymData[2])<<16 | uint64(loadSymData[3])<<24 | + uint64(loadSymData[4])<<32 | uint64(loadSymData[5])<<40 | + uint64(loadSymData[6])<<48 | uint64(loadSymData[7])<<56 + if g8 != w8 { + t.Errorf("testLoadSymCombine failed, wanted %d got %d", w8, g8) + } +} + +//go:noinline +func invalidAdd_ssa(x uint32) uint32 { + return x + y + y + y + y + y + y + y + y + y + y + y + y + y + y + y + y + y +} + +//go:noinline +func invalidSub_ssa(x uint32) uint32 { + return x - y - y - y - y - y - y - y - y - y - y - y - y - y - y - y - y - y +} + +//go:noinline +func invalidMul_ssa(x uint32) uint32 { + return x * y * y * y * y * y * y * y * y * y * y * y * y * y * y * y * y * y +} + +// testLargeConst tests a situation where larger than 32 bit consts were passed to ADDL +// causing an invalid instruction error. 
+func testLargeConst(t *testing.T) { + if want, got := uint32(268435440), invalidAdd_ssa(1); want != got { + t.Errorf("testLargeConst add failed, wanted %d got %d", want, got) + } + if want, got := uint32(4026531858), invalidSub_ssa(1); want != got { + t.Errorf("testLargeConst sub failed, wanted %d got %d", want, got) + } + if want, got := uint32(268435455), invalidMul_ssa(1); want != got { + t.Errorf("testLargeConst mul failed, wanted %d got %d", want, got) + } +} + +// testArithRshConst ensures that "const >> const" right shifts correctly perform +// sign extension on the lhs constant +func testArithRshConst(t *testing.T) { + wantu := uint64(0x4000000000000000) + if got := arithRshuConst_ssa(); got != wantu { + t.Errorf("arithRshuConst failed, wanted %d got %d", wantu, got) + } + + wants := int64(-0x4000000000000000) + if got := arithRshConst_ssa(); got != wants { + t.Errorf("arithRshConst failed, wanted %d got %d", wants, got) + } +} + +//go:noinline +func arithRshuConst_ssa() uint64 { + y := uint64(0x8000000000000001) + z := uint64(1) + return uint64(y >> z) +} + +//go:noinline +func arithRshConst_ssa() int64 { + y := int64(-0x8000000000000000) + z := uint64(1) + return int64(y >> z) +} + +//go:noinline +func arithConstShift_ssa(x int64) int64 { + return x >> 100 +} + +// testArithConstShift tests that right shift by large constants preserve +// the sign of the input. +func testArithConstShift(t *testing.T) { + want := int64(-1) + if got := arithConstShift_ssa(-1); want != got { + t.Errorf("arithConstShift_ssa(-1) failed, wanted %d got %d", want, got) + } + want = 0 + if got := arithConstShift_ssa(1); want != got { + t.Errorf("arithConstShift_ssa(1) failed, wanted %d got %d", want, got) + } +} + +// overflowConstShift_ssa verifies that constant folding for shift +// doesn't wrap (i.e. x << MAX_INT << 1 doesn't get folded to x << 0). 
+// +//go:noinline +func overflowConstShift64_ssa(x int64) int64 { + return x << uint64(0xffffffffffffffff) << uint64(1) +} + +//go:noinline +func overflowConstShift32_ssa(x int64) int32 { + return int32(x) << uint32(0xffffffff) << uint32(1) +} + +//go:noinline +func overflowConstShift16_ssa(x int64) int16 { + return int16(x) << uint16(0xffff) << uint16(1) +} + +//go:noinline +func overflowConstShift8_ssa(x int64) int8 { + return int8(x) << uint8(0xff) << uint8(1) +} + +func testOverflowConstShift(t *testing.T) { + want := int64(0) + for x := int64(-127); x < int64(127); x++ { + got := overflowConstShift64_ssa(x) + if want != got { + t.Errorf("overflowShift64 failed, wanted %d got %d", want, got) + } + got = int64(overflowConstShift32_ssa(x)) + if want != got { + t.Errorf("overflowShift32 failed, wanted %d got %d", want, got) + } + got = int64(overflowConstShift16_ssa(x)) + if want != got { + t.Errorf("overflowShift16 failed, wanted %d got %d", want, got) + } + got = int64(overflowConstShift8_ssa(x)) + if want != got { + t.Errorf("overflowShift8 failed, wanted %d got %d", want, got) + } + } +} + +//go:noinline +func rsh64x64ConstOverflow8(x int8) int64 { + return int64(x) >> 9 +} + +//go:noinline +func rsh64x64ConstOverflow16(x int16) int64 { + return int64(x) >> 17 +} + +//go:noinline +func rsh64x64ConstOverflow32(x int32) int64 { + return int64(x) >> 33 +} + +func testArithRightShiftConstOverflow(t *testing.T) { + allSet := int64(-1) + if got, want := rsh64x64ConstOverflow8(0x7f), int64(0); got != want { + t.Errorf("rsh64x64ConstOverflow8 failed: got %v, want %v", got, want) + } + if got, want := rsh64x64ConstOverflow16(0x7fff), int64(0); got != want { + t.Errorf("rsh64x64ConstOverflow16 failed: got %v, want %v", got, want) + } + if got, want := rsh64x64ConstOverflow32(0x7ffffff), int64(0); got != want { + t.Errorf("rsh64x64ConstOverflow32 failed: got %v, want %v", got, want) + } + if got, want := rsh64x64ConstOverflow8(int8(-1)), allSet; got != want { + 
t.Errorf("rsh64x64ConstOverflow8 failed: got %v, want %v", got, want) + } + if got, want := rsh64x64ConstOverflow16(int16(-1)), allSet; got != want { + t.Errorf("rsh64x64ConstOverflow16 failed: got %v, want %v", got, want) + } + if got, want := rsh64x64ConstOverflow32(int32(-1)), allSet; got != want { + t.Errorf("rsh64x64ConstOverflow32 failed: got %v, want %v", got, want) + } +} + +//go:noinline +func rsh64Ux64ConstOverflow8(x uint8) uint64 { + return uint64(x) >> 9 +} + +//go:noinline +func rsh64Ux64ConstOverflow16(x uint16) uint64 { + return uint64(x) >> 17 +} + +//go:noinline +func rsh64Ux64ConstOverflow32(x uint32) uint64 { + return uint64(x) >> 33 +} + +func testRightShiftConstOverflow(t *testing.T) { + if got, want := rsh64Ux64ConstOverflow8(0xff), uint64(0); got != want { + t.Errorf("rsh64Ux64ConstOverflow8 failed: got %v, want %v", got, want) + } + if got, want := rsh64Ux64ConstOverflow16(0xffff), uint64(0); got != want { + t.Errorf("rsh64Ux64ConstOverflow16 failed: got %v, want %v", got, want) + } + if got, want := rsh64Ux64ConstOverflow32(0xffffffff), uint64(0); got != want { + t.Errorf("rsh64Ux64ConstOverflow32 failed: got %v, want %v", got, want) + } +} + +// test64BitConstMult tests that rewrite rules don't fold 64 bit constants +// into multiply instructions. +func test64BitConstMult(t *testing.T) { + want := int64(103079215109) + if got := test64BitConstMult_ssa(1, 2); want != got { + t.Errorf("test64BitConstMult failed, wanted %d got %d", want, got) + } +} + +//go:noinline +func test64BitConstMult_ssa(a, b int64) int64 { + return 34359738369*a + b*34359738370 +} + +// test64BitConstAdd tests that rewrite rules don't fold 64 bit constants +// into add instructions. 
+func test64BitConstAdd(t *testing.T) { + want := int64(3567671782835376650) + if got := test64BitConstAdd_ssa(1, 2); want != got { + t.Errorf("test64BitConstAdd failed, wanted %d got %d", want, got) + } +} + +//go:noinline +func test64BitConstAdd_ssa(a, b int64) int64 { + return a + 575815584948629622 + b + 2991856197886747025 +} + +// testRegallocCVSpill tests that regalloc spills a value whose last use is the +// current value. +func testRegallocCVSpill(t *testing.T) { + want := int8(-9) + if got := testRegallocCVSpill_ssa(1, 2, 3, 4); want != got { + t.Errorf("testRegallocCVSpill failed, wanted %d got %d", want, got) + } +} + +//go:noinline +func testRegallocCVSpill_ssa(a, b, c, d int8) int8 { + return a + -32 + b + 63*c*-87*d +} + +func testBitwiseLogic(t *testing.T) { + a, b := uint32(57623283), uint32(1314713839) + if want, got := uint32(38551779), testBitwiseAnd_ssa(a, b); want != got { + t.Errorf("testBitwiseAnd failed, wanted %d got %d", want, got) + } + if want, got := uint32(1333785343), testBitwiseOr_ssa(a, b); want != got { + t.Errorf("testBitwiseOr failed, wanted %d got %d", want, got) + } + if want, got := uint32(1295233564), testBitwiseXor_ssa(a, b); want != got { + t.Errorf("testBitwiseXor failed, wanted %d got %d", want, got) + } + if want, got := int32(832), testBitwiseLsh_ssa(13, 4, 2); want != got { + t.Errorf("testBitwiseLsh failed, wanted %d got %d", want, got) + } + if want, got := int32(0), testBitwiseLsh_ssa(13, 25, 15); want != got { + t.Errorf("testBitwiseLsh failed, wanted %d got %d", want, got) + } + if want, got := int32(0), testBitwiseLsh_ssa(-13, 25, 15); want != got { + t.Errorf("testBitwiseLsh failed, wanted %d got %d", want, got) + } + if want, got := int32(-13), testBitwiseRsh_ssa(-832, 4, 2); want != got { + t.Errorf("testBitwiseRsh failed, wanted %d got %d", want, got) + } + if want, got := int32(0), testBitwiseRsh_ssa(13, 25, 15); want != got { + t.Errorf("testBitwiseRsh failed, wanted %d got %d", want, got) + } + if want, 
got := int32(-1), testBitwiseRsh_ssa(-13, 25, 15); want != got { + t.Errorf("testBitwiseRsh failed, wanted %d got %d", want, got) + } + if want, got := uint32(0x3ffffff), testBitwiseRshU_ssa(0xffffffff, 4, 2); want != got { + t.Errorf("testBitwiseRshU failed, wanted %d got %d", want, got) + } + if want, got := uint32(0), testBitwiseRshU_ssa(13, 25, 15); want != got { + t.Errorf("testBitwiseRshU failed, wanted %d got %d", want, got) + } + if want, got := uint32(0), testBitwiseRshU_ssa(0x8aaaaaaa, 25, 15); want != got { + t.Errorf("testBitwiseRshU failed, wanted %d got %d", want, got) + } +} + +//go:noinline +func testBitwiseAnd_ssa(a, b uint32) uint32 { + return a & b +} + +//go:noinline +func testBitwiseOr_ssa(a, b uint32) uint32 { + return a | b +} + +//go:noinline +func testBitwiseXor_ssa(a, b uint32) uint32 { + return a ^ b +} + +//go:noinline +func testBitwiseLsh_ssa(a int32, b, c uint32) int32 { + return a << b << c +} + +//go:noinline +func testBitwiseRsh_ssa(a int32, b, c uint32) int32 { + return a >> b >> c +} + +//go:noinline +func testBitwiseRshU_ssa(a uint32, b, c uint32) uint32 { + return a >> b >> c +} + +//go:noinline +func testShiftCX_ssa() int { + v1 := uint8(3) + v4 := (v1 * v1) ^ v1 | v1 - v1 - v1&v1 ^ uint8(3+2) + v1*1>>0 - v1 | 1 | v1<<(2*3|0-0*0^1) + v5 := v4>>(3-0-uint(3)) | v1 | v1 + v1 ^ v4<<(0+1|3&1)<<(uint64(1)<<0*2*0<<0) ^ v1 + v6 := v5 ^ (v1+v1)*v1 | v1 | v1*v1>>(v1&v1)>>(uint(1)<<0*uint(3)>>1)*v1<<2*v1<>2 | (v4 - v1) ^ v1 + v1 ^ v1>>1 | v1 + v1 - v1 ^ v1 + v7 := v6 & v5 << 0 + v1++ + v11 := 2&1 ^ 0 + 3 | int(0^0)<<1>>(1*0*3) ^ 0*0 ^ 3&0*3&3 ^ 3*3 ^ 1 ^ int(2)<<(2*3) + 2 | 2 | 2 ^ 2 + 1 | 3 | 0 ^ int(1)>>1 ^ 2 // int + v7-- + return int(uint64(2*1)<<(3-2)<>v7)-2)&v11 | v11 - int(2)<<0>>(2-1)*(v11*0&v11<<1<<(uint8(2)+v4)) +} + +func testShiftCX(t *testing.T) { + want := 141 + if got := testShiftCX_ssa(); want != got { + t.Errorf("testShiftCX failed, wanted %d got %d", want, got) + } +} + +// testSubqToNegq ensures that the SUBQ -> NEGQ 
translation works correctly. +func testSubqToNegq(t *testing.T) { + want := int64(-318294940372190156) + if got := testSubqToNegq_ssa(1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2); want != got { + t.Errorf("testSubqToNegq failed, wanted %d got %d", want, got) + } +} + +//go:noinline +func testSubqToNegq_ssa(a, b, c, d, e, f, g, h, i, j, k int64) int64 { + return a + 8207351403619448057 - b - 1779494519303207690 + c*8810076340510052032*d - 4465874067674546219 - e*4361839741470334295 - f + 8688847565426072650*g*8065564729145417479 +} + +func testOcom(t *testing.T) { + want1, want2 := int32(0x55555555), int32(-0x55555556) + if got1, got2 := testOcom_ssa(0x55555555, 0x55555555); want1 != got1 || want2 != got2 { + t.Errorf("testOcom failed, wanted %d and %d got %d and %d", want1, want2, got1, got2) + } +} + +//go:noinline +func testOcom_ssa(a, b int32) (int32, int32) { + return ^^^^a, ^^^^^b +} + +func lrot1_ssa(w uint8, x uint16, y uint32, z uint64) (a uint8, b uint16, c uint32, d uint64) { + a = (w << 5) | (w >> 3) + b = (x << 13) | (x >> 3) + c = (y << 29) | (y >> 3) + d = (z << 61) | (z >> 3) + return +} + +//go:noinline +func lrot2_ssa(w, n uint32) uint32 { + // Want to be sure that a "rotate by 32" which + // is really 0 | (w >> 0) == w + // is correctly compiled. + return (w << n) | (w >> (32 - n)) +} + +//go:noinline +func lrot3_ssa(w uint32) uint32 { + // Want to be sure that a "rotate by 32" which + // is really 0 | (w >> 0) == w + // is correctly compiled. 
+ return (w << 32) | (w >> (32 - 32)) +} + +func testLrot(t *testing.T) { + wantA, wantB, wantC, wantD := uint8(0xe1), uint16(0xe001), + uint32(0xe0000001), uint64(0xe000000000000001) + a, b, c, d := lrot1_ssa(0xf, 0xf, 0xf, 0xf) + if a != wantA || b != wantB || c != wantC || d != wantD { + t.Errorf("lrot1_ssa(0xf, 0xf, 0xf, 0xf)=%d %d %d %d, got %d %d %d %d", wantA, wantB, wantC, wantD, a, b, c, d) + } + x := lrot2_ssa(0xb0000001, 32) + wantX := uint32(0xb0000001) + if x != wantX { + t.Errorf("lrot2_ssa(0xb0000001, 32)=%d, got %d", wantX, x) + } + x = lrot3_ssa(0xb0000001) + if x != wantX { + t.Errorf("lrot3_ssa(0xb0000001)=%d, got %d", wantX, x) + } + +} + +//go:noinline +func sub1_ssa() uint64 { + v1 := uint64(3) // uint64 + return v1*v1 - (v1&v1)&v1 +} + +//go:noinline +func sub2_ssa() uint8 { + v1 := uint8(0) + v3 := v1 + v1 + v1 ^ v1 | 3 + v1 ^ v1 | v1 ^ v1 + v1-- // dev.ssa doesn't see this one + return v1 ^ v1*v1 - v3 +} + +func testSubConst(t *testing.T) { + x1 := sub1_ssa() + want1 := uint64(6) + if x1 != want1 { + t.Errorf("sub1_ssa()=%d, got %d", want1, x1) + } + x2 := sub2_ssa() + want2 := uint8(251) + if x2 != want2 { + t.Errorf("sub2_ssa()=%d, got %d", want2, x2) + } +} + +//go:noinline +func orPhi_ssa(a bool, x int) int { + v := 0 + if a { + v = -1 + } else { + v = -1 + } + return x | v +} + +func testOrPhi(t *testing.T) { + if want, got := -1, orPhi_ssa(true, 4); got != want { + t.Errorf("orPhi_ssa(true, 4)=%d, want %d", got, want) + } + if want, got := -1, orPhi_ssa(false, 0); got != want { + t.Errorf("orPhi_ssa(false, 0)=%d, want %d", got, want) + } +} + +//go:noinline +func addshiftLL_ssa(a, b uint32) uint32 { + return a + b<<3 +} + +//go:noinline +func subshiftLL_ssa(a, b uint32) uint32 { + return a - b<<3 +} + +//go:noinline +func rsbshiftLL_ssa(a, b uint32) uint32 { + return a<<3 - b +} + +//go:noinline +func andshiftLL_ssa(a, b uint32) uint32 { + return a & (b << 3) +} + +//go:noinline +func orshiftLL_ssa(a, b uint32) uint32 { + return a | 
b<<3 +} + +//go:noinline +func xorshiftLL_ssa(a, b uint32) uint32 { + return a ^ b<<3 +} + +//go:noinline +func bicshiftLL_ssa(a, b uint32) uint32 { + return a &^ (b << 3) +} + +//go:noinline +func notshiftLL_ssa(a uint32) uint32 { + return ^(a << 3) +} + +//go:noinline +func addshiftRL_ssa(a, b uint32) uint32 { + return a + b>>3 +} + +//go:noinline +func subshiftRL_ssa(a, b uint32) uint32 { + return a - b>>3 +} + +//go:noinline +func rsbshiftRL_ssa(a, b uint32) uint32 { + return a>>3 - b +} + +//go:noinline +func andshiftRL_ssa(a, b uint32) uint32 { + return a & (b >> 3) +} + +//go:noinline +func orshiftRL_ssa(a, b uint32) uint32 { + return a | b>>3 +} + +//go:noinline +func xorshiftRL_ssa(a, b uint32) uint32 { + return a ^ b>>3 +} + +//go:noinline +func bicshiftRL_ssa(a, b uint32) uint32 { + return a &^ (b >> 3) +} + +//go:noinline +func notshiftRL_ssa(a uint32) uint32 { + return ^(a >> 3) +} + +//go:noinline +func addshiftRA_ssa(a, b int32) int32 { + return a + b>>3 +} + +//go:noinline +func subshiftRA_ssa(a, b int32) int32 { + return a - b>>3 +} + +//go:noinline +func rsbshiftRA_ssa(a, b int32) int32 { + return a>>3 - b +} + +//go:noinline +func andshiftRA_ssa(a, b int32) int32 { + return a & (b >> 3) +} + +//go:noinline +func orshiftRA_ssa(a, b int32) int32 { + return a | b>>3 +} + +//go:noinline +func xorshiftRA_ssa(a, b int32) int32 { + return a ^ b>>3 +} + +//go:noinline +func bicshiftRA_ssa(a, b int32) int32 { + return a &^ (b >> 3) +} + +//go:noinline +func notshiftRA_ssa(a int32) int32 { + return ^(a >> 3) +} + +//go:noinline +func addshiftLLreg_ssa(a, b uint32, s uint8) uint32 { + return a + b<>s +} + +//go:noinline +func subshiftRLreg_ssa(a, b uint32, s uint8) uint32 { + return a - b>>s +} + +//go:noinline +func rsbshiftRLreg_ssa(a, b uint32, s uint8) uint32 { + return a>>s - b +} + +//go:noinline +func andshiftRLreg_ssa(a, b uint32, s uint8) uint32 { + return a & (b >> s) +} + +//go:noinline +func orshiftRLreg_ssa(a, b uint32, s uint8) uint32 { + 
return a | b>>s +} + +//go:noinline +func xorshiftRLreg_ssa(a, b uint32, s uint8) uint32 { + return a ^ b>>s +} + +//go:noinline +func bicshiftRLreg_ssa(a, b uint32, s uint8) uint32 { + return a &^ (b >> s) +} + +//go:noinline +func notshiftRLreg_ssa(a uint32, s uint8) uint32 { + return ^(a >> s) +} + +//go:noinline +func addshiftRAreg_ssa(a, b int32, s uint8) int32 { + return a + b>>s +} + +//go:noinline +func subshiftRAreg_ssa(a, b int32, s uint8) int32 { + return a - b>>s +} + +//go:noinline +func rsbshiftRAreg_ssa(a, b int32, s uint8) int32 { + return a>>s - b +} + +//go:noinline +func andshiftRAreg_ssa(a, b int32, s uint8) int32 { + return a & (b >> s) +} + +//go:noinline +func orshiftRAreg_ssa(a, b int32, s uint8) int32 { + return a | b>>s +} + +//go:noinline +func xorshiftRAreg_ssa(a, b int32, s uint8) int32 { + return a ^ b>>s +} + +//go:noinline +func bicshiftRAreg_ssa(a, b int32, s uint8) int32 { + return a &^ (b >> s) +} + +//go:noinline +func notshiftRAreg_ssa(a int32, s uint8) int32 { + return ^(a >> s) +} + +// test ARM shifted ops +func testShiftedOps(t *testing.T) { + a, b := uint32(10), uint32(42) + if want, got := a+b<<3, addshiftLL_ssa(a, b); got != want { + t.Errorf("addshiftLL_ssa(10, 42) = %d want %d", got, want) + } + if want, got := a-b<<3, subshiftLL_ssa(a, b); got != want { + t.Errorf("subshiftLL_ssa(10, 42) = %d want %d", got, want) + } + if want, got := a<<3-b, rsbshiftLL_ssa(a, b); got != want { + t.Errorf("rsbshiftLL_ssa(10, 42) = %d want %d", got, want) + } + if want, got := a&(b<<3), andshiftLL_ssa(a, b); got != want { + t.Errorf("andshiftLL_ssa(10, 42) = %d want %d", got, want) + } + if want, got := a|b<<3, orshiftLL_ssa(a, b); got != want { + t.Errorf("orshiftLL_ssa(10, 42) = %d want %d", got, want) + } + if want, got := a^b<<3, xorshiftLL_ssa(a, b); got != want { + t.Errorf("xorshiftLL_ssa(10, 42) = %d want %d", got, want) + } + if want, got := a&^(b<<3), bicshiftLL_ssa(a, b); got != want { + t.Errorf("bicshiftLL_ssa(10, 42) = %d 
want %d", got, want) + } + if want, got := ^(a << 3), notshiftLL_ssa(a); got != want { + t.Errorf("notshiftLL_ssa(10) = %d want %d", got, want) + } + if want, got := a+b>>3, addshiftRL_ssa(a, b); got != want { + t.Errorf("addshiftRL_ssa(10, 42) = %d want %d", got, want) + } + if want, got := a-b>>3, subshiftRL_ssa(a, b); got != want { + t.Errorf("subshiftRL_ssa(10, 42) = %d want %d", got, want) + } + if want, got := a>>3-b, rsbshiftRL_ssa(a, b); got != want { + t.Errorf("rsbshiftRL_ssa(10, 42) = %d want %d", got, want) + } + if want, got := a&(b>>3), andshiftRL_ssa(a, b); got != want { + t.Errorf("andshiftRL_ssa(10, 42) = %d want %d", got, want) + } + if want, got := a|b>>3, orshiftRL_ssa(a, b); got != want { + t.Errorf("orshiftRL_ssa(10, 42) = %d want %d", got, want) + } + if want, got := a^b>>3, xorshiftRL_ssa(a, b); got != want { + t.Errorf("xorshiftRL_ssa(10, 42) = %d want %d", got, want) + } + if want, got := a&^(b>>3), bicshiftRL_ssa(a, b); got != want { + t.Errorf("bicshiftRL_ssa(10, 42) = %d want %d", got, want) + } + if want, got := ^(a >> 3), notshiftRL_ssa(a); got != want { + t.Errorf("notshiftRL_ssa(10) = %d want %d", got, want) + } + c, d := int32(10), int32(-42) + if want, got := c+d>>3, addshiftRA_ssa(c, d); got != want { + t.Errorf("addshiftRA_ssa(10, -42) = %d want %d", got, want) + } + if want, got := c-d>>3, subshiftRA_ssa(c, d); got != want { + t.Errorf("subshiftRA_ssa(10, -42) = %d want %d", got, want) + } + if want, got := c>>3-d, rsbshiftRA_ssa(c, d); got != want { + t.Errorf("rsbshiftRA_ssa(10, -42) = %d want %d", got, want) + } + if want, got := c&(d>>3), andshiftRA_ssa(c, d); got != want { + t.Errorf("andshiftRA_ssa(10, -42) = %d want %d", got, want) + } + if want, got := c|d>>3, orshiftRA_ssa(c, d); got != want { + t.Errorf("orshiftRA_ssa(10, -42) = %d want %d", got, want) + } + if want, got := c^d>>3, xorshiftRA_ssa(c, d); got != want { + t.Errorf("xorshiftRA_ssa(10, -42) = %d want %d", got, want) + } + if want, got := c&^(d>>3), 
bicshiftRA_ssa(c, d); got != want { + t.Errorf("bicshiftRA_ssa(10, -42) = %d want %d", got, want) + } + if want, got := ^(d >> 3), notshiftRA_ssa(d); got != want { + t.Errorf("notshiftRA_ssa(-42) = %d want %d", got, want) + } + s := uint8(3) + if want, got := a+b<>s, addshiftRLreg_ssa(a, b, s); got != want { + t.Errorf("addshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want) + } + if want, got := a-b>>s, subshiftRLreg_ssa(a, b, s); got != want { + t.Errorf("subshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want) + } + if want, got := a>>s-b, rsbshiftRLreg_ssa(a, b, s); got != want { + t.Errorf("rsbshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want) + } + if want, got := a&(b>>s), andshiftRLreg_ssa(a, b, s); got != want { + t.Errorf("andshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want) + } + if want, got := a|b>>s, orshiftRLreg_ssa(a, b, s); got != want { + t.Errorf("orshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want) + } + if want, got := a^b>>s, xorshiftRLreg_ssa(a, b, s); got != want { + t.Errorf("xorshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want) + } + if want, got := a&^(b>>s), bicshiftRLreg_ssa(a, b, s); got != want { + t.Errorf("bicshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want) + } + if want, got := ^(a >> s), notshiftRLreg_ssa(a, s); got != want { + t.Errorf("notshiftRLreg_ssa(10) = %d want %d", got, want) + } + if want, got := c+d>>s, addshiftRAreg_ssa(c, d, s); got != want { + t.Errorf("addshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want) + } + if want, got := c-d>>s, subshiftRAreg_ssa(c, d, s); got != want { + t.Errorf("subshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want) + } + if want, got := c>>s-d, rsbshiftRAreg_ssa(c, d, s); got != want { + t.Errorf("rsbshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want) + } + if want, got := c&(d>>s), andshiftRAreg_ssa(c, d, s); got != want { + t.Errorf("andshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want) + } + if want, got := c|d>>s, orshiftRAreg_ssa(c, d, s); got != want { + 
t.Errorf("orshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want) + } + if want, got := c^d>>s, xorshiftRAreg_ssa(c, d, s); got != want { + t.Errorf("xorshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want) + } + if want, got := c&^(d>>s), bicshiftRAreg_ssa(c, d, s); got != want { + t.Errorf("bicshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want) + } + if want, got := ^(d >> s), notshiftRAreg_ssa(d, s); got != want { + t.Errorf("notshiftRAreg_ssa(-42, 3) = %d want %d", got, want) + } +} + +// TestArithmetic tests that both backends have the same result for arithmetic expressions. +func TestArithmetic(t *testing.T) { + test64BitConstMult(t) + test64BitConstAdd(t) + testRegallocCVSpill(t) + testSubqToNegq(t) + testBitwiseLogic(t) + testOcom(t) + testLrot(t) + testShiftCX(t) + testSubConst(t) + testOverflowConstShift(t) + testArithRightShiftConstOverflow(t) + testRightShiftConstOverflow(t) + testArithConstShift(t) + testArithRshConst(t) + testLargeConst(t) + testLoadCombine(t) + testLoadSymCombine(t) + testShiftRemoval(t) + testShiftedOps(t) + testDivFixUp(t) + testDivisibleSignedPow2(t) + testDivisibility(t) +} + +// testDivFixUp ensures that signed division fix-ups are being generated. 
+func testDivFixUp(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Error("testDivFixUp failed") + if e, ok := r.(runtime.Error); ok { + t.Logf("%v\n", e.Error()) + } + } + }() + var w int8 = -128 + var x int16 = -32768 + var y int32 = -2147483648 + var z int64 = -9223372036854775808 + + for i := -5; i < 0; i++ { + g8 = w / int8(i) + g16 = x / int16(i) + g32 = y / int32(i) + g64 = z / int64(i) + g8 = w % int8(i) + g16 = x % int16(i) + g32 = y % int32(i) + g64 = z % int64(i) + } +} + +//go:noinline +func divisible_int8_2to1(x int8) bool { + return x%(1<<1) == 0 +} + +//go:noinline +func divisible_int8_2to2(x int8) bool { + return x%(1<<2) == 0 +} + +//go:noinline +func divisible_int8_2to3(x int8) bool { + return x%(1<<3) == 0 +} + +//go:noinline +func divisible_int8_2to4(x int8) bool { + return x%(1<<4) == 0 +} + +//go:noinline +func divisible_int8_2to5(x int8) bool { + return x%(1<<5) == 0 +} + +//go:noinline +func divisible_int8_2to6(x int8) bool { + return x%(1<<6) == 0 +} + +//go:noinline +func divisible_int16_2to1(x int16) bool { + return x%(1<<1) == 0 +} + +//go:noinline +func divisible_int16_2to2(x int16) bool { + return x%(1<<2) == 0 +} + +//go:noinline +func divisible_int16_2to3(x int16) bool { + return x%(1<<3) == 0 +} + +//go:noinline +func divisible_int16_2to4(x int16) bool { + return x%(1<<4) == 0 +} + +//go:noinline +func divisible_int16_2to5(x int16) bool { + return x%(1<<5) == 0 +} + +//go:noinline +func divisible_int16_2to6(x int16) bool { + return x%(1<<6) == 0 +} + +//go:noinline +func divisible_int16_2to7(x int16) bool { + return x%(1<<7) == 0 +} + +//go:noinline +func divisible_int16_2to8(x int16) bool { + return x%(1<<8) == 0 +} + +//go:noinline +func divisible_int16_2to9(x int16) bool { + return x%(1<<9) == 0 +} + +//go:noinline +func divisible_int16_2to10(x int16) bool { + return x%(1<<10) == 0 +} + +//go:noinline +func divisible_int16_2to11(x int16) bool { + return x%(1<<11) == 0 +} + +//go:noinline +func 
divisible_int16_2to12(x int16) bool { + return x%(1<<12) == 0 +} + +//go:noinline +func divisible_int16_2to13(x int16) bool { + return x%(1<<13) == 0 +} + +//go:noinline +func divisible_int16_2to14(x int16) bool { + return x%(1<<14) == 0 +} + +//go:noinline +func divisible_int32_2to4(x int32) bool { + return x%(1<<4) == 0 +} + +//go:noinline +func divisible_int32_2to15(x int32) bool { + return x%(1<<15) == 0 +} + +//go:noinline +func divisible_int32_2to26(x int32) bool { + return x%(1<<26) == 0 +} + +//go:noinline +func divisible_int64_2to4(x int64) bool { + return x%(1<<4) == 0 +} + +//go:noinline +func divisible_int64_2to15(x int64) bool { + return x%(1<<15) == 0 +} + +//go:noinline +func divisible_int64_2to26(x int64) bool { + return x%(1<<26) == 0 +} + +//go:noinline +func divisible_int64_2to34(x int64) bool { + return x%(1<<34) == 0 +} + +//go:noinline +func divisible_int64_2to48(x int64) bool { + return x%(1<<48) == 0 +} + +//go:noinline +func divisible_int64_2to57(x int64) bool { + return x%(1<<57) == 0 +} + +// testDivisibleSignedPow2 confirms that x%(1<> 8) | ((c & 0x00ff00ff00ff00ff) << 8) + return b +} + +//go:noinline +func genREV16_2(c uint64) uint64 { + b := ((c & 0xff00ff00) >> 8) | ((c & 0x00ff00ff) << 8) + return b +} + +//go:noinline +func genREV16W(c uint32) uint32 { + b := ((c & 0xff00ff00) >> 8) | ((c & 0x00ff00ff) << 8) + return b +} + +func TestREV16(t *testing.T) { + x := uint64(0x8f7f6f5f4f3f2f1f) + want1 := uint64(0x7f8f5f6f3f4f1f2f) + want2 := uint64(0x3f4f1f2f) + + got1 := genREV16_1(x) + if got1 != want1 { + t.Errorf("genREV16_1(%#x) = %#x want %#x", x, got1, want1) + } + got2 := genREV16_2(x) + if got2 != want2 { + t.Errorf("genREV16_2(%#x) = %#x want %#x", x, got2, want2) + } +} + +func TestREV16W(t *testing.T) { + x := uint32(0x4f3f2f1f) + want := uint32(0x3f4f1f2f) + + got := genREV16W(x) + if got != want { + t.Errorf("genREV16W(%#x) = %#x want %#x", x, got, want) + } +} diff --git 
a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/array_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/array_test.go new file mode 100644 index 0000000000000000000000000000000000000000..efa00d0520fca89be888729ed5ff974c89669402 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/array_test.go @@ -0,0 +1,132 @@ +package main + +import "testing" + +//go:noinline +func testSliceLenCap12_ssa(a [10]int, i, j int) (int, int) { + b := a[i:j] + return len(b), cap(b) +} + +//go:noinline +func testSliceLenCap1_ssa(a [10]int, i, j int) (int, int) { + b := a[i:] + return len(b), cap(b) +} + +//go:noinline +func testSliceLenCap2_ssa(a [10]int, i, j int) (int, int) { + b := a[:j] + return len(b), cap(b) +} + +func testSliceLenCap(t *testing.T) { + a := [10]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} + tests := [...]struct { + fn func(a [10]int, i, j int) (int, int) + i, j int // slice range + l, c int // len, cap + }{ + // -1 means the value is not used. 
+ {testSliceLenCap12_ssa, 0, 0, 0, 10}, + {testSliceLenCap12_ssa, 0, 1, 1, 10}, + {testSliceLenCap12_ssa, 0, 10, 10, 10}, + {testSliceLenCap12_ssa, 10, 10, 0, 0}, + {testSliceLenCap12_ssa, 0, 5, 5, 10}, + {testSliceLenCap12_ssa, 5, 5, 0, 5}, + {testSliceLenCap12_ssa, 5, 10, 5, 5}, + {testSliceLenCap1_ssa, 0, -1, 0, 10}, + {testSliceLenCap1_ssa, 5, -1, 5, 5}, + {testSliceLenCap1_ssa, 10, -1, 0, 0}, + {testSliceLenCap2_ssa, -1, 0, 0, 10}, + {testSliceLenCap2_ssa, -1, 5, 5, 10}, + {testSliceLenCap2_ssa, -1, 10, 10, 10}, + } + + for i, test := range tests { + if l, c := test.fn(a, test.i, test.j); l != test.l && c != test.c { + t.Errorf("#%d len(a[%d:%d]), cap(a[%d:%d]) = %d %d, want %d %d", i, test.i, test.j, test.i, test.j, l, c, test.l, test.c) + } + } +} + +//go:noinline +func testSliceGetElement_ssa(a [10]int, i, j, p int) int { + return a[i:j][p] +} + +func testSliceGetElement(t *testing.T) { + a := [10]int{0, 10, 20, 30, 40, 50, 60, 70, 80, 90} + tests := [...]struct { + i, j, p int + want int // a[i:j][p] + }{ + {0, 10, 2, 20}, + {0, 5, 4, 40}, + {5, 10, 3, 80}, + {1, 9, 7, 80}, + } + + for i, test := range tests { + if got := testSliceGetElement_ssa(a, test.i, test.j, test.p); got != test.want { + t.Errorf("#%d a[%d:%d][%d] = %d, wanted %d", i, test.i, test.j, test.p, got, test.want) + } + } +} + +//go:noinline +func testSliceSetElement_ssa(a *[10]int, i, j, p, x int) { + (*a)[i:j][p] = x +} + +func testSliceSetElement(t *testing.T) { + a := [10]int{0, 10, 20, 30, 40, 50, 60, 70, 80, 90} + tests := [...]struct { + i, j, p int + want int // a[i:j][p] + }{ + {0, 10, 2, 17}, + {0, 5, 4, 11}, + {5, 10, 3, 28}, + {1, 9, 7, 99}, + } + + for i, test := range tests { + testSliceSetElement_ssa(&a, test.i, test.j, test.p, test.want) + if got := a[test.i+test.p]; got != test.want { + t.Errorf("#%d a[%d:%d][%d] = %d, wanted %d", i, test.i, test.j, test.p, got, test.want) + } + } +} + +func testSlicePanic1(t *testing.T) { + defer func() { + if r := recover(); r != nil { + 
//println("panicked as expected") + } + }() + + a := [10]int{0, 10, 20, 30, 40, 50, 60, 70, 80, 90} + testSliceLenCap12_ssa(a, 3, 12) + t.Errorf("expected to panic, but didn't") +} + +func testSlicePanic2(t *testing.T) { + defer func() { + if r := recover(); r != nil { + //println("panicked as expected") + } + }() + + a := [10]int{0, 10, 20, 30, 40, 50, 60, 70, 80, 90} + testSliceGetElement_ssa(a, 3, 7, 4) + t.Errorf("expected to panic, but didn't") +} + +func TestArray(t *testing.T) { + testSliceLenCap(t) + testSliceGetElement(t) + testSliceSetElement(t) + testSlicePanic1(t) + testSlicePanic2(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/assert_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/assert_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4326be80790715f83121355f49f0948e1a93aa14 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/assert_test.go @@ -0,0 +1,128 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Tests type assertion expressions and statements + +package main + +import ( + "runtime" + "testing" +) + +type ( + S struct{} + U struct{} + + I interface { + F() + } +) + +var ( + s *S + u *U +) + +func (s *S) F() {} +func (u *U) F() {} + +func e2t_ssa(e interface{}) *U { + return e.(*U) +} + +func i2t_ssa(i I) *U { + return i.(*U) +} + +func testAssertE2TOk(t *testing.T) { + if got := e2t_ssa(u); got != u { + t.Errorf("e2t_ssa(u)=%v want %v", got, u) + } +} + +func testAssertE2TPanic(t *testing.T) { + var got *U + defer func() { + if got != nil { + t.Errorf("e2t_ssa(s)=%v want nil", got) + } + e := recover() + err, ok := e.(*runtime.TypeAssertionError) + if !ok { + t.Errorf("e2t_ssa(s) panic type %T", e) + } + want := "interface conversion: interface {} is *main.S, not *main.U" + if err.Error() != want { + t.Errorf("e2t_ssa(s) wrong error, want '%s', got '%s'", want, err.Error()) + } + }() + got = e2t_ssa(s) + t.Errorf("e2t_ssa(s) should panic") + +} + +func testAssertI2TOk(t *testing.T) { + if got := i2t_ssa(u); got != u { + t.Errorf("i2t_ssa(u)=%v want %v", got, u) + } +} + +func testAssertI2TPanic(t *testing.T) { + var got *U + defer func() { + if got != nil { + t.Errorf("i2t_ssa(s)=%v want nil", got) + } + e := recover() + err, ok := e.(*runtime.TypeAssertionError) + if !ok { + t.Errorf("i2t_ssa(s) panic type %T", e) + } + want := "interface conversion: main.I is *main.S, not *main.U" + if err.Error() != want { + t.Errorf("i2t_ssa(s) wrong error, want '%s', got '%s'", want, err.Error()) + } + }() + got = i2t_ssa(s) + t.Errorf("i2t_ssa(s) should panic") +} + +func e2t2_ssa(e interface{}) (*U, bool) { + u, ok := e.(*U) + return u, ok +} + +func i2t2_ssa(i I) (*U, bool) { + u, ok := i.(*U) + return u, ok +} + +func testAssertE2T2(t *testing.T) { + if got, ok := e2t2_ssa(u); !ok || got != u { + t.Errorf("e2t2_ssa(u)=(%v, %v) want (%v, %v)", got, ok, u, true) + } + if got, ok := e2t2_ssa(s); ok || got != nil { + t.Errorf("e2t2_ssa(s)=(%v, %v) want (%v, %v)", 
got, ok, nil, false) + } +} + +func testAssertI2T2(t *testing.T) { + if got, ok := i2t2_ssa(u); !ok || got != u { + t.Errorf("i2t2_ssa(u)=(%v, %v) want (%v, %v)", got, ok, u, true) + } + if got, ok := i2t2_ssa(s); ok || got != nil { + t.Errorf("i2t2_ssa(s)=(%v, %v) want (%v, %v)", got, ok, nil, false) + } +} + +// TestTypeAssertion tests type assertions. +func TestTypeAssertion(t *testing.T) { + testAssertE2TOk(t) + testAssertE2TPanic(t) + testAssertI2TOk(t) + testAssertI2TPanic(t) + testAssertE2T2(t) + testAssertI2T2(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/break_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/break_test.go new file mode 100644 index 0000000000000000000000000000000000000000..50245dfd3186d8eee41a5d787620ae1ee9c4c4d7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/break_test.go @@ -0,0 +1,250 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests continue and break. 
+ +package main + +import "testing" + +func continuePlain_ssa() int { + var n int + for i := 0; i < 10; i++ { + if i == 6 { + continue + } + n = i + } + return n +} + +func continueLabeled_ssa() int { + var n int +Next: + for i := 0; i < 10; i++ { + if i == 6 { + continue Next + } + n = i + } + return n +} + +func continuePlainInner_ssa() int { + var n int + for j := 0; j < 30; j += 10 { + for i := 0; i < 10; i++ { + if i == 6 { + continue + } + n = i + } + n += j + } + return n +} + +func continueLabeledInner_ssa() int { + var n int + for j := 0; j < 30; j += 10 { + Next: + for i := 0; i < 10; i++ { + if i == 6 { + continue Next + } + n = i + } + n += j + } + return n +} + +func continueLabeledOuter_ssa() int { + var n int +Next: + for j := 0; j < 30; j += 10 { + for i := 0; i < 10; i++ { + if i == 6 { + continue Next + } + n = i + } + n += j + } + return n +} + +func breakPlain_ssa() int { + var n int + for i := 0; i < 10; i++ { + if i == 6 { + break + } + n = i + } + return n +} + +func breakLabeled_ssa() int { + var n int +Next: + for i := 0; i < 10; i++ { + if i == 6 { + break Next + } + n = i + } + return n +} + +func breakPlainInner_ssa() int { + var n int + for j := 0; j < 30; j += 10 { + for i := 0; i < 10; i++ { + if i == 6 { + break + } + n = i + } + n += j + } + return n +} + +func breakLabeledInner_ssa() int { + var n int + for j := 0; j < 30; j += 10 { + Next: + for i := 0; i < 10; i++ { + if i == 6 { + break Next + } + n = i + } + n += j + } + return n +} + +func breakLabeledOuter_ssa() int { + var n int +Next: + for j := 0; j < 30; j += 10 { + for i := 0; i < 10; i++ { + if i == 6 { + break Next + } + n = i + } + n += j + } + return n +} + +var g, h int // globals to ensure optimizations don't collapse our switch statements + +func switchPlain_ssa() int { + var n int + switch g { + case 0: + n = 1 + break + n = 2 + } + return n +} + +func switchLabeled_ssa() int { + var n int +Done: + switch g { + case 0: + n = 1 + break Done + n = 2 + } + return n 
+} + +func switchPlainInner_ssa() int { + var n int + switch g { + case 0: + n = 1 + switch h { + case 0: + n += 10 + break + } + n = 2 + } + return n +} + +func switchLabeledInner_ssa() int { + var n int + switch g { + case 0: + n = 1 + Done: + switch h { + case 0: + n += 10 + break Done + } + n = 2 + } + return n +} + +func switchLabeledOuter_ssa() int { + var n int +Done: + switch g { + case 0: + n = 1 + switch h { + case 0: + n += 10 + break Done + } + n = 2 + } + return n +} + +// TestBreakContinue tests that continue and break statements do what they say. +func TestBreakContinue(t *testing.T) { + tests := [...]struct { + name string + fn func() int + want int + }{ + {"continuePlain_ssa", continuePlain_ssa, 9}, + {"continueLabeled_ssa", continueLabeled_ssa, 9}, + {"continuePlainInner_ssa", continuePlainInner_ssa, 29}, + {"continueLabeledInner_ssa", continueLabeledInner_ssa, 29}, + {"continueLabeledOuter_ssa", continueLabeledOuter_ssa, 5}, + + {"breakPlain_ssa", breakPlain_ssa, 5}, + {"breakLabeled_ssa", breakLabeled_ssa, 5}, + {"breakPlainInner_ssa", breakPlainInner_ssa, 25}, + {"breakLabeledInner_ssa", breakLabeledInner_ssa, 25}, + {"breakLabeledOuter_ssa", breakLabeledOuter_ssa, 5}, + + {"switchPlain_ssa", switchPlain_ssa, 1}, + {"switchLabeled_ssa", switchLabeled_ssa, 1}, + {"switchPlainInner_ssa", switchPlainInner_ssa, 2}, + {"switchLabeledInner_ssa", switchLabeledInner_ssa, 2}, + {"switchLabeledOuter_ssa", switchLabeledOuter_ssa, 11}, + + // no select tests; they're identical to switch + } + + for _, test := range tests { + if got := test.fn(); got != test.want { + t.Errorf("%s()=%d, want %d", test.name, got, test.want) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/chan_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/chan_test.go new file mode 100644 index 0000000000000000000000000000000000000000..628bd8f7f782bea49ad859c1da8fb1e3fa6ad702 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/chan_test.go @@ -0,0 +1,63 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// chan.go tests chan operations. +package main + +import "testing" + +//go:noinline +func lenChan_ssa(v chan int) int { + return len(v) +} + +//go:noinline +func capChan_ssa(v chan int) int { + return cap(v) +} + +func testLenChan(t *testing.T) { + + v := make(chan int, 10) + v <- 1 + v <- 1 + v <- 1 + + if want, got := 3, lenChan_ssa(v); got != want { + t.Errorf("expected len(chan) = %d, got %d", want, got) + } +} + +func testLenNilChan(t *testing.T) { + + var v chan int + if want, got := 0, lenChan_ssa(v); got != want { + t.Errorf("expected len(nil) = %d, got %d", want, got) + } +} + +func testCapChan(t *testing.T) { + + v := make(chan int, 25) + + if want, got := 25, capChan_ssa(v); got != want { + t.Errorf("expected cap(chan) = %d, got %d", want, got) + } +} + +func testCapNilChan(t *testing.T) { + + var v chan int + if want, got := 0, capChan_ssa(v); got != want { + t.Errorf("expected cap(nil) = %d, got %d", want, got) + } +} + +func TestChan(t *testing.T) { + testLenChan(t) + testLenNilChan(t) + + testCapChan(t) + testCapNilChan(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/closure_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/closure_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6cddc2d16728b0d14377fe1194816ec91569ad80 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/closure_test.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// closure.go tests closure operations. 
+package main + +import "testing" + +//go:noinline +func testCFunc_ssa() int { + a := 0 + b := func() { + switch { + } + a++ + } + b() + b() + return a +} + +func testCFunc(t *testing.T) { + if want, got := 2, testCFunc_ssa(); got != want { + t.Errorf("expected %d, got %d", want, got) + } +} + +// TestClosure tests closure related behavior. +func TestClosure(t *testing.T) { + testCFunc(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/cmpConst_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/cmpConst_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9400ef40ae73ed06167496742f980188ea6fd201 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/cmpConst_test.go @@ -0,0 +1,2209 @@ +// Code generated by gen/cmpConstGen.go. DO NOT EDIT. + +package main + +import ( + "reflect" + "runtime" + "testing" +) + +// results show the expected result for the elements left of, equal to and right of the index. 
+type result struct{ l, e, r bool } + +var ( + eq = result{l: false, e: true, r: false} + ne = result{l: true, e: false, r: true} + lt = result{l: true, e: false, r: false} + le = result{l: true, e: true, r: false} + gt = result{l: false, e: false, r: true} + ge = result{l: false, e: true, r: true} +) + +// uint64 tests +var uint64_vals = []uint64{ + 0, + 1, + 126, + 127, + 128, + 254, + 255, + 256, + 32766, + 32767, + 32768, + 65534, + 65535, + 65536, + 2147483646, + 2147483647, + 2147483648, + 4278190080, + 4294967294, + 4294967295, + 4294967296, + 1095216660480, + 9223372036854775806, + 9223372036854775807, + 9223372036854775808, + 18374686479671623680, + 18446744073709551614, + 18446744073709551615, +} + +func lt_0_uint64(x uint64) bool { return x < 0 } +func le_0_uint64(x uint64) bool { return x <= 0 } +func gt_0_uint64(x uint64) bool { return x > 0 } +func ge_0_uint64(x uint64) bool { return x >= 0 } +func eq_0_uint64(x uint64) bool { return x == 0 } +func ne_0_uint64(x uint64) bool { return x != 0 } +func lt_1_uint64(x uint64) bool { return x < 1 } +func le_1_uint64(x uint64) bool { return x <= 1 } +func gt_1_uint64(x uint64) bool { return x > 1 } +func ge_1_uint64(x uint64) bool { return x >= 1 } +func eq_1_uint64(x uint64) bool { return x == 1 } +func ne_1_uint64(x uint64) bool { return x != 1 } +func lt_126_uint64(x uint64) bool { return x < 126 } +func le_126_uint64(x uint64) bool { return x <= 126 } +func gt_126_uint64(x uint64) bool { return x > 126 } +func ge_126_uint64(x uint64) bool { return x >= 126 } +func eq_126_uint64(x uint64) bool { return x == 126 } +func ne_126_uint64(x uint64) bool { return x != 126 } +func lt_127_uint64(x uint64) bool { return x < 127 } +func le_127_uint64(x uint64) bool { return x <= 127 } +func gt_127_uint64(x uint64) bool { return x > 127 } +func ge_127_uint64(x uint64) bool { return x >= 127 } +func eq_127_uint64(x uint64) bool { return x == 127 } +func ne_127_uint64(x uint64) bool { return x != 127 } +func 
lt_128_uint64(x uint64) bool { return x < 128 } +func le_128_uint64(x uint64) bool { return x <= 128 } +func gt_128_uint64(x uint64) bool { return x > 128 } +func ge_128_uint64(x uint64) bool { return x >= 128 } +func eq_128_uint64(x uint64) bool { return x == 128 } +func ne_128_uint64(x uint64) bool { return x != 128 } +func lt_254_uint64(x uint64) bool { return x < 254 } +func le_254_uint64(x uint64) bool { return x <= 254 } +func gt_254_uint64(x uint64) bool { return x > 254 } +func ge_254_uint64(x uint64) bool { return x >= 254 } +func eq_254_uint64(x uint64) bool { return x == 254 } +func ne_254_uint64(x uint64) bool { return x != 254 } +func lt_255_uint64(x uint64) bool { return x < 255 } +func le_255_uint64(x uint64) bool { return x <= 255 } +func gt_255_uint64(x uint64) bool { return x > 255 } +func ge_255_uint64(x uint64) bool { return x >= 255 } +func eq_255_uint64(x uint64) bool { return x == 255 } +func ne_255_uint64(x uint64) bool { return x != 255 } +func lt_256_uint64(x uint64) bool { return x < 256 } +func le_256_uint64(x uint64) bool { return x <= 256 } +func gt_256_uint64(x uint64) bool { return x > 256 } +func ge_256_uint64(x uint64) bool { return x >= 256 } +func eq_256_uint64(x uint64) bool { return x == 256 } +func ne_256_uint64(x uint64) bool { return x != 256 } +func lt_32766_uint64(x uint64) bool { return x < 32766 } +func le_32766_uint64(x uint64) bool { return x <= 32766 } +func gt_32766_uint64(x uint64) bool { return x > 32766 } +func ge_32766_uint64(x uint64) bool { return x >= 32766 } +func eq_32766_uint64(x uint64) bool { return x == 32766 } +func ne_32766_uint64(x uint64) bool { return x != 32766 } +func lt_32767_uint64(x uint64) bool { return x < 32767 } +func le_32767_uint64(x uint64) bool { return x <= 32767 } +func gt_32767_uint64(x uint64) bool { return x > 32767 } +func ge_32767_uint64(x uint64) bool { return x >= 32767 } +func eq_32767_uint64(x uint64) bool { return x == 32767 } +func ne_32767_uint64(x uint64) bool { return x 
!= 32767 } +func lt_32768_uint64(x uint64) bool { return x < 32768 } +func le_32768_uint64(x uint64) bool { return x <= 32768 } +func gt_32768_uint64(x uint64) bool { return x > 32768 } +func ge_32768_uint64(x uint64) bool { return x >= 32768 } +func eq_32768_uint64(x uint64) bool { return x == 32768 } +func ne_32768_uint64(x uint64) bool { return x != 32768 } +func lt_65534_uint64(x uint64) bool { return x < 65534 } +func le_65534_uint64(x uint64) bool { return x <= 65534 } +func gt_65534_uint64(x uint64) bool { return x > 65534 } +func ge_65534_uint64(x uint64) bool { return x >= 65534 } +func eq_65534_uint64(x uint64) bool { return x == 65534 } +func ne_65534_uint64(x uint64) bool { return x != 65534 } +func lt_65535_uint64(x uint64) bool { return x < 65535 } +func le_65535_uint64(x uint64) bool { return x <= 65535 } +func gt_65535_uint64(x uint64) bool { return x > 65535 } +func ge_65535_uint64(x uint64) bool { return x >= 65535 } +func eq_65535_uint64(x uint64) bool { return x == 65535 } +func ne_65535_uint64(x uint64) bool { return x != 65535 } +func lt_65536_uint64(x uint64) bool { return x < 65536 } +func le_65536_uint64(x uint64) bool { return x <= 65536 } +func gt_65536_uint64(x uint64) bool { return x > 65536 } +func ge_65536_uint64(x uint64) bool { return x >= 65536 } +func eq_65536_uint64(x uint64) bool { return x == 65536 } +func ne_65536_uint64(x uint64) bool { return x != 65536 } +func lt_2147483646_uint64(x uint64) bool { return x < 2147483646 } +func le_2147483646_uint64(x uint64) bool { return x <= 2147483646 } +func gt_2147483646_uint64(x uint64) bool { return x > 2147483646 } +func ge_2147483646_uint64(x uint64) bool { return x >= 2147483646 } +func eq_2147483646_uint64(x uint64) bool { return x == 2147483646 } +func ne_2147483646_uint64(x uint64) bool { return x != 2147483646 } +func lt_2147483647_uint64(x uint64) bool { return x < 2147483647 } +func le_2147483647_uint64(x uint64) bool { return x <= 2147483647 } +func gt_2147483647_uint64(x 
uint64) bool { return x > 2147483647 } +func ge_2147483647_uint64(x uint64) bool { return x >= 2147483647 } +func eq_2147483647_uint64(x uint64) bool { return x == 2147483647 } +func ne_2147483647_uint64(x uint64) bool { return x != 2147483647 } +func lt_2147483648_uint64(x uint64) bool { return x < 2147483648 } +func le_2147483648_uint64(x uint64) bool { return x <= 2147483648 } +func gt_2147483648_uint64(x uint64) bool { return x > 2147483648 } +func ge_2147483648_uint64(x uint64) bool { return x >= 2147483648 } +func eq_2147483648_uint64(x uint64) bool { return x == 2147483648 } +func ne_2147483648_uint64(x uint64) bool { return x != 2147483648 } +func lt_4278190080_uint64(x uint64) bool { return x < 4278190080 } +func le_4278190080_uint64(x uint64) bool { return x <= 4278190080 } +func gt_4278190080_uint64(x uint64) bool { return x > 4278190080 } +func ge_4278190080_uint64(x uint64) bool { return x >= 4278190080 } +func eq_4278190080_uint64(x uint64) bool { return x == 4278190080 } +func ne_4278190080_uint64(x uint64) bool { return x != 4278190080 } +func lt_4294967294_uint64(x uint64) bool { return x < 4294967294 } +func le_4294967294_uint64(x uint64) bool { return x <= 4294967294 } +func gt_4294967294_uint64(x uint64) bool { return x > 4294967294 } +func ge_4294967294_uint64(x uint64) bool { return x >= 4294967294 } +func eq_4294967294_uint64(x uint64) bool { return x == 4294967294 } +func ne_4294967294_uint64(x uint64) bool { return x != 4294967294 } +func lt_4294967295_uint64(x uint64) bool { return x < 4294967295 } +func le_4294967295_uint64(x uint64) bool { return x <= 4294967295 } +func gt_4294967295_uint64(x uint64) bool { return x > 4294967295 } +func ge_4294967295_uint64(x uint64) bool { return x >= 4294967295 } +func eq_4294967295_uint64(x uint64) bool { return x == 4294967295 } +func ne_4294967295_uint64(x uint64) bool { return x != 4294967295 } +func lt_4294967296_uint64(x uint64) bool { return x < 4294967296 } +func le_4294967296_uint64(x uint64) 
bool { return x <= 4294967296 } +func gt_4294967296_uint64(x uint64) bool { return x > 4294967296 } +func ge_4294967296_uint64(x uint64) bool { return x >= 4294967296 } +func eq_4294967296_uint64(x uint64) bool { return x == 4294967296 } +func ne_4294967296_uint64(x uint64) bool { return x != 4294967296 } +func lt_1095216660480_uint64(x uint64) bool { return x < 1095216660480 } +func le_1095216660480_uint64(x uint64) bool { return x <= 1095216660480 } +func gt_1095216660480_uint64(x uint64) bool { return x > 1095216660480 } +func ge_1095216660480_uint64(x uint64) bool { return x >= 1095216660480 } +func eq_1095216660480_uint64(x uint64) bool { return x == 1095216660480 } +func ne_1095216660480_uint64(x uint64) bool { return x != 1095216660480 } +func lt_9223372036854775806_uint64(x uint64) bool { return x < 9223372036854775806 } +func le_9223372036854775806_uint64(x uint64) bool { return x <= 9223372036854775806 } +func gt_9223372036854775806_uint64(x uint64) bool { return x > 9223372036854775806 } +func ge_9223372036854775806_uint64(x uint64) bool { return x >= 9223372036854775806 } +func eq_9223372036854775806_uint64(x uint64) bool { return x == 9223372036854775806 } +func ne_9223372036854775806_uint64(x uint64) bool { return x != 9223372036854775806 } +func lt_9223372036854775807_uint64(x uint64) bool { return x < 9223372036854775807 } +func le_9223372036854775807_uint64(x uint64) bool { return x <= 9223372036854775807 } +func gt_9223372036854775807_uint64(x uint64) bool { return x > 9223372036854775807 } +func ge_9223372036854775807_uint64(x uint64) bool { return x >= 9223372036854775807 } +func eq_9223372036854775807_uint64(x uint64) bool { return x == 9223372036854775807 } +func ne_9223372036854775807_uint64(x uint64) bool { return x != 9223372036854775807 } +func lt_9223372036854775808_uint64(x uint64) bool { return x < 9223372036854775808 } +func le_9223372036854775808_uint64(x uint64) bool { return x <= 9223372036854775808 } +func 
gt_9223372036854775808_uint64(x uint64) bool { return x > 9223372036854775808 } +func ge_9223372036854775808_uint64(x uint64) bool { return x >= 9223372036854775808 } +func eq_9223372036854775808_uint64(x uint64) bool { return x == 9223372036854775808 } +func ne_9223372036854775808_uint64(x uint64) bool { return x != 9223372036854775808 } +func lt_18374686479671623680_uint64(x uint64) bool { return x < 18374686479671623680 } +func le_18374686479671623680_uint64(x uint64) bool { return x <= 18374686479671623680 } +func gt_18374686479671623680_uint64(x uint64) bool { return x > 18374686479671623680 } +func ge_18374686479671623680_uint64(x uint64) bool { return x >= 18374686479671623680 } +func eq_18374686479671623680_uint64(x uint64) bool { return x == 18374686479671623680 } +func ne_18374686479671623680_uint64(x uint64) bool { return x != 18374686479671623680 } +func lt_18446744073709551614_uint64(x uint64) bool { return x < 18446744073709551614 } +func le_18446744073709551614_uint64(x uint64) bool { return x <= 18446744073709551614 } +func gt_18446744073709551614_uint64(x uint64) bool { return x > 18446744073709551614 } +func ge_18446744073709551614_uint64(x uint64) bool { return x >= 18446744073709551614 } +func eq_18446744073709551614_uint64(x uint64) bool { return x == 18446744073709551614 } +func ne_18446744073709551614_uint64(x uint64) bool { return x != 18446744073709551614 } +func lt_18446744073709551615_uint64(x uint64) bool { return x < 18446744073709551615 } +func le_18446744073709551615_uint64(x uint64) bool { return x <= 18446744073709551615 } +func gt_18446744073709551615_uint64(x uint64) bool { return x > 18446744073709551615 } +func ge_18446744073709551615_uint64(x uint64) bool { return x >= 18446744073709551615 } +func eq_18446744073709551615_uint64(x uint64) bool { return x == 18446744073709551615 } +func ne_18446744073709551615_uint64(x uint64) bool { return x != 18446744073709551615 } + +var uint64_tests = []struct { + idx int // index of the 
constant used + exp result // expected results + fn func(uint64) bool +}{ + {idx: 0, exp: lt, fn: lt_0_uint64}, + {idx: 0, exp: le, fn: le_0_uint64}, + {idx: 0, exp: gt, fn: gt_0_uint64}, + {idx: 0, exp: ge, fn: ge_0_uint64}, + {idx: 0, exp: eq, fn: eq_0_uint64}, + {idx: 0, exp: ne, fn: ne_0_uint64}, + {idx: 1, exp: lt, fn: lt_1_uint64}, + {idx: 1, exp: le, fn: le_1_uint64}, + {idx: 1, exp: gt, fn: gt_1_uint64}, + {idx: 1, exp: ge, fn: ge_1_uint64}, + {idx: 1, exp: eq, fn: eq_1_uint64}, + {idx: 1, exp: ne, fn: ne_1_uint64}, + {idx: 2, exp: lt, fn: lt_126_uint64}, + {idx: 2, exp: le, fn: le_126_uint64}, + {idx: 2, exp: gt, fn: gt_126_uint64}, + {idx: 2, exp: ge, fn: ge_126_uint64}, + {idx: 2, exp: eq, fn: eq_126_uint64}, + {idx: 2, exp: ne, fn: ne_126_uint64}, + {idx: 3, exp: lt, fn: lt_127_uint64}, + {idx: 3, exp: le, fn: le_127_uint64}, + {idx: 3, exp: gt, fn: gt_127_uint64}, + {idx: 3, exp: ge, fn: ge_127_uint64}, + {idx: 3, exp: eq, fn: eq_127_uint64}, + {idx: 3, exp: ne, fn: ne_127_uint64}, + {idx: 4, exp: lt, fn: lt_128_uint64}, + {idx: 4, exp: le, fn: le_128_uint64}, + {idx: 4, exp: gt, fn: gt_128_uint64}, + {idx: 4, exp: ge, fn: ge_128_uint64}, + {idx: 4, exp: eq, fn: eq_128_uint64}, + {idx: 4, exp: ne, fn: ne_128_uint64}, + {idx: 5, exp: lt, fn: lt_254_uint64}, + {idx: 5, exp: le, fn: le_254_uint64}, + {idx: 5, exp: gt, fn: gt_254_uint64}, + {idx: 5, exp: ge, fn: ge_254_uint64}, + {idx: 5, exp: eq, fn: eq_254_uint64}, + {idx: 5, exp: ne, fn: ne_254_uint64}, + {idx: 6, exp: lt, fn: lt_255_uint64}, + {idx: 6, exp: le, fn: le_255_uint64}, + {idx: 6, exp: gt, fn: gt_255_uint64}, + {idx: 6, exp: ge, fn: ge_255_uint64}, + {idx: 6, exp: eq, fn: eq_255_uint64}, + {idx: 6, exp: ne, fn: ne_255_uint64}, + {idx: 7, exp: lt, fn: lt_256_uint64}, + {idx: 7, exp: le, fn: le_256_uint64}, + {idx: 7, exp: gt, fn: gt_256_uint64}, + {idx: 7, exp: ge, fn: ge_256_uint64}, + {idx: 7, exp: eq, fn: eq_256_uint64}, + {idx: 7, exp: ne, fn: ne_256_uint64}, + {idx: 8, exp: lt, fn: 
lt_32766_uint64}, + {idx: 8, exp: le, fn: le_32766_uint64}, + {idx: 8, exp: gt, fn: gt_32766_uint64}, + {idx: 8, exp: ge, fn: ge_32766_uint64}, + {idx: 8, exp: eq, fn: eq_32766_uint64}, + {idx: 8, exp: ne, fn: ne_32766_uint64}, + {idx: 9, exp: lt, fn: lt_32767_uint64}, + {idx: 9, exp: le, fn: le_32767_uint64}, + {idx: 9, exp: gt, fn: gt_32767_uint64}, + {idx: 9, exp: ge, fn: ge_32767_uint64}, + {idx: 9, exp: eq, fn: eq_32767_uint64}, + {idx: 9, exp: ne, fn: ne_32767_uint64}, + {idx: 10, exp: lt, fn: lt_32768_uint64}, + {idx: 10, exp: le, fn: le_32768_uint64}, + {idx: 10, exp: gt, fn: gt_32768_uint64}, + {idx: 10, exp: ge, fn: ge_32768_uint64}, + {idx: 10, exp: eq, fn: eq_32768_uint64}, + {idx: 10, exp: ne, fn: ne_32768_uint64}, + {idx: 11, exp: lt, fn: lt_65534_uint64}, + {idx: 11, exp: le, fn: le_65534_uint64}, + {idx: 11, exp: gt, fn: gt_65534_uint64}, + {idx: 11, exp: ge, fn: ge_65534_uint64}, + {idx: 11, exp: eq, fn: eq_65534_uint64}, + {idx: 11, exp: ne, fn: ne_65534_uint64}, + {idx: 12, exp: lt, fn: lt_65535_uint64}, + {idx: 12, exp: le, fn: le_65535_uint64}, + {idx: 12, exp: gt, fn: gt_65535_uint64}, + {idx: 12, exp: ge, fn: ge_65535_uint64}, + {idx: 12, exp: eq, fn: eq_65535_uint64}, + {idx: 12, exp: ne, fn: ne_65535_uint64}, + {idx: 13, exp: lt, fn: lt_65536_uint64}, + {idx: 13, exp: le, fn: le_65536_uint64}, + {idx: 13, exp: gt, fn: gt_65536_uint64}, + {idx: 13, exp: ge, fn: ge_65536_uint64}, + {idx: 13, exp: eq, fn: eq_65536_uint64}, + {idx: 13, exp: ne, fn: ne_65536_uint64}, + {idx: 14, exp: lt, fn: lt_2147483646_uint64}, + {idx: 14, exp: le, fn: le_2147483646_uint64}, + {idx: 14, exp: gt, fn: gt_2147483646_uint64}, + {idx: 14, exp: ge, fn: ge_2147483646_uint64}, + {idx: 14, exp: eq, fn: eq_2147483646_uint64}, + {idx: 14, exp: ne, fn: ne_2147483646_uint64}, + {idx: 15, exp: lt, fn: lt_2147483647_uint64}, + {idx: 15, exp: le, fn: le_2147483647_uint64}, + {idx: 15, exp: gt, fn: gt_2147483647_uint64}, + {idx: 15, exp: ge, fn: ge_2147483647_uint64}, + {idx: 
15, exp: eq, fn: eq_2147483647_uint64}, + {idx: 15, exp: ne, fn: ne_2147483647_uint64}, + {idx: 16, exp: lt, fn: lt_2147483648_uint64}, + {idx: 16, exp: le, fn: le_2147483648_uint64}, + {idx: 16, exp: gt, fn: gt_2147483648_uint64}, + {idx: 16, exp: ge, fn: ge_2147483648_uint64}, + {idx: 16, exp: eq, fn: eq_2147483648_uint64}, + {idx: 16, exp: ne, fn: ne_2147483648_uint64}, + {idx: 17, exp: lt, fn: lt_4278190080_uint64}, + {idx: 17, exp: le, fn: le_4278190080_uint64}, + {idx: 17, exp: gt, fn: gt_4278190080_uint64}, + {idx: 17, exp: ge, fn: ge_4278190080_uint64}, + {idx: 17, exp: eq, fn: eq_4278190080_uint64}, + {idx: 17, exp: ne, fn: ne_4278190080_uint64}, + {idx: 18, exp: lt, fn: lt_4294967294_uint64}, + {idx: 18, exp: le, fn: le_4294967294_uint64}, + {idx: 18, exp: gt, fn: gt_4294967294_uint64}, + {idx: 18, exp: ge, fn: ge_4294967294_uint64}, + {idx: 18, exp: eq, fn: eq_4294967294_uint64}, + {idx: 18, exp: ne, fn: ne_4294967294_uint64}, + {idx: 19, exp: lt, fn: lt_4294967295_uint64}, + {idx: 19, exp: le, fn: le_4294967295_uint64}, + {idx: 19, exp: gt, fn: gt_4294967295_uint64}, + {idx: 19, exp: ge, fn: ge_4294967295_uint64}, + {idx: 19, exp: eq, fn: eq_4294967295_uint64}, + {idx: 19, exp: ne, fn: ne_4294967295_uint64}, + {idx: 20, exp: lt, fn: lt_4294967296_uint64}, + {idx: 20, exp: le, fn: le_4294967296_uint64}, + {idx: 20, exp: gt, fn: gt_4294967296_uint64}, + {idx: 20, exp: ge, fn: ge_4294967296_uint64}, + {idx: 20, exp: eq, fn: eq_4294967296_uint64}, + {idx: 20, exp: ne, fn: ne_4294967296_uint64}, + {idx: 21, exp: lt, fn: lt_1095216660480_uint64}, + {idx: 21, exp: le, fn: le_1095216660480_uint64}, + {idx: 21, exp: gt, fn: gt_1095216660480_uint64}, + {idx: 21, exp: ge, fn: ge_1095216660480_uint64}, + {idx: 21, exp: eq, fn: eq_1095216660480_uint64}, + {idx: 21, exp: ne, fn: ne_1095216660480_uint64}, + {idx: 22, exp: lt, fn: lt_9223372036854775806_uint64}, + {idx: 22, exp: le, fn: le_9223372036854775806_uint64}, + {idx: 22, exp: gt, fn: 
gt_9223372036854775806_uint64}, + {idx: 22, exp: ge, fn: ge_9223372036854775806_uint64}, + {idx: 22, exp: eq, fn: eq_9223372036854775806_uint64}, + {idx: 22, exp: ne, fn: ne_9223372036854775806_uint64}, + {idx: 23, exp: lt, fn: lt_9223372036854775807_uint64}, + {idx: 23, exp: le, fn: le_9223372036854775807_uint64}, + {idx: 23, exp: gt, fn: gt_9223372036854775807_uint64}, + {idx: 23, exp: ge, fn: ge_9223372036854775807_uint64}, + {idx: 23, exp: eq, fn: eq_9223372036854775807_uint64}, + {idx: 23, exp: ne, fn: ne_9223372036854775807_uint64}, + {idx: 24, exp: lt, fn: lt_9223372036854775808_uint64}, + {idx: 24, exp: le, fn: le_9223372036854775808_uint64}, + {idx: 24, exp: gt, fn: gt_9223372036854775808_uint64}, + {idx: 24, exp: ge, fn: ge_9223372036854775808_uint64}, + {idx: 24, exp: eq, fn: eq_9223372036854775808_uint64}, + {idx: 24, exp: ne, fn: ne_9223372036854775808_uint64}, + {idx: 25, exp: lt, fn: lt_18374686479671623680_uint64}, + {idx: 25, exp: le, fn: le_18374686479671623680_uint64}, + {idx: 25, exp: gt, fn: gt_18374686479671623680_uint64}, + {idx: 25, exp: ge, fn: ge_18374686479671623680_uint64}, + {idx: 25, exp: eq, fn: eq_18374686479671623680_uint64}, + {idx: 25, exp: ne, fn: ne_18374686479671623680_uint64}, + {idx: 26, exp: lt, fn: lt_18446744073709551614_uint64}, + {idx: 26, exp: le, fn: le_18446744073709551614_uint64}, + {idx: 26, exp: gt, fn: gt_18446744073709551614_uint64}, + {idx: 26, exp: ge, fn: ge_18446744073709551614_uint64}, + {idx: 26, exp: eq, fn: eq_18446744073709551614_uint64}, + {idx: 26, exp: ne, fn: ne_18446744073709551614_uint64}, + {idx: 27, exp: lt, fn: lt_18446744073709551615_uint64}, + {idx: 27, exp: le, fn: le_18446744073709551615_uint64}, + {idx: 27, exp: gt, fn: gt_18446744073709551615_uint64}, + {idx: 27, exp: ge, fn: ge_18446744073709551615_uint64}, + {idx: 27, exp: eq, fn: eq_18446744073709551615_uint64}, + {idx: 27, exp: ne, fn: ne_18446744073709551615_uint64}, +} + +// uint32 tests +var uint32_vals = []uint32{ + 0, + 1, + 126, 
+ 127, + 128, + 254, + 255, + 256, + 32766, + 32767, + 32768, + 65534, + 65535, + 65536, + 2147483646, + 2147483647, + 2147483648, + 4278190080, + 4294967294, + 4294967295, +} + +func lt_0_uint32(x uint32) bool { return x < 0 } +func le_0_uint32(x uint32) bool { return x <= 0 } +func gt_0_uint32(x uint32) bool { return x > 0 } +func ge_0_uint32(x uint32) bool { return x >= 0 } +func eq_0_uint32(x uint32) bool { return x == 0 } +func ne_0_uint32(x uint32) bool { return x != 0 } +func lt_1_uint32(x uint32) bool { return x < 1 } +func le_1_uint32(x uint32) bool { return x <= 1 } +func gt_1_uint32(x uint32) bool { return x > 1 } +func ge_1_uint32(x uint32) bool { return x >= 1 } +func eq_1_uint32(x uint32) bool { return x == 1 } +func ne_1_uint32(x uint32) bool { return x != 1 } +func lt_126_uint32(x uint32) bool { return x < 126 } +func le_126_uint32(x uint32) bool { return x <= 126 } +func gt_126_uint32(x uint32) bool { return x > 126 } +func ge_126_uint32(x uint32) bool { return x >= 126 } +func eq_126_uint32(x uint32) bool { return x == 126 } +func ne_126_uint32(x uint32) bool { return x != 126 } +func lt_127_uint32(x uint32) bool { return x < 127 } +func le_127_uint32(x uint32) bool { return x <= 127 } +func gt_127_uint32(x uint32) bool { return x > 127 } +func ge_127_uint32(x uint32) bool { return x >= 127 } +func eq_127_uint32(x uint32) bool { return x == 127 } +func ne_127_uint32(x uint32) bool { return x != 127 } +func lt_128_uint32(x uint32) bool { return x < 128 } +func le_128_uint32(x uint32) bool { return x <= 128 } +func gt_128_uint32(x uint32) bool { return x > 128 } +func ge_128_uint32(x uint32) bool { return x >= 128 } +func eq_128_uint32(x uint32) bool { return x == 128 } +func ne_128_uint32(x uint32) bool { return x != 128 } +func lt_254_uint32(x uint32) bool { return x < 254 } +func le_254_uint32(x uint32) bool { return x <= 254 } +func gt_254_uint32(x uint32) bool { return x > 254 } +func ge_254_uint32(x uint32) bool { return x >= 254 } +func 
eq_254_uint32(x uint32) bool { return x == 254 } +func ne_254_uint32(x uint32) bool { return x != 254 } +func lt_255_uint32(x uint32) bool { return x < 255 } +func le_255_uint32(x uint32) bool { return x <= 255 } +func gt_255_uint32(x uint32) bool { return x > 255 } +func ge_255_uint32(x uint32) bool { return x >= 255 } +func eq_255_uint32(x uint32) bool { return x == 255 } +func ne_255_uint32(x uint32) bool { return x != 255 } +func lt_256_uint32(x uint32) bool { return x < 256 } +func le_256_uint32(x uint32) bool { return x <= 256 } +func gt_256_uint32(x uint32) bool { return x > 256 } +func ge_256_uint32(x uint32) bool { return x >= 256 } +func eq_256_uint32(x uint32) bool { return x == 256 } +func ne_256_uint32(x uint32) bool { return x != 256 } +func lt_32766_uint32(x uint32) bool { return x < 32766 } +func le_32766_uint32(x uint32) bool { return x <= 32766 } +func gt_32766_uint32(x uint32) bool { return x > 32766 } +func ge_32766_uint32(x uint32) bool { return x >= 32766 } +func eq_32766_uint32(x uint32) bool { return x == 32766 } +func ne_32766_uint32(x uint32) bool { return x != 32766 } +func lt_32767_uint32(x uint32) bool { return x < 32767 } +func le_32767_uint32(x uint32) bool { return x <= 32767 } +func gt_32767_uint32(x uint32) bool { return x > 32767 } +func ge_32767_uint32(x uint32) bool { return x >= 32767 } +func eq_32767_uint32(x uint32) bool { return x == 32767 } +func ne_32767_uint32(x uint32) bool { return x != 32767 } +func lt_32768_uint32(x uint32) bool { return x < 32768 } +func le_32768_uint32(x uint32) bool { return x <= 32768 } +func gt_32768_uint32(x uint32) bool { return x > 32768 } +func ge_32768_uint32(x uint32) bool { return x >= 32768 } +func eq_32768_uint32(x uint32) bool { return x == 32768 } +func ne_32768_uint32(x uint32) bool { return x != 32768 } +func lt_65534_uint32(x uint32) bool { return x < 65534 } +func le_65534_uint32(x uint32) bool { return x <= 65534 } +func gt_65534_uint32(x uint32) bool { return x > 65534 } +func 
ge_65534_uint32(x uint32) bool { return x >= 65534 } +func eq_65534_uint32(x uint32) bool { return x == 65534 } +func ne_65534_uint32(x uint32) bool { return x != 65534 } +func lt_65535_uint32(x uint32) bool { return x < 65535 } +func le_65535_uint32(x uint32) bool { return x <= 65535 } +func gt_65535_uint32(x uint32) bool { return x > 65535 } +func ge_65535_uint32(x uint32) bool { return x >= 65535 } +func eq_65535_uint32(x uint32) bool { return x == 65535 } +func ne_65535_uint32(x uint32) bool { return x != 65535 } +func lt_65536_uint32(x uint32) bool { return x < 65536 } +func le_65536_uint32(x uint32) bool { return x <= 65536 } +func gt_65536_uint32(x uint32) bool { return x > 65536 } +func ge_65536_uint32(x uint32) bool { return x >= 65536 } +func eq_65536_uint32(x uint32) bool { return x == 65536 } +func ne_65536_uint32(x uint32) bool { return x != 65536 } +func lt_2147483646_uint32(x uint32) bool { return x < 2147483646 } +func le_2147483646_uint32(x uint32) bool { return x <= 2147483646 } +func gt_2147483646_uint32(x uint32) bool { return x > 2147483646 } +func ge_2147483646_uint32(x uint32) bool { return x >= 2147483646 } +func eq_2147483646_uint32(x uint32) bool { return x == 2147483646 } +func ne_2147483646_uint32(x uint32) bool { return x != 2147483646 } +func lt_2147483647_uint32(x uint32) bool { return x < 2147483647 } +func le_2147483647_uint32(x uint32) bool { return x <= 2147483647 } +func gt_2147483647_uint32(x uint32) bool { return x > 2147483647 } +func ge_2147483647_uint32(x uint32) bool { return x >= 2147483647 } +func eq_2147483647_uint32(x uint32) bool { return x == 2147483647 } +func ne_2147483647_uint32(x uint32) bool { return x != 2147483647 } +func lt_2147483648_uint32(x uint32) bool { return x < 2147483648 } +func le_2147483648_uint32(x uint32) bool { return x <= 2147483648 } +func gt_2147483648_uint32(x uint32) bool { return x > 2147483648 } +func ge_2147483648_uint32(x uint32) bool { return x >= 2147483648 } +func 
eq_2147483648_uint32(x uint32) bool { return x == 2147483648 } +func ne_2147483648_uint32(x uint32) bool { return x != 2147483648 } +func lt_4278190080_uint32(x uint32) bool { return x < 4278190080 } +func le_4278190080_uint32(x uint32) bool { return x <= 4278190080 } +func gt_4278190080_uint32(x uint32) bool { return x > 4278190080 } +func ge_4278190080_uint32(x uint32) bool { return x >= 4278190080 } +func eq_4278190080_uint32(x uint32) bool { return x == 4278190080 } +func ne_4278190080_uint32(x uint32) bool { return x != 4278190080 } +func lt_4294967294_uint32(x uint32) bool { return x < 4294967294 } +func le_4294967294_uint32(x uint32) bool { return x <= 4294967294 } +func gt_4294967294_uint32(x uint32) bool { return x > 4294967294 } +func ge_4294967294_uint32(x uint32) bool { return x >= 4294967294 } +func eq_4294967294_uint32(x uint32) bool { return x == 4294967294 } +func ne_4294967294_uint32(x uint32) bool { return x != 4294967294 } +func lt_4294967295_uint32(x uint32) bool { return x < 4294967295 } +func le_4294967295_uint32(x uint32) bool { return x <= 4294967295 } +func gt_4294967295_uint32(x uint32) bool { return x > 4294967295 } +func ge_4294967295_uint32(x uint32) bool { return x >= 4294967295 } +func eq_4294967295_uint32(x uint32) bool { return x == 4294967295 } +func ne_4294967295_uint32(x uint32) bool { return x != 4294967295 } + +var uint32_tests = []struct { + idx int // index of the constant used + exp result // expected results + fn func(uint32) bool +}{ + {idx: 0, exp: lt, fn: lt_0_uint32}, + {idx: 0, exp: le, fn: le_0_uint32}, + {idx: 0, exp: gt, fn: gt_0_uint32}, + {idx: 0, exp: ge, fn: ge_0_uint32}, + {idx: 0, exp: eq, fn: eq_0_uint32}, + {idx: 0, exp: ne, fn: ne_0_uint32}, + {idx: 1, exp: lt, fn: lt_1_uint32}, + {idx: 1, exp: le, fn: le_1_uint32}, + {idx: 1, exp: gt, fn: gt_1_uint32}, + {idx: 1, exp: ge, fn: ge_1_uint32}, + {idx: 1, exp: eq, fn: eq_1_uint32}, + {idx: 1, exp: ne, fn: ne_1_uint32}, + {idx: 2, exp: lt, fn: lt_126_uint32}, + 
{idx: 2, exp: le, fn: le_126_uint32}, + {idx: 2, exp: gt, fn: gt_126_uint32}, + {idx: 2, exp: ge, fn: ge_126_uint32}, + {idx: 2, exp: eq, fn: eq_126_uint32}, + {idx: 2, exp: ne, fn: ne_126_uint32}, + {idx: 3, exp: lt, fn: lt_127_uint32}, + {idx: 3, exp: le, fn: le_127_uint32}, + {idx: 3, exp: gt, fn: gt_127_uint32}, + {idx: 3, exp: ge, fn: ge_127_uint32}, + {idx: 3, exp: eq, fn: eq_127_uint32}, + {idx: 3, exp: ne, fn: ne_127_uint32}, + {idx: 4, exp: lt, fn: lt_128_uint32}, + {idx: 4, exp: le, fn: le_128_uint32}, + {idx: 4, exp: gt, fn: gt_128_uint32}, + {idx: 4, exp: ge, fn: ge_128_uint32}, + {idx: 4, exp: eq, fn: eq_128_uint32}, + {idx: 4, exp: ne, fn: ne_128_uint32}, + {idx: 5, exp: lt, fn: lt_254_uint32}, + {idx: 5, exp: le, fn: le_254_uint32}, + {idx: 5, exp: gt, fn: gt_254_uint32}, + {idx: 5, exp: ge, fn: ge_254_uint32}, + {idx: 5, exp: eq, fn: eq_254_uint32}, + {idx: 5, exp: ne, fn: ne_254_uint32}, + {idx: 6, exp: lt, fn: lt_255_uint32}, + {idx: 6, exp: le, fn: le_255_uint32}, + {idx: 6, exp: gt, fn: gt_255_uint32}, + {idx: 6, exp: ge, fn: ge_255_uint32}, + {idx: 6, exp: eq, fn: eq_255_uint32}, + {idx: 6, exp: ne, fn: ne_255_uint32}, + {idx: 7, exp: lt, fn: lt_256_uint32}, + {idx: 7, exp: le, fn: le_256_uint32}, + {idx: 7, exp: gt, fn: gt_256_uint32}, + {idx: 7, exp: ge, fn: ge_256_uint32}, + {idx: 7, exp: eq, fn: eq_256_uint32}, + {idx: 7, exp: ne, fn: ne_256_uint32}, + {idx: 8, exp: lt, fn: lt_32766_uint32}, + {idx: 8, exp: le, fn: le_32766_uint32}, + {idx: 8, exp: gt, fn: gt_32766_uint32}, + {idx: 8, exp: ge, fn: ge_32766_uint32}, + {idx: 8, exp: eq, fn: eq_32766_uint32}, + {idx: 8, exp: ne, fn: ne_32766_uint32}, + {idx: 9, exp: lt, fn: lt_32767_uint32}, + {idx: 9, exp: le, fn: le_32767_uint32}, + {idx: 9, exp: gt, fn: gt_32767_uint32}, + {idx: 9, exp: ge, fn: ge_32767_uint32}, + {idx: 9, exp: eq, fn: eq_32767_uint32}, + {idx: 9, exp: ne, fn: ne_32767_uint32}, + {idx: 10, exp: lt, fn: lt_32768_uint32}, + {idx: 10, exp: le, fn: le_32768_uint32}, + {idx: 10, 
exp: gt, fn: gt_32768_uint32}, + {idx: 10, exp: ge, fn: ge_32768_uint32}, + {idx: 10, exp: eq, fn: eq_32768_uint32}, + {idx: 10, exp: ne, fn: ne_32768_uint32}, + {idx: 11, exp: lt, fn: lt_65534_uint32}, + {idx: 11, exp: le, fn: le_65534_uint32}, + {idx: 11, exp: gt, fn: gt_65534_uint32}, + {idx: 11, exp: ge, fn: ge_65534_uint32}, + {idx: 11, exp: eq, fn: eq_65534_uint32}, + {idx: 11, exp: ne, fn: ne_65534_uint32}, + {idx: 12, exp: lt, fn: lt_65535_uint32}, + {idx: 12, exp: le, fn: le_65535_uint32}, + {idx: 12, exp: gt, fn: gt_65535_uint32}, + {idx: 12, exp: ge, fn: ge_65535_uint32}, + {idx: 12, exp: eq, fn: eq_65535_uint32}, + {idx: 12, exp: ne, fn: ne_65535_uint32}, + {idx: 13, exp: lt, fn: lt_65536_uint32}, + {idx: 13, exp: le, fn: le_65536_uint32}, + {idx: 13, exp: gt, fn: gt_65536_uint32}, + {idx: 13, exp: ge, fn: ge_65536_uint32}, + {idx: 13, exp: eq, fn: eq_65536_uint32}, + {idx: 13, exp: ne, fn: ne_65536_uint32}, + {idx: 14, exp: lt, fn: lt_2147483646_uint32}, + {idx: 14, exp: le, fn: le_2147483646_uint32}, + {idx: 14, exp: gt, fn: gt_2147483646_uint32}, + {idx: 14, exp: ge, fn: ge_2147483646_uint32}, + {idx: 14, exp: eq, fn: eq_2147483646_uint32}, + {idx: 14, exp: ne, fn: ne_2147483646_uint32}, + {idx: 15, exp: lt, fn: lt_2147483647_uint32}, + {idx: 15, exp: le, fn: le_2147483647_uint32}, + {idx: 15, exp: gt, fn: gt_2147483647_uint32}, + {idx: 15, exp: ge, fn: ge_2147483647_uint32}, + {idx: 15, exp: eq, fn: eq_2147483647_uint32}, + {idx: 15, exp: ne, fn: ne_2147483647_uint32}, + {idx: 16, exp: lt, fn: lt_2147483648_uint32}, + {idx: 16, exp: le, fn: le_2147483648_uint32}, + {idx: 16, exp: gt, fn: gt_2147483648_uint32}, + {idx: 16, exp: ge, fn: ge_2147483648_uint32}, + {idx: 16, exp: eq, fn: eq_2147483648_uint32}, + {idx: 16, exp: ne, fn: ne_2147483648_uint32}, + {idx: 17, exp: lt, fn: lt_4278190080_uint32}, + {idx: 17, exp: le, fn: le_4278190080_uint32}, + {idx: 17, exp: gt, fn: gt_4278190080_uint32}, + {idx: 17, exp: ge, fn: ge_4278190080_uint32}, + {idx: 
17, exp: eq, fn: eq_4278190080_uint32}, + {idx: 17, exp: ne, fn: ne_4278190080_uint32}, + {idx: 18, exp: lt, fn: lt_4294967294_uint32}, + {idx: 18, exp: le, fn: le_4294967294_uint32}, + {idx: 18, exp: gt, fn: gt_4294967294_uint32}, + {idx: 18, exp: ge, fn: ge_4294967294_uint32}, + {idx: 18, exp: eq, fn: eq_4294967294_uint32}, + {idx: 18, exp: ne, fn: ne_4294967294_uint32}, + {idx: 19, exp: lt, fn: lt_4294967295_uint32}, + {idx: 19, exp: le, fn: le_4294967295_uint32}, + {idx: 19, exp: gt, fn: gt_4294967295_uint32}, + {idx: 19, exp: ge, fn: ge_4294967295_uint32}, + {idx: 19, exp: eq, fn: eq_4294967295_uint32}, + {idx: 19, exp: ne, fn: ne_4294967295_uint32}, +} + +// uint16 tests +var uint16_vals = []uint16{ + 0, + 1, + 126, + 127, + 128, + 254, + 255, + 256, + 32766, + 32767, + 32768, + 65534, + 65535, +} + +func lt_0_uint16(x uint16) bool { return x < 0 } +func le_0_uint16(x uint16) bool { return x <= 0 } +func gt_0_uint16(x uint16) bool { return x > 0 } +func ge_0_uint16(x uint16) bool { return x >= 0 } +func eq_0_uint16(x uint16) bool { return x == 0 } +func ne_0_uint16(x uint16) bool { return x != 0 } +func lt_1_uint16(x uint16) bool { return x < 1 } +func le_1_uint16(x uint16) bool { return x <= 1 } +func gt_1_uint16(x uint16) bool { return x > 1 } +func ge_1_uint16(x uint16) bool { return x >= 1 } +func eq_1_uint16(x uint16) bool { return x == 1 } +func ne_1_uint16(x uint16) bool { return x != 1 } +func lt_126_uint16(x uint16) bool { return x < 126 } +func le_126_uint16(x uint16) bool { return x <= 126 } +func gt_126_uint16(x uint16) bool { return x > 126 } +func ge_126_uint16(x uint16) bool { return x >= 126 } +func eq_126_uint16(x uint16) bool { return x == 126 } +func ne_126_uint16(x uint16) bool { return x != 126 } +func lt_127_uint16(x uint16) bool { return x < 127 } +func le_127_uint16(x uint16) bool { return x <= 127 } +func gt_127_uint16(x uint16) bool { return x > 127 } +func ge_127_uint16(x uint16) bool { return x >= 127 } +func eq_127_uint16(x 
uint16) bool { return x == 127 } +func ne_127_uint16(x uint16) bool { return x != 127 } +func lt_128_uint16(x uint16) bool { return x < 128 } +func le_128_uint16(x uint16) bool { return x <= 128 } +func gt_128_uint16(x uint16) bool { return x > 128 } +func ge_128_uint16(x uint16) bool { return x >= 128 } +func eq_128_uint16(x uint16) bool { return x == 128 } +func ne_128_uint16(x uint16) bool { return x != 128 } +func lt_254_uint16(x uint16) bool { return x < 254 } +func le_254_uint16(x uint16) bool { return x <= 254 } +func gt_254_uint16(x uint16) bool { return x > 254 } +func ge_254_uint16(x uint16) bool { return x >= 254 } +func eq_254_uint16(x uint16) bool { return x == 254 } +func ne_254_uint16(x uint16) bool { return x != 254 } +func lt_255_uint16(x uint16) bool { return x < 255 } +func le_255_uint16(x uint16) bool { return x <= 255 } +func gt_255_uint16(x uint16) bool { return x > 255 } +func ge_255_uint16(x uint16) bool { return x >= 255 } +func eq_255_uint16(x uint16) bool { return x == 255 } +func ne_255_uint16(x uint16) bool { return x != 255 } +func lt_256_uint16(x uint16) bool { return x < 256 } +func le_256_uint16(x uint16) bool { return x <= 256 } +func gt_256_uint16(x uint16) bool { return x > 256 } +func ge_256_uint16(x uint16) bool { return x >= 256 } +func eq_256_uint16(x uint16) bool { return x == 256 } +func ne_256_uint16(x uint16) bool { return x != 256 } +func lt_32766_uint16(x uint16) bool { return x < 32766 } +func le_32766_uint16(x uint16) bool { return x <= 32766 } +func gt_32766_uint16(x uint16) bool { return x > 32766 } +func ge_32766_uint16(x uint16) bool { return x >= 32766 } +func eq_32766_uint16(x uint16) bool { return x == 32766 } +func ne_32766_uint16(x uint16) bool { return x != 32766 } +func lt_32767_uint16(x uint16) bool { return x < 32767 } +func le_32767_uint16(x uint16) bool { return x <= 32767 } +func gt_32767_uint16(x uint16) bool { return x > 32767 } +func ge_32767_uint16(x uint16) bool { return x >= 32767 } +func 
eq_32767_uint16(x uint16) bool { return x == 32767 } +func ne_32767_uint16(x uint16) bool { return x != 32767 } +func lt_32768_uint16(x uint16) bool { return x < 32768 } +func le_32768_uint16(x uint16) bool { return x <= 32768 } +func gt_32768_uint16(x uint16) bool { return x > 32768 } +func ge_32768_uint16(x uint16) bool { return x >= 32768 } +func eq_32768_uint16(x uint16) bool { return x == 32768 } +func ne_32768_uint16(x uint16) bool { return x != 32768 } +func lt_65534_uint16(x uint16) bool { return x < 65534 } +func le_65534_uint16(x uint16) bool { return x <= 65534 } +func gt_65534_uint16(x uint16) bool { return x > 65534 } +func ge_65534_uint16(x uint16) bool { return x >= 65534 } +func eq_65534_uint16(x uint16) bool { return x == 65534 } +func ne_65534_uint16(x uint16) bool { return x != 65534 } +func lt_65535_uint16(x uint16) bool { return x < 65535 } +func le_65535_uint16(x uint16) bool { return x <= 65535 } +func gt_65535_uint16(x uint16) bool { return x > 65535 } +func ge_65535_uint16(x uint16) bool { return x >= 65535 } +func eq_65535_uint16(x uint16) bool { return x == 65535 } +func ne_65535_uint16(x uint16) bool { return x != 65535 } + +var uint16_tests = []struct { + idx int // index of the constant used + exp result // expected results + fn func(uint16) bool +}{ + {idx: 0, exp: lt, fn: lt_0_uint16}, + {idx: 0, exp: le, fn: le_0_uint16}, + {idx: 0, exp: gt, fn: gt_0_uint16}, + {idx: 0, exp: ge, fn: ge_0_uint16}, + {idx: 0, exp: eq, fn: eq_0_uint16}, + {idx: 0, exp: ne, fn: ne_0_uint16}, + {idx: 1, exp: lt, fn: lt_1_uint16}, + {idx: 1, exp: le, fn: le_1_uint16}, + {idx: 1, exp: gt, fn: gt_1_uint16}, + {idx: 1, exp: ge, fn: ge_1_uint16}, + {idx: 1, exp: eq, fn: eq_1_uint16}, + {idx: 1, exp: ne, fn: ne_1_uint16}, + {idx: 2, exp: lt, fn: lt_126_uint16}, + {idx: 2, exp: le, fn: le_126_uint16}, + {idx: 2, exp: gt, fn: gt_126_uint16}, + {idx: 2, exp: ge, fn: ge_126_uint16}, + {idx: 2, exp: eq, fn: eq_126_uint16}, + {idx: 2, exp: ne, fn: ne_126_uint16}, + 
{idx: 3, exp: lt, fn: lt_127_uint16}, + {idx: 3, exp: le, fn: le_127_uint16}, + {idx: 3, exp: gt, fn: gt_127_uint16}, + {idx: 3, exp: ge, fn: ge_127_uint16}, + {idx: 3, exp: eq, fn: eq_127_uint16}, + {idx: 3, exp: ne, fn: ne_127_uint16}, + {idx: 4, exp: lt, fn: lt_128_uint16}, + {idx: 4, exp: le, fn: le_128_uint16}, + {idx: 4, exp: gt, fn: gt_128_uint16}, + {idx: 4, exp: ge, fn: ge_128_uint16}, + {idx: 4, exp: eq, fn: eq_128_uint16}, + {idx: 4, exp: ne, fn: ne_128_uint16}, + {idx: 5, exp: lt, fn: lt_254_uint16}, + {idx: 5, exp: le, fn: le_254_uint16}, + {idx: 5, exp: gt, fn: gt_254_uint16}, + {idx: 5, exp: ge, fn: ge_254_uint16}, + {idx: 5, exp: eq, fn: eq_254_uint16}, + {idx: 5, exp: ne, fn: ne_254_uint16}, + {idx: 6, exp: lt, fn: lt_255_uint16}, + {idx: 6, exp: le, fn: le_255_uint16}, + {idx: 6, exp: gt, fn: gt_255_uint16}, + {idx: 6, exp: ge, fn: ge_255_uint16}, + {idx: 6, exp: eq, fn: eq_255_uint16}, + {idx: 6, exp: ne, fn: ne_255_uint16}, + {idx: 7, exp: lt, fn: lt_256_uint16}, + {idx: 7, exp: le, fn: le_256_uint16}, + {idx: 7, exp: gt, fn: gt_256_uint16}, + {idx: 7, exp: ge, fn: ge_256_uint16}, + {idx: 7, exp: eq, fn: eq_256_uint16}, + {idx: 7, exp: ne, fn: ne_256_uint16}, + {idx: 8, exp: lt, fn: lt_32766_uint16}, + {idx: 8, exp: le, fn: le_32766_uint16}, + {idx: 8, exp: gt, fn: gt_32766_uint16}, + {idx: 8, exp: ge, fn: ge_32766_uint16}, + {idx: 8, exp: eq, fn: eq_32766_uint16}, + {idx: 8, exp: ne, fn: ne_32766_uint16}, + {idx: 9, exp: lt, fn: lt_32767_uint16}, + {idx: 9, exp: le, fn: le_32767_uint16}, + {idx: 9, exp: gt, fn: gt_32767_uint16}, + {idx: 9, exp: ge, fn: ge_32767_uint16}, + {idx: 9, exp: eq, fn: eq_32767_uint16}, + {idx: 9, exp: ne, fn: ne_32767_uint16}, + {idx: 10, exp: lt, fn: lt_32768_uint16}, + {idx: 10, exp: le, fn: le_32768_uint16}, + {idx: 10, exp: gt, fn: gt_32768_uint16}, + {idx: 10, exp: ge, fn: ge_32768_uint16}, + {idx: 10, exp: eq, fn: eq_32768_uint16}, + {idx: 10, exp: ne, fn: ne_32768_uint16}, + {idx: 11, exp: lt, fn: 
lt_65534_uint16}, + {idx: 11, exp: le, fn: le_65534_uint16}, + {idx: 11, exp: gt, fn: gt_65534_uint16}, + {idx: 11, exp: ge, fn: ge_65534_uint16}, + {idx: 11, exp: eq, fn: eq_65534_uint16}, + {idx: 11, exp: ne, fn: ne_65534_uint16}, + {idx: 12, exp: lt, fn: lt_65535_uint16}, + {idx: 12, exp: le, fn: le_65535_uint16}, + {idx: 12, exp: gt, fn: gt_65535_uint16}, + {idx: 12, exp: ge, fn: ge_65535_uint16}, + {idx: 12, exp: eq, fn: eq_65535_uint16}, + {idx: 12, exp: ne, fn: ne_65535_uint16}, +} + +// uint8 tests +var uint8_vals = []uint8{ + 0, + 1, + 126, + 127, + 128, + 254, + 255, +} + +func lt_0_uint8(x uint8) bool { return x < 0 } +func le_0_uint8(x uint8) bool { return x <= 0 } +func gt_0_uint8(x uint8) bool { return x > 0 } +func ge_0_uint8(x uint8) bool { return x >= 0 } +func eq_0_uint8(x uint8) bool { return x == 0 } +func ne_0_uint8(x uint8) bool { return x != 0 } +func lt_1_uint8(x uint8) bool { return x < 1 } +func le_1_uint8(x uint8) bool { return x <= 1 } +func gt_1_uint8(x uint8) bool { return x > 1 } +func ge_1_uint8(x uint8) bool { return x >= 1 } +func eq_1_uint8(x uint8) bool { return x == 1 } +func ne_1_uint8(x uint8) bool { return x != 1 } +func lt_126_uint8(x uint8) bool { return x < 126 } +func le_126_uint8(x uint8) bool { return x <= 126 } +func gt_126_uint8(x uint8) bool { return x > 126 } +func ge_126_uint8(x uint8) bool { return x >= 126 } +func eq_126_uint8(x uint8) bool { return x == 126 } +func ne_126_uint8(x uint8) bool { return x != 126 } +func lt_127_uint8(x uint8) bool { return x < 127 } +func le_127_uint8(x uint8) bool { return x <= 127 } +func gt_127_uint8(x uint8) bool { return x > 127 } +func ge_127_uint8(x uint8) bool { return x >= 127 } +func eq_127_uint8(x uint8) bool { return x == 127 } +func ne_127_uint8(x uint8) bool { return x != 127 } +func lt_128_uint8(x uint8) bool { return x < 128 } +func le_128_uint8(x uint8) bool { return x <= 128 } +func gt_128_uint8(x uint8) bool { return x > 128 } +func ge_128_uint8(x uint8) bool { 
return x >= 128 } +func eq_128_uint8(x uint8) bool { return x == 128 } +func ne_128_uint8(x uint8) bool { return x != 128 } +func lt_254_uint8(x uint8) bool { return x < 254 } +func le_254_uint8(x uint8) bool { return x <= 254 } +func gt_254_uint8(x uint8) bool { return x > 254 } +func ge_254_uint8(x uint8) bool { return x >= 254 } +func eq_254_uint8(x uint8) bool { return x == 254 } +func ne_254_uint8(x uint8) bool { return x != 254 } +func lt_255_uint8(x uint8) bool { return x < 255 } +func le_255_uint8(x uint8) bool { return x <= 255 } +func gt_255_uint8(x uint8) bool { return x > 255 } +func ge_255_uint8(x uint8) bool { return x >= 255 } +func eq_255_uint8(x uint8) bool { return x == 255 } +func ne_255_uint8(x uint8) bool { return x != 255 } + +var uint8_tests = []struct { + idx int // index of the constant used + exp result // expected results + fn func(uint8) bool +}{ + {idx: 0, exp: lt, fn: lt_0_uint8}, + {idx: 0, exp: le, fn: le_0_uint8}, + {idx: 0, exp: gt, fn: gt_0_uint8}, + {idx: 0, exp: ge, fn: ge_0_uint8}, + {idx: 0, exp: eq, fn: eq_0_uint8}, + {idx: 0, exp: ne, fn: ne_0_uint8}, + {idx: 1, exp: lt, fn: lt_1_uint8}, + {idx: 1, exp: le, fn: le_1_uint8}, + {idx: 1, exp: gt, fn: gt_1_uint8}, + {idx: 1, exp: ge, fn: ge_1_uint8}, + {idx: 1, exp: eq, fn: eq_1_uint8}, + {idx: 1, exp: ne, fn: ne_1_uint8}, + {idx: 2, exp: lt, fn: lt_126_uint8}, + {idx: 2, exp: le, fn: le_126_uint8}, + {idx: 2, exp: gt, fn: gt_126_uint8}, + {idx: 2, exp: ge, fn: ge_126_uint8}, + {idx: 2, exp: eq, fn: eq_126_uint8}, + {idx: 2, exp: ne, fn: ne_126_uint8}, + {idx: 3, exp: lt, fn: lt_127_uint8}, + {idx: 3, exp: le, fn: le_127_uint8}, + {idx: 3, exp: gt, fn: gt_127_uint8}, + {idx: 3, exp: ge, fn: ge_127_uint8}, + {idx: 3, exp: eq, fn: eq_127_uint8}, + {idx: 3, exp: ne, fn: ne_127_uint8}, + {idx: 4, exp: lt, fn: lt_128_uint8}, + {idx: 4, exp: le, fn: le_128_uint8}, + {idx: 4, exp: gt, fn: gt_128_uint8}, + {idx: 4, exp: ge, fn: ge_128_uint8}, + {idx: 4, exp: eq, fn: eq_128_uint8}, + 
{idx: 4, exp: ne, fn: ne_128_uint8}, + {idx: 5, exp: lt, fn: lt_254_uint8}, + {idx: 5, exp: le, fn: le_254_uint8}, + {idx: 5, exp: gt, fn: gt_254_uint8}, + {idx: 5, exp: ge, fn: ge_254_uint8}, + {idx: 5, exp: eq, fn: eq_254_uint8}, + {idx: 5, exp: ne, fn: ne_254_uint8}, + {idx: 6, exp: lt, fn: lt_255_uint8}, + {idx: 6, exp: le, fn: le_255_uint8}, + {idx: 6, exp: gt, fn: gt_255_uint8}, + {idx: 6, exp: ge, fn: ge_255_uint8}, + {idx: 6, exp: eq, fn: eq_255_uint8}, + {idx: 6, exp: ne, fn: ne_255_uint8}, +} + +// int64 tests +var int64_vals = []int64{ + -9223372036854775808, + -9223372036854775807, + -2147483649, + -2147483648, + -2147483647, + -32769, + -32768, + -32767, + -129, + -128, + -127, + -1, + 0, + 1, + 126, + 127, + 128, + 254, + 255, + 256, + 32766, + 32767, + 32768, + 65534, + 65535, + 65536, + 2147483646, + 2147483647, + 2147483648, + 4278190080, + 4294967294, + 4294967295, + 4294967296, + 1095216660480, + 9223372036854775806, + 9223372036854775807, +} + +func lt_neg9223372036854775808_int64(x int64) bool { return x < -9223372036854775808 } +func le_neg9223372036854775808_int64(x int64) bool { return x <= -9223372036854775808 } +func gt_neg9223372036854775808_int64(x int64) bool { return x > -9223372036854775808 } +func ge_neg9223372036854775808_int64(x int64) bool { return x >= -9223372036854775808 } +func eq_neg9223372036854775808_int64(x int64) bool { return x == -9223372036854775808 } +func ne_neg9223372036854775808_int64(x int64) bool { return x != -9223372036854775808 } +func lt_neg9223372036854775807_int64(x int64) bool { return x < -9223372036854775807 } +func le_neg9223372036854775807_int64(x int64) bool { return x <= -9223372036854775807 } +func gt_neg9223372036854775807_int64(x int64) bool { return x > -9223372036854775807 } +func ge_neg9223372036854775807_int64(x int64) bool { return x >= -9223372036854775807 } +func eq_neg9223372036854775807_int64(x int64) bool { return x == -9223372036854775807 } +func ne_neg9223372036854775807_int64(x int64) 
bool { return x != -9223372036854775807 } +func lt_neg2147483649_int64(x int64) bool { return x < -2147483649 } +func le_neg2147483649_int64(x int64) bool { return x <= -2147483649 } +func gt_neg2147483649_int64(x int64) bool { return x > -2147483649 } +func ge_neg2147483649_int64(x int64) bool { return x >= -2147483649 } +func eq_neg2147483649_int64(x int64) bool { return x == -2147483649 } +func ne_neg2147483649_int64(x int64) bool { return x != -2147483649 } +func lt_neg2147483648_int64(x int64) bool { return x < -2147483648 } +func le_neg2147483648_int64(x int64) bool { return x <= -2147483648 } +func gt_neg2147483648_int64(x int64) bool { return x > -2147483648 } +func ge_neg2147483648_int64(x int64) bool { return x >= -2147483648 } +func eq_neg2147483648_int64(x int64) bool { return x == -2147483648 } +func ne_neg2147483648_int64(x int64) bool { return x != -2147483648 } +func lt_neg2147483647_int64(x int64) bool { return x < -2147483647 } +func le_neg2147483647_int64(x int64) bool { return x <= -2147483647 } +func gt_neg2147483647_int64(x int64) bool { return x > -2147483647 } +func ge_neg2147483647_int64(x int64) bool { return x >= -2147483647 } +func eq_neg2147483647_int64(x int64) bool { return x == -2147483647 } +func ne_neg2147483647_int64(x int64) bool { return x != -2147483647 } +func lt_neg32769_int64(x int64) bool { return x < -32769 } +func le_neg32769_int64(x int64) bool { return x <= -32769 } +func gt_neg32769_int64(x int64) bool { return x > -32769 } +func ge_neg32769_int64(x int64) bool { return x >= -32769 } +func eq_neg32769_int64(x int64) bool { return x == -32769 } +func ne_neg32769_int64(x int64) bool { return x != -32769 } +func lt_neg32768_int64(x int64) bool { return x < -32768 } +func le_neg32768_int64(x int64) bool { return x <= -32768 } +func gt_neg32768_int64(x int64) bool { return x > -32768 } +func ge_neg32768_int64(x int64) bool { return x >= -32768 } +func eq_neg32768_int64(x int64) bool { return x == -32768 } +func 
ne_neg32768_int64(x int64) bool { return x != -32768 } +func lt_neg32767_int64(x int64) bool { return x < -32767 } +func le_neg32767_int64(x int64) bool { return x <= -32767 } +func gt_neg32767_int64(x int64) bool { return x > -32767 } +func ge_neg32767_int64(x int64) bool { return x >= -32767 } +func eq_neg32767_int64(x int64) bool { return x == -32767 } +func ne_neg32767_int64(x int64) bool { return x != -32767 } +func lt_neg129_int64(x int64) bool { return x < -129 } +func le_neg129_int64(x int64) bool { return x <= -129 } +func gt_neg129_int64(x int64) bool { return x > -129 } +func ge_neg129_int64(x int64) bool { return x >= -129 } +func eq_neg129_int64(x int64) bool { return x == -129 } +func ne_neg129_int64(x int64) bool { return x != -129 } +func lt_neg128_int64(x int64) bool { return x < -128 } +func le_neg128_int64(x int64) bool { return x <= -128 } +func gt_neg128_int64(x int64) bool { return x > -128 } +func ge_neg128_int64(x int64) bool { return x >= -128 } +func eq_neg128_int64(x int64) bool { return x == -128 } +func ne_neg128_int64(x int64) bool { return x != -128 } +func lt_neg127_int64(x int64) bool { return x < -127 } +func le_neg127_int64(x int64) bool { return x <= -127 } +func gt_neg127_int64(x int64) bool { return x > -127 } +func ge_neg127_int64(x int64) bool { return x >= -127 } +func eq_neg127_int64(x int64) bool { return x == -127 } +func ne_neg127_int64(x int64) bool { return x != -127 } +func lt_neg1_int64(x int64) bool { return x < -1 } +func le_neg1_int64(x int64) bool { return x <= -1 } +func gt_neg1_int64(x int64) bool { return x > -1 } +func ge_neg1_int64(x int64) bool { return x >= -1 } +func eq_neg1_int64(x int64) bool { return x == -1 } +func ne_neg1_int64(x int64) bool { return x != -1 } +func lt_0_int64(x int64) bool { return x < 0 } +func le_0_int64(x int64) bool { return x <= 0 } +func gt_0_int64(x int64) bool { return x > 0 } +func ge_0_int64(x int64) bool { return x >= 0 } +func eq_0_int64(x int64) bool { return x == 0 } 
+func ne_0_int64(x int64) bool { return x != 0 } +func lt_1_int64(x int64) bool { return x < 1 } +func le_1_int64(x int64) bool { return x <= 1 } +func gt_1_int64(x int64) bool { return x > 1 } +func ge_1_int64(x int64) bool { return x >= 1 } +func eq_1_int64(x int64) bool { return x == 1 } +func ne_1_int64(x int64) bool { return x != 1 } +func lt_126_int64(x int64) bool { return x < 126 } +func le_126_int64(x int64) bool { return x <= 126 } +func gt_126_int64(x int64) bool { return x > 126 } +func ge_126_int64(x int64) bool { return x >= 126 } +func eq_126_int64(x int64) bool { return x == 126 } +func ne_126_int64(x int64) bool { return x != 126 } +func lt_127_int64(x int64) bool { return x < 127 } +func le_127_int64(x int64) bool { return x <= 127 } +func gt_127_int64(x int64) bool { return x > 127 } +func ge_127_int64(x int64) bool { return x >= 127 } +func eq_127_int64(x int64) bool { return x == 127 } +func ne_127_int64(x int64) bool { return x != 127 } +func lt_128_int64(x int64) bool { return x < 128 } +func le_128_int64(x int64) bool { return x <= 128 } +func gt_128_int64(x int64) bool { return x > 128 } +func ge_128_int64(x int64) bool { return x >= 128 } +func eq_128_int64(x int64) bool { return x == 128 } +func ne_128_int64(x int64) bool { return x != 128 } +func lt_254_int64(x int64) bool { return x < 254 } +func le_254_int64(x int64) bool { return x <= 254 } +func gt_254_int64(x int64) bool { return x > 254 } +func ge_254_int64(x int64) bool { return x >= 254 } +func eq_254_int64(x int64) bool { return x == 254 } +func ne_254_int64(x int64) bool { return x != 254 } +func lt_255_int64(x int64) bool { return x < 255 } +func le_255_int64(x int64) bool { return x <= 255 } +func gt_255_int64(x int64) bool { return x > 255 } +func ge_255_int64(x int64) bool { return x >= 255 } +func eq_255_int64(x int64) bool { return x == 255 } +func ne_255_int64(x int64) bool { return x != 255 } +func lt_256_int64(x int64) bool { return x < 256 } +func le_256_int64(x 
int64) bool { return x <= 256 } +func gt_256_int64(x int64) bool { return x > 256 } +func ge_256_int64(x int64) bool { return x >= 256 } +func eq_256_int64(x int64) bool { return x == 256 } +func ne_256_int64(x int64) bool { return x != 256 } +func lt_32766_int64(x int64) bool { return x < 32766 } +func le_32766_int64(x int64) bool { return x <= 32766 } +func gt_32766_int64(x int64) bool { return x > 32766 } +func ge_32766_int64(x int64) bool { return x >= 32766 } +func eq_32766_int64(x int64) bool { return x == 32766 } +func ne_32766_int64(x int64) bool { return x != 32766 } +func lt_32767_int64(x int64) bool { return x < 32767 } +func le_32767_int64(x int64) bool { return x <= 32767 } +func gt_32767_int64(x int64) bool { return x > 32767 } +func ge_32767_int64(x int64) bool { return x >= 32767 } +func eq_32767_int64(x int64) bool { return x == 32767 } +func ne_32767_int64(x int64) bool { return x != 32767 } +func lt_32768_int64(x int64) bool { return x < 32768 } +func le_32768_int64(x int64) bool { return x <= 32768 } +func gt_32768_int64(x int64) bool { return x > 32768 } +func ge_32768_int64(x int64) bool { return x >= 32768 } +func eq_32768_int64(x int64) bool { return x == 32768 } +func ne_32768_int64(x int64) bool { return x != 32768 } +func lt_65534_int64(x int64) bool { return x < 65534 } +func le_65534_int64(x int64) bool { return x <= 65534 } +func gt_65534_int64(x int64) bool { return x > 65534 } +func ge_65534_int64(x int64) bool { return x >= 65534 } +func eq_65534_int64(x int64) bool { return x == 65534 } +func ne_65534_int64(x int64) bool { return x != 65534 } +func lt_65535_int64(x int64) bool { return x < 65535 } +func le_65535_int64(x int64) bool { return x <= 65535 } +func gt_65535_int64(x int64) bool { return x > 65535 } +func ge_65535_int64(x int64) bool { return x >= 65535 } +func eq_65535_int64(x int64) bool { return x == 65535 } +func ne_65535_int64(x int64) bool { return x != 65535 } +func lt_65536_int64(x int64) bool { return x < 65536 } 
+func le_65536_int64(x int64) bool { return x <= 65536 } +func gt_65536_int64(x int64) bool { return x > 65536 } +func ge_65536_int64(x int64) bool { return x >= 65536 } +func eq_65536_int64(x int64) bool { return x == 65536 } +func ne_65536_int64(x int64) bool { return x != 65536 } +func lt_2147483646_int64(x int64) bool { return x < 2147483646 } +func le_2147483646_int64(x int64) bool { return x <= 2147483646 } +func gt_2147483646_int64(x int64) bool { return x > 2147483646 } +func ge_2147483646_int64(x int64) bool { return x >= 2147483646 } +func eq_2147483646_int64(x int64) bool { return x == 2147483646 } +func ne_2147483646_int64(x int64) bool { return x != 2147483646 } +func lt_2147483647_int64(x int64) bool { return x < 2147483647 } +func le_2147483647_int64(x int64) bool { return x <= 2147483647 } +func gt_2147483647_int64(x int64) bool { return x > 2147483647 } +func ge_2147483647_int64(x int64) bool { return x >= 2147483647 } +func eq_2147483647_int64(x int64) bool { return x == 2147483647 } +func ne_2147483647_int64(x int64) bool { return x != 2147483647 } +func lt_2147483648_int64(x int64) bool { return x < 2147483648 } +func le_2147483648_int64(x int64) bool { return x <= 2147483648 } +func gt_2147483648_int64(x int64) bool { return x > 2147483648 } +func ge_2147483648_int64(x int64) bool { return x >= 2147483648 } +func eq_2147483648_int64(x int64) bool { return x == 2147483648 } +func ne_2147483648_int64(x int64) bool { return x != 2147483648 } +func lt_4278190080_int64(x int64) bool { return x < 4278190080 } +func le_4278190080_int64(x int64) bool { return x <= 4278190080 } +func gt_4278190080_int64(x int64) bool { return x > 4278190080 } +func ge_4278190080_int64(x int64) bool { return x >= 4278190080 } +func eq_4278190080_int64(x int64) bool { return x == 4278190080 } +func ne_4278190080_int64(x int64) bool { return x != 4278190080 } +func lt_4294967294_int64(x int64) bool { return x < 4294967294 } +func le_4294967294_int64(x int64) bool { return 
x <= 4294967294 } +func gt_4294967294_int64(x int64) bool { return x > 4294967294 } +func ge_4294967294_int64(x int64) bool { return x >= 4294967294 } +func eq_4294967294_int64(x int64) bool { return x == 4294967294 } +func ne_4294967294_int64(x int64) bool { return x != 4294967294 } +func lt_4294967295_int64(x int64) bool { return x < 4294967295 } +func le_4294967295_int64(x int64) bool { return x <= 4294967295 } +func gt_4294967295_int64(x int64) bool { return x > 4294967295 } +func ge_4294967295_int64(x int64) bool { return x >= 4294967295 } +func eq_4294967295_int64(x int64) bool { return x == 4294967295 } +func ne_4294967295_int64(x int64) bool { return x != 4294967295 } +func lt_4294967296_int64(x int64) bool { return x < 4294967296 } +func le_4294967296_int64(x int64) bool { return x <= 4294967296 } +func gt_4294967296_int64(x int64) bool { return x > 4294967296 } +func ge_4294967296_int64(x int64) bool { return x >= 4294967296 } +func eq_4294967296_int64(x int64) bool { return x == 4294967296 } +func ne_4294967296_int64(x int64) bool { return x != 4294967296 } +func lt_1095216660480_int64(x int64) bool { return x < 1095216660480 } +func le_1095216660480_int64(x int64) bool { return x <= 1095216660480 } +func gt_1095216660480_int64(x int64) bool { return x > 1095216660480 } +func ge_1095216660480_int64(x int64) bool { return x >= 1095216660480 } +func eq_1095216660480_int64(x int64) bool { return x == 1095216660480 } +func ne_1095216660480_int64(x int64) bool { return x != 1095216660480 } +func lt_9223372036854775806_int64(x int64) bool { return x < 9223372036854775806 } +func le_9223372036854775806_int64(x int64) bool { return x <= 9223372036854775806 } +func gt_9223372036854775806_int64(x int64) bool { return x > 9223372036854775806 } +func ge_9223372036854775806_int64(x int64) bool { return x >= 9223372036854775806 } +func eq_9223372036854775806_int64(x int64) bool { return x == 9223372036854775806 } +func ne_9223372036854775806_int64(x int64) bool { 
return x != 9223372036854775806 } +func lt_9223372036854775807_int64(x int64) bool { return x < 9223372036854775807 } +func le_9223372036854775807_int64(x int64) bool { return x <= 9223372036854775807 } +func gt_9223372036854775807_int64(x int64) bool { return x > 9223372036854775807 } +func ge_9223372036854775807_int64(x int64) bool { return x >= 9223372036854775807 } +func eq_9223372036854775807_int64(x int64) bool { return x == 9223372036854775807 } +func ne_9223372036854775807_int64(x int64) bool { return x != 9223372036854775807 } + +var int64_tests = []struct { + idx int // index of the constant used + exp result // expected results + fn func(int64) bool +}{ + {idx: 0, exp: lt, fn: lt_neg9223372036854775808_int64}, + {idx: 0, exp: le, fn: le_neg9223372036854775808_int64}, + {idx: 0, exp: gt, fn: gt_neg9223372036854775808_int64}, + {idx: 0, exp: ge, fn: ge_neg9223372036854775808_int64}, + {idx: 0, exp: eq, fn: eq_neg9223372036854775808_int64}, + {idx: 0, exp: ne, fn: ne_neg9223372036854775808_int64}, + {idx: 1, exp: lt, fn: lt_neg9223372036854775807_int64}, + {idx: 1, exp: le, fn: le_neg9223372036854775807_int64}, + {idx: 1, exp: gt, fn: gt_neg9223372036854775807_int64}, + {idx: 1, exp: ge, fn: ge_neg9223372036854775807_int64}, + {idx: 1, exp: eq, fn: eq_neg9223372036854775807_int64}, + {idx: 1, exp: ne, fn: ne_neg9223372036854775807_int64}, + {idx: 2, exp: lt, fn: lt_neg2147483649_int64}, + {idx: 2, exp: le, fn: le_neg2147483649_int64}, + {idx: 2, exp: gt, fn: gt_neg2147483649_int64}, + {idx: 2, exp: ge, fn: ge_neg2147483649_int64}, + {idx: 2, exp: eq, fn: eq_neg2147483649_int64}, + {idx: 2, exp: ne, fn: ne_neg2147483649_int64}, + {idx: 3, exp: lt, fn: lt_neg2147483648_int64}, + {idx: 3, exp: le, fn: le_neg2147483648_int64}, + {idx: 3, exp: gt, fn: gt_neg2147483648_int64}, + {idx: 3, exp: ge, fn: ge_neg2147483648_int64}, + {idx: 3, exp: eq, fn: eq_neg2147483648_int64}, + {idx: 3, exp: ne, fn: ne_neg2147483648_int64}, + {idx: 4, exp: lt, fn: 
lt_neg2147483647_int64}, + {idx: 4, exp: le, fn: le_neg2147483647_int64}, + {idx: 4, exp: gt, fn: gt_neg2147483647_int64}, + {idx: 4, exp: ge, fn: ge_neg2147483647_int64}, + {idx: 4, exp: eq, fn: eq_neg2147483647_int64}, + {idx: 4, exp: ne, fn: ne_neg2147483647_int64}, + {idx: 5, exp: lt, fn: lt_neg32769_int64}, + {idx: 5, exp: le, fn: le_neg32769_int64}, + {idx: 5, exp: gt, fn: gt_neg32769_int64}, + {idx: 5, exp: ge, fn: ge_neg32769_int64}, + {idx: 5, exp: eq, fn: eq_neg32769_int64}, + {idx: 5, exp: ne, fn: ne_neg32769_int64}, + {idx: 6, exp: lt, fn: lt_neg32768_int64}, + {idx: 6, exp: le, fn: le_neg32768_int64}, + {idx: 6, exp: gt, fn: gt_neg32768_int64}, + {idx: 6, exp: ge, fn: ge_neg32768_int64}, + {idx: 6, exp: eq, fn: eq_neg32768_int64}, + {idx: 6, exp: ne, fn: ne_neg32768_int64}, + {idx: 7, exp: lt, fn: lt_neg32767_int64}, + {idx: 7, exp: le, fn: le_neg32767_int64}, + {idx: 7, exp: gt, fn: gt_neg32767_int64}, + {idx: 7, exp: ge, fn: ge_neg32767_int64}, + {idx: 7, exp: eq, fn: eq_neg32767_int64}, + {idx: 7, exp: ne, fn: ne_neg32767_int64}, + {idx: 8, exp: lt, fn: lt_neg129_int64}, + {idx: 8, exp: le, fn: le_neg129_int64}, + {idx: 8, exp: gt, fn: gt_neg129_int64}, + {idx: 8, exp: ge, fn: ge_neg129_int64}, + {idx: 8, exp: eq, fn: eq_neg129_int64}, + {idx: 8, exp: ne, fn: ne_neg129_int64}, + {idx: 9, exp: lt, fn: lt_neg128_int64}, + {idx: 9, exp: le, fn: le_neg128_int64}, + {idx: 9, exp: gt, fn: gt_neg128_int64}, + {idx: 9, exp: ge, fn: ge_neg128_int64}, + {idx: 9, exp: eq, fn: eq_neg128_int64}, + {idx: 9, exp: ne, fn: ne_neg128_int64}, + {idx: 10, exp: lt, fn: lt_neg127_int64}, + {idx: 10, exp: le, fn: le_neg127_int64}, + {idx: 10, exp: gt, fn: gt_neg127_int64}, + {idx: 10, exp: ge, fn: ge_neg127_int64}, + {idx: 10, exp: eq, fn: eq_neg127_int64}, + {idx: 10, exp: ne, fn: ne_neg127_int64}, + {idx: 11, exp: lt, fn: lt_neg1_int64}, + {idx: 11, exp: le, fn: le_neg1_int64}, + {idx: 11, exp: gt, fn: gt_neg1_int64}, + {idx: 11, exp: ge, fn: ge_neg1_int64}, + {idx: 11, 
exp: eq, fn: eq_neg1_int64}, + {idx: 11, exp: ne, fn: ne_neg1_int64}, + {idx: 12, exp: lt, fn: lt_0_int64}, + {idx: 12, exp: le, fn: le_0_int64}, + {idx: 12, exp: gt, fn: gt_0_int64}, + {idx: 12, exp: ge, fn: ge_0_int64}, + {idx: 12, exp: eq, fn: eq_0_int64}, + {idx: 12, exp: ne, fn: ne_0_int64}, + {idx: 13, exp: lt, fn: lt_1_int64}, + {idx: 13, exp: le, fn: le_1_int64}, + {idx: 13, exp: gt, fn: gt_1_int64}, + {idx: 13, exp: ge, fn: ge_1_int64}, + {idx: 13, exp: eq, fn: eq_1_int64}, + {idx: 13, exp: ne, fn: ne_1_int64}, + {idx: 14, exp: lt, fn: lt_126_int64}, + {idx: 14, exp: le, fn: le_126_int64}, + {idx: 14, exp: gt, fn: gt_126_int64}, + {idx: 14, exp: ge, fn: ge_126_int64}, + {idx: 14, exp: eq, fn: eq_126_int64}, + {idx: 14, exp: ne, fn: ne_126_int64}, + {idx: 15, exp: lt, fn: lt_127_int64}, + {idx: 15, exp: le, fn: le_127_int64}, + {idx: 15, exp: gt, fn: gt_127_int64}, + {idx: 15, exp: ge, fn: ge_127_int64}, + {idx: 15, exp: eq, fn: eq_127_int64}, + {idx: 15, exp: ne, fn: ne_127_int64}, + {idx: 16, exp: lt, fn: lt_128_int64}, + {idx: 16, exp: le, fn: le_128_int64}, + {idx: 16, exp: gt, fn: gt_128_int64}, + {idx: 16, exp: ge, fn: ge_128_int64}, + {idx: 16, exp: eq, fn: eq_128_int64}, + {idx: 16, exp: ne, fn: ne_128_int64}, + {idx: 17, exp: lt, fn: lt_254_int64}, + {idx: 17, exp: le, fn: le_254_int64}, + {idx: 17, exp: gt, fn: gt_254_int64}, + {idx: 17, exp: ge, fn: ge_254_int64}, + {idx: 17, exp: eq, fn: eq_254_int64}, + {idx: 17, exp: ne, fn: ne_254_int64}, + {idx: 18, exp: lt, fn: lt_255_int64}, + {idx: 18, exp: le, fn: le_255_int64}, + {idx: 18, exp: gt, fn: gt_255_int64}, + {idx: 18, exp: ge, fn: ge_255_int64}, + {idx: 18, exp: eq, fn: eq_255_int64}, + {idx: 18, exp: ne, fn: ne_255_int64}, + {idx: 19, exp: lt, fn: lt_256_int64}, + {idx: 19, exp: le, fn: le_256_int64}, + {idx: 19, exp: gt, fn: gt_256_int64}, + {idx: 19, exp: ge, fn: ge_256_int64}, + {idx: 19, exp: eq, fn: eq_256_int64}, + {idx: 19, exp: ne, fn: ne_256_int64}, + {idx: 20, exp: lt, fn: 
lt_32766_int64}, + {idx: 20, exp: le, fn: le_32766_int64}, + {idx: 20, exp: gt, fn: gt_32766_int64}, + {idx: 20, exp: ge, fn: ge_32766_int64}, + {idx: 20, exp: eq, fn: eq_32766_int64}, + {idx: 20, exp: ne, fn: ne_32766_int64}, + {idx: 21, exp: lt, fn: lt_32767_int64}, + {idx: 21, exp: le, fn: le_32767_int64}, + {idx: 21, exp: gt, fn: gt_32767_int64}, + {idx: 21, exp: ge, fn: ge_32767_int64}, + {idx: 21, exp: eq, fn: eq_32767_int64}, + {idx: 21, exp: ne, fn: ne_32767_int64}, + {idx: 22, exp: lt, fn: lt_32768_int64}, + {idx: 22, exp: le, fn: le_32768_int64}, + {idx: 22, exp: gt, fn: gt_32768_int64}, + {idx: 22, exp: ge, fn: ge_32768_int64}, + {idx: 22, exp: eq, fn: eq_32768_int64}, + {idx: 22, exp: ne, fn: ne_32768_int64}, + {idx: 23, exp: lt, fn: lt_65534_int64}, + {idx: 23, exp: le, fn: le_65534_int64}, + {idx: 23, exp: gt, fn: gt_65534_int64}, + {idx: 23, exp: ge, fn: ge_65534_int64}, + {idx: 23, exp: eq, fn: eq_65534_int64}, + {idx: 23, exp: ne, fn: ne_65534_int64}, + {idx: 24, exp: lt, fn: lt_65535_int64}, + {idx: 24, exp: le, fn: le_65535_int64}, + {idx: 24, exp: gt, fn: gt_65535_int64}, + {idx: 24, exp: ge, fn: ge_65535_int64}, + {idx: 24, exp: eq, fn: eq_65535_int64}, + {idx: 24, exp: ne, fn: ne_65535_int64}, + {idx: 25, exp: lt, fn: lt_65536_int64}, + {idx: 25, exp: le, fn: le_65536_int64}, + {idx: 25, exp: gt, fn: gt_65536_int64}, + {idx: 25, exp: ge, fn: ge_65536_int64}, + {idx: 25, exp: eq, fn: eq_65536_int64}, + {idx: 25, exp: ne, fn: ne_65536_int64}, + {idx: 26, exp: lt, fn: lt_2147483646_int64}, + {idx: 26, exp: le, fn: le_2147483646_int64}, + {idx: 26, exp: gt, fn: gt_2147483646_int64}, + {idx: 26, exp: ge, fn: ge_2147483646_int64}, + {idx: 26, exp: eq, fn: eq_2147483646_int64}, + {idx: 26, exp: ne, fn: ne_2147483646_int64}, + {idx: 27, exp: lt, fn: lt_2147483647_int64}, + {idx: 27, exp: le, fn: le_2147483647_int64}, + {idx: 27, exp: gt, fn: gt_2147483647_int64}, + {idx: 27, exp: ge, fn: ge_2147483647_int64}, + {idx: 27, exp: eq, fn: 
eq_2147483647_int64}, + {idx: 27, exp: ne, fn: ne_2147483647_int64}, + {idx: 28, exp: lt, fn: lt_2147483648_int64}, + {idx: 28, exp: le, fn: le_2147483648_int64}, + {idx: 28, exp: gt, fn: gt_2147483648_int64}, + {idx: 28, exp: ge, fn: ge_2147483648_int64}, + {idx: 28, exp: eq, fn: eq_2147483648_int64}, + {idx: 28, exp: ne, fn: ne_2147483648_int64}, + {idx: 29, exp: lt, fn: lt_4278190080_int64}, + {idx: 29, exp: le, fn: le_4278190080_int64}, + {idx: 29, exp: gt, fn: gt_4278190080_int64}, + {idx: 29, exp: ge, fn: ge_4278190080_int64}, + {idx: 29, exp: eq, fn: eq_4278190080_int64}, + {idx: 29, exp: ne, fn: ne_4278190080_int64}, + {idx: 30, exp: lt, fn: lt_4294967294_int64}, + {idx: 30, exp: le, fn: le_4294967294_int64}, + {idx: 30, exp: gt, fn: gt_4294967294_int64}, + {idx: 30, exp: ge, fn: ge_4294967294_int64}, + {idx: 30, exp: eq, fn: eq_4294967294_int64}, + {idx: 30, exp: ne, fn: ne_4294967294_int64}, + {idx: 31, exp: lt, fn: lt_4294967295_int64}, + {idx: 31, exp: le, fn: le_4294967295_int64}, + {idx: 31, exp: gt, fn: gt_4294967295_int64}, + {idx: 31, exp: ge, fn: ge_4294967295_int64}, + {idx: 31, exp: eq, fn: eq_4294967295_int64}, + {idx: 31, exp: ne, fn: ne_4294967295_int64}, + {idx: 32, exp: lt, fn: lt_4294967296_int64}, + {idx: 32, exp: le, fn: le_4294967296_int64}, + {idx: 32, exp: gt, fn: gt_4294967296_int64}, + {idx: 32, exp: ge, fn: ge_4294967296_int64}, + {idx: 32, exp: eq, fn: eq_4294967296_int64}, + {idx: 32, exp: ne, fn: ne_4294967296_int64}, + {idx: 33, exp: lt, fn: lt_1095216660480_int64}, + {idx: 33, exp: le, fn: le_1095216660480_int64}, + {idx: 33, exp: gt, fn: gt_1095216660480_int64}, + {idx: 33, exp: ge, fn: ge_1095216660480_int64}, + {idx: 33, exp: eq, fn: eq_1095216660480_int64}, + {idx: 33, exp: ne, fn: ne_1095216660480_int64}, + {idx: 34, exp: lt, fn: lt_9223372036854775806_int64}, + {idx: 34, exp: le, fn: le_9223372036854775806_int64}, + {idx: 34, exp: gt, fn: gt_9223372036854775806_int64}, + {idx: 34, exp: ge, fn: 
ge_9223372036854775806_int64}, + {idx: 34, exp: eq, fn: eq_9223372036854775806_int64}, + {idx: 34, exp: ne, fn: ne_9223372036854775806_int64}, + {idx: 35, exp: lt, fn: lt_9223372036854775807_int64}, + {idx: 35, exp: le, fn: le_9223372036854775807_int64}, + {idx: 35, exp: gt, fn: gt_9223372036854775807_int64}, + {idx: 35, exp: ge, fn: ge_9223372036854775807_int64}, + {idx: 35, exp: eq, fn: eq_9223372036854775807_int64}, + {idx: 35, exp: ne, fn: ne_9223372036854775807_int64}, +} + +// int32 tests +var int32_vals = []int32{ + -2147483648, + -2147483647, + -32769, + -32768, + -32767, + -129, + -128, + -127, + -1, + 0, + 1, + 126, + 127, + 128, + 254, + 255, + 256, + 32766, + 32767, + 32768, + 65534, + 65535, + 65536, + 2147483646, + 2147483647, +} + +func lt_neg2147483648_int32(x int32) bool { return x < -2147483648 } +func le_neg2147483648_int32(x int32) bool { return x <= -2147483648 } +func gt_neg2147483648_int32(x int32) bool { return x > -2147483648 } +func ge_neg2147483648_int32(x int32) bool { return x >= -2147483648 } +func eq_neg2147483648_int32(x int32) bool { return x == -2147483648 } +func ne_neg2147483648_int32(x int32) bool { return x != -2147483648 } +func lt_neg2147483647_int32(x int32) bool { return x < -2147483647 } +func le_neg2147483647_int32(x int32) bool { return x <= -2147483647 } +func gt_neg2147483647_int32(x int32) bool { return x > -2147483647 } +func ge_neg2147483647_int32(x int32) bool { return x >= -2147483647 } +func eq_neg2147483647_int32(x int32) bool { return x == -2147483647 } +func ne_neg2147483647_int32(x int32) bool { return x != -2147483647 } +func lt_neg32769_int32(x int32) bool { return x < -32769 } +func le_neg32769_int32(x int32) bool { return x <= -32769 } +func gt_neg32769_int32(x int32) bool { return x > -32769 } +func ge_neg32769_int32(x int32) bool { return x >= -32769 } +func eq_neg32769_int32(x int32) bool { return x == -32769 } +func ne_neg32769_int32(x int32) bool { return x != -32769 } +func lt_neg32768_int32(x 
int32) bool { return x < -32768 } +func le_neg32768_int32(x int32) bool { return x <= -32768 } +func gt_neg32768_int32(x int32) bool { return x > -32768 } +func ge_neg32768_int32(x int32) bool { return x >= -32768 } +func eq_neg32768_int32(x int32) bool { return x == -32768 } +func ne_neg32768_int32(x int32) bool { return x != -32768 } +func lt_neg32767_int32(x int32) bool { return x < -32767 } +func le_neg32767_int32(x int32) bool { return x <= -32767 } +func gt_neg32767_int32(x int32) bool { return x > -32767 } +func ge_neg32767_int32(x int32) bool { return x >= -32767 } +func eq_neg32767_int32(x int32) bool { return x == -32767 } +func ne_neg32767_int32(x int32) bool { return x != -32767 } +func lt_neg129_int32(x int32) bool { return x < -129 } +func le_neg129_int32(x int32) bool { return x <= -129 } +func gt_neg129_int32(x int32) bool { return x > -129 } +func ge_neg129_int32(x int32) bool { return x >= -129 } +func eq_neg129_int32(x int32) bool { return x == -129 } +func ne_neg129_int32(x int32) bool { return x != -129 } +func lt_neg128_int32(x int32) bool { return x < -128 } +func le_neg128_int32(x int32) bool { return x <= -128 } +func gt_neg128_int32(x int32) bool { return x > -128 } +func ge_neg128_int32(x int32) bool { return x >= -128 } +func eq_neg128_int32(x int32) bool { return x == -128 } +func ne_neg128_int32(x int32) bool { return x != -128 } +func lt_neg127_int32(x int32) bool { return x < -127 } +func le_neg127_int32(x int32) bool { return x <= -127 } +func gt_neg127_int32(x int32) bool { return x > -127 } +func ge_neg127_int32(x int32) bool { return x >= -127 } +func eq_neg127_int32(x int32) bool { return x == -127 } +func ne_neg127_int32(x int32) bool { return x != -127 } +func lt_neg1_int32(x int32) bool { return x < -1 } +func le_neg1_int32(x int32) bool { return x <= -1 } +func gt_neg1_int32(x int32) bool { return x > -1 } +func ge_neg1_int32(x int32) bool { return x >= -1 } +func eq_neg1_int32(x int32) bool { return x == -1 } +func 
ne_neg1_int32(x int32) bool { return x != -1 } +func lt_0_int32(x int32) bool { return x < 0 } +func le_0_int32(x int32) bool { return x <= 0 } +func gt_0_int32(x int32) bool { return x > 0 } +func ge_0_int32(x int32) bool { return x >= 0 } +func eq_0_int32(x int32) bool { return x == 0 } +func ne_0_int32(x int32) bool { return x != 0 } +func lt_1_int32(x int32) bool { return x < 1 } +func le_1_int32(x int32) bool { return x <= 1 } +func gt_1_int32(x int32) bool { return x > 1 } +func ge_1_int32(x int32) bool { return x >= 1 } +func eq_1_int32(x int32) bool { return x == 1 } +func ne_1_int32(x int32) bool { return x != 1 } +func lt_126_int32(x int32) bool { return x < 126 } +func le_126_int32(x int32) bool { return x <= 126 } +func gt_126_int32(x int32) bool { return x > 126 } +func ge_126_int32(x int32) bool { return x >= 126 } +func eq_126_int32(x int32) bool { return x == 126 } +func ne_126_int32(x int32) bool { return x != 126 } +func lt_127_int32(x int32) bool { return x < 127 } +func le_127_int32(x int32) bool { return x <= 127 } +func gt_127_int32(x int32) bool { return x > 127 } +func ge_127_int32(x int32) bool { return x >= 127 } +func eq_127_int32(x int32) bool { return x == 127 } +func ne_127_int32(x int32) bool { return x != 127 } +func lt_128_int32(x int32) bool { return x < 128 } +func le_128_int32(x int32) bool { return x <= 128 } +func gt_128_int32(x int32) bool { return x > 128 } +func ge_128_int32(x int32) bool { return x >= 128 } +func eq_128_int32(x int32) bool { return x == 128 } +func ne_128_int32(x int32) bool { return x != 128 } +func lt_254_int32(x int32) bool { return x < 254 } +func le_254_int32(x int32) bool { return x <= 254 } +func gt_254_int32(x int32) bool { return x > 254 } +func ge_254_int32(x int32) bool { return x >= 254 } +func eq_254_int32(x int32) bool { return x == 254 } +func ne_254_int32(x int32) bool { return x != 254 } +func lt_255_int32(x int32) bool { return x < 255 } +func le_255_int32(x int32) bool { return x <= 255 } 
+func gt_255_int32(x int32) bool { return x > 255 } +func ge_255_int32(x int32) bool { return x >= 255 } +func eq_255_int32(x int32) bool { return x == 255 } +func ne_255_int32(x int32) bool { return x != 255 } +func lt_256_int32(x int32) bool { return x < 256 } +func le_256_int32(x int32) bool { return x <= 256 } +func gt_256_int32(x int32) bool { return x > 256 } +func ge_256_int32(x int32) bool { return x >= 256 } +func eq_256_int32(x int32) bool { return x == 256 } +func ne_256_int32(x int32) bool { return x != 256 } +func lt_32766_int32(x int32) bool { return x < 32766 } +func le_32766_int32(x int32) bool { return x <= 32766 } +func gt_32766_int32(x int32) bool { return x > 32766 } +func ge_32766_int32(x int32) bool { return x >= 32766 } +func eq_32766_int32(x int32) bool { return x == 32766 } +func ne_32766_int32(x int32) bool { return x != 32766 } +func lt_32767_int32(x int32) bool { return x < 32767 } +func le_32767_int32(x int32) bool { return x <= 32767 } +func gt_32767_int32(x int32) bool { return x > 32767 } +func ge_32767_int32(x int32) bool { return x >= 32767 } +func eq_32767_int32(x int32) bool { return x == 32767 } +func ne_32767_int32(x int32) bool { return x != 32767 } +func lt_32768_int32(x int32) bool { return x < 32768 } +func le_32768_int32(x int32) bool { return x <= 32768 } +func gt_32768_int32(x int32) bool { return x > 32768 } +func ge_32768_int32(x int32) bool { return x >= 32768 } +func eq_32768_int32(x int32) bool { return x == 32768 } +func ne_32768_int32(x int32) bool { return x != 32768 } +func lt_65534_int32(x int32) bool { return x < 65534 } +func le_65534_int32(x int32) bool { return x <= 65534 } +func gt_65534_int32(x int32) bool { return x > 65534 } +func ge_65534_int32(x int32) bool { return x >= 65534 } +func eq_65534_int32(x int32) bool { return x == 65534 } +func ne_65534_int32(x int32) bool { return x != 65534 } +func lt_65535_int32(x int32) bool { return x < 65535 } +func le_65535_int32(x int32) bool { return x <= 65535 } 
+func gt_65535_int32(x int32) bool { return x > 65535 } +func ge_65535_int32(x int32) bool { return x >= 65535 } +func eq_65535_int32(x int32) bool { return x == 65535 } +func ne_65535_int32(x int32) bool { return x != 65535 } +func lt_65536_int32(x int32) bool { return x < 65536 } +func le_65536_int32(x int32) bool { return x <= 65536 } +func gt_65536_int32(x int32) bool { return x > 65536 } +func ge_65536_int32(x int32) bool { return x >= 65536 } +func eq_65536_int32(x int32) bool { return x == 65536 } +func ne_65536_int32(x int32) bool { return x != 65536 } +func lt_2147483646_int32(x int32) bool { return x < 2147483646 } +func le_2147483646_int32(x int32) bool { return x <= 2147483646 } +func gt_2147483646_int32(x int32) bool { return x > 2147483646 } +func ge_2147483646_int32(x int32) bool { return x >= 2147483646 } +func eq_2147483646_int32(x int32) bool { return x == 2147483646 } +func ne_2147483646_int32(x int32) bool { return x != 2147483646 } +func lt_2147483647_int32(x int32) bool { return x < 2147483647 } +func le_2147483647_int32(x int32) bool { return x <= 2147483647 } +func gt_2147483647_int32(x int32) bool { return x > 2147483647 } +func ge_2147483647_int32(x int32) bool { return x >= 2147483647 } +func eq_2147483647_int32(x int32) bool { return x == 2147483647 } +func ne_2147483647_int32(x int32) bool { return x != 2147483647 } + +var int32_tests = []struct { + idx int // index of the constant used + exp result // expected results + fn func(int32) bool +}{ + {idx: 0, exp: lt, fn: lt_neg2147483648_int32}, + {idx: 0, exp: le, fn: le_neg2147483648_int32}, + {idx: 0, exp: gt, fn: gt_neg2147483648_int32}, + {idx: 0, exp: ge, fn: ge_neg2147483648_int32}, + {idx: 0, exp: eq, fn: eq_neg2147483648_int32}, + {idx: 0, exp: ne, fn: ne_neg2147483648_int32}, + {idx: 1, exp: lt, fn: lt_neg2147483647_int32}, + {idx: 1, exp: le, fn: le_neg2147483647_int32}, + {idx: 1, exp: gt, fn: gt_neg2147483647_int32}, + {idx: 1, exp: ge, fn: ge_neg2147483647_int32}, + {idx: 1, 
exp: eq, fn: eq_neg2147483647_int32}, + {idx: 1, exp: ne, fn: ne_neg2147483647_int32}, + {idx: 2, exp: lt, fn: lt_neg32769_int32}, + {idx: 2, exp: le, fn: le_neg32769_int32}, + {idx: 2, exp: gt, fn: gt_neg32769_int32}, + {idx: 2, exp: ge, fn: ge_neg32769_int32}, + {idx: 2, exp: eq, fn: eq_neg32769_int32}, + {idx: 2, exp: ne, fn: ne_neg32769_int32}, + {idx: 3, exp: lt, fn: lt_neg32768_int32}, + {idx: 3, exp: le, fn: le_neg32768_int32}, + {idx: 3, exp: gt, fn: gt_neg32768_int32}, + {idx: 3, exp: ge, fn: ge_neg32768_int32}, + {idx: 3, exp: eq, fn: eq_neg32768_int32}, + {idx: 3, exp: ne, fn: ne_neg32768_int32}, + {idx: 4, exp: lt, fn: lt_neg32767_int32}, + {idx: 4, exp: le, fn: le_neg32767_int32}, + {idx: 4, exp: gt, fn: gt_neg32767_int32}, + {idx: 4, exp: ge, fn: ge_neg32767_int32}, + {idx: 4, exp: eq, fn: eq_neg32767_int32}, + {idx: 4, exp: ne, fn: ne_neg32767_int32}, + {idx: 5, exp: lt, fn: lt_neg129_int32}, + {idx: 5, exp: le, fn: le_neg129_int32}, + {idx: 5, exp: gt, fn: gt_neg129_int32}, + {idx: 5, exp: ge, fn: ge_neg129_int32}, + {idx: 5, exp: eq, fn: eq_neg129_int32}, + {idx: 5, exp: ne, fn: ne_neg129_int32}, + {idx: 6, exp: lt, fn: lt_neg128_int32}, + {idx: 6, exp: le, fn: le_neg128_int32}, + {idx: 6, exp: gt, fn: gt_neg128_int32}, + {idx: 6, exp: ge, fn: ge_neg128_int32}, + {idx: 6, exp: eq, fn: eq_neg128_int32}, + {idx: 6, exp: ne, fn: ne_neg128_int32}, + {idx: 7, exp: lt, fn: lt_neg127_int32}, + {idx: 7, exp: le, fn: le_neg127_int32}, + {idx: 7, exp: gt, fn: gt_neg127_int32}, + {idx: 7, exp: ge, fn: ge_neg127_int32}, + {idx: 7, exp: eq, fn: eq_neg127_int32}, + {idx: 7, exp: ne, fn: ne_neg127_int32}, + {idx: 8, exp: lt, fn: lt_neg1_int32}, + {idx: 8, exp: le, fn: le_neg1_int32}, + {idx: 8, exp: gt, fn: gt_neg1_int32}, + {idx: 8, exp: ge, fn: ge_neg1_int32}, + {idx: 8, exp: eq, fn: eq_neg1_int32}, + {idx: 8, exp: ne, fn: ne_neg1_int32}, + {idx: 9, exp: lt, fn: lt_0_int32}, + {idx: 9, exp: le, fn: le_0_int32}, + {idx: 9, exp: gt, fn: gt_0_int32}, + {idx: 9, 
exp: ge, fn: ge_0_int32}, + {idx: 9, exp: eq, fn: eq_0_int32}, + {idx: 9, exp: ne, fn: ne_0_int32}, + {idx: 10, exp: lt, fn: lt_1_int32}, + {idx: 10, exp: le, fn: le_1_int32}, + {idx: 10, exp: gt, fn: gt_1_int32}, + {idx: 10, exp: ge, fn: ge_1_int32}, + {idx: 10, exp: eq, fn: eq_1_int32}, + {idx: 10, exp: ne, fn: ne_1_int32}, + {idx: 11, exp: lt, fn: lt_126_int32}, + {idx: 11, exp: le, fn: le_126_int32}, + {idx: 11, exp: gt, fn: gt_126_int32}, + {idx: 11, exp: ge, fn: ge_126_int32}, + {idx: 11, exp: eq, fn: eq_126_int32}, + {idx: 11, exp: ne, fn: ne_126_int32}, + {idx: 12, exp: lt, fn: lt_127_int32}, + {idx: 12, exp: le, fn: le_127_int32}, + {idx: 12, exp: gt, fn: gt_127_int32}, + {idx: 12, exp: ge, fn: ge_127_int32}, + {idx: 12, exp: eq, fn: eq_127_int32}, + {idx: 12, exp: ne, fn: ne_127_int32}, + {idx: 13, exp: lt, fn: lt_128_int32}, + {idx: 13, exp: le, fn: le_128_int32}, + {idx: 13, exp: gt, fn: gt_128_int32}, + {idx: 13, exp: ge, fn: ge_128_int32}, + {idx: 13, exp: eq, fn: eq_128_int32}, + {idx: 13, exp: ne, fn: ne_128_int32}, + {idx: 14, exp: lt, fn: lt_254_int32}, + {idx: 14, exp: le, fn: le_254_int32}, + {idx: 14, exp: gt, fn: gt_254_int32}, + {idx: 14, exp: ge, fn: ge_254_int32}, + {idx: 14, exp: eq, fn: eq_254_int32}, + {idx: 14, exp: ne, fn: ne_254_int32}, + {idx: 15, exp: lt, fn: lt_255_int32}, + {idx: 15, exp: le, fn: le_255_int32}, + {idx: 15, exp: gt, fn: gt_255_int32}, + {idx: 15, exp: ge, fn: ge_255_int32}, + {idx: 15, exp: eq, fn: eq_255_int32}, + {idx: 15, exp: ne, fn: ne_255_int32}, + {idx: 16, exp: lt, fn: lt_256_int32}, + {idx: 16, exp: le, fn: le_256_int32}, + {idx: 16, exp: gt, fn: gt_256_int32}, + {idx: 16, exp: ge, fn: ge_256_int32}, + {idx: 16, exp: eq, fn: eq_256_int32}, + {idx: 16, exp: ne, fn: ne_256_int32}, + {idx: 17, exp: lt, fn: lt_32766_int32}, + {idx: 17, exp: le, fn: le_32766_int32}, + {idx: 17, exp: gt, fn: gt_32766_int32}, + {idx: 17, exp: ge, fn: ge_32766_int32}, + {idx: 17, exp: eq, fn: eq_32766_int32}, + {idx: 17, exp: ne, 
fn: ne_32766_int32}, + {idx: 18, exp: lt, fn: lt_32767_int32}, + {idx: 18, exp: le, fn: le_32767_int32}, + {idx: 18, exp: gt, fn: gt_32767_int32}, + {idx: 18, exp: ge, fn: ge_32767_int32}, + {idx: 18, exp: eq, fn: eq_32767_int32}, + {idx: 18, exp: ne, fn: ne_32767_int32}, + {idx: 19, exp: lt, fn: lt_32768_int32}, + {idx: 19, exp: le, fn: le_32768_int32}, + {idx: 19, exp: gt, fn: gt_32768_int32}, + {idx: 19, exp: ge, fn: ge_32768_int32}, + {idx: 19, exp: eq, fn: eq_32768_int32}, + {idx: 19, exp: ne, fn: ne_32768_int32}, + {idx: 20, exp: lt, fn: lt_65534_int32}, + {idx: 20, exp: le, fn: le_65534_int32}, + {idx: 20, exp: gt, fn: gt_65534_int32}, + {idx: 20, exp: ge, fn: ge_65534_int32}, + {idx: 20, exp: eq, fn: eq_65534_int32}, + {idx: 20, exp: ne, fn: ne_65534_int32}, + {idx: 21, exp: lt, fn: lt_65535_int32}, + {idx: 21, exp: le, fn: le_65535_int32}, + {idx: 21, exp: gt, fn: gt_65535_int32}, + {idx: 21, exp: ge, fn: ge_65535_int32}, + {idx: 21, exp: eq, fn: eq_65535_int32}, + {idx: 21, exp: ne, fn: ne_65535_int32}, + {idx: 22, exp: lt, fn: lt_65536_int32}, + {idx: 22, exp: le, fn: le_65536_int32}, + {idx: 22, exp: gt, fn: gt_65536_int32}, + {idx: 22, exp: ge, fn: ge_65536_int32}, + {idx: 22, exp: eq, fn: eq_65536_int32}, + {idx: 22, exp: ne, fn: ne_65536_int32}, + {idx: 23, exp: lt, fn: lt_2147483646_int32}, + {idx: 23, exp: le, fn: le_2147483646_int32}, + {idx: 23, exp: gt, fn: gt_2147483646_int32}, + {idx: 23, exp: ge, fn: ge_2147483646_int32}, + {idx: 23, exp: eq, fn: eq_2147483646_int32}, + {idx: 23, exp: ne, fn: ne_2147483646_int32}, + {idx: 24, exp: lt, fn: lt_2147483647_int32}, + {idx: 24, exp: le, fn: le_2147483647_int32}, + {idx: 24, exp: gt, fn: gt_2147483647_int32}, + {idx: 24, exp: ge, fn: ge_2147483647_int32}, + {idx: 24, exp: eq, fn: eq_2147483647_int32}, + {idx: 24, exp: ne, fn: ne_2147483647_int32}, +} + +// int16 tests +var int16_vals = []int16{ + -32768, + -32767, + -129, + -128, + -127, + -1, + 0, + 1, + 126, + 127, + 128, + 254, + 255, + 256, + 
32766, + 32767, +} + +func lt_neg32768_int16(x int16) bool { return x < -32768 } +func le_neg32768_int16(x int16) bool { return x <= -32768 } +func gt_neg32768_int16(x int16) bool { return x > -32768 } +func ge_neg32768_int16(x int16) bool { return x >= -32768 } +func eq_neg32768_int16(x int16) bool { return x == -32768 } +func ne_neg32768_int16(x int16) bool { return x != -32768 } +func lt_neg32767_int16(x int16) bool { return x < -32767 } +func le_neg32767_int16(x int16) bool { return x <= -32767 } +func gt_neg32767_int16(x int16) bool { return x > -32767 } +func ge_neg32767_int16(x int16) bool { return x >= -32767 } +func eq_neg32767_int16(x int16) bool { return x == -32767 } +func ne_neg32767_int16(x int16) bool { return x != -32767 } +func lt_neg129_int16(x int16) bool { return x < -129 } +func le_neg129_int16(x int16) bool { return x <= -129 } +func gt_neg129_int16(x int16) bool { return x > -129 } +func ge_neg129_int16(x int16) bool { return x >= -129 } +func eq_neg129_int16(x int16) bool { return x == -129 } +func ne_neg129_int16(x int16) bool { return x != -129 } +func lt_neg128_int16(x int16) bool { return x < -128 } +func le_neg128_int16(x int16) bool { return x <= -128 } +func gt_neg128_int16(x int16) bool { return x > -128 } +func ge_neg128_int16(x int16) bool { return x >= -128 } +func eq_neg128_int16(x int16) bool { return x == -128 } +func ne_neg128_int16(x int16) bool { return x != -128 } +func lt_neg127_int16(x int16) bool { return x < -127 } +func le_neg127_int16(x int16) bool { return x <= -127 } +func gt_neg127_int16(x int16) bool { return x > -127 } +func ge_neg127_int16(x int16) bool { return x >= -127 } +func eq_neg127_int16(x int16) bool { return x == -127 } +func ne_neg127_int16(x int16) bool { return x != -127 } +func lt_neg1_int16(x int16) bool { return x < -1 } +func le_neg1_int16(x int16) bool { return x <= -1 } +func gt_neg1_int16(x int16) bool { return x > -1 } +func ge_neg1_int16(x int16) bool { return x >= -1 } +func 
eq_neg1_int16(x int16) bool { return x == -1 } +func ne_neg1_int16(x int16) bool { return x != -1 } +func lt_0_int16(x int16) bool { return x < 0 } +func le_0_int16(x int16) bool { return x <= 0 } +func gt_0_int16(x int16) bool { return x > 0 } +func ge_0_int16(x int16) bool { return x >= 0 } +func eq_0_int16(x int16) bool { return x == 0 } +func ne_0_int16(x int16) bool { return x != 0 } +func lt_1_int16(x int16) bool { return x < 1 } +func le_1_int16(x int16) bool { return x <= 1 } +func gt_1_int16(x int16) bool { return x > 1 } +func ge_1_int16(x int16) bool { return x >= 1 } +func eq_1_int16(x int16) bool { return x == 1 } +func ne_1_int16(x int16) bool { return x != 1 } +func lt_126_int16(x int16) bool { return x < 126 } +func le_126_int16(x int16) bool { return x <= 126 } +func gt_126_int16(x int16) bool { return x > 126 } +func ge_126_int16(x int16) bool { return x >= 126 } +func eq_126_int16(x int16) bool { return x == 126 } +func ne_126_int16(x int16) bool { return x != 126 } +func lt_127_int16(x int16) bool { return x < 127 } +func le_127_int16(x int16) bool { return x <= 127 } +func gt_127_int16(x int16) bool { return x > 127 } +func ge_127_int16(x int16) bool { return x >= 127 } +func eq_127_int16(x int16) bool { return x == 127 } +func ne_127_int16(x int16) bool { return x != 127 } +func lt_128_int16(x int16) bool { return x < 128 } +func le_128_int16(x int16) bool { return x <= 128 } +func gt_128_int16(x int16) bool { return x > 128 } +func ge_128_int16(x int16) bool { return x >= 128 } +func eq_128_int16(x int16) bool { return x == 128 } +func ne_128_int16(x int16) bool { return x != 128 } +func lt_254_int16(x int16) bool { return x < 254 } +func le_254_int16(x int16) bool { return x <= 254 } +func gt_254_int16(x int16) bool { return x > 254 } +func ge_254_int16(x int16) bool { return x >= 254 } +func eq_254_int16(x int16) bool { return x == 254 } +func ne_254_int16(x int16) bool { return x != 254 } +func lt_255_int16(x int16) bool { return x < 255 } 
+func le_255_int16(x int16) bool { return x <= 255 } +func gt_255_int16(x int16) bool { return x > 255 } +func ge_255_int16(x int16) bool { return x >= 255 } +func eq_255_int16(x int16) bool { return x == 255 } +func ne_255_int16(x int16) bool { return x != 255 } +func lt_256_int16(x int16) bool { return x < 256 } +func le_256_int16(x int16) bool { return x <= 256 } +func gt_256_int16(x int16) bool { return x > 256 } +func ge_256_int16(x int16) bool { return x >= 256 } +func eq_256_int16(x int16) bool { return x == 256 } +func ne_256_int16(x int16) bool { return x != 256 } +func lt_32766_int16(x int16) bool { return x < 32766 } +func le_32766_int16(x int16) bool { return x <= 32766 } +func gt_32766_int16(x int16) bool { return x > 32766 } +func ge_32766_int16(x int16) bool { return x >= 32766 } +func eq_32766_int16(x int16) bool { return x == 32766 } +func ne_32766_int16(x int16) bool { return x != 32766 } +func lt_32767_int16(x int16) bool { return x < 32767 } +func le_32767_int16(x int16) bool { return x <= 32767 } +func gt_32767_int16(x int16) bool { return x > 32767 } +func ge_32767_int16(x int16) bool { return x >= 32767 } +func eq_32767_int16(x int16) bool { return x == 32767 } +func ne_32767_int16(x int16) bool { return x != 32767 } + +var int16_tests = []struct { + idx int // index of the constant used + exp result // expected results + fn func(int16) bool +}{ + {idx: 0, exp: lt, fn: lt_neg32768_int16}, + {idx: 0, exp: le, fn: le_neg32768_int16}, + {idx: 0, exp: gt, fn: gt_neg32768_int16}, + {idx: 0, exp: ge, fn: ge_neg32768_int16}, + {idx: 0, exp: eq, fn: eq_neg32768_int16}, + {idx: 0, exp: ne, fn: ne_neg32768_int16}, + {idx: 1, exp: lt, fn: lt_neg32767_int16}, + {idx: 1, exp: le, fn: le_neg32767_int16}, + {idx: 1, exp: gt, fn: gt_neg32767_int16}, + {idx: 1, exp: ge, fn: ge_neg32767_int16}, + {idx: 1, exp: eq, fn: eq_neg32767_int16}, + {idx: 1, exp: ne, fn: ne_neg32767_int16}, + {idx: 2, exp: lt, fn: lt_neg129_int16}, + {idx: 2, exp: le, fn: 
le_neg129_int16}, + {idx: 2, exp: gt, fn: gt_neg129_int16}, + {idx: 2, exp: ge, fn: ge_neg129_int16}, + {idx: 2, exp: eq, fn: eq_neg129_int16}, + {idx: 2, exp: ne, fn: ne_neg129_int16}, + {idx: 3, exp: lt, fn: lt_neg128_int16}, + {idx: 3, exp: le, fn: le_neg128_int16}, + {idx: 3, exp: gt, fn: gt_neg128_int16}, + {idx: 3, exp: ge, fn: ge_neg128_int16}, + {idx: 3, exp: eq, fn: eq_neg128_int16}, + {idx: 3, exp: ne, fn: ne_neg128_int16}, + {idx: 4, exp: lt, fn: lt_neg127_int16}, + {idx: 4, exp: le, fn: le_neg127_int16}, + {idx: 4, exp: gt, fn: gt_neg127_int16}, + {idx: 4, exp: ge, fn: ge_neg127_int16}, + {idx: 4, exp: eq, fn: eq_neg127_int16}, + {idx: 4, exp: ne, fn: ne_neg127_int16}, + {idx: 5, exp: lt, fn: lt_neg1_int16}, + {idx: 5, exp: le, fn: le_neg1_int16}, + {idx: 5, exp: gt, fn: gt_neg1_int16}, + {idx: 5, exp: ge, fn: ge_neg1_int16}, + {idx: 5, exp: eq, fn: eq_neg1_int16}, + {idx: 5, exp: ne, fn: ne_neg1_int16}, + {idx: 6, exp: lt, fn: lt_0_int16}, + {idx: 6, exp: le, fn: le_0_int16}, + {idx: 6, exp: gt, fn: gt_0_int16}, + {idx: 6, exp: ge, fn: ge_0_int16}, + {idx: 6, exp: eq, fn: eq_0_int16}, + {idx: 6, exp: ne, fn: ne_0_int16}, + {idx: 7, exp: lt, fn: lt_1_int16}, + {idx: 7, exp: le, fn: le_1_int16}, + {idx: 7, exp: gt, fn: gt_1_int16}, + {idx: 7, exp: ge, fn: ge_1_int16}, + {idx: 7, exp: eq, fn: eq_1_int16}, + {idx: 7, exp: ne, fn: ne_1_int16}, + {idx: 8, exp: lt, fn: lt_126_int16}, + {idx: 8, exp: le, fn: le_126_int16}, + {idx: 8, exp: gt, fn: gt_126_int16}, + {idx: 8, exp: ge, fn: ge_126_int16}, + {idx: 8, exp: eq, fn: eq_126_int16}, + {idx: 8, exp: ne, fn: ne_126_int16}, + {idx: 9, exp: lt, fn: lt_127_int16}, + {idx: 9, exp: le, fn: le_127_int16}, + {idx: 9, exp: gt, fn: gt_127_int16}, + {idx: 9, exp: ge, fn: ge_127_int16}, + {idx: 9, exp: eq, fn: eq_127_int16}, + {idx: 9, exp: ne, fn: ne_127_int16}, + {idx: 10, exp: lt, fn: lt_128_int16}, + {idx: 10, exp: le, fn: le_128_int16}, + {idx: 10, exp: gt, fn: gt_128_int16}, + {idx: 10, exp: ge, fn: 
ge_128_int16}, + {idx: 10, exp: eq, fn: eq_128_int16}, + {idx: 10, exp: ne, fn: ne_128_int16}, + {idx: 11, exp: lt, fn: lt_254_int16}, + {idx: 11, exp: le, fn: le_254_int16}, + {idx: 11, exp: gt, fn: gt_254_int16}, + {idx: 11, exp: ge, fn: ge_254_int16}, + {idx: 11, exp: eq, fn: eq_254_int16}, + {idx: 11, exp: ne, fn: ne_254_int16}, + {idx: 12, exp: lt, fn: lt_255_int16}, + {idx: 12, exp: le, fn: le_255_int16}, + {idx: 12, exp: gt, fn: gt_255_int16}, + {idx: 12, exp: ge, fn: ge_255_int16}, + {idx: 12, exp: eq, fn: eq_255_int16}, + {idx: 12, exp: ne, fn: ne_255_int16}, + {idx: 13, exp: lt, fn: lt_256_int16}, + {idx: 13, exp: le, fn: le_256_int16}, + {idx: 13, exp: gt, fn: gt_256_int16}, + {idx: 13, exp: ge, fn: ge_256_int16}, + {idx: 13, exp: eq, fn: eq_256_int16}, + {idx: 13, exp: ne, fn: ne_256_int16}, + {idx: 14, exp: lt, fn: lt_32766_int16}, + {idx: 14, exp: le, fn: le_32766_int16}, + {idx: 14, exp: gt, fn: gt_32766_int16}, + {idx: 14, exp: ge, fn: ge_32766_int16}, + {idx: 14, exp: eq, fn: eq_32766_int16}, + {idx: 14, exp: ne, fn: ne_32766_int16}, + {idx: 15, exp: lt, fn: lt_32767_int16}, + {idx: 15, exp: le, fn: le_32767_int16}, + {idx: 15, exp: gt, fn: gt_32767_int16}, + {idx: 15, exp: ge, fn: ge_32767_int16}, + {idx: 15, exp: eq, fn: eq_32767_int16}, + {idx: 15, exp: ne, fn: ne_32767_int16}, +} + +// int8 tests +var int8_vals = []int8{ + -128, + -127, + -1, + 0, + 1, + 126, + 127, +} + +func lt_neg128_int8(x int8) bool { return x < -128 } +func le_neg128_int8(x int8) bool { return x <= -128 } +func gt_neg128_int8(x int8) bool { return x > -128 } +func ge_neg128_int8(x int8) bool { return x >= -128 } +func eq_neg128_int8(x int8) bool { return x == -128 } +func ne_neg128_int8(x int8) bool { return x != -128 } +func lt_neg127_int8(x int8) bool { return x < -127 } +func le_neg127_int8(x int8) bool { return x <= -127 } +func gt_neg127_int8(x int8) bool { return x > -127 } +func ge_neg127_int8(x int8) bool { return x >= -127 } +func eq_neg127_int8(x int8) bool { 
return x == -127 } +func ne_neg127_int8(x int8) bool { return x != -127 } +func lt_neg1_int8(x int8) bool { return x < -1 } +func le_neg1_int8(x int8) bool { return x <= -1 } +func gt_neg1_int8(x int8) bool { return x > -1 } +func ge_neg1_int8(x int8) bool { return x >= -1 } +func eq_neg1_int8(x int8) bool { return x == -1 } +func ne_neg1_int8(x int8) bool { return x != -1 } +func lt_0_int8(x int8) bool { return x < 0 } +func le_0_int8(x int8) bool { return x <= 0 } +func gt_0_int8(x int8) bool { return x > 0 } +func ge_0_int8(x int8) bool { return x >= 0 } +func eq_0_int8(x int8) bool { return x == 0 } +func ne_0_int8(x int8) bool { return x != 0 } +func lt_1_int8(x int8) bool { return x < 1 } +func le_1_int8(x int8) bool { return x <= 1 } +func gt_1_int8(x int8) bool { return x > 1 } +func ge_1_int8(x int8) bool { return x >= 1 } +func eq_1_int8(x int8) bool { return x == 1 } +func ne_1_int8(x int8) bool { return x != 1 } +func lt_126_int8(x int8) bool { return x < 126 } +func le_126_int8(x int8) bool { return x <= 126 } +func gt_126_int8(x int8) bool { return x > 126 } +func ge_126_int8(x int8) bool { return x >= 126 } +func eq_126_int8(x int8) bool { return x == 126 } +func ne_126_int8(x int8) bool { return x != 126 } +func lt_127_int8(x int8) bool { return x < 127 } +func le_127_int8(x int8) bool { return x <= 127 } +func gt_127_int8(x int8) bool { return x > 127 } +func ge_127_int8(x int8) bool { return x >= 127 } +func eq_127_int8(x int8) bool { return x == 127 } +func ne_127_int8(x int8) bool { return x != 127 } + +var int8_tests = []struct { + idx int // index of the constant used + exp result // expected results + fn func(int8) bool +}{ + {idx: 0, exp: lt, fn: lt_neg128_int8}, + {idx: 0, exp: le, fn: le_neg128_int8}, + {idx: 0, exp: gt, fn: gt_neg128_int8}, + {idx: 0, exp: ge, fn: ge_neg128_int8}, + {idx: 0, exp: eq, fn: eq_neg128_int8}, + {idx: 0, exp: ne, fn: ne_neg128_int8}, + {idx: 1, exp: lt, fn: lt_neg127_int8}, + {idx: 1, exp: le, fn: 
le_neg127_int8}, + {idx: 1, exp: gt, fn: gt_neg127_int8}, + {idx: 1, exp: ge, fn: ge_neg127_int8}, + {idx: 1, exp: eq, fn: eq_neg127_int8}, + {idx: 1, exp: ne, fn: ne_neg127_int8}, + {idx: 2, exp: lt, fn: lt_neg1_int8}, + {idx: 2, exp: le, fn: le_neg1_int8}, + {idx: 2, exp: gt, fn: gt_neg1_int8}, + {idx: 2, exp: ge, fn: ge_neg1_int8}, + {idx: 2, exp: eq, fn: eq_neg1_int8}, + {idx: 2, exp: ne, fn: ne_neg1_int8}, + {idx: 3, exp: lt, fn: lt_0_int8}, + {idx: 3, exp: le, fn: le_0_int8}, + {idx: 3, exp: gt, fn: gt_0_int8}, + {idx: 3, exp: ge, fn: ge_0_int8}, + {idx: 3, exp: eq, fn: eq_0_int8}, + {idx: 3, exp: ne, fn: ne_0_int8}, + {idx: 4, exp: lt, fn: lt_1_int8}, + {idx: 4, exp: le, fn: le_1_int8}, + {idx: 4, exp: gt, fn: gt_1_int8}, + {idx: 4, exp: ge, fn: ge_1_int8}, + {idx: 4, exp: eq, fn: eq_1_int8}, + {idx: 4, exp: ne, fn: ne_1_int8}, + {idx: 5, exp: lt, fn: lt_126_int8}, + {idx: 5, exp: le, fn: le_126_int8}, + {idx: 5, exp: gt, fn: gt_126_int8}, + {idx: 5, exp: ge, fn: ge_126_int8}, + {idx: 5, exp: eq, fn: eq_126_int8}, + {idx: 5, exp: ne, fn: ne_126_int8}, + {idx: 6, exp: lt, fn: lt_127_int8}, + {idx: 6, exp: le, fn: le_127_int8}, + {idx: 6, exp: gt, fn: gt_127_int8}, + {idx: 6, exp: ge, fn: ge_127_int8}, + {idx: 6, exp: eq, fn: eq_127_int8}, + {idx: 6, exp: ne, fn: ne_127_int8}, +} + +// TestComparisonsConst tests results for comparison operations against constants. 
+func TestComparisonsConst(t *testing.T) { + for i, test := range uint64_tests { + for j, x := range uint64_vals { + want := test.exp.l + if j == test.idx { + want = test.exp.e + } else if j > test.idx { + want = test.exp.r + } + if test.fn(x) != want { + fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name() + t.Errorf("test failed: %v(%v) != %v [type=uint64 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx) + } + } + } + for i, test := range uint32_tests { + for j, x := range uint32_vals { + want := test.exp.l + if j == test.idx { + want = test.exp.e + } else if j > test.idx { + want = test.exp.r + } + if test.fn(x) != want { + fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name() + t.Errorf("test failed: %v(%v) != %v [type=uint32 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx) + } + } + } + for i, test := range uint16_tests { + for j, x := range uint16_vals { + want := test.exp.l + if j == test.idx { + want = test.exp.e + } else if j > test.idx { + want = test.exp.r + } + if test.fn(x) != want { + fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name() + t.Errorf("test failed: %v(%v) != %v [type=uint16 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx) + } + } + } + for i, test := range uint8_tests { + for j, x := range uint8_vals { + want := test.exp.l + if j == test.idx { + want = test.exp.e + } else if j > test.idx { + want = test.exp.r + } + if test.fn(x) != want { + fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name() + t.Errorf("test failed: %v(%v) != %v [type=uint8 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx) + } + } + } + for i, test := range int64_tests { + for j, x := range int64_vals { + want := test.exp.l + if j == test.idx { + want = test.exp.e + } else if j > test.idx { + want = test.exp.r + } + if test.fn(x) != want { + fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name() + t.Errorf("test failed: %v(%v) != %v [type=int64 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx) + } + } + } + 
for i, test := range int32_tests { + for j, x := range int32_vals { + want := test.exp.l + if j == test.idx { + want = test.exp.e + } else if j > test.idx { + want = test.exp.r + } + if test.fn(x) != want { + fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name() + t.Errorf("test failed: %v(%v) != %v [type=int32 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx) + } + } + } + for i, test := range int16_tests { + for j, x := range int16_vals { + want := test.exp.l + if j == test.idx { + want = test.exp.e + } else if j > test.idx { + want = test.exp.r + } + if test.fn(x) != want { + fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name() + t.Errorf("test failed: %v(%v) != %v [type=int16 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx) + } + } + } + for i, test := range int8_tests { + for j, x := range int8_vals { + want := test.exp.l + if j == test.idx { + want = test.exp.e + } else if j > test.idx { + want = test.exp.r + } + if test.fn(x) != want { + fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name() + t.Errorf("test failed: %v(%v) != %v [type=int8 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx) + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/cmp_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/cmp_test.go new file mode 100644 index 0000000000000000000000000000000000000000..06b58f2a02620a10e77ab505c792071488bb9691 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/cmp_test.go @@ -0,0 +1,37 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// cmp_ssa.go tests compare simplification operations. 
+package main + +import "testing" + +//go:noinline +func eq_ssa(a int64) bool { + return 4+a == 10 +} + +//go:noinline +func neq_ssa(a int64) bool { + return 10 != a+4 +} + +func testCmp(t *testing.T) { + if wanted, got := true, eq_ssa(6); wanted != got { + t.Errorf("eq_ssa: expected %v, got %v\n", wanted, got) + } + if wanted, got := false, eq_ssa(7); wanted != got { + t.Errorf("eq_ssa: expected %v, got %v\n", wanted, got) + } + if wanted, got := false, neq_ssa(6); wanted != got { + t.Errorf("neq_ssa: expected %v, got %v\n", wanted, got) + } + if wanted, got := true, neq_ssa(7); wanted != got { + t.Errorf("neq_ssa: expected %v, got %v\n", wanted, got) + } +} + +func TestCmp(t *testing.T) { + testCmp(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/compound_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/compound_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4ae464dbe3843244817a42e04675cf1219c855c4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/compound_test.go @@ -0,0 +1,128 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Test compound objects + +package main + +import ( + "testing" +) + +func string_ssa(a, b string, x bool) string { + s := "" + if x { + s = a + } else { + s = b + } + return s +} + +func testString(t *testing.T) { + a := "foo" + b := "barz" + if want, got := a, string_ssa(a, b, true); got != want { + t.Errorf("string_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want) + } + if want, got := b, string_ssa(a, b, false); got != want { + t.Errorf("string_ssa(%v, %v, false) = %v, want %v\n", a, b, got, want) + } +} + +//go:noinline +func complex64_ssa(a, b complex64, x bool) complex64 { + var c complex64 + if x { + c = a + } else { + c = b + } + return c +} + +//go:noinline +func complex128_ssa(a, b complex128, x bool) complex128 { + var c complex128 + if x { + c = a + } else { + c = b + } + return c +} + +func testComplex64(t *testing.T) { + var a complex64 = 1 + 2i + var b complex64 = 3 + 4i + + if want, got := a, complex64_ssa(a, b, true); got != want { + t.Errorf("complex64_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want) + } + if want, got := b, complex64_ssa(a, b, false); got != want { + t.Errorf("complex64_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want) + } +} + +func testComplex128(t *testing.T) { + var a complex128 = 1 + 2i + var b complex128 = 3 + 4i + + if want, got := a, complex128_ssa(a, b, true); got != want { + t.Errorf("complex128_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want) + } + if want, got := b, complex128_ssa(a, b, false); got != want { + t.Errorf("complex128_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want) + } +} + +func slice_ssa(a, b []byte, x bool) []byte { + var s []byte + if x { + s = a + } else { + s = b + } + return s +} + +func testSlice(t *testing.T) { + a := []byte{3, 4, 5} + b := []byte{7, 8, 9} + if want, got := byte(3), slice_ssa(a, b, true)[0]; got != want { + t.Errorf("slice_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want) + } + if want, got := byte(7), slice_ssa(a, b, false)[0]; got != want { + 
t.Errorf("slice_ssa(%v, %v, false) = %v, want %v\n", a, b, got, want) + } +} + +func interface_ssa(a, b interface{}, x bool) interface{} { + var s interface{} + if x { + s = a + } else { + s = b + } + return s +} + +func testInterface(t *testing.T) { + a := interface{}(3) + b := interface{}(4) + if want, got := 3, interface_ssa(a, b, true).(int); got != want { + t.Errorf("interface_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want) + } + if want, got := 4, interface_ssa(a, b, false).(int); got != want { + t.Errorf("interface_ssa(%v, %v, false) = %v, want %v\n", a, b, got, want) + } +} + +func TestCompound(t *testing.T) { + testString(t) + testSlice(t) + testInterface(t) + testComplex64(t) + testComplex128(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/copy_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/copy_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c29611d32a22e9448b4b8da284fabc8df56a292f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/copy_test.go @@ -0,0 +1,760 @@ +// Code generated by gen/copyGen.go. DO NOT EDIT. 
+ +package main + +import "testing" + +type T1 struct { + pre [8]byte + mid [1]byte + post [8]byte +} + +//go:noinline +func t1copy_ssa(y, x *[1]byte) { + *y = *x +} +func testCopy1(t *testing.T) { + a := T1{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1]byte{0}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [1]byte{100} + t1copy_ssa(&a.mid, &x) + want := T1{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1]byte{100}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t1copy got=%v, want %v\n", a, want) + } +} + +type T2 struct { + pre [8]byte + mid [2]byte + post [8]byte +} + +//go:noinline +func t2copy_ssa(y, x *[2]byte) { + *y = *x +} +func testCopy2(t *testing.T) { + a := T2{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [2]byte{0, 1}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [2]byte{100, 101} + t2copy_ssa(&a.mid, &x) + want := T2{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [2]byte{100, 101}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t2copy got=%v, want %v\n", a, want) + } +} + +type T3 struct { + pre [8]byte + mid [3]byte + post [8]byte +} + +//go:noinline +func t3copy_ssa(y, x *[3]byte) { + *y = *x +} +func testCopy3(t *testing.T) { + a := T3{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [3]byte{0, 1, 2}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [3]byte{100, 101, 102} + t3copy_ssa(&a.mid, &x) + want := T3{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [3]byte{100, 101, 102}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t3copy got=%v, want %v\n", a, want) + } +} + +type T4 struct { + pre [8]byte + mid [4]byte + post [8]byte +} + +//go:noinline +func t4copy_ssa(y, x *[4]byte) { + *y = *x +} +func testCopy4(t *testing.T) { + a := T4{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [4]byte{0, 1, 2, 3}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [4]byte{100, 101, 102, 103} + t4copy_ssa(&a.mid, &x) + 
want := T4{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [4]byte{100, 101, 102, 103}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t4copy got=%v, want %v\n", a, want) + } +} + +type T5 struct { + pre [8]byte + mid [5]byte + post [8]byte +} + +//go:noinline +func t5copy_ssa(y, x *[5]byte) { + *y = *x +} +func testCopy5(t *testing.T) { + a := T5{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [5]byte{0, 1, 2, 3, 4}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [5]byte{100, 101, 102, 103, 104} + t5copy_ssa(&a.mid, &x) + want := T5{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [5]byte{100, 101, 102, 103, 104}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t5copy got=%v, want %v\n", a, want) + } +} + +type T6 struct { + pre [8]byte + mid [6]byte + post [8]byte +} + +//go:noinline +func t6copy_ssa(y, x *[6]byte) { + *y = *x +} +func testCopy6(t *testing.T) { + a := T6{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [6]byte{0, 1, 2, 3, 4, 5}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [6]byte{100, 101, 102, 103, 104, 105} + t6copy_ssa(&a.mid, &x) + want := T6{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [6]byte{100, 101, 102, 103, 104, 105}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t6copy got=%v, want %v\n", a, want) + } +} + +type T7 struct { + pre [8]byte + mid [7]byte + post [8]byte +} + +//go:noinline +func t7copy_ssa(y, x *[7]byte) { + *y = *x +} +func testCopy7(t *testing.T) { + a := T7{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [7]byte{0, 1, 2, 3, 4, 5, 6}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [7]byte{100, 101, 102, 103, 104, 105, 106} + t7copy_ssa(&a.mid, &x) + want := T7{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [7]byte{100, 101, 102, 103, 104, 105, 106}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t7copy got=%v, want %v\n", a, want) + } +} + +type T8 struct { + 
pre [8]byte + mid [8]byte + post [8]byte +} + +//go:noinline +func t8copy_ssa(y, x *[8]byte) { + *y = *x +} +func testCopy8(t *testing.T) { + a := T8{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [8]byte{0, 1, 2, 3, 4, 5, 6, 7}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [8]byte{100, 101, 102, 103, 104, 105, 106, 107} + t8copy_ssa(&a.mid, &x) + want := T8{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [8]byte{100, 101, 102, 103, 104, 105, 106, 107}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t8copy got=%v, want %v\n", a, want) + } +} + +type T9 struct { + pre [8]byte + mid [9]byte + post [8]byte +} + +//go:noinline +func t9copy_ssa(y, x *[9]byte) { + *y = *x +} +func testCopy9(t *testing.T) { + a := T9{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [9]byte{0, 1, 2, 3, 4, 5, 6, 7, 8}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [9]byte{100, 101, 102, 103, 104, 105, 106, 107, 108} + t9copy_ssa(&a.mid, &x) + want := T9{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [9]byte{100, 101, 102, 103, 104, 105, 106, 107, 108}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t9copy got=%v, want %v\n", a, want) + } +} + +type T10 struct { + pre [8]byte + mid [10]byte + post [8]byte +} + +//go:noinline +func t10copy_ssa(y, x *[10]byte) { + *y = *x +} +func testCopy10(t *testing.T) { + a := T10{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [10]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [10]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109} + t10copy_ssa(&a.mid, &x) + want := T10{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [10]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t10copy got=%v, want %v\n", a, want) + } +} + +type T15 struct { + pre [8]byte + mid [15]byte + post [8]byte +} + +//go:noinline +func t15copy_ssa(y, x *[15]byte) { + *y 
= *x +} +func testCopy15(t *testing.T) { + a := T15{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [15]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [15]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114} + t15copy_ssa(&a.mid, &x) + want := T15{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [15]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t15copy got=%v, want %v\n", a, want) + } +} + +type T16 struct { + pre [8]byte + mid [16]byte + post [8]byte +} + +//go:noinline +func t16copy_ssa(y, x *[16]byte) { + *y = *x +} +func testCopy16(t *testing.T) { + a := T16{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [16]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115} + t16copy_ssa(&a.mid, &x) + want := T16{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [16]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t16copy got=%v, want %v\n", a, want) + } +} + +type T17 struct { + pre [8]byte + mid [17]byte + post [8]byte +} + +//go:noinline +func t17copy_ssa(y, x *[17]byte) { + *y = *x +} +func testCopy17(t *testing.T) { + a := T17{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [17]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [17]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116} + t17copy_ssa(&a.mid, &x) + want := T17{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [17]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116}, [8]byte{211, 212, 213, 214, 215, 
216, 217, 218}} + if a != want { + t.Errorf("t17copy got=%v, want %v\n", a, want) + } +} + +type T23 struct { + pre [8]byte + mid [23]byte + post [8]byte +} + +//go:noinline +func t23copy_ssa(y, x *[23]byte) { + *y = *x +} +func testCopy23(t *testing.T) { + a := T23{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [23]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [23]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122} + t23copy_ssa(&a.mid, &x) + want := T23{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [23]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t23copy got=%v, want %v\n", a, want) + } +} + +type T24 struct { + pre [8]byte + mid [24]byte + post [8]byte +} + +//go:noinline +func t24copy_ssa(y, x *[24]byte) { + *y = *x +} +func testCopy24(t *testing.T) { + a := T24{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [24]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [24]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123} + t24copy_ssa(&a.mid, &x) + want := T24{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [24]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t24copy got=%v, want %v\n", a, want) + } +} + +type T25 struct { + pre [8]byte + mid [25]byte + post [8]byte +} + +//go:noinline +func t25copy_ssa(y, x *[25]byte) { + *y = *x +} +func testCopy25(t *testing.T) { + a := T25{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, 
[25]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [25]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124} + t25copy_ssa(&a.mid, &x) + want := T25{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [25]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t25copy got=%v, want %v\n", a, want) + } +} + +type T31 struct { + pre [8]byte + mid [31]byte + post [8]byte +} + +//go:noinline +func t31copy_ssa(y, x *[31]byte) { + *y = *x +} +func testCopy31(t *testing.T) { + a := T31{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [31]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [31]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130} + t31copy_ssa(&a.mid, &x) + want := T31{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [31]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t31copy got=%v, want %v\n", a, want) + } +} + +type T32 struct { + pre [8]byte + mid [32]byte + post [8]byte +} + +//go:noinline +func t32copy_ssa(y, x *[32]byte) { + *y = *x +} +func testCopy32(t *testing.T) { + a := T32{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [32]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := 
[32]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131} + t32copy_ssa(&a.mid, &x) + want := T32{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [32]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t32copy got=%v, want %v\n", a, want) + } +} + +type T33 struct { + pre [8]byte + mid [33]byte + post [8]byte +} + +//go:noinline +func t33copy_ssa(y, x *[33]byte) { + *y = *x +} +func testCopy33(t *testing.T) { + a := T33{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [33]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [33]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132} + t33copy_ssa(&a.mid, &x) + want := T33{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [33]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t33copy got=%v, want %v\n", a, want) + } +} + +type T63 struct { + pre [8]byte + mid [63]byte + post [8]byte +} + +//go:noinline +func t63copy_ssa(y, x *[63]byte) { + *y = *x +} +func testCopy63(t *testing.T) { + a := T63{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [63]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 
62}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [63]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162} + t63copy_ssa(&a.mid, &x) + want := T63{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [63]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t63copy got=%v, want %v\n", a, want) + } +} + +type T64 struct { + pre [8]byte + mid [64]byte + post [8]byte +} + +//go:noinline +func t64copy_ssa(y, x *[64]byte) { + *y = *x +} +func testCopy64(t *testing.T) { + a := T64{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [64]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [64]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163} + t64copy_ssa(&a.mid, &x) + want := T64{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [64]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 
122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t64copy got=%v, want %v\n", a, want) + } +} + +type T65 struct { + pre [8]byte + mid [65]byte + post [8]byte +} + +//go:noinline +func t65copy_ssa(y, x *[65]byte) { + *y = *x +} +func testCopy65(t *testing.T) { + a := T65{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [65]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [65]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164} + t65copy_ssa(&a.mid, &x) + want := T65{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [65]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t65copy got=%v, want %v\n", a, want) + } +} + +type T1023 struct { + pre [8]byte + mid [1023]byte + post [8]byte +} + +//go:noinline +func t1023copy_ssa(y, x *[1023]byte) { + *y = *x +} +func testCopy1023(t *testing.T) { + a := T1023{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, 
[1023]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22}, 
[8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [1023]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 
186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 
186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122} + t1023copy_ssa(&a.mid, &x) + want := T1023{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1023]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 
142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 
142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 
142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t1023copy got=%v, want %v\n", a, want) + } +} + +type T1024 struct { + pre [8]byte + mid [1024]byte + post [8]byte +} + +//go:noinline +func t1024copy_ssa(y, x *[1024]byte) { + *y = *x +} +func testCopy1024(t *testing.T) { + a := T1024{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1024]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 
27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [1024]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 
132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 
132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123} + t1024copy_ssa(&a.mid, &x) + want := 
T1024{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1024]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 
186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 
186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t1024copy got=%v, want %v\n", a, want) + } +} + +type T1025 struct { + pre [8]byte + mid [1025]byte + post [8]byte +} + +//go:noinline +func t1025copy_ssa(y, x *[1025]byte) { + *y = *x +} +func testCopy1025(t *testing.T) { + a := T1025{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1025]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 
25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [1025]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 
174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 
174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 
174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124} + t1025copy_ssa(&a.mid, &x) + want := T1025{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1025]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 
128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 
128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124}, [8]byte{211, 
212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t1025copy got=%v, want %v\n", a, want) + } +} + +type T1031 struct { + pre [8]byte + mid [1031]byte + post [8]byte +} + +//go:noinline +func t1031copy_ssa(y, x *[1031]byte) { + *y = *x +} +func testCopy1031(t *testing.T) { + a := T1031{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1031]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 
34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [1031]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 
110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 
110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130} + t1031copy_ssa(&a.mid, &x) + want := T1031{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1031]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 
158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 
158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 
158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t1031copy got=%v, want %v\n", a, want) + } +} + +type T1032 struct { + pre [8]byte + mid [1032]byte + post [8]byte +} + +//go:noinline +func t1032copy_ssa(y, x *[1032]byte) { + *y = *x +} +func testCopy1032(t *testing.T) { + a := T1032{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1032]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 
95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [1032]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 
134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 
134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 
134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131} + t1032copy_ssa(&a.mid, &x) + want := T1032{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1032]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 
181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 
181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t1032copy got=%v, want %v\n", a, want) + } +} + +type T1033 struct { + pre [8]byte + mid [1033]byte + post [8]byte +} + +//go:noinline +func t1033copy_ssa(y, x 
*[1033]byte) { + *y = *x +} +func testCopy1033(t *testing.T) { + a := T1033{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1033]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 
78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 
90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [1033]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 
154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 
154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132} + t1033copy_ssa(&a.mid, &x) + want := T1033{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1033]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 
100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 
100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 
100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t1033copy got=%v, want %v\n", a, want) + } +} + +type T1039 struct { + pre [8]byte + mid [1039]byte + post [8]byte +} + +//go:noinline +func t1039copy_ssa(y, x *[1039]byte) { + *y = *x +} +func testCopy1039(t *testing.T) { + a := T1039{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1039]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 
47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 
59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [1039]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 
168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 
168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 
168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138} + t1039copy_ssa(&a.mid, &x) + want := T1039{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1039]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 
108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 
108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t1039copy got=%v, want %v\n", a, want) + } +} + +type T1040 struct { + pre [8]byte + mid [1040]byte + post [8]byte +} + +//go:noinline +func t1040copy_ssa(y, x *[1040]byte) { + *y = *x +} +func testCopy1040(t *testing.T) { + a := T1040{[8]byte{201, 202, 203, 204, 
205, 206, 207, 208}, [1040]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 
6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [1040]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 
169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 
169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139} + t1040copy_ssa(&a.mid, &x) + want := T1040{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1040]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 
108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 
108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 
108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t1040copy got=%v, want %v\n", a, want) + } +} + +type T1041 struct { + pre [8]byte + mid [1041]byte + post [8]byte +} + +//go:noinline +func t1041copy_ssa(y, x *[1041]byte) { + *y = *x +} +func testCopy1041(t *testing.T) { + a := T1041{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1041]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 
60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + x := [1041]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 
167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 
167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 
167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140} + t1041copy_ssa(&a.mid, &x) + want := T1041{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1041]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 
105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 
105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}} + if a != want { + t.Errorf("t1041copy got=%v, want %v\n", a, want) + } +} + +//go:noinline +func tu2copy_ssa(docopy bool, data [2]byte, x *[2]byte) { + if docopy { + *x = data + } +} +func testUnalignedCopy2(t *testing.T) { + var a [2]byte + t2 := [2]byte{2, 3} + 
tu2copy_ssa(true, t2, &a) + want2 := [2]byte{2, 3} + if a != want2 { + t.Errorf("tu2copy got=%v, want %v\n", a, want2) + } +} + +//go:noinline +func tu3copy_ssa(docopy bool, data [3]byte, x *[3]byte) { + if docopy { + *x = data + } +} +func testUnalignedCopy3(t *testing.T) { + var a [3]byte + t3 := [3]byte{3, 4, 5} + tu3copy_ssa(true, t3, &a) + want3 := [3]byte{3, 4, 5} + if a != want3 { + t.Errorf("tu3copy got=%v, want %v\n", a, want3) + } +} + +//go:noinline +func tu4copy_ssa(docopy bool, data [4]byte, x *[4]byte) { + if docopy { + *x = data + } +} +func testUnalignedCopy4(t *testing.T) { + var a [4]byte + t4 := [4]byte{4, 5, 6, 7} + tu4copy_ssa(true, t4, &a) + want4 := [4]byte{4, 5, 6, 7} + if a != want4 { + t.Errorf("tu4copy got=%v, want %v\n", a, want4) + } +} + +//go:noinline +func tu5copy_ssa(docopy bool, data [5]byte, x *[5]byte) { + if docopy { + *x = data + } +} +func testUnalignedCopy5(t *testing.T) { + var a [5]byte + t5 := [5]byte{5, 6, 7, 8, 9} + tu5copy_ssa(true, t5, &a) + want5 := [5]byte{5, 6, 7, 8, 9} + if a != want5 { + t.Errorf("tu5copy got=%v, want %v\n", a, want5) + } +} + +//go:noinline +func tu6copy_ssa(docopy bool, data [6]byte, x *[6]byte) { + if docopy { + *x = data + } +} +func testUnalignedCopy6(t *testing.T) { + var a [6]byte + t6 := [6]byte{6, 7, 8, 9, 10, 11} + tu6copy_ssa(true, t6, &a) + want6 := [6]byte{6, 7, 8, 9, 10, 11} + if a != want6 { + t.Errorf("tu6copy got=%v, want %v\n", a, want6) + } +} + +//go:noinline +func tu7copy_ssa(docopy bool, data [7]byte, x *[7]byte) { + if docopy { + *x = data + } +} +func testUnalignedCopy7(t *testing.T) { + var a [7]byte + t7 := [7]byte{7, 8, 9, 10, 11, 12, 13} + tu7copy_ssa(true, t7, &a) + want7 := [7]byte{7, 8, 9, 10, 11, 12, 13} + if a != want7 { + t.Errorf("tu7copy got=%v, want %v\n", a, want7) + } +} +func TestCopy(t *testing.T) { + testCopy1(t) + testCopy2(t) + testCopy3(t) + testCopy4(t) + testCopy5(t) + testCopy6(t) + testCopy7(t) + testCopy8(t) + testCopy9(t) + testCopy10(t) + 
testCopy15(t) + testCopy16(t) + testCopy17(t) + testCopy23(t) + testCopy24(t) + testCopy25(t) + testCopy31(t) + testCopy32(t) + testCopy33(t) + testCopy63(t) + testCopy64(t) + testCopy65(t) + testCopy1023(t) + testCopy1024(t) + testCopy1025(t) + testCopy1031(t) + testCopy1032(t) + testCopy1033(t) + testCopy1039(t) + testCopy1040(t) + testCopy1041(t) + testUnalignedCopy2(t) + testUnalignedCopy3(t) + testUnalignedCopy4(t) + testUnalignedCopy5(t) + testUnalignedCopy6(t) + testUnalignedCopy7(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/ctl_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/ctl_test.go new file mode 100644 index 0000000000000000000000000000000000000000..501f79eee1e2ddfd9648120ab0dbde0b98cb3e98 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/ctl_test.go @@ -0,0 +1,148 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test control flow + +package main + +import "testing" + +// nor_ssa calculates NOR(a, b). +// It is implemented in a way that generates +// phi control values. 
+func nor_ssa(a, b bool) bool { + var c bool + if a { + c = true + } + if b { + c = true + } + if c { + return false + } + return true +} + +func testPhiControl(t *testing.T) { + tests := [...][3]bool{ // a, b, want + {false, false, true}, + {true, false, false}, + {false, true, false}, + {true, true, false}, + } + for _, test := range tests { + a, b := test[0], test[1] + got := nor_ssa(a, b) + want := test[2] + if want != got { + t.Errorf("nor(%t, %t)=%t got %t", a, b, want, got) + } + } +} + +func emptyRange_ssa(b []byte) bool { + for _, x := range b { + _ = x + } + return true +} + +func testEmptyRange(t *testing.T) { + if !emptyRange_ssa([]byte{}) { + t.Errorf("emptyRange_ssa([]byte{})=false, want true") + } +} + +func switch_ssa(a int) int { + ret := 0 + switch a { + case 5: + ret += 5 + case 4: + ret += 4 + case 3: + ret += 3 + case 2: + ret += 2 + case 1: + ret += 1 + } + return ret +} + +func fallthrough_ssa(a int) int { + ret := 0 + switch a { + case 5: + ret++ + fallthrough + case 4: + ret++ + fallthrough + case 3: + ret++ + fallthrough + case 2: + ret++ + fallthrough + case 1: + ret++ + } + return ret +} + +func testFallthrough(t *testing.T) { + for i := 0; i < 6; i++ { + if got := fallthrough_ssa(i); got != i { + t.Errorf("fallthrough_ssa(i) = %d, wanted %d", got, i) + } + } +} + +func testSwitch(t *testing.T) { + for i := 0; i < 6; i++ { + if got := switch_ssa(i); got != i { + t.Errorf("switch_ssa(i) = %d, wanted %d", got, i) + } + } +} + +type junk struct { + step int +} + +// flagOverwrite_ssa is intended to reproduce an issue seen where a XOR +// was scheduled between a compare and branch, clearing flags. 
+// +//go:noinline +func flagOverwrite_ssa(s *junk, c int) int { + if '0' <= c && c <= '9' { + s.step = 0 + return 1 + } + if c == 'e' || c == 'E' { + s.step = 0 + return 2 + } + s.step = 0 + return 3 +} + +func testFlagOverwrite(t *testing.T) { + j := junk{} + if got := flagOverwrite_ssa(&j, ' '); got != 3 { + t.Errorf("flagOverwrite_ssa = %d, wanted 3", got) + } +} + +func TestCtl(t *testing.T) { + testPhiControl(t) + testEmptyRange(t) + + testSwitch(t) + testFallthrough(t) + + testFlagOverwrite(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/deferNoReturn_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/deferNoReturn_test.go new file mode 100644 index 0000000000000000000000000000000000000000..308e8976072e805366630b1c269682de2f1f0610 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/deferNoReturn_test.go @@ -0,0 +1,21 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test that a defer in a function with no return +// statement will compile correctly. + +package main + +import "testing" + +func deferNoReturn_ssa() { + defer func() { println("returned") }() + for { + println("loop") + } +} + +func TestDeferNoReturn(t *testing.T) { + // This is a compile-time test, no runtime testing required. 
+} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/divbyzero_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/divbyzero_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ee848b3cc09eef2c42e268469606a80b699cde27 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/divbyzero_test.go @@ -0,0 +1,48 @@ +package main + +import ( + "runtime" + "testing" +) + +func checkDivByZero(f func()) (divByZero bool) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: integer divide by zero" { + divByZero = true + } + } + }() + f() + return false +} + +//go:noinline +func div_a(i uint, s []int) int { + return s[i%uint(len(s))] +} + +//go:noinline +func div_b(i uint, j uint) uint { + return i / j +} + +//go:noinline +func div_c(i int) int { + return 7 / (i - i) +} + +func TestDivByZero(t *testing.T) { + if got := checkDivByZero(func() { div_b(7, 0) }); !got { + t.Errorf("expected div by zero for b(7, 0), got no error\n") + } + if got := checkDivByZero(func() { div_b(7, 7) }); got { + t.Errorf("expected no error for b(7, 7), got div by zero\n") + } + if got := checkDivByZero(func() { div_a(4, nil) }); !got { + t.Errorf("expected div by zero for a(4, nil), got no error\n") + } + if got := checkDivByZero(func() { div_c(5) }); !got { + t.Errorf("expected div by zero for c(5), got no error\n") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/dupLoad_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/dupLoad_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d85912309da87261c91e6d6290e2d58191e49fab --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/dupLoad_test.go @@ -0,0 +1,83 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This test makes sure that we don't split a single +// load up into two separate loads. + +package main + +import "testing" + +//go:noinline +func read1(b []byte) (uint16, uint16) { + // There is only a single read of b[0]. The two + // returned values must have the same low byte. + v := b[0] + return uint16(v), uint16(v) | uint16(b[1])<<8 +} + +func main1(t *testing.T) { + const N = 100000 + done := make(chan bool, 2) + b := make([]byte, 2) + go func() { + for i := 0; i < N; i++ { + b[0] = byte(i) + b[1] = byte(i) + } + done <- true + }() + go func() { + for i := 0; i < N; i++ { + x, y := read1(b) + if byte(x) != byte(y) { + t.Errorf("x=%x y=%x\n", x, y) + done <- false + return + } + } + done <- true + }() + <-done + <-done +} + +//go:noinline +func read2(b []byte) (uint16, uint16) { + // There is only a single read of b[1]. The two + // returned values must have the same high byte. + v := uint16(b[1]) << 8 + return v, uint16(b[0]) | v +} + +func main2(t *testing.T) { + const N = 100000 + done := make(chan bool, 2) + b := make([]byte, 2) + go func() { + for i := 0; i < N; i++ { + b[0] = byte(i) + b[1] = byte(i) + } + done <- true + }() + go func() { + for i := 0; i < N; i++ { + x, y := read2(b) + if x&0xff00 != y&0xff00 { + t.Errorf("x=%x y=%x\n", x, y) + done <- false + return + } + } + done <- true + }() + <-done + <-done +} + +func TestDupLoad(t *testing.T) { + main1(t) + main2(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/flowgraph_generator1.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/flowgraph_generator1.go new file mode 100644 index 0000000000000000000000000000000000000000..ad22601f43b481f7fdf84936e88d06ebbc98a5e7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/flowgraph_generator1.go @@ -0,0 +1,315 @@ +// Copyright 2016 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "strings" +) + +// make fake flow graph. + +// The blocks of the flow graph are designated with letters A +// through Z, always including A (start block) and Z (exit +// block) The specification of a flow graph is a comma- +// separated list of block successor words, for blocks ordered +// A, B, C etc, where each block except Z has one or two +// successors, and any block except A can be a target. Within +// the generated code, each block with two successors includes +// a conditional testing x & 1 != 0 (x is the input parameter +// to the generated function) and also unconditionally shifts x +// right by one, so that different inputs generate different +// execution paths, including loops. Every block inverts a +// global binary to ensure it is not empty. For a flow graph +// with J words (J+1 blocks), a J-1 bit serial number specifies +// which blocks (not including A and Z) include an increment of +// the return variable y by increasing powers of 10, and a +// different version of the test function is created for each +// of the 2-to-the-(J-1) serial numbers. + +// For each generated function a compact summary is also +// created so that the generated function can be simulated +// with a simple interpreter to sanity check the behavior of +// the compiled code. 
+ +// For example: + +// func BC_CD_BE_BZ_CZ101(x int64) int64 { +// y := int64(0) +// var b int64 +// _ = b +// b = x & 1 +// x = x >> 1 +// if b != 0 { +// goto C +// } +// goto B +// B: +// glob_ = !glob_ +// y += 1 +// b = x & 1 +// x = x >> 1 +// if b != 0 { +// goto D +// } +// goto C +// C: +// glob_ = !glob_ +// // no y increment +// b = x & 1 +// x = x >> 1 +// if b != 0 { +// goto E +// } +// goto B +// D: +// glob_ = !glob_ +// y += 10 +// b = x & 1 +// x = x >> 1 +// if b != 0 { +// goto Z +// } +// goto B +// E: +// glob_ = !glob_ +// // no y increment +// b = x & 1 +// x = x >> 1 +// if b != 0 { +// goto Z +// } +// goto C +// Z: +// return y +// } + +// {f:BC_CD_BE_BZ_CZ101, +// maxin:32, blocks:[]blo{ +// blo{inc:0, cond:true, succs:[2]int64{1, 2}}, +// blo{inc:1, cond:true, succs:[2]int64{2, 3}}, +// blo{inc:0, cond:true, succs:[2]int64{1, 4}}, +// blo{inc:10, cond:true, succs:[2]int64{1, 25}}, +// blo{inc:0, cond:true, succs:[2]int64{2, 25}},}}, + +var labels string = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + +func blocks(spec string) (blocks []string, fnameBase string) { + spec = strings.ToUpper(spec) + blocks = strings.Split(spec, ",") + fnameBase = strings.Replace(spec, ",", "_", -1) + return +} + +func makeFunctionFromFlowGraph(blocks []blo, fname string) string { + s := "" + + for j := range blocks { + // begin block + if j == 0 { + // block A, implicit label + s += ` +func ` + fname + `(x int64) int64 { + y := int64(0) + var b int64 + _ = b` + } else { + // block B,C, etc, explicit label w/ conditional increment + l := labels[j : j+1] + yeq := ` + // no y increment` + if blocks[j].inc != 0 { + yeq = ` + y += ` + fmt.Sprintf("%d", blocks[j].inc) + } + + s += ` +` + l + `: + glob = !glob` + yeq + } + + // edges to successors + if blocks[j].cond { // conditionally branch to second successor + s += ` + b = x & 1 + x = x >> 1 + if b != 0 {` + ` + goto ` + string(labels[blocks[j].succs[1]]) + ` + }` + + } + // branch to first successor + s += ` + goto ` + 
string(labels[blocks[j].succs[0]]) + } + + // end block (Z) + s += ` +Z: + return y +} +` + return s +} + +var graphs []string = []string{ + "Z", "BZ,Z", "B,BZ", "BZ,BZ", + "ZB,Z", "B,ZB", "ZB,BZ", "ZB,ZB", + + "BC,C,Z", "BC,BC,Z", "BC,BC,BZ", + "BC,Z,Z", "BC,ZC,Z", "BC,ZC,BZ", + "BZ,C,Z", "BZ,BC,Z", "BZ,CZ,Z", + "BZ,C,BZ", "BZ,BC,BZ", "BZ,CZ,BZ", + "BZ,C,CZ", "BZ,BC,CZ", "BZ,CZ,CZ", + + "BC,CD,BE,BZ,CZ", + "BC,BD,CE,CZ,BZ", + "BC,BD,CE,FZ,GZ,F,G", + "BC,BD,CE,FZ,GZ,G,F", + + "BC,DE,BE,FZ,FZ,Z", + "BC,DE,BE,FZ,ZF,Z", + "BC,DE,BE,ZF,FZ,Z", + "BC,DE,EB,FZ,FZ,Z", + "BC,ED,BE,FZ,FZ,Z", + "CB,DE,BE,FZ,FZ,Z", + + "CB,ED,BE,FZ,FZ,Z", + "BC,ED,EB,FZ,ZF,Z", + "CB,DE,EB,ZF,FZ,Z", + "CB,ED,EB,FZ,FZ,Z", + + "BZ,CD,CD,CE,BZ", + "EC,DF,FG,ZC,GB,BE,FD", + "BH,CF,DG,HE,BF,CG,DH,BZ", +} + +// blo describes a block in the generated/interpreted code +type blo struct { + inc int64 // increment amount + cond bool // block ends in conditional + succs [2]int64 +} + +// strings2blocks converts a slice of strings specifying +// successors into a slice of blo encoding the blocks in a +// common form easy to execute or interpret. +func strings2blocks(blocks []string, fname string, i int) (bs []blo, cond uint) { + bs = make([]blo, len(blocks)) + edge := int64(1) + cond = 0 + k := uint(0) + for j, s := range blocks { + if j == 0 { + } else { + if (i>>k)&1 != 0 { + bs[j].inc = edge + edge *= 10 + } + k++ + } + if len(s) > 1 { + bs[j].succs[1] = int64(blocks[j][1] - 'A') + bs[j].cond = true + cond++ + } + bs[j].succs[0] = int64(blocks[j][0] - 'A') + } + return bs, cond +} + +// fmtBlocks writes out the blocks for consumption in the generated test +func fmtBlocks(bs []blo) string { + s := "[]blo{" + for _, b := range bs { + s += fmt.Sprintf("blo{inc:%d, cond:%v, succs:[2]int64{%d, %d}},", b.inc, b.cond, b.succs[0], b.succs[1]) + } + s += "}" + return s +} + +func main() { + fmt.Printf(`// This is a machine-generated test file from flowgraph_generator1.go. 
+package main +import "fmt" +var glob bool +`) + s := "var funs []fun = []fun{" + for _, g := range graphs { + split, fnameBase := blocks(g) + nconfigs := 1 << uint(len(split)-1) + + for i := 0; i < nconfigs; i++ { + fname := fnameBase + fmt.Sprintf("%b", i) + bs, k := strings2blocks(split, fname, i) + fmt.Printf("%s", makeFunctionFromFlowGraph(bs, fname)) + s += ` + {f:` + fname + `, maxin:` + fmt.Sprintf("%d", 1<>1 + if c { + next = b.succs[1] + } + } + if next == last { + return y, true + } + j = next + } + return -1, false +} + +func main() { + sum := int64(0) + for i, f := range funs { + for x := int64(0); x < 16*f.maxin; x++ { + y, ok := interpret(f.blocks, x) + if ok { + yy := f.f(x) + if y != yy { + fmt.Printf("y(%d) != yy(%d), x=%b, i=%d, blocks=%v\n", y, yy, x, i, f.blocks) + return + } + sum += y + } + } + } +// fmt.Printf("Sum of all returns over all terminating inputs is %d\n", sum) +} +`) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/fp_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/fp_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b96ce84a6ca466a50a02c7f90b1645d6971198d0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/fp_test.go @@ -0,0 +1,1775 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests floating point arithmetic expressions + +package main + +import ( + "fmt" + "testing" +) + +// manysub_ssa is designed to tickle bugs that depend on register +// pressure or unfriendly operand ordering in registers (and at +// least once it succeeded in this). 
+// +//go:noinline +func manysub_ssa(a, b, c, d float64) (aa, ab, ac, ad, ba, bb, bc, bd, ca, cb, cc, cd, da, db, dc, dd float64) { + aa = a + 11.0 - a + ab = a - b + ac = a - c + ad = a - d + ba = b - a + bb = b + 22.0 - b + bc = b - c + bd = b - d + ca = c - a + cb = c - b + cc = c + 33.0 - c + cd = c - d + da = d - a + db = d - b + dc = d - c + dd = d + 44.0 - d + return +} + +// fpspill_ssa attempts to trigger a bug where phis with floating point values +// were stored in non-fp registers causing an error in doasm. +// +//go:noinline +func fpspill_ssa(a int) float64 { + + ret := -1.0 + switch a { + case 0: + ret = 1.0 + case 1: + ret = 1.1 + case 2: + ret = 1.2 + case 3: + ret = 1.3 + case 4: + ret = 1.4 + case 5: + ret = 1.5 + case 6: + ret = 1.6 + case 7: + ret = 1.7 + case 8: + ret = 1.8 + case 9: + ret = 1.9 + case 10: + ret = 1.10 + case 11: + ret = 1.11 + case 12: + ret = 1.12 + case 13: + ret = 1.13 + case 14: + ret = 1.14 + case 15: + ret = 1.15 + case 16: + ret = 1.16 + } + return ret +} + +//go:noinline +func add64_ssa(a, b float64) float64 { + return a + b +} + +//go:noinline +func mul64_ssa(a, b float64) float64 { + return a * b +} + +//go:noinline +func sub64_ssa(a, b float64) float64 { + return a - b +} + +//go:noinline +func div64_ssa(a, b float64) float64 { + return a / b +} + +//go:noinline +func neg64_ssa(a, b float64) float64 { + return -a + -1*b +} + +//go:noinline +func add32_ssa(a, b float32) float32 { + return a + b +} + +//go:noinline +func mul32_ssa(a, b float32) float32 { + return a * b +} + +//go:noinline +func sub32_ssa(a, b float32) float32 { + return a - b +} + +//go:noinline +func div32_ssa(a, b float32) float32 { + return a / b +} + +//go:noinline +func neg32_ssa(a, b float32) float32 { + return -a + -1*b +} + +//go:noinline +func conv2Float64_ssa(a int8, b uint8, c int16, d uint16, + e int32, f uint32, g int64, h uint64, i float32) (aa, bb, cc, dd, ee, ff, gg, hh, ii float64) { + aa = float64(a) + bb = float64(b) + cc = 
float64(c) + hh = float64(h) + dd = float64(d) + ee = float64(e) + ff = float64(f) + gg = float64(g) + ii = float64(i) + return +} + +//go:noinline +func conv2Float32_ssa(a int8, b uint8, c int16, d uint16, + e int32, f uint32, g int64, h uint64, i float64) (aa, bb, cc, dd, ee, ff, gg, hh, ii float32) { + aa = float32(a) + bb = float32(b) + cc = float32(c) + dd = float32(d) + ee = float32(e) + ff = float32(f) + gg = float32(g) + hh = float32(h) + ii = float32(i) + return +} + +func integer2floatConversions(t *testing.T) { + { + a, b, c, d, e, f, g, h, i := conv2Float64_ssa(0, 0, 0, 0, 0, 0, 0, 0, 0) + expectAll64(t, "zero64", 0, a, b, c, d, e, f, g, h, i) + } + { + a, b, c, d, e, f, g, h, i := conv2Float64_ssa(1, 1, 1, 1, 1, 1, 1, 1, 1) + expectAll64(t, "one64", 1, a, b, c, d, e, f, g, h, i) + } + { + a, b, c, d, e, f, g, h, i := conv2Float32_ssa(0, 0, 0, 0, 0, 0, 0, 0, 0) + expectAll32(t, "zero32", 0, a, b, c, d, e, f, g, h, i) + } + { + a, b, c, d, e, f, g, h, i := conv2Float32_ssa(1, 1, 1, 1, 1, 1, 1, 1, 1) + expectAll32(t, "one32", 1, a, b, c, d, e, f, g, h, i) + } + { + // Check maximum values + a, b, c, d, e, f, g, h, i := conv2Float64_ssa(127, 255, 32767, 65535, 0x7fffffff, 0xffffffff, 0x7fffFFFFffffFFFF, 0xffffFFFFffffFFFF, 3.402823e38) + expect64(t, "a", a, 127) + expect64(t, "b", b, 255) + expect64(t, "c", c, 32767) + expect64(t, "d", d, 65535) + expect64(t, "e", e, float64(int32(0x7fffffff))) + expect64(t, "f", f, float64(uint32(0xffffffff))) + expect64(t, "g", g, float64(int64(0x7fffffffffffffff))) + expect64(t, "h", h, float64(uint64(0xffffffffffffffff))) + expect64(t, "i", i, float64(float32(3.402823e38))) + } + { + // Check minimum values (and tweaks for unsigned) + a, b, c, d, e, f, g, h, i := conv2Float64_ssa(-128, 254, -32768, 65534, ^0x7fffffff, 0xfffffffe, ^0x7fffFFFFffffFFFF, 0xffffFFFFffffF401, 1.5e-45) + expect64(t, "a", a, -128) + expect64(t, "b", b, 254) + expect64(t, "c", c, -32768) + expect64(t, "d", d, 65534) + expect64(t, "e", e, 
float64(^int32(0x7fffffff))) + expect64(t, "f", f, float64(uint32(0xfffffffe))) + expect64(t, "g", g, float64(^int64(0x7fffffffffffffff))) + expect64(t, "h", h, float64(uint64(0xfffffffffffff401))) + expect64(t, "i", i, float64(float32(1.5e-45))) + } + { + // Check maximum values + a, b, c, d, e, f, g, h, i := conv2Float32_ssa(127, 255, 32767, 65535, 0x7fffffff, 0xffffffff, 0x7fffFFFFffffFFFF, 0xffffFFFFffffFFFF, 3.402823e38) + expect32(t, "a", a, 127) + expect32(t, "b", b, 255) + expect32(t, "c", c, 32767) + expect32(t, "d", d, 65535) + expect32(t, "e", e, float32(int32(0x7fffffff))) + expect32(t, "f", f, float32(uint32(0xffffffff))) + expect32(t, "g", g, float32(int64(0x7fffffffffffffff))) + expect32(t, "h", h, float32(uint64(0xffffffffffffffff))) + expect32(t, "i", i, float32(float64(3.402823e38))) + } + { + // Check minimum values (and tweaks for unsigned) + a, b, c, d, e, f, g, h, i := conv2Float32_ssa(-128, 254, -32768, 65534, ^0x7fffffff, 0xfffffffe, ^0x7fffFFFFffffFFFF, 0xffffFFFFffffF401, 1.5e-45) + expect32(t, "a", a, -128) + expect32(t, "b", b, 254) + expect32(t, "c", c, -32768) + expect32(t, "d", d, 65534) + expect32(t, "e", e, float32(^int32(0x7fffffff))) + expect32(t, "f", f, float32(uint32(0xfffffffe))) + expect32(t, "g", g, float32(^int64(0x7fffffffffffffff))) + expect32(t, "h", h, float32(uint64(0xfffffffffffff401))) + expect32(t, "i", i, float32(float64(1.5e-45))) + } +} + +func multiplyAdd(t *testing.T) { + { + // Test that a multiply-accumulate operation with intermediate + // rounding forced by a float32() cast produces the expected + // result. + // Test cases generated experimentally on a system (s390x) that + // supports fused multiply-add instructions. 
+ var tests = [...]struct{ x, y, z, res float32 }{ + {0.6046603, 0.9405091, 0.6645601, 1.2332485}, // fused multiply-add result: 1.2332486 + {0.67908466, 0.21855305, 0.20318687, 0.3516029}, // fused multiply-add result: 0.35160288 + {0.29311424, 0.29708257, 0.752573, 0.8396522}, // fused multiply-add result: 0.8396521 + {0.5305857, 0.2535405, 0.282081, 0.41660595}, // fused multiply-add result: 0.41660598 + {0.29711226, 0.89436173, 0.097454615, 0.36318043}, // fused multiply-add result: 0.36318046 + {0.6810783, 0.24151509, 0.31152245, 0.47601312}, // fused multiply-add result: 0.47601315 + {0.73023146, 0.18292491, 0.4283571, 0.5619346}, // fused multiply-add result: 0.56193465 + {0.89634174, 0.32208398, 0.7211478, 1.009845}, // fused multiply-add result: 1.0098451 + {0.6280982, 0.12675293, 0.2813303, 0.36094356}, // fused multiply-add result: 0.3609436 + {0.29400632, 0.75316125, 0.15096405, 0.3723982}, // fused multiply-add result: 0.37239823 + } + check := func(s string, got, expected float32) { + if got != expected { + fmt.Printf("multiplyAdd: %s, expected %g, got %g\n", s, expected, got) + } + } + for _, t := range tests { + check( + fmt.Sprintf("float32(%v * %v) + %v", t.x, t.y, t.z), + func(x, y, z float32) float32 { + return float32(x*y) + z + }(t.x, t.y, t.z), + t.res) + + check( + fmt.Sprintf("%v += float32(%v * %v)", t.z, t.x, t.y), + func(x, y, z float32) float32 { + z += float32(x * y) + return z + }(t.x, t.y, t.z), + t.res) + } + } + { + // Test that a multiply-accumulate operation with intermediate + // rounding forced by a float64() cast produces the expected + // result. + // Test cases generated experimentally on a system (s390x) that + // supports fused multiply-add instructions. 
+ var tests = [...]struct{ x, y, z, res float64 }{ + {0.4688898449024232, 0.28303415118044517, 0.29310185733681576, 0.42581369658590373}, // fused multiply-add result: 0.4258136965859037 + {0.7886049150193449, 0.3618054804803169, 0.8805431227416171, 1.1658647029293308}, // fused multiply-add result: 1.1658647029293305 + {0.7302314772948083, 0.18292491645390843, 0.4283570818068078, 0.5619346137829748}, // fused multiply-add result: 0.5619346137829747 + {0.6908388315056789, 0.7109071952999951, 0.5637795958152644, 1.0549018919252924}, // fused multiply-add result: 1.0549018919252926 + {0.4584424785756506, 0.6001655953233308, 0.02626515060968944, 0.3014065536855481}, // fused multiply-add result: 0.30140655368554814 + {0.539210105890946, 0.9756748149873165, 0.7507630564795985, 1.2768567767840384}, // fused multiply-add result: 1.2768567767840386 + {0.7830349733960021, 0.3932509992288867, 0.1304138461737918, 0.4383431318929343}, // fused multiply-add result: 0.43834313189293433 + {0.6841751300974551, 0.6530402051353608, 0.524499759549865, 0.9712936268572192}, // fused multiply-add result: 0.9712936268572193 + {0.3691117091643448, 0.826454125634742, 0.34768170859156955, 0.6527356034505334}, // fused multiply-add result: 0.6527356034505333 + {0.16867966833433606, 0.33136826030698385, 0.8279280961505588, 0.8838231843956668}, // fused multiply-add result: 0.8838231843956669 + } + check := func(s string, got, expected float64) { + if got != expected { + fmt.Printf("multiplyAdd: %s, expected %g, got %g\n", s, expected, got) + } + } + for _, t := range tests { + check( + fmt.Sprintf("float64(%v * %v) + %v", t.x, t.y, t.z), + func(x, y, z float64) float64 { + return float64(x*y) + z + }(t.x, t.y, t.z), + t.res) + + check( + fmt.Sprintf("%v += float64(%v * %v)", t.z, t.x, t.y), + func(x, y, z float64) float64 { + z += float64(x * y) + return z + }(t.x, t.y, t.z), + t.res) + } + } + { + // Test that a multiply-accumulate operation with intermediate + // rounding forced by a 
complex128() cast produces the expected + // result. + // Test cases generated experimentally on a system (s390x) that + // supports fused multiply-add instructions. + var tests = [...]struct { + x, y float64 + res complex128 + }{ + {0.6046602879796196, 0.9405090880450124, (2.754489951983871 + 3i)}, // fused multiply-add result: (2.7544899519838713 + 3i) + {0.09696951891448456, 0.30091186058528707, (0.5918204173287407 + 3i)}, // fused multiply-add result: (0.5918204173287408 + 3i) + {0.544155573000885, 0.27850762181610883, (1.910974340818764 + 3i)}, // fused multiply-add result: (1.9109743408187638 + 3i) + {0.9769168685862624, 0.07429099894984302, (3.0050416047086297 + 3i)}, // fused multiply-add result: (3.00504160470863 + 3i) + {0.9269868035744142, 0.9549454404167818, (3.735905851140024 + 3i)}, // fused multiply-add result: (3.7359058511400245 + 3i) + {0.7109071952999951, 0.5637795958152644, (2.69650118171525 + 3i)}, // fused multiply-add result: (2.6965011817152496 + 3i) + {0.7558235074915978, 0.40380328579570035, (2.671273808270494 + 3i)}, // fused multiply-add result: (2.6712738082704934 + 3i) + {0.13065111702897217, 0.9859647293402467, (1.3779180804271633 + 3i)}, // fused multiply-add result: (1.3779180804271631 + 3i) + {0.8963417453962161, 0.3220839705208817, (3.0111092067095298 + 3i)}, // fused multiply-add result: (3.01110920670953 + 3i) + {0.39998376285699544, 0.497868113342702, (1.697819401913688 + 3i)}, // fused multiply-add result: (1.6978194019136883 + 3i) + } + check := func(s string, got, expected complex128) { + if got != expected { + fmt.Printf("multiplyAdd: %s, expected %v, got %v\n", s, expected, got) + } + } + for _, t := range tests { + check( + fmt.Sprintf("complex128(complex(%v, 1)*3) + complex(%v, 0)", t.x, t.y), + func(x, y float64) complex128 { + return complex128(complex(x, 1)*3) + complex(y, 0) + }(t.x, t.y), + t.res) + + check( + fmt.Sprintf("z := complex(%v, 1); z += complex128(complex(%v, 1) * 3)", t.y, t.x), + func(x, y float64) 
complex128 { + z := complex(y, 0) + z += complex128(complex(x, 1) * 3) + return z + }(t.x, t.y), + t.res) + } + } +} + +const ( + aa = 0x1000000000000000 + ab = 0x100000000000000 + ac = 0x10000000000000 + ad = 0x1000000000000 + ba = 0x100000000000 + bb = 0x10000000000 + bc = 0x1000000000 + bd = 0x100000000 + ca = 0x10000000 + cb = 0x1000000 + cc = 0x100000 + cd = 0x10000 + da = 0x1000 + db = 0x100 + dc = 0x10 + dd = 0x1 +) + +//go:noinline +func compares64_ssa(a, b, c, d float64) (lt, le, eq, ne, ge, gt uint64) { + if a < a { + lt += aa + } + if a < b { + lt += ab + } + if a < c { + lt += ac + } + if a < d { + lt += ad + } + + if b < a { + lt += ba + } + if b < b { + lt += bb + } + if b < c { + lt += bc + } + if b < d { + lt += bd + } + + if c < a { + lt += ca + } + if c < b { + lt += cb + } + if c < c { + lt += cc + } + if c < d { + lt += cd + } + + if d < a { + lt += da + } + if d < b { + lt += db + } + if d < c { + lt += dc + } + if d < d { + lt += dd + } + + if a <= a { + le += aa + } + if a <= b { + le += ab + } + if a <= c { + le += ac + } + if a <= d { + le += ad + } + + if b <= a { + le += ba + } + if b <= b { + le += bb + } + if b <= c { + le += bc + } + if b <= d { + le += bd + } + + if c <= a { + le += ca + } + if c <= b { + le += cb + } + if c <= c { + le += cc + } + if c <= d { + le += cd + } + + if d <= a { + le += da + } + if d <= b { + le += db + } + if d <= c { + le += dc + } + if d <= d { + le += dd + } + + if a == a { + eq += aa + } + if a == b { + eq += ab + } + if a == c { + eq += ac + } + if a == d { + eq += ad + } + + if b == a { + eq += ba + } + if b == b { + eq += bb + } + if b == c { + eq += bc + } + if b == d { + eq += bd + } + + if c == a { + eq += ca + } + if c == b { + eq += cb + } + if c == c { + eq += cc + } + if c == d { + eq += cd + } + + if d == a { + eq += da + } + if d == b { + eq += db + } + if d == c { + eq += dc + } + if d == d { + eq += dd + } + + if a != a { + ne += aa + } + if a != b { + ne += ab + } + if a != c { + ne += 
ac + } + if a != d { + ne += ad + } + + if b != a { + ne += ba + } + if b != b { + ne += bb + } + if b != c { + ne += bc + } + if b != d { + ne += bd + } + + if c != a { + ne += ca + } + if c != b { + ne += cb + } + if c != c { + ne += cc + } + if c != d { + ne += cd + } + + if d != a { + ne += da + } + if d != b { + ne += db + } + if d != c { + ne += dc + } + if d != d { + ne += dd + } + + if a >= a { + ge += aa + } + if a >= b { + ge += ab + } + if a >= c { + ge += ac + } + if a >= d { + ge += ad + } + + if b >= a { + ge += ba + } + if b >= b { + ge += bb + } + if b >= c { + ge += bc + } + if b >= d { + ge += bd + } + + if c >= a { + ge += ca + } + if c >= b { + ge += cb + } + if c >= c { + ge += cc + } + if c >= d { + ge += cd + } + + if d >= a { + ge += da + } + if d >= b { + ge += db + } + if d >= c { + ge += dc + } + if d >= d { + ge += dd + } + + if a > a { + gt += aa + } + if a > b { + gt += ab + } + if a > c { + gt += ac + } + if a > d { + gt += ad + } + + if b > a { + gt += ba + } + if b > b { + gt += bb + } + if b > c { + gt += bc + } + if b > d { + gt += bd + } + + if c > a { + gt += ca + } + if c > b { + gt += cb + } + if c > c { + gt += cc + } + if c > d { + gt += cd + } + + if d > a { + gt += da + } + if d > b { + gt += db + } + if d > c { + gt += dc + } + if d > d { + gt += dd + } + + return +} + +//go:noinline +func compares32_ssa(a, b, c, d float32) (lt, le, eq, ne, ge, gt uint64) { + if a < a { + lt += aa + } + if a < b { + lt += ab + } + if a < c { + lt += ac + } + if a < d { + lt += ad + } + + if b < a { + lt += ba + } + if b < b { + lt += bb + } + if b < c { + lt += bc + } + if b < d { + lt += bd + } + + if c < a { + lt += ca + } + if c < b { + lt += cb + } + if c < c { + lt += cc + } + if c < d { + lt += cd + } + + if d < a { + lt += da + } + if d < b { + lt += db + } + if d < c { + lt += dc + } + if d < d { + lt += dd + } + + if a <= a { + le += aa + } + if a <= b { + le += ab + } + if a <= c { + le += ac + } + if a <= d { + le += ad + } + + 
if b <= a { + le += ba + } + if b <= b { + le += bb + } + if b <= c { + le += bc + } + if b <= d { + le += bd + } + + if c <= a { + le += ca + } + if c <= b { + le += cb + } + if c <= c { + le += cc + } + if c <= d { + le += cd + } + + if d <= a { + le += da + } + if d <= b { + le += db + } + if d <= c { + le += dc + } + if d <= d { + le += dd + } + + if a == a { + eq += aa + } + if a == b { + eq += ab + } + if a == c { + eq += ac + } + if a == d { + eq += ad + } + + if b == a { + eq += ba + } + if b == b { + eq += bb + } + if b == c { + eq += bc + } + if b == d { + eq += bd + } + + if c == a { + eq += ca + } + if c == b { + eq += cb + } + if c == c { + eq += cc + } + if c == d { + eq += cd + } + + if d == a { + eq += da + } + if d == b { + eq += db + } + if d == c { + eq += dc + } + if d == d { + eq += dd + } + + if a != a { + ne += aa + } + if a != b { + ne += ab + } + if a != c { + ne += ac + } + if a != d { + ne += ad + } + + if b != a { + ne += ba + } + if b != b { + ne += bb + } + if b != c { + ne += bc + } + if b != d { + ne += bd + } + + if c != a { + ne += ca + } + if c != b { + ne += cb + } + if c != c { + ne += cc + } + if c != d { + ne += cd + } + + if d != a { + ne += da + } + if d != b { + ne += db + } + if d != c { + ne += dc + } + if d != d { + ne += dd + } + + if a >= a { + ge += aa + } + if a >= b { + ge += ab + } + if a >= c { + ge += ac + } + if a >= d { + ge += ad + } + + if b >= a { + ge += ba + } + if b >= b { + ge += bb + } + if b >= c { + ge += bc + } + if b >= d { + ge += bd + } + + if c >= a { + ge += ca + } + if c >= b { + ge += cb + } + if c >= c { + ge += cc + } + if c >= d { + ge += cd + } + + if d >= a { + ge += da + } + if d >= b { + ge += db + } + if d >= c { + ge += dc + } + if d >= d { + ge += dd + } + + if a > a { + gt += aa + } + if a > b { + gt += ab + } + if a > c { + gt += ac + } + if a > d { + gt += ad + } + + if b > a { + gt += ba + } + if b > b { + gt += bb + } + if b > c { + gt += bc + } + if b > d { + gt += bd + } + + 
if c > a { + gt += ca + } + if c > b { + gt += cb + } + if c > c { + gt += cc + } + if c > d { + gt += cd + } + + if d > a { + gt += da + } + if d > b { + gt += db + } + if d > c { + gt += dc + } + if d > d { + gt += dd + } + + return +} + +//go:noinline +func le64_ssa(x, y float64) bool { + return x <= y +} + +//go:noinline +func ge64_ssa(x, y float64) bool { + return x >= y +} + +//go:noinline +func lt64_ssa(x, y float64) bool { + return x < y +} + +//go:noinline +func gt64_ssa(x, y float64) bool { + return x > y +} + +//go:noinline +func eq64_ssa(x, y float64) bool { + return x == y +} + +//go:noinline +func ne64_ssa(x, y float64) bool { + return x != y +} + +//go:noinline +func eqbr64_ssa(x, y float64) float64 { + if x == y { + return 17 + } + return 42 +} + +//go:noinline +func nebr64_ssa(x, y float64) float64 { + if x != y { + return 17 + } + return 42 +} + +//go:noinline +func gebr64_ssa(x, y float64) float64 { + if x >= y { + return 17 + } + return 42 +} + +//go:noinline +func lebr64_ssa(x, y float64) float64 { + if x <= y { + return 17 + } + return 42 +} + +//go:noinline +func ltbr64_ssa(x, y float64) float64 { + if x < y { + return 17 + } + return 42 +} + +//go:noinline +func gtbr64_ssa(x, y float64) float64 { + if x > y { + return 17 + } + return 42 +} + +//go:noinline +func le32_ssa(x, y float32) bool { + return x <= y +} + +//go:noinline +func ge32_ssa(x, y float32) bool { + return x >= y +} + +//go:noinline +func lt32_ssa(x, y float32) bool { + return x < y +} + +//go:noinline +func gt32_ssa(x, y float32) bool { + return x > y +} + +//go:noinline +func eq32_ssa(x, y float32) bool { + return x == y +} + +//go:noinline +func ne32_ssa(x, y float32) bool { + return x != y +} + +//go:noinline +func eqbr32_ssa(x, y float32) float32 { + if x == y { + return 17 + } + return 42 +} + +//go:noinline +func nebr32_ssa(x, y float32) float32 { + if x != y { + return 17 + } + return 42 +} + +//go:noinline +func gebr32_ssa(x, y float32) float32 { + if x >= y { + 
return 17 + } + return 42 +} + +//go:noinline +func lebr32_ssa(x, y float32) float32 { + if x <= y { + return 17 + } + return 42 +} + +//go:noinline +func ltbr32_ssa(x, y float32) float32 { + if x < y { + return 17 + } + return 42 +} + +//go:noinline +func gtbr32_ssa(x, y float32) float32 { + if x > y { + return 17 + } + return 42 +} + +//go:noinline +func F32toU8_ssa(x float32) uint8 { + return uint8(x) +} + +//go:noinline +func F32toI8_ssa(x float32) int8 { + return int8(x) +} + +//go:noinline +func F32toU16_ssa(x float32) uint16 { + return uint16(x) +} + +//go:noinline +func F32toI16_ssa(x float32) int16 { + return int16(x) +} + +//go:noinline +func F32toU32_ssa(x float32) uint32 { + return uint32(x) +} + +//go:noinline +func F32toI32_ssa(x float32) int32 { + return int32(x) +} + +//go:noinline +func F32toU64_ssa(x float32) uint64 { + return uint64(x) +} + +//go:noinline +func F32toI64_ssa(x float32) int64 { + return int64(x) +} + +//go:noinline +func F64toU8_ssa(x float64) uint8 { + return uint8(x) +} + +//go:noinline +func F64toI8_ssa(x float64) int8 { + return int8(x) +} + +//go:noinline +func F64toU16_ssa(x float64) uint16 { + return uint16(x) +} + +//go:noinline +func F64toI16_ssa(x float64) int16 { + return int16(x) +} + +//go:noinline +func F64toU32_ssa(x float64) uint32 { + return uint32(x) +} + +//go:noinline +func F64toI32_ssa(x float64) int32 { + return int32(x) +} + +//go:noinline +func F64toU64_ssa(x float64) uint64 { + return uint64(x) +} + +//go:noinline +func F64toI64_ssa(x float64) int64 { + return int64(x) +} + +func floatsToInts(t *testing.T, x float64, expected int64) { + y := float32(x) + expectInt64(t, "F64toI8", int64(F64toI8_ssa(x)), expected) + expectInt64(t, "F64toI16", int64(F64toI16_ssa(x)), expected) + expectInt64(t, "F64toI32", int64(F64toI32_ssa(x)), expected) + expectInt64(t, "F64toI64", int64(F64toI64_ssa(x)), expected) + expectInt64(t, "F32toI8", int64(F32toI8_ssa(y)), expected) + expectInt64(t, "F32toI16", 
int64(F32toI16_ssa(y)), expected) + expectInt64(t, "F32toI32", int64(F32toI32_ssa(y)), expected) + expectInt64(t, "F32toI64", int64(F32toI64_ssa(y)), expected) +} + +func floatsToUints(t *testing.T, x float64, expected uint64) { + y := float32(x) + expectUint64(t, "F64toU8", uint64(F64toU8_ssa(x)), expected) + expectUint64(t, "F64toU16", uint64(F64toU16_ssa(x)), expected) + expectUint64(t, "F64toU32", uint64(F64toU32_ssa(x)), expected) + expectUint64(t, "F64toU64", uint64(F64toU64_ssa(x)), expected) + expectUint64(t, "F32toU8", uint64(F32toU8_ssa(y)), expected) + expectUint64(t, "F32toU16", uint64(F32toU16_ssa(y)), expected) + expectUint64(t, "F32toU32", uint64(F32toU32_ssa(y)), expected) + expectUint64(t, "F32toU64", uint64(F32toU64_ssa(y)), expected) +} + +func floatingToIntegerConversionsTest(t *testing.T) { + floatsToInts(t, 0.0, 0) + floatsToInts(t, 0.5, 0) + floatsToInts(t, 0.9, 0) + floatsToInts(t, 1.0, 1) + floatsToInts(t, 1.5, 1) + floatsToInts(t, 127.0, 127) + floatsToInts(t, -1.0, -1) + floatsToInts(t, -128.0, -128) + + floatsToUints(t, 0.0, 0) + floatsToUints(t, 1.0, 1) + floatsToUints(t, 255.0, 255) + + for j := uint(0); j < 24; j++ { + // Avoid hard cases in the construction + // of the test inputs. + v := int64(1<<62) | int64(1<<(62-j)) + w := uint64(v) + f := float32(v) + d := float64(v) + expectUint64(t, "2**62...", F32toU64_ssa(f), w) + expectUint64(t, "2**62...", F64toU64_ssa(d), w) + expectInt64(t, "2**62...", F32toI64_ssa(f), v) + expectInt64(t, "2**62...", F64toI64_ssa(d), v) + expectInt64(t, "2**62...", F32toI64_ssa(-f), -v) + expectInt64(t, "2**62...", F64toI64_ssa(-d), -v) + w += w + f += f + d += d + expectUint64(t, "2**63...", F32toU64_ssa(f), w) + expectUint64(t, "2**63...", F64toU64_ssa(d), w) + } + + for j := uint(0); j < 16; j++ { + // Avoid hard cases in the construction + // of the test inputs. 
+ v := int32(1<<30) | int32(1<<(30-j)) + w := uint32(v) + f := float32(v) + d := float64(v) + expectUint32(t, "2**30...", F32toU32_ssa(f), w) + expectUint32(t, "2**30...", F64toU32_ssa(d), w) + expectInt32(t, "2**30...", F32toI32_ssa(f), v) + expectInt32(t, "2**30...", F64toI32_ssa(d), v) + expectInt32(t, "2**30...", F32toI32_ssa(-f), -v) + expectInt32(t, "2**30...", F64toI32_ssa(-d), -v) + w += w + f += f + d += d + expectUint32(t, "2**31...", F32toU32_ssa(f), w) + expectUint32(t, "2**31...", F64toU32_ssa(d), w) + } + + for j := uint(0); j < 15; j++ { + // Avoid hard cases in the construction + // of the test inputs. + v := int16(1<<14) | int16(1<<(14-j)) + w := uint16(v) + f := float32(v) + d := float64(v) + expectUint16(t, "2**14...", F32toU16_ssa(f), w) + expectUint16(t, "2**14...", F64toU16_ssa(d), w) + expectInt16(t, "2**14...", F32toI16_ssa(f), v) + expectInt16(t, "2**14...", F64toI16_ssa(d), v) + expectInt16(t, "2**14...", F32toI16_ssa(-f), -v) + expectInt16(t, "2**14...", F64toI16_ssa(-d), -v) + w += w + f += f + d += d + expectUint16(t, "2**15...", F32toU16_ssa(f), w) + expectUint16(t, "2**15...", F64toU16_ssa(d), w) + } + + expectInt32(t, "-2147483648", F32toI32_ssa(-2147483648), -2147483648) + + expectInt32(t, "-2147483648", F64toI32_ssa(-2147483648), -2147483648) + expectInt32(t, "-2147483647", F64toI32_ssa(-2147483647), -2147483647) + expectUint32(t, "4294967295", F64toU32_ssa(4294967295), 4294967295) + + expectInt16(t, "-32768", F64toI16_ssa(-32768), -32768) + expectInt16(t, "-32768", F32toI16_ssa(-32768), -32768) + + // NB more of a pain to do these for 32-bit because of lost bits in Float32 mantissa + expectInt16(t, "32767", F64toI16_ssa(32767), 32767) + expectInt16(t, "32767", F32toI16_ssa(32767), 32767) + expectUint16(t, "32767", F64toU16_ssa(32767), 32767) + expectUint16(t, "32767", F32toU16_ssa(32767), 32767) + expectUint16(t, "65535", F64toU16_ssa(65535), 65535) + expectUint16(t, "65535", F32toU16_ssa(65535), 65535) +} + +func fail64(s string, 
f func(a, b float64) float64, a, b, e float64) { + d := f(a, b) + if d != e { + fmt.Printf("For (float64) %v %v %v, expected %v, got %v\n", a, s, b, e, d) + } +} + +func fail64bool(s string, f func(a, b float64) bool, a, b float64, e bool) { + d := f(a, b) + if d != e { + fmt.Printf("For (float64) %v %v %v, expected %v, got %v\n", a, s, b, e, d) + } +} + +func fail32(s string, f func(a, b float32) float32, a, b, e float32) { + d := f(a, b) + if d != e { + fmt.Printf("For (float32) %v %v %v, expected %v, got %v\n", a, s, b, e, d) + } +} + +func fail32bool(s string, f func(a, b float32) bool, a, b float32, e bool) { + d := f(a, b) + if d != e { + fmt.Printf("For (float32) %v %v %v, expected %v, got %v\n", a, s, b, e, d) + } +} + +func expect64(t *testing.T, s string, x, expected float64) { + if x != expected { + println("F64 Expected", expected, "for", s, ", got", x) + } +} + +func expect32(t *testing.T, s string, x, expected float32) { + if x != expected { + println("F32 Expected", expected, "for", s, ", got", x) + } +} + +func expectUint64(t *testing.T, s string, x, expected uint64) { + if x != expected { + fmt.Printf("U64 Expected 0x%016x for %s, got 0x%016x\n", expected, s, x) + } +} + +func expectInt64(t *testing.T, s string, x, expected int64) { + if x != expected { + fmt.Printf("%s: Expected 0x%016x, got 0x%016x\n", s, expected, x) + } +} + +func expectUint32(t *testing.T, s string, x, expected uint32) { + if x != expected { + fmt.Printf("U32 %s: Expected 0x%08x, got 0x%08x\n", s, expected, x) + } +} + +func expectInt32(t *testing.T, s string, x, expected int32) { + if x != expected { + fmt.Printf("I32 %s: Expected 0x%08x, got 0x%08x\n", s, expected, x) + } +} + +func expectUint16(t *testing.T, s string, x, expected uint16) { + if x != expected { + fmt.Printf("U16 %s: Expected 0x%04x, got 0x%04x\n", s, expected, x) + } +} + +func expectInt16(t *testing.T, s string, x, expected int16) { + if x != expected { + fmt.Printf("I16 %s: Expected 0x%04x, got 0x%04x\n", 
s, expected, x) + } +} + +func expectAll64(t *testing.T, s string, expected, a, b, c, d, e, f, g, h, i float64) { + expect64(t, s+":a", a, expected) + expect64(t, s+":b", b, expected) + expect64(t, s+":c", c, expected) + expect64(t, s+":d", d, expected) + expect64(t, s+":e", e, expected) + expect64(t, s+":f", f, expected) + expect64(t, s+":g", g, expected) +} + +func expectAll32(t *testing.T, s string, expected, a, b, c, d, e, f, g, h, i float32) { + expect32(t, s+":a", a, expected) + expect32(t, s+":b", b, expected) + expect32(t, s+":c", c, expected) + expect32(t, s+":d", d, expected) + expect32(t, s+":e", e, expected) + expect32(t, s+":f", f, expected) + expect32(t, s+":g", g, expected) +} + +var ev64 [2]float64 = [2]float64{42.0, 17.0} +var ev32 [2]float32 = [2]float32{42.0, 17.0} + +func cmpOpTest(t *testing.T, + s string, + f func(a, b float64) bool, + g func(a, b float64) float64, + ff func(a, b float32) bool, + gg func(a, b float32) float32, + zero, one, inf, nan float64, result uint) { + fail64bool(s, f, zero, zero, result>>16&1 == 1) + fail64bool(s, f, zero, one, result>>12&1 == 1) + fail64bool(s, f, zero, inf, result>>8&1 == 1) + fail64bool(s, f, zero, nan, result>>4&1 == 1) + fail64bool(s, f, nan, nan, result&1 == 1) + + fail64(s, g, zero, zero, ev64[result>>16&1]) + fail64(s, g, zero, one, ev64[result>>12&1]) + fail64(s, g, zero, inf, ev64[result>>8&1]) + fail64(s, g, zero, nan, ev64[result>>4&1]) + fail64(s, g, nan, nan, ev64[result>>0&1]) + + { + zero := float32(zero) + one := float32(one) + inf := float32(inf) + nan := float32(nan) + fail32bool(s, ff, zero, zero, (result>>16)&1 == 1) + fail32bool(s, ff, zero, one, (result>>12)&1 == 1) + fail32bool(s, ff, zero, inf, (result>>8)&1 == 1) + fail32bool(s, ff, zero, nan, (result>>4)&1 == 1) + fail32bool(s, ff, nan, nan, result&1 == 1) + + fail32(s, gg, zero, zero, ev32[(result>>16)&1]) + fail32(s, gg, zero, one, ev32[(result>>12)&1]) + fail32(s, gg, zero, inf, ev32[(result>>8)&1]) + fail32(s, gg, zero, 
nan, ev32[(result>>4)&1]) + fail32(s, gg, nan, nan, ev32[(result>>0)&1]) + } +} + +func expectCx128(t *testing.T, s string, x, expected complex128) { + if x != expected { + t.Errorf("Cx 128 Expected %f for %s, got %f", expected, s, x) + } +} + +func expectCx64(t *testing.T, s string, x, expected complex64) { + if x != expected { + t.Errorf("Cx 64 Expected %f for %s, got %f", expected, s, x) + } +} + +//go:noinline +func cx128sum_ssa(a, b complex128) complex128 { + return a + b +} + +//go:noinline +func cx128diff_ssa(a, b complex128) complex128 { + return a - b +} + +//go:noinline +func cx128prod_ssa(a, b complex128) complex128 { + return a * b +} + +//go:noinline +func cx128quot_ssa(a, b complex128) complex128 { + return a / b +} + +//go:noinline +func cx128neg_ssa(a complex128) complex128 { + return -a +} + +//go:noinline +func cx128real_ssa(a complex128) float64 { + return real(a) +} + +//go:noinline +func cx128imag_ssa(a complex128) float64 { + return imag(a) +} + +//go:noinline +func cx128cnst_ssa(a complex128) complex128 { + b := 2 + 3i + return a * b +} + +//go:noinline +func cx64sum_ssa(a, b complex64) complex64 { + return a + b +} + +//go:noinline +func cx64diff_ssa(a, b complex64) complex64 { + return a - b +} + +//go:noinline +func cx64prod_ssa(a, b complex64) complex64 { + return a * b +} + +//go:noinline +func cx64quot_ssa(a, b complex64) complex64 { + return a / b +} + +//go:noinline +func cx64neg_ssa(a complex64) complex64 { + return -a +} + +//go:noinline +func cx64real_ssa(a complex64) float32 { + return real(a) +} + +//go:noinline +func cx64imag_ssa(a complex64) float32 { + return imag(a) +} + +//go:noinline +func cx128eq_ssa(a, b complex128) bool { + return a == b +} + +//go:noinline +func cx128ne_ssa(a, b complex128) bool { + return a != b +} + +//go:noinline +func cx64eq_ssa(a, b complex64) bool { + return a == b +} + +//go:noinline +func cx64ne_ssa(a, b complex64) bool { + return a != b +} + +func expectTrue(t *testing.T, s string, b bool) { + 
if !b { + t.Errorf("expected true for %s, got false", s) + } +} +func expectFalse(t *testing.T, s string, b bool) { + if b { + t.Errorf("expected false for %s, got true", s) + } +} + +func complexTest128(t *testing.T) { + var a complex128 = 1 + 2i + var b complex128 = 3 + 6i + sum := cx128sum_ssa(b, a) + diff := cx128diff_ssa(b, a) + prod := cx128prod_ssa(b, a) + quot := cx128quot_ssa(b, a) + neg := cx128neg_ssa(a) + r := cx128real_ssa(a) + i := cx128imag_ssa(a) + cnst := cx128cnst_ssa(a) + c1 := cx128eq_ssa(a, a) + c2 := cx128eq_ssa(a, b) + c3 := cx128ne_ssa(a, a) + c4 := cx128ne_ssa(a, b) + + expectCx128(t, "sum", sum, 4+8i) + expectCx128(t, "diff", diff, 2+4i) + expectCx128(t, "prod", prod, -9+12i) + expectCx128(t, "quot", quot, 3+0i) + expectCx128(t, "neg", neg, -1-2i) + expect64(t, "real", r, 1) + expect64(t, "imag", i, 2) + expectCx128(t, "cnst", cnst, -4+7i) + expectTrue(t, fmt.Sprintf("%v==%v", a, a), c1) + expectFalse(t, fmt.Sprintf("%v==%v", a, b), c2) + expectFalse(t, fmt.Sprintf("%v!=%v", a, a), c3) + expectTrue(t, fmt.Sprintf("%v!=%v", a, b), c4) +} + +func complexTest64(t *testing.T) { + var a complex64 = 1 + 2i + var b complex64 = 3 + 6i + sum := cx64sum_ssa(b, a) + diff := cx64diff_ssa(b, a) + prod := cx64prod_ssa(b, a) + quot := cx64quot_ssa(b, a) + neg := cx64neg_ssa(a) + r := cx64real_ssa(a) + i := cx64imag_ssa(a) + c1 := cx64eq_ssa(a, a) + c2 := cx64eq_ssa(a, b) + c3 := cx64ne_ssa(a, a) + c4 := cx64ne_ssa(a, b) + + expectCx64(t, "sum", sum, 4+8i) + expectCx64(t, "diff", diff, 2+4i) + expectCx64(t, "prod", prod, -9+12i) + expectCx64(t, "quot", quot, 3+0i) + expectCx64(t, "neg", neg, -1-2i) + expect32(t, "real", r, 1) + expect32(t, "imag", i, 2) + expectTrue(t, fmt.Sprintf("%v==%v", a, a), c1) + expectFalse(t, fmt.Sprintf("%v==%v", a, b), c2) + expectFalse(t, fmt.Sprintf("%v!=%v", a, a), c3) + expectTrue(t, fmt.Sprintf("%v!=%v", a, b), c4) +} + +// TestFP tests that we get the right answer for floating point expressions. 
+func TestFP(t *testing.T) { + a := 3.0 + b := 4.0 + + c := float32(3.0) + d := float32(4.0) + + tiny := float32(1.5e-45) // smallest f32 denorm = 2**(-149) + dtiny := float64(tiny) // well within range of f64 + + fail64("+", add64_ssa, a, b, 7.0) + fail64("*", mul64_ssa, a, b, 12.0) + fail64("-", sub64_ssa, a, b, -1.0) + fail64("/", div64_ssa, a, b, 0.75) + fail64("neg", neg64_ssa, a, b, -7) + + fail32("+", add32_ssa, c, d, 7.0) + fail32("*", mul32_ssa, c, d, 12.0) + fail32("-", sub32_ssa, c, d, -1.0) + fail32("/", div32_ssa, c, d, 0.75) + fail32("neg", neg32_ssa, c, d, -7) + + // denorm-squared should underflow to zero. + fail32("*", mul32_ssa, tiny, tiny, 0) + + // but should not underflow in float and in fact is exactly representable. + fail64("*", mul64_ssa, dtiny, dtiny, 1.9636373861190906e-90) + + // Intended to create register pressure which forces + // asymmetric op into different code paths. + aa, ab, ac, ad, ba, bb, bc, bd, ca, cb, cc, cd, da, db, dc, dd := manysub_ssa(1000.0, 100.0, 10.0, 1.0) + + expect64(t, "aa", aa, 11.0) + expect64(t, "ab", ab, 900.0) + expect64(t, "ac", ac, 990.0) + expect64(t, "ad", ad, 999.0) + + expect64(t, "ba", ba, -900.0) + expect64(t, "bb", bb, 22.0) + expect64(t, "bc", bc, 90.0) + expect64(t, "bd", bd, 99.0) + + expect64(t, "ca", ca, -990.0) + expect64(t, "cb", cb, -90.0) + expect64(t, "cc", cc, 33.0) + expect64(t, "cd", cd, 9.0) + + expect64(t, "da", da, -999.0) + expect64(t, "db", db, -99.0) + expect64(t, "dc", dc, -9.0) + expect64(t, "dd", dd, 44.0) + + integer2floatConversions(t) + + multiplyAdd(t) + + var zero64 float64 = 0.0 + var one64 float64 = 1.0 + var inf64 float64 = 1.0 / zero64 + var nan64 float64 = sub64_ssa(inf64, inf64) + + cmpOpTest(t, "!=", ne64_ssa, nebr64_ssa, ne32_ssa, nebr32_ssa, zero64, one64, inf64, nan64, 0x01111) + cmpOpTest(t, "==", eq64_ssa, eqbr64_ssa, eq32_ssa, eqbr32_ssa, zero64, one64, inf64, nan64, 0x10000) + cmpOpTest(t, "<=", le64_ssa, lebr64_ssa, le32_ssa, lebr32_ssa, zero64, one64, 
inf64, nan64, 0x11100) + cmpOpTest(t, "<", lt64_ssa, ltbr64_ssa, lt32_ssa, ltbr32_ssa, zero64, one64, inf64, nan64, 0x01100) + cmpOpTest(t, ">", gt64_ssa, gtbr64_ssa, gt32_ssa, gtbr32_ssa, zero64, one64, inf64, nan64, 0x00000) + cmpOpTest(t, ">=", ge64_ssa, gebr64_ssa, ge32_ssa, gebr32_ssa, zero64, one64, inf64, nan64, 0x10000) + + { + lt, le, eq, ne, ge, gt := compares64_ssa(0.0, 1.0, inf64, nan64) + expectUint64(t, "lt", lt, 0x0110001000000000) + expectUint64(t, "le", le, 0x1110011000100000) + expectUint64(t, "eq", eq, 0x1000010000100000) + expectUint64(t, "ne", ne, 0x0111101111011111) + expectUint64(t, "ge", ge, 0x1000110011100000) + expectUint64(t, "gt", gt, 0x0000100011000000) + // fmt.Printf("lt=0x%016x, le=0x%016x, eq=0x%016x, ne=0x%016x, ge=0x%016x, gt=0x%016x\n", + // lt, le, eq, ne, ge, gt) + } + { + lt, le, eq, ne, ge, gt := compares32_ssa(0.0, 1.0, float32(inf64), float32(nan64)) + expectUint64(t, "lt", lt, 0x0110001000000000) + expectUint64(t, "le", le, 0x1110011000100000) + expectUint64(t, "eq", eq, 0x1000010000100000) + expectUint64(t, "ne", ne, 0x0111101111011111) + expectUint64(t, "ge", ge, 0x1000110011100000) + expectUint64(t, "gt", gt, 0x0000100011000000) + } + + floatingToIntegerConversionsTest(t) + complexTest128(t) + complexTest64(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/gen/arithBoundaryGen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/gen/arithBoundaryGen.go new file mode 100644 index 0000000000000000000000000000000000000000..b03c105d5fdcae98d4bfc3f542ec9c4dc3851eb5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/gen/arithBoundaryGen.go @@ -0,0 +1,208 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This program generates a test to verify that the standard arithmetic +// operators properly handle some special cases. The test file should be +// generated with a known working version of go. +// launch with `go run arithBoundaryGen.go` a file called arithBoundary.go +// will be written into the parent directory containing the tests + +package main + +import ( + "bytes" + "fmt" + "go/format" + "log" + "text/template" +) + +// used for interpolation in a text template +type tmplData struct { + Name, Stype, Symbol string +} + +// used to work around an issue with the mod symbol being +// interpreted as part of a format string +func (s tmplData) SymFirst() string { + return string(s.Symbol[0]) +} + +// ucast casts an unsigned int to the size in s +func ucast(i uint64, s sizedTestData) uint64 { + switch s.name { + case "uint32": + return uint64(uint32(i)) + case "uint16": + return uint64(uint16(i)) + case "uint8": + return uint64(uint8(i)) + } + return i +} + +// icast casts a signed int to the size in s +func icast(i int64, s sizedTestData) int64 { + switch s.name { + case "int32": + return int64(int32(i)) + case "int16": + return int64(int16(i)) + case "int8": + return int64(int8(i)) + } + return i +} + +type sizedTestData struct { + name string + sn string + u []uint64 + i []int64 +} + +// values to generate tests. these should include the smallest and largest values, along +// with any other values that might cause issues. we generate n^2 tests for each size to +// cover all cases. 
+var szs = []sizedTestData{ + sizedTestData{name: "uint64", sn: "64", u: []uint64{0, 1, 4294967296, 0xffffFFFFffffFFFF}}, + sizedTestData{name: "int64", sn: "64", i: []int64{-0x8000000000000000, -0x7FFFFFFFFFFFFFFF, + -4294967296, -1, 0, 1, 4294967296, 0x7FFFFFFFFFFFFFFE, 0x7FFFFFFFFFFFFFFF}}, + + sizedTestData{name: "uint32", sn: "32", u: []uint64{0, 1, 4294967295}}, + sizedTestData{name: "int32", sn: "32", i: []int64{-0x80000000, -0x7FFFFFFF, -1, 0, + 1, 0x7FFFFFFF}}, + + sizedTestData{name: "uint16", sn: "16", u: []uint64{0, 1, 65535}}, + sizedTestData{name: "int16", sn: "16", i: []int64{-32768, -32767, -1, 0, 1, 32766, 32767}}, + + sizedTestData{name: "uint8", sn: "8", u: []uint64{0, 1, 255}}, + sizedTestData{name: "int8", sn: "8", i: []int64{-128, -127, -1, 0, 1, 126, 127}}, +} + +type op struct { + name, symbol string +} + +// ops that we will be generating tests for +var ops = []op{op{"add", "+"}, op{"sub", "-"}, op{"div", "/"}, op{"mod", "%%"}, op{"mul", "*"}} + +func main() { + w := new(bytes.Buffer) + fmt.Fprintf(w, "// Code generated by gen/arithBoundaryGen.go. 
DO NOT EDIT.\n\n") + fmt.Fprintf(w, "package main;\n") + fmt.Fprintf(w, "import \"testing\"\n") + + for _, sz := range []int{64, 32, 16, 8} { + fmt.Fprintf(w, "type utd%d struct {\n", sz) + fmt.Fprintf(w, " a,b uint%d\n", sz) + fmt.Fprintf(w, " add,sub,mul,div,mod uint%d\n", sz) + fmt.Fprintf(w, "}\n") + + fmt.Fprintf(w, "type itd%d struct {\n", sz) + fmt.Fprintf(w, " a,b int%d\n", sz) + fmt.Fprintf(w, " add,sub,mul,div,mod int%d\n", sz) + fmt.Fprintf(w, "}\n") + } + + // the function being tested + testFunc, err := template.New("testFunc").Parse( + `//go:noinline + func {{.Name}}_{{.Stype}}_ssa(a, b {{.Stype}}) {{.Stype}} { + return a {{.SymFirst}} b +} +`) + if err != nil { + panic(err) + } + + // generate our functions to be tested + for _, s := range szs { + for _, o := range ops { + fd := tmplData{o.name, s.name, o.symbol} + err = testFunc.Execute(w, fd) + if err != nil { + panic(err) + } + } + } + + // generate the test data + for _, s := range szs { + if len(s.u) > 0 { + fmt.Fprintf(w, "var %s_data []utd%s = []utd%s{", s.name, s.sn, s.sn) + for _, i := range s.u { + for _, j := range s.u { + fmt.Fprintf(w, "utd%s{a: %d, b: %d, add: %d, sub: %d, mul: %d", s.sn, i, j, ucast(i+j, s), ucast(i-j, s), ucast(i*j, s)) + if j != 0 { + fmt.Fprintf(w, ", div: %d, mod: %d", ucast(i/j, s), ucast(i%j, s)) + } + fmt.Fprint(w, "},\n") + } + } + fmt.Fprintf(w, "}\n") + } else { + // TODO: clean up this duplication + fmt.Fprintf(w, "var %s_data []itd%s = []itd%s{", s.name, s.sn, s.sn) + for _, i := range s.i { + for _, j := range s.i { + fmt.Fprintf(w, "itd%s{a: %d, b: %d, add: %d, sub: %d, mul: %d", s.sn, i, j, icast(i+j, s), icast(i-j, s), icast(i*j, s)) + if j != 0 { + fmt.Fprintf(w, ", div: %d, mod: %d", icast(i/j, s), icast(i%j, s)) + } + fmt.Fprint(w, "},\n") + } + } + fmt.Fprintf(w, "}\n") + } + } + + fmt.Fprintf(w, "//TestArithmeticBoundary tests boundary results for arithmetic operations.\n") + fmt.Fprintf(w, "func TestArithmeticBoundary(t *testing.T) {\n\n") + + 
verify, err := template.New("tst").Parse( + `if got := {{.Name}}_{{.Stype}}_ssa(v.a, v.b); got != v.{{.Name}} { + t.Errorf("{{.Name}}_{{.Stype}} %d{{.Symbol}}%d = %d, wanted %d\n",v.a,v.b,got,v.{{.Name}}) +} +`) + + for _, s := range szs { + fmt.Fprintf(w, "for _, v := range %s_data {\n", s.name) + + for _, o := range ops { + // avoid generating tests that divide by zero + if o.name == "div" || o.name == "mod" { + fmt.Fprint(w, "if v.b != 0 {") + } + + err = verify.Execute(w, tmplData{o.name, s.name, o.symbol}) + + if o.name == "div" || o.name == "mod" { + fmt.Fprint(w, "\n}\n") + } + + if err != nil { + panic(err) + } + + } + fmt.Fprint(w, " }\n") + } + + fmt.Fprintf(w, "}\n") + + // gofmt result + b := w.Bytes() + src, err := format.Source(b) + if err != nil { + fmt.Printf("%s\n", b) + panic(err) + } + + // write to file + err = os.WriteFile("../arithBoundary_test.go", src, 0666) + if err != nil { + log.Fatalf("can't write output: %v\n", err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/gen/arithConstGen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/gen/arithConstGen.go new file mode 100644 index 0000000000000000000000000000000000000000..1649f4655760f5ad9351a2040e26bb458b9c1ec3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/gen/arithConstGen.go @@ -0,0 +1,345 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This program generates a test to verify that the standard arithmetic +// operators properly handle const cases. The test file should be +// generated with a known working version of go. 
+// launch with `go run arithConstGen.go` a file called arithConst.go
+// will be written into the parent directory containing the tests
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"go/format"
+	"log"
+	"os"
+	"strings"
+	"text/template"
+)
+
+type op struct {
+	name, symbol string
+}
+type szD struct {
+	name   string
+	sn     string
+	u      []uint64
+	i      []int64
+	oponly string
+}
+
+var szs = []szD{
+	{name: "uint64", sn: "64", u: []uint64{0, 1, 4294967296, 0x8000000000000000, 0xffffFFFFffffFFFF}},
+	{name: "uint64", sn: "64", u: []uint64{3, 5, 7, 9, 10, 11, 13, 19, 21, 25, 27, 37, 41, 45, 73, 81}, oponly: "mul"},
+
+	{name: "int64", sn: "64", i: []int64{-0x8000000000000000, -0x7FFFFFFFFFFFFFFF,
+		-4294967296, -1, 0, 1, 4294967296, 0x7FFFFFFFFFFFFFFE, 0x7FFFFFFFFFFFFFFF}},
+	{name: "int64", sn: "64", i: []int64{-9, -5, -3, 3, 5, 7, 9, 10, 11, 13, 19, 21, 25, 27, 37, 41, 45, 73, 81}, oponly: "mul"},
+
+	{name: "uint32", sn: "32", u: []uint64{0, 1, 4294967295}},
+	{name: "uint32", sn: "32", u: []uint64{3, 5, 7, 9, 10, 11, 13, 19, 21, 25, 27, 37, 41, 45, 73, 81}, oponly: "mul"},
+
+	{name: "int32", sn: "32", i: []int64{-0x80000000, -0x7FFFFFFF, -1, 0,
+		1, 0x7FFFFFFF}},
+	{name: "int32", sn: "32", i: []int64{-9, -5, -3, 3, 5, 7, 9, 10, 11, 13, 19, 21, 25, 27, 37, 41, 45, 73, 81}, oponly: "mul"},
+
+	{name: "uint16", sn: "16", u: []uint64{0, 1, 65535}},
+	{name: "int16", sn: "16", i: []int64{-32768, -32767, -1, 0, 1, 32766, 32767}},
+
+	{name: "uint8", sn: "8", u: []uint64{0, 1, 255}},
+	{name: "int8", sn: "8", i: []int64{-128, -127, -1, 0, 1, 126, 127}},
+}
+
+var ops = []op{
+	{"add", "+"},
+	{"sub", "-"},
+	{"div", "/"},
+	{"mul", "*"},
+	{"lsh", "<<"},
+	{"rsh", ">>"},
+	{"mod", "%"},
+	{"and", "&"},
+	{"or", "|"},
+	{"xor", "^"},
+}
+
+// compute the result of
+func ansU(i, j uint64, t, op string) string { + var ans uint64 + switch op { + case "+": + ans = i + j + case "-": + ans = i - j + case "*": + ans = i * j + case "/": + if j != 0 { + ans = i / j + } + case "%": + if j != 0 { + ans = i % j + } + case "<<": + ans = i << j + case ">>": + ans = i >> j + case "&": + ans = i & j + case "|": + ans = i | j + case "^": + ans = i ^ j + } + switch t { + case "uint32": + ans = uint64(uint32(ans)) + case "uint16": + ans = uint64(uint16(ans)) + case "uint8": + ans = uint64(uint8(ans)) + } + return fmt.Sprintf("%d", ans) +} + +// compute the result of i op j, cast as type t. +func ansS(i, j int64, t, op string) string { + var ans int64 + switch op { + case "+": + ans = i + j + case "-": + ans = i - j + case "*": + ans = i * j + case "/": + if j != 0 { + ans = i / j + } + case "%": + if j != 0 { + ans = i % j + } + case "<<": + ans = i << uint64(j) + case ">>": + ans = i >> uint64(j) + case "&": + ans = i & j + case "|": + ans = i | j + case "^": + ans = i ^ j + } + switch t { + case "int32": + ans = int64(int32(ans)) + case "int16": + ans = int64(int16(ans)) + case "int8": + ans = int64(int8(ans)) + } + return fmt.Sprintf("%d", ans) +} + +func main() { + w := new(bytes.Buffer) + fmt.Fprintf(w, "// Code generated by gen/arithConstGen.go. 
DO NOT EDIT.\n\n") + fmt.Fprintf(w, "package main;\n") + fmt.Fprintf(w, "import \"testing\"\n") + + fncCnst1 := template.Must(template.New("fnc").Parse( + `//go:noinline +func {{.Name}}_{{.Type_}}_{{.FNumber}}(a {{.Type_}}) {{.Type_}} { return a {{.Symbol}} {{.Number}} } +`)) + fncCnst2 := template.Must(template.New("fnc").Parse( + `//go:noinline +func {{.Name}}_{{.FNumber}}_{{.Type_}}(a {{.Type_}}) {{.Type_}} { return {{.Number}} {{.Symbol}} a } +`)) + + type fncData struct { + Name, Type_, Symbol, FNumber, Number string + } + + for _, s := range szs { + for _, o := range ops { + if s.oponly != "" && s.oponly != o.name { + continue + } + fd := fncData{o.name, s.name, o.symbol, "", ""} + + // unsigned test cases + if len(s.u) > 0 { + for _, i := range s.u { + fd.Number = fmt.Sprintf("%d", i) + fd.FNumber = strings.Replace(fd.Number, "-", "Neg", -1) + + // avoid division by zero + if o.name != "mod" && o.name != "div" || i != 0 { + // introduce uint64 cast for rhs shift operands + // if they are too large for default uint type + number := fd.Number + if (o.name == "lsh" || o.name == "rsh") && uint64(uint32(i)) != i { + fd.Number = fmt.Sprintf("uint64(%s)", number) + } + fncCnst1.Execute(w, fd) + fd.Number = number + } + + fncCnst2.Execute(w, fd) + } + } + + // signed test cases + if len(s.i) > 0 { + // don't generate tests for shifts by signed integers + if o.name == "lsh" || o.name == "rsh" { + continue + } + for _, i := range s.i { + fd.Number = fmt.Sprintf("%d", i) + fd.FNumber = strings.Replace(fd.Number, "-", "Neg", -1) + + // avoid division by zero + if o.name != "mod" && o.name != "div" || i != 0 { + fncCnst1.Execute(w, fd) + } + fncCnst2.Execute(w, fd) + } + } + } + } + + vrf1 := template.Must(template.New("vrf1").Parse(` + test_{{.Size}}{fn: {{.Name}}_{{.FNumber}}_{{.Type_}}, fnname: "{{.Name}}_{{.FNumber}}_{{.Type_}}", in: {{.Input}}, want: {{.Ans}}},`)) + + vrf2 := template.Must(template.New("vrf2").Parse(` + test_{{.Size}}{fn: 
{{.Name}}_{{.Type_}}_{{.FNumber}}, fnname: "{{.Name}}_{{.Type_}}_{{.FNumber}}", in: {{.Input}}, want: {{.Ans}}},`)) + + type cfncData struct { + Size, Name, Type_, Symbol, FNumber, Number string + Ans, Input string + } + for _, s := range szs { + fmt.Fprintf(w, ` +type test_%[1]s%[2]s struct { + fn func (%[1]s) %[1]s + fnname string + in %[1]s + want %[1]s +} +`, s.name, s.oponly) + fmt.Fprintf(w, "var tests_%[1]s%[2]s =[]test_%[1]s {\n\n", s.name, s.oponly) + + if len(s.u) > 0 { + for _, o := range ops { + if s.oponly != "" && s.oponly != o.name { + continue + } + fd := cfncData{s.name, o.name, s.name, o.symbol, "", "", "", ""} + for _, i := range s.u { + fd.Number = fmt.Sprintf("%d", i) + fd.FNumber = strings.Replace(fd.Number, "-", "Neg", -1) + + // unsigned + for _, j := range s.u { + + if o.name != "mod" && o.name != "div" || j != 0 { + fd.Ans = ansU(i, j, s.name, o.symbol) + fd.Input = fmt.Sprintf("%d", j) + if err := vrf1.Execute(w, fd); err != nil { + panic(err) + } + } + + if o.name != "mod" && o.name != "div" || i != 0 { + fd.Ans = ansU(j, i, s.name, o.symbol) + fd.Input = fmt.Sprintf("%d", j) + if err := vrf2.Execute(w, fd); err != nil { + panic(err) + } + } + + } + } + + } + } + + // signed + if len(s.i) > 0 { + for _, o := range ops { + if s.oponly != "" && s.oponly != o.name { + continue + } + // don't generate tests for shifts by signed integers + if o.name == "lsh" || o.name == "rsh" { + continue + } + fd := cfncData{s.name, o.name, s.name, o.symbol, "", "", "", ""} + for _, i := range s.i { + fd.Number = fmt.Sprintf("%d", i) + fd.FNumber = strings.Replace(fd.Number, "-", "Neg", -1) + for _, j := range s.i { + if o.name != "mod" && o.name != "div" || j != 0 { + fd.Ans = ansS(i, j, s.name, o.symbol) + fd.Input = fmt.Sprintf("%d", j) + if err := vrf1.Execute(w, fd); err != nil { + panic(err) + } + } + + if o.name != "mod" && o.name != "div" || i != 0 { + fd.Ans = ansS(j, i, s.name, o.symbol) + fd.Input = fmt.Sprintf("%d", j) + if err := 
vrf2.Execute(w, fd); err != nil { + panic(err) + } + } + + } + } + + } + } + + fmt.Fprintf(w, "}\n\n") + } + + fmt.Fprint(w, ` + +// TestArithmeticConst tests results for arithmetic operations against constants. +func TestArithmeticConst(t *testing.T) { +`) + + for _, s := range szs { + fmt.Fprintf(w, `for _, test := range tests_%s%s {`, s.name, s.oponly) + // Use WriteString here to avoid a vet warning about formatting directives. + w.WriteString(`if got := test.fn(test.in); got != test.want { + t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want) + } + } +`) + } + + fmt.Fprint(w, ` +} +`) + + // gofmt result + b := w.Bytes() + src, err := format.Source(b) + if err != nil { + fmt.Printf("%s\n", b) + panic(err) + } + + // write to file + err = os.WriteFile("../arithConst_test.go", src, 0666) + if err != nil { + log.Fatalf("can't write output: %v\n", err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/gen/cmpConstGen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/gen/cmpConstGen.go new file mode 100644 index 0000000000000000000000000000000000000000..dcdafc032293be67352dc5aaa07362a615ae0e98 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/gen/cmpConstGen.go @@ -0,0 +1,246 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This program generates a test to verify that the standard comparison +// operators properly handle one const operand. The test file should be +// generated with a known working version of go. 
+// launch with `go run cmpConstGen.go` a file called cmpConst.go
+// will be written into the parent directory containing the tests
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"go/format"
+	"log"
+	"math/big"
+	"os"
+	"sort"
+)
+
+const (
+	maxU64 = (1 << 64) - 1
+	maxU32 = (1 << 32) - 1
+	maxU16 = (1 << 16) - 1
+	maxU8  = (1 << 8) - 1
+
+	maxI64 = (1 << 63) - 1
+	maxI32 = (1 << 31) - 1
+	maxI16 = (1 << 15) - 1
+	maxI8  = (1 << 7) - 1
+
+	minI64 = -(1 << 63)
+	minI32 = -(1 << 31)
+	minI16 = -(1 << 15)
+	minI8  = -(1 << 7)
+)
+
+func cmp(left *big.Int, op string, right *big.Int) bool {
+	switch left.Cmp(right) {
+	case -1: // less than
+		return op == "<" || op == "<=" || op == "!="
+	case 0: // equal
+		return op == "==" || op == "<=" || op == ">="
+	case 1: // greater than
+		return op == ">" || op == ">=" || op == "!="
+	}
+	panic("unexpected comparison value")
+}
+
+func inRange(typ string, val *big.Int) bool {
+	min, max := &big.Int{}, &big.Int{}
+	switch typ {
+	case "uint64":
+		max = max.SetUint64(maxU64)
+	case "uint32":
+		max = max.SetUint64(maxU32)
+	case "uint16":
+		max = max.SetUint64(maxU16)
+	case "uint8":
+		max = max.SetUint64(maxU8)
+	case "int64":
+		min = min.SetInt64(minI64)
+		max = max.SetInt64(maxI64)
+	case "int32":
+		min = min.SetInt64(minI32)
+		max = max.SetInt64(maxI32)
+	case "int16":
+		min = min.SetInt64(minI16)
+		max = max.SetInt64(maxI16)
+	case "int8":
+		min = min.SetInt64(minI8)
+		max = max.SetInt64(maxI8)
+	default:
+		panic("unexpected type")
+	}
+	return cmp(min, "<=", val) && cmp(val, "<=", max)
+}
+
+func getValues(typ string) []*big.Int {
+	Uint := func(v uint64) *big.Int { return big.NewInt(0).SetUint64(v) }
+	Int := func(v int64) *big.Int { return big.NewInt(0).SetInt64(v) }
+	values := []*big.Int{
+		// limits
+		Uint(maxU64),
+		Uint(maxU64 - 1),
+		Uint(maxI64 + 1),
+		Uint(maxI64),
+		Uint(maxI64 - 1),
+		Uint(maxU32 + 1),
+		Uint(maxU32),
+		Uint(maxU32 - 1),
+		Uint(maxI32 + 1),
+		Uint(maxI32),
+		Uint(maxI32 - 1),
+		Uint(maxU16 + 1),
Uint(maxU16), + Uint(maxU16 - 1), + Uint(maxI16 + 1), + Uint(maxI16), + Uint(maxI16 - 1), + Uint(maxU8 + 1), + Uint(maxU8), + Uint(maxU8 - 1), + Uint(maxI8 + 1), + Uint(maxI8), + Uint(maxI8 - 1), + Uint(0), + Int(minI8 + 1), + Int(minI8), + Int(minI8 - 1), + Int(minI16 + 1), + Int(minI16), + Int(minI16 - 1), + Int(minI32 + 1), + Int(minI32), + Int(minI32 - 1), + Int(minI64 + 1), + Int(minI64), + + // other possibly interesting values + Uint(1), + Int(-1), + Uint(0xff << 56), + Uint(0xff << 32), + Uint(0xff << 24), + } + sort.Slice(values, func(i, j int) bool { return values[i].Cmp(values[j]) == -1 }) + var ret []*big.Int + for _, val := range values { + if !inRange(typ, val) { + continue + } + ret = append(ret, val) + } + return ret +} + +func sigString(v *big.Int) string { + var t big.Int + t.Abs(v) + if v.Sign() == -1 { + return "neg" + t.String() + } + return t.String() +} + +func main() { + types := []string{ + "uint64", "uint32", "uint16", "uint8", + "int64", "int32", "int16", "int8", + } + + w := new(bytes.Buffer) + fmt.Fprintf(w, "// Code generated by gen/cmpConstGen.go. 
DO NOT EDIT.\n\n") + fmt.Fprintf(w, "package main;\n") + fmt.Fprintf(w, "import (\"testing\"; \"reflect\"; \"runtime\";)\n") + fmt.Fprintf(w, "// results show the expected result for the elements left of, equal to and right of the index.\n") + fmt.Fprintf(w, "type result struct{l, e, r bool}\n") + fmt.Fprintf(w, "var (\n") + fmt.Fprintf(w, " eq = result{l: false, e: true, r: false}\n") + fmt.Fprintf(w, " ne = result{l: true, e: false, r: true}\n") + fmt.Fprintf(w, " lt = result{l: true, e: false, r: false}\n") + fmt.Fprintf(w, " le = result{l: true, e: true, r: false}\n") + fmt.Fprintf(w, " gt = result{l: false, e: false, r: true}\n") + fmt.Fprintf(w, " ge = result{l: false, e: true, r: true}\n") + fmt.Fprintf(w, ")\n") + + operators := []struct{ op, name string }{ + {"<", "lt"}, + {"<=", "le"}, + {">", "gt"}, + {">=", "ge"}, + {"==", "eq"}, + {"!=", "ne"}, + } + + for _, typ := range types { + // generate a slice containing valid values for this type + fmt.Fprintf(w, "\n// %v tests\n", typ) + values := getValues(typ) + fmt.Fprintf(w, "var %v_vals = []%v{\n", typ, typ) + for _, val := range values { + fmt.Fprintf(w, "%v,\n", val.String()) + } + fmt.Fprintf(w, "}\n") + + // generate test functions + for _, r := range values { + // TODO: could also test constant on lhs. 
+ sig := sigString(r) + for _, op := range operators { + // no need for go:noinline because the function is called indirectly + fmt.Fprintf(w, "func %v_%v_%v(x %v) bool { return x %v %v; }\n", op.name, sig, typ, typ, op.op, r.String()) + } + } + + // generate a table of test cases + fmt.Fprintf(w, "var %v_tests = []struct{\n", typ) + fmt.Fprintf(w, " idx int // index of the constant used\n") + fmt.Fprintf(w, " exp result // expected results\n") + fmt.Fprintf(w, " fn func(%v) bool\n", typ) + fmt.Fprintf(w, "}{\n") + for i, r := range values { + sig := sigString(r) + for _, op := range operators { + fmt.Fprintf(w, "{idx: %v,", i) + fmt.Fprintf(w, "exp: %v,", op.name) + fmt.Fprintf(w, "fn: %v_%v_%v},\n", op.name, sig, typ) + } + } + fmt.Fprintf(w, "}\n") + } + + // emit the main function, looping over all test cases + fmt.Fprintf(w, "// TestComparisonsConst tests results for comparison operations against constants.\n") + fmt.Fprintf(w, "func TestComparisonsConst(t *testing.T) {\n") + for _, typ := range types { + fmt.Fprintf(w, "for i, test := range %v_tests {\n", typ) + fmt.Fprintf(w, " for j, x := range %v_vals {\n", typ) + fmt.Fprintf(w, " want := test.exp.l\n") + fmt.Fprintf(w, " if j == test.idx {\nwant = test.exp.e\n}") + fmt.Fprintf(w, " else if j > test.idx {\nwant = test.exp.r\n}\n") + fmt.Fprintf(w, " if test.fn(x) != want {\n") + fmt.Fprintf(w, " fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()\n") + fmt.Fprintf(w, " t.Errorf(\"test failed: %%v(%%v) != %%v [type=%v i=%%v j=%%v idx=%%v]\", fn, x, want, i, j, test.idx)\n", typ) + fmt.Fprintf(w, " }\n") + fmt.Fprintf(w, " }\n") + fmt.Fprintf(w, "}\n") + } + fmt.Fprintf(w, "}\n") + + // gofmt result + b := w.Bytes() + src, err := format.Source(b) + if err != nil { + fmt.Printf("%s\n", b) + panic(err) + } + + // write to file + err = os.WriteFile("../cmpConst_test.go", src, 0666) + if err != nil { + log.Fatalf("can't write output: %v\n", err) + } +} diff --git 
a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/gen/constFoldGen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/gen/constFoldGen.go new file mode 100644 index 0000000000000000000000000000000000000000..70794222a6ec6441f84c4feb4725363fb02d58a8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/gen/constFoldGen.go @@ -0,0 +1,307 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This program generates a test to verify that the standard arithmetic +// operators properly handle constant folding. The test file should be +// generated with a known working version of go. +// launch with `go run constFoldGen.go` a file called constFold_test.go +// will be written into the grandparent directory containing the tests. + +package main + +import ( + "bytes" + "fmt" + "go/format" + "log" + "os" +) + +type op struct { + name, symbol string +} +type szD struct { + name string + sn string + u []uint64 + i []int64 +} + +var szs []szD = []szD{ + szD{name: "uint64", sn: "64", u: []uint64{0, 1, 4294967296, 0xffffFFFFffffFFFF}}, + szD{name: "int64", sn: "64", i: []int64{-0x8000000000000000, -0x7FFFFFFFFFFFFFFF, + -4294967296, -1, 0, 1, 4294967296, 0x7FFFFFFFFFFFFFFE, 0x7FFFFFFFFFFFFFFF}}, + + szD{name: "uint32", sn: "32", u: []uint64{0, 1, 4294967295}}, + szD{name: "int32", sn: "32", i: []int64{-0x80000000, -0x7FFFFFFF, -1, 0, + 1, 0x7FFFFFFF}}, + + szD{name: "uint16", sn: "16", u: []uint64{0, 1, 65535}}, + szD{name: "int16", sn: "16", i: []int64{-32768, -32767, -1, 0, 1, 32766, 32767}}, + + szD{name: "uint8", sn: "8", u: []uint64{0, 1, 255}}, + szD{name: "int8", sn: "8", i: []int64{-128, -127, -1, 0, 1, 126, 127}}, +} + +var ops = []op{ + op{"add", "+"}, op{"sub", "-"}, op{"div", "/"}, op{"mul", "*"}, + op{"lsh", "<<"}, op{"rsh", ">>"}, op{"mod", "%"}, +} + +// compute the result of 
i op j, cast as type t. +func ansU(i, j uint64, t, op string) string { + var ans uint64 + switch op { + case "+": + ans = i + j + case "-": + ans = i - j + case "*": + ans = i * j + case "/": + if j != 0 { + ans = i / j + } + case "%": + if j != 0 { + ans = i % j + } + case "<<": + ans = i << j + case ">>": + ans = i >> j + } + switch t { + case "uint32": + ans = uint64(uint32(ans)) + case "uint16": + ans = uint64(uint16(ans)) + case "uint8": + ans = uint64(uint8(ans)) + } + return fmt.Sprintf("%d", ans) +} + +// compute the result of i op j, cast as type t. +func ansS(i, j int64, t, op string) string { + var ans int64 + switch op { + case "+": + ans = i + j + case "-": + ans = i - j + case "*": + ans = i * j + case "/": + if j != 0 { + ans = i / j + } + case "%": + if j != 0 { + ans = i % j + } + case "<<": + ans = i << uint64(j) + case ">>": + ans = i >> uint64(j) + } + switch t { + case "int32": + ans = int64(int32(ans)) + case "int16": + ans = int64(int16(ans)) + case "int8": + ans = int64(int8(ans)) + } + return fmt.Sprintf("%d", ans) +} + +func main() { + w := new(bytes.Buffer) + fmt.Fprintf(w, "// run\n") + fmt.Fprintf(w, "// Code generated by gen/constFoldGen.go. DO NOT EDIT.\n\n") + fmt.Fprintf(w, "package gc\n") + fmt.Fprintf(w, "import \"testing\"\n") + + for _, s := range szs { + for _, o := range ops { + if o.symbol == "<<" || o.symbol == ">>" { + // shifts handled separately below, as they can have + // different types on the LHS and RHS. 
+ continue + } + fmt.Fprintf(w, "func TestConstFold%s%s(t *testing.T) {\n", s.name, o.name) + fmt.Fprintf(w, "\tvar x, y, r %s\n", s.name) + // unsigned test cases + for _, c := range s.u { + fmt.Fprintf(w, "\tx = %d\n", c) + for _, d := range s.u { + if d == 0 && (o.symbol == "/" || o.symbol == "%") { + continue + } + fmt.Fprintf(w, "\ty = %d\n", d) + fmt.Fprintf(w, "\tr = x %s y\n", o.symbol) + want := ansU(c, d, s.name, o.symbol) + fmt.Fprintf(w, "\tif r != %s {\n", want) + fmt.Fprintf(w, "\t\tt.Errorf(\"%d %%s %d = %%d, want %s\", %q, r)\n", c, d, want, o.symbol) + fmt.Fprintf(w, "\t}\n") + } + } + // signed test cases + for _, c := range s.i { + fmt.Fprintf(w, "\tx = %d\n", c) + for _, d := range s.i { + if d == 0 && (o.symbol == "/" || o.symbol == "%") { + continue + } + fmt.Fprintf(w, "\ty = %d\n", d) + fmt.Fprintf(w, "\tr = x %s y\n", o.symbol) + want := ansS(c, d, s.name, o.symbol) + fmt.Fprintf(w, "\tif r != %s {\n", want) + fmt.Fprintf(w, "\t\tt.Errorf(\"%d %%s %d = %%d, want %s\", %q, r)\n", c, d, want, o.symbol) + fmt.Fprintf(w, "\t}\n") + } + } + fmt.Fprintf(w, "}\n") + } + } + + // Special signed/unsigned cases for shifts + for _, ls := range szs { + for _, rs := range szs { + if rs.name[0] != 'u' { + continue + } + for _, o := range ops { + if o.symbol != "<<" && o.symbol != ">>" { + continue + } + fmt.Fprintf(w, "func TestConstFold%s%s%s(t *testing.T) {\n", ls.name, rs.name, o.name) + fmt.Fprintf(w, "\tvar x, r %s\n", ls.name) + fmt.Fprintf(w, "\tvar y %s\n", rs.name) + // unsigned LHS + for _, c := range ls.u { + fmt.Fprintf(w, "\tx = %d\n", c) + for _, d := range rs.u { + fmt.Fprintf(w, "\ty = %d\n", d) + fmt.Fprintf(w, "\tr = x %s y\n", o.symbol) + want := ansU(c, d, ls.name, o.symbol) + fmt.Fprintf(w, "\tif r != %s {\n", want) + fmt.Fprintf(w, "\t\tt.Errorf(\"%d %%s %d = %%d, want %s\", %q, r)\n", c, d, want, o.symbol) + fmt.Fprintf(w, "\t}\n") + } + } + // signed LHS + for _, c := range ls.i { + fmt.Fprintf(w, "\tx = %d\n", c) + for _, d := 
range rs.u { + fmt.Fprintf(w, "\ty = %d\n", d) + fmt.Fprintf(w, "\tr = x %s y\n", o.symbol) + want := ansS(c, int64(d), ls.name, o.symbol) + fmt.Fprintf(w, "\tif r != %s {\n", want) + fmt.Fprintf(w, "\t\tt.Errorf(\"%d %%s %d = %%d, want %s\", %q, r)\n", c, d, want, o.symbol) + fmt.Fprintf(w, "\t}\n") + } + } + fmt.Fprintf(w, "}\n") + } + } + } + + // Constant folding for comparisons + for _, s := range szs { + fmt.Fprintf(w, "func TestConstFoldCompare%s(t *testing.T) {\n", s.name) + for _, x := range s.i { + for _, y := range s.i { + fmt.Fprintf(w, "\t{\n") + fmt.Fprintf(w, "\t\tvar x %s = %d\n", s.name, x) + fmt.Fprintf(w, "\t\tvar y %s = %d\n", s.name, y) + if x == y { + fmt.Fprintf(w, "\t\tif !(x == y) { t.Errorf(\"!(%%d == %%d)\", x, y) }\n") + } else { + fmt.Fprintf(w, "\t\tif x == y { t.Errorf(\"%%d == %%d\", x, y) }\n") + } + if x != y { + fmt.Fprintf(w, "\t\tif !(x != y) { t.Errorf(\"!(%%d != %%d)\", x, y) }\n") + } else { + fmt.Fprintf(w, "\t\tif x != y { t.Errorf(\"%%d != %%d\", x, y) }\n") + } + if x < y { + fmt.Fprintf(w, "\t\tif !(x < y) { t.Errorf(\"!(%%d < %%d)\", x, y) }\n") + } else { + fmt.Fprintf(w, "\t\tif x < y { t.Errorf(\"%%d < %%d\", x, y) }\n") + } + if x > y { + fmt.Fprintf(w, "\t\tif !(x > y) { t.Errorf(\"!(%%d > %%d)\", x, y) }\n") + } else { + fmt.Fprintf(w, "\t\tif x > y { t.Errorf(\"%%d > %%d\", x, y) }\n") + } + if x <= y { + fmt.Fprintf(w, "\t\tif !(x <= y) { t.Errorf(\"!(%%d <= %%d)\", x, y) }\n") + } else { + fmt.Fprintf(w, "\t\tif x <= y { t.Errorf(\"%%d <= %%d\", x, y) }\n") + } + if x >= y { + fmt.Fprintf(w, "\t\tif !(x >= y) { t.Errorf(\"!(%%d >= %%d)\", x, y) }\n") + } else { + fmt.Fprintf(w, "\t\tif x >= y { t.Errorf(\"%%d >= %%d\", x, y) }\n") + } + fmt.Fprintf(w, "\t}\n") + } + } + for _, x := range s.u { + for _, y := range s.u { + fmt.Fprintf(w, "\t{\n") + fmt.Fprintf(w, "\t\tvar x %s = %d\n", s.name, x) + fmt.Fprintf(w, "\t\tvar y %s = %d\n", s.name, y) + if x == y { + fmt.Fprintf(w, "\t\tif !(x == y) { t.Errorf(\"!(%%d 
== %%d)\", x, y) }\n") + } else { + fmt.Fprintf(w, "\t\tif x == y { t.Errorf(\"%%d == %%d\", x, y) }\n") + } + if x != y { + fmt.Fprintf(w, "\t\tif !(x != y) { t.Errorf(\"!(%%d != %%d)\", x, y) }\n") + } else { + fmt.Fprintf(w, "\t\tif x != y { t.Errorf(\"%%d != %%d\", x, y) }\n") + } + if x < y { + fmt.Fprintf(w, "\t\tif !(x < y) { t.Errorf(\"!(%%d < %%d)\", x, y) }\n") + } else { + fmt.Fprintf(w, "\t\tif x < y { t.Errorf(\"%%d < %%d\", x, y) }\n") + } + if x > y { + fmt.Fprintf(w, "\t\tif !(x > y) { t.Errorf(\"!(%%d > %%d)\", x, y) }\n") + } else { + fmt.Fprintf(w, "\t\tif x > y { t.Errorf(\"%%d > %%d\", x, y) }\n") + } + if x <= y { + fmt.Fprintf(w, "\t\tif !(x <= y) { t.Errorf(\"!(%%d <= %%d)\", x, y) }\n") + } else { + fmt.Fprintf(w, "\t\tif x <= y { t.Errorf(\"%%d <= %%d\", x, y) }\n") + } + if x >= y { + fmt.Fprintf(w, "\t\tif !(x >= y) { t.Errorf(\"!(%%d >= %%d)\", x, y) }\n") + } else { + fmt.Fprintf(w, "\t\tif x >= y { t.Errorf(\"%%d >= %%d\", x, y) }\n") + } + fmt.Fprintf(w, "\t}\n") + } + } + fmt.Fprintf(w, "}\n") + } + + // gofmt result + b := w.Bytes() + src, err := format.Source(b) + if err != nil { + fmt.Printf("%s\n", b) + panic(err) + } + + // write to file + err = os.WriteFile("../../constFold_test.go", src, 0666) + if err != nil { + log.Fatalf("can't write output: %v\n", err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/gen/copyGen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/gen/copyGen.go new file mode 100644 index 0000000000000000000000000000000000000000..dd09b3bdbc9b64d0fb2776f192f48d34910e0bd2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/gen/copyGen.go @@ -0,0 +1,121 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "bytes" + "fmt" + "go/format" + "log" + "os" +) + +// This program generates tests to verify that copying operations +// copy the data they are supposed to and clobber no adjacent values. + +// run as `go run copyGen.go`. A file called copy.go +// will be written into the parent directory containing the tests. + +var sizes = [...]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 23, 24, 25, 31, 32, 33, 63, 64, 65, 1023, 1024, 1025, 1024 + 7, 1024 + 8, 1024 + 9, 1024 + 15, 1024 + 16, 1024 + 17} + +var usizes = [...]int{2, 3, 4, 5, 6, 7} + +func main() { + w := new(bytes.Buffer) + fmt.Fprintf(w, "// Code generated by gen/copyGen.go. DO NOT EDIT.\n\n") + fmt.Fprintf(w, "package main\n") + fmt.Fprintf(w, "import \"testing\"\n") + + for _, s := range sizes { + // type for test + fmt.Fprintf(w, "type T%d struct {\n", s) + fmt.Fprintf(w, " pre [8]byte\n") + fmt.Fprintf(w, " mid [%d]byte\n", s) + fmt.Fprintf(w, " post [8]byte\n") + fmt.Fprintf(w, "}\n") + + // function being tested + fmt.Fprintf(w, "//go:noinline\n") + fmt.Fprintf(w, "func t%dcopy_ssa(y, x *[%d]byte) {\n", s, s) + fmt.Fprintf(w, " *y = *x\n") + fmt.Fprintf(w, "}\n") + + // testing harness + fmt.Fprintf(w, "func testCopy%d(t *testing.T) {\n", s) + fmt.Fprintf(w, " a := T%d{[8]byte{201, 202, 203, 204, 205, 206, 207, 208},[%d]byte{", s, s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, "%d,", i%100) + } + fmt.Fprintf(w, "},[8]byte{211, 212, 213, 214, 215, 216, 217, 218}}\n") + fmt.Fprintf(w, " x := [%d]byte{", s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, "%d,", 100+i%100) + } + fmt.Fprintf(w, "}\n") + fmt.Fprintf(w, " t%dcopy_ssa(&a.mid, &x)\n", s) + fmt.Fprintf(w, " want := T%d{[8]byte{201, 202, 203, 204, 205, 206, 207, 208},[%d]byte{", s, s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, "%d,", 100+i%100) + } + fmt.Fprintf(w, "},[8]byte{211, 212, 213, 214, 215, 216, 217, 218}}\n") + fmt.Fprintf(w, " if a != want {\n") + fmt.Fprintf(w, " t.Errorf(\"t%dcopy got=%%v, want %%v\\n\", a, 
want)\n", s) + fmt.Fprintf(w, " }\n") + fmt.Fprintf(w, "}\n") + } + + for _, s := range usizes { + // function being tested + fmt.Fprintf(w, "//go:noinline\n") + fmt.Fprintf(w, "func tu%dcopy_ssa(docopy bool, data [%d]byte, x *[%d]byte) {\n", s, s, s) + fmt.Fprintf(w, " if docopy {\n") + fmt.Fprintf(w, " *x = data\n") + fmt.Fprintf(w, " }\n") + fmt.Fprintf(w, "}\n") + + // testing harness + fmt.Fprintf(w, "func testUnalignedCopy%d(t *testing.T) {\n", s) + fmt.Fprintf(w, " var a [%d]byte\n", s) + fmt.Fprintf(w, " t%d := [%d]byte{", s, s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, " %d,", s+i) + } + fmt.Fprintf(w, "}\n") + fmt.Fprintf(w, " tu%dcopy_ssa(true, t%d, &a)\n", s, s) + fmt.Fprintf(w, " want%d := [%d]byte{", s, s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, " %d,", s+i) + } + fmt.Fprintf(w, "}\n") + fmt.Fprintf(w, " if a != want%d {\n", s) + fmt.Fprintf(w, " t.Errorf(\"tu%dcopy got=%%v, want %%v\\n\", a, want%d)\n", s, s) + fmt.Fprintf(w, " }\n") + fmt.Fprintf(w, "}\n") + } + + // boilerplate at end + fmt.Fprintf(w, "func TestCopy(t *testing.T) {\n") + for _, s := range sizes { + fmt.Fprintf(w, " testCopy%d(t)\n", s) + } + for _, s := range usizes { + fmt.Fprintf(w, " testUnalignedCopy%d(t)\n", s) + } + fmt.Fprintf(w, "}\n") + + // gofmt result + b := w.Bytes() + src, err := format.Source(b) + if err != nil { + fmt.Printf("%s\n", b) + panic(err) + } + + // write to file + err = os.WriteFile("../copy_test.go", src, 0666) + if err != nil { + log.Fatalf("can't write output: %v\n", err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/gen/zeroGen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/gen/zeroGen.go new file mode 100644 index 0000000000000000000000000000000000000000..f3dcaa19d72782266c183f234c31d373bdf4432a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/gen/zeroGen.go @@ -0,0 +1,143 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bytes" + "fmt" + "go/format" + "log" + "os" +) + +// This program generates tests to verify that zeroing operations +// zero the data they are supposed to and clobber no adjacent values. + +// run as `go run zeroGen.go`. A file called zero.go +// will be written into the parent directory containing the tests. + +var sizes = [...]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 23, 24, 25, 31, 32, 33, 63, 64, 65, 1023, 1024, 1025} +var usizes = [...]int{8, 16, 24, 32, 64, 256} + +func main() { + w := new(bytes.Buffer) + fmt.Fprintf(w, "// Code generated by gen/zeroGen.go. DO NOT EDIT.\n\n") + fmt.Fprintf(w, "package main\n") + fmt.Fprintf(w, "import \"testing\"\n") + + for _, s := range sizes { + // type for test + fmt.Fprintf(w, "type Z%d struct {\n", s) + fmt.Fprintf(w, " pre [8]byte\n") + fmt.Fprintf(w, " mid [%d]byte\n", s) + fmt.Fprintf(w, " post [8]byte\n") + fmt.Fprintf(w, "}\n") + + // function being tested + fmt.Fprintf(w, "//go:noinline\n") + fmt.Fprintf(w, "func zero%d_ssa(x *[%d]byte) {\n", s, s) + fmt.Fprintf(w, " *x = [%d]byte{}\n", s) + fmt.Fprintf(w, "}\n") + + // testing harness + fmt.Fprintf(w, "func testZero%d(t *testing.T) {\n", s) + fmt.Fprintf(w, " a := Z%d{[8]byte{255,255,255,255,255,255,255,255},[%d]byte{", s, s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, "255,") + } + fmt.Fprintf(w, "},[8]byte{255,255,255,255,255,255,255,255}}\n") + fmt.Fprintf(w, " zero%d_ssa(&a.mid)\n", s) + fmt.Fprintf(w, " want := Z%d{[8]byte{255,255,255,255,255,255,255,255},[%d]byte{", s, s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, "0,") + } + fmt.Fprintf(w, "},[8]byte{255,255,255,255,255,255,255,255}}\n") + fmt.Fprintf(w, " if a != want {\n") + fmt.Fprintf(w, " t.Errorf(\"zero%d got=%%v, want %%v\\n\", a, want)\n", s) + fmt.Fprintf(w, " }\n") + fmt.Fprintf(w, "}\n") + } + + for _, s := range usizes { + // type for test + 
fmt.Fprintf(w, "type Z%du1 struct {\n", s) + fmt.Fprintf(w, " b bool\n") + fmt.Fprintf(w, " val [%d]byte\n", s) + fmt.Fprintf(w, "}\n") + + fmt.Fprintf(w, "type Z%du2 struct {\n", s) + fmt.Fprintf(w, " i uint16\n") + fmt.Fprintf(w, " val [%d]byte\n", s) + fmt.Fprintf(w, "}\n") + + // function being tested + fmt.Fprintf(w, "//go:noinline\n") + fmt.Fprintf(w, "func zero%du1_ssa(t *Z%du1) {\n", s, s) + fmt.Fprintf(w, " t.val = [%d]byte{}\n", s) + fmt.Fprintf(w, "}\n") + + // function being tested + fmt.Fprintf(w, "//go:noinline\n") + fmt.Fprintf(w, "func zero%du2_ssa(t *Z%du2) {\n", s, s) + fmt.Fprintf(w, " t.val = [%d]byte{}\n", s) + fmt.Fprintf(w, "}\n") + + // testing harness + fmt.Fprintf(w, "func testZero%du(t *testing.T) {\n", s) + fmt.Fprintf(w, " a := Z%du1{false, [%d]byte{", s, s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, "255,") + } + fmt.Fprintf(w, "}}\n") + fmt.Fprintf(w, " zero%du1_ssa(&a)\n", s) + fmt.Fprintf(w, " want := Z%du1{false, [%d]byte{", s, s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, "0,") + } + fmt.Fprintf(w, "}}\n") + fmt.Fprintf(w, " if a != want {\n") + fmt.Fprintf(w, " t.Errorf(\"zero%du2 got=%%v, want %%v\\n\", a, want)\n", s) + fmt.Fprintf(w, " }\n") + fmt.Fprintf(w, " b := Z%du2{15, [%d]byte{", s, s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, "255,") + } + fmt.Fprintf(w, "}}\n") + fmt.Fprintf(w, " zero%du2_ssa(&b)\n", s) + fmt.Fprintf(w, " wantb := Z%du2{15, [%d]byte{", s, s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, "0,") + } + fmt.Fprintf(w, "}}\n") + fmt.Fprintf(w, " if b != wantb {\n") + fmt.Fprintf(w, " t.Errorf(\"zero%du2 got=%%v, want %%v\\n\", b, wantb)\n", s) + fmt.Fprintf(w, " }\n") + fmt.Fprintf(w, "}\n") + } + + // boilerplate at end + fmt.Fprintf(w, "func TestZero(t *testing.T) {\n") + for _, s := range sizes { + fmt.Fprintf(w, " testZero%d(t)\n", s) + } + for _, s := range usizes { + fmt.Fprintf(w, " testZero%du(t)\n", s) + } + fmt.Fprintf(w, "}\n") + + // gofmt result + b := w.Bytes() + src, err := format.Source(b) 
+ if err != nil { + fmt.Printf("%s\n", b) + panic(err) + } + + // write to file + err = os.WriteFile("../zero_test.go", src, 0666) + if err != nil { + log.Fatalf("can't write output: %v\n", err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/loadstore_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/loadstore_test.go new file mode 100644 index 0000000000000000000000000000000000000000..052172819a7a6423849b2ebd43b3118b6f4b69a7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/loadstore_test.go @@ -0,0 +1,205 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests load/store ordering + +package main + +import "testing" + +// testLoadStoreOrder tests for reordering of stores/loads. +func testLoadStoreOrder(t *testing.T) { + z := uint32(1000) + if testLoadStoreOrder_ssa(&z, 100) == 0 { + t.Errorf("testLoadStoreOrder failed") + } +} + +//go:noinline +func testLoadStoreOrder_ssa(z *uint32, prec uint) int { + old := *z // load + *z = uint32(prec) // store + if *z < old { // load + return 1 + } + return 0 +} + +func testStoreSize(t *testing.T) { + a := [4]uint16{11, 22, 33, 44} + testStoreSize_ssa(&a[0], &a[2], 77) + want := [4]uint16{77, 22, 33, 44} + if a != want { + t.Errorf("testStoreSize failed. want = %d, got = %d", want, a) + } +} + +//go:noinline +func testStoreSize_ssa(p *uint16, q *uint16, v uint32) { + // Test to make sure that (Store ptr (Trunc32to16 val) mem) + // does not end up as a 32-bit store. It must stay a 16 bit store + // even when Trunc32to16 is rewritten to be a nop. + // To ensure that we get rewrite the Trunc32to16 before + // we rewrite the Store, we force the truncate into an + // earlier basic block by using it on both branches. 
+ w := uint16(v) + if p != nil { + *p = w + } else { + *q = w + } +} + +//go:noinline +func testExtStore_ssa(p *byte, b bool) int { + x := *p + *p = 7 + if b { + return int(x) + } + return 0 +} + +func testExtStore(t *testing.T) { + const start = 8 + var b byte = start + if got := testExtStore_ssa(&b, true); got != start { + t.Errorf("testExtStore failed. want = %d, got = %d", start, got) + } +} + +var b int + +// testDeadStorePanic_ssa ensures that we don't optimize away stores +// that could be read by after recover(). Modeled after fixedbugs/issue1304. +// +//go:noinline +func testDeadStorePanic_ssa(a int) (r int) { + defer func() { + recover() + r = a + }() + a = 2 // store + b := a - a // optimized to zero + c := 4 + a = c / b // store, but panics + a = 3 // store + r = a + return +} + +func testDeadStorePanic(t *testing.T) { + if want, got := 2, testDeadStorePanic_ssa(1); want != got { + t.Errorf("testDeadStorePanic failed. want = %d, got = %d", want, got) + } +} + +//go:noinline +func loadHitStore8(x int8, p *int8) int32 { + x *= x // try to trash high bits (arch-dependent) + *p = x // store + return int32(*p) // load and cast +} + +//go:noinline +func loadHitStoreU8(x uint8, p *uint8) uint32 { + x *= x // try to trash high bits (arch-dependent) + *p = x // store + return uint32(*p) // load and cast +} + +//go:noinline +func loadHitStore16(x int16, p *int16) int32 { + x *= x // try to trash high bits (arch-dependent) + *p = x // store + return int32(*p) // load and cast +} + +//go:noinline +func loadHitStoreU16(x uint16, p *uint16) uint32 { + x *= x // try to trash high bits (arch-dependent) + *p = x // store + return uint32(*p) // load and cast +} + +//go:noinline +func loadHitStore32(x int32, p *int32) int64 { + x *= x // try to trash high bits (arch-dependent) + *p = x // store + return int64(*p) // load and cast +} + +//go:noinline +func loadHitStoreU32(x uint32, p *uint32) uint64 { + x *= x // try to trash high bits (arch-dependent) + *p = x // store + 
return uint64(*p) // load and cast +} + +func testLoadHitStore(t *testing.T) { + // Test that sign/zero extensions are kept when a load-hit-store + // is replaced by a register-register move. + { + var in int8 = (1 << 6) + 1 + var p int8 + got := loadHitStore8(in, &p) + want := int32(in * in) + if got != want { + t.Errorf("testLoadHitStore (int8) failed. want = %d, got = %d", want, got) + } + } + { + var in uint8 = (1 << 6) + 1 + var p uint8 + got := loadHitStoreU8(in, &p) + want := uint32(in * in) + if got != want { + t.Errorf("testLoadHitStore (uint8) failed. want = %d, got = %d", want, got) + } + } + { + var in int16 = (1 << 10) + 1 + var p int16 + got := loadHitStore16(in, &p) + want := int32(in * in) + if got != want { + t.Errorf("testLoadHitStore (int16) failed. want = %d, got = %d", want, got) + } + } + { + var in uint16 = (1 << 10) + 1 + var p uint16 + got := loadHitStoreU16(in, &p) + want := uint32(in * in) + if got != want { + t.Errorf("testLoadHitStore (uint16) failed. want = %d, got = %d", want, got) + } + } + { + var in int32 = (1 << 30) + 1 + var p int32 + got := loadHitStore32(in, &p) + want := int64(in * in) + if got != want { + t.Errorf("testLoadHitStore (int32) failed. want = %d, got = %d", want, got) + } + } + { + var in uint32 = (1 << 30) + 1 + var p uint32 + got := loadHitStoreU32(in, &p) + want := uint64(in * in) + if got != want { + t.Errorf("testLoadHitStore (uint32) failed. 
want = %d, got = %d", want, got) + } + } +} + +func TestLoadStore(t *testing.T) { + testLoadStoreOrder(t) + testStoreSize(t) + testExtStore(t) + testDeadStorePanic(t) + testLoadHitStore(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/map_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/map_test.go new file mode 100644 index 0000000000000000000000000000000000000000..71dc820c1c9a98cd180526855823e3e4c73d361f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/map_test.go @@ -0,0 +1,37 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// map.go tests map operations. +package main + +import "testing" + +//go:noinline +func lenMap_ssa(v map[int]int) int { + return len(v) +} + +func testLenMap(t *testing.T) { + + v := make(map[int]int) + v[0] = 0 + v[1] = 0 + v[2] = 0 + + if want, got := 3, lenMap_ssa(v); got != want { + t.Errorf("expected len(map) = %d, got %d", want, got) + } +} + +func testLenNilMap(t *testing.T) { + + var v map[int]int + if want, got := 0, lenMap_ssa(v); got != want { + t.Errorf("expected len(nil) = %d, got %d", want, got) + } +} +func TestMap(t *testing.T) { + testLenMap(t) + testLenNilMap(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/mysort/mysort.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/mysort/mysort.go new file mode 100644 index 0000000000000000000000000000000000000000..14852c868a79452d153f3dc6eff834d5d9a58841 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/mysort/mysort.go @@ -0,0 +1,40 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Generic sort function, tested with two different pointer types. + +package mysort + +import ( + "fmt" +) + +type LessConstraint[T any] interface { + Less(T) bool +} + +//go:noinline +func Sort[T LessConstraint[T]](x []T) { + n := len(x) + for i := 1; i < n; i++ { + for j := i; j > 0 && x[j].Less(x[j-1]); j-- { + x[j], x[j-1] = x[j-1], x[j] + } + } +} + +type MyInt struct { + Value int +} + +func (a *MyInt) Less(b *MyInt) bool { + return a.Value < b.Value +} + +//go:noinline +func F() { + sl1 := []*MyInt{&MyInt{4}, &MyInt{3}, &MyInt{8}, &MyInt{7}} + Sort(sl1) + fmt.Printf("%v %v %v %v\n", sl1[0], sl1[1], sl1[2], sl1[3]) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/namedReturn_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/namedReturn_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b07e225c1ca6557fedbebec47a21d770870a6fc4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/namedReturn_test.go @@ -0,0 +1,93 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This test makes sure that naming named +// return variables in a return statement works. +// See issue #14904. + +package main + +import ( + "runtime" + "testing" +) + +// Our heap-allocated object that will be GC'd incorrectly. +// Note that we always check the second word because that's +// where 0xdeaddeaddeaddead is written. 
+type B [4]int + +// small (SSAable) array +type A1 [3]*B + +//go:noinline +func f1() (t A1) { + t[0] = &B{91, 92, 93, 94} + runtime.GC() + return t +} + +// large (non-SSAable) array +type A2 [8]*B + +//go:noinline +func f2() (t A2) { + t[0] = &B{91, 92, 93, 94} + runtime.GC() + return t +} + +// small (SSAable) struct +type A3 struct { + a, b, c *B +} + +//go:noinline +func f3() (t A3) { + t.a = &B{91, 92, 93, 94} + runtime.GC() + return t +} + +// large (non-SSAable) struct +type A4 struct { + a, b, c, d, e, f *B +} + +//go:noinline +func f4() (t A4) { + t.a = &B{91, 92, 93, 94} + runtime.GC() + return t +} + +var sink *B + +func f5() int { + b := &B{91, 92, 93, 94} + t := A4{b, nil, nil, nil, nil, nil} + sink = b // make sure b is heap allocated ... + sink = nil // ... but not live + runtime.GC() + t = t + return t.a[1] +} + +func TestNamedReturn(t *testing.T) { + if v := f1()[0][1]; v != 92 { + t.Errorf("f1()[0][1]=%d, want 92\n", v) + } + if v := f2()[0][1]; v != 92 { + t.Errorf("f2()[0][1]=%d, want 92\n", v) + } + if v := f3().a[1]; v != 92 { + t.Errorf("f3().a[1]=%d, want 92\n", v) + } + if v := f4().a[1]; v != 92 { + t.Errorf("f4().a[1]=%d, want 92\n", v) + } + if v := f5(); v != 92 { + t.Errorf("f5()=%d, want 92\n", v) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.go new file mode 100644 index 0000000000000000000000000000000000000000..ac238f6dea42a90c375cc32c7d09c8d8e735024e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.go @@ -0,0 +1,252 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// WARNING: Please avoid updating this file. 
If this file needs to be updated, +// then a new devirt.pprof file should be generated: +// +// $ cd $GOROOT/src/cmd/compile/internal/test/testdata/pgo/devirtualize/ +// $ go mod init example.com/pgo/devirtualize +// $ go test -bench=. -cpuprofile ./devirt.pprof + +package devirt + +// Devirtualization of callees from transitive dependencies should work even if +// they aren't directly referenced in the package. See #61577. +// +// Dots in the last package path component are escaped in symbol names. Use one +// to ensure the escaping doesn't break lookup. +import ( + "fmt" + + "example.com/pgo/devirtualize/mult.pkg" +) + +var sink int + +type Adder interface { + Add(a, b int) int +} + +type Add struct{} + +func (Add) Add(a, b int) int { + for i := 0; i < 1000; i++ { + sink++ + } + return a + b +} + +type Sub struct{} + +func (Sub) Add(a, b int) int { + for i := 0; i < 1000; i++ { + sink++ + } + return a - b +} + +// ExerciseIface calls mostly a1 and m1. +// +//go:noinline +func ExerciseIface(iter int, a1, a2 Adder, m1, m2 mult.Multiplier) int { + // The call below must evaluate selectA() to determine the receiver to + // use. This should happen exactly once per iteration. Assert that is + // the case to ensure the IR manipulation does not result in over- or + // under-evaluation. + selectI := 0 + selectA := func(gotI int) Adder { + if gotI != selectI { + panic(fmt.Sprintf("selectA not called once per iteration; got i %d want %d", gotI, selectI)) + } + selectI++ + + if gotI%10 == 0 { + return a2 + } + return a1 + } + oneI := 0 + one := func(gotI int) int { + if gotI != oneI { + panic(fmt.Sprintf("one not called once per iteration; got i %d want %d", gotI, oneI)) + } + oneI++ + + // The function value must be evaluated before arguments, so + // selectI must have been incremented already. 
+ if selectI != oneI { + panic(fmt.Sprintf("selectA not called before not called before one; got i %d want %d", selectI, oneI)) + } + + return 1 + } + + val := 0 + for i := 0; i < iter; i++ { + m := m1 + if i%10 == 0 { + m = m2 + } + + // N.B. Profiles only distinguish calls on a per-line level, + // making the two calls ambiguous. However because the + // interfaces and implementations are mutually exclusive, + // devirtualization can still select the correct callee for + // each. + // + // If they were not mutually exclusive (for example, two Add + // calls), then we could not definitively select the correct + // callee. + val += m.Multiply(42, selectA(i).Add(one(i), 2)) + } + return val +} + +type AddFunc func(int, int) int + +func AddFn(a, b int) int { + for i := 0; i < 1000; i++ { + sink++ + } + return a + b +} + +func SubFn(a, b int) int { + for i := 0; i < 1000; i++ { + sink++ + } + return a - b +} + +// ExerciseFuncConcrete calls mostly a1 and m1. +// +//go:noinline +func ExerciseFuncConcrete(iter int, a1, a2 AddFunc, m1, m2 mult.MultFunc) int { + // The call below must evaluate selectA() to determine the function to + // call. This should happen exactly once per iteration. Assert that is + // the case to ensure the IR manipulation does not result in over- or + // under-evaluation. + selectI := 0 + selectA := func(gotI int) AddFunc { + if gotI != selectI { + panic(fmt.Sprintf("selectA not called once per iteration; got i %d want %d", gotI, selectI)) + } + selectI++ + + if gotI%10 == 0 { + return a2 + } + return a1 + } + oneI := 0 + one := func(gotI int) int { + if gotI != oneI { + panic(fmt.Sprintf("one not called once per iteration; got i %d want %d", gotI, oneI)) + } + oneI++ + + // The function value must be evaluated before arguments, so + // selectI must have been incremented already. 
+ if selectI != oneI { + panic(fmt.Sprintf("selectA not called before not called before one; got i %d want %d", selectI, oneI)) + } + + return 1 + } + + val := 0 + for i := 0; i < iter; i++ { + m := m1 + if i%10 == 0 { + m = m2 + } + + // N.B. Profiles only distinguish calls on a per-line level, + // making the two calls ambiguous. However because the + // function types are mutually exclusive, devirtualization can + // still select the correct callee for each. + // + // If they were not mutually exclusive (for example, two + // AddFunc calls), then we could not definitively select the + // correct callee. + val += int(m(42, int64(selectA(i)(one(i), 2)))) + } + return val +} + +// ExerciseFuncField calls mostly a1 and m1. +// +// This is a simplified version of ExerciseFuncConcrete, but accessing the +// function values via a struct field. +// +//go:noinline +func ExerciseFuncField(iter int, a1, a2 AddFunc, m1, m2 mult.MultFunc) int { + ops := struct { + a AddFunc + m mult.MultFunc + }{} + + val := 0 + for i := 0; i < iter; i++ { + ops.a = a1 + ops.m = m1 + if i%10 == 0 { + ops.a = a2 + ops.m = m2 + } + + // N.B. Profiles only distinguish calls on a per-line level, + // making the two calls ambiguous. However because the + // function types are mutually exclusive, devirtualization can + // still select the correct callee for each. + // + // If they were not mutually exclusive (for example, two + // AddFunc calls), then we could not definitively select the + // correct callee. + val += int(ops.m(42, int64(ops.a(1, 2)))) + } + return val +} + +//go:noinline +func AddClosure() AddFunc { + // Implicit closure by capturing the receiver. + var a Add + return a.Add +} + +//go:noinline +func SubClosure() AddFunc { + var s Sub + return s.Add +} + +// ExerciseFuncClosure calls mostly a1 and m1. +// +// This is a simplified version of ExerciseFuncConcrete, but we need two +// distinct call sites to test two different types of function values. 
+// +//go:noinline +func ExerciseFuncClosure(iter int, a1, a2 AddFunc, m1, m2 mult.MultFunc) int { + val := 0 + for i := 0; i < iter; i++ { + a := a1 + m := m1 + if i%10 == 0 { + a = a2 + m = m2 + } + + // N.B. Profiles only distinguish calls on a per-line level, + // making the two calls ambiguous. However because the + // function types are mutually exclusive, devirtualization can + // still select the correct callee for each. + // + // If they were not mutually exclusive (for example, two + // AddFunc calls), then we could not definitively select the + // correct callee. + val += int(m(42, int64(a(1, 2)))) + } + return val +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprof b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprof new file mode 100644 index 0000000000000000000000000000000000000000..2a27f1bb50d21b8126c319453c69e845d444d049 Binary files /dev/null and b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprof differ diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt_test.go new file mode 100644 index 0000000000000000000000000000000000000000..59b565d77fa7764a478a13cb4922751a52c9473a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt_test.go @@ -0,0 +1,73 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// WARNING: Please avoid updating this file. 
If this file needs to be updated, +// then a new devirt.pprof file should be generated: +// +// $ cd $GOROOT/src/cmd/compile/internal/test/testdata/pgo/devirtualize/ +// $ go mod init example.com/pgo/devirtualize +// $ go test -bench=. -cpuprofile ./devirt.pprof + +package devirt + +import ( + "testing" + + "example.com/pgo/devirtualize/mult.pkg" +) + +func BenchmarkDevirtIface(b *testing.B) { + var ( + a1 Add + a2 Sub + m1 mult.Mult + m2 mult.NegMult + ) + + ExerciseIface(b.N, a1, a2, m1, m2) +} + +// Verify that devirtualization doesn't result in calls or side effects applying more than once. +func TestDevirtIface(t *testing.T) { + var ( + a1 Add + a2 Sub + m1 mult.Mult + m2 mult.NegMult + ) + + if v := ExerciseIface(10, a1, a2, m1, m2); v != 1176 { + t.Errorf("ExerciseIface(10) got %d want 1176", v) + } +} + +func BenchmarkDevirtFuncConcrete(b *testing.B) { + ExerciseFuncConcrete(b.N, AddFn, SubFn, mult.MultFn, mult.NegMultFn) +} + +func TestDevirtFuncConcrete(t *testing.T) { + if v := ExerciseFuncConcrete(10, AddFn, SubFn, mult.MultFn, mult.NegMultFn); v != 1176 { + t.Errorf("ExerciseFuncConcrete(10) got %d want 1176", v) + } +} + +func BenchmarkDevirtFuncField(b *testing.B) { + ExerciseFuncField(b.N, AddFn, SubFn, mult.MultFn, mult.NegMultFn) +} + +func TestDevirtFuncField(t *testing.T) { + if v := ExerciseFuncField(10, AddFn, SubFn, mult.MultFn, mult.NegMultFn); v != 1176 { + t.Errorf("ExerciseFuncField(10) got %d want 1176", v) + } +} + +func BenchmarkDevirtFuncClosure(b *testing.B) { + ExerciseFuncClosure(b.N, AddClosure(), SubClosure(), mult.MultClosure(), mult.NegMultClosure()) +} + +func TestDevirtFuncClosure(t *testing.T) { + if v := ExerciseFuncClosure(10, AddClosure(), SubClosure(), mult.MultClosure(), mult.NegMultClosure()); v != 1176 { + t.Errorf("ExerciseFuncClosure(10) got %d want 1176", v) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult.pkg/mult.go 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult.pkg/mult.go new file mode 100644 index 0000000000000000000000000000000000000000..113a5e1a7e42e3bc282aa0b3f4e4150136f15c02 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/devirtualize/mult.pkg/mult.go @@ -0,0 +1,72 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// WARNING: Please avoid updating this file. +// See the warning in ../devirt.go for more details. + +package mult + +var sink int + +type Multiplier interface { + Multiply(a, b int) int +} + +type Mult struct{} + +func (Mult) Multiply(a, b int) int { + for i := 0; i < 1000; i++ { + sink++ + } + return a * b +} + +type NegMult struct{} + +func (NegMult) Multiply(a, b int) int { + for i := 0; i < 1000; i++ { + sink++ + } + return -1 * a * b +} + +// N.B. Different types than AddFunc to test intra-line disambiguation. +type MultFunc func(int64, int64) int64 + +func MultFn(a, b int64) int64 { + for i := 0; i < 1000; i++ { + sink++ + } + return a * b +} + +func NegMultFn(a, b int64) int64 { + for i := 0; i < 1000; i++ { + sink++ + } + return -1 * a * b +} + +//go:noinline +func MultClosure() MultFunc { + // Explicit closure to differentiate from AddClosure. 
+ c := 1 + return func(a, b int64) int64 { + for i := 0; i < 1000; i++ { + sink++ + } + return a * b * int64(c) + } +} + +//go:noinline +func NegMultClosure() MultFunc { + c := 1 + return func(a, b int64) int64 { + for i := 0; i < 1000; i++ { + sink++ + } + return -1 * a * b * int64(c) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot.go new file mode 100644 index 0000000000000000000000000000000000000000..9a462fdfd96769b78f5174ac90622f0cdf624ccb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot.go @@ -0,0 +1,90 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// WARNING: Please avoid updating this file. If this file needs to be updated, +// then a new inline_hot.pprof file should be generated: +// +// $ cd $GOROOT/src/cmd/compile/internal/test/testdata/pgo/inline/ +// $ go test -bench=. 
-cpuprofile ./inline_hot.pprof +package main + +import ( + "time" +) + +type BS struct { + length uint + s []uint64 +} + +const wSize = uint(64) +const lWSize = uint(6) + +func D(i uint) int { + return int((i + (wSize - 1)) >> lWSize) +} + +func N(length uint) (bs *BS) { + bs = &BS{ + length, + make([]uint64, D(length)), + } + + return bs +} + +func (b *BS) S(i uint) *BS { + b.s[i>>lWSize] |= 1 << (i & (wSize - 1)) + return b +} + +var jn = [...]byte{ + 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4, + 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5, + 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11, + 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6, +} + +func T(v uint64) uint { + return uint(jn[((v&-v)*0x03f79d71b4ca8b09)>>58]) +} + +func (b *BS) NS(i uint) (uint, bool) { + x := int(i >> lWSize) + if x >= len(b.s) { + return 0, false + } + w := b.s[x] + w = w >> (i & (wSize - 1)) + if w != 0 { + return i + T(w), true + } + x = x + 1 + for x < len(b.s) { + if b.s[x] != 0 { + return uint(x)*wSize + T(b.s[x]), true + } + x = x + 1 + + } + return 0, false +} + +func A() { + s := N(100000) + for i := 0; i < 1000; i += 30 { + s.S(uint(i)) + } + for j := 0; j < 1000; j++ { + c := uint(0) + for i, e := s.NS(0); e; i, e = s.NS(i + 1) { + c++ + } + } +} + +func main() { + time.Sleep(time.Second) + A() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot.pprof b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot.pprof new file mode 100644 index 0000000000000000000000000000000000000000..1b55ed12336dc768c453e4e02f511d5594a85144 Binary files /dev/null and b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot.pprof differ diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot_test.go 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2725c5705303b1fc52f32e67a54665b0801cf0b3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot_test.go @@ -0,0 +1,51 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// WARNING: Please avoid updating this file. If this file needs to be updated, +// then a new inline_hot.pprof file should be generated: +// +// $ cd $GOROOT/src/cmd/compile/internal/test/testdata/pgo/inline/ +// $ go test -bench=. -cpuprofile ./inline_hot.pprof +package main + +import "testing" + +func BenchmarkA(b *testing.B) { + benchmarkB(b) +} +func benchmarkB(b *testing.B) { + + for i := 0; true; { + A() + i = i + 1 + if i >= b.N { + break + } + A() + i = i + 1 + if i >= b.N { + break + } + A() + i = i + 1 + if i >= b.N { + break + } + A() + i = i + 1 + if i >= b.N { + break + } + A() + i = i + 1 + if i >= b.N { + break + } + A() + i = i + 1 + if i >= b.N { + break + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/phi_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/phi_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c8a73ffd746892b38d9c2dfb32d29e5eb5d49be5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/phi_test.go @@ -0,0 +1,99 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +// Test to make sure spills of cast-shortened values +// don't end up spilling the pre-shortened size instead +// of the post-shortened size. 
+ +import ( + "runtime" + "testing" +) + +var data1 [26]int32 +var data2 [26]int64 + +func init() { + for i := 0; i < 26; i++ { + // If we spill all 8 bytes of this datum, the 1 in the high-order 4 bytes + // will overwrite some other variable in the stack frame. + data2[i] = 0x100000000 + } +} + +func foo() int32 { + var a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z int32 + if always { + a = data1[0] + b = data1[1] + c = data1[2] + d = data1[3] + e = data1[4] + f = data1[5] + g = data1[6] + h = data1[7] + i = data1[8] + j = data1[9] + k = data1[10] + l = data1[11] + m = data1[12] + n = data1[13] + o = data1[14] + p = data1[15] + q = data1[16] + r = data1[17] + s = data1[18] + t = data1[19] + u = data1[20] + v = data1[21] + w = data1[22] + x = data1[23] + y = data1[24] + z = data1[25] + } else { + a = int32(data2[0]) + b = int32(data2[1]) + c = int32(data2[2]) + d = int32(data2[3]) + e = int32(data2[4]) + f = int32(data2[5]) + g = int32(data2[6]) + h = int32(data2[7]) + i = int32(data2[8]) + j = int32(data2[9]) + k = int32(data2[10]) + l = int32(data2[11]) + m = int32(data2[12]) + n = int32(data2[13]) + o = int32(data2[14]) + p = int32(data2[15]) + q = int32(data2[16]) + r = int32(data2[17]) + s = int32(data2[18]) + t = int32(data2[19]) + u = int32(data2[20]) + v = int32(data2[21]) + w = int32(data2[22]) + x = int32(data2[23]) + y = int32(data2[24]) + z = int32(data2[25]) + } + // Lots of phis of the form phi(int32,int64) of type int32 happen here. + // Some will be stack phis. For those stack phis, make sure the spill + // of the second argument uses the phi's width (4 bytes), not its width + // (8 bytes). Otherwise, a random stack slot gets clobbered. 
+ + runtime.Gosched() + return a + b + c + d + e + f + g + h + i + j + k + l + m + n + o + p + q + r + s + t + u + v + w + x + y + z +} + +func TestPhi(t *testing.T) { + want := int32(0) + got := foo() + if got != want { + t.Fatalf("want %d, got %d\n", want, got) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/ptrsort.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/ptrsort.go new file mode 100644 index 0000000000000000000000000000000000000000..d26ba581d91dd9092e122542980643e601f03a99 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/ptrsort.go @@ -0,0 +1,30 @@ +package main + +// Test generic sort function with two different pointer types in different packages, +// make sure only one instantiation is created. + +import ( + "fmt" + + "cmd/compile/internal/test/testdata/mysort" +) + +type MyString struct { + string +} + +func (a *MyString) Less(b *MyString) bool { + return a.string < b.string +} + +func main() { + mysort.F() + + sl1 := []*mysort.MyInt{{7}, {1}, {4}, {6}} + mysort.Sort(sl1) + fmt.Printf("%v %v %v %v\n", sl1[0], sl1[1], sl1[2], sl1[3]) + + sl2 := []*MyString{{"when"}, {"in"}, {"the"}, {"course"}, {"of"}} + mysort.Sort(sl2) + fmt.Printf("%v %v %v %v %v\n", sl2[0], sl2[1], sl2[2], sl2[3], sl2[4]) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/ptrsort.out b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/ptrsort.out new file mode 100644 index 0000000000000000000000000000000000000000..41f1621d1a29b5977888b8b7d2572f23117a059b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/ptrsort.out @@ -0,0 +1,3 @@ +&{3} &{4} &{7} &{8} +&{1} &{4} &{6} &{7} +&{course} &{in} &{of} &{the} &{when} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/regalloc_test.go 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/regalloc_test.go new file mode 100644 index 0000000000000000000000000000000000000000..577f8e76842e6cd5b431af46a73dd82a6ca1969c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/regalloc_test.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests phi implementation + +package main + +import "testing" + +func phiOverwrite_ssa() int { + var n int + for i := 0; i < 10; i++ { + if i == 6 { + break + } + n = i + } + return n +} + +func phiOverwrite(t *testing.T) { + want := 5 + got := phiOverwrite_ssa() + if got != want { + t.Errorf("phiOverwrite_ssa()= %d, got %d", want, got) + } +} + +func phiOverwriteBig_ssa() int { + var a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z int + a = 1 + for idx := 0; idx < 26; idx++ { + a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z = b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, a + } + return a*1 + b*2 + c*3 + d*4 + e*5 + f*6 + g*7 + h*8 + i*9 + j*10 + k*11 + l*12 + m*13 + n*14 + o*15 + p*16 + q*17 + r*18 + s*19 + t*20 + u*21 + v*22 + w*23 + x*24 + y*25 + z*26 +} + +func phiOverwriteBig(t *testing.T) { + want := 1 + got := phiOverwriteBig_ssa() + if got != want { + t.Errorf("phiOverwriteBig_ssa()= %d, got %d", want, got) + } +} + +func TestRegalloc(t *testing.T) { + phiOverwrite(t) + phiOverwriteBig(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/reproducible/issue20272.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/reproducible/issue20272.go new file mode 100644 index 0000000000000000000000000000000000000000..3db0b8a357b4fd3f896578d8e4b3b0ce7a451330 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/reproducible/issue20272.go @@ -0,0 +1,34 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +var ( + i0 uint8 + b0 byte + + i1 *uint8 + b1 *byte + + i2 **uint8 + b2 **byte + + i3 ***uint8 + b3 ***byte + + i4 ****uint8 + b4 ****byte + + i5 *****uint8 + b5 *****byte + + i6 ******uint8 + b6 ******byte + + i7 *******uint8 + b7 *******byte + + i8 ********uint8 + b8 ********byte +) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/reproducible/issue27013.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/reproducible/issue27013.go new file mode 100644 index 0000000000000000000000000000000000000000..817f4a640ee953f4f3ccce781e26f726161724db --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/reproducible/issue27013.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +func A(arg interface{}) { + _ = arg.(interface{ Func() int32 }) + _ = arg.(interface{ Func() int32 }) + _ = arg.(interface{ Func() int32 }) + _ = arg.(interface{ Func() int32 }) + _ = arg.(interface{ Func() int32 }) + _ = arg.(interface{ Func() int32 }) + _ = arg.(interface{ Func() int32 }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/reproducible/issue30202.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/reproducible/issue30202.go new file mode 100644 index 0000000000000000000000000000000000000000..7b5de2cc8b1ee3831d7e679bd3d6d28094f24ca0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/reproducible/issue30202.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +func A(x interface { + X() int +}) int { + return x.X() +} + +func B(x interface { + X() int +}) int { + return x.X() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go new file mode 100644 index 0000000000000000000000000000000000000000..b87daed8e9882b4727f8fc9f01cf9e1dcdcec6f5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go @@ -0,0 +1,70 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue38068 + +// A type with a couple of inlinable, non-pointer-receiver methods +// that have params and local variables. +type A struct { + s string + next *A + prev *A +} + +// Inlinable, value-received method with locals and parms. +func (a A) double(x string, y int) string { + if y == 191 { + a.s = "" + } + q := a.s + "a" + r := a.s + "b" + return q + r +} + +// Inlinable, value-received method with locals and parms. +func (a A) triple(x string, y int) string { + q := a.s + if y == 998877 { + a.s = x + } + r := a.s + a.s + return q + r +} + +type methods struct { + m1 func(a *A, x string, y int) string + m2 func(a *A, x string, y int) string +} + +// Now a function that makes references to the methods via pointers, +// which should trigger the wrapper generation. 
+func P(a *A, ms *methods) { + if a != nil { + defer func() { println("done") }() + } + println(ms.m1(a, "a", 2)) + println(ms.m2(a, "b", 3)) +} + +func G(x *A, n int) { + if n <= 0 { + println(n) + return + } + // Address-taken local of type A, which will insure that the + // compiler's writeType() routine will create a method wrapper. + var a, b A + a.next = x + a.prev = &b + x = &a + G(x, n-2) +} + +var M methods + +func F() { + M.m1 = (*A).double + M.m2 = (*A).triple + G(nil, 100) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/short_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/short_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7a743b5d19e523d2036a1d323d293fa17618faa9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/short_test.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests short circuiting. 
+ +package main + +import "testing" + +func and_ssa(arg1, arg2 bool) bool { + return arg1 && rightCall(arg2) +} + +func or_ssa(arg1, arg2 bool) bool { + return arg1 || rightCall(arg2) +} + +var rightCalled bool + +//go:noinline +func rightCall(v bool) bool { + rightCalled = true + return v + panic("unreached") +} + +func testAnd(t *testing.T, arg1, arg2, wantRes bool) { + testShortCircuit(t, "AND", arg1, arg2, and_ssa, arg1, wantRes) +} +func testOr(t *testing.T, arg1, arg2, wantRes bool) { + testShortCircuit(t, "OR", arg1, arg2, or_ssa, !arg1, wantRes) +} + +func testShortCircuit(t *testing.T, opName string, arg1, arg2 bool, fn func(bool, bool) bool, wantRightCall, wantRes bool) { + rightCalled = false + got := fn(arg1, arg2) + if rightCalled != wantRightCall { + t.Errorf("failed for %t %s %t; rightCalled=%t want=%t", arg1, opName, arg2, rightCalled, wantRightCall) + } + if wantRes != got { + t.Errorf("failed for %t %s %t; res=%t want=%t", arg1, opName, arg2, got, wantRes) + } +} + +// TestShortCircuit tests OANDAND and OOROR expressions and short circuiting. +func TestShortCircuit(t *testing.T) { + testAnd(t, false, false, false) + testAnd(t, false, true, false) + testAnd(t, true, false, false) + testAnd(t, true, true, true) + + testOr(t, false, false, false) + testOr(t, false, true, true) + testOr(t, true, false, true) + testOr(t, true, true, true) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/slice_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/slice_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c1345780347a13a03692d142db4d41abba4431f9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/slice_test.go @@ -0,0 +1,46 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This test makes sure that t.s = t.s[0:x] doesn't write +// either the slice pointer or the capacity. +// See issue #14855. + +package main + +import "testing" + +const N = 1000000 + +type X struct { + s []int +} + +func TestSlice(t *testing.T) { + done := make(chan struct{}) + a := make([]int, N+10) + + x := &X{a} + + go func() { + for i := 0; i < N; i++ { + x.s = x.s[1:9] + } + done <- struct{}{} + }() + go func() { + for i := 0; i < N; i++ { + x.s = x.s[0:8] // should only write len + } + done <- struct{}{} + }() + <-done + <-done + + if cap(x.s) != cap(a)-N { + t.Errorf("wanted cap=%d, got %d\n", cap(a)-N, cap(x.s)) + } + if &x.s[0] != &a[N] { + t.Errorf("wanted ptr=%p, got %p\n", &a[N], &x.s[0]) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/sqrtConst_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/sqrtConst_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5b7a149e42c408dfdc6697797ee7de70b2feb3e4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/sqrtConst_test.go @@ -0,0 +1,50 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "math" + "testing" +) + +var tests = [...]struct { + name string + in float64 // used for error messages, not an input + got float64 + want float64 +}{ + {"sqrt0", 0, math.Sqrt(0), 0}, + {"sqrt1", 1, math.Sqrt(1), 1}, + {"sqrt2", 2, math.Sqrt(2), math.Sqrt2}, + {"sqrt4", 4, math.Sqrt(4), 2}, + {"sqrt100", 100, math.Sqrt(100), 10}, + {"sqrt101", 101, math.Sqrt(101), 10.04987562112089}, +} + +var nanTests = [...]struct { + name string + in float64 // used for error messages, not an input + got float64 +}{ + {"sqrtNaN", math.NaN(), math.Sqrt(math.NaN())}, + {"sqrtNegative", -1, math.Sqrt(-1)}, + {"sqrtNegInf", math.Inf(-1), math.Sqrt(math.Inf(-1))}, +} + +func TestSqrtConst(t *testing.T) { + for _, test := range tests { + if test.got != test.want { + t.Errorf("%s: math.Sqrt(%f): got %f, want %f\n", test.name, test.in, test.got, test.want) + } + } + for _, test := range nanTests { + if math.IsNaN(test.got) != true { + t.Errorf("%s: math.Sqrt(%f): got %f, want NaN\n", test.name, test.in, test.got) + } + } + if got := math.Sqrt(math.Inf(1)); !math.IsInf(got, 1) { + t.Errorf("math.Sqrt(+Inf), got %f, want +Inf\n", got) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/string_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/string_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5d086f014733cd955707a35993e794d01fe93ee9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/string_test.go @@ -0,0 +1,207 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// string_ssa.go tests string operations. 
+package main + +import "testing" + +//go:noinline +func testStringSlice1_ssa(a string, i, j int) string { + return a[i:] +} + +//go:noinline +func testStringSlice2_ssa(a string, i, j int) string { + return a[:j] +} + +//go:noinline +func testStringSlice12_ssa(a string, i, j int) string { + return a[i:j] +} + +func testStringSlice(t *testing.T) { + tests := [...]struct { + fn func(string, int, int) string + s string + low, high int + want string + }{ + // -1 means the value is not used. + {testStringSlice1_ssa, "foobar", 0, -1, "foobar"}, + {testStringSlice1_ssa, "foobar", 3, -1, "bar"}, + {testStringSlice1_ssa, "foobar", 6, -1, ""}, + {testStringSlice2_ssa, "foobar", -1, 0, ""}, + {testStringSlice2_ssa, "foobar", -1, 3, "foo"}, + {testStringSlice2_ssa, "foobar", -1, 6, "foobar"}, + {testStringSlice12_ssa, "foobar", 0, 6, "foobar"}, + {testStringSlice12_ssa, "foobar", 0, 0, ""}, + {testStringSlice12_ssa, "foobar", 6, 6, ""}, + {testStringSlice12_ssa, "foobar", 1, 5, "ooba"}, + {testStringSlice12_ssa, "foobar", 3, 3, ""}, + {testStringSlice12_ssa, "", 0, 0, ""}, + } + + for i, test := range tests { + if got := test.fn(test.s, test.low, test.high); test.want != got { + t.Errorf("#%d %s[%d,%d] = %s, want %s", i, test.s, test.low, test.high, got, test.want) + } + } +} + +type prefix struct { + prefix string +} + +func (p *prefix) slice_ssa() { + p.prefix = p.prefix[:3] +} + +//go:noinline +func testStructSlice(t *testing.T) { + p := &prefix{"prefix"} + p.slice_ssa() + if "pre" != p.prefix { + t.Errorf("wrong field slice: wanted %s got %s", "pre", p.prefix) + } +} + +func testStringSlicePanic(t *testing.T) { + defer func() { + if r := recover(); r != nil { + //println("panicked as expected") + } + }() + + str := "foobar" + t.Errorf("got %s and expected to panic, but didn't", testStringSlice12_ssa(str, 3, 9)) +} + +const _Accuracy_name = "BelowExactAbove" + +var _Accuracy_index = [...]uint8{0, 5, 10, 15} + +//go:noinline +func testSmallIndexType_ssa(i int) string { + 
return _Accuracy_name[_Accuracy_index[i]:_Accuracy_index[i+1]] +} + +func testSmallIndexType(t *testing.T) { + tests := []struct { + i int + want string + }{ + {0, "Below"}, + {1, "Exact"}, + {2, "Above"}, + } + + for i, test := range tests { + if got := testSmallIndexType_ssa(test.i); got != test.want { + t.Errorf("#%d got %s wanted %s", i, got, test.want) + } + } +} + +//go:noinline +func testInt64Index_ssa(s string, i int64) byte { + return s[i] +} + +//go:noinline +func testInt64Slice_ssa(s string, i, j int64) string { + return s[i:j] +} + +func testInt64Index(t *testing.T) { + tests := []struct { + i int64 + j int64 + b byte + s string + }{ + {0, 5, 'B', "Below"}, + {5, 10, 'E', "Exact"}, + {10, 15, 'A', "Above"}, + } + + str := "BelowExactAbove" + for i, test := range tests { + if got := testInt64Index_ssa(str, test.i); got != test.b { + t.Errorf("#%d got %d wanted %d", i, got, test.b) + } + if got := testInt64Slice_ssa(str, test.i, test.j); got != test.s { + t.Errorf("#%d got %s wanted %s", i, got, test.s) + } + } +} + +func testInt64IndexPanic(t *testing.T) { + defer func() { + if r := recover(); r != nil { + //println("panicked as expected") + } + }() + + str := "foobar" + t.Errorf("got %d and expected to panic, but didn't", testInt64Index_ssa(str, 1<<32+1)) +} + +func testInt64SlicePanic(t *testing.T) { + defer func() { + if r := recover(); r != nil { + //println("panicked as expected") + } + }() + + str := "foobar" + t.Errorf("got %s and expected to panic, but didn't", testInt64Slice_ssa(str, 1<<32, 1<<32+1)) +} + +//go:noinline +func testStringElem_ssa(s string, i int) byte { + return s[i] +} + +func testStringElem(t *testing.T) { + tests := []struct { + s string + i int + n byte + }{ + {"foobar", 3, 98}, + {"foobar", 0, 102}, + {"foobar", 5, 114}, + } + for _, test := range tests { + if got := testStringElem_ssa(test.s, test.i); got != test.n { + t.Errorf("testStringElem \"%s\"[%d] = %d, wanted %d", test.s, test.i, got, test.n) + } + } +} + 
+//go:noinline +func testStringElemConst_ssa(i int) byte { + s := "foobar" + return s[i] +} + +func testStringElemConst(t *testing.T) { + if got := testStringElemConst_ssa(3); got != 98 { + t.Errorf("testStringElemConst= %d, wanted 98", got) + } +} + +func TestString(t *testing.T) { + testStringSlice(t) + testStringSlicePanic(t) + testStructSlice(t) + testSmallIndexType(t) + testStringElem(t) + testStringElemConst(t) + testInt64Index(t) + testInt64IndexPanic(t) + testInt64SlicePanic(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/unsafe_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/unsafe_test.go new file mode 100644 index 0000000000000000000000000000000000000000..37599d3fd4ad2167c89ebd2c7568e1ac57d45e56 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/unsafe_test.go @@ -0,0 +1,145 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "runtime" + "testing" + "unsafe" +) + +// global pointer slot +var a *[8]uint + +// unfoldable true +var always = true + +// Test to make sure that a pointer value which is alive +// across a call is retained, even when there are matching +// conversions to/from uintptr around the call. +// We arrange things very carefully to have to/from +// conversions on either side of the call which cannot be +// combined with any other conversions. +func f_ssa() *[8]uint { + // Make x a uintptr pointing to where a points. + var x uintptr + if always { + x = uintptr(unsafe.Pointer(a)) + } else { + x = 0 + } + // Clobber the global pointer. The only live ref + // to the allocated object is now x. + a = nil + + // Convert to pointer so it should hold + // the object live across GC call. + p := unsafe.Pointer(x) + + // Call gc. + runtime.GC() + + // Convert back to uintptr. 
+ y := uintptr(p) + + // Mess with y so that the subsequent cast + // to unsafe.Pointer can't be combined with the + // uintptr cast above. + var z uintptr + if always { + z = y + } else { + z = 0 + } + return (*[8]uint)(unsafe.Pointer(z)) +} + +// g_ssa is the same as f_ssa, but with a bit of pointer +// arithmetic for added insanity. +func g_ssa() *[7]uint { + // Make x a uintptr pointing to where a points. + var x uintptr + if always { + x = uintptr(unsafe.Pointer(a)) + } else { + x = 0 + } + // Clobber the global pointer. The only live ref + // to the allocated object is now x. + a = nil + + // Offset x by one int. + x += unsafe.Sizeof(int(0)) + + // Convert to pointer so it should hold + // the object live across GC call. + p := unsafe.Pointer(x) + + // Call gc. + runtime.GC() + + // Convert back to uintptr. + y := uintptr(p) + + // Mess with y so that the subsequent cast + // to unsafe.Pointer can't be combined with the + // uintptr cast above. + var z uintptr + if always { + z = y + } else { + z = 0 + } + return (*[7]uint)(unsafe.Pointer(z)) +} + +func testf(t *testing.T) { + a = new([8]uint) + for i := 0; i < 8; i++ { + a[i] = 0xabcd + } + c := f_ssa() + for i := 0; i < 8; i++ { + if c[i] != 0xabcd { + t.Fatalf("%d:%x\n", i, c[i]) + } + } +} + +func testg(t *testing.T) { + a = new([8]uint) + for i := 0; i < 8; i++ { + a[i] = 0xabcd + } + c := g_ssa() + for i := 0; i < 7; i++ { + if c[i] != 0xabcd { + t.Fatalf("%d:%x\n", i, c[i]) + } + } +} + +func alias_ssa(ui64 *uint64, ui32 *uint32) uint32 { + *ui32 = 0xffffffff + *ui64 = 0 // store + ret := *ui32 // load from same address, should be zero + *ui64 = 0xffffffffffffffff // store + return ret +} +func testdse(t *testing.T) { + x := int64(-1) + // construct two pointers that alias one another + ui64 := (*uint64)(unsafe.Pointer(&x)) + ui32 := (*uint32)(unsafe.Pointer(&x)) + if want, got := uint32(0), alias_ssa(ui64, ui32); got != want { + t.Fatalf("alias_ssa: wanted %d, got %d\n", want, got) + } +} + +func 
TestUnsafe(t *testing.T) { + testf(t) + testg(t) + testdse(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/zero_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/zero_test.go new file mode 100644 index 0000000000000000000000000000000000000000..64fa25eed06931d3ae2770b4cd787f83b197403e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/testdata/zero_test.go @@ -0,0 +1,711 @@ +// Code generated by gen/zeroGen.go. DO NOT EDIT. + +package main + +import "testing" + +type Z1 struct { + pre [8]byte + mid [1]byte + post [8]byte +} + +//go:noinline +func zero1_ssa(x *[1]byte) { + *x = [1]byte{} +} +func testZero1(t *testing.T) { + a := Z1{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1]byte{255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero1_ssa(&a.mid) + want := Z1{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1]byte{0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero1 got=%v, want %v\n", a, want) + } +} + +type Z2 struct { + pre [8]byte + mid [2]byte + post [8]byte +} + +//go:noinline +func zero2_ssa(x *[2]byte) { + *x = [2]byte{} +} +func testZero2(t *testing.T) { + a := Z2{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [2]byte{255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero2_ssa(&a.mid) + want := Z2{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [2]byte{0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero2 got=%v, want %v\n", a, want) + } +} + +type Z3 struct { + pre [8]byte + mid [3]byte + post [8]byte +} + +//go:noinline +func zero3_ssa(x *[3]byte) { + *x = [3]byte{} +} +func testZero3(t *testing.T) { + a := Z3{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [3]byte{255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero3_ssa(&a.mid) + want := Z3{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [3]byte{0, 0, 0}, [8]byte{255, 255, 255, 255, 
255, 255, 255, 255}} + if a != want { + t.Errorf("zero3 got=%v, want %v\n", a, want) + } +} + +type Z4 struct { + pre [8]byte + mid [4]byte + post [8]byte +} + +//go:noinline +func zero4_ssa(x *[4]byte) { + *x = [4]byte{} +} +func testZero4(t *testing.T) { + a := Z4{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [4]byte{255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero4_ssa(&a.mid) + want := Z4{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [4]byte{0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero4 got=%v, want %v\n", a, want) + } +} + +type Z5 struct { + pre [8]byte + mid [5]byte + post [8]byte +} + +//go:noinline +func zero5_ssa(x *[5]byte) { + *x = [5]byte{} +} +func testZero5(t *testing.T) { + a := Z5{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [5]byte{255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero5_ssa(&a.mid) + want := Z5{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [5]byte{0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero5 got=%v, want %v\n", a, want) + } +} + +type Z6 struct { + pre [8]byte + mid [6]byte + post [8]byte +} + +//go:noinline +func zero6_ssa(x *[6]byte) { + *x = [6]byte{} +} +func testZero6(t *testing.T) { + a := Z6{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [6]byte{255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero6_ssa(&a.mid) + want := Z6{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [6]byte{0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero6 got=%v, want %v\n", a, want) + } +} + +type Z7 struct { + pre [8]byte + mid [7]byte + post [8]byte +} + +//go:noinline +func zero7_ssa(x *[7]byte) { + *x = [7]byte{} +} +func testZero7(t *testing.T) { + a := Z7{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [7]byte{255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 
255, 255}} + zero7_ssa(&a.mid) + want := Z7{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [7]byte{0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero7 got=%v, want %v\n", a, want) + } +} + +type Z8 struct { + pre [8]byte + mid [8]byte + post [8]byte +} + +//go:noinline +func zero8_ssa(x *[8]byte) { + *x = [8]byte{} +} +func testZero8(t *testing.T) { + a := Z8{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero8_ssa(&a.mid) + want := Z8{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero8 got=%v, want %v\n", a, want) + } +} + +type Z9 struct { + pre [8]byte + mid [9]byte + post [8]byte +} + +//go:noinline +func zero9_ssa(x *[9]byte) { + *x = [9]byte{} +} +func testZero9(t *testing.T) { + a := Z9{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [9]byte{255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero9_ssa(&a.mid) + want := Z9{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [9]byte{0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero9 got=%v, want %v\n", a, want) + } +} + +type Z10 struct { + pre [8]byte + mid [10]byte + post [8]byte +} + +//go:noinline +func zero10_ssa(x *[10]byte) { + *x = [10]byte{} +} +func testZero10(t *testing.T) { + a := Z10{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [10]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero10_ssa(&a.mid) + want := Z10{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [10]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero10 got=%v, want %v\n", a, want) + } +} + +type Z15 struct { + pre [8]byte + mid [15]byte 
+ post [8]byte +} + +//go:noinline +func zero15_ssa(x *[15]byte) { + *x = [15]byte{} +} +func testZero15(t *testing.T) { + a := Z15{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [15]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero15_ssa(&a.mid) + want := Z15{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [15]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero15 got=%v, want %v\n", a, want) + } +} + +type Z16 struct { + pre [8]byte + mid [16]byte + post [8]byte +} + +//go:noinline +func zero16_ssa(x *[16]byte) { + *x = [16]byte{} +} +func testZero16(t *testing.T) { + a := Z16{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [16]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero16_ssa(&a.mid) + want := Z16{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero16 got=%v, want %v\n", a, want) + } +} + +type Z17 struct { + pre [8]byte + mid [17]byte + post [8]byte +} + +//go:noinline +func zero17_ssa(x *[17]byte) { + *x = [17]byte{} +} +func testZero17(t *testing.T) { + a := Z17{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [17]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero17_ssa(&a.mid) + want := Z17{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [17]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero17 got=%v, want %v\n", a, want) + } +} + +type Z23 struct { + pre [8]byte + mid [23]byte + post [8]byte +} + +//go:noinline +func zero23_ssa(x *[23]byte) { + *x = [23]byte{} +} +func 
testZero23(t *testing.T) { + a := Z23{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [23]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero23_ssa(&a.mid) + want := Z23{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [23]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero23 got=%v, want %v\n", a, want) + } +} + +type Z24 struct { + pre [8]byte + mid [24]byte + post [8]byte +} + +//go:noinline +func zero24_ssa(x *[24]byte) { + *x = [24]byte{} +} +func testZero24(t *testing.T) { + a := Z24{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [24]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero24_ssa(&a.mid) + want := Z24{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [24]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero24 got=%v, want %v\n", a, want) + } +} + +type Z25 struct { + pre [8]byte + mid [25]byte + post [8]byte +} + +//go:noinline +func zero25_ssa(x *[25]byte) { + *x = [25]byte{} +} +func testZero25(t *testing.T) { + a := Z25{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [25]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero25_ssa(&a.mid) + want := Z25{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [25]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero25 got=%v, want %v\n", a, want) + } +} + +type Z31 struct { + pre [8]byte + mid [31]byte + 
post [8]byte +} + +//go:noinline +func zero31_ssa(x *[31]byte) { + *x = [31]byte{} +} +func testZero31(t *testing.T) { + a := Z31{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [31]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero31_ssa(&a.mid) + want := Z31{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [31]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero31 got=%v, want %v\n", a, want) + } +} + +type Z32 struct { + pre [8]byte + mid [32]byte + post [8]byte +} + +//go:noinline +func zero32_ssa(x *[32]byte) { + *x = [32]byte{} +} +func testZero32(t *testing.T) { + a := Z32{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [32]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero32_ssa(&a.mid) + want := Z32{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero32 got=%v, want %v\n", a, want) + } +} + +type Z33 struct { + pre [8]byte + mid [33]byte + post [8]byte +} + +//go:noinline +func zero33_ssa(x *[33]byte) { + *x = [33]byte{} +} +func testZero33(t *testing.T) { + a := Z33{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [33]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero33_ssa(&a.mid) + want := Z33{[8]byte{255, 255, 255, 255, 255, 255, 255, 
255}, [33]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero33 got=%v, want %v\n", a, want) + } +} + +type Z63 struct { + pre [8]byte + mid [63]byte + post [8]byte +} + +//go:noinline +func zero63_ssa(x *[63]byte) { + *x = [63]byte{} +} +func testZero63(t *testing.T) { + a := Z63{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [63]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero63_ssa(&a.mid) + want := Z63{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [63]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero63 got=%v, want %v\n", a, want) + } +} + +type Z64 struct { + pre [8]byte + mid [64]byte + post [8]byte +} + +//go:noinline +func zero64_ssa(x *[64]byte) { + *x = [64]byte{} +} +func testZero64(t *testing.T) { + a := Z64{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [64]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero64_ssa(&a.mid) + want := Z64{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [64]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero64 got=%v, want %v\n", a, want) + } +} + +type Z65 struct { + pre [8]byte + mid [65]byte + post [8]byte +} + +//go:noinline +func zero65_ssa(x *[65]byte) { + *x = [65]byte{} +} +func testZero65(t *testing.T) { + a := Z65{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [65]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero65_ssa(&a.mid) + want := Z65{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [65]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero65 got=%v, want %v\n", a, want) + } +} + +type Z1023 struct { + pre [8]byte + mid [1023]byte + post [8]byte +} + +//go:noinline +func zero1023_ssa(x *[1023]byte) { + *x = [1023]byte{} +} +func testZero1023(t *testing.T) { + a := Z1023{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1023]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero1023_ssa(&a.mid) + want := Z1023{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1023]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero1023 got=%v, want %v\n", a, want) + } 
+} + +type Z1024 struct { + pre [8]byte + mid [1024]byte + post [8]byte +} + +//go:noinline +func zero1024_ssa(x *[1024]byte) { + *x = [1024]byte{} +} +func testZero1024(t *testing.T) { + a := Z1024{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1024]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero1024_ssa(&a.mid) + want := Z1024{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1024]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero1024 got=%v, want %v\n", a, want) + } +} + +type Z1025 struct { + pre [8]byte + mid [1025]byte + post [8]byte +} + +//go:noinline +func zero1025_ssa(x *[1025]byte) { + *x = [1025]byte{} +} +func testZero1025(t *testing.T) { + a := Z1025{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1025]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero1025_ssa(&a.mid) + want := Z1025{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1025]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + if a != want { + t.Errorf("zero1025 got=%v, want %v\n", a, want) + } +} + +type Z8u1 struct { + b bool + val [8]byte +} +type Z8u2 struct { + i uint16 + val [8]byte +} + +//go:noinline +func zero8u1_ssa(t *Z8u1) { + t.val = [8]byte{} +} + +//go:noinline +func zero8u2_ssa(t *Z8u2) { + t.val = [8]byte{} +} +func testZero8u(t *testing.T) { + a := Z8u1{false, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero8u1_ssa(&a) + want := Z8u1{false, [8]byte{0, 0, 0, 0, 0, 0, 0, 0}} + if a != want { + t.Errorf("zero8u2 got=%v, want %v\n", a, want) + } + b := Z8u2{15, [8]byte{255, 255, 255, 255, 255, 255, 255, 
255}} + zero8u2_ssa(&b) + wantb := Z8u2{15, [8]byte{0, 0, 0, 0, 0, 0, 0, 0}} + if b != wantb { + t.Errorf("zero8u2 got=%v, want %v\n", b, wantb) + } +} + +type Z16u1 struct { + b bool + val [16]byte +} +type Z16u2 struct { + i uint16 + val [16]byte +} + +//go:noinline +func zero16u1_ssa(t *Z16u1) { + t.val = [16]byte{} +} + +//go:noinline +func zero16u2_ssa(t *Z16u2) { + t.val = [16]byte{} +} +func testZero16u(t *testing.T) { + a := Z16u1{false, [16]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero16u1_ssa(&a) + want := Z16u1{false, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if a != want { + t.Errorf("zero16u2 got=%v, want %v\n", a, want) + } + b := Z16u2{15, [16]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero16u2_ssa(&b) + wantb := Z16u2{15, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if b != wantb { + t.Errorf("zero16u2 got=%v, want %v\n", b, wantb) + } +} + +type Z24u1 struct { + b bool + val [24]byte +} +type Z24u2 struct { + i uint16 + val [24]byte +} + +//go:noinline +func zero24u1_ssa(t *Z24u1) { + t.val = [24]byte{} +} + +//go:noinline +func zero24u2_ssa(t *Z24u2) { + t.val = [24]byte{} +} +func testZero24u(t *testing.T) { + a := Z24u1{false, [24]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero24u1_ssa(&a) + want := Z24u1{false, [24]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if a != want { + t.Errorf("zero24u2 got=%v, want %v\n", a, want) + } + b := Z24u2{15, [24]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero24u2_ssa(&b) + wantb := Z24u2{15, [24]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if b != wantb { + t.Errorf("zero24u2 got=%v, want %v\n", b, wantb) + } +} + +type Z32u1 struct { + b bool + val 
[32]byte +} +type Z32u2 struct { + i uint16 + val [32]byte +} + +//go:noinline +func zero32u1_ssa(t *Z32u1) { + t.val = [32]byte{} +} + +//go:noinline +func zero32u2_ssa(t *Z32u2) { + t.val = [32]byte{} +} +func testZero32u(t *testing.T) { + a := Z32u1{false, [32]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero32u1_ssa(&a) + want := Z32u1{false, [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if a != want { + t.Errorf("zero32u2 got=%v, want %v\n", a, want) + } + b := Z32u2{15, [32]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero32u2_ssa(&b) + wantb := Z32u2{15, [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if b != wantb { + t.Errorf("zero32u2 got=%v, want %v\n", b, wantb) + } +} + +type Z64u1 struct { + b bool + val [64]byte +} +type Z64u2 struct { + i uint16 + val [64]byte +} + +//go:noinline +func zero64u1_ssa(t *Z64u1) { + t.val = [64]byte{} +} + +//go:noinline +func zero64u2_ssa(t *Z64u2) { + t.val = [64]byte{} +} +func testZero64u(t *testing.T) { + a := Z64u1{false, [64]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero64u1_ssa(&a) + want := Z64u1{false, [64]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if a != want { + t.Errorf("zero64u2 got=%v, want %v\n", a, want) + 
} + b := Z64u2{15, [64]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero64u2_ssa(&b) + wantb := Z64u2{15, [64]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if b != wantb { + t.Errorf("zero64u2 got=%v, want %v\n", b, wantb) + } +} + +type Z256u1 struct { + b bool + val [256]byte +} +type Z256u2 struct { + i uint16 + val [256]byte +} + +//go:noinline +func zero256u1_ssa(t *Z256u1) { + t.val = [256]byte{} +} + +//go:noinline +func zero256u2_ssa(t *Z256u2) { + t.val = [256]byte{} +} +func testZero256u(t *testing.T) { + a := Z256u1{false, [256]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero256u1_ssa(&a) + want := Z256u1{false, [256]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if a != want { + t.Errorf("zero256u2 got=%v, want %v\n", a, want) + } + b := Z256u2{15, [256]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero256u2_ssa(&b) + wantb := Z256u2{15, [256]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if b != wantb { + t.Errorf("zero256u2 got=%v, want %v\n", b, wantb) + } +} +func TestZero(t *testing.T) { + testZero1(t) + testZero2(t) + testZero3(t) + testZero4(t) + testZero5(t) + testZero6(t) + testZero7(t) + testZero8(t) + testZero9(t) + testZero10(t) + testZero15(t) + testZero16(t) + testZero17(t) + testZero23(t) + testZero24(t) + testZero25(t) + testZero31(t) + testZero32(t) + testZero33(t) + testZero63(t) + testZero64(t) + testZero65(t) + testZero1023(t) + testZero1024(t) + testZero1025(t) + testZero8u(t) + testZero16u(t) + testZero24u(t) + testZero32u(t) + testZero64u(t) + testZero256u(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/truncconst_test.go 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/truncconst_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7705042ca2c84f3ef1db4ff7833035bfd22d2854 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/truncconst_test.go @@ -0,0 +1,63 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import "testing" + +var f52want float64 = 1.0 / (1 << 52) +var f53want float64 = 1.0 / (1 << 53) + +func TestTruncFlt(t *testing.T) { + const f52 = 1 + 1.0/(1<<52) + const f53 = 1 + 1.0/(1<<53) + + if got := f52 - 1; got != f52want { + t.Errorf("f52-1 = %g, want %g", got, f52want) + } + if got := float64(f52) - 1; got != f52want { + t.Errorf("float64(f52)-1 = %g, want %g", got, f52want) + } + if got := f53 - 1; got != f53want { + t.Errorf("f53-1 = %g, want %g", got, f53want) + } + if got := float64(f53) - 1; got != 0 { + t.Errorf("float64(f53)-1 = %g, want 0", got) + } +} + +func TestTruncCmplx(t *testing.T) { + const r52 = complex(1+1.0/(1<<52), 0) + const r53 = complex(1+1.0/(1<<53), 0) + + if got := real(r52 - 1); got != f52want { + t.Errorf("real(r52-1) = %g, want %g", got, f52want) + } + if got := real(complex128(r52) - 1); got != f52want { + t.Errorf("real(complex128(r52)-1) = %g, want %g", got, f52want) + } + if got := real(r53 - 1); got != f53want { + t.Errorf("real(r53-1) = %g, want %g", got, f53want) + } + if got := real(complex128(r53) - 1); got != 0 { + t.Errorf("real(complex128(r53)-1) = %g, want 0", got) + } + + const i52 = complex(0, 1+1.0/(1<<52)) + const i53 = complex(0, 1+1.0/(1<<53)) + + if got := imag(i52 - 1i); got != f52want { + t.Errorf("imag(i52-1i) = %g, want %g", got, f52want) + } + if got := imag(complex128(i52) - 1i); got != f52want { + t.Errorf("imag(complex128(i52)-1i) = %g, want %g", got, f52want) + } + if got := imag(i53 - 1i); got != f53want { + 
t.Errorf("imag(i53-1i) = %g, want %g", got, f53want) + } + if got := imag(complex128(i53) - 1i); got != 0 { + t.Errorf("imag(complex128(i53)-1i) = %g, want 0", got) + } + +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/zerorange_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/zerorange_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e92b5d342fe136348e2c82d9c33eeb27702c3869 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/test/zerorange_test.go @@ -0,0 +1,184 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "testing" +) + +var glob = 3 +var globp *int64 + +// Testing compilation of arch.ZeroRange of various sizes. + +// By storing a pointer to an int64 output param in a global, the compiler must +// ensure that output param is allocated on the heap. Also, since there is a +// defer, the pointer to each output param must be zeroed in the prologue (see +// plive.go:epilogue()). So, we will get a block of one or more stack slots that +// need to be zeroed. Hence, we are testing compilation completes successfully when +// zerorange calls of various sizes (8-136 bytes) are generated. We are not +// testing runtime correctness (which is hard to do for the current uses of +// ZeroRange). 
+ +func TestZeroRange(t *testing.T) { + testZeroRange8(t) + testZeroRange16(t) + testZeroRange32(t) + testZeroRange64(t) + testZeroRange136(t) +} + +func testZeroRange8(t *testing.T) (r int64) { + defer func() { + glob = 4 + }() + globp = &r + return +} + +func testZeroRange16(t *testing.T) (r, s int64) { + defer func() { + glob = 4 + }() + globp = &r + globp = &s + return +} + +func testZeroRange32(t *testing.T) (r, s, t2, u int64) { + defer func() { + glob = 4 + }() + globp = &r + globp = &s + globp = &t2 + globp = &u + return +} + +func testZeroRange64(t *testing.T) (r, s, t2, u, v, w, x, y int64) { + defer func() { + glob = 4 + }() + globp = &r + globp = &s + globp = &t2 + globp = &u + globp = &v + globp = &w + globp = &x + globp = &y + return +} + +func testZeroRange136(t *testing.T) (r, s, t2, u, v, w, x, y, r1, s1, t1, u1, v1, w1, x1, y1, z1 int64) { + defer func() { + glob = 4 + }() + globp = &r + globp = &s + globp = &t2 + globp = &u + globp = &v + globp = &w + globp = &x + globp = &y + globp = &r1 + globp = &s1 + globp = &t1 + globp = &u1 + globp = &v1 + globp = &w1 + globp = &x1 + globp = &y1 + globp = &z1 + return +} + +type S struct { + x [2]uint64 + p *uint64 + y [2]uint64 + q uint64 +} + +type M struct { + x [8]uint64 + p *uint64 + y [8]uint64 + q uint64 +} + +type L struct { + x [4096]uint64 + p *uint64 + y [4096]uint64 + q uint64 +} + +//go:noinline +func triggerZerorangeLarge(f, g, h uint64) (rv0 uint64) { + ll := L{p: &f} + da := f + rv0 = f + g + h + defer func(dl L, i uint64) { + rv0 += dl.q + i + }(ll, da) + return rv0 +} + +//go:noinline +func triggerZerorangeMedium(f, g, h uint64) (rv0 uint64) { + ll := M{p: &f} + rv0 = f + g + h + defer func(dm M, i uint64) { + rv0 += dm.q + i + }(ll, f) + return rv0 +} + +//go:noinline +func triggerZerorangeSmall(f, g, h uint64) (rv0 uint64) { + ll := S{p: &f} + rv0 = f + g + h + defer func(ds S, i uint64) { + rv0 += ds.q + i + }(ll, f) + return rv0 +} + +// This test was created as a follow up to issue 
#45372, to help +// improve coverage of the compiler's arch-specific "zerorange" +// function, which is invoked to zero out ambiguously live portions of +// the stack frame in certain specific circumstances. +// +// In the current compiler implementation, for zerorange to be +// invoked, we need to have an ambiguously live variable that needs +// zeroing. One way to trigger this is to have a function with an +// open-coded defer, where the opendefer function has an argument that +// contains a pointer (this is what's used below). +// +// At the moment this test doesn't do any specific checking for +// code sequence, or verification that things were properly set to zero, +// this seems as though it would be too tricky and would result +// in a "brittle" test. +// +// The small/medium/large scenarios below are inspired by the amd64 +// implementation of zerorange, which generates different code +// depending on the size of the thing that needs to be zeroed out +// (I've verified at the time of the writing of this test that it +// exercises the various cases). +func TestZerorange45372(t *testing.T) { + if r := triggerZerorangeLarge(101, 303, 505); r != 1010 { + t.Errorf("large: wanted %d got %d", 1010, r) + } + if r := triggerZerorangeMedium(101, 303, 505); r != 1010 { + t.Errorf("medium: wanted %d got %d", 1010, r) + } + if r := triggerZerorangeSmall(101, 303, 505); r != 1010 { + t.Errorf("small: wanted %d got %d", 1010, r) + } + +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typebits/typebits.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typebits/typebits.go new file mode 100644 index 0000000000000000000000000000000000000000..b07f4374c24efdf5e5bb920a595519974b974a3e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typebits/typebits.go @@ -0,0 +1,96 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typebits + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/bitvec" + "cmd/compile/internal/types" +) + +// NOTE: The bitmap for a specific type t could be cached in t after +// the first run and then simply copied into bv at the correct offset +// on future calls with the same type t. +func Set(t *types.Type, off int64, bv bitvec.BitVec) { + set(t, off, bv, false) +} + +// SetNoCheck is like Set, but do not check for alignment. +func SetNoCheck(t *types.Type, off int64, bv bitvec.BitVec) { + set(t, off, bv, true) +} + +func set(t *types.Type, off int64, bv bitvec.BitVec, skip bool) { + if !skip && uint8(t.Alignment()) > 0 && off&int64(uint8(t.Alignment())-1) != 0 { + base.Fatalf("typebits.Set: invalid initial alignment: type %v has alignment %d, but offset is %v", t, uint8(t.Alignment()), off) + } + if !t.HasPointers() { + // Note: this case ensures that pointers to not-in-heap types + // are not considered pointers by garbage collection and stack copying. + return + } + + switch t.Kind() { + case types.TPTR, types.TUNSAFEPTR, types.TFUNC, types.TCHAN, types.TMAP: + if off&int64(types.PtrSize-1) != 0 { + base.Fatalf("typebits.Set: invalid alignment, %v", t) + } + bv.Set(int32(off / int64(types.PtrSize))) // pointer + + case types.TSTRING: + // struct { byte *str; intgo len; } + if off&int64(types.PtrSize-1) != 0 { + base.Fatalf("typebits.Set: invalid alignment, %v", t) + } + bv.Set(int32(off / int64(types.PtrSize))) //pointer in first slot + + case types.TINTER: + // struct { Itab *tab; void *data; } + // or, when isnilinter(t)==true: + // struct { Type *type; void *data; } + if off&int64(types.PtrSize-1) != 0 { + base.Fatalf("typebits.Set: invalid alignment, %v", t) + } + // The first word of an interface is a pointer, but we don't + // treat it as such. + // 1. 
If it is a non-empty interface, the pointer points to an itab + // which is always in persistentalloc space. + // 2. If it is an empty interface, the pointer points to a _type. + // a. If it is a compile-time-allocated type, it points into + // the read-only data section. + // b. If it is a reflect-allocated type, it points into the Go heap. + // Reflect is responsible for keeping a reference to + // the underlying type so it won't be GCd. + // If we ever have a moving GC, we need to change this for 2b (as + // well as scan itabs to update their itab._type fields). + bv.Set(int32(off/int64(types.PtrSize) + 1)) // pointer in second slot + + case types.TSLICE: + // struct { byte *array; uintgo len; uintgo cap; } + if off&int64(types.PtrSize-1) != 0 { + base.Fatalf("typebits.Set: invalid TARRAY alignment, %v", t) + } + bv.Set(int32(off / int64(types.PtrSize))) // pointer in first slot (BitsPointer) + + case types.TARRAY: + elt := t.Elem() + if elt.Size() == 0 { + // Short-circuit for #20739. + break + } + for i := int64(0); i < t.NumElem(); i++ { + set(elt, off, bv, skip) + off += elt.Size() + } + + case types.TSTRUCT: + for _, f := range t.Fields() { + set(f.Type, off+f.Offset, bv, skip) + } + + default: + base.Fatalf("typebits.Set: unexpected type, %v", t) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/_builtin/coverage.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/_builtin/coverage.go new file mode 100644 index 0000000000000000000000000000000000000000..02226356bc9873df39ecb1b7dc4107e1a73bccc4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/_builtin/coverage.go @@ -0,0 +1,13 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// NOTE: If you change this file you must run "go generate" +// to update builtin.go. 
This is not done automatically +// to avoid depending on having a working compiler binary. + +//go:build ignore + +package coverage + +func initHook(istest bool) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/_builtin/runtime.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/_builtin/runtime.go new file mode 100644 index 0000000000000000000000000000000000000000..421152967cd61db724c6a36df937271f5b592807 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/_builtin/runtime.go @@ -0,0 +1,286 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// NOTE: If you change this file you must run "go generate" +// to update builtin.go. This is not done automatically +// to avoid depending on having a working compiler binary. + +//go:build ignore + +package runtime + +// emitted by compiler, not referred to by go programs + +import "unsafe" + +func newobject(typ *byte) *any +func mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer +func panicdivide() +func panicshift() +func panicmakeslicelen() +func panicmakeslicecap() +func throwinit() +func panicwrap() + +func gopanic(interface{}) +func gorecover(*int32) interface{} +func goschedguarded() + +// Note: these declarations are just for wasm port. +// Other ports call assembly stubs instead. 
+func goPanicIndex(x int, y int) +func goPanicIndexU(x uint, y int) +func goPanicSliceAlen(x int, y int) +func goPanicSliceAlenU(x uint, y int) +func goPanicSliceAcap(x int, y int) +func goPanicSliceAcapU(x uint, y int) +func goPanicSliceB(x int, y int) +func goPanicSliceBU(x uint, y int) +func goPanicSlice3Alen(x int, y int) +func goPanicSlice3AlenU(x uint, y int) +func goPanicSlice3Acap(x int, y int) +func goPanicSlice3AcapU(x uint, y int) +func goPanicSlice3B(x int, y int) +func goPanicSlice3BU(x uint, y int) +func goPanicSlice3C(x int, y int) +func goPanicSlice3CU(x uint, y int) +func goPanicSliceConvert(x int, y int) + +func printbool(bool) +func printfloat(float64) +func printint(int64) +func printhex(uint64) +func printuint(uint64) +func printcomplex(complex128) +func printstring(string) +func printpointer(any) +func printuintptr(uintptr) +func printiface(any) +func printeface(any) +func printslice(any) +func printnl() +func printsp() +func printlock() +func printunlock() + +func concatstring2(*[32]byte, string, string) string +func concatstring3(*[32]byte, string, string, string) string +func concatstring4(*[32]byte, string, string, string, string) string +func concatstring5(*[32]byte, string, string, string, string, string) string +func concatstrings(*[32]byte, []string) string + +func cmpstring(string, string) int +func intstring(*[4]byte, int64) string +func slicebytetostring(buf *[32]byte, ptr *byte, n int) string +func slicebytetostringtmp(ptr *byte, n int) string +func slicerunetostring(*[32]byte, []rune) string +func stringtoslicebyte(*[32]byte, string) []byte +func stringtoslicerune(*[32]rune, string) []rune +func slicecopy(toPtr *any, toLen int, fromPtr *any, fromLen int, wid uintptr) int + +func decoderune(string, int) (retv rune, retk int) +func countrunes(string) int + +// Convert non-interface type to the data word of a (empty or nonempty) interface. 
+func convT(typ *byte, elem *any) unsafe.Pointer + +// Same as convT, for types with no pointers in them. +func convTnoptr(typ *byte, elem *any) unsafe.Pointer + +// Specialized versions of convT for specific types. +// These functions take concrete types in the runtime. But they may +// be used for a wider range of types, which have the same memory +// layout as the parameter type. The compiler converts the +// to-be-converted type to the parameter type before calling the +// runtime function. This way, the call is ABI-insensitive. +func convT16(val uint16) unsafe.Pointer +func convT32(val uint32) unsafe.Pointer +func convT64(val uint64) unsafe.Pointer +func convTstring(val string) unsafe.Pointer +func convTslice(val []uint8) unsafe.Pointer + +// interface type assertions x.(T) +func assertE2I(inter *byte, typ *byte) *byte +func assertE2I2(inter *byte, typ *byte) *byte +func panicdottypeE(have, want, iface *byte) +func panicdottypeI(have, want, iface *byte) +func panicnildottype(want *byte) +func typeAssert(s *byte, typ *byte) *byte + +// interface switches +func interfaceSwitch(s *byte, t *byte) (int, *byte) + +// interface equality. Type/itab pointers are already known to be equal, so +// we only need to pass one. 
+func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool) +func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool) + +// panic for iteration after exit in range func +func panicrangeexit() + +// defer in range over func +func deferrangefunc() interface{} + +func rand32() uint32 + +// *byte is really *runtime.Type +func makemap64(mapType *byte, hint int64, mapbuf *any) (hmap map[any]any) +func makemap(mapType *byte, hint int, mapbuf *any) (hmap map[any]any) +func makemap_small() (hmap map[any]any) +func mapaccess1(mapType *byte, hmap map[any]any, key *any) (val *any) +func mapaccess1_fast32(mapType *byte, hmap map[any]any, key uint32) (val *any) +func mapaccess1_fast64(mapType *byte, hmap map[any]any, key uint64) (val *any) +func mapaccess1_faststr(mapType *byte, hmap map[any]any, key string) (val *any) +func mapaccess1_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any) +func mapaccess2(mapType *byte, hmap map[any]any, key *any) (val *any, pres bool) +func mapaccess2_fast32(mapType *byte, hmap map[any]any, key uint32) (val *any, pres bool) +func mapaccess2_fast64(mapType *byte, hmap map[any]any, key uint64) (val *any, pres bool) +func mapaccess2_faststr(mapType *byte, hmap map[any]any, key string) (val *any, pres bool) +func mapaccess2_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any, pres bool) +func mapassign(mapType *byte, hmap map[any]any, key *any) (val *any) +func mapassign_fast32(mapType *byte, hmap map[any]any, key uint32) (val *any) +func mapassign_fast32ptr(mapType *byte, hmap map[any]any, key unsafe.Pointer) (val *any) +func mapassign_fast64(mapType *byte, hmap map[any]any, key uint64) (val *any) +func mapassign_fast64ptr(mapType *byte, hmap map[any]any, key unsafe.Pointer) (val *any) +func mapassign_faststr(mapType *byte, hmap map[any]any, key string) (val *any) +func mapiterinit(mapType *byte, hmap map[any]any, hiter *any) +func mapdelete(mapType *byte, hmap map[any]any, key *any) +func 
mapdelete_fast32(mapType *byte, hmap map[any]any, key uint32) +func mapdelete_fast64(mapType *byte, hmap map[any]any, key uint64) +func mapdelete_faststr(mapType *byte, hmap map[any]any, key string) +func mapiternext(hiter *any) +func mapclear(mapType *byte, hmap map[any]any) + +// *byte is really *runtime.Type +func makechan64(chanType *byte, size int64) (hchan chan any) +func makechan(chanType *byte, size int) (hchan chan any) +func chanrecv1(hchan <-chan any, elem *any) +func chanrecv2(hchan <-chan any, elem *any) bool +func chansend1(hchan chan<- any, elem *any) +func closechan(hchan any) + +var writeBarrier struct { + enabled bool + pad [3]byte + cgo bool + alignme uint64 +} + +// *byte is really *runtime.Type +func typedmemmove(typ *byte, dst *any, src *any) +func typedmemclr(typ *byte, dst *any) +func typedslicecopy(typ *byte, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int + +func selectnbsend(hchan chan<- any, elem *any) bool +func selectnbrecv(elem *any, hchan <-chan any) (bool, bool) + +func selectsetpc(pc *uintptr) +func selectgo(cas0 *byte, order0 *byte, pc0 *uintptr, nsends int, nrecvs int, block bool) (int, bool) +func block() + +func makeslice(typ *byte, len int, cap int) unsafe.Pointer +func makeslice64(typ *byte, len int64, cap int64) unsafe.Pointer +func makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer +func growslice(oldPtr *any, newLen, oldCap, num int, et *byte) (ary []any) +func unsafeslicecheckptr(typ *byte, ptr unsafe.Pointer, len int64) +func panicunsafeslicelen() +func panicunsafeslicenilptr() +func unsafestringcheckptr(ptr unsafe.Pointer, len int64) +func panicunsafestringlen() +func panicunsafestringnilptr() + +func memmove(to *any, frm *any, length uintptr) +func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) +func memclrHasPointers(ptr unsafe.Pointer, n uintptr) + +func memequal(x, y *any, size uintptr) bool +func memequal0(x, y *any) bool +func memequal8(x, y *any) bool +func 
memequal16(x, y *any) bool +func memequal32(x, y *any) bool +func memequal64(x, y *any) bool +func memequal128(x, y *any) bool +func f32equal(p, q unsafe.Pointer) bool +func f64equal(p, q unsafe.Pointer) bool +func c64equal(p, q unsafe.Pointer) bool +func c128equal(p, q unsafe.Pointer) bool +func strequal(p, q unsafe.Pointer) bool +func interequal(p, q unsafe.Pointer) bool +func nilinterequal(p, q unsafe.Pointer) bool + +func memhash(x *any, h uintptr, size uintptr) uintptr +func memhash0(p unsafe.Pointer, h uintptr) uintptr +func memhash8(p unsafe.Pointer, h uintptr) uintptr +func memhash16(p unsafe.Pointer, h uintptr) uintptr +func memhash32(p unsafe.Pointer, h uintptr) uintptr +func memhash64(p unsafe.Pointer, h uintptr) uintptr +func memhash128(p unsafe.Pointer, h uintptr) uintptr +func f32hash(p *any, h uintptr) uintptr +func f64hash(p *any, h uintptr) uintptr +func c64hash(p *any, h uintptr) uintptr +func c128hash(p *any, h uintptr) uintptr +func strhash(a *any, h uintptr) uintptr +func interhash(p *any, h uintptr) uintptr +func nilinterhash(p *any, h uintptr) uintptr + +// only used on 32-bit +func int64div(int64, int64) int64 +func uint64div(uint64, uint64) uint64 +func int64mod(int64, int64) int64 +func uint64mod(uint64, uint64) uint64 +func float64toint64(float64) int64 +func float64touint64(float64) uint64 +func float64touint32(float64) uint32 +func int64tofloat64(int64) float64 +func int64tofloat32(int64) float32 +func uint64tofloat64(uint64) float64 +func uint64tofloat32(uint64) float32 +func uint32tofloat64(uint32) float64 + +func complex128div(num complex128, den complex128) (quo complex128) + +func getcallerpc() uintptr +func getcallersp() uintptr + +// race detection +func racefuncenter(uintptr) +func racefuncexit() +func raceread(uintptr) +func racewrite(uintptr) +func racereadrange(addr, size uintptr) +func racewriterange(addr, size uintptr) + +// memory sanitizer +func msanread(addr, size uintptr) +func msanwrite(addr, size uintptr) +func 
msanmove(dst, src, size uintptr) + +// address sanitizer +func asanread(addr, size uintptr) +func asanwrite(addr, size uintptr) + +func checkptrAlignment(unsafe.Pointer, *byte, uintptr) +func checkptrArithmetic(unsafe.Pointer, []unsafe.Pointer) + +func libfuzzerTraceCmp1(uint8, uint8, uint) +func libfuzzerTraceCmp2(uint16, uint16, uint) +func libfuzzerTraceCmp4(uint32, uint32, uint) +func libfuzzerTraceCmp8(uint64, uint64, uint) +func libfuzzerTraceConstCmp1(uint8, uint8, uint) +func libfuzzerTraceConstCmp2(uint16, uint16, uint) +func libfuzzerTraceConstCmp4(uint32, uint32, uint) +func libfuzzerTraceConstCmp8(uint64, uint64, uint) +func libfuzzerHookStrCmp(string, string, uint) +func libfuzzerHookEqualFold(string, string, uint) + +func addCovMeta(p unsafe.Pointer, len uint32, hash [16]byte, pkpath string, pkgId int, cmode uint8, cgran uint8) uint32 + +// architecture variants +var x86HasPOPCNT bool +var x86HasSSE41 bool +var x86HasFMA bool +var armHasVFPv4 bool +var arm64HasATOMICS bool + +func asanregisterglobals(unsafe.Pointer, uintptr) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/bexport.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/bexport.go new file mode 100644 index 0000000000000000000000000000000000000000..ed9a0114aff4e902d0e82c4059b3b7f9b3299f33 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/bexport.go @@ -0,0 +1,16 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typecheck + +// Tags. Must be < 0. 
+const ( + // Objects + packageTag = -(iota + 1) + constTag + typeTag + varTag + funcTag + endTag +) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/builtin.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/builtin.go new file mode 100644 index 0000000000000000000000000000000000000000..09f60c68c0fb9dd17642ac9e5bcb01c4dc401d1f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/builtin.go @@ -0,0 +1,408 @@ +// Code generated by mkbuiltin.go. DO NOT EDIT. + +package typecheck + +import ( + "cmd/compile/internal/types" + "cmd/internal/src" +) + +// Not inlining this function removes a significant chunk of init code. +// +//go:noinline +func newSig(params, results []*types.Field) *types.Type { + return types.NewSignature(nil, params, results) +} + +func params(tlist ...*types.Type) []*types.Field { + flist := make([]*types.Field, len(tlist)) + for i, typ := range tlist { + flist[i] = types.NewField(src.NoXPos, nil, typ) + } + return flist +} + +var runtimeDecls = [...]struct { + name string + tag int + typ int +}{ + {"newobject", funcTag, 4}, + {"mallocgc", funcTag, 8}, + {"panicdivide", funcTag, 9}, + {"panicshift", funcTag, 9}, + {"panicmakeslicelen", funcTag, 9}, + {"panicmakeslicecap", funcTag, 9}, + {"throwinit", funcTag, 9}, + {"panicwrap", funcTag, 9}, + {"gopanic", funcTag, 11}, + {"gorecover", funcTag, 14}, + {"goschedguarded", funcTag, 9}, + {"goPanicIndex", funcTag, 16}, + {"goPanicIndexU", funcTag, 18}, + {"goPanicSliceAlen", funcTag, 16}, + {"goPanicSliceAlenU", funcTag, 18}, + {"goPanicSliceAcap", funcTag, 16}, + {"goPanicSliceAcapU", funcTag, 18}, + {"goPanicSliceB", funcTag, 16}, + {"goPanicSliceBU", funcTag, 18}, + {"goPanicSlice3Alen", funcTag, 16}, + {"goPanicSlice3AlenU", funcTag, 18}, + {"goPanicSlice3Acap", funcTag, 16}, + {"goPanicSlice3AcapU", funcTag, 18}, + {"goPanicSlice3B", funcTag, 16}, + {"goPanicSlice3BU", funcTag, 18}, + {"goPanicSlice3C", funcTag, 16}, + 
{"goPanicSlice3CU", funcTag, 18}, + {"goPanicSliceConvert", funcTag, 16}, + {"printbool", funcTag, 19}, + {"printfloat", funcTag, 21}, + {"printint", funcTag, 23}, + {"printhex", funcTag, 25}, + {"printuint", funcTag, 25}, + {"printcomplex", funcTag, 27}, + {"printstring", funcTag, 29}, + {"printpointer", funcTag, 30}, + {"printuintptr", funcTag, 31}, + {"printiface", funcTag, 30}, + {"printeface", funcTag, 30}, + {"printslice", funcTag, 30}, + {"printnl", funcTag, 9}, + {"printsp", funcTag, 9}, + {"printlock", funcTag, 9}, + {"printunlock", funcTag, 9}, + {"concatstring2", funcTag, 34}, + {"concatstring3", funcTag, 35}, + {"concatstring4", funcTag, 36}, + {"concatstring5", funcTag, 37}, + {"concatstrings", funcTag, 39}, + {"cmpstring", funcTag, 40}, + {"intstring", funcTag, 43}, + {"slicebytetostring", funcTag, 44}, + {"slicebytetostringtmp", funcTag, 45}, + {"slicerunetostring", funcTag, 48}, + {"stringtoslicebyte", funcTag, 50}, + {"stringtoslicerune", funcTag, 53}, + {"slicecopy", funcTag, 54}, + {"decoderune", funcTag, 55}, + {"countrunes", funcTag, 56}, + {"convT", funcTag, 57}, + {"convTnoptr", funcTag, 57}, + {"convT16", funcTag, 59}, + {"convT32", funcTag, 61}, + {"convT64", funcTag, 62}, + {"convTstring", funcTag, 63}, + {"convTslice", funcTag, 66}, + {"assertE2I", funcTag, 67}, + {"assertE2I2", funcTag, 67}, + {"panicdottypeE", funcTag, 68}, + {"panicdottypeI", funcTag, 68}, + {"panicnildottype", funcTag, 69}, + {"typeAssert", funcTag, 67}, + {"interfaceSwitch", funcTag, 70}, + {"ifaceeq", funcTag, 72}, + {"efaceeq", funcTag, 72}, + {"panicrangeexit", funcTag, 9}, + {"deferrangefunc", funcTag, 73}, + {"rand32", funcTag, 74}, + {"makemap64", funcTag, 76}, + {"makemap", funcTag, 77}, + {"makemap_small", funcTag, 78}, + {"mapaccess1", funcTag, 79}, + {"mapaccess1_fast32", funcTag, 80}, + {"mapaccess1_fast64", funcTag, 81}, + {"mapaccess1_faststr", funcTag, 82}, + {"mapaccess1_fat", funcTag, 83}, + {"mapaccess2", funcTag, 84}, + {"mapaccess2_fast32", 
funcTag, 85}, + {"mapaccess2_fast64", funcTag, 86}, + {"mapaccess2_faststr", funcTag, 87}, + {"mapaccess2_fat", funcTag, 88}, + {"mapassign", funcTag, 79}, + {"mapassign_fast32", funcTag, 80}, + {"mapassign_fast32ptr", funcTag, 89}, + {"mapassign_fast64", funcTag, 81}, + {"mapassign_fast64ptr", funcTag, 89}, + {"mapassign_faststr", funcTag, 82}, + {"mapiterinit", funcTag, 90}, + {"mapdelete", funcTag, 90}, + {"mapdelete_fast32", funcTag, 91}, + {"mapdelete_fast64", funcTag, 92}, + {"mapdelete_faststr", funcTag, 93}, + {"mapiternext", funcTag, 94}, + {"mapclear", funcTag, 95}, + {"makechan64", funcTag, 97}, + {"makechan", funcTag, 98}, + {"chanrecv1", funcTag, 100}, + {"chanrecv2", funcTag, 101}, + {"chansend1", funcTag, 103}, + {"closechan", funcTag, 30}, + {"writeBarrier", varTag, 105}, + {"typedmemmove", funcTag, 106}, + {"typedmemclr", funcTag, 107}, + {"typedslicecopy", funcTag, 108}, + {"selectnbsend", funcTag, 109}, + {"selectnbrecv", funcTag, 110}, + {"selectsetpc", funcTag, 111}, + {"selectgo", funcTag, 112}, + {"block", funcTag, 9}, + {"makeslice", funcTag, 113}, + {"makeslice64", funcTag, 114}, + {"makeslicecopy", funcTag, 115}, + {"growslice", funcTag, 117}, + {"unsafeslicecheckptr", funcTag, 118}, + {"panicunsafeslicelen", funcTag, 9}, + {"panicunsafeslicenilptr", funcTag, 9}, + {"unsafestringcheckptr", funcTag, 119}, + {"panicunsafestringlen", funcTag, 9}, + {"panicunsafestringnilptr", funcTag, 9}, + {"memmove", funcTag, 120}, + {"memclrNoHeapPointers", funcTag, 121}, + {"memclrHasPointers", funcTag, 121}, + {"memequal", funcTag, 122}, + {"memequal0", funcTag, 123}, + {"memequal8", funcTag, 123}, + {"memequal16", funcTag, 123}, + {"memequal32", funcTag, 123}, + {"memequal64", funcTag, 123}, + {"memequal128", funcTag, 123}, + {"f32equal", funcTag, 124}, + {"f64equal", funcTag, 124}, + {"c64equal", funcTag, 124}, + {"c128equal", funcTag, 124}, + {"strequal", funcTag, 124}, + {"interequal", funcTag, 124}, + {"nilinterequal", funcTag, 124}, + {"memhash", 
funcTag, 125}, + {"memhash0", funcTag, 126}, + {"memhash8", funcTag, 126}, + {"memhash16", funcTag, 126}, + {"memhash32", funcTag, 126}, + {"memhash64", funcTag, 126}, + {"memhash128", funcTag, 126}, + {"f32hash", funcTag, 127}, + {"f64hash", funcTag, 127}, + {"c64hash", funcTag, 127}, + {"c128hash", funcTag, 127}, + {"strhash", funcTag, 127}, + {"interhash", funcTag, 127}, + {"nilinterhash", funcTag, 127}, + {"int64div", funcTag, 128}, + {"uint64div", funcTag, 129}, + {"int64mod", funcTag, 128}, + {"uint64mod", funcTag, 129}, + {"float64toint64", funcTag, 130}, + {"float64touint64", funcTag, 131}, + {"float64touint32", funcTag, 132}, + {"int64tofloat64", funcTag, 133}, + {"int64tofloat32", funcTag, 135}, + {"uint64tofloat64", funcTag, 136}, + {"uint64tofloat32", funcTag, 137}, + {"uint32tofloat64", funcTag, 138}, + {"complex128div", funcTag, 139}, + {"getcallerpc", funcTag, 140}, + {"getcallersp", funcTag, 140}, + {"racefuncenter", funcTag, 31}, + {"racefuncexit", funcTag, 9}, + {"raceread", funcTag, 31}, + {"racewrite", funcTag, 31}, + {"racereadrange", funcTag, 141}, + {"racewriterange", funcTag, 141}, + {"msanread", funcTag, 141}, + {"msanwrite", funcTag, 141}, + {"msanmove", funcTag, 142}, + {"asanread", funcTag, 141}, + {"asanwrite", funcTag, 141}, + {"checkptrAlignment", funcTag, 143}, + {"checkptrArithmetic", funcTag, 145}, + {"libfuzzerTraceCmp1", funcTag, 146}, + {"libfuzzerTraceCmp2", funcTag, 147}, + {"libfuzzerTraceCmp4", funcTag, 148}, + {"libfuzzerTraceCmp8", funcTag, 149}, + {"libfuzzerTraceConstCmp1", funcTag, 146}, + {"libfuzzerTraceConstCmp2", funcTag, 147}, + {"libfuzzerTraceConstCmp4", funcTag, 148}, + {"libfuzzerTraceConstCmp8", funcTag, 149}, + {"libfuzzerHookStrCmp", funcTag, 150}, + {"libfuzzerHookEqualFold", funcTag, 150}, + {"addCovMeta", funcTag, 152}, + {"x86HasPOPCNT", varTag, 6}, + {"x86HasSSE41", varTag, 6}, + {"x86HasFMA", varTag, 6}, + {"armHasVFPv4", varTag, 6}, + {"arm64HasATOMICS", varTag, 6}, + {"asanregisterglobals", funcTag, 
121}, +} + +func runtimeTypes() []*types.Type { + var typs [153]*types.Type + typs[0] = types.ByteType + typs[1] = types.NewPtr(typs[0]) + typs[2] = types.Types[types.TANY] + typs[3] = types.NewPtr(typs[2]) + typs[4] = newSig(params(typs[1]), params(typs[3])) + typs[5] = types.Types[types.TUINTPTR] + typs[6] = types.Types[types.TBOOL] + typs[7] = types.Types[types.TUNSAFEPTR] + typs[8] = newSig(params(typs[5], typs[1], typs[6]), params(typs[7])) + typs[9] = newSig(nil, nil) + typs[10] = types.Types[types.TINTER] + typs[11] = newSig(params(typs[10]), nil) + typs[12] = types.Types[types.TINT32] + typs[13] = types.NewPtr(typs[12]) + typs[14] = newSig(params(typs[13]), params(typs[10])) + typs[15] = types.Types[types.TINT] + typs[16] = newSig(params(typs[15], typs[15]), nil) + typs[17] = types.Types[types.TUINT] + typs[18] = newSig(params(typs[17], typs[15]), nil) + typs[19] = newSig(params(typs[6]), nil) + typs[20] = types.Types[types.TFLOAT64] + typs[21] = newSig(params(typs[20]), nil) + typs[22] = types.Types[types.TINT64] + typs[23] = newSig(params(typs[22]), nil) + typs[24] = types.Types[types.TUINT64] + typs[25] = newSig(params(typs[24]), nil) + typs[26] = types.Types[types.TCOMPLEX128] + typs[27] = newSig(params(typs[26]), nil) + typs[28] = types.Types[types.TSTRING] + typs[29] = newSig(params(typs[28]), nil) + typs[30] = newSig(params(typs[2]), nil) + typs[31] = newSig(params(typs[5]), nil) + typs[32] = types.NewArray(typs[0], 32) + typs[33] = types.NewPtr(typs[32]) + typs[34] = newSig(params(typs[33], typs[28], typs[28]), params(typs[28])) + typs[35] = newSig(params(typs[33], typs[28], typs[28], typs[28]), params(typs[28])) + typs[36] = newSig(params(typs[33], typs[28], typs[28], typs[28], typs[28]), params(typs[28])) + typs[37] = newSig(params(typs[33], typs[28], typs[28], typs[28], typs[28], typs[28]), params(typs[28])) + typs[38] = types.NewSlice(typs[28]) + typs[39] = newSig(params(typs[33], typs[38]), params(typs[28])) + typs[40] = newSig(params(typs[28], 
typs[28]), params(typs[15])) + typs[41] = types.NewArray(typs[0], 4) + typs[42] = types.NewPtr(typs[41]) + typs[43] = newSig(params(typs[42], typs[22]), params(typs[28])) + typs[44] = newSig(params(typs[33], typs[1], typs[15]), params(typs[28])) + typs[45] = newSig(params(typs[1], typs[15]), params(typs[28])) + typs[46] = types.RuneType + typs[47] = types.NewSlice(typs[46]) + typs[48] = newSig(params(typs[33], typs[47]), params(typs[28])) + typs[49] = types.NewSlice(typs[0]) + typs[50] = newSig(params(typs[33], typs[28]), params(typs[49])) + typs[51] = types.NewArray(typs[46], 32) + typs[52] = types.NewPtr(typs[51]) + typs[53] = newSig(params(typs[52], typs[28]), params(typs[47])) + typs[54] = newSig(params(typs[3], typs[15], typs[3], typs[15], typs[5]), params(typs[15])) + typs[55] = newSig(params(typs[28], typs[15]), params(typs[46], typs[15])) + typs[56] = newSig(params(typs[28]), params(typs[15])) + typs[57] = newSig(params(typs[1], typs[3]), params(typs[7])) + typs[58] = types.Types[types.TUINT16] + typs[59] = newSig(params(typs[58]), params(typs[7])) + typs[60] = types.Types[types.TUINT32] + typs[61] = newSig(params(typs[60]), params(typs[7])) + typs[62] = newSig(params(typs[24]), params(typs[7])) + typs[63] = newSig(params(typs[28]), params(typs[7])) + typs[64] = types.Types[types.TUINT8] + typs[65] = types.NewSlice(typs[64]) + typs[66] = newSig(params(typs[65]), params(typs[7])) + typs[67] = newSig(params(typs[1], typs[1]), params(typs[1])) + typs[68] = newSig(params(typs[1], typs[1], typs[1]), nil) + typs[69] = newSig(params(typs[1]), nil) + typs[70] = newSig(params(typs[1], typs[1]), params(typs[15], typs[1])) + typs[71] = types.NewPtr(typs[5]) + typs[72] = newSig(params(typs[71], typs[7], typs[7]), params(typs[6])) + typs[73] = newSig(nil, params(typs[10])) + typs[74] = newSig(nil, params(typs[60])) + typs[75] = types.NewMap(typs[2], typs[2]) + typs[76] = newSig(params(typs[1], typs[22], typs[3]), params(typs[75])) + typs[77] = newSig(params(typs[1], 
typs[15], typs[3]), params(typs[75])) + typs[78] = newSig(nil, params(typs[75])) + typs[79] = newSig(params(typs[1], typs[75], typs[3]), params(typs[3])) + typs[80] = newSig(params(typs[1], typs[75], typs[60]), params(typs[3])) + typs[81] = newSig(params(typs[1], typs[75], typs[24]), params(typs[3])) + typs[82] = newSig(params(typs[1], typs[75], typs[28]), params(typs[3])) + typs[83] = newSig(params(typs[1], typs[75], typs[3], typs[1]), params(typs[3])) + typs[84] = newSig(params(typs[1], typs[75], typs[3]), params(typs[3], typs[6])) + typs[85] = newSig(params(typs[1], typs[75], typs[60]), params(typs[3], typs[6])) + typs[86] = newSig(params(typs[1], typs[75], typs[24]), params(typs[3], typs[6])) + typs[87] = newSig(params(typs[1], typs[75], typs[28]), params(typs[3], typs[6])) + typs[88] = newSig(params(typs[1], typs[75], typs[3], typs[1]), params(typs[3], typs[6])) + typs[89] = newSig(params(typs[1], typs[75], typs[7]), params(typs[3])) + typs[90] = newSig(params(typs[1], typs[75], typs[3]), nil) + typs[91] = newSig(params(typs[1], typs[75], typs[60]), nil) + typs[92] = newSig(params(typs[1], typs[75], typs[24]), nil) + typs[93] = newSig(params(typs[1], typs[75], typs[28]), nil) + typs[94] = newSig(params(typs[3]), nil) + typs[95] = newSig(params(typs[1], typs[75]), nil) + typs[96] = types.NewChan(typs[2], types.Cboth) + typs[97] = newSig(params(typs[1], typs[22]), params(typs[96])) + typs[98] = newSig(params(typs[1], typs[15]), params(typs[96])) + typs[99] = types.NewChan(typs[2], types.Crecv) + typs[100] = newSig(params(typs[99], typs[3]), nil) + typs[101] = newSig(params(typs[99], typs[3]), params(typs[6])) + typs[102] = types.NewChan(typs[2], types.Csend) + typs[103] = newSig(params(typs[102], typs[3]), nil) + typs[104] = types.NewArray(typs[0], 3) + typs[105] = types.NewStruct([]*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[104]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), 
types.NewField(src.NoXPos, Lookup("alignme"), typs[24])}) + typs[106] = newSig(params(typs[1], typs[3], typs[3]), nil) + typs[107] = newSig(params(typs[1], typs[3]), nil) + typs[108] = newSig(params(typs[1], typs[3], typs[15], typs[3], typs[15]), params(typs[15])) + typs[109] = newSig(params(typs[102], typs[3]), params(typs[6])) + typs[110] = newSig(params(typs[3], typs[99]), params(typs[6], typs[6])) + typs[111] = newSig(params(typs[71]), nil) + typs[112] = newSig(params(typs[1], typs[1], typs[71], typs[15], typs[15], typs[6]), params(typs[15], typs[6])) + typs[113] = newSig(params(typs[1], typs[15], typs[15]), params(typs[7])) + typs[114] = newSig(params(typs[1], typs[22], typs[22]), params(typs[7])) + typs[115] = newSig(params(typs[1], typs[15], typs[15], typs[7]), params(typs[7])) + typs[116] = types.NewSlice(typs[2]) + typs[117] = newSig(params(typs[3], typs[15], typs[15], typs[15], typs[1]), params(typs[116])) + typs[118] = newSig(params(typs[1], typs[7], typs[22]), nil) + typs[119] = newSig(params(typs[7], typs[22]), nil) + typs[120] = newSig(params(typs[3], typs[3], typs[5]), nil) + typs[121] = newSig(params(typs[7], typs[5]), nil) + typs[122] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6])) + typs[123] = newSig(params(typs[3], typs[3]), params(typs[6])) + typs[124] = newSig(params(typs[7], typs[7]), params(typs[6])) + typs[125] = newSig(params(typs[3], typs[5], typs[5]), params(typs[5])) + typs[126] = newSig(params(typs[7], typs[5]), params(typs[5])) + typs[127] = newSig(params(typs[3], typs[5]), params(typs[5])) + typs[128] = newSig(params(typs[22], typs[22]), params(typs[22])) + typs[129] = newSig(params(typs[24], typs[24]), params(typs[24])) + typs[130] = newSig(params(typs[20]), params(typs[22])) + typs[131] = newSig(params(typs[20]), params(typs[24])) + typs[132] = newSig(params(typs[20]), params(typs[60])) + typs[133] = newSig(params(typs[22]), params(typs[20])) + typs[134] = types.Types[types.TFLOAT32] + typs[135] = 
newSig(params(typs[22]), params(typs[134])) + typs[136] = newSig(params(typs[24]), params(typs[20])) + typs[137] = newSig(params(typs[24]), params(typs[134])) + typs[138] = newSig(params(typs[60]), params(typs[20])) + typs[139] = newSig(params(typs[26], typs[26]), params(typs[26])) + typs[140] = newSig(nil, params(typs[5])) + typs[141] = newSig(params(typs[5], typs[5]), nil) + typs[142] = newSig(params(typs[5], typs[5], typs[5]), nil) + typs[143] = newSig(params(typs[7], typs[1], typs[5]), nil) + typs[144] = types.NewSlice(typs[7]) + typs[145] = newSig(params(typs[7], typs[144]), nil) + typs[146] = newSig(params(typs[64], typs[64], typs[17]), nil) + typs[147] = newSig(params(typs[58], typs[58], typs[17]), nil) + typs[148] = newSig(params(typs[60], typs[60], typs[17]), nil) + typs[149] = newSig(params(typs[24], typs[24], typs[17]), nil) + typs[150] = newSig(params(typs[28], typs[28], typs[17]), nil) + typs[151] = types.NewArray(typs[0], 16) + typs[152] = newSig(params(typs[7], typs[60], typs[151], typs[28], typs[15], typs[64], typs[64]), params(typs[60])) + return typs[:] +} + +var coverageDecls = [...]struct { + name string + tag int + typ int +}{ + {"initHook", funcTag, 1}, +} + +func coverageTypes() []*types.Type { + var typs [2]*types.Type + typs[0] = types.Types[types.TBOOL] + typs[1] = newSig(params(typs[0]), nil) + return typs[:] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/builtin_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/builtin_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3c0d6b81712ead54763294be4f3b481b8f12f0d9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/builtin_test.go @@ -0,0 +1,31 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package typecheck + +import ( + "bytes" + "internal/testenv" + "os" + "testing" +) + +func TestBuiltin(t *testing.T) { + testenv.MustHaveGoRun(t) + t.Parallel() + + old, err := os.ReadFile("builtin.go") + if err != nil { + t.Fatal(err) + } + + new, err := testenv.Command(t, testenv.GoToolPath(t), "run", "mkbuiltin.go", "-stdout").Output() + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(old, new) { + t.Fatal("builtin.go out of date; run mkbuiltin.go") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/const.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/const.go new file mode 100644 index 0000000000000000000000000000000000000000..e7f9ec5cd81a31cd6aa8869cf9e119c2f5dd32c9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/const.go @@ -0,0 +1,486 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typecheck + +import ( + "fmt" + "go/constant" + "go/token" + "math" + "math/big" + "unicode" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" +) + +func roundFloat(v constant.Value, sz int64) constant.Value { + switch sz { + case 4: + f, _ := constant.Float32Val(v) + return makeFloat64(float64(f)) + case 8: + f, _ := constant.Float64Val(v) + return makeFloat64(f) + } + base.Fatalf("unexpected size: %v", sz) + panic("unreachable") +} + +// truncate float literal fv to 32-bit or 64-bit precision +// according to type; return truncated value. +func truncfltlit(v constant.Value, t *types.Type) constant.Value { + if t.IsUntyped() { + return v + } + + return roundFloat(v, t.Size()) +} + +// truncate Real and Imag parts of Mpcplx to 32-bit or 64-bit +// precision, according to type; return truncated value. In case of +// overflow, calls Errorf but does not truncate the input value. 
+func trunccmplxlit(v constant.Value, t *types.Type) constant.Value { + if t.IsUntyped() { + return v + } + + fsz := t.Size() / 2 + return makeComplex(roundFloat(constant.Real(v), fsz), roundFloat(constant.Imag(v), fsz)) +} + +// TODO(mdempsky): Replace these with better APIs. +func convlit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) } +func DefaultLit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) } + +// convlit1 converts an untyped expression n to type t. If n already +// has a type, convlit1 has no effect. +// +// For explicit conversions, t must be non-nil, and integer-to-string +// conversions are allowed. +// +// For implicit conversions (e.g., assignments), t may be nil; if so, +// n is converted to its default type. +// +// If there's an error converting n to t, context is used in the error +// message. +func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir.Node { + if explicit && t == nil { + base.Fatalf("explicit conversion missing type") + } + if t != nil && t.IsUntyped() { + base.Fatalf("bad conversion to untyped: %v", t) + } + + if n == nil || n.Type() == nil { + // Allow sloppy callers. + return n + } + if !n.Type().IsUntyped() { + // Already typed; nothing to do. + return n + } + + // Nil is technically not a constant, so handle it specially. + if n.Type().Kind() == types.TNIL { + if n.Op() != ir.ONIL { + base.Fatalf("unexpected op: %v (%v)", n, n.Op()) + } + n = ir.Copy(n) + if t == nil { + base.Fatalf("use of untyped nil") + } + + if !t.HasNil() { + // Leave for caller to handle. 
+ return n + } + + n.SetType(t) + return n + } + + if t == nil || !ir.OKForConst[t.Kind()] { + t = defaultType(n.Type()) + } + + switch n.Op() { + default: + base.Fatalf("unexpected untyped expression: %v", n) + + case ir.OLITERAL: + v := ConvertVal(n.Val(), t, explicit) + if v.Kind() == constant.Unknown { + n = ir.NewConstExpr(n.Val(), n) + break + } + n = ir.NewConstExpr(v, n) + n.SetType(t) + return n + + case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.OREAL, ir.OIMAG: + ot := operandType(n.Op(), t) + if ot == nil { + n = DefaultLit(n, nil) + break + } + + n := n.(*ir.UnaryExpr) + n.X = convlit(n.X, ot) + if n.X.Type() == nil { + n.SetType(nil) + return n + } + n.SetType(t) + return n + + case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT, ir.OOROR, ir.OANDAND, ir.OCOMPLEX: + ot := operandType(n.Op(), t) + if ot == nil { + n = DefaultLit(n, nil) + break + } + + var l, r ir.Node + switch n := n.(type) { + case *ir.BinaryExpr: + n.X = convlit(n.X, ot) + n.Y = convlit(n.Y, ot) + l, r = n.X, n.Y + case *ir.LogicalExpr: + n.X = convlit(n.X, ot) + n.Y = convlit(n.Y, ot) + l, r = n.X, n.Y + } + + if l.Type() == nil || r.Type() == nil { + n.SetType(nil) + return n + } + if !types.Identical(l.Type(), r.Type()) { + base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type()) + n.SetType(nil) + return n + } + + n.SetType(t) + return n + + case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: + n := n.(*ir.BinaryExpr) + if !t.IsBoolean() { + break + } + n.SetType(t) + return n + + case ir.OLSH, ir.ORSH: + n := n.(*ir.BinaryExpr) + n.X = convlit1(n.X, t, explicit, nil) + n.SetType(n.X.Type()) + if n.Type() != nil && !n.Type().IsInteger() { + base.Errorf("invalid operation: %v (shift of type %v)", n, n.Type()) + n.SetType(nil) + } + return n + } + + if explicit { + base.Fatalf("cannot convert %L to type %v", n, t) + } else if context != nil { + base.Fatalf("cannot use %L as type %v in %s", n, t, context()) + } 
else { + base.Fatalf("cannot use %L as type %v", n, t) + } + + n.SetType(nil) + return n +} + +func operandType(op ir.Op, t *types.Type) *types.Type { + switch op { + case ir.OCOMPLEX: + if t.IsComplex() { + return types.FloatForComplex(t) + } + case ir.OREAL, ir.OIMAG: + if t.IsFloat() { + return types.ComplexForFloat(t) + } + default: + if okfor[op][t.Kind()] { + return t + } + } + return nil +} + +// ConvertVal converts v into a representation appropriate for t. If +// no such representation exists, it returns constant.MakeUnknown() +// instead. +// +// If explicit is true, then conversions from integer to string are +// also allowed. +func ConvertVal(v constant.Value, t *types.Type, explicit bool) constant.Value { + switch ct := v.Kind(); ct { + case constant.Bool: + if t.IsBoolean() { + return v + } + + case constant.String: + if t.IsString() { + return v + } + + case constant.Int: + if explicit && t.IsString() { + return tostr(v) + } + fallthrough + case constant.Float, constant.Complex: + switch { + case t.IsInteger(): + v = toint(v) + return v + case t.IsFloat(): + v = toflt(v) + v = truncfltlit(v, t) + return v + case t.IsComplex(): + v = tocplx(v) + v = trunccmplxlit(v, t) + return v + } + } + + return constant.MakeUnknown() +} + +func tocplx(v constant.Value) constant.Value { + return constant.ToComplex(v) +} + +func toflt(v constant.Value) constant.Value { + if v.Kind() == constant.Complex { + v = constant.Real(v) + } + + return constant.ToFloat(v) +} + +func toint(v constant.Value) constant.Value { + if v.Kind() == constant.Complex { + v = constant.Real(v) + } + + if v := constant.ToInt(v); v.Kind() == constant.Int { + return v + } + + // The value of v cannot be represented as an integer; + // so we need to print an error message. 
+ // Unfortunately some float values cannot be + // reasonably formatted for inclusion in an error + // message (example: 1 + 1e-100), so first we try to + // format the float; if the truncation resulted in + // something that looks like an integer we omit the + // value from the error message. + // (See issue #11371). + f := ir.BigFloat(v) + if f.MantExp(nil) > 2*ir.ConstPrec { + base.Errorf("integer too large") + } else { + var t big.Float + t.Parse(fmt.Sprint(v), 0) + if t.IsInt() { + base.Errorf("constant truncated to integer") + } else { + base.Errorf("constant %v truncated to integer", v) + } + } + + // Prevent follow-on errors. + return constant.MakeUnknown() +} + +func tostr(v constant.Value) constant.Value { + if v.Kind() == constant.Int { + r := unicode.ReplacementChar + if x, ok := constant.Uint64Val(v); ok && x <= unicode.MaxRune { + r = rune(x) + } + v = constant.MakeString(string(r)) + } + return v +} + +func makeFloat64(f float64) constant.Value { + if math.IsInf(f, 0) { + base.Fatalf("infinity is not a valid constant") + } + return constant.MakeFloat64(f) +} + +func makeComplex(real, imag constant.Value) constant.Value { + return constant.BinaryOp(constant.ToFloat(real), token.ADD, constant.MakeImag(constant.ToFloat(imag))) +} + +// DefaultLit on both nodes simultaneously; +// if they're both ideal going in they better +// get the same type going out. +// force means must assign concrete (non-ideal) type. +// The results of defaultlit2 MUST be assigned back to l and r, e.g. +// +// n.Left, n.Right = defaultlit2(n.Left, n.Right, force) +func defaultlit2(l ir.Node, r ir.Node, force bool) (ir.Node, ir.Node) { + if l.Type() == nil || r.Type() == nil { + return l, r + } + + if !l.Type().IsInterface() && !r.Type().IsInterface() { + // Can't mix bool with non-bool, string with non-string. 
+ if l.Type().IsBoolean() != r.Type().IsBoolean() { + return l, r + } + if l.Type().IsString() != r.Type().IsString() { + return l, r + } + } + + if !l.Type().IsUntyped() { + r = convlit(r, l.Type()) + return l, r + } + + if !r.Type().IsUntyped() { + l = convlit(l, r.Type()) + return l, r + } + + if !force { + return l, r + } + + // Can't mix nil with anything untyped. + if ir.IsNil(l) || ir.IsNil(r) { + return l, r + } + t := defaultType(mixUntyped(l.Type(), r.Type())) + l = convlit(l, t) + r = convlit(r, t) + return l, r +} + +func mixUntyped(t1, t2 *types.Type) *types.Type { + if t1 == t2 { + return t1 + } + + rank := func(t *types.Type) int { + switch t { + case types.UntypedInt: + return 0 + case types.UntypedRune: + return 1 + case types.UntypedFloat: + return 2 + case types.UntypedComplex: + return 3 + } + base.Fatalf("bad type %v", t) + panic("unreachable") + } + + if rank(t2) > rank(t1) { + return t2 + } + return t1 +} + +func defaultType(t *types.Type) *types.Type { + if !t.IsUntyped() || t.Kind() == types.TNIL { + return t + } + + switch t { + case types.UntypedBool: + return types.Types[types.TBOOL] + case types.UntypedString: + return types.Types[types.TSTRING] + case types.UntypedInt: + return types.Types[types.TINT] + case types.UntypedRune: + return types.RuneType + case types.UntypedFloat: + return types.Types[types.TFLOAT64] + case types.UntypedComplex: + return types.Types[types.TCOMPLEX128] + } + + base.Fatalf("bad type %v", t) + return nil +} + +// IndexConst checks if Node n contains a constant expression +// representable as a non-negative int and returns its value. +// If n is not a constant expression, not representable as an +// integer, or negative, it returns -1. If n is too large, it +// returns -2. 
+func IndexConst(n ir.Node) int64 { + if n.Op() != ir.OLITERAL { + return -1 + } + if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL { + return -1 + } + + v := toint(n.Val()) + if v.Kind() != constant.Int || constant.Sign(v) < 0 { + return -1 + } + if ir.ConstOverflow(v, types.Types[types.TINT]) { + return -2 + } + return ir.IntVal(types.Types[types.TINT], v) +} + +// callOrChan reports whether n is a call or channel operation. +func callOrChan(n ir.Node) bool { + switch n.Op() { + case ir.OAPPEND, + ir.OCALL, + ir.OCALLFUNC, + ir.OCALLINTER, + ir.OCALLMETH, + ir.OCAP, + ir.OCLEAR, + ir.OCLOSE, + ir.OCOMPLEX, + ir.OCOPY, + ir.ODELETE, + ir.OIMAG, + ir.OLEN, + ir.OMAKE, + ir.OMAX, + ir.OMIN, + ir.ONEW, + ir.OPANIC, + ir.OPRINT, + ir.OPRINTLN, + ir.OREAL, + ir.ORECOVER, + ir.ORECOVERFP, + ir.ORECV, + ir.OUNSAFEADD, + ir.OUNSAFESLICE, + ir.OUNSAFESLICEDATA, + ir.OUNSAFESTRING, + ir.OUNSAFESTRINGDATA: + return true + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/dcl.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/dcl.go new file mode 100644 index 0000000000000000000000000000000000000000..4a847e85583f8ad32c9abf55a30195adb12d2703 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/dcl.go @@ -0,0 +1,125 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typecheck + +import ( + "fmt" + "sync" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +var funcStack []*ir.Func // stack of previous values of ir.CurFunc + +// DeclFunc declares the parameters for fn and adds it to +// Target.Funcs. +// +// Before returning, it sets CurFunc to fn. When the caller is done +// constructing fn, it must call FinishFuncBody to restore CurFunc. 
+func DeclFunc(fn *ir.Func) { + fn.DeclareParams(true) + fn.Nname.Defn = fn + Target.Funcs = append(Target.Funcs, fn) + + funcStack = append(funcStack, ir.CurFunc) + ir.CurFunc = fn +} + +// FinishFuncBody restores ir.CurFunc to its state before the last +// call to DeclFunc. +func FinishFuncBody() { + funcStack, ir.CurFunc = funcStack[:len(funcStack)-1], funcStack[len(funcStack)-1] +} + +func CheckFuncStack() { + if len(funcStack) != 0 { + base.Fatalf("funcStack is non-empty: %v", len(funcStack)) + } +} + +// make a new Node off the books. +func TempAt(pos src.XPos, curfn *ir.Func, typ *types.Type) *ir.Name { + if curfn == nil { + base.FatalfAt(pos, "no curfn for TempAt") + } + if typ == nil { + base.FatalfAt(pos, "TempAt called with nil type") + } + if typ.Kind() == types.TFUNC && typ.Recv() != nil { + base.FatalfAt(pos, "misuse of method type: %v", typ) + } + types.CalcSize(typ) + + sym := &types.Sym{ + Name: autotmpname(len(curfn.Dcl)), + Pkg: types.LocalPkg, + } + name := curfn.NewLocal(pos, sym, typ) + name.SetEsc(ir.EscNever) + name.SetUsed(true) + name.SetAutoTemp(true) + + return name +} + +var ( + autotmpnamesmu sync.Mutex + autotmpnames []string +) + +// autotmpname returns the name for an autotmp variable numbered n. +func autotmpname(n int) string { + autotmpnamesmu.Lock() + defer autotmpnamesmu.Unlock() + + // Grow autotmpnames, if needed. + if n >= len(autotmpnames) { + autotmpnames = append(autotmpnames, make([]string, n+1-len(autotmpnames))...) + autotmpnames = autotmpnames[:cap(autotmpnames)] + } + + s := autotmpnames[n] + if s == "" { + // Give each tmp a different name so that they can be registerized. + // Add a preceding . to avoid clashing with legal names. + prefix := ".autotmp_%d" + + s = fmt.Sprintf(prefix, n) + autotmpnames[n] = s + } + return s +} + +// f is method type, with receiver. +// return function type, receiver as first argument (or not). 
+func NewMethodType(sig *types.Type, recv *types.Type) *types.Type { + nrecvs := 0 + if recv != nil { + nrecvs++ + } + + // TODO(mdempsky): Move this function to types. + // TODO(mdempsky): Preserve positions, names, and package from sig+recv. + + params := make([]*types.Field, nrecvs+sig.NumParams()) + if recv != nil { + params[0] = types.NewField(base.Pos, nil, recv) + } + for i, param := range sig.Params() { + d := types.NewField(base.Pos, nil, param.Type) + d.SetIsDDD(param.IsDDD()) + params[nrecvs+i] = d + } + + results := make([]*types.Field, sig.NumResults()) + for i, t := range sig.Results() { + results[i] = types.NewField(base.Pos, nil, t.Type) + } + + return types.NewSignature(nil, params, results) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/export.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/export.go new file mode 100644 index 0000000000000000000000000000000000000000..585c1b78c23169345e61e0bfa0095fa3710cffa1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/export.go @@ -0,0 +1,33 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typecheck + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +// importfunc declares symbol s as an imported function with type t. +func importfunc(s *types.Sym, t *types.Type) { + fn := ir.NewFunc(src.NoXPos, src.NoXPos, s, t) + importsym(fn.Nname) +} + +// importvar declares symbol s as an imported variable with type t. 
+func importvar(s *types.Sym, t *types.Type) { + n := ir.NewNameAt(src.NoXPos, s, t) + n.Class = ir.PEXTERN + importsym(n) +} + +func importsym(name *ir.Name) { + sym := name.Sym() + if sym.Def != nil { + base.Fatalf("importsym of symbol that already exists: %v", sym.Def) + } + sym.Def = name +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/expr.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/expr.go new file mode 100644 index 0000000000000000000000000000000000000000..12d1743874caa06514d49dfcac8629a2bba3a3bd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/expr.go @@ -0,0 +1,933 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typecheck + +import ( + "fmt" + "go/constant" + "go/token" + "internal/types/errors" + "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +func tcShift(n, l, r ir.Node) (ir.Node, ir.Node, *types.Type) { + if l.Type() == nil || r.Type() == nil { + return l, r, nil + } + + r = DefaultLit(r, types.Types[types.TUINT]) + t := r.Type() + if !t.IsInteger() { + base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type()) + return l, r, nil + } + t = l.Type() + if t != nil && t.Kind() != types.TIDEAL && !t.IsInteger() { + base.Errorf("invalid operation: %v (shift of type %v)", n, t) + return l, r, nil + } + + // no DefaultLit for left + // the outer context gives the type + t = l.Type() + if (l.Type() == types.UntypedFloat || l.Type() == types.UntypedComplex) && r.Op() == ir.OLITERAL { + t = types.UntypedInt + } + return l, r, t +} + +// tcArith typechecks operands of a binary arithmetic expression. +// The result of tcArith MUST be assigned back to original operands, +// t is the type of the expression, and should be set by the caller. 
e.g: +// +// n.X, n.Y, t = tcArith(n, op, n.X, n.Y) +// n.SetType(t) +func tcArith(n ir.Node, op ir.Op, l, r ir.Node) (ir.Node, ir.Node, *types.Type) { + l, r = defaultlit2(l, r, false) + if l.Type() == nil || r.Type() == nil { + return l, r, nil + } + t := l.Type() + if t.Kind() == types.TIDEAL { + t = r.Type() + } + aop := ir.OXXX + if n.Op().IsCmp() && t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) { + // comparison is okay as long as one side is + // assignable to the other. convert so they have + // the same type. + // + // the only conversion that isn't a no-op is concrete == interface. + // in that case, check comparability of the concrete type. + // The conversion allocates, so only do it if the concrete type is huge. + converted := false + if r.Type().Kind() != types.TBLANK { + aop, _ = assignOp(l.Type(), r.Type()) + if aop != ir.OXXX { + if r.Type().IsInterface() && !l.Type().IsInterface() && !types.IsComparable(l.Type()) { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type())) + return l, r, nil + } + + types.CalcSize(l.Type()) + if r.Type().IsInterface() == l.Type().IsInterface() || l.Type().Size() >= 1<<16 { + l = ir.NewConvExpr(base.Pos, aop, r.Type(), l) + l.SetTypecheck(1) + } + + t = r.Type() + converted = true + } + } + + if !converted && l.Type().Kind() != types.TBLANK { + aop, _ = assignOp(r.Type(), l.Type()) + if aop != ir.OXXX { + if l.Type().IsInterface() && !r.Type().IsInterface() && !types.IsComparable(r.Type()) { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type())) + return l, r, nil + } + + types.CalcSize(r.Type()) + if r.Type().IsInterface() == l.Type().IsInterface() || r.Type().Size() >= 1<<16 { + r = ir.NewConvExpr(base.Pos, aop, l.Type(), r) + r.SetTypecheck(1) + } + + t = l.Type() + } + } + } + + if t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) { + l, r = defaultlit2(l, r, true) + if l.Type() == nil || r.Type() 
== nil { + return l, r, nil + } + if l.Type().IsInterface() == r.Type().IsInterface() || aop == 0 { + base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type()) + return l, r, nil + } + } + + if t.Kind() == types.TIDEAL { + t = mixUntyped(l.Type(), r.Type()) + } + if dt := defaultType(t); !okfor[op][dt.Kind()] { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t)) + return l, r, nil + } + + // okfor allows any array == array, map == map, func == func. + // restrict to slice/map/func == nil and nil == slice/map/func. + if l.Type().IsArray() && !types.IsComparable(l.Type()) { + base.Errorf("invalid operation: %v (%v cannot be compared)", n, l.Type()) + return l, r, nil + } + + if l.Type().IsSlice() && !ir.IsNil(l) && !ir.IsNil(r) { + base.Errorf("invalid operation: %v (slice can only be compared to nil)", n) + return l, r, nil + } + + if l.Type().IsMap() && !ir.IsNil(l) && !ir.IsNil(r) { + base.Errorf("invalid operation: %v (map can only be compared to nil)", n) + return l, r, nil + } + + if l.Type().Kind() == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) { + base.Errorf("invalid operation: %v (func can only be compared to nil)", n) + return l, r, nil + } + + if l.Type().IsStruct() { + if f := types.IncomparableField(l.Type()); f != nil { + base.Errorf("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type) + return l, r, nil + } + } + + return l, r, t +} + +// The result of tcCompLit MUST be assigned back to n, e.g. 
+// +// n.Left = tcCompLit(n.Left) +func tcCompLit(n *ir.CompLitExpr) (res ir.Node) { + if base.EnableTrace && base.Flag.LowerT { + defer tracePrint("tcCompLit", n)(&res) + } + + lno := base.Pos + defer func() { + base.Pos = lno + }() + + ir.SetPos(n) + + t := n.Type() + base.AssertfAt(t != nil, n.Pos(), "missing type in composite literal") + + switch t.Kind() { + default: + base.Errorf("invalid composite literal type %v", t) + n.SetType(nil) + + case types.TARRAY: + typecheckarraylit(t.Elem(), t.NumElem(), n.List, "array literal") + n.SetOp(ir.OARRAYLIT) + + case types.TSLICE: + length := typecheckarraylit(t.Elem(), -1, n.List, "slice literal") + n.SetOp(ir.OSLICELIT) + n.Len = length + + case types.TMAP: + for i3, l := range n.List { + ir.SetPos(l) + if l.Op() != ir.OKEY { + n.List[i3] = Expr(l) + base.Errorf("missing key in map literal") + continue + } + l := l.(*ir.KeyExpr) + + r := l.Key + r = Expr(r) + l.Key = AssignConv(r, t.Key(), "map key") + + r = l.Value + r = Expr(r) + l.Value = AssignConv(r, t.Elem(), "map value") + } + + n.SetOp(ir.OMAPLIT) + + case types.TSTRUCT: + // Need valid field offsets for Xoffset below. + types.CalcSize(t) + + errored := false + if len(n.List) != 0 && nokeys(n.List) { + // simple list of variables + ls := n.List + for i, n1 := range ls { + ir.SetPos(n1) + n1 = Expr(n1) + ls[i] = n1 + if i >= t.NumFields() { + if !errored { + base.Errorf("too many values in %v", n) + errored = true + } + continue + } + + f := t.Field(i) + s := f.Sym + + // Do the test for assigning to unexported fields. + // But if this is an instantiated function, then + // the function has already been typechecked. In + // that case, don't do the test, since it can fail + // for the closure structs created in + // walkClosure(), because the instantiated + // function is compiled as if in the source + // package of the generic function. 
+ if !(ir.CurFunc != nil && strings.Contains(ir.CurFunc.Nname.Sym().Name, "[")) { + if s != nil && !types.IsExported(s.Name) && s.Pkg != types.LocalPkg { + base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t) + } + } + // No pushtype allowed here. Must name fields for that. + n1 = AssignConv(n1, f.Type, "field value") + ls[i] = ir.NewStructKeyExpr(base.Pos, f, n1) + } + if len(ls) < t.NumFields() { + base.Errorf("too few values in %v", n) + } + } else { + hash := make(map[string]bool) + + // keyed list + ls := n.List + for i, n := range ls { + ir.SetPos(n) + + sk, ok := n.(*ir.StructKeyExpr) + if !ok { + kv, ok := n.(*ir.KeyExpr) + if !ok { + if !errored { + base.Errorf("mixture of field:value and value initializers") + errored = true + } + ls[i] = Expr(n) + continue + } + + sk = tcStructLitKey(t, kv) + if sk == nil { + continue + } + + fielddup(sk.Sym().Name, hash) + } + + // No pushtype allowed here. Tried and rejected. + sk.Value = Expr(sk.Value) + sk.Value = AssignConv(sk.Value, sk.Field.Type, "field value") + ls[i] = sk + } + } + + n.SetOp(ir.OSTRUCTLIT) + } + + return n +} + +// tcStructLitKey typechecks an OKEY node that appeared within a +// struct literal. +func tcStructLitKey(typ *types.Type, kv *ir.KeyExpr) *ir.StructKeyExpr { + key := kv.Key + + sym := key.Sym() + + // An OXDOT uses the Sym field to hold + // the field to the right of the dot, + // so s will be non-nil, but an OXDOT + // is never a valid struct literal key. + if sym == nil || sym.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || sym.IsBlank() { + base.Errorf("invalid field name %v in struct initializer", key) + return nil + } + + if f := Lookdot1(nil, sym, typ, typ.Fields(), 0); f != nil { + return ir.NewStructKeyExpr(kv.Pos(), f, kv.Value) + } + + if ci := Lookdot1(nil, sym, typ, typ.Fields(), 2); ci != nil { // Case-insensitive lookup. 
+ if visible(ci.Sym) { + base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", sym, typ, ci.Sym) + } else if nonexported(sym) && sym.Name == ci.Sym.Name { // Ensure exactness before the suggestion. + base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", sym, typ) + } else { + base.Errorf("unknown field '%v' in struct literal of type %v", sym, typ) + } + return nil + } + + var f *types.Field + p, _ := dotpath(sym, typ, &f, true) + if p == nil || f.IsMethod() { + base.Errorf("unknown field '%v' in struct literal of type %v", sym, typ) + return nil + } + + // dotpath returns the parent embedded types in reverse order. + var ep []string + for ei := len(p) - 1; ei >= 0; ei-- { + ep = append(ep, p[ei].field.Sym.Name) + } + ep = append(ep, sym.Name) + base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), typ) + return nil +} + +// tcConv typechecks an OCONV node. +func tcConv(n *ir.ConvExpr) ir.Node { + types.CheckSize(n.Type()) // ensure width is calculated for backend + n.X = Expr(n.X) + n.X = convlit1(n.X, n.Type(), true, nil) + t := n.X.Type() + if t == nil || n.Type() == nil { + n.SetType(nil) + return n + } + op, why := convertOp(n.X.Op() == ir.OLITERAL, t, n.Type()) + if op == ir.OXXX { + // Due to //go:nointerface, we may be stricter than types2 here (#63333). + base.ErrorfAt(n.Pos(), errors.InvalidConversion, "cannot convert %L to type %v%s", n.X, n.Type(), why) + n.SetType(nil) + return n + } + + n.SetOp(op) + switch n.Op() { + case ir.OCONVNOP: + if t.Kind() == n.Type().Kind() { + switch t.Kind() { + case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128: + // Floating point casts imply rounding and + // so the conversion must be kept. + n.SetOp(ir.OCONV) + } + } + + // do not convert to []byte literal. See CL 125796. + // generated code and compiler memory footprint is better without it. 
+ case ir.OSTR2BYTES: + // ok + + case ir.OSTR2RUNES: + if n.X.Op() == ir.OLITERAL { + return stringtoruneslit(n) + } + + case ir.OBYTES2STR: + if t.Elem() != types.ByteType && t.Elem() != types.Types[types.TUINT8] { + // If t is a slice of a user-defined byte type B (not uint8 + // or byte), then add an extra CONVNOP from []B to []byte, so + // that the call to slicebytetostring() added in walk will + // typecheck correctly. + n.X = ir.NewConvExpr(n.X.Pos(), ir.OCONVNOP, types.NewSlice(types.ByteType), n.X) + n.X.SetTypecheck(1) + } + + case ir.ORUNES2STR: + if t.Elem() != types.RuneType && t.Elem() != types.Types[types.TINT32] { + // If t is a slice of a user-defined rune type B (not uint32 + // or rune), then add an extra CONVNOP from []B to []rune, so + // that the call to slicerunetostring() added in walk will + // typecheck correctly. + n.X = ir.NewConvExpr(n.X.Pos(), ir.OCONVNOP, types.NewSlice(types.RuneType), n.X) + n.X.SetTypecheck(1) + } + + } + return n +} + +// DotField returns a field selector expression that selects the +// index'th field of the given expression, which must be of struct or +// pointer-to-struct type. +func DotField(pos src.XPos, x ir.Node, index int) *ir.SelectorExpr { + op, typ := ir.ODOT, x.Type() + if typ.IsPtr() { + op, typ = ir.ODOTPTR, typ.Elem() + } + if !typ.IsStruct() { + base.FatalfAt(pos, "DotField of non-struct: %L", x) + } + + // TODO(mdempsky): This is the backend's responsibility. + types.CalcSize(typ) + + field := typ.Field(index) + return dot(pos, field.Type, op, x, field) +} + +func dot(pos src.XPos, typ *types.Type, op ir.Op, x ir.Node, selection *types.Field) *ir.SelectorExpr { + n := ir.NewSelectorExpr(pos, op, x, selection.Sym) + n.Selection = selection + n.SetType(typ) + n.SetTypecheck(1) + return n +} + +// XDotMethod returns an expression representing the field selection +// x.sym. If any implicit field selection are necessary, those are +// inserted too. 
+func XDotField(pos src.XPos, x ir.Node, sym *types.Sym) *ir.SelectorExpr { + n := Expr(ir.NewSelectorExpr(pos, ir.OXDOT, x, sym)).(*ir.SelectorExpr) + if n.Op() != ir.ODOT && n.Op() != ir.ODOTPTR { + base.FatalfAt(pos, "unexpected result op: %v (%v)", n.Op(), n) + } + return n +} + +// XDotMethod returns an expression representing the method value +// x.sym (i.e., x is a value, not a type). If any implicit field +// selection are necessary, those are inserted too. +// +// If callee is true, the result is an ODOTMETH/ODOTINTER, otherwise +// an OMETHVALUE. +func XDotMethod(pos src.XPos, x ir.Node, sym *types.Sym, callee bool) *ir.SelectorExpr { + n := ir.NewSelectorExpr(pos, ir.OXDOT, x, sym) + if callee { + n = Callee(n).(*ir.SelectorExpr) + if n.Op() != ir.ODOTMETH && n.Op() != ir.ODOTINTER { + base.FatalfAt(pos, "unexpected result op: %v (%v)", n.Op(), n) + } + } else { + n = Expr(n).(*ir.SelectorExpr) + if n.Op() != ir.OMETHVALUE { + base.FatalfAt(pos, "unexpected result op: %v (%v)", n.Op(), n) + } + } + return n +} + +// tcDot typechecks an OXDOT or ODOT node. 
+func tcDot(n *ir.SelectorExpr, top int) ir.Node { + if n.Op() == ir.OXDOT { + n = AddImplicitDots(n) + n.SetOp(ir.ODOT) + if n.X == nil { + n.SetType(nil) + return n + } + } + + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) + + t := n.X.Type() + if t == nil { + base.UpdateErrorDot(ir.Line(n), fmt.Sprint(n.X), fmt.Sprint(n)) + n.SetType(nil) + return n + } + + if n.X.Op() == ir.OTYPE { + base.FatalfAt(n.Pos(), "use NewMethodExpr to construct OMETHEXPR") + } + + if t.IsPtr() && !t.Elem().IsInterface() { + t = t.Elem() + if t == nil { + n.SetType(nil) + return n + } + n.SetOp(ir.ODOTPTR) + types.CheckSize(t) + } + + if n.Sel.IsBlank() { + base.Errorf("cannot refer to blank field or method") + n.SetType(nil) + return n + } + + if Lookdot(n, t, 0) == nil { + // Legitimate field or method lookup failed, try to explain the error + switch { + case t.IsEmptyInterface(): + base.Errorf("%v undefined (type %v is interface with no methods)", n, n.X.Type()) + + case t.IsPtr() && t.Elem().IsInterface(): + // Pointer to interface is almost always a mistake. + base.Errorf("%v undefined (type %v is pointer to interface, not interface)", n, n.X.Type()) + + case Lookdot(n, t, 1) != nil: + // Field or method matches by name, but it is not exported. + base.Errorf("%v undefined (cannot refer to unexported field or method %v)", n, n.Sel) + + default: + if mt := Lookdot(n, t, 2); mt != nil && visible(mt.Sym) { // Case-insensitive lookup. + base.Errorf("%v undefined (type %v has no field or method %v, but does have %v)", n, n.X.Type(), n.Sel, mt.Sym) + } else { + base.Errorf("%v undefined (type %v has no field or method %v)", n, n.X.Type(), n.Sel) + } + } + n.SetType(nil) + return n + } + + if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && top&ctxCallee == 0 { + n.SetOp(ir.OMETHVALUE) + n.SetType(NewMethodType(n.Type(), nil)) + } + return n +} + +// tcDotType typechecks an ODOTTYPE node. 
+func tcDotType(n *ir.TypeAssertExpr) ir.Node { + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) + l := n.X + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + if !t.IsInterface() { + base.Errorf("invalid type assertion: %v (non-interface type %v on left)", n, t) + n.SetType(nil) + return n + } + + base.AssertfAt(n.Type() != nil, n.Pos(), "missing type: %v", n) + + if n.Type() != nil && !n.Type().IsInterface() { + why := ImplementsExplain(n.Type(), t) + if why != "" { + base.Fatalf("impossible type assertion:\n\t%s", why) + n.SetType(nil) + return n + } + } + return n +} + +// tcITab typechecks an OITAB node. +func tcITab(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + t := n.X.Type() + if t == nil { + n.SetType(nil) + return n + } + if !t.IsInterface() { + base.Fatalf("OITAB of %v", t) + } + n.SetType(types.NewPtr(types.Types[types.TUINTPTR])) + return n +} + +// tcIndex typechecks an OINDEX node. +func tcIndex(n *ir.IndexExpr) ir.Node { + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) + n.X = implicitstar(n.X) + l := n.X + n.Index = Expr(n.Index) + r := n.Index + t := l.Type() + if t == nil || r.Type() == nil { + n.SetType(nil) + return n + } + switch t.Kind() { + default: + base.Errorf("invalid operation: %v (type %v does not support indexing)", n, t) + n.SetType(nil) + return n + + case types.TSTRING, types.TARRAY, types.TSLICE: + n.Index = indexlit(n.Index) + if t.IsString() { + n.SetType(types.ByteType) + } else { + n.SetType(t.Elem()) + } + why := "string" + if t.IsArray() { + why = "array" + } else if t.IsSlice() { + why = "slice" + } + + if n.Index.Type() != nil && !n.Index.Type().IsInteger() { + base.Errorf("non-integer %s index %v", why, n.Index) + return n + } + + if !n.Bounded() && ir.IsConst(n.Index, constant.Int) { + x := n.Index.Val() + if constant.Sign(x) < 0 { + base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Index) + } else if t.IsArray() && constant.Compare(x, token.GEQ, constant.MakeInt64(t.NumElem())) { + 
base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Index, t.NumElem()) + } else if ir.IsConst(n.X, constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(ir.StringVal(n.X))))) { + base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Index, len(ir.StringVal(n.X))) + } else if ir.ConstOverflow(x, types.Types[types.TINT]) { + base.Errorf("invalid %s index %v (index too large)", why, n.Index) + } + } + + case types.TMAP: + n.Index = AssignConv(n.Index, t.Key(), "map index") + n.SetType(t.Elem()) + n.SetOp(ir.OINDEXMAP) + n.Assigned = false + } + return n +} + +// tcLenCap typechecks an OLEN or OCAP node. +func tcLenCap(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) + n.X = implicitstar(n.X) + l := n.X + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + + var ok bool + if n.Op() == ir.OLEN { + ok = okforlen[t.Kind()] + } else { + ok = okforcap[t.Kind()] + } + if !ok { + base.Errorf("invalid argument %L for %v", l, n.Op()) + n.SetType(nil) + return n + } + + n.SetType(types.Types[types.TINT]) + return n +} + +// tcUnsafeData typechecks an OUNSAFESLICEDATA or OUNSAFESTRINGDATA node. +func tcUnsafeData(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) + l := n.X + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + + var kind types.Kind + if n.Op() == ir.OUNSAFESLICEDATA { + kind = types.TSLICE + } else { + /* kind is string */ + kind = types.TSTRING + } + + if t.Kind() != kind { + base.Errorf("invalid argument %L for %v", l, n.Op()) + n.SetType(nil) + return n + } + + if kind == types.TSTRING { + t = types.ByteType + } else { + t = t.Elem() + } + n.SetType(types.NewPtr(t)) + return n +} + +// tcRecv typechecks an ORECV node. 
+func tcRecv(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) + l := n.X + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + if !t.IsChan() { + base.Errorf("invalid operation: %v (receive from non-chan type %v)", n, t) + n.SetType(nil) + return n + } + + if !t.ChanDir().CanRecv() { + base.Errorf("invalid operation: %v (receive from send-only type %v)", n, t) + n.SetType(nil) + return n + } + + n.SetType(t.Elem()) + return n +} + +// tcSPtr typechecks an OSPTR node. +func tcSPtr(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + t := n.X.Type() + if t == nil { + n.SetType(nil) + return n + } + if !t.IsSlice() && !t.IsString() { + base.Fatalf("OSPTR of %v", t) + } + if t.IsString() { + n.SetType(types.NewPtr(types.Types[types.TUINT8])) + } else { + n.SetType(types.NewPtr(t.Elem())) + } + return n +} + +// tcSlice typechecks an OSLICE or OSLICE3 node. +func tcSlice(n *ir.SliceExpr) ir.Node { + n.X = DefaultLit(Expr(n.X), nil) + n.Low = indexlit(Expr(n.Low)) + n.High = indexlit(Expr(n.High)) + n.Max = indexlit(Expr(n.Max)) + hasmax := n.Op().IsSlice3() + l := n.X + if l.Type() == nil { + n.SetType(nil) + return n + } + if l.Type().IsArray() { + if !ir.IsAddressable(n.X) { + base.Errorf("invalid operation %v (slice of unaddressable value)", n) + n.SetType(nil) + return n + } + + addr := NodAddr(n.X) + addr.SetImplicit(true) + n.X = Expr(addr) + l = n.X + } + t := l.Type() + var tp *types.Type + if t.IsString() { + if hasmax { + base.Errorf("invalid operation %v (3-index slice of string)", n) + n.SetType(nil) + return n + } + n.SetType(t) + n.SetOp(ir.OSLICESTR) + } else if t.IsPtr() && t.Elem().IsArray() { + tp = t.Elem() + n.SetType(types.NewSlice(tp.Elem())) + types.CalcSize(n.Type()) + if hasmax { + n.SetOp(ir.OSLICE3ARR) + } else { + n.SetOp(ir.OSLICEARR) + } + } else if t.IsSlice() { + n.SetType(t) + } else { + base.Errorf("cannot slice %v (type %v)", l, t) + n.SetType(nil) + return n + } + + if n.Low != nil && !checksliceindex(l, 
n.Low, tp) { + n.SetType(nil) + return n + } + if n.High != nil && !checksliceindex(l, n.High, tp) { + n.SetType(nil) + return n + } + if n.Max != nil && !checksliceindex(l, n.Max, tp) { + n.SetType(nil) + return n + } + if !checksliceconst(n.Low, n.High) || !checksliceconst(n.Low, n.Max) || !checksliceconst(n.High, n.Max) { + n.SetType(nil) + return n + } + return n +} + +// tcSliceHeader typechecks an OSLICEHEADER node. +func tcSliceHeader(n *ir.SliceHeaderExpr) ir.Node { + // Errors here are Fatalf instead of Errorf because only the compiler + // can construct an OSLICEHEADER node. + // Components used in OSLICEHEADER that are supplied by parsed source code + // have already been typechecked in e.g. OMAKESLICE earlier. + t := n.Type() + if t == nil { + base.Fatalf("no type specified for OSLICEHEADER") + } + + if !t.IsSlice() { + base.Fatalf("invalid type %v for OSLICEHEADER", n.Type()) + } + + if n.Ptr == nil || n.Ptr.Type() == nil || !n.Ptr.Type().IsUnsafePtr() { + base.Fatalf("need unsafe.Pointer for OSLICEHEADER") + } + + n.Ptr = Expr(n.Ptr) + n.Len = DefaultLit(Expr(n.Len), types.Types[types.TINT]) + n.Cap = DefaultLit(Expr(n.Cap), types.Types[types.TINT]) + + if ir.IsConst(n.Len, constant.Int) && ir.Int64Val(n.Len) < 0 { + base.Fatalf("len for OSLICEHEADER must be non-negative") + } + + if ir.IsConst(n.Cap, constant.Int) && ir.Int64Val(n.Cap) < 0 { + base.Fatalf("cap for OSLICEHEADER must be non-negative") + } + + if ir.IsConst(n.Len, constant.Int) && ir.IsConst(n.Cap, constant.Int) && constant.Compare(n.Len.Val(), token.GTR, n.Cap.Val()) { + base.Fatalf("len larger than cap for OSLICEHEADER") + } + + return n +} + +// tcStringHeader typechecks an OSTRINGHEADER node. 
+func tcStringHeader(n *ir.StringHeaderExpr) ir.Node { + t := n.Type() + if t == nil { + base.Fatalf("no type specified for OSTRINGHEADER") + } + + if !t.IsString() { + base.Fatalf("invalid type %v for OSTRINGHEADER", n.Type()) + } + + if n.Ptr == nil || n.Ptr.Type() == nil || !n.Ptr.Type().IsUnsafePtr() { + base.Fatalf("need unsafe.Pointer for OSTRINGHEADER") + } + + n.Ptr = Expr(n.Ptr) + n.Len = DefaultLit(Expr(n.Len), types.Types[types.TINT]) + + if ir.IsConst(n.Len, constant.Int) && ir.Int64Val(n.Len) < 0 { + base.Fatalf("len for OSTRINGHEADER must be non-negative") + } + + return n +} + +// tcStar typechecks an ODEREF node, which may be an expression or a type. +func tcStar(n *ir.StarExpr, top int) ir.Node { + n.X = typecheck(n.X, ctxExpr|ctxType) + l := n.X + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + + // TODO(mdempsky): Remove (along with ctxType above) once I'm + // confident this code path isn't needed any more. + if l.Op() == ir.OTYPE { + base.Fatalf("unexpected type in deref expression: %v", l) + } + + if !t.IsPtr() { + if top&(ctxExpr|ctxStmt) != 0 { + base.Errorf("invalid indirect of %L", n.X) + n.SetType(nil) + return n + } + base.Errorf("%v is not a type", l) + return n + } + + n.SetType(t.Elem()) + return n +} + +// tcUnaryArith typechecks a unary arithmetic expression. 
+func tcUnaryArith(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + l := n.X + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + if !okfor[n.Op()][defaultType(t).Kind()] { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(t)) + n.SetType(nil) + return n + } + + n.SetType(t) + return n +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/func.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/func.go new file mode 100644 index 0000000000000000000000000000000000000000..5c54a5bd49e2484865924db36ddf08afc11afed8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/func.go @@ -0,0 +1,834 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typecheck + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/src" + + "fmt" + "go/constant" + "go/token" +) + +// MakeDotArgs package all the arguments that match a ... T parameter into a []T. +func MakeDotArgs(pos src.XPos, typ *types.Type, args []ir.Node) ir.Node { + if len(args) == 0 { + return ir.NewNilExpr(pos, typ) + } + + args = append([]ir.Node(nil), args...) + lit := ir.NewCompLitExpr(pos, ir.OCOMPLIT, typ, args) + lit.SetImplicit(true) + + n := Expr(lit) + if n.Type() == nil { + base.FatalfAt(pos, "mkdotargslice: typecheck failed") + } + return n +} + +// FixVariadicCall rewrites calls to variadic functions to use an +// explicit ... argument if one is not already present. 
+func FixVariadicCall(call *ir.CallExpr) { + fntype := call.Fun.Type() + if !fntype.IsVariadic() || call.IsDDD { + return + } + + vi := fntype.NumParams() - 1 + vt := fntype.Param(vi).Type + + args := call.Args + extra := args[vi:] + slice := MakeDotArgs(call.Pos(), vt, extra) + for i := range extra { + extra[i] = nil // allow GC + } + + call.Args = append(args[:vi], slice) + call.IsDDD = true +} + +// FixMethodCall rewrites a method call t.M(...) into a function call T.M(t, ...). +func FixMethodCall(call *ir.CallExpr) { + if call.Fun.Op() != ir.ODOTMETH { + return + } + + dot := call.Fun.(*ir.SelectorExpr) + + fn := NewMethodExpr(dot.Pos(), dot.X.Type(), dot.Selection.Sym) + + args := make([]ir.Node, 1+len(call.Args)) + args[0] = dot.X + copy(args[1:], call.Args) + + call.SetOp(ir.OCALLFUNC) + call.Fun = fn + call.Args = args +} + +func AssertFixedCall(call *ir.CallExpr) { + if call.Fun.Type().IsVariadic() && !call.IsDDD { + base.FatalfAt(call.Pos(), "missed FixVariadicCall") + } + if call.Op() == ir.OCALLMETH { + base.FatalfAt(call.Pos(), "missed FixMethodCall") + } +} + +// ClosureType returns the struct type used to hold all the information +// needed in the closure for clo (clo must be a OCLOSURE node). +// The address of a variable of the returned type can be cast to a func. +func ClosureType(clo *ir.ClosureExpr) *types.Type { + // Create closure in the form of a composite literal. + // supposing the closure captures an int i and a string s + // and has one float64 argument and no results, + // the generated code looks like: + // + // clos = &struct{F uintptr; X0 *int; X1 *string}{func.1, &i, &s} + // + // The use of the struct provides type information to the garbage + // collector so that it can walk the closure. We could use (in this + // case) [3]unsafe.Pointer instead, but that would leave the gc in + // the dark. 
The information appears in the binary in the form of + // type descriptors; the struct is unnamed and uses exported field + // names so that closures in multiple packages with the same struct + // type can share the descriptor. + + fields := make([]*types.Field, 1+len(clo.Func.ClosureVars)) + fields[0] = types.NewField(base.AutogeneratedPos, types.LocalPkg.Lookup("F"), types.Types[types.TUINTPTR]) + for i, v := range clo.Func.ClosureVars { + typ := v.Type() + if !v.Byval() { + typ = types.NewPtr(typ) + } + fields[1+i] = types.NewField(base.AutogeneratedPos, types.LocalPkg.LookupNum("X", i), typ) + } + typ := types.NewStruct(fields) + typ.SetNoalg(true) + return typ +} + +// MethodValueType returns the struct type used to hold all the information +// needed in the closure for a OMETHVALUE node. The address of a variable of +// the returned type can be cast to a func. +func MethodValueType(n *ir.SelectorExpr) *types.Type { + t := types.NewStruct([]*types.Field{ + types.NewField(base.Pos, Lookup("F"), types.Types[types.TUINTPTR]), + types.NewField(base.Pos, Lookup("R"), n.X.Type()), + }) + t.SetNoalg(true) + return t +} + +// type check function definition +// To be called by typecheck, not directly. +// (Call typecheck.Func instead.) +func tcFunc(n *ir.Func) { + if base.EnableTrace && base.Flag.LowerT { + defer tracePrint("tcFunc", n)(nil) + } + + if name := n.Nname; name.Typecheck() == 0 { + base.AssertfAt(name.Type() != nil, n.Pos(), "missing type: %v", name) + name.SetTypecheck(1) + } +} + +// tcCall typechecks an OCALL node. +func tcCall(n *ir.CallExpr, top int) ir.Node { + Stmts(n.Init()) // imported rewritten f(g()) calls (#30907) + n.Fun = typecheck(n.Fun, ctxExpr|ctxType|ctxCallee) + + l := n.Fun + + if l.Op() == ir.ONAME && l.(*ir.Name).BuiltinOp != 0 { + l := l.(*ir.Name) + if n.IsDDD && l.BuiltinOp != ir.OAPPEND { + base.Errorf("invalid use of ... with builtin %v", l) + } + + // builtin: OLEN, OCAP, etc. 
+ switch l.BuiltinOp { + default: + base.Fatalf("unknown builtin %v", l) + + case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OMAX, ir.OMIN, ir.OPRINT, ir.OPRINTLN, ir.ORECOVER: + n.SetOp(l.BuiltinOp) + n.Fun = nil + n.SetTypecheck(0) // re-typechecking new op is OK, not a loop + return typecheck(n, top) + + case ir.OCAP, ir.OCLEAR, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL, ir.OUNSAFESTRINGDATA, ir.OUNSAFESLICEDATA: + typecheckargs(n) + fallthrough + case ir.ONEW: + arg, ok := needOneArg(n, "%v", n.Op()) + if !ok { + n.SetType(nil) + return n + } + u := ir.NewUnaryExpr(n.Pos(), l.BuiltinOp, arg) + return typecheck(ir.InitExpr(n.Init(), u), top) // typecheckargs can add to old.Init + + case ir.OCOMPLEX, ir.OCOPY, ir.OUNSAFEADD, ir.OUNSAFESLICE, ir.OUNSAFESTRING: + typecheckargs(n) + arg1, arg2, ok := needTwoArgs(n) + if !ok { + n.SetType(nil) + return n + } + b := ir.NewBinaryExpr(n.Pos(), l.BuiltinOp, arg1, arg2) + return typecheck(ir.InitExpr(n.Init(), b), top) // typecheckargs can add to old.Init + } + panic("unreachable") + } + + n.Fun = DefaultLit(n.Fun, nil) + l = n.Fun + if l.Op() == ir.OTYPE { + if n.IsDDD { + base.Fatalf("invalid use of ... in type conversion to %v", l.Type()) + } + + // pick off before type-checking arguments + arg, ok := needOneArg(n, "conversion to %v", l.Type()) + if !ok { + n.SetType(nil) + return n + } + + n := ir.NewConvExpr(n.Pos(), ir.OCONV, nil, arg) + n.SetType(l.Type()) + return tcConv(n) + } + + RewriteNonNameCall(n) + typecheckargs(n) + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + types.CheckSize(t) + + switch l.Op() { + case ir.ODOTINTER: + n.SetOp(ir.OCALLINTER) + + case ir.ODOTMETH: + l := l.(*ir.SelectorExpr) + n.SetOp(ir.OCALLMETH) + + // typecheckaste was used here but there wasn't enough + // information further down the call chain to know if we + // were testing a method receiver for unexported fields. + // It isn't necessary, so just do a sanity check. 
+ tp := t.Recv().Type + + if l.X == nil || !types.Identical(l.X.Type(), tp) { + base.Fatalf("method receiver") + } + + default: + n.SetOp(ir.OCALLFUNC) + if t.Kind() != types.TFUNC { + if o := l; o.Name() != nil && types.BuiltinPkg.Lookup(o.Sym().Name).Def != nil { + // be more specific when the non-function + // name matches a predeclared function + base.Errorf("cannot call non-function %L, declared at %s", + l, base.FmtPos(o.Name().Pos())) + } else { + base.Errorf("cannot call non-function %L", l) + } + n.SetType(nil) + return n + } + } + + typecheckaste(ir.OCALL, n.Fun, n.IsDDD, t.Params(), n.Args, func() string { return fmt.Sprintf("argument to %v", n.Fun) }) + FixVariadicCall(n) + FixMethodCall(n) + if t.NumResults() == 0 { + return n + } + if t.NumResults() == 1 { + n.SetType(l.Type().Result(0).Type) + + if n.Op() == ir.OCALLFUNC && n.Fun.Op() == ir.ONAME { + if sym := n.Fun.(*ir.Name).Sym(); types.RuntimeSymName(sym) == "getg" { + // Emit code for runtime.getg() directly instead of calling function. + // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk, + // so that the ordering pass can make sure to preserve the semantics of the original code + // (in particular, the exact time of the function call) by introducing temporaries. + // In this case, we know getg() always returns the same result within a given function + // and we want to avoid the temporaries, so we do the rewrite earlier than is typical. + n.SetOp(ir.OGETG) + } + } + return n + } + + // multiple return + if top&(ctxMultiOK|ctxStmt) == 0 { + base.Errorf("multiple-value %v() in single-value context", l) + return n + } + + n.SetType(l.Type().ResultsTuple()) + return n +} + +// tcAppend typechecks an OAPPEND node. 
+func tcAppend(n *ir.CallExpr) ir.Node { + typecheckargs(n) + args := n.Args + if len(args) == 0 { + base.Errorf("missing arguments to append") + n.SetType(nil) + return n + } + + t := args[0].Type() + if t == nil { + n.SetType(nil) + return n + } + + n.SetType(t) + if !t.IsSlice() { + if ir.IsNil(args[0]) { + base.Errorf("first argument to append must be typed slice; have untyped nil") + n.SetType(nil) + return n + } + + base.Errorf("first argument to append must be slice; have %L", t) + n.SetType(nil) + return n + } + + if n.IsDDD { + if len(args) == 1 { + base.Errorf("cannot use ... on first argument to append") + n.SetType(nil) + return n + } + + if len(args) != 2 { + base.Errorf("too many arguments to append") + n.SetType(nil) + return n + } + + // AssignConv is of args[1] not required here, as the + // types of args[0] and args[1] don't need to match + // (They will both have an underlying type which are + // slices of identical base types, or be []byte and string.) + // See issue 53888. + return n + } + + as := args[1:] + for i, n := range as { + if n.Type() == nil { + continue + } + as[i] = AssignConv(n, t.Elem(), "append") + types.CheckSize(as[i].Type()) // ensure width is calculated for backend + } + return n +} + +// tcClear typechecks an OCLEAR node. +func tcClear(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) + l := n.X + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + + switch { + case t.IsMap(), t.IsSlice(): + default: + base.Errorf("invalid operation: %v (argument must be a map or slice)", n) + n.SetType(nil) + return n + } + + return n +} + +// tcClose typechecks an OCLOSE node. 
+func tcClose(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) + l := n.X + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + if !t.IsChan() { + base.Errorf("invalid operation: %v (non-chan type %v)", n, t) + n.SetType(nil) + return n + } + + if !t.ChanDir().CanSend() { + base.Errorf("invalid operation: %v (cannot close receive-only channel)", n) + n.SetType(nil) + return n + } + return n +} + +// tcComplex typechecks an OCOMPLEX node. +func tcComplex(n *ir.BinaryExpr) ir.Node { + l := Expr(n.X) + r := Expr(n.Y) + if l.Type() == nil || r.Type() == nil { + n.SetType(nil) + return n + } + l, r = defaultlit2(l, r, false) + if l.Type() == nil || r.Type() == nil { + n.SetType(nil) + return n + } + n.X = l + n.Y = r + + if !types.Identical(l.Type(), r.Type()) { + base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type()) + n.SetType(nil) + return n + } + + var t *types.Type + switch l.Type().Kind() { + default: + base.Errorf("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type()) + n.SetType(nil) + return n + + case types.TIDEAL: + t = types.UntypedComplex + + case types.TFLOAT32: + t = types.Types[types.TCOMPLEX64] + + case types.TFLOAT64: + t = types.Types[types.TCOMPLEX128] + } + n.SetType(t) + return n +} + +// tcCopy typechecks an OCOPY node. 
+func tcCopy(n *ir.BinaryExpr) ir.Node { + n.SetType(types.Types[types.TINT]) + n.X = Expr(n.X) + n.X = DefaultLit(n.X, nil) + n.Y = Expr(n.Y) + n.Y = DefaultLit(n.Y, nil) + if n.X.Type() == nil || n.Y.Type() == nil { + n.SetType(nil) + return n + } + + // copy([]byte, string) + if n.X.Type().IsSlice() && n.Y.Type().IsString() { + if types.Identical(n.X.Type().Elem(), types.ByteType) { + return n + } + base.Errorf("arguments to copy have different element types: %L and string", n.X.Type()) + n.SetType(nil) + return n + } + + if !n.X.Type().IsSlice() || !n.Y.Type().IsSlice() { + if !n.X.Type().IsSlice() && !n.Y.Type().IsSlice() { + base.Errorf("arguments to copy must be slices; have %L, %L", n.X.Type(), n.Y.Type()) + } else if !n.X.Type().IsSlice() { + base.Errorf("first argument to copy should be slice; have %L", n.X.Type()) + } else { + base.Errorf("second argument to copy should be slice or string; have %L", n.Y.Type()) + } + n.SetType(nil) + return n + } + + if !types.Identical(n.X.Type().Elem(), n.Y.Type().Elem()) { + base.Errorf("arguments to copy have different element types: %L and %L", n.X.Type(), n.Y.Type()) + n.SetType(nil) + return n + } + return n +} + +// tcDelete typechecks an ODELETE node. +func tcDelete(n *ir.CallExpr) ir.Node { + typecheckargs(n) + args := n.Args + if len(args) == 0 { + base.Errorf("missing arguments to delete") + n.SetType(nil) + return n + } + + if len(args) == 1 { + base.Errorf("missing second (key) argument to delete") + n.SetType(nil) + return n + } + + if len(args) != 2 { + base.Errorf("too many arguments to delete") + n.SetType(nil) + return n + } + + l := args[0] + r := args[1] + if l.Type() != nil && !l.Type().IsMap() { + base.Errorf("first argument to delete must be map; have %L", l.Type()) + n.SetType(nil) + return n + } + + args[1] = AssignConv(r, l.Type().Key(), "delete") + return n +} + +// tcMake typechecks an OMAKE node. 
+func tcMake(n *ir.CallExpr) ir.Node { + args := n.Args + if len(args) == 0 { + base.Errorf("missing argument to make") + n.SetType(nil) + return n + } + + n.Args = nil + l := args[0] + l = typecheck(l, ctxType) + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + + i := 1 + var nn ir.Node + switch t.Kind() { + default: + base.Errorf("cannot make type %v", t) + n.SetType(nil) + return n + + case types.TSLICE: + if i >= len(args) { + base.Errorf("missing len argument to make(%v)", t) + n.SetType(nil) + return n + } + + l = args[i] + i++ + l = Expr(l) + var r ir.Node + if i < len(args) { + r = args[i] + i++ + r = Expr(r) + } + + if l.Type() == nil || (r != nil && r.Type() == nil) { + n.SetType(nil) + return n + } + if !checkmake(t, "len", &l) || r != nil && !checkmake(t, "cap", &r) { + n.SetType(nil) + return n + } + if ir.IsConst(l, constant.Int) && r != nil && ir.IsConst(r, constant.Int) && constant.Compare(l.Val(), token.GTR, r.Val()) { + base.Errorf("len larger than cap in make(%v)", t) + n.SetType(nil) + return n + } + nn = ir.NewMakeExpr(n.Pos(), ir.OMAKESLICE, l, r) + + case types.TMAP: + if i < len(args) { + l = args[i] + i++ + l = Expr(l) + l = DefaultLit(l, types.Types[types.TINT]) + if l.Type() == nil { + n.SetType(nil) + return n + } + if !checkmake(t, "size", &l) { + n.SetType(nil) + return n + } + } else { + l = ir.NewInt(base.Pos, 0) + } + nn = ir.NewMakeExpr(n.Pos(), ir.OMAKEMAP, l, nil) + nn.SetEsc(n.Esc()) + + case types.TCHAN: + l = nil + if i < len(args) { + l = args[i] + i++ + l = Expr(l) + l = DefaultLit(l, types.Types[types.TINT]) + if l.Type() == nil { + n.SetType(nil) + return n + } + if !checkmake(t, "buffer", &l) { + n.SetType(nil) + return n + } + } else { + l = ir.NewInt(base.Pos, 0) + } + nn = ir.NewMakeExpr(n.Pos(), ir.OMAKECHAN, l, nil) + } + + if i < len(args) { + base.Errorf("too many arguments to make(%v)", t) + n.SetType(nil) + return n + } + + nn.SetType(t) + return nn +} + +// tcMakeSliceCopy typechecks an 
OMAKESLICECOPY node. +func tcMakeSliceCopy(n *ir.MakeExpr) ir.Node { + // Errors here are Fatalf instead of Errorf because only the compiler + // can construct an OMAKESLICECOPY node. + // Components used in OMAKESCLICECOPY that are supplied by parsed source code + // have already been typechecked in OMAKE and OCOPY earlier. + t := n.Type() + + if t == nil { + base.Fatalf("no type specified for OMAKESLICECOPY") + } + + if !t.IsSlice() { + base.Fatalf("invalid type %v for OMAKESLICECOPY", n.Type()) + } + + if n.Len == nil { + base.Fatalf("missing len argument for OMAKESLICECOPY") + } + + if n.Cap == nil { + base.Fatalf("missing slice argument to copy for OMAKESLICECOPY") + } + + n.Len = Expr(n.Len) + n.Cap = Expr(n.Cap) + + n.Len = DefaultLit(n.Len, types.Types[types.TINT]) + + if !n.Len.Type().IsInteger() && n.Type().Kind() != types.TIDEAL { + base.Errorf("non-integer len argument in OMAKESLICECOPY") + } + + if ir.IsConst(n.Len, constant.Int) { + if ir.ConstOverflow(n.Len.Val(), types.Types[types.TINT]) { + base.Fatalf("len for OMAKESLICECOPY too large") + } + if constant.Sign(n.Len.Val()) < 0 { + base.Fatalf("len for OMAKESLICECOPY must be non-negative") + } + } + return n +} + +// tcNew typechecks an ONEW node. +func tcNew(n *ir.UnaryExpr) ir.Node { + if n.X == nil { + // Fatalf because the OCALL above checked for us, + // so this must be an internally-generated mistake. + base.Fatalf("missing argument to new") + } + l := n.X + l = typecheck(l, ctxType) + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + n.X = l + n.SetType(types.NewPtr(t)) + return n +} + +// tcPanic typechecks an OPANIC node. +func tcPanic(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + n.X = AssignConv(n.X, types.Types[types.TINTER], "argument to panic") + if n.X.Type() == nil { + n.SetType(nil) + return n + } + return n +} + +// tcPrint typechecks an OPRINT or OPRINTN node. 
+func tcPrint(n *ir.CallExpr) ir.Node { + typecheckargs(n) + ls := n.Args + for i1, n1 := range ls { + // Special case for print: int constant is int64, not int. + if ir.IsConst(n1, constant.Int) { + ls[i1] = DefaultLit(ls[i1], types.Types[types.TINT64]) + } else { + ls[i1] = DefaultLit(ls[i1], nil) + } + } + return n +} + +// tcMinMax typechecks an OMIN or OMAX node. +func tcMinMax(n *ir.CallExpr) ir.Node { + typecheckargs(n) + arg0 := n.Args[0] + for _, arg := range n.Args[1:] { + if !types.Identical(arg.Type(), arg0.Type()) { + base.FatalfAt(n.Pos(), "mismatched arguments: %L and %L", arg0, arg) + } + } + n.SetType(arg0.Type()) + return n +} + +// tcRealImag typechecks an OREAL or OIMAG node. +func tcRealImag(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + l := n.X + t := l.Type() + if t == nil { + n.SetType(nil) + return n + } + + // Determine result type. + switch t.Kind() { + case types.TIDEAL: + n.SetType(types.UntypedFloat) + case types.TCOMPLEX64: + n.SetType(types.Types[types.TFLOAT32]) + case types.TCOMPLEX128: + n.SetType(types.Types[types.TFLOAT64]) + default: + base.Errorf("invalid argument %L for %v", l, n.Op()) + n.SetType(nil) + return n + } + return n +} + +// tcRecover typechecks an ORECOVER node. +func tcRecover(n *ir.CallExpr) ir.Node { + if len(n.Args) != 0 { + base.Errorf("too many arguments to recover") + n.SetType(nil) + return n + } + + // FP is equal to caller's SP plus FixedFrameSize. + var fp ir.Node = ir.NewCallExpr(n.Pos(), ir.OGETCALLERSP, nil, nil) + if off := base.Ctxt.Arch.FixedFrameSize; off != 0 { + fp = ir.NewBinaryExpr(n.Pos(), ir.OADD, fp, ir.NewInt(base.Pos, off)) + } + // TODO(mdempsky): Replace *int32 with unsafe.Pointer, without upsetting checkptr. + fp = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, types.NewPtr(types.Types[types.TINT32]), fp) + + n.SetOp(ir.ORECOVERFP) + n.SetType(types.Types[types.TINTER]) + n.Args = []ir.Node{Expr(fp)} + return n +} + +// tcUnsafeAdd typechecks an OUNSAFEADD node. 
+func tcUnsafeAdd(n *ir.BinaryExpr) *ir.BinaryExpr { + n.X = AssignConv(Expr(n.X), types.Types[types.TUNSAFEPTR], "argument to unsafe.Add") + n.Y = DefaultLit(Expr(n.Y), types.Types[types.TINT]) + if n.X.Type() == nil || n.Y.Type() == nil { + n.SetType(nil) + return n + } + if !n.Y.Type().IsInteger() { + n.SetType(nil) + return n + } + n.SetType(n.X.Type()) + return n +} + +// tcUnsafeSlice typechecks an OUNSAFESLICE node. +func tcUnsafeSlice(n *ir.BinaryExpr) *ir.BinaryExpr { + n.X = Expr(n.X) + n.Y = Expr(n.Y) + if n.X.Type() == nil || n.Y.Type() == nil { + n.SetType(nil) + return n + } + t := n.X.Type() + if !t.IsPtr() { + base.Errorf("first argument to unsafe.Slice must be pointer; have %L", t) + } else if t.Elem().NotInHeap() { + // TODO(mdempsky): This can be relaxed, but should only affect the + // Go runtime itself. End users should only see not-in-heap + // types due to incomplete C structs in cgo, and those types don't + // have a meaningful size anyway. + base.Errorf("unsafe.Slice of incomplete (or unallocatable) type not allowed") + } + + if !checkunsafesliceorstring(n.Op(), &n.Y) { + n.SetType(nil) + return n + } + n.SetType(types.NewSlice(t.Elem())) + return n +} + +// tcUnsafeString typechecks an OUNSAFESTRING node. 
+func tcUnsafeString(n *ir.BinaryExpr) *ir.BinaryExpr { + n.X = Expr(n.X) + n.Y = Expr(n.Y) + if n.X.Type() == nil || n.Y.Type() == nil { + n.SetType(nil) + return n + } + t := n.X.Type() + if !t.IsPtr() || !types.Identical(t.Elem(), types.Types[types.TUINT8]) { + base.Errorf("first argument to unsafe.String must be *byte; have %L", t) + } + + if !checkunsafesliceorstring(n.Op(), &n.Y) { + n.SetType(nil) + return n + } + n.SetType(types.Types[types.TSTRING]) + return n +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/iexport.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/iexport.go new file mode 100644 index 0000000000000000000000000000000000000000..83d35b365f29d561f0e6b78bda439933aeae6a4f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/iexport.go @@ -0,0 +1,260 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed package export. +// +// The indexed export data format is an evolution of the previous +// binary export data format. Its chief contribution is introducing an +// index table, which allows efficient random access of individual +// declarations and inline function bodies. In turn, this allows +// avoiding unnecessary work for compilation units that import large +// packages. +// +// +// The top-level data format is structured as: +// +// Header struct { +// Tag byte // 'i' +// Version uvarint +// StringSize uvarint +// DataSize uvarint +// } +// +// Strings [StringSize]byte +// Data [DataSize]byte +// +// MainIndex []struct{ +// PkgPath stringOff +// PkgName stringOff +// PkgHeight uvarint +// +// Decls []struct{ +// Name stringOff +// Offset declOff +// } +// } +// +// Fingerprint [8]byte +// +// uvarint means a uint64 written out using uvarint encoding. +// +// []T means a uvarint followed by that many T objects. 
In other +// words: +// +// Len uvarint +// Elems [Len]T +// +// stringOff means a uvarint that indicates an offset within the +// Strings section. At that offset is another uvarint, followed by +// that many bytes, which form the string value. +// +// declOff means a uvarint that indicates an offset within the Data +// section where the associated declaration can be found. +// +// +// There are five kinds of declarations, distinguished by their first +// byte: +// +// type Var struct { +// Tag byte // 'V' +// Pos Pos +// Type typeOff +// } +// +// type Func struct { +// Tag byte // 'F' or 'G' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'G' +// Signature Signature +// } +// +// type Const struct { +// Tag byte // 'C' +// Pos Pos +// Value Value +// } +// +// type Type struct { +// Tag byte // 'T' or 'U' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'U' +// Underlying typeOff +// +// Methods []struct{ // omitted if Underlying is an interface type +// Pos Pos +// Name stringOff +// Recv Param +// Signature Signature +// } +// } +// +// type Alias struct { +// Tag byte // 'A' +// Pos Pos +// Type typeOff +// } +// +// // "Automatic" declaration of each typeparam +// type TypeParam struct { +// Tag byte // 'P' +// Pos Pos +// Implicit bool +// Constraint typeOff +// } +// +// typeOff means a uvarint that either indicates a predeclared type, +// or an offset into the Data section. If the uvarint is less than +// predeclReserved, then it indicates the index into the predeclared +// types list (see predeclared in bexport.go for order). Otherwise, +// subtracting predeclReserved yields the offset of a type descriptor. +// +// Value means a type, kind, and type-specific value. See +// (*exportWriter).value for details. 
+// +// +// There are twelve kinds of type descriptors, distinguished by an itag: +// +// type DefinedType struct { +// Tag itag // definedType +// Name stringOff +// PkgPath stringOff +// } +// +// type PointerType struct { +// Tag itag // pointerType +// Elem typeOff +// } +// +// type SliceType struct { +// Tag itag // sliceType +// Elem typeOff +// } +// +// type ArrayType struct { +// Tag itag // arrayType +// Len uint64 +// Elem typeOff +// } +// +// type ChanType struct { +// Tag itag // chanType +// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv +// Elem typeOff +// } +// +// type MapType struct { +// Tag itag // mapType +// Key typeOff +// Elem typeOff +// } +// +// type FuncType struct { +// Tag itag // signatureType +// PkgPath stringOff +// Signature Signature +// } +// +// type StructType struct { +// Tag itag // structType +// PkgPath stringOff +// Fields []struct { +// Pos Pos +// Name stringOff +// Type typeOff +// Embedded bool +// Note stringOff +// } +// } +// +// type InterfaceType struct { +// Tag itag // interfaceType +// PkgPath stringOff +// Embeddeds []struct { +// Pos Pos +// Type typeOff +// } +// Methods []struct { +// Pos Pos +// Name stringOff +// Signature Signature +// } +// } +// +// // Reference to a type param declaration +// type TypeParamType struct { +// Tag itag // typeParamType +// Name stringOff +// PkgPath stringOff +// } +// +// // Instantiation of a generic type (like List[T2] or List[int]) +// type InstanceType struct { +// Tag itag // instanceType +// Pos pos +// TypeArgs []typeOff +// BaseType typeOff +// } +// +// type UnionType struct { +// Tag itag // interfaceType +// Terms []struct { +// tilde bool +// Type typeOff +// } +// } +// +// +// +// type Signature struct { +// Params []Param +// Results []Param +// Variadic bool // omitted if Results is empty +// } +// +// type Param struct { +// Pos Pos +// Name stringOff +// Type typOff +// } +// +// +// Pos encodes a file:line:column triple, incorporating a simple 
delta
+// encoding scheme within a data object. See exportWriter.pos for
+// details.
+//
+//
+// Compiler-specific details.
+//
+// cmd/compile writes out a second index for inline bodies and also
+// appends additional compiler-specific details after declarations.
+// Third-party tools are not expected to depend on these details and
+// they're expected to change much more rapidly, so they're omitted
+// here. See exportWriter's varExt/funcExt/etc methods for details.
+
+package typecheck
+
+import (
+	"strings"
+)
+
+// blankMarker is the prefix used when encoding a blank ("_") type
+// parameter name for export; see TparamName below, which reverses it.
+const blankMarker = "$"
+
+// TparamName returns the real name of a type parameter, after stripping its
+// qualifying prefix and reverting blank-name encoding. See TparamExportName
+// for details.
+func TparamName(exportName string) string {
+	// Remove the "path" from the type param name that makes it unique.
+	ix := strings.LastIndex(exportName, ".")
+	if ix < 0 {
+		// Not a qualified name; nothing to strip. Empty string signals
+		// a malformed export name to the caller.
+		return ""
+	}
+	name := exportName[ix+1:]
+	if strings.HasPrefix(name, blankMarker) {
+		// Blank type parameters were encoded with a marker prefix
+		// (plus a uniquifying suffix); decode back to "_".
+		return "_"
+	}
+	return name
+}
+
+// The name used for dictionary parameters or local variables.
+const LocalDictName = ".dict"
diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/iimport.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/iimport.go
new file mode 100644
index 0000000000000000000000000000000000000000..cb3feb1e7a8691fe0e4486b5020ab82c066a402a
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/iimport.go
@@ -0,0 +1,53 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package import.
+// See iexport.go for the export data format.
+
+package typecheck
+
+import (
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/types"
+)
+
+// HaveInlineBody reports whether we have fn's inline body available
+// for inlining.
+// +// It's a function literal so that it can be overridden for +// GOEXPERIMENT=unified. +var HaveInlineBody = func(fn *ir.Func) bool { + base.Fatalf("HaveInlineBody not overridden") + panic("unreachable") +} + +func SetBaseTypeIndex(t *types.Type, i, pi int64) { + if t.Obj() == nil { + base.Fatalf("SetBaseTypeIndex on non-defined type %v", t) + } + if i != -1 && pi != -1 { + typeSymIdx[t] = [2]int64{i, pi} + } +} + +// Map imported type T to the index of type descriptor symbols of T and *T, +// so we can use index to reference the symbol. +// TODO(mdempsky): Store this information directly in the Type's Name. +var typeSymIdx = make(map[*types.Type][2]int64) + +func BaseTypeIndex(t *types.Type) int64 { + tbase := t + if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil { + tbase = t.Elem() + } + i, ok := typeSymIdx[tbase] + if !ok { + return -1 + } + if t != tbase { + return i[1] + } + return i[0] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/mkbuiltin.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/mkbuiltin.go new file mode 100644 index 0000000000000000000000000000000000000000..28afac5d7ab1909088f1888d05495ba03c517c05 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/mkbuiltin.go @@ -0,0 +1,254 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +// Generate builtin.go from builtin/runtime.go. + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/token" + "io" + "log" + "os" + "path/filepath" + "strconv" + "strings" +) + +var stdout = flag.Bool("stdout", false, "write to stdout instead of builtin.go") +var nofmt = flag.Bool("nofmt", false, "skip formatting builtin.go") + +func main() { + flag.Parse() + + var b bytes.Buffer + fmt.Fprintln(&b, "// Code generated by mkbuiltin.go. 
DO NOT EDIT.") + fmt.Fprintln(&b) + fmt.Fprintln(&b, "package typecheck") + fmt.Fprintln(&b) + fmt.Fprintln(&b, `import (`) + fmt.Fprintln(&b, ` "cmd/compile/internal/types"`) + fmt.Fprintln(&b, ` "cmd/internal/src"`) + fmt.Fprintln(&b, `)`) + + fmt.Fprintln(&b, ` +// Not inlining this function removes a significant chunk of init code. +//go:noinline +func newSig(params, results []*types.Field) *types.Type { + return types.NewSignature(nil, params, results) +} + +func params(tlist ...*types.Type) []*types.Field { + flist := make([]*types.Field, len(tlist)) + for i, typ := range tlist { + flist[i] = types.NewField(src.NoXPos, nil, typ) + } + return flist +} +`) + + mkbuiltin(&b, "runtime") + mkbuiltin(&b, "coverage") + + var err error + out := b.Bytes() + if !*nofmt { + out, err = format.Source(out) + if err != nil { + log.Fatal(err) + } + } + if *stdout { + _, err = os.Stdout.Write(out) + } else { + err = os.WriteFile("builtin.go", out, 0666) + } + if err != nil { + log.Fatal(err) + } +} + +func mkbuiltin(w io.Writer, name string) { + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, filepath.Join("_builtin", name+".go"), nil, 0) + if err != nil { + log.Fatal(err) + } + + var interner typeInterner + + fmt.Fprintf(w, "var %sDecls = [...]struct { name string; tag int; typ int }{\n", name) + for _, decl := range f.Decls { + switch decl := decl.(type) { + case *ast.FuncDecl: + if decl.Recv != nil { + log.Fatal("methods unsupported") + } + if decl.Body != nil { + log.Fatal("unexpected function body") + } + fmt.Fprintf(w, "{%q, funcTag, %d},\n", decl.Name.Name, interner.intern(decl.Type)) + case *ast.GenDecl: + if decl.Tok == token.IMPORT { + if len(decl.Specs) != 1 || decl.Specs[0].(*ast.ImportSpec).Path.Value != "\"unsafe\"" { + log.Fatal("runtime cannot import other package") + } + continue + } + if decl.Tok != token.VAR { + log.Fatal("unhandled declaration kind", decl.Tok) + } + for _, spec := range decl.Specs { + spec := spec.(*ast.ValueSpec) + if 
len(spec.Values) != 0 { + log.Fatal("unexpected values") + } + typ := interner.intern(spec.Type) + for _, name := range spec.Names { + fmt.Fprintf(w, "{%q, varTag, %d},\n", name.Name, typ) + } + } + default: + log.Fatal("unhandled decl type", decl) + } + } + fmt.Fprintln(w, "}") + + fmt.Fprintln(w) + fmt.Fprintf(w, "func %sTypes() []*types.Type {\n", name) + fmt.Fprintf(w, "var typs [%d]*types.Type\n", len(interner.typs)) + for i, typ := range interner.typs { + fmt.Fprintf(w, "typs[%d] = %s\n", i, typ) + } + fmt.Fprintln(w, "return typs[:]") + fmt.Fprintln(w, "}") +} + +// typeInterner maps Go type expressions to compiler code that +// constructs the denoted type. It recognizes and reuses common +// subtype expressions. +type typeInterner struct { + typs []string + hash map[string]int +} + +func (i *typeInterner) intern(t ast.Expr) int { + x := i.mktype(t) + v, ok := i.hash[x] + if !ok { + v = len(i.typs) + if i.hash == nil { + i.hash = make(map[string]int) + } + i.hash[x] = v + i.typs = append(i.typs, x) + } + return v +} + +func (i *typeInterner) subtype(t ast.Expr) string { + return fmt.Sprintf("typs[%d]", i.intern(t)) +} + +func (i *typeInterner) mktype(t ast.Expr) string { + switch t := t.(type) { + case *ast.Ident: + switch t.Name { + case "byte": + return "types.ByteType" + case "rune": + return "types.RuneType" + } + return fmt.Sprintf("types.Types[types.T%s]", strings.ToUpper(t.Name)) + case *ast.SelectorExpr: + if t.X.(*ast.Ident).Name != "unsafe" || t.Sel.Name != "Pointer" { + log.Fatalf("unhandled type: %#v", t) + } + return "types.Types[types.TUNSAFEPTR]" + + case *ast.ArrayType: + if t.Len == nil { + return fmt.Sprintf("types.NewSlice(%s)", i.subtype(t.Elt)) + } + return fmt.Sprintf("types.NewArray(%s, %d)", i.subtype(t.Elt), intconst(t.Len)) + case *ast.ChanType: + dir := "types.Cboth" + switch t.Dir { + case ast.SEND: + dir = "types.Csend" + case ast.RECV: + dir = "types.Crecv" + } + return fmt.Sprintf("types.NewChan(%s, %s)", i.subtype(t.Value), 
dir) + case *ast.FuncType: + return fmt.Sprintf("newSig(%s, %s)", i.fields(t.Params, false), i.fields(t.Results, false)) + case *ast.InterfaceType: + if len(t.Methods.List) != 0 { + log.Fatal("non-empty interfaces unsupported") + } + return "types.Types[types.TINTER]" + case *ast.MapType: + return fmt.Sprintf("types.NewMap(%s, %s)", i.subtype(t.Key), i.subtype(t.Value)) + case *ast.StarExpr: + return fmt.Sprintf("types.NewPtr(%s)", i.subtype(t.X)) + case *ast.StructType: + return fmt.Sprintf("types.NewStruct(%s)", i.fields(t.Fields, true)) + + default: + log.Fatalf("unhandled type: %#v", t) + panic("unreachable") + } +} + +func (i *typeInterner) fields(fl *ast.FieldList, keepNames bool) string { + if fl == nil || len(fl.List) == 0 { + return "nil" + } + + var res []string + for _, f := range fl.List { + typ := i.subtype(f.Type) + if len(f.Names) == 0 { + res = append(res, typ) + } else { + for _, name := range f.Names { + if keepNames { + res = append(res, fmt.Sprintf("types.NewField(src.NoXPos, Lookup(%q), %s)", name.Name, typ)) + } else { + res = append(res, typ) + } + } + } + } + + if keepNames { + return fmt.Sprintf("[]*types.Field{%s}", strings.Join(res, ", ")) + } + return fmt.Sprintf("params(%s)", strings.Join(res, ", ")) +} + +func intconst(e ast.Expr) int64 { + switch e := e.(type) { + case *ast.BasicLit: + if e.Kind != token.INT { + log.Fatalf("expected INT, got %v", e.Kind) + } + x, err := strconv.ParseInt(e.Value, 0, 64) + if err != nil { + log.Fatal(err) + } + return x + default: + log.Fatalf("unhandled expr: %#v", e) + panic("unreachable") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/stmt.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/stmt.go new file mode 100644 index 0000000000000000000000000000000000000000..8d792485d863c401d98b279449a763415b46e96c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/stmt.go @@ -0,0 +1,727 @@ +// Copyright 2009 The Go 
Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typecheck + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/src" + "internal/types/errors" +) + +func RangeExprType(t *types.Type) *types.Type { + if t.IsPtr() && t.Elem().IsArray() { + return t.Elem() + } + return t +} + +func typecheckrangeExpr(n *ir.RangeStmt) { +} + +// type check assignment. +// if this assignment is the definition of a var on the left side, +// fill in the var's type. +func tcAssign(n *ir.AssignStmt) { + if base.EnableTrace && base.Flag.LowerT { + defer tracePrint("tcAssign", n)(nil) + } + + if n.Y == nil { + n.X = AssignExpr(n.X) + return + } + + lhs, rhs := []ir.Node{n.X}, []ir.Node{n.Y} + assign(n, lhs, rhs) + n.X, n.Y = lhs[0], rhs[0] + + // TODO(mdempsky): This seems out of place. + if !ir.IsBlank(n.X) { + types.CheckSize(n.X.Type()) // ensure width is calculated for backend + } +} + +func tcAssignList(n *ir.AssignListStmt) { + if base.EnableTrace && base.Flag.LowerT { + defer tracePrint("tcAssignList", n)(nil) + } + + assign(n, n.Lhs, n.Rhs) +} + +func assign(stmt ir.Node, lhs, rhs []ir.Node) { + // delicate little dance. + // the definition of lhs may refer to this assignment + // as its definition, in which case it will call tcAssign. + // in that case, do not call typecheck back, or it will cycle. + // if the variable has a type (ntype) then typechecking + // will not look at defn, so it is okay (and desirable, + // so that the conversion below happens). 
+ + checkLHS := func(i int, typ *types.Type) { + if n := lhs[i]; typ != nil && ir.DeclaredBy(n, stmt) && n.Type() == nil { + base.Assertf(typ.Kind() == types.TNIL, "unexpected untyped nil") + n.SetType(defaultType(typ)) + } + if lhs[i].Typecheck() == 0 { + lhs[i] = AssignExpr(lhs[i]) + } + checkassign(lhs[i]) + } + + assignType := func(i int, typ *types.Type) { + checkLHS(i, typ) + if typ != nil { + checkassignto(typ, lhs[i]) + } + } + + cr := len(rhs) + if len(rhs) == 1 { + rhs[0] = typecheck(rhs[0], ctxExpr|ctxMultiOK) + if rtyp := rhs[0].Type(); rtyp != nil && rtyp.IsFuncArgStruct() { + cr = rtyp.NumFields() + } + } else { + Exprs(rhs) + } + + // x, ok = y +assignOK: + for len(lhs) == 2 && cr == 1 { + stmt := stmt.(*ir.AssignListStmt) + r := rhs[0] + + switch r.Op() { + case ir.OINDEXMAP: + stmt.SetOp(ir.OAS2MAPR) + case ir.ORECV: + stmt.SetOp(ir.OAS2RECV) + case ir.ODOTTYPE: + r := r.(*ir.TypeAssertExpr) + stmt.SetOp(ir.OAS2DOTTYPE) + r.SetOp(ir.ODOTTYPE2) + case ir.ODYNAMICDOTTYPE: + r := r.(*ir.DynamicTypeAssertExpr) + stmt.SetOp(ir.OAS2DOTTYPE) + r.SetOp(ir.ODYNAMICDOTTYPE2) + default: + break assignOK + } + + assignType(0, r.Type()) + assignType(1, types.UntypedBool) + return + } + + if len(lhs) != cr { + if r, ok := rhs[0].(*ir.CallExpr); ok && len(rhs) == 1 { + if r.Type() != nil { + base.ErrorfAt(stmt.Pos(), errors.WrongAssignCount, "assignment mismatch: %d variable%s but %v returns %d value%s", len(lhs), plural(len(lhs)), r.Fun, cr, plural(cr)) + } + } else { + base.ErrorfAt(stmt.Pos(), errors.WrongAssignCount, "assignment mismatch: %d variable%s but %v value%s", len(lhs), plural(len(lhs)), len(rhs), plural(len(rhs))) + } + + for i := range lhs { + checkLHS(i, nil) + } + return + } + + // x,y,z = f() + if cr > len(rhs) { + stmt := stmt.(*ir.AssignListStmt) + stmt.SetOp(ir.OAS2FUNC) + r := rhs[0].(*ir.CallExpr) + rtyp := r.Type() + + mismatched := false + failed := false + for i := range lhs { + result := rtyp.Field(i).Type + assignType(i, result) + + if 
lhs[i].Type() == nil || result == nil { + failed = true + } else if lhs[i] != ir.BlankNode && !types.Identical(lhs[i].Type(), result) { + mismatched = true + } + } + if mismatched && !failed { + RewriteMultiValueCall(stmt, r) + } + return + } + + for i, r := range rhs { + checkLHS(i, r.Type()) + if lhs[i].Type() != nil { + rhs[i] = AssignConv(r, lhs[i].Type(), "assignment") + } + } +} + +func plural(n int) string { + if n == 1 { + return "" + } + return "s" +} + +// tcCheckNil typechecks an OCHECKNIL node. +func tcCheckNil(n *ir.UnaryExpr) ir.Node { + n.X = Expr(n.X) + if !n.X.Type().IsPtrShaped() { + base.FatalfAt(n.Pos(), "%L is not pointer shaped", n.X) + } + return n +} + +// tcFor typechecks an OFOR node. +func tcFor(n *ir.ForStmt) ir.Node { + Stmts(n.Init()) + n.Cond = Expr(n.Cond) + n.Cond = DefaultLit(n.Cond, nil) + if n.Cond != nil { + t := n.Cond.Type() + if t != nil && !t.IsBoolean() { + base.Errorf("non-bool %L used as for condition", n.Cond) + } + } + n.Post = Stmt(n.Post) + Stmts(n.Body) + return n +} + +// tcGoDefer typechecks (normalizes) an OGO/ODEFER statement. +func tcGoDefer(n *ir.GoDeferStmt) { + call := normalizeGoDeferCall(n.Pos(), n.Op(), n.Call, n.PtrInit()) + call.GoDefer = true + n.Call = call +} + +// normalizeGoDeferCall normalizes call into a normal function call +// with no arguments and no results, suitable for use in an OGO/ODEFER +// statement. +// +// For example, it normalizes: +// +// f(x, y) +// +// into: +// +// x1, y1 := x, y // added to init +// func() { f(x1, y1) }() // result +func normalizeGoDeferCall(pos src.XPos, op ir.Op, call ir.Node, init *ir.Nodes) *ir.CallExpr { + init.Append(ir.TakeInit(call)...) + + if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC { + if sig := call.Fun.Type(); sig.NumParams()+sig.NumResults() == 0 { + return call // already in normal form + } + } + + // Create a new wrapper function without parameters or results. 
+ wrapperFn := ir.NewClosureFunc(pos, pos, op, types.NewSignature(nil, nil, nil), ir.CurFunc, Target) + wrapperFn.DeclareParams(true) + wrapperFn.SetWrapper(true) + + // argps collects the list of operands within the call expression + // that must be evaluated at the go/defer statement. + var argps []*ir.Node + + var visit func(argp *ir.Node) + visit = func(argp *ir.Node) { + arg := *argp + if arg == nil { + return + } + + // Recognize a few common expressions that can be evaluated within + // the wrapper, so we don't need to allocate space for them within + // the closure. + switch arg.Op() { + case ir.OLITERAL, ir.ONIL, ir.OMETHEXPR, ir.ONEW: + return + case ir.ONAME: + arg := arg.(*ir.Name) + if arg.Class == ir.PFUNC { + return // reference to global function + } + case ir.OADDR: + arg := arg.(*ir.AddrExpr) + if arg.X.Op() == ir.OLINKSYMOFFSET { + return // address of global symbol + } + + case ir.OCONVNOP: + arg := arg.(*ir.ConvExpr) + + // For unsafe.Pointer->uintptr conversion arguments, save the + // unsafe.Pointer argument. This is necessary to handle cases + // like fixedbugs/issue24491a.go correctly. + // + // TODO(mdempsky): Limit to static callees with + // //go:uintptr{escapes,keepalive}? + if arg.Type().IsUintptr() && arg.X.Type().IsUnsafePtr() { + visit(&arg.X) + return + } + + case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT: + // TODO(mdempsky): For very large slices, it may be preferable + // to construct them at the go/defer statement instead. 
+ list := arg.(*ir.CompLitExpr).List + for i, el := range list { + switch el := el.(type) { + case *ir.KeyExpr: + visit(&el.Value) + case *ir.StructKeyExpr: + visit(&el.Value) + default: + visit(&list[i]) + } + } + return + } + + argps = append(argps, argp) + } + + visitList := func(list []ir.Node) { + for i := range list { + visit(&list[i]) + } + } + + switch call.Op() { + default: + base.Fatalf("unexpected call op: %v", call.Op()) + + case ir.OCALLFUNC: + call := call.(*ir.CallExpr) + + // If the callee is a named function, link to the original callee. + if wrapped := ir.StaticCalleeName(call.Fun); wrapped != nil { + wrapperFn.WrappedFunc = wrapped.Func + } + + visit(&call.Fun) + visitList(call.Args) + + case ir.OCALLINTER: + call := call.(*ir.CallExpr) + argps = append(argps, &call.Fun.(*ir.SelectorExpr).X) // must be first for OCHECKNIL; see below + visitList(call.Args) + + case ir.OAPPEND, ir.ODELETE, ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP: + call := call.(*ir.CallExpr) + visitList(call.Args) + visit(&call.RType) + + case ir.OCOPY: + call := call.(*ir.BinaryExpr) + visit(&call.X) + visit(&call.Y) + visit(&call.RType) + + case ir.OCLEAR, ir.OCLOSE, ir.OPANIC: + call := call.(*ir.UnaryExpr) + visit(&call.X) + } + + if len(argps) != 0 { + // Found one or more operands that need to be evaluated upfront + // and spilled to temporary variables, which can be captured by + // the wrapper function. + + stmtPos := base.Pos + callPos := base.Pos + + as := ir.NewAssignListStmt(callPos, ir.OAS2, make([]ir.Node, len(argps)), make([]ir.Node, len(argps))) + for i, argp := range argps { + arg := *argp + + pos := callPos + if ir.HasUniquePos(arg) { + pos = arg.Pos() + } + + // tmp := arg + tmp := TempAt(pos, ir.CurFunc, arg.Type()) + init.Append(Stmt(ir.NewDecl(pos, ir.ODCL, tmp))) + tmp.Defn = as + as.Lhs[i] = tmp + as.Rhs[i] = arg + + // Rewrite original expression to use/capture tmp. 
+ *argp = ir.NewClosureVar(pos, wrapperFn, tmp) + } + init.Append(Stmt(as)) + + // For "go/defer iface.M()", if iface is nil, we need to panic at + // the point of the go/defer statement. + if call.Op() == ir.OCALLINTER { + iface := as.Lhs[0] + init.Append(Stmt(ir.NewUnaryExpr(stmtPos, ir.OCHECKNIL, ir.NewUnaryExpr(iface.Pos(), ir.OITAB, iface)))) + } + } + + // Move call into the wrapper function, now that it's safe to + // evaluate there. + wrapperFn.Body = []ir.Node{call} + + // Finally, construct a call to the wrapper. + return Call(call.Pos(), wrapperFn.OClosure, nil, false).(*ir.CallExpr) +} + +// tcIf typechecks an OIF node. +func tcIf(n *ir.IfStmt) ir.Node { + Stmts(n.Init()) + n.Cond = Expr(n.Cond) + n.Cond = DefaultLit(n.Cond, nil) + if n.Cond != nil { + t := n.Cond.Type() + if t != nil && !t.IsBoolean() { + base.Errorf("non-bool %L used as if condition", n.Cond) + } + } + Stmts(n.Body) + Stmts(n.Else) + return n +} + +// range +func tcRange(n *ir.RangeStmt) { + n.X = Expr(n.X) + + // delicate little dance. see tcAssignList + if n.Key != nil { + if !ir.DeclaredBy(n.Key, n) { + n.Key = AssignExpr(n.Key) + } + checkassign(n.Key) + } + if n.Value != nil { + if !ir.DeclaredBy(n.Value, n) { + n.Value = AssignExpr(n.Value) + } + checkassign(n.Value) + } + + // second half of dance + n.SetTypecheck(1) + if n.Key != nil && n.Key.Typecheck() == 0 { + n.Key = AssignExpr(n.Key) + } + if n.Value != nil && n.Value.Typecheck() == 0 { + n.Value = AssignExpr(n.Value) + } + + Stmts(n.Body) +} + +// tcReturn typechecks an ORETURN node. 
+func tcReturn(n *ir.ReturnStmt) ir.Node {
+	if ir.CurFunc == nil {
+		// A return statement can only appear inside a function body;
+		// anything else is a front-end invariant violation.
+		base.FatalfAt(n.Pos(), "return outside function")
+	}
+
+	typecheckargs(n)
+	if len(n.Results) != 0 {
+		// Check the result expressions against the enclosing
+		// function's declared result parameters.
+		typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), n.Results, func() string { return "return argument" })
+	}
+	return n
+}
+
+// select
+func tcSelect(sel *ir.SelectStmt) {
+	var def *ir.CommClause // first default clause seen, for duplicate reporting
+	lno := ir.SetPos(sel)
+	Stmts(sel.Init())
+	for _, ncase := range sel.Cases {
+		if ncase.Comm == nil {
+			// default
+			if def != nil {
+				base.ErrorfAt(ncase.Pos(), errors.DuplicateDefault, "multiple defaults in select (first at %v)", ir.Line(def))
+			} else {
+				def = ncase
+			}
+		} else {
+			n := Stmt(ncase.Comm)
+			ncase.Comm = n
+			// oselrecv2 rewrites the comm clause into the canonical
+			// two-result receive form: dst, _ = <-c (OSELRECV2).
+			oselrecv2 := func(dst, recv ir.Node, def bool) {
+				selrecv := ir.NewAssignListStmt(n.Pos(), ir.OSELRECV2, []ir.Node{dst, ir.BlankNode}, []ir.Node{recv})
+				selrecv.Def = def
+				selrecv.SetTypecheck(1)
+				selrecv.SetInit(n.Init())
+				ncase.Comm = selrecv
+			}
+			switch n.Op() {
+			default:
+				pos := n.Pos()
+				if n.Op() == ir.ONAME {
+					// We don't have the right position for ONAME nodes (see #15459 and
+					// others). Using ncase.Pos for now as it will provide the correct
+					// line number (assuming the expression follows the "case" keyword
+					// on the same line). This matches the approach before 1.10.
+					pos = ncase.Pos()
+				}
+				base.ErrorfAt(pos, errors.InvalidSelectCase, "select case must be receive, send or assign recv")
+
+			case ir.OAS:
+				// convert x = <-c into x, _ = <-c
+				// remove implicit conversions; the eventual assignment
+				// will reintroduce them.
+				n := n.(*ir.AssignStmt)
+				if r := n.Y; r.Op() == ir.OCONVNOP || r.Op() == ir.OCONVIFACE {
+					r := r.(*ir.ConvExpr)
+					if r.Implicit() {
+						n.Y = r.X
+					}
+				}
+				if n.Y.Op() != ir.ORECV {
+					base.ErrorfAt(n.Pos(), errors.InvalidSelectCase, "select assignment must have receive on right hand side")
+					break
+				}
+				oselrecv2(n.X, n.Y, n.Def)
+
+			case ir.OAS2RECV:
+				// x, ok = <-c is already two-result; just retag the op.
+				n := n.(*ir.AssignListStmt)
+				if n.Rhs[0].Op() != ir.ORECV {
+					base.ErrorfAt(n.Pos(), errors.InvalidSelectCase, "select assignment must have receive on right hand side")
+					break
+				}
+				n.SetOp(ir.OSELRECV2)
+
+			case ir.ORECV:
+				// convert <-c into _, _ = <-c
+				n := n.(*ir.UnaryExpr)
+				oselrecv2(ir.BlankNode, n, false)
+
+			case ir.OSEND:
+				break
+			}
+		}
+
+		Stmts(ncase.Body)
+	}
+
+	base.Pos = lno
+}
+
+// tcSend typechecks an OSEND node.
+func tcSend(n *ir.SendStmt) ir.Node {
+	n.Chan = Expr(n.Chan)
+	n.Value = Expr(n.Value)
+	n.Chan = DefaultLit(n.Chan, nil)
+	t := n.Chan.Type()
+	if t == nil {
+		// Channel expression failed to typecheck; error already reported.
+		return n
+	}
+	if !t.IsChan() {
+		base.Errorf("invalid operation: %v (send to non-chan type %v)", n, t)
+		return n
+	}
+
+	if !t.ChanDir().CanSend() {
+		base.Errorf("invalid operation: %v (send to receive-only type %v)", n, t)
+		return n
+	}
+
+	// The sent value must be assignable to the channel's element type.
+	n.Value = AssignConv(n.Value, t.Elem(), "send")
+	if n.Value.Type() == nil {
+		return n
+	}
+	return n
+}
+
+// tcSwitch typechecks a switch statement.
+func tcSwitch(n *ir.SwitchStmt) {
+	Stmts(n.Init())
+	// Dispatch on the switch flavor: a type switch has an OTYPESW guard
+	// as its tag; everything else is an expression switch.
+	if n.Tag != nil && n.Tag.Op() == ir.OTYPESW {
+		tcSwitchType(n)
+	} else {
+		tcSwitchExpr(n)
+	}
+}
+
+// tcSwitchExpr typechecks an expression switch, including a tagless
+// switch (which compares cases against true).
+func tcSwitchExpr(n *ir.SwitchStmt) {
+	// A tagless switch behaves as "switch true { ... }".
+	t := types.Types[types.TBOOL]
+	if n.Tag != nil {
+		n.Tag = Expr(n.Tag)
+		n.Tag = DefaultLit(n.Tag, nil)
+		t = n.Tag.Type()
+	}
+
+	// nilonly names a tag type that may only be compared to nil
+	// (map, func, slice); empty means normal comparison rules apply.
+	var nilonly string
+	if t != nil {
+		switch {
+		case t.IsMap():
+			nilonly = "map"
+		case t.Kind() == types.TFUNC:
+			nilonly = "func"
+		case t.IsSlice():
+			nilonly = "slice"
+
+		case !types.IsComparable(t):
+			if t.IsStruct() {
+				base.ErrorfAt(n.Pos(), errors.InvalidExprSwitch, "cannot switch on %L (struct containing %v cannot be compared)", n.Tag, types.IncomparableField(t).Type)
+			} else {
+				base.ErrorfAt(n.Pos(), errors.InvalidExprSwitch, "cannot switch on %L", n.Tag)
+			}
+			t = nil
+		}
+	}
+
+	var defCase ir.Node // first default clause, for duplicate reporting
+	for _, ncase := range n.Cases {
+		ls := ncase.List
+		if len(ls) == 0 { // default:
+			if defCase != nil {
+				base.ErrorfAt(ncase.Pos(), errors.DuplicateDefault, "multiple defaults in switch (first at %v)", ir.Line(defCase))
+			} else {
+				defCase = ncase
+			}
+		}
+
+		for i := range ls {
+			ir.SetPos(ncase)
+			ls[i] = Expr(ls[i])
+			ls[i] = DefaultLit(ls[i], t)
+			n1 := ls[i]
+			if t == nil || n1.Type() == nil {
+				// Tag or case expression is already broken; skip checks.
+				continue
+			}
+
+			if nilonly != "" && !ir.IsNil(n1) {
+				base.ErrorfAt(ncase.Pos(), errors.MismatchedTypes, "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Tag)
+			} else if t.IsInterface() && !n1.Type().IsInterface() && !types.IsComparable(n1.Type()) {
+				base.ErrorfAt(ncase.Pos(), errors.UndefinedOp, "invalid case %L in switch (incomparable type)", n1)
+			} else {
+				// The case value and the tag must be assignable in at
+				// least one direction for == to be defined between them.
+				op1, _ := assignOp(n1.Type(), t)
+				op2, _ := assignOp(t, n1.Type())
+				if op1 == ir.OXXX && op2 == ir.OXXX {
+					if n.Tag != nil {
+						base.ErrorfAt(ncase.Pos(), errors.MismatchedTypes, "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Tag, n1.Type(), t)
+					} else {
+						base.ErrorfAt(ncase.Pos(), errors.MismatchedTypes, "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type())
+					}
+				}
+			}
+		}
+
+		Stmts(ncase.Body)
+	}
+}
+
+// tcSwitchType typechecks a type switch, checking each case type against
+// the guarded interface value and typing the per-clause variable.
+func tcSwitchType(n *ir.SwitchStmt) {
+	guard := n.Tag.(*ir.TypeSwitchGuard)
+	guard.X = Expr(guard.X)
+	t := guard.X.Type()
+	if t != nil && !t.IsInterface() {
+		base.ErrorfAt(n.Pos(), errors.InvalidTypeSwitch, "cannot type switch on non-interface value %L", guard.X)
+		t = nil
+	}
+
+	// We don't actually declare the type switch's guarded
+	// declaration itself. So if there are no cases, we won't
+	// notice that it went unused.
+	if v := guard.Tag; v != nil && !ir.IsBlank(v) && len(n.Cases) == 0 {
+		base.ErrorfAt(v.Pos(), errors.UnusedVar, "%v declared but not used", v.Sym())
+	}
+
+	var defCase, nilCase ir.Node
+	var ts typeSet // tracks case types to diagnose duplicates
+	for _, ncase := range n.Cases {
+		ls := ncase.List
+		if len(ls) == 0 { // default:
+			if defCase != nil {
+				base.ErrorfAt(ncase.Pos(), errors.DuplicateDefault, "multiple defaults in switch (first at %v)", ir.Line(defCase))
+			} else {
+				defCase = ncase
+			}
+		}
+
+		for i := range ls {
+			// Case entries may be types or the nil expression.
+			ls[i] = typecheck(ls[i], ctxExpr|ctxType)
+			n1 := ls[i]
+			if t == nil || n1.Type() == nil {
+				continue
+			}
+
+			if ir.IsNil(n1) { // case nil:
+				if nilCase != nil {
+					base.ErrorfAt(ncase.Pos(), errors.DuplicateCase, "multiple nil cases in type switch (first at %v)", ir.Line(nilCase))
+				} else {
+					nilCase = ncase
+				}
+				continue
+			}
+			if n1.Op() == ir.ODYNAMICTYPE {
+				continue
+			}
+			if n1.Op() != ir.OTYPE {
+				base.ErrorfAt(ncase.Pos(), errors.NotAType, "%L is not a type", n1)
+				continue
+			}
+			if !n1.Type().IsInterface() {
+				// A concrete case type must implement the guarded
+				// interface, or the case can never match.
+				why := ImplementsExplain(n1.Type(), t)
+				if why != "" {
+					base.ErrorfAt(ncase.Pos(), errors.ImpossibleAssert, "impossible type switch case: %L cannot have dynamic type %v (%s)", guard.X, n1.Type(), why)
+				}
+				continue
+			}
+
+			ts.add(ncase.Pos(), n1.Type())
+		}
+
+		if ncase.Var != nil {
+			// Assign the clause variable's type.
+			vt := t
+			if len(ls) == 1 {
+				if ls[0].Op() == ir.OTYPE || ls[0].Op() == ir.ODYNAMICTYPE {
+					vt = ls[0].Type()
+				} else if !ir.IsNil(ls[0]) {
+					// Invalid single-type case;
+					// mark variable as broken.
+					vt = nil
+				}
+			}
+
+			nvar := ncase.Var
+			nvar.SetType(vt)
+			if vt != nil {
+				nvar = AssignExpr(nvar).(*ir.Name)
+			} else {
+				// Clause variable is broken; prevent typechecking.
+				nvar.SetTypecheck(1)
+			}
+			ncase.Var = nvar
+		}
+
+		Stmts(ncase.Body)
+	}
+}
+
+// typeSet tracks the case types seen in a type switch, keyed by their
+// link string, so duplicates can be reported with the earlier position.
+type typeSet struct {
+	m map[string]src.XPos
+}
+
+// add records typ at pos, reporting a duplicate-case error if an
+// identical type was already recorded.
+func (s *typeSet) add(pos src.XPos, typ *types.Type) {
+	if s.m == nil {
+		s.m = make(map[string]src.XPos)
+	}
+
+	ls := typ.LinkString()
+	if prev, ok := s.m[ls]; ok {
+		base.ErrorfAt(pos, errors.DuplicateCase, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev))
+		return
+	}
+	s.m[ls] = pos
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/subr.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/subr.go
new file mode 100644
index 0000000000000000000000000000000000000000..d64b0f0e2230542681655c69e64f3776a819afa3
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/subr.go
@@ -0,0 +1,792 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"cmd/compile/internal/base"
+	"cmd/compile/internal/ir"
+	"cmd/compile/internal/types"
+	"cmd/internal/obj"
+	"cmd/internal/src"
+)
+
+// AssignConv converts node n to type t, using context in any error message.
+func AssignConv(n ir.Node, t *types.Type, context string) ir.Node {
+	return assignconvfn(n, t, func() string { return context })
+}
+
+// LookupNum returns types.LocalPkg.LookupNum(prefix, n).
+func LookupNum(prefix string, n int) *types.Sym {
+	return types.LocalPkg.LookupNum(prefix, n)
+}
+
+// Given funarg struct list, return list of fn args.
+func NewFuncParams(origs []*types.Field) []*types.Field { + res := make([]*types.Field, len(origs)) + for i, orig := range origs { + p := types.NewField(orig.Pos, orig.Sym, orig.Type) + p.SetIsDDD(orig.IsDDD()) + res[i] = p + } + return res +} + +// NodAddr returns a node representing &n at base.Pos. +func NodAddr(n ir.Node) *ir.AddrExpr { + return NodAddrAt(base.Pos, n) +} + +// NodAddrAt returns a node representing &n at position pos. +func NodAddrAt(pos src.XPos, n ir.Node) *ir.AddrExpr { + return ir.NewAddrExpr(pos, Expr(n)) +} + +// LinksymAddr returns a new expression that evaluates to the address +// of lsym. typ specifies the type of the addressed memory. +func LinksymAddr(pos src.XPos, lsym *obj.LSym, typ *types.Type) *ir.AddrExpr { + n := ir.NewLinksymExpr(pos, lsym, typ) + return Expr(NodAddrAt(pos, n)).(*ir.AddrExpr) +} + +func NodNil() ir.Node { + return ir.NewNilExpr(base.Pos, types.Types[types.TNIL]) +} + +// AddImplicitDots finds missing fields in obj.field that +// will give the shortest unique addressing and +// modifies the tree with missing field names. +func AddImplicitDots(n *ir.SelectorExpr) *ir.SelectorExpr { + n.X = typecheck(n.X, ctxType|ctxExpr) + t := n.X.Type() + if t == nil { + return n + } + + if n.X.Op() == ir.OTYPE { + return n + } + + s := n.Sel + if s == nil { + return n + } + + switch path, ambig := dotpath(s, t, nil, false); { + case path != nil: + // rebuild elided dots + for c := len(path) - 1; c >= 0; c-- { + dot := ir.NewSelectorExpr(n.Pos(), ir.ODOT, n.X, path[c].field.Sym) + dot.SetImplicit(true) + dot.SetType(path[c].field.Type) + n.X = dot + } + case ambig: + base.Errorf("ambiguous selector %v", n) + n.X = nil + } + + return n +} + +// CalcMethods calculates all the methods (including embedding) of a non-interface +// type t. +func CalcMethods(t *types.Type) { + if t == nil || len(t.AllMethods()) != 0 { + return + } + + // mark top-level method symbols + // so that expand1 doesn't consider them. 
+ for _, f := range t.Methods() { + f.Sym.SetUniq(true) + } + + // generate all reachable methods + slist = slist[:0] + expand1(t, true) + + // check each method to be uniquely reachable + var ms []*types.Field + for i, sl := range slist { + slist[i].field = nil + sl.field.Sym.SetUniq(false) + + var f *types.Field + path, _ := dotpath(sl.field.Sym, t, &f, false) + if path == nil { + continue + } + + // dotpath may have dug out arbitrary fields, we only want methods. + if !f.IsMethod() { + continue + } + + // add it to the base type method list + f = f.Copy() + f.Embedded = 1 // needs a trampoline + for _, d := range path { + if d.field.Type.IsPtr() { + f.Embedded = 2 + break + } + } + ms = append(ms, f) + } + + for _, f := range t.Methods() { + f.Sym.SetUniq(false) + } + + ms = append(ms, t.Methods()...) + sort.Sort(types.MethodsByName(ms)) + t.SetAllMethods(ms) +} + +// adddot1 returns the number of fields or methods named s at depth d in Type t. +// If exactly one exists, it will be returned in *save (if save is not nil), +// and dotlist will contain the path of embedded fields traversed to find it, +// in reverse order. If none exist, more will indicate whether t contains any +// embedded fields at depth d, so callers can decide whether to retry at +// a greater depth. +func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase bool) (c int, more bool) { + if t.Recur() { + return + } + t.SetRecur(true) + defer t.SetRecur(false) + + var u *types.Type + d-- + if d < 0 { + // We've reached our target depth. If t has any fields/methods + // named s, then we're done. Otherwise, we still need to check + // below for embedded fields. 
+ c = lookdot0(s, t, save, ignorecase) + if c != 0 { + return c, false + } + } + + u = t + if u.IsPtr() { + u = u.Elem() + } + if !u.IsStruct() && !u.IsInterface() { + return c, false + } + + var fields []*types.Field + if u.IsStruct() { + fields = u.Fields() + } else { + fields = u.AllMethods() + } + for _, f := range fields { + if f.Embedded == 0 || f.Sym == nil { + continue + } + if d < 0 { + // Found an embedded field at target depth. + return c, true + } + a, more1 := adddot1(s, f.Type, d, save, ignorecase) + if a != 0 && c == 0 { + dotlist[d].field = f + } + c += a + if more1 { + more = true + } + } + + return c, more +} + +// dotlist is used by adddot1 to record the path of embedded fields +// used to access a target field or method. +// Must be non-nil so that dotpath returns a non-nil slice even if d is zero. +var dotlist = make([]dlist, 10) + +// Convert node n for assignment to type t. +func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node { + if n == nil || n.Type() == nil { + return n + } + + if t.Kind() == types.TBLANK && n.Type().Kind() == types.TNIL { + base.Errorf("use of untyped nil") + } + + n = convlit1(n, t, false, context) + if n.Type() == nil { + base.Fatalf("cannot assign %v to %v", n, t) + } + if n.Type().IsUntyped() { + base.Fatalf("%L has untyped type", n) + } + if t.Kind() == types.TBLANK { + return n + } + if types.Identical(n.Type(), t) { + return n + } + + op, why := assignOp(n.Type(), t) + if op == ir.OXXX { + base.Errorf("cannot use %L as type %v in %s%s", n, t, context(), why) + op = ir.OCONV + } + + r := ir.NewConvExpr(base.Pos, op, t, n) + r.SetTypecheck(1) + r.SetImplicit(true) + return r +} + +// Is type src assignment compatible to type dst? +// If so, return op code to use in conversion. +// If not, return OXXX. In this case, the string return parameter may +// hold a reason why. In all other cases, it'll be the empty string. 
+func assignOp(src, dst *types.Type) (ir.Op, string) { + if src == dst { + return ir.OCONVNOP, "" + } + if src == nil || dst == nil || src.Kind() == types.TFORW || dst.Kind() == types.TFORW || src.Underlying() == nil || dst.Underlying() == nil { + return ir.OXXX, "" + } + + // 1. src type is identical to dst. + if types.Identical(src, dst) { + return ir.OCONVNOP, "" + } + + // 2. src and dst have identical underlying types and + // a. either src or dst is not a named type, or + // b. both are empty interface types, or + // c. at least one is a gcshape type. + // For assignable but different non-empty interface types, + // we want to recompute the itab. Recomputing the itab ensures + // that itabs are unique (thus an interface with a compile-time + // type I has an itab with interface type I). + if types.Identical(src.Underlying(), dst.Underlying()) { + if src.IsEmptyInterface() { + // Conversion between two empty interfaces + // requires no code. + return ir.OCONVNOP, "" + } + if (src.Sym() == nil || dst.Sym() == nil) && !src.IsInterface() { + // Conversion between two types, at least one unnamed, + // needs no conversion. The exception is nonempty interfaces + // which need to have their itab updated. + return ir.OCONVNOP, "" + } + if src.IsShape() || dst.IsShape() { + // Conversion between a shape type and one of the types + // it represents also needs no conversion. + return ir.OCONVNOP, "" + } + } + + // 3. dst is an interface type and src implements dst. + if dst.IsInterface() && src.Kind() != types.TNIL { + if src.IsShape() { + // Shape types implement things they have already + // been typechecked to implement, even if they + // don't have the methods for them. + return ir.OCONVIFACE, "" + } + if src.HasShape() { + // Unified IR uses OCONVIFACE for converting all derived types + // to interface type, not just type arguments themselves. 
+ return ir.OCONVIFACE, "" + } + + why := ImplementsExplain(src, dst) + if why == "" { + return ir.OCONVIFACE, "" + } + return ir.OXXX, ":\n\t" + why + } + + if isptrto(dst, types.TINTER) { + why := fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst) + return ir.OXXX, why + } + + if src.IsInterface() && dst.Kind() != types.TBLANK { + var why string + if Implements(dst, src) { + why = ": need type assertion" + } + return ir.OXXX, why + } + + // 4. src is a bidirectional channel value, dst is a channel type, + // src and dst have identical element types, and + // either src or dst is not a named type. + if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() { + if types.Identical(src.Elem(), dst.Elem()) && (src.Sym() == nil || dst.Sym() == nil) { + return ir.OCONVNOP, "" + } + } + + // 5. src is the predeclared identifier nil and dst is a nillable type. + if src.Kind() == types.TNIL { + switch dst.Kind() { + case types.TPTR, + types.TFUNC, + types.TMAP, + types.TCHAN, + types.TINTER, + types.TSLICE: + return ir.OCONVNOP, "" + } + } + + // 6. rule about untyped constants - already converted by DefaultLit. + + // 7. Any typed value can be assigned to the blank identifier. + if dst.Kind() == types.TBLANK { + return ir.OCONVNOP, "" + } + + return ir.OXXX, "" +} + +// Can we convert a value of type src to a value of type dst? +// If so, return op code to use in conversion (maybe OCONVNOP). +// If not, return OXXX. In this case, the string return parameter may +// hold a reason why. In all other cases, it'll be the empty string. +// srcConstant indicates whether the value of type src is a constant. +func convertOp(srcConstant bool, src, dst *types.Type) (ir.Op, string) { + if src == dst { + return ir.OCONVNOP, "" + } + if src == nil || dst == nil { + return ir.OXXX, "" + } + + // Conversions from regular to not-in-heap are not allowed + // (unless it's unsafe.Pointer). These are runtime-specific + // rules. 
+ // (a) Disallow (*T) to (*U) where T is not-in-heap but U isn't. + if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() { + why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem()) + return ir.OXXX, why + } + // (b) Disallow string to []T where T is not-in-heap. + if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Kind() == types.ByteType.Kind() || dst.Elem().Kind() == types.RuneType.Kind()) { + why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem()) + return ir.OXXX, why + } + + // 1. src can be assigned to dst. + op, why := assignOp(src, dst) + if op != ir.OXXX { + return op, why + } + + // The rules for interfaces are no different in conversions + // than assignments. If interfaces are involved, stop now + // with the good message from assignop. + // Otherwise clear the error. + if src.IsInterface() || dst.IsInterface() { + return ir.OXXX, why + } + + // 2. Ignoring struct tags, src and dst have identical underlying types. + if types.IdenticalIgnoreTags(src.Underlying(), dst.Underlying()) { + return ir.OCONVNOP, "" + } + + // 3. src and dst are unnamed pointer types and, ignoring struct tags, + // their base types have identical underlying types. + if src.IsPtr() && dst.IsPtr() && src.Sym() == nil && dst.Sym() == nil { + if types.IdenticalIgnoreTags(src.Elem().Underlying(), dst.Elem().Underlying()) { + return ir.OCONVNOP, "" + } + } + + // 4. src and dst are both integer or floating point types. + if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) { + if types.SimType[src.Kind()] == types.SimType[dst.Kind()] { + return ir.OCONVNOP, "" + } + return ir.OCONV, "" + } + + // 5. src and dst are both complex types. 
+ if src.IsComplex() && dst.IsComplex() { + if types.SimType[src.Kind()] == types.SimType[dst.Kind()] { + return ir.OCONVNOP, "" + } + return ir.OCONV, "" + } + + // Special case for constant conversions: any numeric + // conversion is potentially okay. We'll validate further + // within evconst. See #38117. + if srcConstant && (src.IsInteger() || src.IsFloat() || src.IsComplex()) && (dst.IsInteger() || dst.IsFloat() || dst.IsComplex()) { + return ir.OCONV, "" + } + + // 6. src is an integer or has type []byte or []rune + // and dst is a string type. + if src.IsInteger() && dst.IsString() { + return ir.ORUNESTR, "" + } + + if src.IsSlice() && dst.IsString() { + if src.Elem().Kind() == types.ByteType.Kind() { + return ir.OBYTES2STR, "" + } + if src.Elem().Kind() == types.RuneType.Kind() { + return ir.ORUNES2STR, "" + } + } + + // 7. src is a string and dst is []byte or []rune. + // String to slice. + if src.IsString() && dst.IsSlice() { + if dst.Elem().Kind() == types.ByteType.Kind() { + return ir.OSTR2BYTES, "" + } + if dst.Elem().Kind() == types.RuneType.Kind() { + return ir.OSTR2RUNES, "" + } + } + + // 8. src is a pointer or uintptr and dst is unsafe.Pointer. + if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() { + return ir.OCONVNOP, "" + } + + // 9. src is unsafe.Pointer and dst is a pointer or uintptr. + if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) { + return ir.OCONVNOP, "" + } + + // 10. src is a slice and dst is an array or pointer-to-array. + // They must have same element type. + if src.IsSlice() { + if dst.IsArray() && types.Identical(src.Elem(), dst.Elem()) { + return ir.OSLICE2ARR, "" + } + if dst.IsPtr() && dst.Elem().IsArray() && + types.Identical(src.Elem(), dst.Elem().Elem()) { + return ir.OSLICE2ARRPTR, "" + } + } + + return ir.OXXX, "" +} + +// Code to resolve elided DOTs in embedded types. + +// A dlist stores a pointer to a TFIELD Type embedded within +// a TSTRUCT or TINTER Type. 
+type dlist struct { + field *types.Field +} + +// dotpath computes the unique shortest explicit selector path to fully qualify +// a selection expression x.f, where x is of type t and f is the symbol s. +// If no such path exists, dotpath returns nil. +// If there are multiple shortest paths to the same depth, ambig is true. +func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) (path []dlist, ambig bool) { + // The embedding of types within structs imposes a tree structure onto + // types: structs parent the types they embed, and types parent their + // fields or methods. Our goal here is to find the shortest path to + // a field or method named s in the subtree rooted at t. To accomplish + // that, we iteratively perform depth-first searches of increasing depth + // until we either find the named field/method or exhaust the tree. + for d := 0; ; d++ { + if d > len(dotlist) { + dotlist = append(dotlist, dlist{}) + } + if c, more := adddot1(s, t, d, save, ignorecase); c == 1 { + return dotlist[:d], false + } else if c > 1 { + return nil, true + } else if !more { + return nil, false + } + } +} + +func expand0(t *types.Type) { + u := t + if u.IsPtr() { + u = u.Elem() + } + + if u.IsInterface() { + for _, f := range u.AllMethods() { + if f.Sym.Uniq() { + continue + } + f.Sym.SetUniq(true) + slist = append(slist, symlink{field: f}) + } + + return + } + + u = types.ReceiverBaseType(t) + if u != nil { + for _, f := range u.Methods() { + if f.Sym.Uniq() { + continue + } + f.Sym.SetUniq(true) + slist = append(slist, symlink{field: f}) + } + } +} + +func expand1(t *types.Type, top bool) { + if t.Recur() { + return + } + t.SetRecur(true) + + if !top { + expand0(t) + } + + u := t + if u.IsPtr() { + u = u.Elem() + } + + if u.IsStruct() || u.IsInterface() { + var fields []*types.Field + if u.IsStruct() { + fields = u.Fields() + } else { + fields = u.AllMethods() + } + for _, f := range fields { + if f.Embedded == 0 { + continue + } + if f.Sym == nil { 
+ continue + } + expand1(f.Type, false) + } + } + + t.SetRecur(false) +} + +func ifacelookdot(s *types.Sym, t *types.Type, ignorecase bool) *types.Field { + if t == nil { + return nil + } + + var m *types.Field + path, _ := dotpath(s, t, &m, ignorecase) + if path == nil { + return nil + } + + if !m.IsMethod() { + return nil + } + + return m +} + +// Implements reports whether t implements the interface iface. t can be +// an interface, a type parameter, or a concrete type. +func Implements(t, iface *types.Type) bool { + var missing, have *types.Field + var ptr int + return implements(t, iface, &missing, &have, &ptr) +} + +// ImplementsExplain reports whether t implements the interface iface. t can be +// an interface, a type parameter, or a concrete type. If t does not implement +// iface, a non-empty string is returned explaining why. +func ImplementsExplain(t, iface *types.Type) string { + var missing, have *types.Field + var ptr int + if implements(t, iface, &missing, &have, &ptr) { + return "" + } + + if isptrto(t, types.TINTER) { + return fmt.Sprintf("%v is pointer to interface, not interface", t) + } else if have != nil && have.Sym == missing.Sym && have.Nointerface() { + return fmt.Sprintf("%v does not implement %v (%v method is marked 'nointerface')", t, iface, missing.Sym) + } else if have != nil && have.Sym == missing.Sym { + return fmt.Sprintf("%v does not implement %v (wrong type for %v method)\n"+ + "\t\thave %v%S\n\t\twant %v%S", t, iface, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) + } else if ptr != 0 { + return fmt.Sprintf("%v does not implement %v (%v method has pointer receiver)", t, iface, missing.Sym) + } else if have != nil { + return fmt.Sprintf("%v does not implement %v (missing %v method)\n"+ + "\t\thave %v%S\n\t\twant %v%S", t, iface, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type) + } + return fmt.Sprintf("%v does not implement %v (missing %v method)", t, iface, missing.Sym) +} + +// implements reports 
whether t implements the interface iface. t can be +// an interface, a type parameter, or a concrete type. If implements returns +// false, it stores a method of iface that is not implemented in *m. If the +// method name matches but the type is wrong, it additionally stores the type +// of the method (on t) in *samename. +func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool { + t0 := t + if t == nil { + return false + } + + if t.IsInterface() { + i := 0 + tms := t.AllMethods() + for _, im := range iface.AllMethods() { + for i < len(tms) && tms[i].Sym != im.Sym { + i++ + } + if i == len(tms) { + *m = im + *samename = nil + *ptr = 0 + return false + } + tm := tms[i] + if !types.Identical(tm.Type, im.Type) { + *m = im + *samename = tm + *ptr = 0 + return false + } + } + + return true + } + + t = types.ReceiverBaseType(t) + var tms []*types.Field + if t != nil { + CalcMethods(t) + tms = t.AllMethods() + } + i := 0 + for _, im := range iface.AllMethods() { + for i < len(tms) && tms[i].Sym != im.Sym { + i++ + } + if i == len(tms) { + *m = im + *samename = ifacelookdot(im.Sym, t, true) + *ptr = 0 + return false + } + tm := tms[i] + if tm.Nointerface() || !types.Identical(tm.Type, im.Type) { + *m = im + *samename = tm + *ptr = 0 + return false + } + + // if pointer receiver in method, + // the method does not exist for value types. + if !types.IsMethodApplicable(t0, tm) { + if false && base.Flag.LowerR != 0 { + base.Errorf("interface pointer mismatch") + } + + *m = im + *samename = nil + *ptr = 1 + return false + } + } + + return true +} + +func isptrto(t *types.Type, et types.Kind) bool { + if t == nil { + return false + } + if !t.IsPtr() { + return false + } + t = t.Elem() + if t == nil { + return false + } + if t.Kind() != et { + return false + } + return true +} + +// lookdot0 returns the number of fields or methods named s associated +// with Type t. If exactly one exists, it will be returned in *save +// (if save is not nil). 
+func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) int { + u := t + if u.IsPtr() { + u = u.Elem() + } + + c := 0 + if u.IsStruct() || u.IsInterface() { + var fields []*types.Field + if u.IsStruct() { + fields = u.Fields() + } else { + fields = u.AllMethods() + } + for _, f := range fields { + if f.Sym == s || (ignorecase && f.IsMethod() && strings.EqualFold(f.Sym.Name, s.Name)) { + if save != nil { + *save = f + } + c++ + } + } + } + + u = t + if t.Sym() != nil && t.IsPtr() && !t.Elem().IsPtr() { + // If t is a defined pointer type, then x.m is shorthand for (*x).m. + u = t.Elem() + } + u = types.ReceiverBaseType(u) + if u != nil { + for _, f := range u.Methods() { + if f.Embedded == 0 && (f.Sym == s || (ignorecase && strings.EqualFold(f.Sym.Name, s.Name))) { + if save != nil { + *save = f + } + c++ + } + } + } + + return c +} + +var slist []symlink + +// Code to help generate trampoline functions for methods on embedded +// types. These are approx the same as the corresponding AddImplicitDots +// routines except that they expect to be called with unique tasks and +// they return the actual methods. + +type symlink struct { + field *types.Field +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/syms.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/syms.go new file mode 100644 index 0000000000000000000000000000000000000000..a977b5e1101b9a34a418cd6e9b753e6566d2aa5a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/syms.go @@ -0,0 +1,134 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typecheck + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/obj" +) + +// LookupRuntime returns a function or variable declared in +// _builtin/runtime.go. 
If types_ is non-empty, successive occurrences +// of the "any" placeholder type will be substituted. +func LookupRuntime(name string, types_ ...*types.Type) *ir.Name { + s := ir.Pkgs.Runtime.Lookup(name) + if s == nil || s.Def == nil { + base.Fatalf("LookupRuntime: can't find runtime.%s", name) + } + n := s.Def.(*ir.Name) + if len(types_) != 0 { + n = substArgTypes(n, types_...) + } + return n +} + +// SubstArgTypes substitutes the given list of types for +// successive occurrences of the "any" placeholder in the +// type syntax expression n.Type. +func substArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name { + for _, t := range types_ { + types.CalcSize(t) + } + n := ir.NewNameAt(old.Pos(), old.Sym(), types.SubstAny(old.Type(), &types_)) + n.Class = old.Class + n.Func = old.Func + if len(types_) > 0 { + base.Fatalf("SubstArgTypes: too many argument types") + } + return n +} + +// AutoLabel generates a new Name node for use with +// an automatically generated label. +// prefix is a short mnemonic (e.g. ".s" for switch) +// to help with debugging. +// It should begin with "." to avoid conflicts with +// user labels. +func AutoLabel(prefix string) *types.Sym { + if prefix[0] != '.' { + base.Fatalf("autolabel prefix must start with '.', have %q", prefix) + } + fn := ir.CurFunc + if ir.CurFunc == nil { + base.Fatalf("autolabel outside function") + } + n := fn.Label + fn.Label++ + return LookupNum(prefix, int(n)) +} + +func Lookup(name string) *types.Sym { + return types.LocalPkg.Lookup(name) +} + +// InitRuntime loads the definitions for the low-level runtime functions, +// so that the compiler can generate calls to them, +// but does not make them visible to user code. 
+func InitRuntime() { + base.Timer.Start("fe", "loadsys") + + typs := runtimeTypes() + for _, d := range &runtimeDecls { + sym := ir.Pkgs.Runtime.Lookup(d.name) + typ := typs[d.typ] + switch d.tag { + case funcTag: + importfunc(sym, typ) + case varTag: + importvar(sym, typ) + default: + base.Fatalf("unhandled declaration tag %v", d.tag) + } + } +} + +// LookupRuntimeFunc looks up Go function name in package runtime. This function +// must follow the internal calling convention. +func LookupRuntimeFunc(name string) *obj.LSym { + return LookupRuntimeABI(name, obj.ABIInternal) +} + +// LookupRuntimeVar looks up a variable (or assembly function) name in package +// runtime. If this is a function, it may have a special calling +// convention. +func LookupRuntimeVar(name string) *obj.LSym { + return LookupRuntimeABI(name, obj.ABI0) +} + +// LookupRuntimeABI looks up a name in package runtime using the given ABI. +func LookupRuntimeABI(name string, abi obj.ABI) *obj.LSym { + return base.PkgLinksym("runtime", name, abi) +} + +// InitCoverage loads the definitions for routines called +// by code coverage instrumentation (similar to InitRuntime above). +func InitCoverage() { + typs := coverageTypes() + for _, d := range &coverageDecls { + sym := ir.Pkgs.Coverage.Lookup(d.name) + typ := typs[d.typ] + switch d.tag { + case funcTag: + importfunc(sym, typ) + case varTag: + importvar(sym, typ) + default: + base.Fatalf("unhandled declaration tag %v", d.tag) + } + } +} + +// LookupCoverage looks up the Go function 'name' in package +// runtime/coverage. This function must follow the internal calling +// convention. 
+func LookupCoverage(name string) *ir.Name { + sym := ir.Pkgs.Coverage.Lookup(name) + if sym == nil { + base.Fatalf("LookupCoverage: can't find runtime/coverage.%s", name) + } + return sym.Def.(*ir.Name) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/target.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/target.go new file mode 100644 index 0000000000000000000000000000000000000000..018614d68bfc4f69c4e5260efa6f29d1416710e5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/target.go @@ -0,0 +1,12 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run mkbuiltin.go + +package typecheck + +import "cmd/compile/internal/ir" + +// Target is the package being compiled. +var Target *ir.Package diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/type.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/type.go new file mode 100644 index 0000000000000000000000000000000000000000..37c394393a1a733ff372ed78774477c7bcce920c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/type.go @@ -0,0 +1,5 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typecheck diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/typecheck.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/typecheck.go new file mode 100644 index 0000000000000000000000000000000000000000..b22e45358ea83184905d8e826ec1192689430a0f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/typecheck/typecheck.go @@ -0,0 +1,1317 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typecheck + +import ( + "fmt" + "go/constant" + "go/token" + "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +func AssignExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr|ctxAssign) } +func Expr(n ir.Node) ir.Node { return typecheck(n, ctxExpr) } +func Stmt(n ir.Node) ir.Node { return typecheck(n, ctxStmt) } + +func Exprs(exprs []ir.Node) { typecheckslice(exprs, ctxExpr) } +func Stmts(stmts []ir.Node) { typecheckslice(stmts, ctxStmt) } + +func Call(pos src.XPos, callee ir.Node, args []ir.Node, dots bool) ir.Node { + call := ir.NewCallExpr(pos, ir.OCALL, callee, args) + call.IsDDD = dots + return typecheck(call, ctxStmt|ctxExpr) +} + +func Callee(n ir.Node) ir.Node { + return typecheck(n, ctxExpr|ctxCallee) +} + +var traceIndent []byte + +func tracePrint(title string, n ir.Node) func(np *ir.Node) { + indent := traceIndent + + // guard against nil + var pos, op string + var tc uint8 + if n != nil { + pos = base.FmtPos(n.Pos()) + op = n.Op().String() + tc = n.Typecheck() + } + + types.SkipSizeForTracing = true + defer func() { types.SkipSizeForTracing = false }() + fmt.Printf("%s: %s%s %p %s %v tc=%d\n", pos, indent, title, n, op, n, tc) + traceIndent = append(traceIndent, ". "...) 
+ + return func(np *ir.Node) { + traceIndent = traceIndent[:len(traceIndent)-2] + + // if we have a result, use that + if np != nil { + n = *np + } + + // guard against nil + // use outer pos, op so we don't get empty pos/op if n == nil (nicer output) + var tc uint8 + var typ *types.Type + if n != nil { + pos = base.FmtPos(n.Pos()) + op = n.Op().String() + tc = n.Typecheck() + typ = n.Type() + } + + types.SkipSizeForTracing = true + defer func() { types.SkipSizeForTracing = false }() + fmt.Printf("%s: %s=> %p %s %v tc=%d type=%L\n", pos, indent, n, op, n, tc, typ) + } +} + +const ( + ctxStmt = 1 << iota // evaluated at statement level + ctxExpr // evaluated in value context + ctxType // evaluated in type context + ctxCallee // call-only expressions are ok + ctxMultiOK // multivalue function returns are ok + ctxAssign // assigning to expression +) + +// type checks the whole tree of an expression. +// calculates expression types. +// evaluates compile time constants. +// marks variables that escape the local frame. +// rewrites n.Op to be more specific in some cases. 
+ +func typecheckslice(l []ir.Node, top int) { + for i := range l { + l[i] = typecheck(l[i], top) + } +} + +var _typekind = []string{ + types.TINT: "int", + types.TUINT: "uint", + types.TINT8: "int8", + types.TUINT8: "uint8", + types.TINT16: "int16", + types.TUINT16: "uint16", + types.TINT32: "int32", + types.TUINT32: "uint32", + types.TINT64: "int64", + types.TUINT64: "uint64", + types.TUINTPTR: "uintptr", + types.TCOMPLEX64: "complex64", + types.TCOMPLEX128: "complex128", + types.TFLOAT32: "float32", + types.TFLOAT64: "float64", + types.TBOOL: "bool", + types.TSTRING: "string", + types.TPTR: "pointer", + types.TUNSAFEPTR: "unsafe.Pointer", + types.TSTRUCT: "struct", + types.TINTER: "interface", + types.TCHAN: "chan", + types.TMAP: "map", + types.TARRAY: "array", + types.TSLICE: "slice", + types.TFUNC: "func", + types.TNIL: "nil", + types.TIDEAL: "untyped number", +} + +func typekind(t *types.Type) string { + if t.IsUntyped() { + return fmt.Sprintf("%v", t) + } + et := t.Kind() + if int(et) < len(_typekind) { + s := _typekind[et] + if s != "" { + return s + } + } + return fmt.Sprintf("etype=%d", et) +} + +// typecheck type checks node n. +// The result of typecheck MUST be assigned back to n, e.g. +// +// n.Left = typecheck(n.Left, top) +func typecheck(n ir.Node, top int) (res ir.Node) { + if n == nil { + return nil + } + + // only trace if there's work to do + if base.EnableTrace && base.Flag.LowerT { + defer tracePrint("typecheck", n)(&res) + } + + lno := ir.SetPos(n) + defer func() { base.Pos = lno }() + + // Skip over parens. + for n.Op() == ir.OPAREN { + n = n.(*ir.ParenExpr).X + } + + // Skip typecheck if already done. + // But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed. 
+ if n.Typecheck() == 1 || n.Typecheck() == 3 { + switch n.Op() { + case ir.ONAME: + break + + default: + return n + } + } + + if n.Typecheck() == 2 { + base.FatalfAt(n.Pos(), "typechecking loop") + } + + n.SetTypecheck(2) + n = typecheck1(n, top) + n.SetTypecheck(1) + + t := n.Type() + if t != nil && !t.IsFuncArgStruct() && n.Op() != ir.OTYPE { + switch t.Kind() { + case types.TFUNC, // might have TANY; wait until it's called + types.TANY, types.TFORW, types.TIDEAL, types.TNIL, types.TBLANK: + break + + default: + types.CheckSize(t) + } + } + + return n +} + +// indexlit implements typechecking of untyped values as +// array/slice indexes. It is almost equivalent to DefaultLit +// but also accepts untyped numeric values representable as +// value of type int (see also checkmake for comparison). +// The result of indexlit MUST be assigned back to n, e.g. +// +// n.Left = indexlit(n.Left) +func indexlit(n ir.Node) ir.Node { + if n != nil && n.Type() != nil && n.Type().Kind() == types.TIDEAL { + return DefaultLit(n, types.Types[types.TINT]) + } + return n +} + +// typecheck1 should ONLY be called from typecheck. 
+func typecheck1(n ir.Node, top int) ir.Node { + switch n.Op() { + default: + ir.Dump("typecheck", n) + base.Fatalf("typecheck %v", n.Op()) + panic("unreachable") + + case ir.ONAME: + n := n.(*ir.Name) + if n.BuiltinOp != 0 { + if top&ctxCallee == 0 { + base.Errorf("use of builtin %v not in function call", n.Sym()) + n.SetType(nil) + return n + } + return n + } + if top&ctxAssign == 0 { + // not a write to the variable + if ir.IsBlank(n) { + base.Errorf("cannot use _ as value") + n.SetType(nil) + return n + } + n.SetUsed(true) + } + return n + + // type or expr + case ir.ODEREF: + n := n.(*ir.StarExpr) + return tcStar(n, top) + + // x op= y + case ir.OASOP: + n := n.(*ir.AssignOpStmt) + n.X, n.Y = Expr(n.X), Expr(n.Y) + checkassign(n.X) + if n.IncDec && !okforarith[n.X.Type().Kind()] { + base.Errorf("invalid operation: %v (non-numeric type %v)", n, n.X.Type()) + return n + } + switch n.AsOp { + case ir.OLSH, ir.ORSH: + n.X, n.Y, _ = tcShift(n, n.X, n.Y) + case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD, ir.OMUL, ir.OOR, ir.OSUB, ir.OXOR: + n.X, n.Y, _ = tcArith(n, n.AsOp, n.X, n.Y) + default: + base.Fatalf("invalid assign op: %v", n.AsOp) + } + return n + + // logical operators + case ir.OANDAND, ir.OOROR: + n := n.(*ir.LogicalExpr) + n.X, n.Y = Expr(n.X), Expr(n.Y) + if n.X.Type() == nil || n.Y.Type() == nil { + n.SetType(nil) + return n + } + // For "x == x && len(s)", it's better to report that "len(s)" (type int) + // can't be used with "&&" than to report that "x == x" (type untyped bool) + // can't be converted to int (see issue #41500). 
+ if !n.X.Type().IsBoolean() { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.X.Type())) + n.SetType(nil) + return n + } + if !n.Y.Type().IsBoolean() { + base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Y.Type())) + n.SetType(nil) + return n + } + l, r, t := tcArith(n, n.Op(), n.X, n.Y) + n.X, n.Y = l, r + n.SetType(t) + return n + + // shift operators + case ir.OLSH, ir.ORSH: + n := n.(*ir.BinaryExpr) + n.X, n.Y = Expr(n.X), Expr(n.Y) + l, r, t := tcShift(n, n.X, n.Y) + n.X, n.Y = l, r + n.SetType(t) + return n + + // comparison operators + case ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT, ir.ONE: + n := n.(*ir.BinaryExpr) + n.X, n.Y = Expr(n.X), Expr(n.Y) + l, r, t := tcArith(n, n.Op(), n.X, n.Y) + if t != nil { + n.X, n.Y = l, r + n.SetType(types.UntypedBool) + n.X, n.Y = defaultlit2(l, r, true) + } + return n + + // binary operators + case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD, ir.OMUL, ir.OOR, ir.OSUB, ir.OXOR: + n := n.(*ir.BinaryExpr) + n.X, n.Y = Expr(n.X), Expr(n.Y) + l, r, t := tcArith(n, n.Op(), n.X, n.Y) + if t != nil && t.Kind() == types.TSTRING && n.Op() == ir.OADD { + // create or update OADDSTR node with list of strings in x + y + z + (w + v) + ... + var add *ir.AddStringExpr + if l.Op() == ir.OADDSTR { + add = l.(*ir.AddStringExpr) + add.SetPos(n.Pos()) + } else { + add = ir.NewAddStringExpr(n.Pos(), []ir.Node{l}) + } + if r.Op() == ir.OADDSTR { + r := r.(*ir.AddStringExpr) + add.List.Append(r.List.Take()...) 
+ } else { + add.List.Append(r) + } + add.SetType(t) + return add + } + n.X, n.Y = l, r + n.SetType(t) + return n + + case ir.OBITNOT, ir.ONEG, ir.ONOT, ir.OPLUS: + n := n.(*ir.UnaryExpr) + return tcUnaryArith(n) + + // exprs + case ir.OCOMPLIT: + return tcCompLit(n.(*ir.CompLitExpr)) + + case ir.OXDOT, ir.ODOT: + n := n.(*ir.SelectorExpr) + return tcDot(n, top) + + case ir.ODOTTYPE: + n := n.(*ir.TypeAssertExpr) + return tcDotType(n) + + case ir.OINDEX: + n := n.(*ir.IndexExpr) + return tcIndex(n) + + case ir.ORECV: + n := n.(*ir.UnaryExpr) + return tcRecv(n) + + case ir.OSEND: + n := n.(*ir.SendStmt) + return tcSend(n) + + case ir.OSLICEHEADER: + n := n.(*ir.SliceHeaderExpr) + return tcSliceHeader(n) + + case ir.OSTRINGHEADER: + n := n.(*ir.StringHeaderExpr) + return tcStringHeader(n) + + case ir.OMAKESLICECOPY: + n := n.(*ir.MakeExpr) + return tcMakeSliceCopy(n) + + case ir.OSLICE, ir.OSLICE3: + n := n.(*ir.SliceExpr) + return tcSlice(n) + + // call and call like + case ir.OCALL: + n := n.(*ir.CallExpr) + return tcCall(n, top) + + case ir.OCAP, ir.OLEN: + n := n.(*ir.UnaryExpr) + return tcLenCap(n) + + case ir.OMIN, ir.OMAX: + n := n.(*ir.CallExpr) + return tcMinMax(n) + + case ir.OREAL, ir.OIMAG: + n := n.(*ir.UnaryExpr) + return tcRealImag(n) + + case ir.OCOMPLEX: + n := n.(*ir.BinaryExpr) + return tcComplex(n) + + case ir.OCLEAR: + n := n.(*ir.UnaryExpr) + return tcClear(n) + + case ir.OCLOSE: + n := n.(*ir.UnaryExpr) + return tcClose(n) + + case ir.ODELETE: + n := n.(*ir.CallExpr) + return tcDelete(n) + + case ir.OAPPEND: + n := n.(*ir.CallExpr) + return tcAppend(n) + + case ir.OCOPY: + n := n.(*ir.BinaryExpr) + return tcCopy(n) + + case ir.OCONV: + n := n.(*ir.ConvExpr) + return tcConv(n) + + case ir.OMAKE: + n := n.(*ir.CallExpr) + return tcMake(n) + + case ir.ONEW: + n := n.(*ir.UnaryExpr) + return tcNew(n) + + case ir.OPRINT, ir.OPRINTLN: + n := n.(*ir.CallExpr) + return tcPrint(n) + + case ir.OPANIC: + n := n.(*ir.UnaryExpr) + return tcPanic(n) + + case 
ir.ORECOVER: + n := n.(*ir.CallExpr) + return tcRecover(n) + + case ir.OUNSAFEADD: + n := n.(*ir.BinaryExpr) + return tcUnsafeAdd(n) + + case ir.OUNSAFESLICE: + n := n.(*ir.BinaryExpr) + return tcUnsafeSlice(n) + + case ir.OUNSAFESLICEDATA: + n := n.(*ir.UnaryExpr) + return tcUnsafeData(n) + + case ir.OUNSAFESTRING: + n := n.(*ir.BinaryExpr) + return tcUnsafeString(n) + + case ir.OUNSAFESTRINGDATA: + n := n.(*ir.UnaryExpr) + return tcUnsafeData(n) + + case ir.OITAB: + n := n.(*ir.UnaryExpr) + return tcITab(n) + + case ir.OIDATA: + // Whoever creates the OIDATA node must know a priori the concrete type at that moment, + // usually by just having checked the OITAB. + n := n.(*ir.UnaryExpr) + base.Fatalf("cannot typecheck interface data %v", n) + panic("unreachable") + + case ir.OSPTR: + n := n.(*ir.UnaryExpr) + return tcSPtr(n) + + case ir.OCFUNC: + n := n.(*ir.UnaryExpr) + n.X = Expr(n.X) + n.SetType(types.Types[types.TUINTPTR]) + return n + + case ir.OGETCALLERPC, ir.OGETCALLERSP: + n := n.(*ir.CallExpr) + if len(n.Args) != 0 { + base.FatalfAt(n.Pos(), "unexpected arguments: %v", n) + } + n.SetType(types.Types[types.TUINTPTR]) + return n + + case ir.OCONVNOP: + n := n.(*ir.ConvExpr) + n.X = Expr(n.X) + return n + + // statements + case ir.OAS: + n := n.(*ir.AssignStmt) + tcAssign(n) + + // Code that creates temps does not bother to set defn, so do it here. + if n.X.Op() == ir.ONAME && ir.IsAutoTmp(n.X) { + n.X.Name().Defn = n + } + return n + + case ir.OAS2: + tcAssignList(n.(*ir.AssignListStmt)) + return n + + case ir.OBREAK, + ir.OCONTINUE, + ir.ODCL, + ir.OGOTO, + ir.OFALL: + return n + + case ir.OBLOCK: + n := n.(*ir.BlockStmt) + Stmts(n.List) + return n + + case ir.OLABEL: + if n.Sym().IsBlank() { + // Empty identifier is valid but useless. + // Eliminate now to simplify life later. + // See issues 7538, 11589, 11593. 
+ n = ir.NewBlockStmt(n.Pos(), nil) + } + return n + + case ir.ODEFER, ir.OGO: + n := n.(*ir.GoDeferStmt) + n.Call = typecheck(n.Call, ctxStmt|ctxExpr) + tcGoDefer(n) + return n + + case ir.OFOR: + n := n.(*ir.ForStmt) + return tcFor(n) + + case ir.OIF: + n := n.(*ir.IfStmt) + return tcIf(n) + + case ir.ORETURN: + n := n.(*ir.ReturnStmt) + return tcReturn(n) + + case ir.OTAILCALL: + n := n.(*ir.TailCallStmt) + n.Call = typecheck(n.Call, ctxStmt|ctxExpr).(*ir.CallExpr) + return n + + case ir.OCHECKNIL: + n := n.(*ir.UnaryExpr) + return tcCheckNil(n) + + case ir.OSELECT: + tcSelect(n.(*ir.SelectStmt)) + return n + + case ir.OSWITCH: + tcSwitch(n.(*ir.SwitchStmt)) + return n + + case ir.ORANGE: + tcRange(n.(*ir.RangeStmt)) + return n + + case ir.OTYPESW: + n := n.(*ir.TypeSwitchGuard) + base.Fatalf("use of .(type) outside type switch") + return n + + case ir.ODCLFUNC: + tcFunc(n.(*ir.Func)) + return n + } + + // No return n here! + // Individual cases can type-assert n, introducing a new one. + // Each must execute its own return n. +} + +func typecheckargs(n ir.InitNode) { + var list []ir.Node + switch n := n.(type) { + default: + base.Fatalf("typecheckargs %+v", n.Op()) + case *ir.CallExpr: + list = n.Args + if n.IsDDD { + Exprs(list) + return + } + case *ir.ReturnStmt: + list = n.Results + } + if len(list) != 1 { + Exprs(list) + return + } + + typecheckslice(list, ctxExpr|ctxMultiOK) + t := list[0].Type() + if t == nil || !t.IsFuncArgStruct() { + return + } + + // Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...). + RewriteMultiValueCall(n, list[0]) +} + +// RewriteNonNameCall replaces non-Name call expressions with temps, +// rewriting f()(...) to t0 := f(); t0(...). +func RewriteNonNameCall(n *ir.CallExpr) { + np := &n.Fun + if dot, ok := (*np).(*ir.SelectorExpr); ok && (dot.Op() == ir.ODOTMETH || dot.Op() == ir.ODOTINTER || dot.Op() == ir.OMETHVALUE) { + np = &dot.X // peel away method selector + } + + // Check for side effects in the callee expression. 
+ // We explicitly special case new(T) though, because it doesn't have + // observable side effects, and keeping it in place allows better escape analysis. + if !ir.Any(*np, func(n ir.Node) bool { return n.Op() != ir.ONEW && callOrChan(n) }) { + return + } + + tmp := TempAt(base.Pos, ir.CurFunc, (*np).Type()) + as := ir.NewAssignStmt(base.Pos, tmp, *np) + as.PtrInit().Append(Stmt(ir.NewDecl(n.Pos(), ir.ODCL, tmp))) + *np = tmp + + n.PtrInit().Append(Stmt(as)) +} + +// RewriteMultiValueCall rewrites multi-valued f() to use temporaries, +// so the backend wouldn't need to worry about tuple-valued expressions. +func RewriteMultiValueCall(n ir.InitNode, call ir.Node) { + as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, []ir.Node{call}) + results := call.Type().Fields() + list := make([]ir.Node, len(results)) + for i, result := range results { + tmp := TempAt(base.Pos, ir.CurFunc, result.Type) + as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, tmp)) + as.Lhs.Append(tmp) + list[i] = tmp + } + + n.PtrInit().Append(Stmt(as)) + + switch n := n.(type) { + default: + base.Fatalf("rewriteMultiValueCall %+v", n.Op()) + case *ir.CallExpr: + n.Args = list + case *ir.ReturnStmt: + n.Results = list + case *ir.AssignListStmt: + if n.Op() != ir.OAS2FUNC { + base.Fatalf("rewriteMultiValueCall: invalid op %v", n.Op()) + } + as.SetOp(ir.OAS2FUNC) + n.SetOp(ir.OAS2) + n.Rhs = make([]ir.Node, len(list)) + for i, tmp := range list { + n.Rhs[i] = AssignConv(tmp, n.Lhs[i].Type(), "assignment") + } + } +} + +func checksliceindex(l ir.Node, r ir.Node, tp *types.Type) bool { + t := r.Type() + if t == nil { + return false + } + if !t.IsInteger() { + base.Errorf("invalid slice index %v (type %v)", r, t) + return false + } + + if r.Op() == ir.OLITERAL { + x := r.Val() + if constant.Sign(x) < 0 { + base.Errorf("invalid slice index %v (index must be non-negative)", r) + return false + } else if tp != nil && tp.NumElem() >= 0 && constant.Compare(x, token.GTR, constant.MakeInt64(tp.NumElem())) { + 
base.Errorf("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem()) + return false + } else if ir.IsConst(l, constant.String) && constant.Compare(x, token.GTR, constant.MakeInt64(int64(len(ir.StringVal(l))))) { + base.Errorf("invalid slice index %v (out of bounds for %d-byte string)", r, len(ir.StringVal(l))) + return false + } else if ir.ConstOverflow(x, types.Types[types.TINT]) { + base.Errorf("invalid slice index %v (index too large)", r) + return false + } + } + + return true +} + +func checksliceconst(lo ir.Node, hi ir.Node) bool { + if lo != nil && hi != nil && lo.Op() == ir.OLITERAL && hi.Op() == ir.OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) { + base.Errorf("invalid slice index: %v > %v", lo, hi) + return false + } + + return true +} + +// The result of implicitstar MUST be assigned back to n, e.g. +// +// n.Left = implicitstar(n.Left) +func implicitstar(n ir.Node) ir.Node { + // insert implicit * if needed for fixed array + t := n.Type() + if t == nil || !t.IsPtr() { + return n + } + t = t.Elem() + if t == nil { + return n + } + if !t.IsArray() { + return n + } + star := ir.NewStarExpr(base.Pos, n) + star.SetImplicit(true) + return Expr(star) +} + +func needOneArg(n *ir.CallExpr, f string, args ...interface{}) (ir.Node, bool) { + if len(n.Args) == 0 { + p := fmt.Sprintf(f, args...) + base.Errorf("missing argument to %s: %v", p, n) + return nil, false + } + + if len(n.Args) > 1 { + p := fmt.Sprintf(f, args...) 
+ base.Errorf("too many arguments to %s: %v", p, n) + return n.Args[0], false + } + + return n.Args[0], true +} + +func needTwoArgs(n *ir.CallExpr) (ir.Node, ir.Node, bool) { + if len(n.Args) != 2 { + if len(n.Args) < 2 { + base.Errorf("not enough arguments in call to %v", n) + } else { + base.Errorf("too many arguments in call to %v", n) + } + return nil, nil, false + } + return n.Args[0], n.Args[1], true +} + +// Lookdot1 looks up the specified method s in the list fs of methods, returning +// the matching field or nil. If dostrcmp is 0, it matches the symbols. If +// dostrcmp is 1, it matches by name exactly. If dostrcmp is 2, it matches names +// with case folding. +func Lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs []*types.Field, dostrcmp int) *types.Field { + var r *types.Field + for _, f := range fs { + if dostrcmp != 0 && f.Sym.Name == s.Name { + return f + } + if dostrcmp == 2 && strings.EqualFold(f.Sym.Name, s.Name) { + return f + } + if f.Sym != s { + continue + } + if r != nil { + if errnode != nil { + base.Errorf("ambiguous selector %v", errnode) + } else if t.IsPtr() { + base.Errorf("ambiguous selector (%v).%v", t, s) + } else { + base.Errorf("ambiguous selector %v.%v", t, s) + } + break + } + + r = f + } + + return r +} + +// NewMethodExpr returns an OMETHEXPR node representing method +// expression "recv.sym". +func NewMethodExpr(pos src.XPos, recv *types.Type, sym *types.Sym) *ir.SelectorExpr { + // Compute the method set for recv. 
+ var ms []*types.Field + if recv.IsInterface() { + ms = recv.AllMethods() + } else { + mt := types.ReceiverBaseType(recv) + if mt == nil { + base.FatalfAt(pos, "type %v has no receiver base type", recv) + } + CalcMethods(mt) + ms = mt.AllMethods() + } + + m := Lookdot1(nil, sym, recv, ms, 0) + if m == nil { + base.FatalfAt(pos, "type %v has no method %v", recv, sym) + } + + if !types.IsMethodApplicable(recv, m) { + base.FatalfAt(pos, "invalid method expression %v.%v (needs pointer receiver)", recv, sym) + } + + n := ir.NewSelectorExpr(pos, ir.OMETHEXPR, ir.TypeNode(recv), sym) + n.Selection = m + n.SetType(NewMethodType(m.Type, recv)) + n.SetTypecheck(1) + return n +} + +func derefall(t *types.Type) *types.Type { + for t != nil && t.IsPtr() { + t = t.Elem() + } + return t +} + +// Lookdot looks up field or method n.Sel in the type t and returns the matching +// field. It transforms the op of node n to ODOTINTER or ODOTMETH, if appropriate. +// It also may add a StarExpr node to n.X as needed for access to non-pointer +// methods. If dostrcmp is 0, it matches the field/method with the exact symbol +// as n.Sel (appropriate for exported fields). If dostrcmp is 1, it matches by name +// exactly. If dostrcmp is 2, it matches names with case folding. +func Lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field { + s := n.Sel + + types.CalcSize(t) + var f1 *types.Field + if t.IsStruct() { + f1 = Lookdot1(n, s, t, t.Fields(), dostrcmp) + } else if t.IsInterface() { + f1 = Lookdot1(n, s, t, t.AllMethods(), dostrcmp) + } + + var f2 *types.Field + if n.X.Type() == t || n.X.Type().Sym() == nil { + mt := types.ReceiverBaseType(t) + if mt != nil { + f2 = Lookdot1(n, s, mt, mt.Methods(), dostrcmp) + } + } + + if f1 != nil { + if dostrcmp > 1 { + // Already in the process of diagnosing an error. 
+ return f1 + } + if f2 != nil { + base.Errorf("%v is both field and method", n.Sel) + } + if f1.Offset == types.BADWIDTH { + base.Fatalf("Lookdot badwidth t=%v, f1=%v@%p", t, f1, f1) + } + n.Selection = f1 + n.SetType(f1.Type) + if t.IsInterface() { + if n.X.Type().IsPtr() { + star := ir.NewStarExpr(base.Pos, n.X) + star.SetImplicit(true) + n.X = Expr(star) + } + + n.SetOp(ir.ODOTINTER) + } + return f1 + } + + if f2 != nil { + if dostrcmp > 1 { + // Already in the process of diagnosing an error. + return f2 + } + orig := n.X + tt := n.X.Type() + types.CalcSize(tt) + rcvr := f2.Type.Recv().Type + if !types.Identical(rcvr, tt) { + if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) { + checklvalue(n.X, "call pointer method on") + addr := NodAddr(n.X) + addr.SetImplicit(true) + n.X = typecheck(addr, ctxType|ctxExpr) + } else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) { + star := ir.NewStarExpr(base.Pos, n.X) + star.SetImplicit(true) + n.X = typecheck(star, ctxType|ctxExpr) + } else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) { + base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sel, n.X) + for tt.IsPtr() { + // Stop one level early for method with pointer receiver. + if rcvr.IsPtr() && !tt.Elem().IsPtr() { + break + } + star := ir.NewStarExpr(base.Pos, n.X) + star.SetImplicit(true) + n.X = typecheck(star, ctxType|ctxExpr) + tt = tt.Elem() + } + } else { + base.Fatalf("method mismatch: %v for %v", rcvr, tt) + } + } + + // Check that we haven't implicitly dereferenced any defined pointer types. 
+ for x := n.X; ; { + var inner ir.Node + implicit := false + switch x := x.(type) { + case *ir.AddrExpr: + inner, implicit = x.X, x.Implicit() + case *ir.SelectorExpr: + inner, implicit = x.X, x.Implicit() + case *ir.StarExpr: + inner, implicit = x.X, x.Implicit() + } + if !implicit { + break + } + if inner.Type().Sym() != nil && (x.Op() == ir.ODEREF || x.Op() == ir.ODOTPTR) { + // Found an implicit dereference of a defined pointer type. + // Restore n.X for better error message. + n.X = orig + return nil + } + x = inner + } + + n.Selection = f2 + n.SetType(f2.Type) + n.SetOp(ir.ODOTMETH) + + return f2 + } + + return nil +} + +func nokeys(l ir.Nodes) bool { + for _, n := range l { + if n.Op() == ir.OKEY || n.Op() == ir.OSTRUCTKEY { + return false + } + } + return true +} + +func hasddd(params []*types.Field) bool { + // TODO(mdempsky): Simply check the last param. + for _, tl := range params { + if tl.IsDDD() { + return true + } + } + + return false +} + +// typecheck assignment: type list = expression list +func typecheckaste(op ir.Op, call ir.Node, isddd bool, params []*types.Field, nl ir.Nodes, desc func() string) { + var t *types.Type + var i int + + lno := base.Pos + defer func() { base.Pos = lno }() + + var n ir.Node + if len(nl) == 1 { + n = nl[0] + } + + n1 := len(params) + n2 := len(nl) + if !hasddd(params) { + if isddd { + goto invalidddd + } + if n2 > n1 { + goto toomany + } + if n2 < n1 { + goto notenough + } + } else { + if !isddd { + if n2 < n1-1 { + goto notenough + } + } else { + if n2 > n1 { + goto toomany + } + if n2 < n1 { + goto notenough + } + } + } + + i = 0 + for _, tl := range params { + t = tl.Type + if tl.IsDDD() { + if isddd { + if i >= len(nl) { + goto notenough + } + if len(nl)-i > 1 { + goto toomany + } + n = nl[i] + ir.SetPos(n) + if n.Type() != nil { + nl[i] = assignconvfn(n, t, desc) + } + return + } + + // TODO(mdempsky): Make into ... call with implicit slice. 
+ for ; i < len(nl); i++ { + n = nl[i] + ir.SetPos(n) + if n.Type() != nil { + nl[i] = assignconvfn(n, t.Elem(), desc) + } + } + return + } + + if i >= len(nl) { + goto notenough + } + n = nl[i] + ir.SetPos(n) + if n.Type() != nil { + nl[i] = assignconvfn(n, t, desc) + } + i++ + } + + if i < len(nl) { + goto toomany + } + +invalidddd: + if isddd { + if call != nil { + base.Errorf("invalid use of ... in call to %v", call) + } else { + base.Errorf("invalid use of ... in %v", op) + } + } + return + +notenough: + if n == nil || n.Type() != nil { + base.Fatalf("not enough arguments to %v", op) + } + return + +toomany: + base.Fatalf("too many arguments to %v", op) +} + +// type check composite. +func fielddup(name string, hash map[string]bool) { + if hash[name] { + base.Errorf("duplicate field name in struct literal: %s", name) + return + } + hash[name] = true +} + +// typecheckarraylit type-checks a sequence of slice/array literal elements. +func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx string) int64 { + // If there are key/value pairs, create a map to keep seen + // keys so we can check for duplicate indices. 
+ var indices map[int64]bool + for _, elt := range elts { + if elt.Op() == ir.OKEY { + indices = make(map[int64]bool) + break + } + } + + var key, length int64 + for i, elt := range elts { + ir.SetPos(elt) + r := elts[i] + var kv *ir.KeyExpr + if elt.Op() == ir.OKEY { + elt := elt.(*ir.KeyExpr) + elt.Key = Expr(elt.Key) + key = IndexConst(elt.Key) + if key < 0 { + base.Fatalf("invalid index: %v", elt.Key) + } + kv = elt + r = elt.Value + } + + r = Expr(r) + r = AssignConv(r, elemType, ctx) + if kv != nil { + kv.Value = r + } else { + elts[i] = r + } + + if key >= 0 { + if indices != nil { + if indices[key] { + base.Errorf("duplicate index in %s: %d", ctx, key) + } else { + indices[key] = true + } + } + + if bound >= 0 && key >= bound { + base.Errorf("array index %d out of bounds [0:%d]", key, bound) + bound = -1 + } + } + + key++ + if key > length { + length = key + } + } + + return length +} + +// visible reports whether sym is exported or locally defined. +func visible(sym *types.Sym) bool { + return sym != nil && (types.IsExported(sym.Name) || sym.Pkg == types.LocalPkg) +} + +// nonexported reports whether sym is an unexported field. 
+func nonexported(sym *types.Sym) bool { + return sym != nil && !types.IsExported(sym.Name) +} + +func checklvalue(n ir.Node, verb string) { + if !ir.IsAddressable(n) { + base.Errorf("cannot %s %v", verb, n) + } +} + +func checkassign(n ir.Node) { + // have already complained about n being invalid + if n.Type() == nil { + if base.Errors() == 0 { + base.Fatalf("expected an error about %v", n) + } + return + } + + if ir.IsAddressable(n) { + return + } + if n.Op() == ir.OINDEXMAP { + n := n.(*ir.IndexExpr) + n.Assigned = true + return + } + + defer n.SetType(nil) + + switch { + case n.Op() == ir.ODOT && n.(*ir.SelectorExpr).X.Op() == ir.OINDEXMAP: + base.Errorf("cannot assign to struct field %v in map", n) + case (n.Op() == ir.OINDEX && n.(*ir.IndexExpr).X.Type().IsString()) || n.Op() == ir.OSLICESTR: + base.Errorf("cannot assign to %v (strings are immutable)", n) + case n.Op() == ir.OLITERAL && n.Sym() != nil && ir.IsConstNode(n): + base.Errorf("cannot assign to %v (declared const)", n) + default: + base.Errorf("cannot assign to %v", n) + } +} + +func checkassignto(src *types.Type, dst ir.Node) { + // TODO(mdempsky): Handle all untyped types correctly. + if src == types.UntypedBool && dst.Type().IsBoolean() { + return + } + + if op, why := assignOp(src, dst.Type()); op == ir.OXXX { + base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why) + return + } +} + +// The result of stringtoruneslit MUST be assigned back to n, e.g. 
+// +// n.Left = stringtoruneslit(n.Left) +func stringtoruneslit(n *ir.ConvExpr) ir.Node { + if n.X.Op() != ir.OLITERAL || n.X.Val().Kind() != constant.String { + base.Fatalf("stringtoarraylit %v", n) + } + + var l []ir.Node + i := 0 + for _, r := range ir.StringVal(n.X) { + l = append(l, ir.NewKeyExpr(base.Pos, ir.NewInt(base.Pos, int64(i)), ir.NewInt(base.Pos, int64(r)))) + i++ + } + + return Expr(ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, n.Type(), l)) +} + +func checkmake(t *types.Type, arg string, np *ir.Node) bool { + n := *np + if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL { + base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type()) + return false + } + + // Do range checks for constants before DefaultLit + // to avoid redundant "constant NNN overflows int" errors. + if n.Op() == ir.OLITERAL { + v := toint(n.Val()) + if constant.Sign(v) < 0 { + base.Errorf("negative %s argument in make(%v)", arg, t) + return false + } + if ir.ConstOverflow(v, types.Types[types.TINT]) { + base.Errorf("%s argument too large in make(%v)", arg, t) + return false + } + } + + // DefaultLit is necessary for non-constants too: n might be 1.1<" + } + + q := pkgqual(s.Pkg, verb, mode) + if q == "" { + return s.Name + } + + buf := fmtBufferPool.Get().(*bytes.Buffer) + buf.Reset() + defer fmtBufferPool.Put(buf) + + buf.WriteString(q) + buf.WriteByte('.') + buf.WriteString(s.Name) + return InternString(buf.Bytes()) +} + +func sconv2(b *bytes.Buffer, s *Sym, verb rune, mode fmtMode) { + if verb == 'L' { + panic("linksymfmt") + } + if s == nil { + b.WriteString("") + return + } + + symfmt(b, s, verb, mode) +} + +func symfmt(b *bytes.Buffer, s *Sym, verb rune, mode fmtMode) { + name := s.Name + if q := pkgqual(s.Pkg, verb, mode); q != "" { + b.WriteString(q) + b.WriteByte('.') + } + b.WriteString(name) +} + +// pkgqual returns the qualifier that should be used for printing +// symbols from the given package in the given mode. 
+// If it returns the empty string, no qualification is needed. +func pkgqual(pkg *Pkg, verb rune, mode fmtMode) string { + if pkg == nil { + return "" + } + if verb != 'S' { + switch mode { + case fmtGo: // This is for the user + if pkg == BuiltinPkg || pkg == LocalPkg { + return "" + } + + // If the name was used by multiple packages, display the full path, + if pkg.Name != "" && NumImport[pkg.Name] > 1 { + return strconv.Quote(pkg.Path) + } + return pkg.Name + + case fmtDebug: + return pkg.Name + + case fmtTypeIDName: + // dcommontype, typehash + return pkg.Name + + case fmtTypeID: + // (methodsym), typesym, weaksym + return pkg.Prefix + } + } + + return "" +} + +// Type + +var BasicTypeNames = []string{ + TINT: "int", + TUINT: "uint", + TINT8: "int8", + TUINT8: "uint8", + TINT16: "int16", + TUINT16: "uint16", + TINT32: "int32", + TUINT32: "uint32", + TINT64: "int64", + TUINT64: "uint64", + TUINTPTR: "uintptr", + TFLOAT32: "float32", + TFLOAT64: "float64", + TCOMPLEX64: "complex64", + TCOMPLEX128: "complex128", + TBOOL: "bool", + TANY: "any", + TSTRING: "string", + TNIL: "nil", + TIDEAL: "untyped number", + TBLANK: "blank", +} + +var fmtBufferPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +// Format implements formatting for a Type. +// The valid formats are: +// +// %v Go syntax +// %+v Debug syntax: Go syntax with a KIND- prefix for all but builtins. 
+// %L Go syntax for underlying type if t is named +// %S short Go syntax: drop leading "func" in function type +// %-S special case for method receiver symbol +func (t *Type) Format(s fmt.State, verb rune) { + mode := fmtGo + switch verb { + case 'v', 'S', 'L': + if verb == 'v' && s.Flag('+') { // %+v is debug format + mode = fmtDebug + } + if verb == 'S' && s.Flag('-') { // %-S is special case for receiver - short typeid format + mode = fmtTypeID + } + fmt.Fprint(s, tconv(t, verb, mode)) + default: + fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t) + } +} + +// String returns the Go syntax for the type t. +func (t *Type) String() string { + return tconv(t, 0, fmtGo) +} + +// LinkString returns a string description of t, suitable for use in +// link symbols. +// +// The description corresponds to type identity. That is, for any pair +// of types t1 and t2, Identical(t1, t2) == (t1.LinkString() == +// t2.LinkString()) is true. Thus it's safe to use as a map key to +// implement a type-identity-keyed map. +func (t *Type) LinkString() string { + return tconv(t, 0, fmtTypeID) +} + +// NameString generates a user-readable, mostly unique string +// description of t. NameString always returns the same description +// for identical types, even across compilation units. +// +// NameString qualifies identifiers by package name, so it has +// collisions when different packages share the same names and +// identifiers. It also does not distinguish function-scope defined +// types from package-scoped defined types or from each other. +func (t *Type) NameString() string { + return tconv(t, 0, fmtTypeIDName) +} + +func tconv(t *Type, verb rune, mode fmtMode) string { + buf := fmtBufferPool.Get().(*bytes.Buffer) + buf.Reset() + defer fmtBufferPool.Put(buf) + + tconv2(buf, t, verb, mode, nil) + return InternString(buf.Bytes()) +} + +// tconv2 writes a string representation of t to b. +// flag and mode control exactly what is printed. 
+// Any types x that are already in the visited map get printed as @%d where %d=visited[x]. +// See #16897 before changing the implementation of tconv. +func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type]int) { + if off, ok := visited[t]; ok { + // We've seen this type before, so we're trying to print it recursively. + // Print a reference to it instead. + fmt.Fprintf(b, "@%d", off) + return + } + if t == nil { + b.WriteString("") + return + } + if t.Kind() == TSSA { + b.WriteString(t.extra.(string)) + return + } + if t.Kind() == TTUPLE { + b.WriteString(t.FieldType(0).String()) + b.WriteByte(',') + b.WriteString(t.FieldType(1).String()) + return + } + + if t.Kind() == TRESULTS { + tys := t.extra.(*Results).Types + for i, et := range tys { + if i > 0 { + b.WriteByte(',') + } + b.WriteString(et.String()) + } + return + } + + if t == AnyType || t == ByteType || t == RuneType { + // in %-T mode collapse predeclared aliases with their originals. + switch mode { + case fmtTypeIDName, fmtTypeID: + t = Types[t.Kind()] + default: + sconv2(b, t.Sym(), 'S', mode) + return + } + } + if t == ErrorType { + b.WriteString("error") + return + } + + // Unless the 'L' flag was specified, if the type has a name, just print that name. + if verb != 'L' && t.Sym() != nil && t != Types[t.Kind()] { + // Default to 'v' if verb is invalid. + if verb != 'S' { + verb = 'v' + } + + // In unified IR, function-scope defined types will have a ·N + // suffix embedded directly in their Name. Trim this off for + // non-fmtTypeID modes. 
+ sym := t.Sym() + if mode != fmtTypeID { + base, _ := SplitVargenSuffix(sym.Name) + if len(base) < len(sym.Name) { + sym = &Sym{Pkg: sym.Pkg, Name: base} + } + } + sconv2(b, sym, verb, mode) + return + } + + if int(t.Kind()) < len(BasicTypeNames) && BasicTypeNames[t.Kind()] != "" { + var name string + switch t { + case UntypedBool: + name = "untyped bool" + case UntypedString: + name = "untyped string" + case UntypedInt: + name = "untyped int" + case UntypedRune: + name = "untyped rune" + case UntypedFloat: + name = "untyped float" + case UntypedComplex: + name = "untyped complex" + default: + name = BasicTypeNames[t.Kind()] + } + b.WriteString(name) + return + } + + if mode == fmtDebug { + b.WriteString(t.Kind().String()) + b.WriteByte('-') + tconv2(b, t, 'v', fmtGo, visited) + return + } + + // At this point, we might call tconv2 recursively. Add the current type to the visited list so we don't + // try to print it recursively. + // We record the offset in the result buffer where the type's text starts. This offset serves as a reference + // point for any later references to the same type. + // Note that we remove the type from the visited map as soon as the recursive call is done. + // This prevents encoding types like map[*int]*int as map[*int]@4. (That encoding would work, + // but I'd like to use the @ notation only when strictly necessary.) 
+ if visited == nil { + visited = map[*Type]int{} + } + visited[t] = b.Len() + defer delete(visited, t) + + switch t.Kind() { + case TPTR: + b.WriteByte('*') + switch mode { + case fmtTypeID, fmtTypeIDName: + if verb == 'S' { + tconv2(b, t.Elem(), 'S', mode, visited) + return + } + } + tconv2(b, t.Elem(), 'v', mode, visited) + + case TARRAY: + b.WriteByte('[') + b.WriteString(strconv.FormatInt(t.NumElem(), 10)) + b.WriteByte(']') + tconv2(b, t.Elem(), 0, mode, visited) + + case TSLICE: + b.WriteString("[]") + tconv2(b, t.Elem(), 0, mode, visited) + + case TCHAN: + switch t.ChanDir() { + case Crecv: + b.WriteString("<-chan ") + tconv2(b, t.Elem(), 0, mode, visited) + case Csend: + b.WriteString("chan<- ") + tconv2(b, t.Elem(), 0, mode, visited) + default: + b.WriteString("chan ") + if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym() == nil && t.Elem().ChanDir() == Crecv { + b.WriteByte('(') + tconv2(b, t.Elem(), 0, mode, visited) + b.WriteByte(')') + } else { + tconv2(b, t.Elem(), 0, mode, visited) + } + } + + case TMAP: + b.WriteString("map[") + tconv2(b, t.Key(), 0, mode, visited) + b.WriteByte(']') + tconv2(b, t.Elem(), 0, mode, visited) + + case TINTER: + if t.IsEmptyInterface() { + b.WriteString("interface {}") + break + } + b.WriteString("interface {") + for i, f := range t.AllMethods() { + if i != 0 { + b.WriteByte(';') + } + b.WriteByte(' ') + switch { + case f.Sym == nil: + // Check first that a symbol is defined for this type. + // Wrong interface definitions may have types lacking a symbol. 
+ break + case IsExported(f.Sym.Name): + sconv2(b, f.Sym, 'S', mode) + default: + if mode != fmtTypeIDName { + mode = fmtTypeID + } + sconv2(b, f.Sym, 'v', mode) + } + tconv2(b, f.Type, 'S', mode, visited) + } + if len(t.AllMethods()) != 0 { + b.WriteByte(' ') + } + b.WriteByte('}') + + case TFUNC: + if verb == 'S' { + // no leading func + } else { + if t.Recv() != nil { + b.WriteString("method") + formatParams(b, t.Recvs(), mode, visited) + b.WriteByte(' ') + } + b.WriteString("func") + } + formatParams(b, t.Params(), mode, visited) + + switch t.NumResults() { + case 0: + // nothing to do + + case 1: + b.WriteByte(' ') + tconv2(b, t.Result(0).Type, 0, mode, visited) // struct->field->field's type + + default: + b.WriteByte(' ') + formatParams(b, t.Results(), mode, visited) + } + + case TSTRUCT: + if m := t.StructType().Map; m != nil { + mt := m.MapType() + // Format the bucket struct for map[x]y as map.bucket[x]y. + // This avoids a recursive print that generates very long names. + switch t { + case mt.Bucket: + b.WriteString("map.bucket[") + default: + base.Fatalf("unknown internal map type") + } + tconv2(b, m.Key(), 0, mode, visited) + b.WriteByte(']') + tconv2(b, m.Elem(), 0, mode, visited) + break + } + + b.WriteString("struct {") + for i, f := range t.Fields() { + if i != 0 { + b.WriteByte(';') + } + b.WriteByte(' ') + fldconv(b, f, 'L', mode, visited, false) + } + if t.NumFields() != 0 { + b.WriteByte(' ') + } + b.WriteByte('}') + + case TFORW: + b.WriteString("undefined") + if t.Sym() != nil { + b.WriteByte(' ') + sconv2(b, t.Sym(), 'v', mode) + } + + case TUNSAFEPTR: + b.WriteString("unsafe.Pointer") + + case Txxx: + b.WriteString("Txxx") + + default: + // Don't know how to handle - fall back to detailed prints + b.WriteString(t.Kind().String()) + b.WriteString(" <") + sconv2(b, t.Sym(), 'v', mode) + b.WriteString(">") + + } +} + +func formatParams(b *bytes.Buffer, params []*Field, mode fmtMode, visited map[*Type]int) { + b.WriteByte('(') + fieldVerb := 
'v' + switch mode { + case fmtTypeID, fmtTypeIDName, fmtGo: + // no argument names on function signature, and no "noescape"/"nosplit" tags + fieldVerb = 'S' + } + for i, param := range params { + if i != 0 { + b.WriteString(", ") + } + fldconv(b, param, fieldVerb, mode, visited, true) + } + b.WriteByte(')') +} + +func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Type]int, isParam bool) { + if f == nil { + b.WriteString("") + return + } + + var name string + nameSep := " " + if verb != 'S' { + s := f.Sym + + // Using type aliases and embedded fields, it's possible to + // construct types that can't be directly represented as a + // type literal. For example, given "type Int = int" (#50190), + // it would be incorrect to format "struct{ Int }" as either + // "struct{ int }" or "struct{ Int int }", because those each + // represent other, distinct types. + // + // So for the purpose of LinkString (i.e., fmtTypeID), we use + // the non-standard syntax "struct{ Int = int }" to represent + // embedded fields that have been renamed through the use of + // type aliases. + if f.Embedded != 0 { + if mode == fmtTypeID { + nameSep = " = " + + // Compute tsym, the symbol that would normally be used as + // the field name when embedding f.Type. + // TODO(mdempsky): Check for other occurrences of this logic + // and deduplicate. + typ := f.Type + if typ.IsPtr() { + base.Assertf(typ.Sym() == nil, "embedded pointer type has name: %L", typ) + typ = typ.Elem() + } + tsym := typ.Sym() + + // If the field name matches the embedded type's name, then + // suppress printing of the field name. For example, format + // "struct{ T }" as simply that instead of "struct{ T = T }". + if tsym != nil && (s == tsym || IsExported(tsym.Name) && s.Name == tsym.Name) { + s = nil + } + } else { + // Suppress the field name for embedded fields for + // non-LinkString formats, to match historical behavior. + // TODO(mdempsky): Re-evaluate this. 
+ s = nil + } + } + + if s != nil { + if isParam { + name = fmt.Sprint(f.Nname) + } else if verb == 'L' { + name = s.Name + if !IsExported(name) && mode != fmtTypeIDName { + name = sconv(s, 0, mode) // qualify non-exported names (used on structs, not on funarg) + } + } else { + name = sconv(s, 0, mode) + } + } + } + + if name != "" { + b.WriteString(name) + b.WriteString(nameSep) + } + + if f.IsDDD() { + var et *Type + if f.Type != nil { + et = f.Type.Elem() + } + b.WriteString("...") + tconv2(b, et, 0, mode, visited) + } else { + tconv2(b, f.Type, 0, mode, visited) + } + + if verb != 'S' && !isParam && f.Note != "" { + b.WriteString(" ") + b.WriteString(strconv.Quote(f.Note)) + } +} + +// SplitVargenSuffix returns name split into a base string and a ·N +// suffix, if any. +func SplitVargenSuffix(name string) (base, suffix string) { + i := len(name) + for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { + i-- + } + const dot = "·" + if i >= len(dot) && name[i-len(dot):i] == dot { + i -= len(dot) + return name[:i], name[i:] + } + return name, "" +} + +// TypeHash computes a hash value for type t to use in type switch statements. +func TypeHash(t *Type) uint32 { + p := t.LinkString() + + // Using SHA256 is overkill, but reduces accidental collisions. + h := notsha256.Sum256([]byte(p)) + return binary.LittleEndian.Uint32(h[:4]) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/goversion.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/goversion.go new file mode 100644 index 0000000000000000000000000000000000000000..ac08a49d0cab7c928fd2761394eba89f08912b9e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/goversion.go @@ -0,0 +1,88 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package types + +import ( + "fmt" + "internal/goversion" + "internal/lazyregexp" + "log" + "strconv" + + "cmd/compile/internal/base" +) + +// A lang is a language version broken into major and minor numbers. +type lang struct { + major, minor int +} + +// langWant is the desired language version set by the -lang flag. +// If the -lang flag is not set, this is the zero value, meaning that +// any language version is supported. +var langWant lang + +// AllowsGoVersion reports whether local package is allowed +// to use Go version major.minor. +func AllowsGoVersion(major, minor int) bool { + if langWant.major == 0 && langWant.minor == 0 { + return true + } + return langWant.major > major || (langWant.major == major && langWant.minor >= minor) +} + +// ParseLangFlag verifies that the -lang flag holds a valid value, and +// exits if not. It initializes data used by AllowsGoVersion. +func ParseLangFlag() { + if base.Flag.Lang == "" { + return + } + + var err error + langWant, err = parseLang(base.Flag.Lang) + if err != nil { + log.Fatalf("invalid value %q for -lang: %v", base.Flag.Lang, err) + } + + if def := currentLang(); base.Flag.Lang != def { + defVers, err := parseLang(def) + if err != nil { + log.Fatalf("internal error parsing default lang %q: %v", def, err) + } + if langWant.major > defVers.major || (langWant.major == defVers.major && langWant.minor > defVers.minor) { + log.Fatalf("invalid value %q for -lang: max known version is %q", base.Flag.Lang, def) + } + } +} + +// parseLang parses a -lang option into a langVer. 
+func parseLang(s string) (lang, error) { + if s == "go1" { // cmd/go's new spelling of "go1.0" (#65528) + s = "go1.0" + } + + matches := goVersionRE.FindStringSubmatch(s) + if matches == nil { + return lang{}, fmt.Errorf(`should be something like "go1.12"`) + } + major, err := strconv.Atoi(matches[1]) + if err != nil { + return lang{}, err + } + minor, err := strconv.Atoi(matches[2]) + if err != nil { + return lang{}, err + } + return lang{major: major, minor: minor}, nil +} + +// currentLang returns the current language version. +func currentLang() string { + return fmt.Sprintf("go1.%d", goversion.Version) +} + +// goVersionRE is a regular expression that matches the valid +// arguments to the -lang flag. +var goVersionRE = lazyregexp.New(`^go([1-9]\d*)\.(0|[1-9]\d*)$`) diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/identity.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/identity.go new file mode 100644 index 0000000000000000000000000000000000000000..fa28c038bddb97f06b9c7d5f0c93bda49ff91175 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/identity.go @@ -0,0 +1,157 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types + +const ( + identIgnoreTags = 1 << iota + identStrict +) + +// Identical reports whether t1 and t2 are identical types, following the spec rules. +// Receiver parameter types are ignored. Named (defined) types are only equal if they +// are pointer-equal - i.e. there must be a unique types.Type for each specific named +// type. Also, a type containing a shape type is considered identical to another type +// (shape or not) if their underlying types are the same, or they are both pointers. 
+func Identical(t1, t2 *Type) bool { + return identical(t1, t2, 0, nil) +} + +// IdenticalIgnoreTags is like Identical, but it ignores struct tags +// for struct identity. +func IdenticalIgnoreTags(t1, t2 *Type) bool { + return identical(t1, t2, identIgnoreTags, nil) +} + +// IdenticalStrict is like Identical, but matches types exactly, without the +// exception for shapes. +func IdenticalStrict(t1, t2 *Type) bool { + return identical(t1, t2, identStrict, nil) +} + +type typePair struct { + t1 *Type + t2 *Type +} + +func identical(t1, t2 *Type, flags int, assumedEqual map[typePair]struct{}) bool { + if t1 == t2 { + return true + } + if t1 == nil || t2 == nil || t1.kind != t2.kind { + return false + } + if t1.obj != nil || t2.obj != nil { + if flags&identStrict == 0 && (t1.HasShape() || t2.HasShape()) { + switch t1.kind { + case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR, TCOMPLEX64, TCOMPLEX128, TFLOAT32, TFLOAT64, TBOOL, TSTRING, TPTR, TUNSAFEPTR: + return true + } + // fall through to unnamed type comparison for complex types. + goto cont + } + // Special case: we keep byte/uint8 and rune/int32 + // separate for error messages. Treat them as equal. + switch t1.kind { + case TUINT8: + return (t1 == Types[TUINT8] || t1 == ByteType) && (t2 == Types[TUINT8] || t2 == ByteType) + case TINT32: + return (t1 == Types[TINT32] || t1 == RuneType) && (t2 == Types[TINT32] || t2 == RuneType) + case TINTER: + // Make sure named any type matches any unnamed empty interface + // (but not a shape type, if identStrict). 
+ isUnnamedEface := func(t *Type) bool { return t.IsEmptyInterface() && t.Sym() == nil } + if flags&identStrict != 0 { + return t1 == AnyType && isUnnamedEface(t2) && !t2.HasShape() || t2 == AnyType && isUnnamedEface(t1) && !t1.HasShape() + } + return t1 == AnyType && isUnnamedEface(t2) || t2 == AnyType && isUnnamedEface(t1) + default: + return false + } + } +cont: + + // Any cyclic type must go through a named type, and if one is + // named, it is only identical to the other if they are the + // same pointer (t1 == t2), so there's no chance of chasing + // cycles ad infinitum, so no need for a depth counter. + if assumedEqual == nil { + assumedEqual = make(map[typePair]struct{}) + } else if _, ok := assumedEqual[typePair{t1, t2}]; ok { + return true + } + assumedEqual[typePair{t1, t2}] = struct{}{} + + switch t1.kind { + case TIDEAL: + // Historically, cmd/compile used a single "untyped + // number" type, so all untyped number types were + // identical. Match this behavior. + // TODO(mdempsky): Revisit this. + return true + + case TINTER: + if len(t1.AllMethods()) != len(t2.AllMethods()) { + return false + } + for i, f1 := range t1.AllMethods() { + f2 := t2.AllMethods()[i] + if f1.Sym != f2.Sym || !identical(f1.Type, f2.Type, flags, assumedEqual) { + return false + } + } + return true + + case TSTRUCT: + if t1.NumFields() != t2.NumFields() { + return false + } + for i, f1 := range t1.Fields() { + f2 := t2.Field(i) + if f1.Sym != f2.Sym || f1.Embedded != f2.Embedded || !identical(f1.Type, f2.Type, flags, assumedEqual) { + return false + } + if (flags&identIgnoreTags) == 0 && f1.Note != f2.Note { + return false + } + } + return true + + case TFUNC: + // Check parameters and result parameters for type equality. + // We intentionally ignore receiver parameters for type + // equality, because they're never relevant. 
+ if t1.NumParams() != t2.NumParams() || + t1.NumResults() != t2.NumResults() || + t1.IsVariadic() != t2.IsVariadic() { + return false + } + + fs1 := t1.ParamsResults() + fs2 := t2.ParamsResults() + for i, f1 := range fs1 { + if !identical(f1.Type, fs2[i].Type, flags, assumedEqual) { + return false + } + } + return true + + case TARRAY: + if t1.NumElem() != t2.NumElem() { + return false + } + + case TCHAN: + if t1.ChanDir() != t2.ChanDir() { + return false + } + + case TMAP: + if !identical(t1.Key(), t2.Key(), flags, assumedEqual) { + return false + } + } + + return identical(t1.Elem(), t2.Elem(), flags, assumedEqual) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/kind_string.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/kind_string.go new file mode 100644 index 0000000000000000000000000000000000000000..1e1e84624080b08e614821c3e936c5c58daa4f01 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/kind_string.go @@ -0,0 +1,60 @@ +// Code generated by "stringer -type Kind -trimprefix T type.go"; DO NOT EDIT. + +package types + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[Txxx-0] + _ = x[TINT8-1] + _ = x[TUINT8-2] + _ = x[TINT16-3] + _ = x[TUINT16-4] + _ = x[TINT32-5] + _ = x[TUINT32-6] + _ = x[TINT64-7] + _ = x[TUINT64-8] + _ = x[TINT-9] + _ = x[TUINT-10] + _ = x[TUINTPTR-11] + _ = x[TCOMPLEX64-12] + _ = x[TCOMPLEX128-13] + _ = x[TFLOAT32-14] + _ = x[TFLOAT64-15] + _ = x[TBOOL-16] + _ = x[TPTR-17] + _ = x[TFUNC-18] + _ = x[TSLICE-19] + _ = x[TARRAY-20] + _ = x[TSTRUCT-21] + _ = x[TCHAN-22] + _ = x[TMAP-23] + _ = x[TINTER-24] + _ = x[TFORW-25] + _ = x[TANY-26] + _ = x[TSTRING-27] + _ = x[TUNSAFEPTR-28] + _ = x[TIDEAL-29] + _ = x[TNIL-30] + _ = x[TBLANK-31] + _ = x[TFUNCARGS-32] + _ = x[TCHANARGS-33] + _ = x[TSSA-34] + _ = x[TTUPLE-35] + _ = x[TRESULTS-36] + _ = x[NTYPE-37] +} + +const _Kind_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTRFUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRIDEALNILBLANKFUNCARGSCHANARGSSSATUPLERESULTSNTYPE" + +var _Kind_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 158, 161, 166, 174, 182, 185, 190, 197, 202} + +func (i Kind) String() string { + if i >= Kind(len(_Kind_index)-1) { + return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/pkg.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/pkg.go new file mode 100644 index 0000000000000000000000000000000000000000..c6ce7889afbb7bd7abc4d32a30e53278bda7b44f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/pkg.go @@ -0,0 +1,131 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package types + +import ( + "cmd/internal/obj" + "cmd/internal/objabi" + "fmt" + "strconv" + "sync" +) + +// pkgMap maps a package path to a package. +var pkgMap = make(map[string]*Pkg) + +type Pkg struct { + Path string // string literal used in import statement, e.g. "runtime/internal/sys" + Name string // package name, e.g. "sys" + Prefix string // escaped path for use in symbol table + Syms map[string]*Sym + Pathsym *obj.LSym + + Direct bool // imported directly +} + +// NewPkg returns a new Pkg for the given package path and name. +// Unless name is the empty string, if the package exists already, +// the existing package name and the provided name must match. +func NewPkg(path, name string) *Pkg { + if p := pkgMap[path]; p != nil { + if name != "" && p.Name != name { + panic(fmt.Sprintf("conflicting package names %s and %s for path %q", p.Name, name, path)) + } + return p + } + + p := new(Pkg) + p.Path = path + p.Name = name + if path == "go.shape" { + // Don't escape "go.shape", since it's not needed (it's a builtin + // package), and we don't want escape codes showing up in shape type + // names, which also appear in names of function/method + // instantiations. + p.Prefix = path + } else { + p.Prefix = objabi.PathToPrefix(path) + } + p.Syms = make(map[string]*Sym) + pkgMap[path] = p + + return p +} + +func PkgMap() map[string]*Pkg { + return pkgMap +} + +var nopkg = &Pkg{ + Syms: make(map[string]*Sym), +} + +func (pkg *Pkg) Lookup(name string) *Sym { + s, _ := pkg.LookupOK(name) + return s +} + +// LookupOK looks up name in pkg and reports whether it previously existed. 
+func (pkg *Pkg) LookupOK(name string) (s *Sym, existed bool) { + // TODO(gri) remove this check in favor of specialized lookup + if pkg == nil { + pkg = nopkg + } + if s := pkg.Syms[name]; s != nil { + return s, true + } + + s = &Sym{ + Name: name, + Pkg: pkg, + } + pkg.Syms[name] = s + return s, false +} + +func (pkg *Pkg) LookupBytes(name []byte) *Sym { + // TODO(gri) remove this check in favor of specialized lookup + if pkg == nil { + pkg = nopkg + } + if s := pkg.Syms[string(name)]; s != nil { + return s + } + str := InternString(name) + return pkg.Lookup(str) +} + +// LookupNum looks up the symbol starting with prefix and ending with +// the decimal n. If prefix is too long, LookupNum panics. +func (pkg *Pkg) LookupNum(prefix string, n int) *Sym { + var buf [20]byte // plenty long enough for all current users + copy(buf[:], prefix) + b := strconv.AppendInt(buf[:len(prefix)], int64(n), 10) + return pkg.LookupBytes(b) +} + +// Selector looks up a selector identifier. +func (pkg *Pkg) Selector(name string) *Sym { + if IsExported(name) { + pkg = LocalPkg + } + return pkg.Lookup(name) +} + +var ( + internedStringsmu sync.Mutex // protects internedStrings + internedStrings = map[string]string{} +) + +func InternString(b []byte) string { + internedStringsmu.Lock() + s, ok := internedStrings[string(b)] // string(b) here doesn't allocate + if !ok { + s = string(b) + internedStrings[s] = s + } + internedStringsmu.Unlock() + return s +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/size.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/size.go new file mode 100644 index 0000000000000000000000000000000000000000..6ba2b9153b13d3c64a8c568d27e5afa7875c3461 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/size.go @@ -0,0 +1,638 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package types + +import ( + "math" + "sort" + + "cmd/compile/internal/base" + "cmd/internal/src" + "internal/types/errors" +) + +var PtrSize int + +var RegSize int + +// Slices in the runtime are represented by three components: +// +// type slice struct { +// ptr unsafe.Pointer +// len int +// cap int +// } +// +// Strings in the runtime are represented by two components: +// +// type string struct { +// ptr unsafe.Pointer +// len int +// } +// +// These variables are the offsets of fields and sizes of these structs. +var ( + SlicePtrOffset int64 + SliceLenOffset int64 + SliceCapOffset int64 + + SliceSize int64 + StringSize int64 +) + +var SkipSizeForTracing bool + +// typePos returns the position associated with t. +// This is where t was declared or where it appeared as a type expression. +func typePos(t *Type) src.XPos { + if pos := t.Pos(); pos.IsKnown() { + return pos + } + base.Fatalf("bad type: %v", t) + panic("unreachable") +} + +// MaxWidth is the maximum size of a value on the target architecture. +var MaxWidth int64 + +// CalcSizeDisabled indicates whether it is safe +// to calculate Types' widths and alignments. See CalcSize. +var CalcSizeDisabled bool + +// machine size and rounding alignment is dictated around +// the size of a pointer, set in gc.Main (see ../gc/main.go). +var defercalc int + +// RoundUp rounds o to a multiple of r, r is a power of 2. +func RoundUp(o int64, r int64) int64 { + if r < 1 || r > 8 || r&(r-1) != 0 { + base.Fatalf("Round %d", r) + } + return (o + r - 1) &^ (r - 1) +} + +// expandiface computes the method set for interface type t by +// expanding embedded interfaces. 
+func expandiface(t *Type) { + seen := make(map[*Sym]*Field) + var methods []*Field + + addMethod := func(m *Field, explicit bool) { + switch prev := seen[m.Sym]; { + case prev == nil: + seen[m.Sym] = m + case !explicit && Identical(m.Type, prev.Type): + return + default: + base.ErrorfAt(m.Pos, errors.DuplicateDecl, "duplicate method %s", m.Sym.Name) + } + methods = append(methods, m) + } + + { + methods := t.Methods() + sort.SliceStable(methods, func(i, j int) bool { + mi, mj := methods[i], methods[j] + + // Sort embedded types by type name (if any). + if mi.Sym == nil && mj.Sym == nil { + return mi.Type.Sym().Less(mj.Type.Sym()) + } + + // Sort methods before embedded types. + if mi.Sym == nil || mj.Sym == nil { + return mi.Sym != nil + } + + // Sort methods by symbol name. + return mi.Sym.Less(mj.Sym) + }) + } + + for _, m := range t.Methods() { + if m.Sym == nil { + continue + } + + CheckSize(m.Type) + addMethod(m, true) + } + + for _, m := range t.Methods() { + if m.Sym != nil || m.Type == nil { + continue + } + + // In 1.18, embedded types can be anything. In Go 1.17, we disallow + // embedding anything other than interfaces. This requirement was caught + // by types2 already, so allow non-interface here. + if !m.Type.IsInterface() { + continue + } + + // Embedded interface: duplicate all methods + // and add to t's method set. + for _, t1 := range m.Type.AllMethods() { + f := NewField(m.Pos, t1.Sym, t1.Type) + addMethod(f, false) + + // Clear position after typechecking, for consistency with types2. + f.Pos = src.NoXPos + } + + // Clear position after typechecking, for consistency with types2. 
+ m.Pos = src.NoXPos + } + + sort.Sort(MethodsByName(methods)) + + if int64(len(methods)) >= MaxWidth/int64(PtrSize) { + base.ErrorfAt(typePos(t), 0, "interface too large") + } + for i, m := range methods { + m.Offset = int64(i) * int64(PtrSize) + } + + t.SetAllMethods(methods) +} + +// calcStructOffset computes the offsets of a sequence of fields, +// starting at the given offset. It returns the resulting offset and +// maximum field alignment. +func calcStructOffset(t *Type, fields []*Field, offset int64) int64 { + for _, f := range fields { + CalcSize(f.Type) + offset = RoundUp(offset, int64(f.Type.align)) + + if t.IsStruct() { // param offsets depend on ABI + f.Offset = offset + + // If type T contains a field F marked as not-in-heap, + // then T must also be a not-in-heap type. Otherwise, + // you could heap allocate T and then get a pointer F, + // which would be a heap pointer to a not-in-heap type. + if f.Type.NotInHeap() { + t.SetNotInHeap(true) + } + } + + offset += f.Type.width + + maxwidth := MaxWidth + // On 32-bit systems, reflect tables impose an additional constraint + // that each field start offset must fit in 31 bits. + if maxwidth < 1<<32 { + maxwidth = 1<<31 - 1 + } + if offset >= maxwidth { + base.ErrorfAt(typePos(t), 0, "type %L too large", t) + offset = 8 // small but nonzero + } + } + + return offset +} + +func isAtomicStdPkg(p *Pkg) bool { + if p.Prefix == `""` { + panic("bad package prefix") + } + return p.Prefix == "sync/atomic" || p.Prefix == "runtime/internal/atomic" +} + +// CalcSize calculates and stores the size and alignment for t. +// If CalcSizeDisabled is set, and the size/alignment +// have not already been calculated, it calls Fatal. +// This is used to prevent data races in the back end. +func CalcSize(t *Type) { + // Calling CalcSize when typecheck tracing enabled is not safe. + // See issue #33658. + if base.EnableTrace && SkipSizeForTracing { + return + } + if PtrSize == 0 { + // Assume this is a test. 
+ return + } + + if t == nil { + return + } + + if t.width == -2 { + t.width = 0 + t.align = 1 + base.Fatalf("invalid recursive type %v", t) + return + } + + if t.widthCalculated() { + return + } + + if CalcSizeDisabled { + base.Fatalf("width not calculated: %v", t) + } + + // defer CheckSize calls until after we're done + DeferCheckSize() + + lno := base.Pos + if pos := t.Pos(); pos.IsKnown() { + base.Pos = pos + } + + t.width = -2 + t.align = 0 // 0 means use t.Width, below + + et := t.Kind() + switch et { + case TFUNC, TCHAN, TMAP, TSTRING: + break + + // SimType == 0 during bootstrap + default: + if SimType[t.Kind()] != 0 { + et = SimType[t.Kind()] + } + } + + var w int64 + switch et { + default: + base.Fatalf("CalcSize: unknown type: %v", t) + + // compiler-specific stuff + case TINT8, TUINT8, TBOOL: + // bool is int8 + w = 1 + t.intRegs = 1 + + case TINT16, TUINT16: + w = 2 + t.intRegs = 1 + + case TINT32, TUINT32: + w = 4 + t.intRegs = 1 + + case TINT64, TUINT64: + w = 8 + t.align = uint8(RegSize) + t.intRegs = uint8(8 / RegSize) + + case TFLOAT32: + w = 4 + t.floatRegs = 1 + + case TFLOAT64: + w = 8 + t.align = uint8(RegSize) + t.floatRegs = 1 + + case TCOMPLEX64: + w = 8 + t.align = 4 + t.floatRegs = 2 + + case TCOMPLEX128: + w = 16 + t.align = uint8(RegSize) + t.floatRegs = 2 + + case TPTR: + w = int64(PtrSize) + t.intRegs = 1 + CheckSize(t.Elem()) + + case TUNSAFEPTR: + w = int64(PtrSize) + t.intRegs = 1 + + case TINTER: // implemented as 2 pointers + w = 2 * int64(PtrSize) + t.align = uint8(PtrSize) + t.intRegs = 2 + expandiface(t) + + case TCHAN: // implemented as pointer + w = int64(PtrSize) + t.intRegs = 1 + + CheckSize(t.Elem()) + + // Make fake type to trigger channel element size check after + // any top-level recursive type has been completed. + t1 := NewChanArgs(t) + CheckSize(t1) + + case TCHANARGS: + t1 := t.ChanArgs() + CalcSize(t1) // just in case + // Make sure size of t1.Elem() is calculated at this point. 
We can + // use CalcSize() here rather than CheckSize(), because the top-level + // (possibly recursive) type will have been calculated before the fake + // chanargs is handled. + CalcSize(t1.Elem()) + if t1.Elem().width >= 1<<16 { + base.Errorf("channel element type too large (>64kB)") + } + w = 1 // anything will do + + case TMAP: // implemented as pointer + w = int64(PtrSize) + t.intRegs = 1 + CheckSize(t.Elem()) + CheckSize(t.Key()) + + case TFORW: // should have been filled in + base.Fatalf("invalid recursive type %v", t) + + case TANY: // not a real type; should be replaced before use. + base.Fatalf("CalcSize any") + + case TSTRING: + if StringSize == 0 { + base.Fatalf("early CalcSize string") + } + w = StringSize + t.align = uint8(PtrSize) + t.intRegs = 2 + + case TARRAY: + if t.Elem() == nil { + break + } + + CalcSize(t.Elem()) + t.SetNotInHeap(t.Elem().NotInHeap()) + if t.Elem().width != 0 { + cap := (uint64(MaxWidth) - 1) / uint64(t.Elem().width) + if uint64(t.NumElem()) > cap { + base.Errorf("type %L larger than address space", t) + } + } + w = t.NumElem() * t.Elem().width + t.align = t.Elem().align + + // ABIInternal only allows "trivial" arrays (i.e., length 0 or 1) + // to be passed by register. + switch t.NumElem() { + case 0: + t.intRegs = 0 + t.floatRegs = 0 + case 1: + t.intRegs = t.Elem().intRegs + t.floatRegs = t.Elem().floatRegs + default: + t.intRegs = math.MaxUint8 + t.floatRegs = math.MaxUint8 + } + + case TSLICE: + if t.Elem() == nil { + break + } + w = SliceSize + CheckSize(t.Elem()) + t.align = uint8(PtrSize) + t.intRegs = 3 + + case TSTRUCT: + if t.IsFuncArgStruct() { + base.Fatalf("CalcSize fn struct %v", t) + } + CalcStructSize(t) + w = t.width + + // make fake type to check later to + // trigger function argument computation. + case TFUNC: + t1 := NewFuncArgs(t) + CheckSize(t1) + w = int64(PtrSize) // width of func type is pointer + t.intRegs = 1 + + // function is 3 cated structures; + // compute their widths as side-effect. 
+ case TFUNCARGS: + t1 := t.FuncArgs() + // TODO(mdempsky): Should package abi be responsible for computing argwid? + w = calcStructOffset(t1, t1.Recvs(), 0) + w = calcStructOffset(t1, t1.Params(), w) + w = RoundUp(w, int64(RegSize)) + w = calcStructOffset(t1, t1.Results(), w) + w = RoundUp(w, int64(RegSize)) + t1.extra.(*Func).Argwid = w + t.align = 1 + } + + if PtrSize == 4 && w != int64(int32(w)) { + base.Errorf("type %v too large", t) + } + + t.width = w + if t.align == 0 { + if w == 0 || w > 8 || w&(w-1) != 0 { + base.Fatalf("invalid alignment for %v", t) + } + t.align = uint8(w) + } + + base.Pos = lno + + ResumeCheckSize() +} + +// CalcStructSize calculates the size of t, +// filling in t.width, t.align, t.intRegs, and t.floatRegs, +// even if size calculation is otherwise disabled. +func CalcStructSize(t *Type) { + var maxAlign uint8 = 1 + + // Recognize special types. This logic is duplicated in go/types and + // cmd/compile/internal/types2. + if sym := t.Sym(); sym != nil { + switch { + case sym.Name == "align64" && isAtomicStdPkg(sym.Pkg): + maxAlign = 8 + case sym.Pkg.Path == "runtime/internal/sys" && sym.Name == "nih": + t.SetNotInHeap(true) + } + } + + fields := t.Fields() + size := calcStructOffset(t, fields, 0) + + // For non-zero-sized structs which end in a zero-sized field, we + // add an extra byte of padding to the type. This padding ensures + // that taking the address of a zero-sized field can't manufacture a + // pointer to the next object in the heap. See issue 9401. + if size > 0 && fields[len(fields)-1].Type.width == 0 { + size++ + } + + var intRegs, floatRegs uint64 + for _, field := range fields { + typ := field.Type + + // The alignment of a struct type is the maximum alignment of its + // field types. + if align := typ.align; align > maxAlign { + maxAlign = align + } + + // Each field needs its own registers. + // We sum in uint64 to avoid possible overflows. 
+ intRegs += uint64(typ.intRegs) + floatRegs += uint64(typ.floatRegs) + } + + // Final size includes trailing padding. + size = RoundUp(size, int64(maxAlign)) + + if intRegs > math.MaxUint8 || floatRegs > math.MaxUint8 { + intRegs = math.MaxUint8 + floatRegs = math.MaxUint8 + } + + t.width = size + t.align = maxAlign + t.intRegs = uint8(intRegs) + t.floatRegs = uint8(floatRegs) +} + +func (t *Type) widthCalculated() bool { + return t.align > 0 +} + +// when a type's width should be known, we call CheckSize +// to compute it. during a declaration like +// +// type T *struct { next T } +// +// it is necessary to defer the calculation of the struct width +// until after T has been initialized to be a pointer to that struct. +// similarly, during import processing structs may be used +// before their definition. in those situations, calling +// DeferCheckSize() stops width calculations until +// ResumeCheckSize() is called, at which point all the +// CalcSizes that were deferred are executed. +// CalcSize should only be called when the type's size +// is needed immediately. CheckSize makes sure the +// size is evaluated eventually. + +var deferredTypeStack []*Type + +func CheckSize(t *Type) { + if t == nil { + return + } + + // function arg structs should not be checked + // outside of the enclosing function. 
+ if t.IsFuncArgStruct() { + base.Fatalf("CheckSize %v", t) + } + + if defercalc == 0 { + CalcSize(t) + return + } + + // if type has not yet been pushed on deferredTypeStack yet, do it now + if !t.Deferwidth() { + t.SetDeferwidth(true) + deferredTypeStack = append(deferredTypeStack, t) + } +} + +func DeferCheckSize() { + defercalc++ +} + +func ResumeCheckSize() { + if defercalc == 1 { + for len(deferredTypeStack) > 0 { + t := deferredTypeStack[len(deferredTypeStack)-1] + deferredTypeStack = deferredTypeStack[:len(deferredTypeStack)-1] + t.SetDeferwidth(false) + CalcSize(t) + } + } + + defercalc-- +} + +// PtrDataSize returns the length in bytes of the prefix of t +// containing pointer data. Anything after this offset is scalar data. +// +// PtrDataSize is only defined for actual Go types. It's an error to +// use it on compiler-internal types (e.g., TSSA, TRESULTS). +func PtrDataSize(t *Type) int64 { + switch t.Kind() { + case TBOOL, TINT8, TUINT8, TINT16, TUINT16, TINT32, + TUINT32, TINT64, TUINT64, TINT, TUINT, + TUINTPTR, TCOMPLEX64, TCOMPLEX128, TFLOAT32, TFLOAT64: + return 0 + + case TPTR: + if t.Elem().NotInHeap() { + return 0 + } + return int64(PtrSize) + + case TUNSAFEPTR, TFUNC, TCHAN, TMAP: + return int64(PtrSize) + + case TSTRING: + // struct { byte *str; intgo len; } + return int64(PtrSize) + + case TINTER: + // struct { Itab *tab; void *data; } or + // struct { Type *type; void *data; } + // Note: see comment in typebits.Set + return 2 * int64(PtrSize) + + case TSLICE: + if t.Elem().NotInHeap() { + return 0 + } + // struct { byte *array; uintgo len; uintgo cap; } + return int64(PtrSize) + + case TARRAY: + if t.NumElem() == 0 { + return 0 + } + // t.NumElem() > 0 + size := PtrDataSize(t.Elem()) + if size == 0 { + return 0 + } + return (t.NumElem()-1)*t.Elem().Size() + size + + case TSTRUCT: + // Find the last field that has pointers, if any. 
+ fs := t.Fields() + for i := len(fs) - 1; i >= 0; i-- { + if size := PtrDataSize(fs[i].Type); size > 0 { + return fs[i].Offset + size + } + } + return 0 + + case TSSA: + if t != TypeInt128 { + base.Fatalf("PtrDataSize: unexpected ssa type %v", t) + } + return 0 + + default: + base.Fatalf("PtrDataSize: unexpected type, %v", t) + return 0 + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/sizeof_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/sizeof_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8a6f24124a88960ec70f4cf9159df441d0601391 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/sizeof_test.go @@ -0,0 +1,48 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types + +import ( + "reflect" + "testing" + "unsafe" +) + +// Assert that the size of important structures do not change unexpectedly. 
+ +func TestSizeof(t *testing.T) { + const _64bit = unsafe.Sizeof(uintptr(0)) == 8 + + var tests = []struct { + val interface{} // type as a value + _32bit uintptr // size on 32bit platforms + _64bit uintptr // size on 64bit platforms + }{ + {Sym{}, 32, 64}, + {Type{}, 56, 96}, + {Map{}, 12, 24}, + {Forward{}, 20, 32}, + {Func{}, 32, 56}, + {Struct{}, 12, 24}, + {Interface{}, 0, 0}, + {Chan{}, 8, 16}, + {Array{}, 12, 16}, + {FuncArgs{}, 4, 8}, + {ChanArgs{}, 4, 8}, + {Ptr{}, 4, 8}, + {Slice{}, 4, 8}, + } + + for _, tt := range tests { + want := tt._32bit + if _64bit { + want = tt._64bit + } + got := reflect.TypeOf(tt.val).Size() + if want != got { + t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/sort.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/sort.go new file mode 100644 index 0000000000000000000000000000000000000000..765c070cd94193c4c37db0f4415d1ee3ea06b031 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/sort.go @@ -0,0 +1,19 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types + +// MethodsByName sorts methods by name. +type MethodsByName []*Field + +func (x MethodsByName) Len() int { return len(x) } +func (x MethodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x MethodsByName) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) } + +// EmbeddedsByName sorts embedded types by name. 
+type EmbeddedsByName []*Field + +func (x EmbeddedsByName) Len() int { return len(x) } +func (x EmbeddedsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x EmbeddedsByName) Less(i, j int) bool { return x[i].Type.Sym().Less(x[j].Type.Sym()) } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/sym.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/sym.go new file mode 100644 index 0000000000000000000000000000000000000000..67fa6bb1d0c1cc4a450ecf14f4cbd74e53113bdf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/sym.go @@ -0,0 +1,138 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types + +import ( + "cmd/compile/internal/base" + "cmd/internal/obj" + "unicode" + "unicode/utf8" +) + +// Sym represents an object name in a segmented (pkg, name) namespace. +// Most commonly, this is a Go identifier naming an object declared within a package, +// but Syms are also used to name internal synthesized objects. +// +// As an exception, field and method names that are exported use the Sym +// associated with localpkg instead of the package that declared them. This +// allows using Sym pointer equality to test for Go identifier uniqueness when +// handling selector expressions. +// +// Ideally, Sym should be used for representing Go language constructs, +// while cmd/internal/obj.LSym is used for representing emitted artifacts. +// +// NOTE: In practice, things can be messier than the description above +// for various reasons (historical, convenience). +type Sym struct { + Linkname string // link name + + Pkg *Pkg + Name string // object name + + // The unique ONAME, OTYPE, OPACK, or OLITERAL node that this symbol is + // bound to within the current scope. (Most parts of the compiler should + // prefer passing the Node directly, rather than relying on this field.) 
+ // + // Deprecated: New code should avoid depending on Sym.Def. Add + // mdempsky@ as a reviewer for any CLs involving Sym.Def. + Def Object + + flags bitset8 +} + +const ( + symOnExportList = 1 << iota // added to exportlist (no need to add again) + symUniq + symSiggen // type symbol has been generated + symAsm // on asmlist, for writing to -asmhdr + symFunc // function symbol +) + +func (sym *Sym) OnExportList() bool { return sym.flags&symOnExportList != 0 } +func (sym *Sym) Uniq() bool { return sym.flags&symUniq != 0 } +func (sym *Sym) Siggen() bool { return sym.flags&symSiggen != 0 } +func (sym *Sym) Asm() bool { return sym.flags&symAsm != 0 } +func (sym *Sym) Func() bool { return sym.flags&symFunc != 0 } + +func (sym *Sym) SetOnExportList(b bool) { sym.flags.set(symOnExportList, b) } +func (sym *Sym) SetUniq(b bool) { sym.flags.set(symUniq, b) } +func (sym *Sym) SetSiggen(b bool) { sym.flags.set(symSiggen, b) } +func (sym *Sym) SetAsm(b bool) { sym.flags.set(symAsm, b) } +func (sym *Sym) SetFunc(b bool) { sym.flags.set(symFunc, b) } + +func (sym *Sym) IsBlank() bool { + return sym != nil && sym.Name == "_" +} + +// Deprecated: This method should not be used directly. Instead, use a +// higher-level abstraction that directly returns the linker symbol +// for a named object. For example, reflectdata.TypeLinksym(t) instead +// of reflectdata.TypeSym(t).Linksym(). +func (sym *Sym) Linksym() *obj.LSym { + abi := obj.ABI0 + if sym.Func() { + abi = obj.ABIInternal + } + return sym.LinksymABI(abi) +} + +// Deprecated: This method should not be used directly. Instead, use a +// higher-level abstraction that directly returns the linker symbol +// for a named object. For example, (*ir.Name).LinksymABI(abi) instead +// of (*ir.Name).Sym().LinksymABI(abi). 
+func (sym *Sym) LinksymABI(abi obj.ABI) *obj.LSym { + if sym == nil { + base.Fatalf("nil symbol") + } + if sym.Linkname != "" { + return base.Linkname(sym.Linkname, abi) + } + return base.PkgLinksym(sym.Pkg.Prefix, sym.Name, abi) +} + +// Less reports whether symbol a is ordered before symbol b. +// +// Symbols are ordered exported before non-exported, then by name, and +// finally (for non-exported symbols) by package path. +func (a *Sym) Less(b *Sym) bool { + if a == b { + return false + } + + // Nil before non-nil. + if a == nil { + return true + } + if b == nil { + return false + } + + // Exported symbols before non-exported. + ea := IsExported(a.Name) + eb := IsExported(b.Name) + if ea != eb { + return ea + } + + // Order by name and then (for non-exported names) by package + // height and path. + if a.Name != b.Name { + return a.Name < b.Name + } + if !ea { + return a.Pkg.Path < b.Pkg.Path + } + return false +} + +// IsExported reports whether name is an exported Go symbol (that is, +// whether it begins with an upper-case letter). +func IsExported(name string) bool { + if r := name[0]; r < utf8.RuneSelf { + return 'A' <= r && r <= 'Z' + } + r, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(r) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/sym_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/sym_test.go new file mode 100644 index 0000000000000000000000000000000000000000..94efd42aa4ad446592c4e22706b8ce2545314eeb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/sym_test.go @@ -0,0 +1,59 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package types_test + +import ( + "cmd/compile/internal/types" + "reflect" + "sort" + "testing" +) + +func TestSymLess(t *testing.T) { + var ( + local = types.NewPkg("", "") + abc = types.NewPkg("abc", "") + uvw = types.NewPkg("uvw", "") + xyz = types.NewPkg("xyz", "") + gr = types.NewPkg("gr", "") + ) + + data := []*types.Sym{ + abc.Lookup("b"), + local.Lookup("B"), + local.Lookup("C"), + uvw.Lookup("c"), + local.Lookup("C"), + gr.Lookup("φ"), + local.Lookup("Φ"), + xyz.Lookup("b"), + abc.Lookup("a"), + local.Lookup("B"), + } + want := []*types.Sym{ + local.Lookup("B"), + local.Lookup("B"), + local.Lookup("C"), + local.Lookup("C"), + local.Lookup("Φ"), + abc.Lookup("a"), + abc.Lookup("b"), + xyz.Lookup("b"), + uvw.Lookup("c"), + gr.Lookup("φ"), + } + if len(data) != len(want) { + t.Fatal("want and data must match") + } + if reflect.DeepEqual(data, want) { + t.Fatal("data must be shuffled") + } + sort.Slice(data, func(i, j int) bool { return data[i].Less(data[j]) }) + if !reflect.DeepEqual(data, want) { + t.Logf("want: %#v", want) + t.Logf("data: %#v", data) + t.Errorf("sorting failed") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/type.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/type.go new file mode 100644 index 0000000000000000000000000000000000000000..c2b0ca3a4458c274754f211e7e61ea7b39775983 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/type.go @@ -0,0 +1,1986 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types + +import ( + "cmd/compile/internal/base" + "cmd/internal/objabi" + "cmd/internal/src" + "fmt" + "go/constant" + "internal/types/errors" + "sync" +) + +// Object represents an ir.Node, but without needing to import cmd/compile/internal/ir, +// which would cause an import cycle. 
The uses in other packages must type assert +// values of type Object to ir.Node or a more specific type. +type Object interface { + Pos() src.XPos + Sym() *Sym + Type() *Type +} + +//go:generate stringer -type Kind -trimprefix T type.go + +// Kind describes a kind of type. +type Kind uint8 + +const ( + Txxx Kind = iota + + TINT8 + TUINT8 + TINT16 + TUINT16 + TINT32 + TUINT32 + TINT64 + TUINT64 + TINT + TUINT + TUINTPTR + + TCOMPLEX64 + TCOMPLEX128 + + TFLOAT32 + TFLOAT64 + + TBOOL + + TPTR + TFUNC + TSLICE + TARRAY + TSTRUCT + TCHAN + TMAP + TINTER + TFORW + TANY + TSTRING + TUNSAFEPTR + + // pseudo-types for literals + TIDEAL // untyped numeric constants + TNIL + TBLANK + + // pseudo-types used temporarily only during frame layout (CalcSize()) + TFUNCARGS + TCHANARGS + + // SSA backend types + TSSA // internal types used by SSA backend (flags, memory, etc.) + TTUPLE // a pair of types, used by SSA backend + TRESULTS // multiple types; the result of calling a function or method, with a memory at the end. + + NTYPE +) + +// ChanDir is whether a channel can send, receive, or both. +type ChanDir uint8 + +func (c ChanDir) CanRecv() bool { return c&Crecv != 0 } +func (c ChanDir) CanSend() bool { return c&Csend != 0 } + +const ( + // types of channel + // must match ../../../../reflect/type.go:/ChanDir + Crecv ChanDir = 1 << 0 + Csend ChanDir = 1 << 1 + Cboth ChanDir = Crecv | Csend +) + +// Types stores pointers to predeclared named types. +// +// It also stores pointers to several special types: +// - Types[TANY] is the placeholder "any" type recognized by SubstArgTypes. +// - Types[TBLANK] represents the blank variable's type. +// - Types[TINTER] is the canonical "interface{}" type. +// - Types[TNIL] represents the predeclared "nil" value's type. +// - Types[TUNSAFEPTR] is package unsafe's Pointer type. +var Types [NTYPE]*Type + +var ( + // Predeclared alias types. 
These are actually created as distinct + // defined types for better error messages, but are then specially + // treated as identical to their respective underlying types. + AnyType *Type + ByteType *Type + RuneType *Type + + // Predeclared error interface type. + ErrorType *Type + // Predeclared comparable interface type. + ComparableType *Type + + // Types to represent untyped string and boolean constants. + UntypedString = newType(TSTRING) + UntypedBool = newType(TBOOL) + + // Types to represent untyped numeric constants. + UntypedInt = newType(TIDEAL) + UntypedRune = newType(TIDEAL) + UntypedFloat = newType(TIDEAL) + UntypedComplex = newType(TIDEAL) +) + +// UntypedTypes maps from a constant.Kind to its untyped Type +// representation. +var UntypedTypes = [...]*Type{ + constant.Bool: UntypedBool, + constant.String: UntypedString, + constant.Int: UntypedInt, + constant.Float: UntypedFloat, + constant.Complex: UntypedComplex, +} + +// DefaultKinds maps from a constant.Kind to its default Kind. +var DefaultKinds = [...]Kind{ + constant.Bool: TBOOL, + constant.String: TSTRING, + constant.Int: TINT, + constant.Float: TFLOAT64, + constant.Complex: TCOMPLEX128, +} + +// A Type represents a Go type. +// +// There may be multiple unnamed types with identical structure. However, there must +// be a unique Type object for each unique named (defined) type. After noding, a +// package-level type can be looked up by building its unique symbol sym (sym = +// package.Lookup(name)) and checking sym.Def. If sym.Def is non-nil, the type +// already exists at package scope and is available at sym.Def.(*ir.Name).Type(). +// Local types (which may have the same name as a package-level type) are +// distinguished by their vargen, which is embedded in their symbol name. +type Type struct { + // extra contains extra etype-specific fields. 
+ // As an optimization, those etype-specific structs which contain exactly + // one pointer-shaped field are stored as values rather than pointers when possible. + // + // TMAP: *Map + // TFORW: *Forward + // TFUNC: *Func + // TSTRUCT: *Struct + // TINTER: *Interface + // TFUNCARGS: FuncArgs + // TCHANARGS: ChanArgs + // TCHAN: *Chan + // TPTR: Ptr + // TARRAY: *Array + // TSLICE: Slice + // TSSA: string + extra interface{} + + // width is the width of this Type in bytes. + width int64 // valid if Align > 0 + + // list of base methods (excluding embedding) + methods fields + // list of all methods (including embedding) + allMethods fields + + // canonical OTYPE node for a named type (should be an ir.Name node with same sym) + obj Object + // the underlying type (type literal or predeclared type) for a defined type + underlying *Type + + // Cache of composite types, with this type being the element type. + cache struct { + ptr *Type // *T, or nil + slice *Type // []T, or nil + } + + kind Kind // kind of type + align uint8 // the required alignment of this type, in bytes (0 means Width and Align have not yet been computed) + + intRegs, floatRegs uint8 // registers needed for ABIInternal + + flags bitset8 + + // For defined (named) generic types, a pointer to the list of type params + // (in order) of this type that need to be instantiated. For instantiated + // generic types, this is the targs used to instantiate them. These targs + // may be typeparams (for re-instantiated types such as Value[T2]) or + // concrete types (for fully instantiated types such as Value[int]). + // rparams is only set for named types that are generic or are fully + // instantiated from a generic type, and is otherwise set to nil. + // TODO(danscales): choose a better name. + rparams *[]*Type +} + +// Registers returns the number of integer and floating-point +// registers required to represent a parameter of this type under the +// ABIInternal calling conventions. 
+// +// If t must be passed by memory, Registers returns (math.MaxUint8, +// math.MaxUint8). +func (t *Type) Registers() (uint8, uint8) { + CalcSize(t) + return t.intRegs, t.floatRegs +} + +func (*Type) CanBeAnSSAAux() {} + +const ( + typeNotInHeap = 1 << iota // type cannot be heap allocated + typeNoalg // suppress hash and eq algorithm generation + typeDeferwidth // width computation has been deferred and type is on deferredTypeStack + typeRecur + typeIsShape // represents a set of closely related types, for generics + typeHasShape // there is a shape somewhere in the type +) + +func (t *Type) NotInHeap() bool { return t.flags&typeNotInHeap != 0 } +func (t *Type) Noalg() bool { return t.flags&typeNoalg != 0 } +func (t *Type) Deferwidth() bool { return t.flags&typeDeferwidth != 0 } +func (t *Type) Recur() bool { return t.flags&typeRecur != 0 } +func (t *Type) IsShape() bool { return t.flags&typeIsShape != 0 } +func (t *Type) HasShape() bool { return t.flags&typeHasShape != 0 } + +func (t *Type) SetNotInHeap(b bool) { t.flags.set(typeNotInHeap, b) } +func (t *Type) SetNoalg(b bool) { t.flags.set(typeNoalg, b) } +func (t *Type) SetDeferwidth(b bool) { t.flags.set(typeDeferwidth, b) } +func (t *Type) SetRecur(b bool) { t.flags.set(typeRecur, b) } + +// Should always do SetHasShape(true) when doing SetIsShape(true). +func (t *Type) SetIsShape(b bool) { t.flags.set(typeIsShape, b) } +func (t *Type) SetHasShape(b bool) { t.flags.set(typeHasShape, b) } + +// Kind returns the kind of type t. +func (t *Type) Kind() Kind { return t.kind } + +// Sym returns the name of type t. +func (t *Type) Sym() *Sym { + if t.obj != nil { + return t.obj.Sym() + } + return nil +} + +// Underlying returns the underlying type of type t. +func (t *Type) Underlying() *Type { return t.underlying } + +// Pos returns a position associated with t, if any. +// This should only be used for diagnostics. 
+func (t *Type) Pos() src.XPos { + if t.obj != nil { + return t.obj.Pos() + } + return src.NoXPos +} + +func (t *Type) RParams() []*Type { + if t.rparams == nil { + return nil + } + return *t.rparams +} + +func (t *Type) SetRParams(rparams []*Type) { + if len(rparams) == 0 { + base.Fatalf("Setting nil or zero-length rparams") + } + t.rparams = &rparams + // HasShape should be set if any type argument is or has a shape type. + for _, rparam := range rparams { + if rparam.HasShape() { + t.SetHasShape(true) + break + } + } +} + +// IsFullyInstantiated reports whether t is a fully instantiated generic type; i.e. an +// instantiated generic type where all type arguments are non-generic or fully +// instantiated generic types. +func (t *Type) IsFullyInstantiated() bool { + return len(t.RParams()) > 0 +} + +// Map contains Type fields specific to maps. +type Map struct { + Key *Type // Key type + Elem *Type // Val (elem) type + + Bucket *Type // internal struct type representing a hash bucket +} + +// MapType returns t's extra map-specific fields. +func (t *Type) MapType() *Map { + t.wantEtype(TMAP) + return t.extra.(*Map) +} + +// Forward contains Type fields specific to forward types. +type Forward struct { + Copyto []*Type // where to copy the eventual value to + Embedlineno src.XPos // first use of this type as an embedded type +} + +// forwardType returns t's extra forward-type-specific fields. +func (t *Type) forwardType() *Forward { + t.wantEtype(TFORW) + return t.extra.(*Forward) +} + +// Func contains Type fields specific to func types. +type Func struct { + allParams []*Field // slice of all parameters, in receiver/params/results order + + startParams int // index of the start of the (regular) parameters section + startResults int // index of the start of the results section + + resultsTuple *Type // struct-like type representing multi-value results + + // Argwid is the total width of the function receiver, params, and results. 
+ // It gets calculated via a temporary TFUNCARGS type. + // Note that TFUNC's Width is Widthptr. + Argwid int64 +} + +func (ft *Func) recvs() []*Field { return ft.allParams[:ft.startParams] } +func (ft *Func) params() []*Field { return ft.allParams[ft.startParams:ft.startResults] } +func (ft *Func) results() []*Field { return ft.allParams[ft.startResults:] } +func (ft *Func) recvParams() []*Field { return ft.allParams[:ft.startResults] } +func (ft *Func) paramsResults() []*Field { return ft.allParams[ft.startParams:] } + +// funcType returns t's extra func-specific fields. +func (t *Type) funcType() *Func { + t.wantEtype(TFUNC) + return t.extra.(*Func) +} + +// StructType contains Type fields specific to struct types. +type Struct struct { + fields fields + + // Maps have three associated internal structs (see struct MapType). + // Map links such structs back to their map type. + Map *Type + + ParamTuple bool // whether this struct is actually a tuple of signature parameters +} + +// StructType returns t's extra struct-specific fields. +func (t *Type) StructType() *Struct { + t.wantEtype(TSTRUCT) + return t.extra.(*Struct) +} + +// Interface contains Type fields specific to interface types. +type Interface struct { +} + +// Ptr contains Type fields specific to pointer types. +type Ptr struct { + Elem *Type // element type +} + +// ChanArgs contains Type fields specific to TCHANARGS types. +type ChanArgs struct { + T *Type // reference to a chan type whose elements need a width check +} + +// // FuncArgs contains Type fields specific to TFUNCARGS types. +type FuncArgs struct { + T *Type // reference to a func type whose elements need a width check +} + +// Chan contains Type fields specific to channel types. +type Chan struct { + Elem *Type // element type + Dir ChanDir // channel direction +} + +// chanType returns t's extra channel-specific fields. 
+func (t *Type) chanType() *Chan { + t.wantEtype(TCHAN) + return t.extra.(*Chan) +} + +type Tuple struct { + first *Type + second *Type + // Any tuple with a memory type must put that memory type second. +} + +// Results are the output from calls that will be late-expanded. +type Results struct { + Types []*Type // Last element is memory output from call. +} + +// Array contains Type fields specific to array types. +type Array struct { + Elem *Type // element type + Bound int64 // number of elements; <0 if unknown yet +} + +// Slice contains Type fields specific to slice types. +type Slice struct { + Elem *Type // element type +} + +// A Field is a (Sym, Type) pairing along with some other information, and, +// depending on the context, is used to represent: +// - a field in a struct +// - a method in an interface or associated with a named type +// - a function parameter +type Field struct { + flags bitset8 + + Embedded uint8 // embedded field + + Pos src.XPos + + // Name of field/method/parameter. Can be nil for interface fields embedded + // in interfaces and unnamed parameters. + Sym *Sym + Type *Type // field type + Note string // literal string annotation + + // For fields that represent function parameters, Nname points to the + // associated ONAME Node. For fields that represent methods, Nname points to + // the function name node. + Nname Object + + // Offset in bytes of this field or method within its enclosing struct + // or interface Type. For parameters, this is BADWIDTH. + Offset int64 +} + +const ( + fieldIsDDD = 1 << iota // field is ... argument + fieldNointerface +) + +func (f *Field) IsDDD() bool { return f.flags&fieldIsDDD != 0 } +func (f *Field) Nointerface() bool { return f.flags&fieldNointerface != 0 } + +func (f *Field) SetIsDDD(b bool) { f.flags.set(fieldIsDDD, b) } +func (f *Field) SetNointerface(b bool) { f.flags.set(fieldNointerface, b) } + +// End returns the offset of the first byte immediately after this field. 
+func (f *Field) End() int64 { + return f.Offset + f.Type.width +} + +// IsMethod reports whether f represents a method rather than a struct field. +func (f *Field) IsMethod() bool { + return f.Type.kind == TFUNC && f.Type.Recv() != nil +} + +// fields is a pointer to a slice of *Field. +// This saves space in Types that do not have fields or methods +// compared to a simple slice of *Field. +type fields struct { + s *[]*Field +} + +// Slice returns the entries in f as a slice. +// Changes to the slice entries will be reflected in f. +func (f *fields) Slice() []*Field { + if f.s == nil { + return nil + } + return *f.s +} + +// Set sets f to a slice. +// This takes ownership of the slice. +func (f *fields) Set(s []*Field) { + if len(s) == 0 { + f.s = nil + } else { + // Copy s and take address of t rather than s to avoid + // allocation in the case where len(s) == 0. + t := s + f.s = &t + } +} + +// newType returns a new Type of the specified kind. +func newType(et Kind) *Type { + t := &Type{ + kind: et, + width: BADWIDTH, + } + t.underlying = t + // TODO(josharian): lazily initialize some of these? + switch t.kind { + case TMAP: + t.extra = new(Map) + case TFORW: + t.extra = new(Forward) + case TFUNC: + t.extra = new(Func) + case TSTRUCT: + t.extra = new(Struct) + case TINTER: + t.extra = new(Interface) + case TPTR: + t.extra = Ptr{} + case TCHANARGS: + t.extra = ChanArgs{} + case TFUNCARGS: + t.extra = FuncArgs{} + case TCHAN: + t.extra = new(Chan) + case TTUPLE: + t.extra = new(Tuple) + case TRESULTS: + t.extra = new(Results) + } + return t +} + +// NewArray returns a new fixed-length array Type. +func NewArray(elem *Type, bound int64) *Type { + if bound < 0 { + base.Fatalf("NewArray: invalid bound %v", bound) + } + t := newType(TARRAY) + t.extra = &Array{Elem: elem, Bound: bound} + if elem.HasShape() { + t.SetHasShape(true) + } + return t +} + +// NewSlice returns the slice Type with element type elem. 
+func NewSlice(elem *Type) *Type { + if t := elem.cache.slice; t != nil { + if t.Elem() != elem { + base.Fatalf("elem mismatch") + } + if elem.HasShape() != t.HasShape() { + base.Fatalf("Incorrect HasShape flag for cached slice type") + } + return t + } + + t := newType(TSLICE) + t.extra = Slice{Elem: elem} + elem.cache.slice = t + if elem.HasShape() { + t.SetHasShape(true) + } + return t +} + +// NewChan returns a new chan Type with direction dir. +func NewChan(elem *Type, dir ChanDir) *Type { + t := newType(TCHAN) + ct := t.chanType() + ct.Elem = elem + ct.Dir = dir + if elem.HasShape() { + t.SetHasShape(true) + } + return t +} + +func NewTuple(t1, t2 *Type) *Type { + t := newType(TTUPLE) + t.extra.(*Tuple).first = t1 + t.extra.(*Tuple).second = t2 + if t1.HasShape() || t2.HasShape() { + t.SetHasShape(true) + } + return t +} + +func newResults(types []*Type) *Type { + t := newType(TRESULTS) + t.extra.(*Results).Types = types + return t +} + +func NewResults(types []*Type) *Type { + if len(types) == 1 && types[0] == TypeMem { + return TypeResultMem + } + return newResults(types) +} + +func newSSA(name string) *Type { + t := newType(TSSA) + t.extra = name + return t +} + +// NewMap returns a new map Type with key type k and element (aka value) type v. +func NewMap(k, v *Type) *Type { + t := newType(TMAP) + mt := t.MapType() + mt.Key = k + mt.Elem = v + if k.HasShape() || v.HasShape() { + t.SetHasShape(true) + } + return t +} + +// NewPtrCacheEnabled controls whether *T Types are cached in T. +// Caching is disabled just before starting the backend. +// This allows the backend to run concurrently. +var NewPtrCacheEnabled = true + +// NewPtr returns the pointer type pointing to t. 
+func NewPtr(elem *Type) *Type { + if elem == nil { + base.Fatalf("NewPtr: pointer to elem Type is nil") + } + + if t := elem.cache.ptr; t != nil { + if t.Elem() != elem { + base.Fatalf("NewPtr: elem mismatch") + } + if elem.HasShape() != t.HasShape() { + base.Fatalf("Incorrect HasShape flag for cached pointer type") + } + return t + } + + t := newType(TPTR) + t.extra = Ptr{Elem: elem} + t.width = int64(PtrSize) + t.align = uint8(PtrSize) + t.intRegs = 1 + if NewPtrCacheEnabled { + elem.cache.ptr = t + } + if elem.HasShape() { + t.SetHasShape(true) + } + if elem.Noalg() { + t.SetNoalg(true) + } + return t +} + +// NewChanArgs returns a new TCHANARGS type for channel type c. +func NewChanArgs(c *Type) *Type { + t := newType(TCHANARGS) + t.extra = ChanArgs{T: c} + return t +} + +// NewFuncArgs returns a new TFUNCARGS type for func type f. +func NewFuncArgs(f *Type) *Type { + t := newType(TFUNCARGS) + t.extra = FuncArgs{T: f} + return t +} + +func NewField(pos src.XPos, sym *Sym, typ *Type) *Field { + f := &Field{ + Pos: pos, + Sym: sym, + Type: typ, + Offset: BADWIDTH, + } + if typ == nil { + base.Fatalf("typ is nil") + } + return f +} + +// SubstAny walks t, replacing instances of "any" with successive +// elements removed from types. It returns the substituted type. +func SubstAny(t *Type, types *[]*Type) *Type { + if t == nil { + return nil + } + + switch t.kind { + default: + // Leave the type unchanged. 
+ + case TANY: + if len(*types) == 0 { + base.Fatalf("SubstArgTypes: not enough argument types") + } + t = (*types)[0] + *types = (*types)[1:] + + case TPTR: + elem := SubstAny(t.Elem(), types) + if elem != t.Elem() { + t = t.copy() + t.extra = Ptr{Elem: elem} + } + + case TARRAY: + elem := SubstAny(t.Elem(), types) + if elem != t.Elem() { + t = t.copy() + t.extra.(*Array).Elem = elem + } + + case TSLICE: + elem := SubstAny(t.Elem(), types) + if elem != t.Elem() { + t = t.copy() + t.extra = Slice{Elem: elem} + } + + case TCHAN: + elem := SubstAny(t.Elem(), types) + if elem != t.Elem() { + t = t.copy() + t.extra.(*Chan).Elem = elem + } + + case TMAP: + key := SubstAny(t.Key(), types) + elem := SubstAny(t.Elem(), types) + if key != t.Key() || elem != t.Elem() { + t = t.copy() + t.extra.(*Map).Key = key + t.extra.(*Map).Elem = elem + } + + case TFUNC: + ft := t.funcType() + allParams := substFields(ft.allParams, types) + + t = t.copy() + ft = t.funcType() + ft.allParams = allParams + + rt := ft.resultsTuple + rt = rt.copy() + ft.resultsTuple = rt + rt.setFields(t.Results()) + + case TSTRUCT: + // Make a copy of all fields, including ones whose type does not change. + // This prevents aliasing across functions, which can lead to later + // fields getting their Offset incorrectly overwritten. + nfs := substFields(t.Fields(), types) + t = t.copy() + t.setFields(nfs) + } + + return t +} + +func substFields(fields []*Field, types *[]*Type) []*Field { + nfs := make([]*Field, len(fields)) + for i, f := range fields { + nft := SubstAny(f.Type, types) + nfs[i] = f.Copy() + nfs[i].Type = nft + } + return nfs +} + +// copy returns a shallow copy of the Type. 
+func (t *Type) copy() *Type { + if t == nil { + return nil + } + nt := *t + // copy any *T Extra fields, to avoid aliasing + switch t.kind { + case TMAP: + x := *t.extra.(*Map) + nt.extra = &x + case TFORW: + x := *t.extra.(*Forward) + nt.extra = &x + case TFUNC: + x := *t.extra.(*Func) + nt.extra = &x + case TSTRUCT: + x := *t.extra.(*Struct) + nt.extra = &x + case TINTER: + x := *t.extra.(*Interface) + nt.extra = &x + case TCHAN: + x := *t.extra.(*Chan) + nt.extra = &x + case TARRAY: + x := *t.extra.(*Array) + nt.extra = &x + case TTUPLE, TSSA, TRESULTS: + base.Fatalf("ssa types cannot be copied") + } + // TODO(mdempsky): Find out why this is necessary and explain. + if t.underlying == t { + nt.underlying = &nt + } + return &nt +} + +func (f *Field) Copy() *Field { + nf := *f + return &nf +} + +func (t *Type) wantEtype(et Kind) { + if t.kind != et { + base.Fatalf("want %v, but have %v", et, t) + } +} + +// ResultTuple returns the result type of signature type t as a tuple. +// This can be used as the type of multi-valued call expressions. +func (t *Type) ResultsTuple() *Type { return t.funcType().resultsTuple } + +// Recvs returns a slice of receiver parameters of signature type t. +// The returned slice always has length 0 or 1. +func (t *Type) Recvs() []*Field { return t.funcType().recvs() } + +// Params returns a slice of regular parameters of signature type t. +func (t *Type) Params() []*Field { return t.funcType().params() } + +// Results returns a slice of result parameters of signature type t. +func (t *Type) Results() []*Field { return t.funcType().results() } + +// RecvsParamsResults returns a slice containing all of the +// signature's parameters in receiver (if any), (normal) parameters, +// and then results. +func (t *Type) RecvParamsResults() []*Field { return t.funcType().allParams } + +// RecvParams returns a slice containing the signature's receiver (if +// any) followed by its (normal) parameters. 
+func (t *Type) RecvParams() []*Field { return t.funcType().recvParams() } + +// ParamsResults returns a slice containing the signature's (normal) +// parameters followed by its results. +func (t *Type) ParamsResults() []*Field { return t.funcType().paramsResults() } + +func (t *Type) NumRecvs() int { return len(t.Recvs()) } +func (t *Type) NumParams() int { return len(t.Params()) } +func (t *Type) NumResults() int { return len(t.Results()) } + +// IsVariadic reports whether function type t is variadic. +func (t *Type) IsVariadic() bool { + n := t.NumParams() + return n > 0 && t.Param(n-1).IsDDD() +} + +// Recv returns the receiver of function type t, if any. +func (t *Type) Recv() *Field { + if s := t.Recvs(); len(s) == 1 { + return s[0] + } + return nil +} + +// Param returns the i'th parameter of signature type t. +func (t *Type) Param(i int) *Field { return t.Params()[i] } + +// Result returns the i'th result of signature type t. +func (t *Type) Result(i int) *Field { return t.Results()[i] } + +// Key returns the key type of map type t. +func (t *Type) Key() *Type { + t.wantEtype(TMAP) + return t.extra.(*Map).Key +} + +// Elem returns the type of elements of t. +// Usable with pointers, channels, arrays, slices, and maps. +func (t *Type) Elem() *Type { + switch t.kind { + case TPTR: + return t.extra.(Ptr).Elem + case TARRAY: + return t.extra.(*Array).Elem + case TSLICE: + return t.extra.(Slice).Elem + case TCHAN: + return t.extra.(*Chan).Elem + case TMAP: + return t.extra.(*Map).Elem + } + base.Fatalf("Type.Elem %s", t.kind) + return nil +} + +// ChanArgs returns the channel type for TCHANARGS type t. +func (t *Type) ChanArgs() *Type { + t.wantEtype(TCHANARGS) + return t.extra.(ChanArgs).T +} + +// FuncArgs returns the func type for TFUNCARGS type t. +func (t *Type) FuncArgs() *Type { + t.wantEtype(TFUNCARGS) + return t.extra.(FuncArgs).T +} + +// IsFuncArgStruct reports whether t is a struct representing function parameters or results. 
+func (t *Type) IsFuncArgStruct() bool { + return t.kind == TSTRUCT && t.extra.(*Struct).ParamTuple +} + +// Methods returns a pointer to the base methods (excluding embedding) for type t. +// These can either be concrete methods (for non-interface types) or interface +// methods (for interface types). +func (t *Type) Methods() []*Field { + return t.methods.Slice() +} + +// AllMethods returns a pointer to all the methods (including embedding) for type t. +// For an interface type, this is the set of methods that are typically iterated +// over. For non-interface types, AllMethods() only returns a valid result after +// CalcMethods() has been called at least once. +func (t *Type) AllMethods() []*Field { + if t.kind == TINTER { + // Calculate the full method set of an interface type on the fly + // now, if not done yet. + CalcSize(t) + } + return t.allMethods.Slice() +} + +// SetMethods sets the direct method set for type t (i.e., *not* +// including promoted methods from embedded types). +func (t *Type) SetMethods(fs []*Field) { + t.methods.Set(fs) +} + +// SetAllMethods sets the set of all methods for type t (i.e., +// including promoted methods from embedded types). +func (t *Type) SetAllMethods(fs []*Field) { + t.allMethods.Set(fs) +} + +// fields returns the fields of struct type t. +func (t *Type) fields() *fields { + t.wantEtype(TSTRUCT) + return &t.extra.(*Struct).fields +} + +// Field returns the i'th field of struct type t. +func (t *Type) Field(i int) *Field { return t.Fields()[i] } + +// Fields returns a slice of containing all fields of +// a struct type t. +func (t *Type) Fields() []*Field { return t.fields().Slice() } + +// setFields sets struct type t's fields to fields. +func (t *Type) setFields(fields []*Field) { + // If we've calculated the width of t before, + // then some other type such as a function signature + // might now have the wrong type. 
+ // Rather than try to track and invalidate those, + // enforce that SetFields cannot be called once + // t's width has been calculated. + if t.widthCalculated() { + base.Fatalf("SetFields of %v: width previously calculated", t) + } + t.wantEtype(TSTRUCT) + t.fields().Set(fields) +} + +// SetInterface sets the base methods of an interface type t. +func (t *Type) SetInterface(methods []*Field) { + t.wantEtype(TINTER) + t.methods.Set(methods) +} + +// ArgWidth returns the total aligned argument size for a function. +// It includes the receiver, parameters, and results. +func (t *Type) ArgWidth() int64 { + t.wantEtype(TFUNC) + return t.extra.(*Func).Argwid +} + +func (t *Type) Size() int64 { + if t.kind == TSSA { + if t == TypeInt128 { + return 16 + } + return 0 + } + CalcSize(t) + return t.width +} + +func (t *Type) Alignment() int64 { + CalcSize(t) + return int64(t.align) +} + +func (t *Type) SimpleString() string { + return t.kind.String() +} + +// Cmp is a comparison between values a and b. +// +// -1 if a < b +// 0 if a == b +// 1 if a > b +type Cmp int8 + +const ( + CMPlt = Cmp(-1) + CMPeq = Cmp(0) + CMPgt = Cmp(1) +) + +// Compare compares types for purposes of the SSA back +// end, returning a Cmp (one of CMPlt, CMPeq, CMPgt). +// The answers are correct for an optimizer +// or code generator, but not necessarily typechecking. +// The order chosen is arbitrary, only consistency and division +// into equivalence classes (Types that compare CMPeq) matters. 
+func (t *Type) Compare(x *Type) Cmp { + if x == t { + return CMPeq + } + return t.cmp(x) +} + +func cmpForNe(x bool) Cmp { + if x { + return CMPlt + } + return CMPgt +} + +func (r *Sym) cmpsym(s *Sym) Cmp { + if r == s { + return CMPeq + } + if r == nil { + return CMPlt + } + if s == nil { + return CMPgt + } + // Fast sort, not pretty sort + if len(r.Name) != len(s.Name) { + return cmpForNe(len(r.Name) < len(s.Name)) + } + if r.Pkg != s.Pkg { + if len(r.Pkg.Prefix) != len(s.Pkg.Prefix) { + return cmpForNe(len(r.Pkg.Prefix) < len(s.Pkg.Prefix)) + } + if r.Pkg.Prefix != s.Pkg.Prefix { + return cmpForNe(r.Pkg.Prefix < s.Pkg.Prefix) + } + } + if r.Name != s.Name { + return cmpForNe(r.Name < s.Name) + } + return CMPeq +} + +// cmp compares two *Types t and x, returning CMPlt, +// CMPeq, CMPgt as tx, for an arbitrary +// and optimizer-centric notion of comparison. +// TODO(josharian): make this safe for recursive interface types +// and use in signatlist sorting. See issue 19869. +func (t *Type) cmp(x *Type) Cmp { + // This follows the structure of function identical in identity.go + // with two exceptions. + // 1. Symbols are compared more carefully because a <,=,> result is desired. + // 2. Maps are treated specially to avoid endless recursion -- maps + // contain an internal data type not expressible in Go source code. + if t == x { + return CMPeq + } + if t == nil { + return CMPlt + } + if x == nil { + return CMPgt + } + + if t.kind != x.kind { + return cmpForNe(t.kind < x.kind) + } + + if t.obj != nil || x.obj != nil { + // Special case: we keep byte and uint8 separate + // for error messages. Treat them as equal. + switch t.kind { + case TUINT8: + if (t == Types[TUINT8] || t == ByteType) && (x == Types[TUINT8] || x == ByteType) { + return CMPeq + } + + case TINT32: + if (t == Types[RuneType.kind] || t == RuneType) && (x == Types[RuneType.kind] || x == RuneType) { + return CMPeq + } + + case TINTER: + // Make sure named any type matches any empty interface. 
+ if t == AnyType && x.IsEmptyInterface() || x == AnyType && t.IsEmptyInterface() { + return CMPeq + } + } + } + + if c := t.Sym().cmpsym(x.Sym()); c != CMPeq { + return c + } + + if x.obj != nil { + return CMPeq + } + // both syms nil, look at structure below. + + switch t.kind { + case TBOOL, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TUNSAFEPTR, TUINTPTR, + TINT8, TINT16, TINT32, TINT64, TINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINT: + return CMPeq + + case TSSA: + tname := t.extra.(string) + xname := x.extra.(string) + // desire fast sorting, not pretty sorting. + if len(tname) == len(xname) { + if tname == xname { + return CMPeq + } + if tname < xname { + return CMPlt + } + return CMPgt + } + if len(tname) > len(xname) { + return CMPgt + } + return CMPlt + + case TTUPLE: + xtup := x.extra.(*Tuple) + ttup := t.extra.(*Tuple) + if c := ttup.first.Compare(xtup.first); c != CMPeq { + return c + } + return ttup.second.Compare(xtup.second) + + case TRESULTS: + xResults := x.extra.(*Results) + tResults := t.extra.(*Results) + xl, tl := len(xResults.Types), len(tResults.Types) + if tl != xl { + if tl < xl { + return CMPlt + } + return CMPgt + } + for i := 0; i < tl; i++ { + if c := tResults.Types[i].Compare(xResults.Types[i]); c != CMPeq { + return c + } + } + return CMPeq + + case TMAP: + if c := t.Key().cmp(x.Key()); c != CMPeq { + return c + } + return t.Elem().cmp(x.Elem()) + + case TPTR, TSLICE: + // No special cases for these, they are handled + // by the general code after the switch. 
+ + case TSTRUCT: + if t.StructType().Map == nil { + if x.StructType().Map != nil { + return CMPlt // nil < non-nil + } + // to the fallthrough + } else if x.StructType().Map == nil { + return CMPgt // nil > non-nil + } else if t.StructType().Map.MapType().Bucket == t { + // Both have non-nil Map + // Special case for Maps which include a recursive type where the recursion is not broken with a named type + if x.StructType().Map.MapType().Bucket != x { + return CMPlt // bucket maps are least + } + return t.StructType().Map.cmp(x.StructType().Map) + } else if x.StructType().Map.MapType().Bucket == x { + return CMPgt // bucket maps are least + } // If t != t.Map.Bucket, fall through to general case + + tfs := t.Fields() + xfs := x.Fields() + for i := 0; i < len(tfs) && i < len(xfs); i++ { + t1, x1 := tfs[i], xfs[i] + if t1.Embedded != x1.Embedded { + return cmpForNe(t1.Embedded < x1.Embedded) + } + if t1.Note != x1.Note { + return cmpForNe(t1.Note < x1.Note) + } + if c := t1.Sym.cmpsym(x1.Sym); c != CMPeq { + return c + } + if c := t1.Type.cmp(x1.Type); c != CMPeq { + return c + } + } + if len(tfs) != len(xfs) { + return cmpForNe(len(tfs) < len(xfs)) + } + return CMPeq + + case TINTER: + tfs := t.AllMethods() + xfs := x.AllMethods() + for i := 0; i < len(tfs) && i < len(xfs); i++ { + t1, x1 := tfs[i], xfs[i] + if c := t1.Sym.cmpsym(x1.Sym); c != CMPeq { + return c + } + if c := t1.Type.cmp(x1.Type); c != CMPeq { + return c + } + } + if len(tfs) != len(xfs) { + return cmpForNe(len(tfs) < len(xfs)) + } + return CMPeq + + case TFUNC: + if tn, xn := t.NumRecvs(), x.NumRecvs(); tn != xn { + return cmpForNe(tn < xn) + } + if tn, xn := t.NumParams(), x.NumParams(); tn != xn { + return cmpForNe(tn < xn) + } + if tn, xn := t.NumResults(), x.NumResults(); tn != xn { + return cmpForNe(tn < xn) + } + if tv, xv := t.IsVariadic(), x.IsVariadic(); tv != xv { + return cmpForNe(!tv) + } + + tfs := t.RecvParamsResults() + xfs := x.RecvParamsResults() + for i, tf := range tfs { + if c 
:= tf.Type.cmp(xfs[i].Type); c != CMPeq { + return c + } + } + return CMPeq + + case TARRAY: + if t.NumElem() != x.NumElem() { + return cmpForNe(t.NumElem() < x.NumElem()) + } + + case TCHAN: + if t.ChanDir() != x.ChanDir() { + return cmpForNe(t.ChanDir() < x.ChanDir()) + } + + default: + e := fmt.Sprintf("Do not know how to compare %v with %v", t, x) + panic(e) + } + + // Common element type comparison for TARRAY, TCHAN, TPTR, and TSLICE. + return t.Elem().cmp(x.Elem()) +} + +// IsKind reports whether t is a Type of the specified kind. +func (t *Type) IsKind(et Kind) bool { + return t != nil && t.kind == et +} + +func (t *Type) IsBoolean() bool { + return t.kind == TBOOL +} + +var unsignedEType = [...]Kind{ + TINT8: TUINT8, + TUINT8: TUINT8, + TINT16: TUINT16, + TUINT16: TUINT16, + TINT32: TUINT32, + TUINT32: TUINT32, + TINT64: TUINT64, + TUINT64: TUINT64, + TINT: TUINT, + TUINT: TUINT, + TUINTPTR: TUINTPTR, +} + +// ToUnsigned returns the unsigned equivalent of integer type t. +func (t *Type) ToUnsigned() *Type { + if !t.IsInteger() { + base.Fatalf("unsignedType(%v)", t) + } + return Types[unsignedEType[t.kind]] +} + +func (t *Type) IsInteger() bool { + switch t.kind { + case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR: + return true + } + return t == UntypedInt || t == UntypedRune +} + +func (t *Type) IsSigned() bool { + switch t.kind { + case TINT8, TINT16, TINT32, TINT64, TINT: + return true + } + return false +} + +func (t *Type) IsUnsigned() bool { + switch t.kind { + case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR: + return true + } + return false +} + +func (t *Type) IsFloat() bool { + return t.kind == TFLOAT32 || t.kind == TFLOAT64 || t == UntypedFloat +} + +func (t *Type) IsComplex() bool { + return t.kind == TCOMPLEX64 || t.kind == TCOMPLEX128 || t == UntypedComplex +} + +// IsPtr reports whether t is a regular Go pointer type. +// This does not include unsafe.Pointer. 
+func (t *Type) IsPtr() bool { + return t.kind == TPTR +} + +// IsPtrElem reports whether t is the element of a pointer (to t). +func (t *Type) IsPtrElem() bool { + return t.cache.ptr != nil +} + +// IsUnsafePtr reports whether t is an unsafe pointer. +func (t *Type) IsUnsafePtr() bool { + return t.kind == TUNSAFEPTR +} + +// IsUintptr reports whether t is a uintptr. +func (t *Type) IsUintptr() bool { + return t.kind == TUINTPTR +} + +// IsPtrShaped reports whether t is represented by a single machine pointer. +// In addition to regular Go pointer types, this includes map, channel, and +// function types and unsafe.Pointer. It does not include array or struct types +// that consist of a single pointer shaped type. +// TODO(mdempsky): Should it? See golang.org/issue/15028. +func (t *Type) IsPtrShaped() bool { + return t.kind == TPTR || t.kind == TUNSAFEPTR || + t.kind == TMAP || t.kind == TCHAN || t.kind == TFUNC +} + +// HasNil reports whether the set of values determined by t includes nil. +func (t *Type) HasNil() bool { + switch t.kind { + case TCHAN, TFUNC, TINTER, TMAP, TNIL, TPTR, TSLICE, TUNSAFEPTR: + return true + } + return false +} + +func (t *Type) IsString() bool { + return t.kind == TSTRING +} + +func (t *Type) IsMap() bool { + return t.kind == TMAP +} + +func (t *Type) IsChan() bool { + return t.kind == TCHAN +} + +func (t *Type) IsSlice() bool { + return t.kind == TSLICE +} + +func (t *Type) IsArray() bool { + return t.kind == TARRAY +} + +func (t *Type) IsStruct() bool { + return t.kind == TSTRUCT +} + +func (t *Type) IsInterface() bool { + return t.kind == TINTER +} + +// IsEmptyInterface reports whether t is an empty interface type. +func (t *Type) IsEmptyInterface() bool { + return t.IsInterface() && len(t.AllMethods()) == 0 +} + +// IsScalar reports whether 't' is a scalar Go type, e.g. +// bool/int/float/complex. 
Note that struct and array types consisting +// of a single scalar element are not considered scalar, likewise +// pointer types are also not considered scalar. +func (t *Type) IsScalar() bool { + switch t.kind { + case TBOOL, TINT8, TUINT8, TINT16, TUINT16, TINT32, + TUINT32, TINT64, TUINT64, TINT, TUINT, + TUINTPTR, TCOMPLEX64, TCOMPLEX128, TFLOAT32, TFLOAT64: + return true + } + return false +} + +func (t *Type) PtrTo() *Type { + return NewPtr(t) +} + +func (t *Type) NumFields() int { + if t.kind == TRESULTS { + return len(t.extra.(*Results).Types) + } + return len(t.Fields()) +} +func (t *Type) FieldType(i int) *Type { + if t.kind == TTUPLE { + switch i { + case 0: + return t.extra.(*Tuple).first + case 1: + return t.extra.(*Tuple).second + default: + panic("bad tuple index") + } + } + if t.kind == TRESULTS { + return t.extra.(*Results).Types[i] + } + return t.Field(i).Type +} +func (t *Type) FieldOff(i int) int64 { + return t.Field(i).Offset +} +func (t *Type) FieldName(i int) string { + return t.Field(i).Sym.Name +} + +// OffsetOf reports the offset of the field of a struct. +// The field is looked up by name. +func (t *Type) OffsetOf(name string) int64 { + if t.kind != TSTRUCT { + base.Fatalf("can't call OffsetOf on non-struct %v", t) + } + for _, f := range t.Fields() { + if f.Sym.Name == name { + return f.Offset + } + } + base.Fatalf("couldn't find field %s in %v", name, t) + return -1 +} + +func (t *Type) NumElem() int64 { + t.wantEtype(TARRAY) + return t.extra.(*Array).Bound +} + +type componentsIncludeBlankFields bool + +const ( + IgnoreBlankFields componentsIncludeBlankFields = false + CountBlankFields componentsIncludeBlankFields = true +) + +// NumComponents returns the number of primitive elements that compose t. +// Struct and array types are flattened for the purpose of counting. +// All other types (including string, slice, and interface types) count as one element. 
+// If countBlank is IgnoreBlankFields, then blank struct fields +// (and their comprised elements) are excluded from the count. +// struct { x, y [3]int } has six components; [10]struct{ x, y string } has twenty. +func (t *Type) NumComponents(countBlank componentsIncludeBlankFields) int64 { + switch t.kind { + case TSTRUCT: + if t.IsFuncArgStruct() { + base.Fatalf("NumComponents func arg struct") + } + var n int64 + for _, f := range t.Fields() { + if countBlank == IgnoreBlankFields && f.Sym.IsBlank() { + continue + } + n += f.Type.NumComponents(countBlank) + } + return n + case TARRAY: + return t.NumElem() * t.Elem().NumComponents(countBlank) + } + return 1 +} + +// SoleComponent returns the only primitive component in t, +// if there is exactly one. Otherwise, it returns nil. +// Components are counted as in NumComponents, including blank fields. +// Keep in sync with cmd/compile/internal/walk/convert.go:soleComponent. +func (t *Type) SoleComponent() *Type { + switch t.kind { + case TSTRUCT: + if t.IsFuncArgStruct() { + base.Fatalf("SoleComponent func arg struct") + } + if t.NumFields() != 1 { + return nil + } + return t.Field(0).Type.SoleComponent() + case TARRAY: + if t.NumElem() != 1 { + return nil + } + return t.Elem().SoleComponent() + } + return t +} + +// ChanDir returns the direction of a channel type t. +// The direction will be one of Crecv, Csend, or Cboth. 
+func (t *Type) ChanDir() ChanDir { + t.wantEtype(TCHAN) + return t.extra.(*Chan).Dir +} + +func (t *Type) IsMemory() bool { + if t == TypeMem || t.kind == TTUPLE && t.extra.(*Tuple).second == TypeMem { + return true + } + if t.kind == TRESULTS { + if types := t.extra.(*Results).Types; len(types) > 0 && types[len(types)-1] == TypeMem { + return true + } + } + return false +} +func (t *Type) IsFlags() bool { return t == TypeFlags } +func (t *Type) IsVoid() bool { return t == TypeVoid } +func (t *Type) IsTuple() bool { return t.kind == TTUPLE } +func (t *Type) IsResults() bool { return t.kind == TRESULTS } + +// IsUntyped reports whether t is an untyped type. +func (t *Type) IsUntyped() bool { + if t == nil { + return false + } + if t == UntypedString || t == UntypedBool { + return true + } + switch t.kind { + case TNIL, TIDEAL: + return true + } + return false +} + +// HasPointers reports whether t contains a heap pointer. +// Note that this function ignores pointers to not-in-heap types. +func (t *Type) HasPointers() bool { + return PtrDataSize(t) > 0 +} + +var recvType *Type + +// FakeRecvType returns the singleton type used for interface method receivers. +func FakeRecvType() *Type { + if recvType == nil { + recvType = NewPtr(newType(TSTRUCT)) + } + return recvType +} + +func FakeRecv() *Field { + return NewField(base.AutogeneratedPos, nil, FakeRecvType()) +} + +var ( + // TSSA types. HasPointers assumes these are pointer-free. + TypeInvalid = newSSA("invalid") + TypeMem = newSSA("mem") + TypeFlags = newSSA("flags") + TypeVoid = newSSA("void") + TypeInt128 = newSSA("int128") + TypeResultMem = newResults([]*Type{TypeMem}) +) + +func init() { + TypeInt128.width = 16 + TypeInt128.align = 8 +} + +// NewNamed returns a new named type for the given type name. obj should be an +// ir.Name. The new type is incomplete (marked as TFORW kind), and the underlying +// type should be set later via SetUnderlying(). 
References to the type are +// maintained until the type is filled in, so those references can be updated when +// the type is complete. +func NewNamed(obj Object) *Type { + t := newType(TFORW) + t.obj = obj + if obj.Sym().Pkg == ShapePkg { + t.SetIsShape(true) + t.SetHasShape(true) + } + return t +} + +// Obj returns the canonical type name node for a named type t, nil for an unnamed type. +func (t *Type) Obj() Object { + return t.obj +} + +// SetUnderlying sets the underlying type of an incomplete type (i.e. type whose kind +// is currently TFORW). SetUnderlying automatically updates any types that were waiting +// for this type to be completed. +func (t *Type) SetUnderlying(underlying *Type) { + if underlying.kind == TFORW { + // This type isn't computed yet; when it is, update n. + underlying.forwardType().Copyto = append(underlying.forwardType().Copyto, t) + return + } + + ft := t.forwardType() + + // TODO(mdempsky): Fix Type rekinding. + t.kind = underlying.kind + t.extra = underlying.extra + t.width = underlying.width + t.align = underlying.align + t.intRegs = underlying.intRegs + t.floatRegs = underlying.floatRegs + t.underlying = underlying.underlying + + if underlying.NotInHeap() { + t.SetNotInHeap(true) + } + if underlying.HasShape() { + t.SetHasShape(true) + } + + // spec: "The declared type does not inherit any methods bound + // to the existing type, but the method set of an interface + // type [...] remains unchanged." + if t.IsInterface() { + t.methods = underlying.methods + t.allMethods = underlying.allMethods + } + + // Update types waiting on this type. + for _, w := range ft.Copyto { + w.SetUnderlying(t) + } + + // Double-check use of type as embedded type. 
+ if ft.Embedlineno.IsKnown() { + if t.IsPtr() || t.IsUnsafePtr() { + base.ErrorfAt(ft.Embedlineno, errors.InvalidPtrEmbed, "embedded type cannot be a pointer") + } + } +} + +func fieldsHasShape(fields []*Field) bool { + for _, f := range fields { + if f.Type != nil && f.Type.HasShape() { + return true + } + } + return false +} + +// newBasic returns a new basic type of the given kind. +func newBasic(kind Kind, obj Object) *Type { + t := newType(kind) + t.obj = obj + return t +} + +// NewInterface returns a new interface for the given methods and +// embedded types. Embedded types are specified as fields with no Sym. +func NewInterface(methods []*Field) *Type { + t := newType(TINTER) + t.SetInterface(methods) + for _, f := range methods { + // f.Type could be nil for a broken interface declaration + if f.Type != nil && f.Type.HasShape() { + t.SetHasShape(true) + break + } + } + return t +} + +// NewSignature returns a new function type for the given receiver, +// parameters, and results, any of which may be nil. +func NewSignature(recv *Field, params, results []*Field) *Type { + startParams := 0 + if recv != nil { + startParams = 1 + } + startResults := startParams + len(params) + + allParams := make([]*Field, startResults+len(results)) + if recv != nil { + allParams[0] = recv + } + copy(allParams[startParams:], params) + copy(allParams[startResults:], results) + + t := newType(TFUNC) + ft := t.funcType() + + funargs := func(fields []*Field) *Type { + s := NewStruct(fields) + s.StructType().ParamTuple = true + return s + } + + ft.allParams = allParams + ft.startParams = startParams + ft.startResults = startResults + + ft.resultsTuple = funargs(allParams[startResults:]) + + if fieldsHasShape(allParams) { + t.SetHasShape(true) + } + + return t +} + +// NewStruct returns a new struct with the given fields. 
+func NewStruct(fields []*Field) *Type { + t := newType(TSTRUCT) + t.setFields(fields) + if fieldsHasShape(fields) { + t.SetHasShape(true) + } + return t +} + +var ( + IsInt [NTYPE]bool + IsFloat [NTYPE]bool + IsComplex [NTYPE]bool + IsSimple [NTYPE]bool +) + +var IsOrdered [NTYPE]bool + +// IsReflexive reports whether t has a reflexive equality operator. +// That is, if x==x for all x of type t. +func IsReflexive(t *Type) bool { + switch t.Kind() { + case TBOOL, + TINT, + TUINT, + TINT8, + TUINT8, + TINT16, + TUINT16, + TINT32, + TUINT32, + TINT64, + TUINT64, + TUINTPTR, + TPTR, + TUNSAFEPTR, + TSTRING, + TCHAN: + return true + + case TFLOAT32, + TFLOAT64, + TCOMPLEX64, + TCOMPLEX128, + TINTER: + return false + + case TARRAY: + return IsReflexive(t.Elem()) + + case TSTRUCT: + for _, t1 := range t.Fields() { + if !IsReflexive(t1.Type) { + return false + } + } + return true + + default: + base.Fatalf("bad type for map key: %v", t) + return false + } +} + +// Can this type be stored directly in an interface word? +// Yes, if the representation is a single pointer. +func IsDirectIface(t *Type) bool { + switch t.Kind() { + case TPTR: + // Pointers to notinheap types must be stored indirectly. See issue 42076. + return !t.Elem().NotInHeap() + case TCHAN, + TMAP, + TFUNC, + TUNSAFEPTR: + return true + + case TARRAY: + // Array of 1 direct iface type can be direct. + return t.NumElem() == 1 && IsDirectIface(t.Elem()) + + case TSTRUCT: + // Struct with 1 field of direct iface type can be direct. + return t.NumFields() == 1 && IsDirectIface(t.Field(0).Type) + } + + return false +} + +// IsInterfaceMethod reports whether (field) m is +// an interface method. Such methods have the +// special receiver type types.FakeRecvType(). +func IsInterfaceMethod(f *Type) bool { + return f.Recv().Type == FakeRecvType() +} + +// IsMethodApplicable reports whether method m can be called on a +// value of type t. 
This is necessary because we compute a single +// method set for both T and *T, but some *T methods are not +// applicable to T receivers. +func IsMethodApplicable(t *Type, m *Field) bool { + return t.IsPtr() || !m.Type.Recv().Type.IsPtr() || IsInterfaceMethod(m.Type) || m.Embedded == 2 +} + +// RuntimeSymName returns the name of s if it's in package "runtime"; otherwise +// it returns "". +func RuntimeSymName(s *Sym) string { + if s.Pkg.Path == "runtime" { + return s.Name + } + return "" +} + +// ReflectSymName returns the name of s if it's in package "reflect"; otherwise +// it returns "". +func ReflectSymName(s *Sym) string { + if s.Pkg.Path == "reflect" { + return s.Name + } + return "" +} + +// IsNoInstrumentPkg reports whether p is a package that +// should not be instrumented. +func IsNoInstrumentPkg(p *Pkg) bool { + return objabi.LookupPkgSpecial(p.Path).NoInstrument +} + +// IsNoRacePkg reports whether p is a package that +// should not be race instrumented. +func IsNoRacePkg(p *Pkg) bool { + return objabi.LookupPkgSpecial(p.Path).NoRaceFunc +} + +// ReceiverBaseType returns the underlying type, if any, +// that owns methods with receiver parameter t. +// The result is either a named type or an anonymous struct. +func ReceiverBaseType(t *Type) *Type { + if t == nil { + return nil + } + + // Strip away pointer if it's there. + if t.IsPtr() { + if t.Sym() != nil { + return nil + } + t = t.Elem() + if t == nil { + return nil + } + } + + // Must be a named type or anonymous struct. + if t.Sym() == nil && !t.IsStruct() { + return nil + } + + // Check types. 
+ if IsSimple[t.Kind()] { + return t + } + switch t.Kind() { + case TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRING, TSTRUCT: + return t + } + return nil +} + +func FloatForComplex(t *Type) *Type { + switch t.Kind() { + case TCOMPLEX64: + return Types[TFLOAT32] + case TCOMPLEX128: + return Types[TFLOAT64] + } + base.Fatalf("unexpected type: %v", t) + return nil +} + +func ComplexForFloat(t *Type) *Type { + switch t.Kind() { + case TFLOAT32: + return Types[TCOMPLEX64] + case TFLOAT64: + return Types[TCOMPLEX128] + } + base.Fatalf("unexpected type: %v", t) + return nil +} + +func TypeSym(t *Type) *Sym { + return TypeSymLookup(TypeSymName(t)) +} + +func TypeSymLookup(name string) *Sym { + typepkgmu.Lock() + s := typepkg.Lookup(name) + typepkgmu.Unlock() + return s +} + +func TypeSymName(t *Type) string { + name := t.LinkString() + // Use a separate symbol name for Noalg types for #17752. + if TypeHasNoAlg(t) { + name = "noalg." + name + } + return name +} + +// Fake package for runtime type info (headers) +// Don't access directly, use typeLookup below. +var ( + typepkgmu sync.Mutex // protects typepkg lookups + typepkg = NewPkg("type", "type") +) + +var SimType [NTYPE]Kind + +// Fake package for shape types (see typecheck.Shapify()). +var ShapePkg = NewPkg("go.shape", "go.shape") diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/type_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/type_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1fd05b3f5e8c7677beebde97c47b622f5ac0431c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/type_test.go @@ -0,0 +1,27 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package types + +import ( + "testing" +) + +func TestSSACompare(t *testing.T) { + a := []*Type{ + TypeInvalid, + TypeMem, + TypeFlags, + TypeVoid, + TypeInt128, + } + for _, x := range a { + for _, y := range a { + c := x.Compare(y) + if x == y && c != CMPeq || x != y && c == CMPeq { + t.Errorf("%s compare %s == %d\n", x.extra, y.extra, c) + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/universe.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/universe.go new file mode 100644 index 0000000000000000000000000000000000000000..d1800f217c96ad305b014a9007a8c2961b44faf5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/universe.go @@ -0,0 +1,154 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types + +import ( + "cmd/compile/internal/base" + "cmd/internal/src" +) + +var basicTypes = [...]struct { + name string + etype Kind +}{ + {"int8", TINT8}, + {"int16", TINT16}, + {"int32", TINT32}, + {"int64", TINT64}, + {"uint8", TUINT8}, + {"uint16", TUINT16}, + {"uint32", TUINT32}, + {"uint64", TUINT64}, + {"float32", TFLOAT32}, + {"float64", TFLOAT64}, + {"complex64", TCOMPLEX64}, + {"complex128", TCOMPLEX128}, + {"bool", TBOOL}, + {"string", TSTRING}, +} + +var typedefs = [...]struct { + name string + etype Kind + sameas32 Kind + sameas64 Kind +}{ + {"int", TINT, TINT32, TINT64}, + {"uint", TUINT, TUINT32, TUINT64}, + {"uintptr", TUINTPTR, TUINT32, TUINT64}, +} + +func InitTypes(defTypeName func(sym *Sym, typ *Type) Object) { + if PtrSize == 0 { + base.Fatalf("InitTypes called before PtrSize was set") + } + + SlicePtrOffset = 0 + SliceLenOffset = RoundUp(SlicePtrOffset+int64(PtrSize), int64(PtrSize)) + SliceCapOffset = RoundUp(SliceLenOffset+int64(PtrSize), int64(PtrSize)) + SliceSize = RoundUp(SliceCapOffset+int64(PtrSize), int64(PtrSize)) + + // string is same 
as slice wo the cap + StringSize = RoundUp(SliceLenOffset+int64(PtrSize), int64(PtrSize)) + + for et := Kind(0); et < NTYPE; et++ { + SimType[et] = et + } + + Types[TANY] = newType(TANY) // note: an old placeholder type, NOT the new builtin 'any' alias for interface{} + Types[TINTER] = NewInterface(nil) + CheckSize(Types[TINTER]) + + defBasic := func(kind Kind, pkg *Pkg, name string) *Type { + typ := newType(kind) + obj := defTypeName(pkg.Lookup(name), typ) + typ.obj = obj + if kind != TANY { + CheckSize(typ) + } + return typ + } + + for _, s := range &basicTypes { + Types[s.etype] = defBasic(s.etype, BuiltinPkg, s.name) + } + + for _, s := range &typedefs { + sameas := s.sameas32 + if PtrSize == 8 { + sameas = s.sameas64 + } + SimType[s.etype] = sameas + + Types[s.etype] = defBasic(s.etype, BuiltinPkg, s.name) + } + + // We create separate byte and rune types for better error messages + // rather than just creating type alias *Sym's for the uint8 and + // int32 Hence, (bytetype|runtype).Sym.isAlias() is false. + // TODO(gri) Should we get rid of this special case (at the cost + // of less informative error messages involving bytes and runes)? + // NOTE(rsc): No, the error message quality is important. + // (Alternatively, we could introduce an OTALIAS node representing + // type aliases, albeit at the cost of having to deal with it everywhere). 
+ ByteType = defBasic(TUINT8, BuiltinPkg, "byte") + RuneType = defBasic(TINT32, BuiltinPkg, "rune") + + // error type + DeferCheckSize() + ErrorType = defBasic(TFORW, BuiltinPkg, "error") + ErrorType.SetUnderlying(makeErrorInterface()) + ResumeCheckSize() + + // comparable type (interface) + DeferCheckSize() + ComparableType = defBasic(TFORW, BuiltinPkg, "comparable") + ComparableType.SetUnderlying(makeComparableInterface()) + ResumeCheckSize() + + // any type (interface) + DeferCheckSize() + AnyType = defBasic(TFORW, BuiltinPkg, "any") + AnyType.SetUnderlying(NewInterface(nil)) + ResumeCheckSize() + + Types[TUNSAFEPTR] = defBasic(TUNSAFEPTR, UnsafePkg, "Pointer") + + Types[TBLANK] = newType(TBLANK) + Types[TNIL] = newType(TNIL) + + // simple aliases + SimType[TMAP] = TPTR + SimType[TCHAN] = TPTR + SimType[TFUNC] = TPTR + SimType[TUNSAFEPTR] = TPTR + + for et := TINT8; et <= TUINT64; et++ { + IsInt[et] = true + } + IsInt[TINT] = true + IsInt[TUINT] = true + IsInt[TUINTPTR] = true + + IsFloat[TFLOAT32] = true + IsFloat[TFLOAT64] = true + + IsComplex[TCOMPLEX64] = true + IsComplex[TCOMPLEX128] = true +} + +func makeErrorInterface() *Type { + sig := NewSignature(FakeRecv(), nil, []*Field{ + NewField(src.NoXPos, nil, Types[TSTRING]), + }) + method := NewField(src.NoXPos, LocalPkg.Lookup("Error"), sig) + return NewInterface([]*Field{method}) +} + +// makeComparableInterface makes the predefined "comparable" interface in the +// built-in package. It has a unique name, but no methods. +func makeComparableInterface() *Type { + return NewInterface(nil) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/utils.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/utils.go new file mode 100644 index 0000000000000000000000000000000000000000..f9f629ca3ea6cf5bd878387b6cab1a7892e1685b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types/utils.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types + +const BADWIDTH = -1000000000 + +type bitset8 uint8 + +func (f *bitset8) set(mask uint8, b bool) { + if b { + *(*uint8)(f) |= mask + } else { + *(*uint8)(f) &^= mask + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/alias.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/alias.go new file mode 100644 index 0000000000000000000000000000000000000000..06dfba16976cfaf3f0653ea764a9beec43bcda39 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/alias.go @@ -0,0 +1,88 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import "fmt" + +// An Alias represents an alias type. +// Whether or not Alias types are created is controlled by the +// gotypesalias setting with the GODEBUG environment variable. +// For gotypesalias=1, alias declarations produce an Alias type. +// Otherwise, the alias information is only in the type name, +// which points directly to the actual (aliased) type. +type Alias struct { + obj *TypeName // corresponding declared alias object + fromRHS Type // RHS of type alias declaration; may be an alias + actual Type // actual (aliased) type; never an alias +} + +// NewAlias creates a new Alias type with the given type name and rhs. +// rhs must not be nil. +func NewAlias(obj *TypeName, rhs Type) *Alias { + alias := (*Checker)(nil).newAlias(obj, rhs) + // Ensure that alias.actual is set (#65455). 
+ unalias(alias) + return alias +} + +func (a *Alias) Obj() *TypeName { return a.obj } +func (a *Alias) Underlying() Type { return unalias(a).Underlying() } +func (a *Alias) String() string { return TypeString(a, nil) } + +// Type accessors + +// Unalias returns t if it is not an alias type; +// otherwise it follows t's alias chain until it +// reaches a non-alias type which is then returned. +// Consequently, the result is never an alias type. +func Unalias(t Type) Type { + if a0, _ := t.(*Alias); a0 != nil { + return unalias(a0) + } + return t +} + +func unalias(a0 *Alias) Type { + if a0.actual != nil { + return a0.actual + } + var t Type + for a := a0; a != nil; a, _ = t.(*Alias) { + t = a.fromRHS + } + if t == nil { + panic(fmt.Sprintf("non-terminated alias %s", a0.obj.name)) + } + a0.actual = t + return t +} + +// asNamed returns t as *Named if that is t's +// actual type. It returns nil otherwise. +func asNamed(t Type) *Named { + n, _ := Unalias(t).(*Named) + return n +} + +// newAlias creates a new Alias type with the given type name and rhs. +// rhs must not be nil. +func (check *Checker) newAlias(obj *TypeName, rhs Type) *Alias { + assert(rhs != nil) + a := &Alias{obj, rhs, nil} + if obj.typ == nil { + obj.typ = a + } + + // Ensure that a.actual is set at the end of type checking. + if check != nil { + check.needsCleanup(a) + } + + return a +} + +func (a *Alias) cleanup() { + Unalias(a) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/api.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/api.go new file mode 100644 index 0000000000000000000000000000000000000000..bb02d9198e6ff3e38dc7ec7617a2e41b4e8bc316 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/api.go @@ -0,0 +1,471 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package types declares the data types and implements +// the algorithms for type-checking of Go packages. Use +// Config.Check to invoke the type checker for a package. +// Alternatively, create a new type checker with NewChecker +// and invoke it incrementally by calling Checker.Files. +// +// Type-checking consists of several interdependent phases: +// +// Name resolution maps each identifier (syntax.Name) in the program to the +// language object (Object) it denotes. +// Use Info.{Defs,Uses,Implicits} for the results of name resolution. +// +// Constant folding computes the exact constant value (constant.Value) +// for every expression (syntax.Expr) that is a compile-time constant. +// Use Info.Types[expr].Value for the results of constant folding. +// +// Type inference computes the type (Type) of every expression (syntax.Expr) +// and checks for compliance with the language specification. +// Use Info.Types[expr].Type for the results of type inference. +package types2 + +import ( + "cmd/compile/internal/syntax" + "fmt" + "go/constant" + . "internal/types/errors" + "strings" +) + +// An Error describes a type-checking error; it implements the error interface. +// A "soft" error is an error that still permits a valid interpretation of a +// package (such as "unused variable"); "hard" errors may lead to unpredictable +// behavior if ignored. +type Error struct { + Pos syntax.Pos // error position + Msg string // default error message, user-friendly + Full string // full error message, for debugging (may contain internal details) + Soft bool // if set, error is "soft" + Code Code // error code +} + +// Error returns an error string formatted as follows: +// filename:line:column: message +func (err Error) Error() string { + return fmt.Sprintf("%s: %s", err.Pos, err.Msg) +} + +// FullError returns an error string like Error, buy it may contain +// type-checker internal details such as subscript indices for type +// parameters and more. Useful for debugging. 
+func (err Error) FullError() string { + return fmt.Sprintf("%s: %s", err.Pos, err.Full) +} + +// An ArgumentError holds an error associated with an argument index. +type ArgumentError struct { + Index int + Err error +} + +func (e *ArgumentError) Error() string { return e.Err.Error() } +func (e *ArgumentError) Unwrap() error { return e.Err } + +// An Importer resolves import paths to Packages. +// +// CAUTION: This interface does not support the import of locally +// vendored packages. See https://golang.org/s/go15vendor. +// If possible, external implementations should implement ImporterFrom. +type Importer interface { + // Import returns the imported package for the given import path. + // The semantics is like for ImporterFrom.ImportFrom except that + // dir and mode are ignored (since they are not present). + Import(path string) (*Package, error) +} + +// ImportMode is reserved for future use. +type ImportMode int + +// An ImporterFrom resolves import paths to packages; it +// supports vendoring per https://golang.org/s/go15vendor. +// Use go/importer to obtain an ImporterFrom implementation. +type ImporterFrom interface { + // Importer is present for backward-compatibility. Calling + // Import(path) is the same as calling ImportFrom(path, "", 0); + // i.e., locally vendored packages may not be found. + // The types package does not call Import if an ImporterFrom + // is present. + Importer + + // ImportFrom returns the imported package for the given import + // path when imported by a package file located in dir. + // If the import failed, besides returning an error, ImportFrom + // is encouraged to cache and return a package anyway, if one + // was created. This will reduce package inconsistencies and + // follow-on type checker errors due to the missing package. + // The mode value must be 0; it is reserved for future use. + // Two calls to ImportFrom with the same path and dir must + // return the same package. 
+ ImportFrom(path, dir string, mode ImportMode) (*Package, error) +} + +// A Config specifies the configuration for type checking. +// The zero value for Config is a ready-to-use default configuration. +type Config struct { + // Context is the context used for resolving global identifiers. If nil, the + // type checker will initialize this field with a newly created context. + Context *Context + + // GoVersion describes the accepted Go language version. The string must + // start with a prefix of the form "go%d.%d" (e.g. "go1.20", "go1.21rc1", or + // "go1.21.0") or it must be empty; an empty string disables Go language + // version checks. If the format is invalid, invoking the type checker will + // result in an error. + GoVersion string + + // If IgnoreFuncBodies is set, function bodies are not + // type-checked. + IgnoreFuncBodies bool + + // If FakeImportC is set, `import "C"` (for packages requiring Cgo) + // declares an empty "C" package and errors are omitted for qualified + // identifiers referring to package C (which won't find an object). + // This feature is intended for the standard library cmd/api tool. + // + // Caution: Effects may be unpredictable due to follow-on errors. + // Do not use casually! + FakeImportC bool + + // If IgnoreBranchErrors is set, branch/label errors are ignored. + IgnoreBranchErrors bool + + // If go115UsesCgo is set, the type checker expects the + // _cgo_gotypes.go file generated by running cmd/cgo to be + // provided as a package source file. Qualified identifiers + // referring to package C will be resolved to cgo-provided + // declarations within _cgo_gotypes.go. + // + // It is an error to set both FakeImportC and go115UsesCgo. + go115UsesCgo bool + + // If Trace is set, a debug trace is printed to stdout. + Trace bool + + // If Error != nil, it is called with each error found + // during type checking; err has dynamic type Error. 
+ // Secondary errors (for instance, to enumerate all types + // involved in an invalid recursive type declaration) have + // error strings that start with a '\t' character. + // If Error == nil, type-checking stops with the first + // error found. + Error func(err error) + + // An importer is used to import packages referred to from + // import declarations. + // If the installed importer implements ImporterFrom, the type + // checker calls ImportFrom instead of Import. + // The type checker reports an error if an importer is needed + // but none was installed. + Importer Importer + + // If Sizes != nil, it provides the sizing functions for package unsafe. + // Otherwise SizesFor("gc", "amd64") is used instead. + Sizes Sizes + + // If DisableUnusedImportCheck is set, packages are not checked + // for unused imports. + DisableUnusedImportCheck bool + + // If a non-empty ErrorURL format string is provided, it is used + // to format an error URL link that is appended to the first line + // of an error message. ErrorURL must be a format string containing + // exactly one "%s" format, e.g. "[go.dev/e/%s]". + ErrorURL string +} + +func srcimporter_setUsesCgo(conf *Config) { + conf.go115UsesCgo = true +} + +// Info holds result type information for a type-checked package. +// Only the information for which a map is provided is collected. +// If the package has type errors, the collected information may +// be incomplete. +type Info struct { + // Types maps expressions to their types, and for constant + // expressions, also their values. Invalid expressions are + // omitted. + // + // For (possibly parenthesized) identifiers denoting built-in + // functions, the recorded signatures are call-site specific: + // if the call result is not a constant, the recorded type is + // an argument-specific signature. Otherwise, the recorded type + // is invalid. 
+ // + // The Types map does not record the type of every identifier, + // only those that appear where an arbitrary expression is + // permitted. For instance, the identifier f in a selector + // expression x.f is found only in the Selections map, the + // identifier z in a variable declaration 'var z int' is found + // only in the Defs map, and identifiers denoting packages in + // qualified identifiers are collected in the Uses map. + Types map[syntax.Expr]TypeAndValue + + // If StoreTypesInSyntax is set, type information identical to + // that which would be put in the Types map, will be set in + // syntax.Expr.TypeAndValue (independently of whether Types + // is nil or not). + StoreTypesInSyntax bool + + // Instances maps identifiers denoting generic types or functions to their + // type arguments and instantiated type. + // + // For example, Instances will map the identifier for 'T' in the type + // instantiation T[int, string] to the type arguments [int, string] and + // resulting instantiated *Named type. Given a generic function + // func F[A any](A), Instances will map the identifier for 'F' in the call + // expression F(int(1)) to the inferred type arguments [int], and resulting + // instantiated *Signature. + // + // Invariant: Instantiating Uses[id].Type() with Instances[id].TypeArgs + // results in an equivalent of Instances[id].Type. + Instances map[*syntax.Name]Instance + + // Defs maps identifiers to the objects they define (including + // package names, dots "." of dot-imports, and blank "_" identifiers). + // For identifiers that do not denote objects (e.g., the package name + // in package clauses, or symbolic variables t in t := x.(type) of + // type switch headers), the corresponding objects are nil. + // + // For an embedded field, Defs returns the field *Var it defines. + // + // Invariant: Defs[id] == nil || Defs[id].Pos() == id.Pos() + Defs map[*syntax.Name]Object + + // Uses maps identifiers to the objects they denote. 
+ // + // For an embedded field, Uses returns the *TypeName it denotes. + // + // Invariant: Uses[id].Pos() != id.Pos() + Uses map[*syntax.Name]Object + + // Implicits maps nodes to their implicitly declared objects, if any. + // The following node and object types may appear: + // + // node declared object + // + // *syntax.ImportDecl *PkgName for imports without renames + // *syntax.CaseClause type-specific *Var for each type switch case clause (incl. default) + // *syntax.Field anonymous parameter *Var (incl. unnamed results) + // + Implicits map[syntax.Node]Object + + // Selections maps selector expressions (excluding qualified identifiers) + // to their corresponding selections. + Selections map[*syntax.SelectorExpr]*Selection + + // Scopes maps syntax.Nodes to the scopes they define. Package scopes are not + // associated with a specific node but with all files belonging to a package. + // Thus, the package scope can be found in the type-checked Package object. + // Scopes nest, with the Universe scope being the outermost scope, enclosing + // the package scope, which contains (one or more) files scopes, which enclose + // function scopes which in turn enclose statement and function literal scopes. + // Note that even though package-level functions are declared in the package + // scope, the function scopes are embedded in the file scope of the file + // containing the function declaration. + // + // The Scope of a function contains the declarations of any + // type parameters, parameters, and named results, plus any + // local declarations in the body block. + // It is coextensive with the complete extent of the + // function's syntax ([*ast.FuncDecl] or [*ast.FuncLit]). + // The Scopes mapping does not contain an entry for the + // function body ([*ast.BlockStmt]); the function's scope is + // associated with the [*ast.FuncType]. 
+ // + // The following node types may appear in Scopes: + // + // *syntax.File + // *syntax.FuncType + // *syntax.TypeDecl + // *syntax.BlockStmt + // *syntax.IfStmt + // *syntax.SwitchStmt + // *syntax.CaseClause + // *syntax.CommClause + // *syntax.ForStmt + // + Scopes map[syntax.Node]*Scope + + // InitOrder is the list of package-level initializers in the order in which + // they must be executed. Initializers referring to variables related by an + // initialization dependency appear in topological order, the others appear + // in source order. Variables without an initialization expression do not + // appear in this list. + InitOrder []*Initializer + + // FileVersions maps a file to its Go version string. + // If the file doesn't specify a version, the reported + // string is Config.GoVersion. + // Version strings begin with “go”, like “go1.21”, and + // are suitable for use with the [go/version] package. + FileVersions map[*syntax.PosBase]string +} + +func (info *Info) recordTypes() bool { + return info.Types != nil || info.StoreTypesInSyntax +} + +// TypeOf returns the type of expression e, or nil if not found. +// Precondition 1: the Types map is populated or StoreTypesInSynax is set. +// Precondition 2: Uses and Defs maps are populated. +func (info *Info) TypeOf(e syntax.Expr) Type { + if info.Types != nil { + if t, ok := info.Types[e]; ok { + return t.Type + } + } else if info.StoreTypesInSyntax { + if tv := e.GetTypeInfo(); tv.Type != nil { + return tv.Type + } + } + + if id, _ := e.(*syntax.Name); id != nil { + if obj := info.ObjectOf(id); obj != nil { + return obj.Type() + } + } + return nil +} + +// ObjectOf returns the object denoted by the specified id, +// or nil if not found. +// +// If id is an embedded struct field, ObjectOf returns the field (*Var) +// it defines, not the type (*TypeName) it uses. +// +// Precondition: the Uses and Defs maps are populated. 
+func (info *Info) ObjectOf(id *syntax.Name) Object { + if obj := info.Defs[id]; obj != nil { + return obj + } + return info.Uses[id] +} + +// PkgNameOf returns the local package name defined by the import, +// or nil if not found. +// +// For dot-imports, the package name is ".". +// +// Precondition: the Defs and Implicts maps are populated. +func (info *Info) PkgNameOf(imp *syntax.ImportDecl) *PkgName { + var obj Object + if imp.LocalPkgName != nil { + obj = info.Defs[imp.LocalPkgName] + } else { + obj = info.Implicits[imp] + } + pkgname, _ := obj.(*PkgName) + return pkgname +} + +// TypeAndValue reports the type and value (for constants) +// of the corresponding expression. +type TypeAndValue struct { + mode operandMode + Type Type + Value constant.Value +} + +// IsVoid reports whether the corresponding expression +// is a function call without results. +func (tv TypeAndValue) IsVoid() bool { + return tv.mode == novalue +} + +// IsType reports whether the corresponding expression specifies a type. +func (tv TypeAndValue) IsType() bool { + return tv.mode == typexpr +} + +// IsBuiltin reports whether the corresponding expression denotes +// a (possibly parenthesized) built-in function. +func (tv TypeAndValue) IsBuiltin() bool { + return tv.mode == builtin +} + +// IsValue reports whether the corresponding expression is a value. +// Builtins are not considered values. Constant values have a non- +// nil Value. +func (tv TypeAndValue) IsValue() bool { + switch tv.mode { + case constant_, variable, mapindex, value, nilvalue, commaok, commaerr: + return true + } + return false +} + +// IsNil reports whether the corresponding expression denotes the +// predeclared value nil. Depending on context, it may have been +// given a type different from UntypedNil. +func (tv TypeAndValue) IsNil() bool { + return tv.mode == nilvalue +} + +// Addressable reports whether the corresponding expression +// is addressable (https://golang.org/ref/spec#Address_operators). 
+func (tv TypeAndValue) Addressable() bool { + return tv.mode == variable +} + +// Assignable reports whether the corresponding expression +// is assignable to (provided a value of the right type). +func (tv TypeAndValue) Assignable() bool { + return tv.mode == variable || tv.mode == mapindex +} + +// HasOk reports whether the corresponding expression may be +// used on the rhs of a comma-ok assignment. +func (tv TypeAndValue) HasOk() bool { + return tv.mode == commaok || tv.mode == mapindex +} + +// Instance reports the type arguments and instantiated type for type and +// function instantiations. For type instantiations, Type will be of dynamic +// type *Named. For function instantiations, Type will be of dynamic type +// *Signature. +type Instance struct { + TypeArgs *TypeList + Type Type +} + +// An Initializer describes a package-level variable, or a list of variables in case +// of a multi-valued initialization expression, and the corresponding initialization +// expression. +type Initializer struct { + Lhs []*Var // var Lhs = Rhs + Rhs syntax.Expr +} + +func (init *Initializer) String() string { + var buf strings.Builder + for i, lhs := range init.Lhs { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString(lhs.Name()) + } + buf.WriteString(" = ") + syntax.Fprint(&buf, init.Rhs, syntax.ShortForm) + return buf.String() +} + +// Check type-checks a package and returns the resulting package object and +// the first error if any. Additionally, if info != nil, Check populates each +// of the non-nil maps in the Info struct. +// +// The package is marked as complete if no errors occurred, otherwise it is +// incomplete. See Config.Error for controlling behavior in the presence of +// errors. +// +// The package is specified by a list of *syntax.Files and corresponding +// file set, and the package path the package is identified with. +// The clean path must not be empty or dot ("."). 
+func (conf *Config) Check(path string, files []*syntax.File, info *Info) (*Package, error) { + pkg := NewPackage(path, "") + return pkg, NewChecker(conf, pkg, info).Files(files) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/api_predicates.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/api_predicates.go new file mode 100644 index 0000000000000000000000000000000000000000..480f71144e8d59d2a272439706ec7bc3a9ac89aa --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/api_predicates.go @@ -0,0 +1,84 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements exported type predicates. + +package types2 + +// AssertableTo reports whether a value of type V can be asserted to have type T. +// +// The behavior of AssertableTo is unspecified in three cases: +// - if T is Typ[Invalid] +// - if V is a generalized interface; i.e., an interface that may only be used +// as a type constraint in Go code +// - if T is an uninstantiated generic type +func AssertableTo(V *Interface, T Type) bool { + // Checker.newAssertableTo suppresses errors for invalid types, so we need special + // handling here. + if !isValid(T.Underlying()) { + return false + } + return (*Checker)(nil).newAssertableTo(nopos, V, T, nil) +} + +// AssignableTo reports whether a value of type V is assignable to a variable +// of type T. +// +// The behavior of AssignableTo is unspecified if V or T is Typ[Invalid] or an +// uninstantiated generic type. +func AssignableTo(V, T Type) bool { + x := operand{mode: value, typ: V} + ok, _ := x.assignableTo(nil, T, nil) // check not needed for non-constant x + return ok +} + +// ConvertibleTo reports whether a value of type V is convertible to a value of +// type T. 
+// +// The behavior of ConvertibleTo is unspecified if V or T is Typ[Invalid] or an +// uninstantiated generic type. +func ConvertibleTo(V, T Type) bool { + x := operand{mode: value, typ: V} + return x.convertibleTo(nil, T, nil) // check not needed for non-constant x +} + +// Implements reports whether type V implements interface T. +// +// The behavior of Implements is unspecified if V is Typ[Invalid] or an uninstantiated +// generic type. +func Implements(V Type, T *Interface) bool { + if T.Empty() { + // All types (even Typ[Invalid]) implement the empty interface. + return true + } + // Checker.implements suppresses errors for invalid types, so we need special + // handling here. + if !isValid(V.Underlying()) { + return false + } + return (*Checker)(nil).implements(nopos, V, T, false, nil) +} + +// Satisfies reports whether type V satisfies the constraint T. +// +// The behavior of Satisfies is unspecified if V is Typ[Invalid] or an uninstantiated +// generic type. +func Satisfies(V Type, T *Interface) bool { + return (*Checker)(nil).implements(nopos, V, T, true, nil) +} + +// Identical reports whether x and y are identical types. +// Receivers of [Signature] types are ignored. +func Identical(x, y Type) bool { + var c comparer + return c.identical(x, y, nil) +} + +// IdenticalIgnoreTags reports whether x and y are identical types if tags are ignored. +// Receivers of [Signature] types are ignored. +func IdenticalIgnoreTags(x, y Type) bool { + var c comparer + c.ignoreTags = true + return c.identical(x, y, nil) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/api_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/api_test.go new file mode 100644 index 0000000000000000000000000000000000000000..bacba719553b34c079467ac99341f4954ca7d2f2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/api_test.go @@ -0,0 +1,2939 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2_test + +import ( + "cmd/compile/internal/syntax" + "errors" + "fmt" + "internal/goversion" + "internal/testenv" + "reflect" + "regexp" + "sort" + "strings" + "sync" + "testing" + + . "cmd/compile/internal/types2" +) + +// nopos indicates an unknown position +var nopos syntax.Pos + +func mustParse(src string) *syntax.File { + f, err := syntax.Parse(syntax.NewFileBase(pkgName(src)), strings.NewReader(src), nil, nil, 0) + if err != nil { + panic(err) // so we don't need to pass *testing.T + } + return f +} + +func typecheck(src string, conf *Config, info *Info) (*Package, error) { + f := mustParse(src) + if conf == nil { + conf = &Config{ + Error: func(err error) {}, // collect all errors + Importer: defaultImporter(), + } + } + return conf.Check(f.PkgName.Value, []*syntax.File{f}, info) +} + +func mustTypecheck(src string, conf *Config, info *Info) *Package { + pkg, err := typecheck(src, conf, info) + if err != nil { + panic(err) // so we don't need to pass *testing.T + } + return pkg +} + +// pkgName extracts the package name from src, which must contain a package header. 
+func pkgName(src string) string { + const kw = "package " + if i := strings.Index(src, kw); i >= 0 { + after := src[i+len(kw):] + n := len(after) + if i := strings.IndexAny(after, "\n\t ;/"); i >= 0 { + n = i + } + return after[:n] + } + panic("missing package header: " + src) +} + +func TestValuesInfo(t *testing.T) { + var tests = []struct { + src string + expr string // constant expression + typ string // constant type + val string // constant value + }{ + {`package a0; const _ = false`, `false`, `untyped bool`, `false`}, + {`package a1; const _ = 0`, `0`, `untyped int`, `0`}, + {`package a2; const _ = 'A'`, `'A'`, `untyped rune`, `65`}, + {`package a3; const _ = 0.`, `0.`, `untyped float`, `0`}, + {`package a4; const _ = 0i`, `0i`, `untyped complex`, `(0 + 0i)`}, + {`package a5; const _ = "foo"`, `"foo"`, `untyped string`, `"foo"`}, + + {`package b0; var _ = false`, `false`, `bool`, `false`}, + {`package b1; var _ = 0`, `0`, `int`, `0`}, + {`package b2; var _ = 'A'`, `'A'`, `rune`, `65`}, + {`package b3; var _ = 0.`, `0.`, `float64`, `0`}, + {`package b4; var _ = 0i`, `0i`, `complex128`, `(0 + 0i)`}, + {`package b5; var _ = "foo"`, `"foo"`, `string`, `"foo"`}, + + {`package c0a; var _ = bool(false)`, `false`, `bool`, `false`}, + {`package c0b; var _ = bool(false)`, `bool(false)`, `bool`, `false`}, + {`package c0c; type T bool; var _ = T(false)`, `T(false)`, `c0c.T`, `false`}, + + {`package c1a; var _ = int(0)`, `0`, `int`, `0`}, + {`package c1b; var _ = int(0)`, `int(0)`, `int`, `0`}, + {`package c1c; type T int; var _ = T(0)`, `T(0)`, `c1c.T`, `0`}, + + {`package c2a; var _ = rune('A')`, `'A'`, `rune`, `65`}, + {`package c2b; var _ = rune('A')`, `rune('A')`, `rune`, `65`}, + {`package c2c; type T rune; var _ = T('A')`, `T('A')`, `c2c.T`, `65`}, + + {`package c3a; var _ = float32(0.)`, `0.`, `float32`, `0`}, + {`package c3b; var _ = float32(0.)`, `float32(0.)`, `float32`, `0`}, + {`package c3c; type T float32; var _ = T(0.)`, `T(0.)`, `c3c.T`, `0`}, + + 
{`package c4a; var _ = complex64(0i)`, `0i`, `complex64`, `(0 + 0i)`}, + {`package c4b; var _ = complex64(0i)`, `complex64(0i)`, `complex64`, `(0 + 0i)`}, + {`package c4c; type T complex64; var _ = T(0i)`, `T(0i)`, `c4c.T`, `(0 + 0i)`}, + + {`package c5a; var _ = string("foo")`, `"foo"`, `string`, `"foo"`}, + {`package c5b; var _ = string("foo")`, `string("foo")`, `string`, `"foo"`}, + {`package c5c; type T string; var _ = T("foo")`, `T("foo")`, `c5c.T`, `"foo"`}, + {`package c5d; var _ = string(65)`, `65`, `untyped int`, `65`}, + {`package c5e; var _ = string('A')`, `'A'`, `untyped rune`, `65`}, + {`package c5f; type T string; var _ = T('A')`, `'A'`, `untyped rune`, `65`}, + + {`package d0; var _ = []byte("foo")`, `"foo"`, `string`, `"foo"`}, + {`package d1; var _ = []byte(string("foo"))`, `"foo"`, `string`, `"foo"`}, + {`package d2; var _ = []byte(string("foo"))`, `string("foo")`, `string`, `"foo"`}, + {`package d3; type T []byte; var _ = T("foo")`, `"foo"`, `string`, `"foo"`}, + + {`package e0; const _ = float32( 1e-200)`, `float32(1e-200)`, `float32`, `0`}, + {`package e1; const _ = float32(-1e-200)`, `float32(-1e-200)`, `float32`, `0`}, + {`package e2; const _ = float64( 1e-2000)`, `float64(1e-2000)`, `float64`, `0`}, + {`package e3; const _ = float64(-1e-2000)`, `float64(-1e-2000)`, `float64`, `0`}, + {`package e4; const _ = complex64( 1e-200)`, `complex64(1e-200)`, `complex64`, `(0 + 0i)`}, + {`package e5; const _ = complex64(-1e-200)`, `complex64(-1e-200)`, `complex64`, `(0 + 0i)`}, + {`package e6; const _ = complex128( 1e-2000)`, `complex128(1e-2000)`, `complex128`, `(0 + 0i)`}, + {`package e7; const _ = complex128(-1e-2000)`, `complex128(-1e-2000)`, `complex128`, `(0 + 0i)`}, + + {`package f0 ; var _ float32 = 1e-200`, `1e-200`, `float32`, `0`}, + {`package f1 ; var _ float32 = -1e-200`, `-1e-200`, `float32`, `0`}, + {`package f2a; var _ float64 = 1e-2000`, `1e-2000`, `float64`, `0`}, + {`package f3a; var _ float64 = -1e-2000`, `-1e-2000`, `float64`, 
`0`}, + {`package f2b; var _ = 1e-2000`, `1e-2000`, `float64`, `0`}, + {`package f3b; var _ = -1e-2000`, `-1e-2000`, `float64`, `0`}, + {`package f4 ; var _ complex64 = 1e-200 `, `1e-200`, `complex64`, `(0 + 0i)`}, + {`package f5 ; var _ complex64 = -1e-200 `, `-1e-200`, `complex64`, `(0 + 0i)`}, + {`package f6a; var _ complex128 = 1e-2000i`, `1e-2000i`, `complex128`, `(0 + 0i)`}, + {`package f7a; var _ complex128 = -1e-2000i`, `-1e-2000i`, `complex128`, `(0 + 0i)`}, + {`package f6b; var _ = 1e-2000i`, `1e-2000i`, `complex128`, `(0 + 0i)`}, + {`package f7b; var _ = -1e-2000i`, `-1e-2000i`, `complex128`, `(0 + 0i)`}, + + {`package g0; const (a = len([iota]int{}); b; c); const _ = c`, `c`, `int`, `2`}, // go.dev/issue/22341 + {`package g1; var(j int32; s int; n = 1.0< 1 { + t.Errorf("package %s: %d Implicits entries found", name, len(info.Implicits)) + continue + } + + // extract Implicits entry, if any + var got string + for n, obj := range info.Implicits { + switch x := n.(type) { + case *syntax.ImportDecl: + got = "importSpec" + case *syntax.CaseClause: + got = "caseClause" + case *syntax.Field: + got = "field" + default: + t.Fatalf("package %s: unexpected %T", name, x) + } + got += ": " + obj.String() + } + + // verify entry + if got != test.want { + t.Errorf("package %s: got %q; want %q", name, got, test.want) + } + } +} + +func TestPkgNameOf(t *testing.T) { + testenv.MustHaveGoBuild(t) + + const src = ` +package p + +import ( + . 
"os" + _ "io" + "math" + "path/filepath" + snort "sort" +) + +// avoid imported and not used errors +var ( + _ = Open // os.Open + _ = math.Sin + _ = filepath.Abs + _ = snort.Ints +) +` + + var tests = []struct { + path string // path string enclosed in "'s + want string + }{ + {`"os"`, "."}, + {`"io"`, "_"}, + {`"math"`, "math"}, + {`"path/filepath"`, "filepath"}, + {`"sort"`, "snort"}, + } + + f := mustParse(src) + info := Info{ + Defs: make(map[*syntax.Name]Object), + Implicits: make(map[syntax.Node]Object), + } + var conf Config + conf.Importer = defaultImporter() + _, err := conf.Check("p", []*syntax.File{f}, &info) + if err != nil { + t.Fatal(err) + } + + // map import paths to importDecl + imports := make(map[string]*syntax.ImportDecl) + for _, d := range f.DeclList { + if imp, _ := d.(*syntax.ImportDecl); imp != nil { + imports[imp.Path.Value] = imp + } + } + + for _, test := range tests { + imp := imports[test.path] + if imp == nil { + t.Fatalf("invalid test case: import path %s not found", test.path) + } + got := info.PkgNameOf(imp) + if got == nil { + t.Fatalf("import %s: package name not found", test.path) + } + if got.Name() != test.want { + t.Errorf("import %s: got %s; want %s", test.path, got.Name(), test.want) + } + } + + // test non-existing importDecl + if got := info.PkgNameOf(new(syntax.ImportDecl)); got != nil { + t.Errorf("got %s for non-existing import declaration", got.Name()) + } +} + +func predString(tv TypeAndValue) string { + var buf strings.Builder + pred := func(b bool, s string) { + if b { + if buf.Len() > 0 { + buf.WriteString(", ") + } + buf.WriteString(s) + } + } + + pred(tv.IsVoid(), "void") + pred(tv.IsType(), "type") + pred(tv.IsBuiltin(), "builtin") + pred(tv.IsValue() && tv.Value != nil, "const") + pred(tv.IsValue() && tv.Value == nil, "value") + pred(tv.IsNil(), "nil") + pred(tv.Addressable(), "addressable") + pred(tv.Assignable(), "assignable") + pred(tv.HasOk(), "hasOk") + + if buf.Len() == 0 { + return "invalid" + } + 
return buf.String() +} + +func TestPredicatesInfo(t *testing.T) { + testenv.MustHaveGoBuild(t) + + var tests = []struct { + src string + expr string + pred string + }{ + // void + {`package n0; func f() { f() }`, `f()`, `void`}, + + // types + {`package t0; type _ int`, `int`, `type`}, + {`package t1; type _ []int`, `[]int`, `type`}, + {`package t2; type _ func()`, `func()`, `type`}, + {`package t3; type _ func(int)`, `int`, `type`}, + {`package t3; type _ func(...int)`, `...int`, `type`}, + + // built-ins + {`package b0; var _ = len("")`, `len`, `builtin`}, + {`package b1; var _ = (len)("")`, `(len)`, `builtin`}, + + // constants + {`package c0; var _ = 42`, `42`, `const`}, + {`package c1; var _ = "foo" + "bar"`, `"foo" + "bar"`, `const`}, + {`package c2; const (i = 1i; _ = i)`, `i`, `const`}, + + // values + {`package v0; var (a, b int; _ = a + b)`, `a + b`, `value`}, + {`package v1; var _ = &[]int{1}`, `[]int{…}`, `value`}, + {`package v2; var _ = func(){}`, `func() {}`, `value`}, + {`package v4; func f() { _ = f }`, `f`, `value`}, + {`package v3; var _ *int = nil`, `nil`, `value, nil`}, + {`package v3; var _ *int = (nil)`, `(nil)`, `value, nil`}, + + // addressable (and thus assignable) operands + {`package a0; var (x int; _ = x)`, `x`, `value, addressable, assignable`}, + {`package a1; var (p *int; _ = *p)`, `*p`, `value, addressable, assignable`}, + {`package a2; var (s []int; _ = s[0])`, `s[0]`, `value, addressable, assignable`}, + {`package a3; var (s struct{f int}; _ = s.f)`, `s.f`, `value, addressable, assignable`}, + {`package a4; var (a [10]int; _ = a[0])`, `a[0]`, `value, addressable, assignable`}, + {`package a5; func _(x int) { _ = x }`, `x`, `value, addressable, assignable`}, + {`package a6; func _()(x int) { _ = x; return }`, `x`, `value, addressable, assignable`}, + {`package a7; type T int; func (x T) _() { _ = x }`, `x`, `value, addressable, assignable`}, + // composite literals are not addressable + + // assignable but not addressable values + 
{`package s0; var (m map[int]int; _ = m[0])`, `m[0]`, `value, assignable, hasOk`}, + {`package s1; var (m map[int]int; _, _ = m[0])`, `m[0]`, `value, assignable, hasOk`}, + + // hasOk expressions + {`package k0; var (ch chan int; _ = <-ch)`, `<-ch`, `value, hasOk`}, + {`package k1; var (ch chan int; _, _ = <-ch)`, `<-ch`, `value, hasOk`}, + + // missing entries + // - package names are collected in the Uses map + // - identifiers being declared are collected in the Defs map + {`package m0; import "os"; func _() { _ = os.Stdout }`, `os`, ``}, + {`package m1; import p "os"; func _() { _ = p.Stdout }`, `p`, ``}, + {`package m2; const c = 0`, `c`, ``}, + {`package m3; type T int`, `T`, ``}, + {`package m4; var v int`, `v`, ``}, + {`package m5; func f() {}`, `f`, ``}, + {`package m6; func _(x int) {}`, `x`, ``}, + {`package m6; func _()(x int) { return }`, `x`, ``}, + {`package m6; type T int; func (x T) _() {}`, `x`, ``}, + } + + for _, test := range tests { + info := Info{Types: make(map[syntax.Expr]TypeAndValue)} + name := mustTypecheck(test.src, nil, &info).Name() + + // look for expression predicates + got := "" + for e, tv := range info.Types { + //println(name, syntax.String(e)) + if syntax.String(e) == test.expr { + got = predString(tv) + break + } + } + + if got != test.pred { + t.Errorf("package %s: got %s; want %s", name, got, test.pred) + } + } +} + +func TestScopesInfo(t *testing.T) { + testenv.MustHaveGoBuild(t) + + var tests = []struct { + src string + scopes []string // list of scope descriptors of the form kind:varlist + }{ + {`package p0`, []string{ + "file:", + }}, + {`package p1; import ( "fmt"; m "math"; _ "os" ); var ( _ = fmt.Println; _ = m.Pi )`, []string{ + "file:fmt m", + }}, + {`package p2; func _() {}`, []string{ + "file:", "func:", + }}, + {`package p3; func _(x, y int) {}`, []string{ + "file:", "func:x y", + }}, + {`package p4; func _(x, y int) { x, z := 1, 2; _ = z }`, []string{ + "file:", "func:x y z", // redeclaration of x + }}, + 
{`package p5; func _(x, y int) (u, _ int) { return }`, []string{ + "file:", "func:u x y", + }}, + {`package p6; func _() { { var x int; _ = x } }`, []string{ + "file:", "func:", "block:x", + }}, + {`package p7; func _() { if true {} }`, []string{ + "file:", "func:", "if:", "block:", + }}, + {`package p8; func _() { if x := 0; x < 0 { y := x; _ = y } }`, []string{ + "file:", "func:", "if:x", "block:y", + }}, + {`package p9; func _() { switch x := 0; x {} }`, []string{ + "file:", "func:", "switch:x", + }}, + {`package p10; func _() { switch x := 0; x { case 1: y := x; _ = y; default: }}`, []string{ + "file:", "func:", "switch:x", "case:y", "case:", + }}, + {`package p11; func _(t interface{}) { switch t.(type) {} }`, []string{ + "file:", "func:t", "switch:", + }}, + {`package p12; func _(t interface{}) { switch t := t; t.(type) {} }`, []string{ + "file:", "func:t", "switch:t", + }}, + {`package p13; func _(t interface{}) { switch x := t.(type) { case int: _ = x } }`, []string{ + "file:", "func:t", "switch:", "case:x", // x implicitly declared + }}, + {`package p14; func _() { select{} }`, []string{ + "file:", "func:", + }}, + {`package p15; func _(c chan int) { select{ case <-c: } }`, []string{ + "file:", "func:c", "comm:", + }}, + {`package p16; func _(c chan int) { select{ case i := <-c: x := i; _ = x} }`, []string{ + "file:", "func:c", "comm:i x", + }}, + {`package p17; func _() { for{} }`, []string{ + "file:", "func:", "for:", "block:", + }}, + {`package p18; func _(n int) { for i := 0; i < n; i++ { _ = i } }`, []string{ + "file:", "func:n", "for:i", "block:", + }}, + {`package p19; func _(a []int) { for i := range a { _ = i} }`, []string{ + "file:", "func:a", "for:i", "block:", + }}, + {`package p20; var s int; func _(a []int) { for i, x := range a { s += x; _ = i } }`, []string{ + "file:", "func:a", "for:i x", "block:", + }}, + } + + for _, test := range tests { + info := Info{Scopes: make(map[syntax.Node]*Scope)} + name := mustTypecheck(test.src, nil, 
&info).Name() + + // number of scopes must match + if len(info.Scopes) != len(test.scopes) { + t.Errorf("package %s: got %d scopes; want %d", name, len(info.Scopes), len(test.scopes)) + } + + // scope descriptions must match + for node, scope := range info.Scopes { + var kind string + switch node.(type) { + case *syntax.File: + kind = "file" + case *syntax.FuncType: + kind = "func" + case *syntax.BlockStmt: + kind = "block" + case *syntax.IfStmt: + kind = "if" + case *syntax.SwitchStmt: + kind = "switch" + case *syntax.SelectStmt: + kind = "select" + case *syntax.CaseClause: + kind = "case" + case *syntax.CommClause: + kind = "comm" + case *syntax.ForStmt: + kind = "for" + default: + kind = fmt.Sprintf("%T", node) + } + + // look for matching scope description + desc := kind + ":" + strings.Join(scope.Names(), " ") + found := false + for _, d := range test.scopes { + if desc == d { + found = true + break + } + } + if !found { + t.Errorf("package %s: no matching scope found for %s", name, desc) + } + } + } +} + +func TestInitOrderInfo(t *testing.T) { + var tests = []struct { + src string + inits []string + }{ + {`package p0; var (x = 1; y = x)`, []string{ + "x = 1", "y = x", + }}, + {`package p1; var (a = 1; b = 2; c = 3)`, []string{ + "a = 1", "b = 2", "c = 3", + }}, + {`package p2; var (a, b, c = 1, 2, 3)`, []string{ + "a = 1", "b = 2", "c = 3", + }}, + {`package p3; var _ = f(); func f() int { return 1 }`, []string{ + "_ = f()", // blank var + }}, + {`package p4; var (a = 0; x = y; y = z; z = 0)`, []string{ + "a = 0", "z = 0", "y = z", "x = y", + }}, + {`package p5; var (a, _ = m[0]; m map[int]string)`, []string{ + "a, _ = m[0]", // blank var + }}, + {`package p6; var a, b = f(); func f() (_, _ int) { return z, z }; var z = 0`, []string{ + "z = 0", "a, b = f()", + }}, + {`package p7; var (a = func() int { return b }(); b = 1)`, []string{ + "b = 1", "a = func() int {…}()", + }}, + {`package p8; var (a, b = func() (_, _ int) { return c, c }(); c = 1)`, []string{ + 
"c = 1", "a, b = func() (_, _ int) {…}()", + }}, + {`package p9; type T struct{}; func (T) m() int { _ = y; return 0 }; var x, y = T.m, 1`, []string{ + "y = 1", "x = T.m", + }}, + {`package p10; var (d = c + b; a = 0; b = 0; c = 0)`, []string{ + "a = 0", "b = 0", "c = 0", "d = c + b", + }}, + {`package p11; var (a = e + c; b = d + c; c = 0; d = 0; e = 0)`, []string{ + "c = 0", "d = 0", "b = d + c", "e = 0", "a = e + c", + }}, + // emit an initializer for n:1 initializations only once (not for each node + // on the lhs which may appear in different order in the dependency graph) + {`package p12; var (a = x; b = 0; x, y = m[0]; m map[int]int)`, []string{ + "b = 0", "x, y = m[0]", "a = x", + }}, + // test case from spec section on package initialization + {`package p12 + + var ( + a = c + b + b = f() + c = f() + d = 3 + ) + + func f() int { + d++ + return d + }`, []string{ + "d = 3", "b = f()", "c = f()", "a = c + b", + }}, + // test case for go.dev/issue/7131 + {`package main + + var counter int + func next() int { counter++; return counter } + + var _ = makeOrder() + func makeOrder() []int { return []int{f, b, d, e, c, a} } + + var a = next() + var b, c = next(), next() + var d, e, f = next(), next(), next() + `, []string{ + "a = next()", "b = next()", "c = next()", "d = next()", "e = next()", "f = next()", "_ = makeOrder()", + }}, + // test case for go.dev/issue/10709 + {`package p13 + + var ( + v = t.m() + t = makeT(0) + ) + + type T struct{} + + func (T) m() int { return 0 } + + func makeT(n int) T { + if n > 0 { + return makeT(n-1) + } + return T{} + }`, []string{ + "t = makeT(0)", "v = t.m()", + }}, + // test case for go.dev/issue/10709: same as test before, but variable decls swapped + {`package p14 + + var ( + t = makeT(0) + v = t.m() + ) + + type T struct{} + + func (T) m() int { return 0 } + + func makeT(n int) T { + if n > 0 { + return makeT(n-1) + } + return T{} + }`, []string{ + "t = makeT(0)", "v = t.m()", + }}, + // another candidate possibly causing 
problems with go.dev/issue/10709 + {`package p15 + + var y1 = f1() + + func f1() int { return g1() } + func g1() int { f1(); return x1 } + + var x1 = 0 + + var y2 = f2() + + func f2() int { return g2() } + func g2() int { return x2 } + + var x2 = 0`, []string{ + "x1 = 0", "y1 = f1()", "x2 = 0", "y2 = f2()", + }}, + } + + for _, test := range tests { + info := Info{} + name := mustTypecheck(test.src, nil, &info).Name() + + // number of initializers must match + if len(info.InitOrder) != len(test.inits) { + t.Errorf("package %s: got %d initializers; want %d", name, len(info.InitOrder), len(test.inits)) + continue + } + + // initializers must match + for i, want := range test.inits { + got := info.InitOrder[i].String() + if got != want { + t.Errorf("package %s, init %d: got %s; want %s", name, i, got, want) + continue + } + } + } +} + +func TestMultiFileInitOrder(t *testing.T) { + fileA := mustParse(`package main; var a = 1`) + fileB := mustParse(`package main; var b = 2`) + + // The initialization order must not depend on the parse + // order of the files, only on the presentation order to + // the type-checker. 
+ for _, test := range []struct { + files []*syntax.File + want string + }{ + {[]*syntax.File{fileA, fileB}, "[a = 1 b = 2]"}, + {[]*syntax.File{fileB, fileA}, "[b = 2 a = 1]"}, + } { + var info Info + if _, err := new(Config).Check("main", test.files, &info); err != nil { + t.Fatal(err) + } + if got := fmt.Sprint(info.InitOrder); got != test.want { + t.Fatalf("got %s; want %s", got, test.want) + } + } +} + +func TestFiles(t *testing.T) { + var sources = []string{ + "package p; type T struct{}; func (T) m1() {}", + "package p; func (T) m2() {}; var x interface{ m1(); m2() } = T{}", + "package p; func (T) m3() {}; var y interface{ m1(); m2(); m3() } = T{}", + "package p", + } + + var conf Config + pkg := NewPackage("p", "p") + var info Info + check := NewChecker(&conf, pkg, &info) + + for _, src := range sources { + if err := check.Files([]*syntax.File{mustParse(src)}); err != nil { + t.Error(err) + } + } + + // check InitOrder is [x y] + var vars []string + for _, init := range info.InitOrder { + for _, v := range init.Lhs { + vars = append(vars, v.Name()) + } + } + if got, want := fmt.Sprint(vars), "[x y]"; got != want { + t.Errorf("InitOrder == %s, want %s", got, want) + } +} + +type testImporter map[string]*Package + +func (m testImporter) Import(path string) (*Package, error) { + if pkg := m[path]; pkg != nil { + return pkg, nil + } + return nil, fmt.Errorf("package %q not found", path) +} + +func TestSelection(t *testing.T) { + selections := make(map[*syntax.SelectorExpr]*Selection) + + imports := make(testImporter) + conf := Config{Importer: imports} + makePkg := func(path, src string) { + pkg := mustTypecheck(src, &conf, &Info{Selections: selections}) + imports[path] = pkg + } + + const libSrc = ` +package lib +type T float64 +const C T = 3 +var V T +func F() {} +func (T) M() {} +` + const mainSrc = ` +package main +import "lib" + +type A struct { + *B + C +} + +type B struct { + b int +} + +func (B) f(int) + +type C struct { + c int +} + +type G[P any] 
struct { + p P +} + +func (G[P]) m(P) {} + +var Inst G[int] + +func (C) g() +func (*C) h() + +func main() { + // qualified identifiers + var _ lib.T + _ = lib.C + _ = lib.F + _ = lib.V + _ = lib.T.M + + // fields + _ = A{}.B + _ = new(A).B + + _ = A{}.C + _ = new(A).C + + _ = A{}.b + _ = new(A).b + + _ = A{}.c + _ = new(A).c + + _ = Inst.p + _ = G[string]{}.p + + // methods + _ = A{}.f + _ = new(A).f + _ = A{}.g + _ = new(A).g + _ = new(A).h + + _ = B{}.f + _ = new(B).f + + _ = C{}.g + _ = new(C).g + _ = new(C).h + _ = Inst.m + + // method expressions + _ = A.f + _ = (*A).f + _ = B.f + _ = (*B).f + _ = G[string].m +}` + + wantOut := map[string][2]string{ + "lib.T.M": {"method expr (lib.T) M(lib.T)", ".[0]"}, + + "A{}.B": {"field (main.A) B *main.B", ".[0]"}, + "new(A).B": {"field (*main.A) B *main.B", "->[0]"}, + "A{}.C": {"field (main.A) C main.C", ".[1]"}, + "new(A).C": {"field (*main.A) C main.C", "->[1]"}, + "A{}.b": {"field (main.A) b int", "->[0 0]"}, + "new(A).b": {"field (*main.A) b int", "->[0 0]"}, + "A{}.c": {"field (main.A) c int", ".[1 0]"}, + "new(A).c": {"field (*main.A) c int", "->[1 0]"}, + "Inst.p": {"field (main.G[int]) p int", ".[0]"}, + + "A{}.f": {"method (main.A) f(int)", "->[0 0]"}, + "new(A).f": {"method (*main.A) f(int)", "->[0 0]"}, + "A{}.g": {"method (main.A) g()", ".[1 0]"}, + "new(A).g": {"method (*main.A) g()", "->[1 0]"}, + "new(A).h": {"method (*main.A) h()", "->[1 1]"}, // TODO(gri) should this report .[1 1] ? + "B{}.f": {"method (main.B) f(int)", ".[0]"}, + "new(B).f": {"method (*main.B) f(int)", "->[0]"}, + "C{}.g": {"method (main.C) g()", ".[0]"}, + "new(C).g": {"method (*main.C) g()", "->[0]"}, + "new(C).h": {"method (*main.C) h()", "->[1]"}, // TODO(gri) should this report .[1] ? 
+ "Inst.m": {"method (main.G[int]) m(int)", ".[0]"}, + + "A.f": {"method expr (main.A) f(main.A, int)", "->[0 0]"}, + "(*A).f": {"method expr (*main.A) f(*main.A, int)", "->[0 0]"}, + "B.f": {"method expr (main.B) f(main.B, int)", ".[0]"}, + "(*B).f": {"method expr (*main.B) f(*main.B, int)", "->[0]"}, + "G[string].m": {"method expr (main.G[string]) m(main.G[string], string)", ".[0]"}, + "G[string]{}.p": {"field (main.G[string]) p string", ".[0]"}, + } + + makePkg("lib", libSrc) + makePkg("main", mainSrc) + + for e, sel := range selections { + _ = sel.String() // assertion: must not panic + + start := indexFor(mainSrc, syntax.StartPos(e)) + end := indexFor(mainSrc, syntax.EndPos(e)) + segment := mainSrc[start:end] // (all SelectorExprs are in main, not lib) + + direct := "." + if sel.Indirect() { + direct = "->" + } + got := [2]string{ + sel.String(), + fmt.Sprintf("%s%v", direct, sel.Index()), + } + want := wantOut[segment] + if want != got { + t.Errorf("%s: got %q; want %q", segment, got, want) + } + delete(wantOut, segment) + + // We must explicitly assert properties of the + // Signature's receiver since it doesn't participate + // in Identical() or String(). + sig, _ := sel.Type().(*Signature) + if sel.Kind() == MethodVal { + got := sig.Recv().Type() + want := sel.Recv() + if !Identical(got, want) { + t.Errorf("%s: Recv() = %s, want %s", segment, got, want) + } + } else if sig != nil && sig.Recv() != nil { + t.Errorf("%s: signature has receiver %s", sig, sig.Recv().Type()) + } + } + // Assert that all wantOut entries were used exactly once. + for segment := range wantOut { + t.Errorf("no syntax.Selection found with syntax %q", segment) + } +} + +// indexFor returns the index into s corresponding to the position pos. 
+func indexFor(s string, pos syntax.Pos) int { + i, line := 0, 1 // string index and corresponding line + target := int(pos.Line()) + for line < target && i < len(s) { + if s[i] == '\n' { + line++ + } + i++ + } + return i + int(pos.Col()-1) // columns are 1-based +} + +func TestIssue8518(t *testing.T) { + imports := make(testImporter) + conf := Config{ + Error: func(err error) { t.Log(err) }, // don't exit after first error + Importer: imports, + } + makePkg := func(path, src string) { + imports[path], _ = conf.Check(path, []*syntax.File{mustParse(src)}, nil) // errors logged via conf.Error + } + + const libSrc = ` +package a +import "missing" +const C1 = foo +const C2 = missing.C +` + + const mainSrc = ` +package main +import "a" +var _ = a.C1 +var _ = a.C2 +` + + makePkg("a", libSrc) + makePkg("main", mainSrc) // don't crash when type-checking this package +} + +func TestIssue59603(t *testing.T) { + imports := make(testImporter) + conf := Config{ + Error: func(err error) { t.Log(err) }, // don't exit after first error + Importer: imports, + } + makePkg := func(path, src string) { + imports[path], _ = conf.Check(path, []*syntax.File{mustParse(src)}, nil) // errors logged via conf.Error + } + + const libSrc = ` +package a +const C = foo +` + + const mainSrc = ` +package main +import "a" +const _ = a.C +` + + makePkg("a", libSrc) + makePkg("main", mainSrc) // don't crash when type-checking this package +} + +func TestLookupFieldOrMethodOnNil(t *testing.T) { + // LookupFieldOrMethod on a nil type is expected to produce a run-time panic. 
+ defer func() { + const want = "LookupFieldOrMethod on nil type" + p := recover() + if s, ok := p.(string); !ok || s != want { + t.Fatalf("got %v, want %s", p, want) + } + }() + LookupFieldOrMethod(nil, false, nil, "") +} + +func TestLookupFieldOrMethod(t *testing.T) { + // Test cases assume a lookup of the form a.f or x.f, where a stands for an + // addressable value, and x for a non-addressable value (even though a variable + // for ease of test case writing). + var tests = []struct { + src string + found bool + index []int + indirect bool + }{ + // field lookups + {"var x T; type T struct{}", false, nil, false}, + {"var x T; type T struct{ f int }", true, []int{0}, false}, + {"var x T; type T struct{ a, b, f, c int }", true, []int{2}, false}, + + // field lookups on a generic type + {"var x T[int]; type T[P any] struct{}", false, nil, false}, + {"var x T[int]; type T[P any] struct{ f P }", true, []int{0}, false}, + {"var x T[int]; type T[P any] struct{ a, b, f, c P }", true, []int{2}, false}, + + // method lookups + {"var a T; type T struct{}; func (T) f() {}", true, []int{0}, false}, + {"var a *T; type T struct{}; func (T) f() {}", true, []int{0}, true}, + {"var a T; type T struct{}; func (*T) f() {}", true, []int{0}, false}, + {"var a *T; type T struct{}; func (*T) f() {}", true, []int{0}, true}, // TODO(gri) should this report indirect = false? + + // method lookups on a generic type + {"var a T[int]; type T[P any] struct{}; func (T[P]) f() {}", true, []int{0}, false}, + {"var a *T[int]; type T[P any] struct{}; func (T[P]) f() {}", true, []int{0}, true}, + {"var a T[int]; type T[P any] struct{}; func (*T[P]) f() {}", true, []int{0}, false}, + {"var a *T[int]; type T[P any] struct{}; func (*T[P]) f() {}", true, []int{0}, true}, // TODO(gri) should this report indirect = false? 
+ + // collisions + {"type ( E1 struct{ f int }; E2 struct{ f int }; x struct{ E1; *E2 })", false, []int{1, 0}, false}, + {"type ( E1 struct{ f int }; E2 struct{}; x struct{ E1; *E2 }); func (E2) f() {}", false, []int{1, 0}, false}, + + // collisions on a generic type + {"type ( E1[P any] struct{ f P }; E2[P any] struct{ f P }; x struct{ E1[int]; *E2[int] })", false, []int{1, 0}, false}, + {"type ( E1[P any] struct{ f P }; E2[P any] struct{}; x struct{ E1[int]; *E2[int] }); func (E2[P]) f() {}", false, []int{1, 0}, false}, + + // outside methodset + // (*T).f method exists, but value of type T is not addressable + {"var x T; type T struct{}; func (*T) f() {}", false, nil, true}, + + // outside method set of a generic type + {"var x T[int]; type T[P any] struct{}; func (*T[P]) f() {}", false, nil, true}, + + // recursive generic types; see go.dev/issue/52715 + {"var a T[int]; type ( T[P any] struct { *N[P] }; N[P any] struct { *T[P] } ); func (N[P]) f() {}", true, []int{0, 0}, true}, + {"var a T[int]; type ( T[P any] struct { *N[P] }; N[P any] struct { *T[P] } ); func (T[P]) f() {}", true, []int{0}, false}, + } + + for _, test := range tests { + pkg := mustTypecheck("package p;"+test.src, nil, nil) + + obj := pkg.Scope().Lookup("a") + if obj == nil { + if obj = pkg.Scope().Lookup("x"); obj == nil { + t.Errorf("%s: incorrect test case - no object a or x", test.src) + continue + } + } + + f, index, indirect := LookupFieldOrMethod(obj.Type(), obj.Name() == "a", pkg, "f") + if (f != nil) != test.found { + if f == nil { + t.Errorf("%s: got no object; want one", test.src) + } else { + t.Errorf("%s: got object = %v; want none", test.src, f) + } + } + if !sameSlice(index, test.index) { + t.Errorf("%s: got index = %v; want %v", test.src, index, test.index) + } + if indirect != test.indirect { + t.Errorf("%s: got indirect = %v; want %v", test.src, indirect, test.indirect) + } + } +} + +// Test for go.dev/issue/52715 +func TestLookupFieldOrMethod_RecursiveGeneric(t *testing.T) 
{ + const src = ` +package pkg + +type Tree[T any] struct { + *Node[T] +} + +func (*Tree[R]) N(r R) R { return r } + +type Node[T any] struct { + *Tree[T] +} + +type Instance = *Tree[int] +` + + f := mustParse(src) + pkg := NewPackage("pkg", f.PkgName.Value) + if err := NewChecker(nil, pkg, nil).Files([]*syntax.File{f}); err != nil { + panic(err) + } + + T := pkg.Scope().Lookup("Instance").Type() + _, _, _ = LookupFieldOrMethod(T, false, pkg, "M") // verify that LookupFieldOrMethod terminates +} + +func sameSlice(a, b []int) bool { + if len(a) != len(b) { + return false + } + for i, x := range a { + if x != b[i] { + return false + } + } + return true +} + +// TestScopeLookupParent ensures that (*Scope).LookupParent returns +// the correct result at various positions within the source. +func TestScopeLookupParent(t *testing.T) { + imports := make(testImporter) + conf := Config{Importer: imports} + var info Info + makePkg := func(path, src string) { + var err error + imports[path], err = conf.Check(path, []*syntax.File{mustParse(src)}, &info) + if err != nil { + t.Fatal(err) + } + } + + makePkg("lib", "package lib; var X int") + // Each /*name=kind:line*/ comment makes the test look up the + // name at that point and checks that it resolves to a decl of + // the specified kind and line number. "undef" means undefined. + mainSrc := ` +/*lib=pkgname:5*/ /*X=var:1*/ /*Pi=const:8*/ /*T=typename:9*/ /*Y=var:10*/ /*F=func:12*/ +package main + +import "lib" +import . 
"lib" + +const Pi = 3.1415 +type T struct{} +var Y, _ = lib.X, X + +func F[T *U, U any](param1, param2 int) /*param1=undef*/ (res1 /*res1=undef*/, res2 int) /*param1=var:12*/ /*res1=var:12*/ /*U=typename:12*/ { + const pi, e = 3.1415, /*pi=undef*/ 2.71828 /*pi=const:13*/ /*e=const:13*/ + type /*t=undef*/ t /*t=typename:14*/ *t + print(Y) /*Y=var:10*/ + x, Y := Y, /*x=undef*/ /*Y=var:10*/ Pi /*x=var:16*/ /*Y=var:16*/ ; _ = x; _ = Y + var F = /*F=func:12*/ F[*int, int] /*F=var:17*/ ; _ = F + + var a []int + for i, x := range a /*i=undef*/ /*x=var:16*/ { _ = i; _ = x } + + var i interface{} + switch y := i.(type) { /*y=undef*/ + case /*y=undef*/ int /*y=var:23*/ : + case float32, /*y=undef*/ float64 /*y=var:23*/ : + default /*y=var:23*/: + println(y) + } + /*y=undef*/ + + switch int := i.(type) { + case /*int=typename:0*/ int /*int=var:31*/ : + println(int) + default /*int=var:31*/ : + } + + _ = param1 + _ = res1 + return +} +/*main=undef*/ +` + + info.Uses = make(map[*syntax.Name]Object) + makePkg("main", mainSrc) + mainScope := imports["main"].Scope() + + rx := regexp.MustCompile(`^/\*(\w*)=([\w:]*)\*/$`) + + base := syntax.NewFileBase("main") + syntax.CommentsDo(strings.NewReader(mainSrc), func(line, col uint, text string) { + pos := syntax.MakePos(base, line, col) + + // Syntax errors are not comments. + if text[0] != '/' { + t.Errorf("%s: %s", pos, text) + return + } + + // Parse the assertion in the comment. + m := rx.FindStringSubmatch(text) + if m == nil { + t.Errorf("%s: bad comment: %s", pos, text) + return + } + name, want := m[1], m[2] + + // Look up the name in the innermost enclosing scope. 
+ inner := mainScope.Innermost(pos) + if inner == nil { + t.Errorf("%s: at %s: can't find innermost scope", pos, text) + return + } + got := "undef" + if _, obj := inner.LookupParent(name, pos); obj != nil { + kind := strings.ToLower(strings.TrimPrefix(reflect.TypeOf(obj).String(), "*types2.")) + got = fmt.Sprintf("%s:%d", kind, obj.Pos().Line()) + } + if got != want { + t.Errorf("%s: at %s: %s resolved to %s, want %s", pos, text, name, got, want) + } + }) + + // Check that for each referring identifier, + // a lookup of its name on the innermost + // enclosing scope returns the correct object. + + for id, wantObj := range info.Uses { + inner := mainScope.Innermost(id.Pos()) + if inner == nil { + t.Errorf("%s: can't find innermost scope enclosing %q", id.Pos(), id.Value) + continue + } + + // Exclude selectors and qualified identifiers---lexical + // refs only. (Ideally, we'd see if the AST parent is a + // SelectorExpr, but that requires PathEnclosingInterval + // from golang.org/x/tools/go/ast/astutil.) + if id.Value == "X" { + continue + } + + _, gotObj := inner.LookupParent(id.Value, id.Pos()) + if gotObj != wantObj { + // Print the scope tree of mainScope in case of error. + var printScopeTree func(indent string, s *Scope) + printScopeTree = func(indent string, s *Scope) { + t.Logf("%sscope %s %v-%v = %v", + indent, + ScopeComment(s), + s.Pos(), + s.End(), + s.Names()) + for i := range s.NumChildren() { + printScopeTree(indent+" ", s.Child(i)) + } + } + printScopeTree("", mainScope) + + t.Errorf("%s: Scope(%s).LookupParent(%s@%v) got %v, want %v [scopePos=%v]", + id.Pos(), + ScopeComment(inner), + id.Value, + id.Pos(), + gotObj, + wantObj, + ObjectScopePos(wantObj)) + continue + } + } +} + +// newDefined creates a new defined type named T with the given underlying type. 
+func newDefined(underlying Type) *Named { + tname := NewTypeName(nopos, nil, "T", nil) + return NewNamed(tname, underlying, nil) +} + +func TestConvertibleTo(t *testing.T) { + for _, test := range []struct { + v, t Type + want bool + }{ + {Typ[Int], Typ[Int], true}, + {Typ[Int], Typ[Float32], true}, + {Typ[Int], Typ[String], true}, + {newDefined(Typ[Int]), Typ[Int], true}, + {newDefined(new(Struct)), new(Struct), true}, + {newDefined(Typ[Int]), new(Struct), false}, + {Typ[UntypedInt], Typ[Int], true}, + {NewSlice(Typ[Int]), NewArray(Typ[Int], 10), true}, + {NewSlice(Typ[Int]), NewArray(Typ[Uint], 10), false}, + {NewSlice(Typ[Int]), NewPointer(NewArray(Typ[Int], 10)), true}, + {NewSlice(Typ[Int]), NewPointer(NewArray(Typ[Uint], 10)), false}, + // Untyped string values are not permitted by the spec, so the behavior below is undefined. + {Typ[UntypedString], Typ[String], true}, + } { + if got := ConvertibleTo(test.v, test.t); got != test.want { + t.Errorf("ConvertibleTo(%v, %v) = %t, want %t", test.v, test.t, got, test.want) + } + } +} + +func TestAssignableTo(t *testing.T) { + for _, test := range []struct { + v, t Type + want bool + }{ + {Typ[Int], Typ[Int], true}, + {Typ[Int], Typ[Float32], false}, + {newDefined(Typ[Int]), Typ[Int], false}, + {newDefined(new(Struct)), new(Struct), true}, + {Typ[UntypedBool], Typ[Bool], true}, + {Typ[UntypedString], Typ[Bool], false}, + // Neither untyped string nor untyped numeric assignments arise during + // normal type checking, so the below behavior is technically undefined by + // the spec. + {Typ[UntypedString], Typ[String], true}, + {Typ[UntypedInt], Typ[Int], true}, + } { + if got := AssignableTo(test.v, test.t); got != test.want { + t.Errorf("AssignableTo(%v, %v) = %t, want %t", test.v, test.t, got, test.want) + } + } +} + +func TestIdentical(t *testing.T) { + // For each test, we compare the types of objects X and Y in the source. + tests := []struct { + src string + want bool + }{ + // Basic types. 
+ {"var X int; var Y int", true}, + {"var X int; var Y string", false}, + + // TODO: add more tests for complex types. + + // Named types. + {"type X int; type Y int", false}, + + // Aliases. + {"type X = int; type Y = int", true}, + + // Functions. + {`func X(int) string { return "" }; func Y(int) string { return "" }`, true}, + {`func X() string { return "" }; func Y(int) string { return "" }`, false}, + {`func X(int) string { return "" }; func Y(int) {}`, false}, + + // Generic functions. Type parameters should be considered identical modulo + // renaming. See also go.dev/issue/49722. + {`func X[P ~int](){}; func Y[Q ~int]() {}`, true}, + {`func X[P1 any, P2 ~*P1](){}; func Y[Q1 any, Q2 ~*Q1]() {}`, true}, + {`func X[P1 any, P2 ~[]P1](){}; func Y[Q1 any, Q2 ~*Q1]() {}`, false}, + {`func X[P ~int](P){}; func Y[Q ~int](Q) {}`, true}, + {`func X[P ~string](P){}; func Y[Q ~int](Q) {}`, false}, + {`func X[P ~int]([]P){}; func Y[Q ~int]([]Q) {}`, true}, + } + + for _, test := range tests { + pkg := mustTypecheck("package p;"+test.src, nil, nil) + X := pkg.Scope().Lookup("X") + Y := pkg.Scope().Lookup("Y") + if X == nil || Y == nil { + t.Fatal("test must declare both X and Y") + } + if got := Identical(X.Type(), Y.Type()); got != test.want { + t.Errorf("Identical(%s, %s) = %t, want %t", X.Type(), Y.Type(), got, test.want) + } + } +} + +func TestIdentical_issue15173(t *testing.T) { + // Identical should allow nil arguments and be symmetric. 
+ for _, test := range []struct { + x, y Type + want bool + }{ + {Typ[Int], Typ[Int], true}, + {Typ[Int], nil, false}, + {nil, Typ[Int], false}, + {nil, nil, true}, + } { + if got := Identical(test.x, test.y); got != test.want { + t.Errorf("Identical(%v, %v) = %t", test.x, test.y, got) + } + } +} + +func TestIdenticalUnions(t *testing.T) { + tname := NewTypeName(nopos, nil, "myInt", nil) + myInt := NewNamed(tname, Typ[Int], nil) + tmap := map[string]*Term{ + "int": NewTerm(false, Typ[Int]), + "~int": NewTerm(true, Typ[Int]), + "string": NewTerm(false, Typ[String]), + "~string": NewTerm(true, Typ[String]), + "myInt": NewTerm(false, myInt), + } + makeUnion := func(s string) *Union { + parts := strings.Split(s, "|") + var terms []*Term + for _, p := range parts { + term := tmap[p] + if term == nil { + t.Fatalf("missing term %q", p) + } + terms = append(terms, term) + } + return NewUnion(terms) + } + for _, test := range []struct { + x, y string + want bool + }{ + // These tests are just sanity checks. The tests for type sets and + // interfaces provide much more test coverage. + {"int|~int", "~int", true}, + {"myInt|~int", "~int", true}, + {"int|string", "string|int", true}, + {"int|int|string", "string|int", true}, + {"myInt|string", "int|string", false}, + } { + x := makeUnion(test.x) + y := makeUnion(test.y) + if got := Identical(x, y); got != test.want { + t.Errorf("Identical(%v, %v) = %t", test.x, test.y, got) + } + } +} + +func TestIssue61737(t *testing.T) { + // This test verifies that it is possible to construct invalid interfaces + // containing duplicate methods using the go/types API. + // + // It must be possible for importers to construct such invalid interfaces. + // Previously, this panicked. 
+ + sig1 := NewSignatureType(nil, nil, nil, NewTuple(NewParam(nopos, nil, "", Typ[Int])), nil, false) + sig2 := NewSignatureType(nil, nil, nil, NewTuple(NewParam(nopos, nil, "", Typ[String])), nil, false) + + methods := []*Func{ + NewFunc(nopos, nil, "M", sig1), + NewFunc(nopos, nil, "M", sig2), + } + + embeddedMethods := []*Func{ + NewFunc(nopos, nil, "M", sig2), + } + embedded := NewInterfaceType(embeddedMethods, nil) + iface := NewInterfaceType(methods, []Type{embedded}) + iface.NumMethods() // unlike go/types, there is no Complete() method, so we complete implicitly +} + +func TestNewAlias_Issue65455(t *testing.T) { + obj := NewTypeName(nopos, nil, "A", nil) + alias := NewAlias(obj, Typ[Int]) + alias.Underlying() // must not panic +} + +func TestIssue15305(t *testing.T) { + const src = "package p; func f() int16; var _ = f(undef)" + f := mustParse(src) + conf := Config{ + Error: func(err error) {}, // allow errors + } + info := &Info{ + Types: make(map[syntax.Expr]TypeAndValue), + } + conf.Check("p", []*syntax.File{f}, info) // ignore result + for e, tv := range info.Types { + if _, ok := e.(*syntax.CallExpr); ok { + if tv.Type != Typ[Int16] { + t.Errorf("CallExpr has type %v, want int16", tv.Type) + } + return + } + } + t.Errorf("CallExpr has no type") +} + +// TestCompositeLitTypes verifies that Info.Types registers the correct +// types for composite literal expressions and composite literal type +// expressions. 
+func TestCompositeLitTypes(t *testing.T) { + for i, test := range []struct { + lit, typ string + }{ + {`[16]byte{}`, `[16]byte`}, + {`[...]byte{}`, `[0]byte`}, // test for go.dev/issue/14092 + {`[...]int{1, 2, 3}`, `[3]int`}, // test for go.dev/issue/14092 + {`[...]int{90: 0, 98: 1, 2}`, `[100]int`}, // test for go.dev/issue/14092 + {`[]int{}`, `[]int`}, + {`map[string]bool{"foo": true}`, `map[string]bool`}, + {`struct{}{}`, `struct{}`}, + {`struct{x, y int; z complex128}{}`, `struct{x int; y int; z complex128}`}, + } { + f := mustParse(fmt.Sprintf("package p%d; var _ = %s", i, test.lit)) + types := make(map[syntax.Expr]TypeAndValue) + if _, err := new(Config).Check("p", []*syntax.File{f}, &Info{Types: types}); err != nil { + t.Fatalf("%s: %v", test.lit, err) + } + + cmptype := func(x syntax.Expr, want string) { + tv, ok := types[x] + if !ok { + t.Errorf("%s: no Types entry found", test.lit) + return + } + if tv.Type == nil { + t.Errorf("%s: type is nil", test.lit) + return + } + if got := tv.Type.String(); got != want { + t.Errorf("%s: got %v, want %s", test.lit, got, want) + } + } + + // test type of composite literal expression + rhs := f.DeclList[0].(*syntax.VarDecl).Values + cmptype(rhs, test.typ) + + // test type of composite literal type expression + cmptype(rhs.(*syntax.CompositeLit).Type, test.typ) + } +} + +// TestObjectParents verifies that objects have parent scopes or not +// as specified by the Object interface. 
+func TestObjectParents(t *testing.T) { + const src = ` +package p + +const C = 0 + +type T1 struct { + a, b int + T2 +} + +type T2 interface { + im1() + im2() +} + +func (T1) m1() {} +func (*T1) m2() {} + +func f(x int) { y := x; print(y) } +` + + f := mustParse(src) + + info := &Info{ + Defs: make(map[*syntax.Name]Object), + } + if _, err := new(Config).Check("p", []*syntax.File{f}, info); err != nil { + t.Fatal(err) + } + + for ident, obj := range info.Defs { + if obj == nil { + // only package names and implicit vars have a nil object + // (in this test we only need to handle the package name) + if ident.Value != "p" { + t.Errorf("%v has nil object", ident) + } + continue + } + + // struct fields, type-associated and interface methods + // have no parent scope + wantParent := true + switch obj := obj.(type) { + case *Var: + if obj.IsField() { + wantParent = false + } + case *Func: + if obj.Type().(*Signature).Recv() != nil { // method + wantParent = false + } + } + + gotParent := obj.Parent() != nil + switch { + case gotParent && !wantParent: + t.Errorf("%v: want no parent, got %s", ident, obj.Parent()) + case !gotParent && wantParent: + t.Errorf("%v: no parent found", ident) + } + } +} + +// TestFailedImport tests that we don't get follow-on errors +// elsewhere in a package due to failing to import a package. 
+func TestFailedImport(t *testing.T) { + testenv.MustHaveGoBuild(t) + + const src = ` +package p + +import foo "go/types/thisdirectorymustnotexistotherwisethistestmayfail/foo" // should only see an error here + +const c = foo.C +type T = foo.T +var v T = c +func f(x T) T { return foo.F(x) } +` + f := mustParse(src) + files := []*syntax.File{f} + + // type-check using all possible importers + for _, compiler := range []string{"gc", "gccgo", "source"} { + errcount := 0 + conf := Config{ + Error: func(err error) { + // we should only see the import error + if errcount > 0 || !strings.Contains(err.Error(), "could not import") { + t.Errorf("for %s importer, got unexpected error: %v", compiler, err) + } + errcount++ + }, + //Importer: importer.For(compiler, nil), + } + + info := &Info{ + Uses: make(map[*syntax.Name]Object), + } + pkg, _ := conf.Check("p", files, info) + if pkg == nil { + t.Errorf("for %s importer, type-checking failed to return a package", compiler) + continue + } + + imports := pkg.Imports() + if len(imports) != 1 { + t.Errorf("for %s importer, got %d imports, want 1", compiler, len(imports)) + continue + } + imp := imports[0] + if imp.Name() != "foo" { + t.Errorf(`for %s importer, got %q, want "foo"`, compiler, imp.Name()) + continue + } + + // verify that all uses of foo refer to the imported package foo (imp) + for ident, obj := range info.Uses { + if ident.Value == "foo" { + if obj, ok := obj.(*PkgName); ok { + if obj.Imported() != imp { + t.Errorf("%s resolved to %v; want %v", ident.Value, obj.Imported(), imp) + } + } else { + t.Errorf("%s resolved to %v; want package name", ident.Value, obj) + } + } + } + } +} + +func TestInstantiate(t *testing.T) { + // eventually we like more tests but this is a start + const src = "package p; type T[P any] *T[P]" + pkg := mustTypecheck(src, nil, nil) + + // type T should have one type parameter + T := pkg.Scope().Lookup("T").Type().(*Named) + if n := T.TypeParams().Len(); n != 1 { + t.Fatalf("expected 1 type 
parameter; found %d", n) + } + + // instantiation should succeed (no endless recursion) + // even with a nil *Checker + res, err := Instantiate(nil, T, []Type{Typ[Int]}, false) + if err != nil { + t.Fatal(err) + } + + // instantiated type should point to itself + if p := res.Underlying().(*Pointer).Elem(); p != res { + t.Fatalf("unexpected result type: %s points to %s", res, p) + } +} + +func TestInstantiateConcurrent(t *testing.T) { + const src = `package p + +type I[P any] interface { + m(P) + n() P +} + +type J = I[int] + +type Nested[P any] *interface{b(P)} + +type K = Nested[string] +` + pkg := mustTypecheck(src, nil, nil) + + insts := []*Interface{ + pkg.Scope().Lookup("J").Type().Underlying().(*Interface), + pkg.Scope().Lookup("K").Type().Underlying().(*Pointer).Elem().(*Interface), + } + + // Use the interface instances concurrently. + for _, inst := range insts { + var ( + counts [2]int // method counts + methods [2][]string // method strings + ) + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + i := i + wg.Add(1) + go func() { + defer wg.Done() + + counts[i] = inst.NumMethods() + for mi := 0; mi < counts[i]; mi++ { + methods[i] = append(methods[i], inst.Method(mi).String()) + } + }() + } + wg.Wait() + + if counts[0] != counts[1] { + t.Errorf("mismatching method counts for %s: %d vs %d", inst, counts[0], counts[1]) + continue + } + for i := 0; i < counts[0]; i++ { + if m0, m1 := methods[0][i], methods[1][i]; m0 != m1 { + t.Errorf("mismatching methods for %s: %s vs %s", inst, m0, m1) + } + } + } +} + +func TestInstantiateErrors(t *testing.T) { + tests := []struct { + src string // by convention, T must be the type being instantiated + targs []Type + wantAt int // -1 indicates no error + }{ + {"type T[P interface{~string}] int", []Type{Typ[Int]}, 0}, + {"type T[P1 interface{int}, P2 interface{~string}] int", []Type{Typ[Int], Typ[Int]}, 1}, + {"type T[P1 any, P2 interface{~[]P1}] int", []Type{Typ[Int], NewSlice(Typ[String])}, 1}, + {"type T[P1 
interface{~[]P2}, P2 any] int", []Type{NewSlice(Typ[String]), Typ[Int]}, 0}, + } + + for _, test := range tests { + src := "package p; " + test.src + pkg := mustTypecheck(src, nil, nil) + + T := pkg.Scope().Lookup("T").Type().(*Named) + + _, err := Instantiate(nil, T, test.targs, true) + if err == nil { + t.Fatalf("Instantiate(%v, %v) returned nil error, want non-nil", T, test.targs) + } + + var argErr *ArgumentError + if !errors.As(err, &argErr) { + t.Fatalf("Instantiate(%v, %v): error is not an *ArgumentError", T, test.targs) + } + + if argErr.Index != test.wantAt { + t.Errorf("Instantiate(%v, %v): error at index %d, want index %d", T, test.targs, argErr.Index, test.wantAt) + } + } +} + +func TestArgumentErrorUnwrapping(t *testing.T) { + var err error = &ArgumentError{ + Index: 1, + Err: Error{Msg: "test"}, + } + var e Error + if !errors.As(err, &e) { + t.Fatalf("error %v does not wrap types.Error", err) + } + if e.Msg != "test" { + t.Errorf("e.Msg = %q, want %q", e.Msg, "test") + } +} + +func TestInstanceIdentity(t *testing.T) { + imports := make(testImporter) + conf := Config{Importer: imports} + makePkg := func(src string) { + f := mustParse(src) + name := f.PkgName.Value + pkg, err := conf.Check(name, []*syntax.File{f}, nil) + if err != nil { + t.Fatal(err) + } + imports[name] = pkg + } + makePkg(`package lib; type T[P any] struct{}`) + makePkg(`package a; import "lib"; var A lib.T[int]`) + makePkg(`package b; import "lib"; var B lib.T[int]`) + a := imports["a"].Scope().Lookup("A") + b := imports["b"].Scope().Lookup("B") + if !Identical(a.Type(), b.Type()) { + t.Errorf("mismatching types: a.A: %s, b.B: %s", a.Type(), b.Type()) + } +} + +// TestInstantiatedObjects verifies properties of instantiated objects. 
+func TestInstantiatedObjects(t *testing.T) { + const src = ` +package p + +type T[P any] struct { + field P +} + +func (recv *T[Q]) concreteMethod(mParam Q) (mResult Q) { return } + +type FT[P any] func(ftParam P) (ftResult P) + +func F[P any](fParam P) (fResult P){ return } + +type I[P any] interface { + interfaceMethod(P) +} + +type R[P any] T[P] + +func (R[P]) m() {} // having a method triggers expansion of R + +var ( + t T[int] + ft FT[int] + f = F[int] + i I[int] +) + +func fn() { + var r R[int] + _ = r +} +` + info := &Info{ + Defs: make(map[*syntax.Name]Object), + } + f := mustParse(src) + conf := Config{} + pkg, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, info) + if err != nil { + t.Fatal(err) + } + + lookup := func(name string) Type { return pkg.Scope().Lookup(name).Type() } + fnScope := pkg.Scope().Lookup("fn").(*Func).Scope() + + tests := []struct { + name string + obj Object + }{ + // Struct fields + {"field", lookup("t").Underlying().(*Struct).Field(0)}, + {"field", fnScope.Lookup("r").Type().Underlying().(*Struct).Field(0)}, + + // Methods and method fields + {"concreteMethod", lookup("t").(*Named).Method(0)}, + {"recv", lookup("t").(*Named).Method(0).Type().(*Signature).Recv()}, + {"mParam", lookup("t").(*Named).Method(0).Type().(*Signature).Params().At(0)}, + {"mResult", lookup("t").(*Named).Method(0).Type().(*Signature).Results().At(0)}, + + // Interface methods + {"interfaceMethod", lookup("i").Underlying().(*Interface).Method(0)}, + + // Function type fields + {"ftParam", lookup("ft").Underlying().(*Signature).Params().At(0)}, + {"ftResult", lookup("ft").Underlying().(*Signature).Results().At(0)}, + + // Function fields + {"fParam", lookup("f").(*Signature).Params().At(0)}, + {"fResult", lookup("f").(*Signature).Results().At(0)}, + } + + // Collect all identifiers by name. 
+ idents := make(map[string][]*syntax.Name) + syntax.Inspect(f, func(n syntax.Node) bool { + if id, ok := n.(*syntax.Name); ok { + idents[id.Value] = append(idents[id.Value], id) + } + return true + }) + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + if got := len(idents[test.name]); got != 1 { + t.Fatalf("found %d identifiers named %s, want 1", got, test.name) + } + ident := idents[test.name][0] + def := info.Defs[ident] + if def == test.obj { + t.Fatalf("info.Defs[%s] contains the test object", test.name) + } + if orig := originObject(test.obj); def != orig { + t.Errorf("info.Defs[%s] does not match obj.Origin()", test.name) + } + if def.Pkg() != test.obj.Pkg() { + t.Errorf("Pkg() = %v, want %v", def.Pkg(), test.obj.Pkg()) + } + if def.Name() != test.obj.Name() { + t.Errorf("Name() = %v, want %v", def.Name(), test.obj.Name()) + } + if def.Pos() != test.obj.Pos() { + t.Errorf("Pos() = %v, want %v", def.Pos(), test.obj.Pos()) + } + if def.Parent() != test.obj.Parent() { + t.Fatalf("Parent() = %v, want %v", def.Parent(), test.obj.Parent()) + } + if def.Exported() != test.obj.Exported() { + t.Fatalf("Exported() = %v, want %v", def.Exported(), test.obj.Exported()) + } + if def.Id() != test.obj.Id() { + t.Fatalf("Id() = %v, want %v", def.Id(), test.obj.Id()) + } + // String and Type are expected to differ. 
+ }) + } +} + +func originObject(obj Object) Object { + switch obj := obj.(type) { + case *Var: + return obj.Origin() + case *Func: + return obj.Origin() + } + return obj +} + +func TestImplements(t *testing.T) { + const src = ` +package p + +type EmptyIface interface{} + +type I interface { + m() +} + +type C interface { + m() + ~int +} + +type Integer interface{ + int8 | int16 | int32 | int64 +} + +type EmptyTypeSet interface{ + Integer + ~string +} + +type N1 int +func (N1) m() {} + +type N2 int +func (*N2) m() {} + +type N3 int +func (N3) m(int) {} + +type N4 string +func (N4) m() + +type Bad Bad // invalid type +` + + f := mustParse(src) + conf := Config{Error: func(error) {}} + pkg, _ := conf.Check(f.PkgName.Value, []*syntax.File{f}, nil) + + lookup := func(tname string) Type { return pkg.Scope().Lookup(tname).Type() } + var ( + EmptyIface = lookup("EmptyIface").Underlying().(*Interface) + I = lookup("I").(*Named) + II = I.Underlying().(*Interface) + C = lookup("C").(*Named) + CI = C.Underlying().(*Interface) + Integer = lookup("Integer").Underlying().(*Interface) + EmptyTypeSet = lookup("EmptyTypeSet").Underlying().(*Interface) + N1 = lookup("N1") + N1p = NewPointer(N1) + N2 = lookup("N2") + N2p = NewPointer(N2) + N3 = lookup("N3") + N4 = lookup("N4") + Bad = lookup("Bad") + ) + + tests := []struct { + V Type + T *Interface + want bool + }{ + {I, II, true}, + {I, CI, false}, + {C, II, true}, + {C, CI, true}, + {Typ[Int8], Integer, true}, + {Typ[Int64], Integer, true}, + {Typ[String], Integer, false}, + {EmptyTypeSet, II, true}, + {EmptyTypeSet, EmptyTypeSet, true}, + {Typ[Int], EmptyTypeSet, false}, + {N1, II, true}, + {N1, CI, true}, + {N1p, II, true}, + {N1p, CI, false}, + {N2, II, false}, + {N2, CI, false}, + {N2p, II, true}, + {N2p, CI, false}, + {N3, II, false}, + {N3, CI, false}, + {N4, II, true}, + {N4, CI, false}, + {Bad, II, false}, + {Bad, CI, false}, + {Bad, EmptyIface, true}, + } + + for _, test := range tests { + if got := Implements(test.V, 
test.T); got != test.want { + t.Errorf("Implements(%s, %s) = %t, want %t", test.V, test.T, got, test.want) + } + + // The type assertion x.(T) is valid if T is an interface or if T implements the type of x. + // The assertion is never valid if T is a bad type. + V := test.T + T := test.V + want := false + if _, ok := T.Underlying().(*Interface); (ok || Implements(T, V)) && T != Bad { + want = true + } + if got := AssertableTo(V, T); got != want { + t.Errorf("AssertableTo(%s, %s) = %t, want %t", V, T, got, want) + } + } +} + +func TestMissingMethodAlternative(t *testing.T) { + const src = ` +package p +type T interface { + m() +} + +type V0 struct{} +func (V0) m() {} + +type V1 struct{} + +type V2 struct{} +func (V2) m() int + +type V3 struct{} +func (*V3) m() + +type V4 struct{} +func (V4) M() +` + + pkg := mustTypecheck(src, nil, nil) + + T := pkg.Scope().Lookup("T").Type().Underlying().(*Interface) + lookup := func(name string) (*Func, bool) { + return MissingMethod(pkg.Scope().Lookup(name).Type(), T, true) + } + + // V0 has method m with correct signature. Should not report wrongType. + method, wrongType := lookup("V0") + if method != nil || wrongType { + t.Fatalf("V0: got method = %v, wrongType = %v", method, wrongType) + } + + checkMissingMethod := func(tname string, reportWrongType bool) { + method, wrongType := lookup(tname) + if method == nil || method.Name() != "m" || wrongType != reportWrongType { + t.Fatalf("%s: got method = %v, wrongType = %v", tname, method, wrongType) + } + } + + // V1 has no method m. Should not report wrongType. + checkMissingMethod("V1", false) + + // V2 has method m with wrong signature type (ignoring receiver). Should report wrongType. + checkMissingMethod("V2", true) + + // V3 has no method m but it exists on *V3. Should report wrongType. + checkMissingMethod("V3", true) + + // V4 has no method m but has M. Should not report wrongType. 
+ checkMissingMethod("V4", false) +} + +func TestErrorURL(t *testing.T) { + conf := Config{ErrorURL: " [go.dev/e/%s]"} + + // test case for a one-line error + const src1 = ` +package p +var _ T +` + _, err := typecheck(src1, &conf, nil) + if err == nil || !strings.HasSuffix(err.Error(), " [go.dev/e/UndeclaredName]") { + t.Errorf("src1: unexpected error: got %v", err) + } + + // test case for a multi-line error + const src2 = ` +package p +func f() int { return 0 } +var _ = f(1, 2) +` + _, err = typecheck(src2, &conf, nil) + if err == nil || !strings.Contains(err.Error(), " [go.dev/e/WrongArgCount]\n") { + t.Errorf("src1: unexpected error: got %v", err) + } +} + +func TestModuleVersion(t *testing.T) { + // version go1.dd must be able to typecheck go1.dd.0, go1.dd.1, etc. + goversion := fmt.Sprintf("go1.%d", goversion.Version) + for _, v := range []string{ + goversion, + goversion + ".0", + goversion + ".1", + goversion + ".rc", + } { + conf := Config{GoVersion: v} + pkg := mustTypecheck("package p", &conf, nil) + if pkg.GoVersion() != conf.GoVersion { + t.Errorf("got %s; want %s", pkg.GoVersion(), conf.GoVersion) + } + } +} + +func TestFileVersions(t *testing.T) { + for _, test := range []struct { + goVersion string + fileVersion string + wantVersion string + }{ + {"", "", ""}, // no versions specified + {"go1.19", "", "go1.19"}, // module version specified + {"", "go1.20", ""}, // file upgrade ignored + {"go1.19", "go1.20", "go1.20"}, // file upgrade permitted + {"go1.20", "go1.19", "go1.20"}, // file downgrade not permitted + {"go1.21", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) + + // versions containing release numbers + // (file versions containing release numbers are considered invalid) + {"go1.19.0", "", "go1.19.0"}, // no file version specified + {"go1.20", "go1.20.1", "go1.20"}, // file upgrade ignored + {"go1.20.1", "go1.20", "go1.20.1"}, // file upgrade ignored + {"go1.20.1", "go1.21", "go1.21"}, // file upgrade 
permitted + {"go1.20.1", "go1.19", "go1.20.1"}, // file downgrade not permitted + {"go1.21.1", "go1.19.1", "go1.21.1"}, // file downgrade not permitted (invalid file version) + {"go1.21.1", "go1.19", "go1.19"}, // file downgrade permitted (module version is >= go1.21) + } { + var src string + if test.fileVersion != "" { + src = "//go:build " + test.fileVersion + "\n" + } + src += "package p" + + conf := Config{GoVersion: test.goVersion} + versions := make(map[*syntax.PosBase]string) + var info Info + info.FileVersions = versions + mustTypecheck(src, &conf, &info) + + n := 0 + for _, v := range info.FileVersions { + want := test.wantVersion + if v != want { + t.Errorf("%q: unexpected file version: got %v, want %v", src, v, want) + } + n++ + } + if n != 1 { + t.Errorf("%q: incorrect number of map entries: got %d", src, n) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/array.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/array.go new file mode 100644 index 0000000000000000000000000000000000000000..502d49bc25770f7bdcb3f03ba89652e64318b3c8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/array.go @@ -0,0 +1,25 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +// An Array represents an array type. +type Array struct { + len int64 + elem Type +} + +// NewArray returns a new array type for the given element type and length. +// A negative length indicates an unknown length. +func NewArray(elem Type, len int64) *Array { return &Array{len: len, elem: elem} } + +// Len returns the length of array a. +// A negative result indicates an unknown length. +func (a *Array) Len() int64 { return a.len } + +// Elem returns element type of array a. 
+func (a *Array) Elem() Type { return a.elem } + +func (a *Array) Underlying() Type { return a } +func (a *Array) String() string { return TypeString(a, nil) } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/assignments.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/assignments.go new file mode 100644 index 0000000000000000000000000000000000000000..8abafdba1bac038968248244b68140392efe631e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/assignments.go @@ -0,0 +1,575 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements initialization and assignment checks. + +package types2 + +import ( + "cmd/compile/internal/syntax" + "fmt" + . "internal/types/errors" + "strings" +) + +// assignment reports whether x can be assigned to a variable of type T, +// if necessary by attempting to convert untyped values to the appropriate +// type. context describes the context in which the assignment takes place. +// Use T == nil to indicate assignment to an untyped blank identifier. +// If the assignment check fails, x.mode is set to invalid. +func (check *Checker) assignment(x *operand, T Type, context string) { + check.singleValue(x) + + switch x.mode { + case invalid: + return // error reported before + case constant_, variable, mapindex, value, nilvalue, commaok, commaerr: + // ok + default: + // we may get here because of other problems (go.dev/issue/39634, crash 12) + // TODO(gri) do we need a new "generic" error code here? 
+ check.errorf(x, IncompatibleAssign, "cannot assign %s to %s in %s", x, T, context) + x.mode = invalid + return + } + + if isUntyped(x.typ) { + target := T + // spec: "If an untyped constant is assigned to a variable of interface + // type or the blank identifier, the constant is first converted to type + // bool, rune, int, float64, complex128 or string respectively, depending + // on whether the value is a boolean, rune, integer, floating-point, + // complex, or string constant." + if x.isNil() { + if T == nil { + check.errorf(x, UntypedNilUse, "use of untyped nil in %s", context) + x.mode = invalid + return + } + } else if T == nil || isNonTypeParamInterface(T) { + target = Default(x.typ) + } + newType, val, code := check.implicitTypeAndValue(x, target) + if code != 0 { + msg := check.sprintf("cannot use %s as %s value in %s", x, target, context) + switch code { + case TruncatedFloat: + msg += " (truncated)" + case NumericOverflow: + msg += " (overflows)" + default: + code = IncompatibleAssign + } + check.error(x, code, msg) + x.mode = invalid + return + } + if val != nil { + x.val = val + check.updateExprVal(x.expr, val) + } + if newType != x.typ { + x.typ = newType + check.updateExprType(x.expr, newType, false) + } + } + // x.typ is typed + + // A generic (non-instantiated) function value cannot be assigned to a variable. + if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 { + check.errorf(x, WrongTypeArgCount, "cannot use generic function %s without instantiation in %s", x, context) + x.mode = invalid + return + } + + // spec: "If a left-hand side is the blank identifier, any typed or + // non-constant value except for the predeclared identifier nil may + // be assigned to it." 
+ if T == nil { + return + } + + cause := "" + if ok, code := x.assignableTo(check, T, &cause); !ok { + if cause != "" { + check.errorf(x, code, "cannot use %s as %s value in %s: %s", x, T, context, cause) + } else { + check.errorf(x, code, "cannot use %s as %s value in %s", x, T, context) + } + x.mode = invalid + } +} + +func (check *Checker) initConst(lhs *Const, x *operand) { + if x.mode == invalid || !isValid(x.typ) || !isValid(lhs.typ) { + if lhs.typ == nil { + lhs.typ = Typ[Invalid] + } + return + } + + // rhs must be a constant + if x.mode != constant_ { + check.errorf(x, InvalidConstInit, "%s is not constant", x) + if lhs.typ == nil { + lhs.typ = Typ[Invalid] + } + return + } + assert(isConstType(x.typ)) + + // If the lhs doesn't have a type yet, use the type of x. + if lhs.typ == nil { + lhs.typ = x.typ + } + + check.assignment(x, lhs.typ, "constant declaration") + if x.mode == invalid { + return + } + + lhs.val = x.val +} + +// initVar checks the initialization lhs = x in a variable declaration. +// If lhs doesn't have a type yet, it is given the type of x, +// or Typ[Invalid] in case of an error. +// If the initialization check fails, x.mode is set to invalid. +func (check *Checker) initVar(lhs *Var, x *operand, context string) { + if x.mode == invalid || !isValid(x.typ) || !isValid(lhs.typ) { + if lhs.typ == nil { + lhs.typ = Typ[Invalid] + } + x.mode = invalid + return + } + + // If lhs doesn't have a type yet, use the type of x. + if lhs.typ == nil { + typ := x.typ + if isUntyped(typ) { + // convert untyped types to default types + if typ == Typ[UntypedNil] { + check.errorf(x, UntypedNilUse, "use of untyped nil in %s", context) + lhs.typ = Typ[Invalid] + x.mode = invalid + return + } + typ = Default(typ) + } + lhs.typ = typ + } + + check.assignment(x, lhs.typ, context) +} + +// lhsVar checks a lhs variable in an assignment and returns its type. +// lhsVar takes care of not counting a lhs identifier as a "use" of +// that identifier. 
The result is nil if it is the blank identifier, +// and Typ[Invalid] if it is an invalid lhs expression. +func (check *Checker) lhsVar(lhs syntax.Expr) Type { + // Determine if the lhs is a (possibly parenthesized) identifier. + ident, _ := syntax.Unparen(lhs).(*syntax.Name) + + // Don't evaluate lhs if it is the blank identifier. + if ident != nil && ident.Value == "_" { + check.recordDef(ident, nil) + return nil + } + + // If the lhs is an identifier denoting a variable v, this reference + // is not a 'use' of v. Remember current value of v.used and restore + // after evaluating the lhs via check.expr. + var v *Var + var v_used bool + if ident != nil { + if obj := check.lookup(ident.Value); obj != nil { + // It's ok to mark non-local variables, but ignore variables + // from other packages to avoid potential race conditions with + // dot-imported variables. + if w, _ := obj.(*Var); w != nil && w.pkg == check.pkg { + v = w + v_used = v.used + } + } + } + + var x operand + check.expr(nil, &x, lhs) + + if v != nil { + v.used = v_used // restore v.used + } + + if x.mode == invalid || !isValid(x.typ) { + return Typ[Invalid] + } + + // spec: "Each left-hand side operand must be addressable, a map index + // expression, or the blank identifier. Operands may be parenthesized." + switch x.mode { + case invalid: + return Typ[Invalid] + case variable, mapindex: + // ok + default: + if sel, ok := x.expr.(*syntax.SelectorExpr); ok { + var op operand + check.expr(nil, &op, sel.X) + if op.mode == mapindex { + check.errorf(&x, UnaddressableFieldAssign, "cannot assign to struct field %s in map", syntax.String(x.expr)) + return Typ[Invalid] + } + } + check.errorf(&x, UnassignableOperand, "cannot assign to %s (neither addressable nor a map index expression)", x.expr) + return Typ[Invalid] + } + + return x.typ +} + +// assignVar checks the assignment lhs = rhs (if x == nil), or lhs = x (if x != nil). +// If x != nil, it must be the evaluation of rhs (and rhs will be ignored). 
+// If the assignment check fails and x != nil, x.mode is set to invalid. +func (check *Checker) assignVar(lhs, rhs syntax.Expr, x *operand, context string) { + T := check.lhsVar(lhs) // nil if lhs is _ + if !isValid(T) { + if x != nil { + x.mode = invalid + } else { + check.use(rhs) + } + return + } + + if x == nil { + var target *target + // avoid calling syntax.String if not needed + if T != nil { + if _, ok := under(T).(*Signature); ok { + target = newTarget(T, syntax.String(lhs)) + } + } + x = new(operand) + check.expr(target, x, rhs) + } + + if T == nil && context == "assignment" { + context = "assignment to _ identifier" + } + check.assignment(x, T, context) +} + +// operandTypes returns the list of types for the given operands. +func operandTypes(list []*operand) (res []Type) { + for _, x := range list { + res = append(res, x.typ) + } + return res +} + +// varTypes returns the list of types for the given variables. +func varTypes(list []*Var) (res []Type) { + for _, x := range list { + res = append(res, x.typ) + } + return res +} + +// typesSummary returns a string of the form "(t1, t2, ...)" where the +// ti's are user-friendly string representations for the given types. +// If variadic is set and the last type is a slice, its string is of +// the form "...E" where E is the slice's element type. +func (check *Checker) typesSummary(list []Type, variadic bool) string { + var res []string + for i, t := range list { + var s string + switch { + case t == nil: + fallthrough // should not happen but be cautious + case !isValid(t): + s = "unknown type" + case isUntyped(t): + if isNumeric(t) { + // Do not imply a specific type requirement: + // "have number, want float64" is better than + // "have untyped int, want float64" or + // "have int, want float64". + s = "number" + } else { + // If we don't have a number, omit the "untyped" qualifier + // for compactness. 
+ s = strings.Replace(t.(*Basic).name, "untyped ", "", -1) + } + case variadic && i == len(list)-1: + s = check.sprintf("...%s", t.(*Slice).elem) + } + if s == "" { + s = check.sprintf("%s", t) + } + res = append(res, s) + } + return "(" + strings.Join(res, ", ") + ")" +} + +func measure(x int, unit string) string { + if x != 1 { + unit += "s" + } + return fmt.Sprintf("%d %s", x, unit) +} + +func (check *Checker) assignError(rhs []syntax.Expr, l, r int) { + vars := measure(l, "variable") + vals := measure(r, "value") + rhs0 := rhs[0] + + if len(rhs) == 1 { + if call, _ := syntax.Unparen(rhs0).(*syntax.CallExpr); call != nil { + check.errorf(rhs0, WrongAssignCount, "assignment mismatch: %s but %s returns %s", vars, call.Fun, vals) + return + } + } + check.errorf(rhs0, WrongAssignCount, "assignment mismatch: %s but %s", vars, vals) +} + +func (check *Checker) returnError(at poser, lhs []*Var, rhs []*operand) { + l, r := len(lhs), len(rhs) + qualifier := "not enough" + if r > l { + at = rhs[l] // report at first extra value + qualifier = "too many" + } else if r > 0 { + at = rhs[r-1] // report at last value + } + var err error_ + err.code = WrongResultCount + err.errorf(at, "%s return values", qualifier) + err.errorf(nopos, "have %s", check.typesSummary(operandTypes(rhs), false)) + err.errorf(nopos, "want %s", check.typesSummary(varTypes(lhs), false)) + check.report(&err) +} + +// initVars type-checks assignments of initialization expressions orig_rhs +// to variables lhs. +// If returnStmt is non-nil, initVars type-checks the implicit assignment +// of result expressions orig_rhs to function result parameters lhs. +func (check *Checker) initVars(lhs []*Var, orig_rhs []syntax.Expr, returnStmt syntax.Stmt) { + context := "assignment" + if returnStmt != nil { + context = "return statement" + } + + l, r := len(lhs), len(orig_rhs) + + // If l == 1 and the rhs is a single call, for a better + // error message don't handle it as n:n mapping below. 
+ isCall := false + if r == 1 { + _, isCall = syntax.Unparen(orig_rhs[0]).(*syntax.CallExpr) + } + + // If we have a n:n mapping from lhs variable to rhs expression, + // each value can be assigned to its corresponding variable. + if l == r && !isCall { + var x operand + for i, lhs := range lhs { + desc := lhs.name + if returnStmt != nil && desc == "" { + desc = "result variable" + } + check.expr(newTarget(lhs.typ, desc), &x, orig_rhs[i]) + check.initVar(lhs, &x, context) + } + return + } + + // If we don't have an n:n mapping, the rhs must be a single expression + // resulting in 2 or more values; otherwise we have an assignment mismatch. + if r != 1 { + // Only report a mismatch error if there are no other errors on the rhs. + if check.use(orig_rhs...) { + if returnStmt != nil { + rhs := check.exprList(orig_rhs) + check.returnError(returnStmt, lhs, rhs) + } else { + check.assignError(orig_rhs, l, r) + } + } + // ensure that LHS variables have a type + for _, v := range lhs { + if v.typ == nil { + v.typ = Typ[Invalid] + } + } + return + } + + rhs, commaOk := check.multiExpr(orig_rhs[0], l == 2 && returnStmt == nil) + r = len(rhs) + if l == r { + for i, lhs := range lhs { + check.initVar(lhs, rhs[i], context) + } + // Only record comma-ok expression if both initializations succeeded + // (go.dev/issue/59371). + if commaOk && rhs[0].mode != invalid && rhs[1].mode != invalid { + check.recordCommaOkTypes(orig_rhs[0], rhs) + } + return + } + + // In all other cases we have an assignment mismatch. + // Only report a mismatch error if there are no other errors on the rhs. + if rhs[0].mode != invalid { + if returnStmt != nil { + check.returnError(returnStmt, lhs, rhs) + } else { + check.assignError(orig_rhs, l, r) + } + } + // ensure that LHS variables have a type + for _, v := range lhs { + if v.typ == nil { + v.typ = Typ[Invalid] + } + } + // orig_rhs[0] was already evaluated +} + +// assignVars type-checks assignments of expressions orig_rhs to variables lhs. 
+func (check *Checker) assignVars(lhs, orig_rhs []syntax.Expr) { + l, r := len(lhs), len(orig_rhs) + + // If l == 1 and the rhs is a single call, for a better + // error message don't handle it as n:n mapping below. + isCall := false + if r == 1 { + _, isCall = syntax.Unparen(orig_rhs[0]).(*syntax.CallExpr) + } + + // If we have a n:n mapping from lhs variable to rhs expression, + // each value can be assigned to its corresponding variable. + if l == r && !isCall { + for i, lhs := range lhs { + check.assignVar(lhs, orig_rhs[i], nil, "assignment") + } + return + } + + // If we don't have an n:n mapping, the rhs must be a single expression + // resulting in 2 or more values; otherwise we have an assignment mismatch. + if r != 1 { + // Only report a mismatch error if there are no other errors on the lhs or rhs. + okLHS := check.useLHS(lhs...) + okRHS := check.use(orig_rhs...) + if okLHS && okRHS { + check.assignError(orig_rhs, l, r) + } + return + } + + rhs, commaOk := check.multiExpr(orig_rhs[0], l == 2) + r = len(rhs) + if l == r { + for i, lhs := range lhs { + check.assignVar(lhs, nil, rhs[i], "assignment") + } + // Only record comma-ok expression if both assignments succeeded + // (go.dev/issue/59371). + if commaOk && rhs[0].mode != invalid && rhs[1].mode != invalid { + check.recordCommaOkTypes(orig_rhs[0], rhs) + } + return + } + + // In all other cases we have an assignment mismatch. + // Only report a mismatch error if there are no other errors on the rhs. + if rhs[0].mode != invalid { + check.assignError(orig_rhs, l, r) + } + check.useLHS(lhs...) 
+ // orig_rhs[0] was already evaluated +} + +func (check *Checker) shortVarDecl(pos syntax.Pos, lhs, rhs []syntax.Expr) { + top := len(check.delayed) + scope := check.scope + + // collect lhs variables + seen := make(map[string]bool, len(lhs)) + lhsVars := make([]*Var, len(lhs)) + newVars := make([]*Var, 0, len(lhs)) + hasErr := false + for i, lhs := range lhs { + ident, _ := lhs.(*syntax.Name) + if ident == nil { + check.useLHS(lhs) + check.errorf(lhs, BadDecl, "non-name %s on left side of :=", lhs) + hasErr = true + continue + } + + name := ident.Value + if name != "_" { + if seen[name] { + check.errorf(lhs, RepeatedDecl, "%s repeated on left side of :=", lhs) + hasErr = true + continue + } + seen[name] = true + } + + // Use the correct obj if the ident is redeclared. The + // variable's scope starts after the declaration; so we + // must use Scope.Lookup here and call Scope.Insert + // (via check.declare) later. + if alt := scope.Lookup(name); alt != nil { + check.recordUse(ident, alt) + // redeclared object must be a variable + if obj, _ := alt.(*Var); obj != nil { + lhsVars[i] = obj + } else { + check.errorf(lhs, UnassignableOperand, "cannot assign to %s", lhs) + hasErr = true + } + continue + } + + // declare new variable + obj := NewVar(ident.Pos(), check.pkg, name, nil) + lhsVars[i] = obj + if name != "_" { + newVars = append(newVars, obj) + } + check.recordDef(ident, obj) + } + + // create dummy variables where the lhs is invalid + for i, obj := range lhsVars { + if obj == nil { + lhsVars[i] = NewVar(lhs[i].Pos(), check.pkg, "_", nil) + } + } + + check.initVars(lhsVars, rhs, nil) + + // process function literals in rhs expressions before scope changes + check.processDelayed(top) + + if len(newVars) == 0 && !hasErr { + check.softErrorf(pos, NoNewVar, "no new variables on left side of :=") + return + } + + // declare new variables + // spec: "The scope of a constant or variable identifier declared inside + // a function begins at the end of the ConstSpec or 
VarSpec (ShortVarDecl + // for short variable declarations) and ends at the end of the innermost + // containing block." + scopePos := syntax.EndPos(rhs[len(rhs)-1]) + for _, obj := range newVars { + check.declare(scope, nil, obj, scopePos) // id = nil: recordDef already called + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/basic.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/basic.go new file mode 100644 index 0000000000000000000000000000000000000000..2fd973cafbc5e66022c8455e9073237e15de2c64 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/basic.go @@ -0,0 +1,82 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +// BasicKind describes the kind of basic type. +type BasicKind int + +const ( + Invalid BasicKind = iota // type is invalid + + // predeclared types + Bool + Int + Int8 + Int16 + Int32 + Int64 + Uint + Uint8 + Uint16 + Uint32 + Uint64 + Uintptr + Float32 + Float64 + Complex64 + Complex128 + String + UnsafePointer + + // types for untyped values + UntypedBool + UntypedInt + UntypedRune + UntypedFloat + UntypedComplex + UntypedString + UntypedNil + + // aliases + Byte = Uint8 + Rune = Int32 +) + +// BasicInfo is a set of flags describing properties of a basic type. +type BasicInfo int + +// Properties of basic types. +const ( + IsBoolean BasicInfo = 1 << iota + IsInteger + IsUnsigned + IsFloat + IsComplex + IsString + IsUntyped + + IsOrdered = IsInteger | IsFloat | IsString + IsNumeric = IsInteger | IsFloat | IsComplex + IsConstType = IsBoolean | IsNumeric | IsString +) + +// A Basic represents a basic type. +type Basic struct { + kind BasicKind + info BasicInfo + name string +} + +// Kind returns the kind of basic type b. +func (b *Basic) Kind() BasicKind { return b.kind } + +// Info returns information about properties of basic type b. 
+func (b *Basic) Info() BasicInfo { return b.info } + +// Name returns the name of basic type b. +func (b *Basic) Name() string { return b.name } + +func (b *Basic) Underlying() Type { return b } +func (b *Basic) String() string { return TypeString(b, nil) } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/builtins.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/builtins.go new file mode 100644 index 0000000000000000000000000000000000000000..60f6d7f4152dd2ba7cf8a92d5f3a6550bf9f6576 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/builtins.go @@ -0,0 +1,1047 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements typechecking of builtin function calls. + +package types2 + +import ( + "cmd/compile/internal/syntax" + "go/constant" + "go/token" + . "internal/types/errors" +) + +// builtin type-checks a call to the built-in specified by id and +// reports whether the call is valid, with *x holding the result; +// but x.expr is not set. If the call is invalid, the result is +// false, and *x is undefined. +func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (_ bool) { + argList := call.ArgList + + // append is the only built-in that permits the use of ... for the last argument + bin := predeclaredFuncs[id] + if call.HasDots && id != _Append { + //check.errorf(call.Ellipsis, invalidOp + "invalid use of ... with built-in %s", bin.name) + check.errorf(call, + InvalidDotDotDot, + invalidOp+"invalid use of ... with built-in %s", bin.name) + check.use(argList...) + return + } + + // For len(x) and cap(x) we need to know if x contains any function calls or + // receive operations. Save/restore current setting and set hasCallOrRecv to + // false for the evaluation of x so that we can check it afterwards. 
+ // Note: We must do this _before_ calling exprList because exprList evaluates + // all arguments. + if id == _Len || id == _Cap { + defer func(b bool) { + check.hasCallOrRecv = b + }(check.hasCallOrRecv) + check.hasCallOrRecv = false + } + + // Evaluate arguments for built-ins that use ordinary (value) arguments. + // For built-ins with special argument handling (make, new, etc.), + // evaluation is done by the respective built-in code. + var args []*operand // not valid for _Make, _New, _Offsetof, _Trace + var nargs int + switch id { + default: + // check all arguments + args = check.exprList(argList) + nargs = len(args) + for _, a := range args { + if a.mode == invalid { + return + } + } + // first argument is always in x + if nargs > 0 { + *x = *args[0] + } + case _Make, _New, _Offsetof, _Trace: + // arguments require special handling + nargs = len(argList) + } + + // check argument count + { + msg := "" + if nargs < bin.nargs { + msg = "not enough" + } else if !bin.variadic && nargs > bin.nargs { + msg = "too many" + } + if msg != "" { + check.errorf(call, WrongArgCount, invalidOp+"%s arguments for %v (expected %d, found %d)", msg, call, bin.nargs, nargs) + return + } + } + + switch id { + case _Append: + // append(s S, x ...T) S, where T is the element type of S + // spec: "The variadic function append appends zero or more values x to s of type + // S, which must be a slice type, and returns the resulting slice, also of type S. + // The values x are passed to a parameter of type ...T where T is the element type + // of S and the respective parameter passing rules apply." 
+ S := x.typ + var T Type + if s, _ := coreType(S).(*Slice); s != nil { + T = s.elem + } else { + var cause string + switch { + case x.isNil(): + cause = "have untyped nil" + case isTypeParam(S): + if u := coreType(S); u != nil { + cause = check.sprintf("%s has core type %s", x, u) + } else { + cause = check.sprintf("%s has no core type", x) + } + default: + cause = check.sprintf("have %s", x) + } + // don't use invalidArg prefix here as it would repeat "argument" in the error message + check.errorf(x, InvalidAppend, "first argument to append must be a slice; %s", cause) + return + } + + // spec: "As a special case, append also accepts a first argument assignable + // to type []byte with a second argument of string type followed by ... . + // This form appends the bytes of the string. + if nargs == 2 && call.HasDots { + if ok, _ := x.assignableTo(check, NewSlice(universeByte), nil); ok { + y := args[1] + if t := coreString(y.typ); t != nil && isString(t) { + if check.recordTypes() { + sig := makeSig(S, S, y.typ) + sig.variadic = true + check.recordBuiltinType(call.Fun, sig) + } + x.mode = value + x.typ = S + break + } + } + } + + // check general case by creating custom signature + sig := makeSig(S, S, NewSlice(T)) // []T required for variadic signature + sig.variadic = true + check.arguments(call, sig, nil, nil, args, nil, nil) // discard result (we know the result type) + // ok to continue even if check.arguments reported errors + + x.mode = value + x.typ = S + if check.recordTypes() { + check.recordBuiltinType(call.Fun, sig) + } + + case _Cap, _Len: + // cap(x) + // len(x) + mode := invalid + var val constant.Value + switch t := arrayPtrDeref(under(x.typ)).(type) { + case *Basic: + if isString(t) && id == _Len { + if x.mode == constant_ { + mode = constant_ + val = constant.MakeInt64(int64(len(constant.StringVal(x.val)))) + } else { + mode = value + } + } + + case *Array: + mode = value + // spec: "The expressions len(s) and cap(s) are constants + // if the type 
of s is an array or pointer to an array and + // the expression s does not contain channel receives or + // function calls; in this case s is not evaluated." + if !check.hasCallOrRecv { + mode = constant_ + if t.len >= 0 { + val = constant.MakeInt64(t.len) + } else { + val = constant.MakeUnknown() + } + } + + case *Slice, *Chan: + mode = value + + case *Map: + if id == _Len { + mode = value + } + + case *Interface: + if !isTypeParam(x.typ) { + break + } + if t.typeSet().underIs(func(t Type) bool { + switch t := arrayPtrDeref(t).(type) { + case *Basic: + if isString(t) && id == _Len { + return true + } + case *Array, *Slice, *Chan: + return true + case *Map: + if id == _Len { + return true + } + } + return false + }) { + mode = value + } + } + + if mode == invalid { + // avoid error if underlying type is invalid + if isValid(under(x.typ)) { + code := InvalidCap + if id == _Len { + code = InvalidLen + } + check.errorf(x, code, invalidArg+"%s for %s", x, bin.name) + } + return + } + + // record the signature before changing x.typ + if check.recordTypes() && mode != constant_ { + check.recordBuiltinType(call.Fun, makeSig(Typ[Int], x.typ)) + } + + x.mode = mode + x.typ = Typ[Int] + x.val = val + + case _Clear: + // clear(m) + check.verifyVersionf(call.Fun, go1_21, "clear") + + if !underIs(x.typ, func(u Type) bool { + switch u.(type) { + case *Map, *Slice: + return true + } + check.errorf(x, InvalidClear, invalidArg+"cannot clear %s: argument must be (or constrained by) map or slice", x) + return false + }) { + return + } + + x.mode = novalue + if check.recordTypes() { + check.recordBuiltinType(call.Fun, makeSig(nil, x.typ)) + } + + case _Close: + // close(c) + if !underIs(x.typ, func(u Type) bool { + uch, _ := u.(*Chan) + if uch == nil { + check.errorf(x, InvalidClose, invalidOp+"cannot close non-channel %s", x) + return false + } + if uch.dir == RecvOnly { + check.errorf(x, InvalidClose, invalidOp+"cannot close receive-only channel %s", x) + return false + } + return 
true + }) { + return + } + x.mode = novalue + if check.recordTypes() { + check.recordBuiltinType(call.Fun, makeSig(nil, x.typ)) + } + + case _Complex: + // complex(x, y floatT) complexT + y := args[1] + + // convert or check untyped arguments + d := 0 + if isUntyped(x.typ) { + d |= 1 + } + if isUntyped(y.typ) { + d |= 2 + } + switch d { + case 0: + // x and y are typed => nothing to do + case 1: + // only x is untyped => convert to type of y + check.convertUntyped(x, y.typ) + case 2: + // only y is untyped => convert to type of x + check.convertUntyped(y, x.typ) + case 3: + // x and y are untyped => + // 1) if both are constants, convert them to untyped + // floating-point numbers if possible, + // 2) if one of them is not constant (possible because + // it contains a shift that is yet untyped), convert + // both of them to float64 since they must have the + // same type to succeed (this will result in an error + // because shifts of floats are not permitted) + if x.mode == constant_ && y.mode == constant_ { + toFloat := func(x *operand) { + if isNumeric(x.typ) && constant.Sign(constant.Imag(x.val)) == 0 { + x.typ = Typ[UntypedFloat] + } + } + toFloat(x) + toFloat(y) + } else { + check.convertUntyped(x, Typ[Float64]) + check.convertUntyped(y, Typ[Float64]) + // x and y should be invalid now, but be conservative + // and check below + } + } + if x.mode == invalid || y.mode == invalid { + return + } + + // both argument types must be identical + if !Identical(x.typ, y.typ) { + check.errorf(x, InvalidComplex, invalidOp+"%v (mismatched types %s and %s)", call, x.typ, y.typ) + return + } + + // the argument types must be of floating-point type + // (applyTypeFunc never calls f with a type parameter) + f := func(typ Type) Type { + assert(!isTypeParam(typ)) + if t, _ := under(typ).(*Basic); t != nil { + switch t.kind { + case Float32: + return Typ[Complex64] + case Float64: + return Typ[Complex128] + case UntypedFloat: + return Typ[UntypedComplex] + } + } + return nil + } 
+ resTyp := check.applyTypeFunc(f, x, id) + if resTyp == nil { + check.errorf(x, InvalidComplex, invalidArg+"arguments have type %s, expected floating-point", x.typ) + return + } + + // if both arguments are constants, the result is a constant + if x.mode == constant_ && y.mode == constant_ { + x.val = constant.BinaryOp(constant.ToFloat(x.val), token.ADD, constant.MakeImag(constant.ToFloat(y.val))) + } else { + x.mode = value + } + + if check.recordTypes() && x.mode != constant_ { + check.recordBuiltinType(call.Fun, makeSig(resTyp, x.typ, x.typ)) + } + + x.typ = resTyp + + case _Copy: + // copy(x, y []T) int + dst, _ := coreType(x.typ).(*Slice) + + y := args[1] + src0 := coreString(y.typ) + if src0 != nil && isString(src0) { + src0 = NewSlice(universeByte) + } + src, _ := src0.(*Slice) + + if dst == nil || src == nil { + check.errorf(x, InvalidCopy, invalidArg+"copy expects slice arguments; found %s and %s", x, y) + return + } + + if !Identical(dst.elem, src.elem) { + check.errorf(x, InvalidCopy, invalidArg+"arguments to copy %s and %s have different element types %s and %s", x, y, dst.elem, src.elem) + return + } + + if check.recordTypes() { + check.recordBuiltinType(call.Fun, makeSig(Typ[Int], x.typ, y.typ)) + } + x.mode = value + x.typ = Typ[Int] + + case _Delete: + // delete(map_, key) + // map_ must be a map type or a type parameter describing map types. + // The key cannot be a type parameter for now. 
+ map_ := x.typ + var key Type + if !underIs(map_, func(u Type) bool { + map_, _ := u.(*Map) + if map_ == nil { + check.errorf(x, InvalidDelete, invalidArg+"%s is not a map", x) + return false + } + if key != nil && !Identical(map_.key, key) { + check.errorf(x, InvalidDelete, invalidArg+"maps of %s must have identical key types", x) + return false + } + key = map_.key + return true + }) { + return + } + + *x = *args[1] // key + check.assignment(x, key, "argument to delete") + if x.mode == invalid { + return + } + + x.mode = novalue + if check.recordTypes() { + check.recordBuiltinType(call.Fun, makeSig(nil, map_, key)) + } + + case _Imag, _Real: + // imag(complexT) floatT + // real(complexT) floatT + + // convert or check untyped argument + if isUntyped(x.typ) { + if x.mode == constant_ { + // an untyped constant number can always be considered + // as a complex constant + if isNumeric(x.typ) { + x.typ = Typ[UntypedComplex] + } + } else { + // an untyped non-constant argument may appear if + // it contains a (yet untyped non-constant) shift + // expression: convert it to complex128 which will + // result in an error (shift of complex value) + check.convertUntyped(x, Typ[Complex128]) + // x should be invalid now, but be conservative and check + if x.mode == invalid { + return + } + } + } + + // the argument must be of complex type + // (applyTypeFunc never calls f with a type parameter) + f := func(typ Type) Type { + assert(!isTypeParam(typ)) + if t, _ := under(typ).(*Basic); t != nil { + switch t.kind { + case Complex64: + return Typ[Float32] + case Complex128: + return Typ[Float64] + case UntypedComplex: + return Typ[UntypedFloat] + } + } + return nil + } + resTyp := check.applyTypeFunc(f, x, id) + if resTyp == nil { + code := InvalidImag + if id == _Real { + code = InvalidReal + } + check.errorf(x, code, invalidArg+"argument has type %s, expected complex type", x.typ) + return + } + + // if the argument is a constant, the result is a constant + if x.mode == 
constant_ { + if id == _Real { + x.val = constant.Real(x.val) + } else { + x.val = constant.Imag(x.val) + } + } else { + x.mode = value + } + + if check.recordTypes() && x.mode != constant_ { + check.recordBuiltinType(call.Fun, makeSig(resTyp, x.typ)) + } + + x.typ = resTyp + + case _Make: + // make(T, n) + // make(T, n, m) + // (no argument evaluated yet) + arg0 := argList[0] + T := check.varType(arg0) + if !isValid(T) { + return + } + + var min int // minimum number of arguments + switch coreType(T).(type) { + case *Slice: + min = 2 + case *Map, *Chan: + min = 1 + case nil: + check.errorf(arg0, InvalidMake, invalidArg+"cannot make %s: no core type", arg0) + return + default: + check.errorf(arg0, InvalidMake, invalidArg+"cannot make %s; type must be slice, map, or channel", arg0) + return + } + if nargs < min || min+1 < nargs { + check.errorf(call, WrongArgCount, invalidOp+"%v expects %d or %d arguments; found %d", call, min, min+1, nargs) + return + } + + types := []Type{T} + var sizes []int64 // constant integer arguments, if any + for _, arg := range argList[1:] { + typ, size := check.index(arg, -1) // ok to continue with typ == Typ[Invalid] + types = append(types, typ) + if size >= 0 { + sizes = append(sizes, size) + } + } + if len(sizes) == 2 && sizes[0] > sizes[1] { + check.error(argList[1], SwappedMakeArgs, invalidArg+"length and capacity swapped") + // safe to continue + } + x.mode = value + x.typ = T + if check.recordTypes() { + check.recordBuiltinType(call.Fun, makeSig(x.typ, types...)) + } + + case _Max, _Min: + // max(x, ...) + // min(x, ...) + check.verifyVersionf(call.Fun, go1_21, bin.name) + + op := token.LSS + if id == _Max { + op = token.GTR + } + + for i, a := range args { + if a.mode == invalid { + return + } + + if !allOrdered(a.typ) { + check.errorf(a, InvalidMinMaxOperand, invalidArg+"%s cannot be ordered", a) + return + } + + // The first argument is already in x and there's nothing left to do. 
+ if i > 0 { + check.matchTypes(x, a) + if x.mode == invalid { + return + } + + if !Identical(x.typ, a.typ) { + check.errorf(a, MismatchedTypes, invalidArg+"mismatched types %s (previous argument) and %s (type of %s)", x.typ, a.typ, a.expr) + return + } + + if x.mode == constant_ && a.mode == constant_ { + if constant.Compare(a.val, op, x.val) { + *x = *a + } + } else { + x.mode = value + } + } + } + + // If nargs == 1, make sure x.mode is either a value or a constant. + if x.mode != constant_ { + x.mode = value + // A value must not be untyped. + check.assignment(x, &emptyInterface, "argument to "+bin.name) + if x.mode == invalid { + return + } + } + + // Use the final type computed above for all arguments. + for _, a := range args { + check.updateExprType(a.expr, x.typ, true) + } + + if check.recordTypes() && x.mode != constant_ { + types := make([]Type, nargs) + for i := range types { + types[i] = x.typ + } + check.recordBuiltinType(call.Fun, makeSig(x.typ, types...)) + } + + case _New: + // new(T) + // (no argument evaluated yet) + T := check.varType(argList[0]) + if !isValid(T) { + return + } + + x.mode = value + x.typ = &Pointer{base: T} + if check.recordTypes() { + check.recordBuiltinType(call.Fun, makeSig(x.typ, T)) + } + + case _Panic: + // panic(x) + // record panic call if inside a function with result parameters + // (for use in Checker.isTerminating) + if check.sig != nil && check.sig.results.Len() > 0 { + // function has result parameters + p := check.isPanic + if p == nil { + // allocate lazily + p = make(map[*syntax.CallExpr]bool) + check.isPanic = p + } + p[call] = true + } + + check.assignment(x, &emptyInterface, "argument to panic") + if x.mode == invalid { + return + } + + x.mode = novalue + if check.recordTypes() { + check.recordBuiltinType(call.Fun, makeSig(nil, &emptyInterface)) + } + + case _Print, _Println: + // print(x, y, ...) + // println(x, y, ...) 
+ var params []Type + if nargs > 0 { + params = make([]Type, nargs) + for i, a := range args { + check.assignment(a, nil, "argument to "+predeclaredFuncs[id].name) + if a.mode == invalid { + return + } + params[i] = a.typ + } + } + + x.mode = novalue + if check.recordTypes() { + check.recordBuiltinType(call.Fun, makeSig(nil, params...)) + } + + case _Recover: + // recover() interface{} + x.mode = value + x.typ = &emptyInterface + if check.recordTypes() { + check.recordBuiltinType(call.Fun, makeSig(x.typ)) + } + + case _Add: + // unsafe.Add(ptr unsafe.Pointer, len IntegerType) unsafe.Pointer + check.verifyVersionf(call.Fun, go1_17, "unsafe.Add") + + check.assignment(x, Typ[UnsafePointer], "argument to unsafe.Add") + if x.mode == invalid { + return + } + + y := args[1] + if !check.isValidIndex(y, InvalidUnsafeAdd, "length", true) { + return + } + + x.mode = value + x.typ = Typ[UnsafePointer] + if check.recordTypes() { + check.recordBuiltinType(call.Fun, makeSig(x.typ, x.typ, y.typ)) + } + + case _Alignof: + // unsafe.Alignof(x T) uintptr + check.assignment(x, nil, "argument to unsafe.Alignof") + if x.mode == invalid { + return + } + + if hasVarSize(x.typ, nil) { + x.mode = value + if check.recordTypes() { + check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], x.typ)) + } + } else { + x.mode = constant_ + x.val = constant.MakeInt64(check.conf.alignof(x.typ)) + // result is constant - no need to record signature + } + x.typ = Typ[Uintptr] + + case _Offsetof: + // unsafe.Offsetof(x T) uintptr, where x must be a selector + // (no argument evaluated yet) + arg0 := argList[0] + selx, _ := syntax.Unparen(arg0).(*syntax.SelectorExpr) + if selx == nil { + check.errorf(arg0, BadOffsetofSyntax, invalidArg+"%s is not a selector expression", arg0) + check.use(arg0) + return + } + + check.expr(nil, x, selx.X) + if x.mode == invalid { + return + } + + base := derefStructPtr(x.typ) + sel := selx.Sel.Value + obj, index, indirect := LookupFieldOrMethod(base, false, check.pkg, sel) 
+ switch obj.(type) { + case nil: + check.errorf(x, MissingFieldOrMethod, invalidArg+"%s has no single field %s", base, sel) + return + case *Func: + // TODO(gri) Using derefStructPtr may result in methods being found + // that don't actually exist. An error either way, but the error + // message is confusing. See: https://play.golang.org/p/al75v23kUy , + // but go/types reports: "invalid argument: x.m is a method value". + check.errorf(arg0, InvalidOffsetof, invalidArg+"%s is a method value", arg0) + return + } + if indirect { + check.errorf(x, InvalidOffsetof, invalidArg+"field %s is embedded via a pointer in %s", sel, base) + return + } + + // TODO(gri) Should we pass x.typ instead of base (and have indirect report if derefStructPtr indirected)? + check.recordSelection(selx, FieldVal, base, obj, index, false) + + // record the selector expression (was bug - go.dev/issue/47895) + { + mode := value + if x.mode == variable || indirect { + mode = variable + } + check.record(&operand{mode, selx, obj.Type(), nil, 0}) + } + + // The field offset is considered a variable even if the field is declared before + // the part of the struct which is variable-sized. This makes both the rules + // simpler and also permits (or at least doesn't prevent) a compiler from re- + // arranging struct fields if it wanted to. 
+ if hasVarSize(base, nil) { + x.mode = value + if check.recordTypes() { + check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], obj.Type())) + } + } else { + offs := check.conf.offsetof(base, index) + if offs < 0 { + check.errorf(x, TypeTooLarge, "%s is too large", x) + return + } + x.mode = constant_ + x.val = constant.MakeInt64(offs) + // result is constant - no need to record signature + } + x.typ = Typ[Uintptr] + + case _Sizeof: + // unsafe.Sizeof(x T) uintptr + check.assignment(x, nil, "argument to unsafe.Sizeof") + if x.mode == invalid { + return + } + + if hasVarSize(x.typ, nil) { + x.mode = value + if check.recordTypes() { + check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], x.typ)) + } + } else { + size := check.conf.sizeof(x.typ) + if size < 0 { + check.errorf(x, TypeTooLarge, "%s is too large", x) + return + } + x.mode = constant_ + x.val = constant.MakeInt64(size) + // result is constant - no need to record signature + } + x.typ = Typ[Uintptr] + + case _Slice: + // unsafe.Slice(ptr *T, len IntegerType) []T + check.verifyVersionf(call.Fun, go1_17, "unsafe.Slice") + + ptr, _ := coreType(x.typ).(*Pointer) + if ptr == nil { + check.errorf(x, InvalidUnsafeSlice, invalidArg+"%s is not a pointer", x) + return + } + + y := args[1] + if !check.isValidIndex(y, InvalidUnsafeSlice, "length", false) { + return + } + + x.mode = value + x.typ = NewSlice(ptr.base) + if check.recordTypes() { + check.recordBuiltinType(call.Fun, makeSig(x.typ, ptr, y.typ)) + } + + case _SliceData: + // unsafe.SliceData(slice []T) *T + check.verifyVersionf(call.Fun, go1_20, "unsafe.SliceData") + + slice, _ := coreType(x.typ).(*Slice) + if slice == nil { + check.errorf(x, InvalidUnsafeSliceData, invalidArg+"%s is not a slice", x) + return + } + + x.mode = value + x.typ = NewPointer(slice.elem) + if check.recordTypes() { + check.recordBuiltinType(call.Fun, makeSig(x.typ, slice)) + } + + case _String: + // unsafe.String(ptr *byte, len IntegerType) string + 
check.verifyVersionf(call.Fun, go1_20, "unsafe.String") + + check.assignment(x, NewPointer(universeByte), "argument to unsafe.String") + if x.mode == invalid { + return + } + + y := args[1] + if !check.isValidIndex(y, InvalidUnsafeString, "length", false) { + return + } + + x.mode = value + x.typ = Typ[String] + if check.recordTypes() { + check.recordBuiltinType(call.Fun, makeSig(x.typ, NewPointer(universeByte), y.typ)) + } + + case _StringData: + // unsafe.StringData(str string) *byte + check.verifyVersionf(call.Fun, go1_20, "unsafe.StringData") + + check.assignment(x, Typ[String], "argument to unsafe.StringData") + if x.mode == invalid { + return + } + + x.mode = value + x.typ = NewPointer(universeByte) + if check.recordTypes() { + check.recordBuiltinType(call.Fun, makeSig(x.typ, Typ[String])) + } + + case _Assert: + // assert(pred) causes a typechecker error if pred is false. + // The result of assert is the value of pred if there is no error. + // Note: assert is only available in self-test mode. + if x.mode != constant_ || !isBoolean(x.typ) { + check.errorf(x, Test, invalidArg+"%s is not a boolean constant", x) + return + } + if x.val.Kind() != constant.Bool { + check.errorf(x, Test, "internal error: value of %s should be a boolean constant", x) + return + } + if !constant.BoolVal(x.val) { + check.errorf(call, Test, "%v failed", call) + // compile-time assertion failure - safe to continue + } + // result is constant - no need to record signature + + case _Trace: + // trace(x, y, z, ...) dumps the positions, expressions, and + // values of its arguments. The result of trace is the value + // of the first argument. + // Note: trace is only available in self-test mode. 
+		// (no argument evaluated yet)
+		if nargs == 0 {
+			check.dump("%v: trace() without arguments", atPos(call))
+			x.mode = novalue
+			break
+		}
+		// Evaluate and dump each argument in turn; only the first
+		// evaluation writes through x (the builtin's result operand).
+		var t operand
+		x1 := x
+		for _, arg := range argList {
+			check.rawExpr(nil, x1, arg, nil, false) // permit trace for types, e.g.: new(trace(T))
+			check.dump("%v: %s", atPos(x1), x1)
+			x1 = &t // use incoming x only for first argument
+		}
+		if x.mode == invalid {
+			return
+		}
+		// trace is only available in test mode - no need to record signature
+
+	default:
+		unreachable()
+	}
+
+	assert(x.mode != invalid)
+	return true
+}
+
+// hasVarSize reports if the size of type t is variable due to type parameters
+// or if the type is infinitely-sized due to a cycle for which the type has not
+// yet been checked.
+func hasVarSize(t Type, seen map[*Named]bool) (varSized bool) {
+	// Cycles are only possible through *Named types.
+	// The seen map is used to detect cycles and track
+	// the results of previously seen types.
+	if named := asNamed(t); named != nil {
+		if v, ok := seen[named]; ok {
+			return v
+		}
+		if seen == nil {
+			seen = make(map[*Named]bool)
+		}
+		seen[named] = true // possibly cyclic until proven otherwise
+		defer func() {
+			seen[named] = varSized // record final determination for named
+		}()
+	}
+
+	switch u := under(t).(type) {
+	case *Array:
+		return hasVarSize(u.elem, seen)
+	case *Struct:
+		for _, f := range u.fields {
+			if hasVarSize(f.typ, seen) {
+				return true
+			}
+		}
+	case *Interface:
+		// A (non-type-parameter) interface has fixed size; a type
+		// parameter's size depends on the instantiating argument.
+		return isTypeParam(t)
+	case *Named, *Union:
+		unreachable()
+	}
+	return false
+}
+
+// applyTypeFunc applies f to x. If x is a type parameter,
+// the result is a type parameter constrained by a new
+// interface bound. The type bounds for that interface
+// are computed by applying f to each of the type bounds
+// of x. If any of these applications of f return nil,
+// applyTypeFunc returns nil.
+// If x is not a type parameter, the result is f(x).
+func (check *Checker) applyTypeFunc(f func(Type) Type, x *operand, id builtinId) Type {
+	if tp, _ := x.typ.(*TypeParam); tp != nil {
+		// Test if t satisfies the requirements for the argument
+		// type and collect possible result types at the same time.
+		var terms []*Term
+		if !tp.is(func(t *term) bool {
+			if t == nil {
+				return false
+			}
+			if r := f(t.typ); r != nil {
+				terms = append(terms, NewTerm(t.tilde, r))
+				return true
+			}
+			return false
+		}) {
+			return nil
+		}
+
+		// We can type-check this fine but we're introducing a synthetic
+		// type parameter for the result. It's not clear what the API
+		// implications are here. Report an error for 1.18 (see go.dev/issue/50912),
+		// but continue type-checking.
+		var code Code
+		switch id {
+		case _Real:
+			code = InvalidReal
+		case _Imag:
+			code = InvalidImag
+		case _Complex:
+			code = InvalidComplex
+		default:
+			unreachable()
+		}
+		check.softErrorf(x, code, "%s not supported as argument to %s for go1.18 (see go.dev/issue/50937)", x, predeclaredFuncs[id].name)
+
+		// Construct a suitable new type parameter for the result type.
+		// The type parameter is placed in the current package so export/import
+		// works as expected.
+		tpar := NewTypeName(nopos, check.pkg, tp.obj.name, nil)
+		ptyp := check.newTypeParam(tpar, NewInterfaceType(nil, []Type{NewUnion(terms)})) // assigns type to tpar as a side-effect
+		ptyp.index = tp.index // synthetic result parameter keeps the original's index
+
+		return ptyp
+	}
+
+	return f(x.typ)
+}
+
+// makeSig makes a signature for the given argument and result types.
+// Default types are used for untyped arguments, and res may be nil.
+func makeSig(res Type, args ...Type) *Signature {
+	list := make([]*Var, len(args))
+	for i, param := range args {
+		// Default maps an untyped constant type to its default typed form.
+		list[i] = NewVar(nopos, nil, "", Default(param))
+	}
+	params := NewTuple(list...)
+	var result *Tuple
+	if res != nil {
+		assert(!isUntyped(res))
+		result = NewTuple(NewVar(nopos, nil, "", res))
+	}
+	return &Signature{params: params, results: result}
+}
+
+// arrayPtrDeref returns A if typ is of the form *A and A is an array;
+// otherwise it returns typ.
+func arrayPtrDeref(typ Type) Type {
+	if p, ok := typ.(*Pointer); ok {
+		if a, _ := under(p.base).(*Array); a != nil {
+			return a
+		}
+	}
+	return typ
+}
+
+// unparen returns e with any enclosing parentheses stripped.
+// NOTE(review): this duplicates syntax.Unparen, which this file already
+// uses (see the _Offsetof case) — consider removing in favor of it.
+func unparen(e syntax.Expr) syntax.Expr {
+	for {
+		p, ok := e.(*syntax.ParenExpr)
+		if !ok {
+			return e
+		}
+		e = p.X
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/builtins_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/builtins_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..875ee5a4d5dde41245e1f8673cb9a8602765b66d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/builtins_test.go
@@ -0,0 +1,250 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+	"cmd/compile/internal/syntax"
+	"fmt"
+	"testing"
+
+	. "cmd/compile/internal/types2"
+)
+
+var builtinCalls = []struct {
+	name, src, sig string
+}{
+	{"append", `var s []int; _ = append(s)`, `func([]int, ...int) []int`},
+	{"append", `var s []int; _ = append(s, 0)`, `func([]int, ...int) []int`},
+	{"append", `var s []int; _ = (append)(s, 0)`, `func([]int, ...int) []int`},
+	{"append", `var s []byte; _ = ((append))(s, 0)`, `func([]byte, ...byte) []byte`},
+	{"append", `var s []byte; _ = append(s, "foo"...)`, `func([]byte, string...) []byte`},
+	{"append", `type T []byte; var s T; var str string; _ = append(s, str...)`, `func(p.T, string...) p.T`},
+	{"append", `type T []byte; type U string; var s T; var str U; _ = append(s, str...)`, `func(p.T, p.U...) 
p.T`}, + + {"cap", `var s [10]int; _ = cap(s)`, `invalid type`}, // constant + {"cap", `var s [10]int; _ = cap(&s)`, `invalid type`}, // constant + {"cap", `var s []int64; _ = cap(s)`, `func([]int64) int`}, + {"cap", `var c chan<-bool; _ = cap(c)`, `func(chan<- bool) int`}, + {"cap", `type S []byte; var s S; _ = cap(s)`, `func(p.S) int`}, + {"cap", `var s P; _ = cap(s)`, `func(P) int`}, + + {"len", `_ = len("foo")`, `invalid type`}, // constant + {"len", `var s string; _ = len(s)`, `func(string) int`}, + {"len", `var s [10]int; _ = len(s)`, `invalid type`}, // constant + {"len", `var s [10]int; _ = len(&s)`, `invalid type`}, // constant + {"len", `var s []int64; _ = len(s)`, `func([]int64) int`}, + {"len", `var c chan<-bool; _ = len(c)`, `func(chan<- bool) int`}, + {"len", `var m map[string]float32; _ = len(m)`, `func(map[string]float32) int`}, + {"len", `type S []byte; var s S; _ = len(s)`, `func(p.S) int`}, + {"len", `var s P; _ = len(s)`, `func(P) int`}, + + {"clear", `var m map[float64]int; clear(m)`, `func(map[float64]int)`}, + {"clear", `var s []byte; clear(s)`, `func([]byte)`}, + + {"close", `var c chan int; close(c)`, `func(chan int)`}, + {"close", `var c chan<- chan string; close(c)`, `func(chan<- chan string)`}, + + {"complex", `_ = complex(1, 0)`, `invalid type`}, // constant + {"complex", `var re float32; _ = complex(re, 1.0)`, `func(float32, float32) complex64`}, + {"complex", `var im float64; _ = complex(1, im)`, `func(float64, float64) complex128`}, + {"complex", `type F32 float32; var re, im F32; _ = complex(re, im)`, `func(p.F32, p.F32) complex64`}, + {"complex", `type F64 float64; var re, im F64; _ = complex(re, im)`, `func(p.F64, p.F64) complex128`}, + + {"copy", `var src, dst []byte; copy(dst, src)`, `func([]byte, []byte) int`}, + {"copy", `type T [][]int; var src, dst T; _ = copy(dst, src)`, `func(p.T, p.T) int`}, + {"copy", `var src string; var dst []byte; copy(dst, src)`, `func([]byte, string) int`}, + {"copy", `type T string; type U []byte; 
var src T; var dst U; copy(dst, src)`, `func(p.U, p.T) int`}, + {"copy", `var dst []byte; copy(dst, "hello")`, `func([]byte, string) int`}, + + {"delete", `var m map[string]bool; delete(m, "foo")`, `func(map[string]bool, string)`}, + {"delete", `type (K string; V int); var m map[K]V; delete(m, "foo")`, `func(map[p.K]p.V, p.K)`}, + + {"imag", `_ = imag(1i)`, `invalid type`}, // constant + {"imag", `var c complex64; _ = imag(c)`, `func(complex64) float32`}, + {"imag", `var c complex128; _ = imag(c)`, `func(complex128) float64`}, + {"imag", `type C64 complex64; var c C64; _ = imag(c)`, `func(p.C64) float32`}, + {"imag", `type C128 complex128; var c C128; _ = imag(c)`, `func(p.C128) float64`}, + + {"real", `_ = real(1i)`, `invalid type`}, // constant + {"real", `var c complex64; _ = real(c)`, `func(complex64) float32`}, + {"real", `var c complex128; _ = real(c)`, `func(complex128) float64`}, + {"real", `type C64 complex64; var c C64; _ = real(c)`, `func(p.C64) float32`}, + {"real", `type C128 complex128; var c C128; _ = real(c)`, `func(p.C128) float64`}, + + {"make", `_ = make([]int, 10)`, `func([]int, int) []int`}, + {"make", `type T []byte; _ = make(T, 10, 20)`, `func(p.T, int, int) p.T`}, + + // go.dev/issue/37349 + {"make", ` _ = make([]int, 0 )`, `func([]int, int) []int`}, + {"make", `var l int; _ = make([]int, l )`, `func([]int, int) []int`}, + {"make", ` _ = make([]int, 0, 0)`, `func([]int, int, int) []int`}, + {"make", `var l int; _ = make([]int, l, 0)`, `func([]int, int, int) []int`}, + {"make", `var c int; _ = make([]int, 0, c)`, `func([]int, int, int) []int`}, + {"make", `var l, c int; _ = make([]int, l, c)`, `func([]int, int, int) []int`}, + + // go.dev/issue/37393 + {"make", ` _ = make([]int , 0 )`, `func([]int, int) []int`}, + {"make", `var l byte ; _ = make([]int8 , l )`, `func([]int8, byte) []int8`}, + {"make", ` _ = make([]int16 , 0, 0)`, `func([]int16, int, int) []int16`}, + {"make", `var l int16; _ = make([]string , l, 0)`, `func([]string, int16, 
int) []string`}, + {"make", `var c int32; _ = make([]float64 , 0, c)`, `func([]float64, int, int32) []float64`}, + {"make", `var l, c uint ; _ = make([]complex128, l, c)`, `func([]complex128, uint, uint) []complex128`}, + + // go.dev/issue/45667 + {"make", `const l uint = 1; _ = make([]int, l)`, `func([]int, uint) []int`}, + + {"max", ` _ = max(0 )`, `invalid type`}, // constant + {"max", `var x int ; _ = max(x )`, `func(int) int`}, + {"max", `var x int ; _ = max(0, x )`, `func(int, int) int`}, + {"max", `var x string ; _ = max("a", x )`, `func(string, string) string`}, + {"max", `var x float32; _ = max(0, 1.0, x)`, `func(float32, float32, float32) float32`}, + + {"min", ` _ = min(0 )`, `invalid type`}, // constant + {"min", `var x int ; _ = min(x )`, `func(int) int`}, + {"min", `var x int ; _ = min(0, x )`, `func(int, int) int`}, + {"min", `var x string ; _ = min("a", x )`, `func(string, string) string`}, + {"min", `var x float32; _ = min(0, 1.0, x)`, `func(float32, float32, float32) float32`}, + + {"new", `_ = new(int)`, `func(int) *int`}, + {"new", `type T struct{}; _ = new(T)`, `func(p.T) *p.T`}, + + {"panic", `panic(0)`, `func(interface{})`}, + {"panic", `panic("foo")`, `func(interface{})`}, + + {"print", `print()`, `func()`}, + {"print", `print(0)`, `func(int)`}, + {"print", `print(1, 2.0, "foo", true)`, `func(int, float64, string, bool)`}, + + {"println", `println()`, `func()`}, + {"println", `println(0)`, `func(int)`}, + {"println", `println(1, 2.0, "foo", true)`, `func(int, float64, string, bool)`}, + + {"recover", `recover()`, `func() interface{}`}, + {"recover", `_ = recover()`, `func() interface{}`}, + + {"Add", `var p unsafe.Pointer; _ = unsafe.Add(p, -1.0)`, `func(unsafe.Pointer, int) unsafe.Pointer`}, + {"Add", `var p unsafe.Pointer; var n uintptr; _ = unsafe.Add(p, n)`, `func(unsafe.Pointer, uintptr) unsafe.Pointer`}, + {"Add", `_ = unsafe.Add(nil, 0)`, `func(unsafe.Pointer, int) unsafe.Pointer`}, + + {"Alignof", `_ = unsafe.Alignof(0)`, `invalid 
type`}, // constant + {"Alignof", `var x struct{}; _ = unsafe.Alignof(x)`, `invalid type`}, // constant + {"Alignof", `var x P; _ = unsafe.Alignof(x)`, `func(P) uintptr`}, + + {"Offsetof", `var x struct{f bool}; _ = unsafe.Offsetof(x.f)`, `invalid type`}, // constant + {"Offsetof", `var x struct{_ int; f bool}; _ = unsafe.Offsetof((&x).f)`, `invalid type`}, // constant + {"Offsetof", `var x struct{_ int; f P}; _ = unsafe.Offsetof((&x).f)`, `func(P) uintptr`}, + + {"Sizeof", `_ = unsafe.Sizeof(0)`, `invalid type`}, // constant + {"Sizeof", `var x struct{}; _ = unsafe.Sizeof(x)`, `invalid type`}, // constant + {"Sizeof", `var x P; _ = unsafe.Sizeof(x)`, `func(P) uintptr`}, + + {"Slice", `var p *int; _ = unsafe.Slice(p, 1)`, `func(*int, int) []int`}, + {"Slice", `var p *byte; var n uintptr; _ = unsafe.Slice(p, n)`, `func(*byte, uintptr) []byte`}, + {"Slice", `type B *byte; var b B; _ = unsafe.Slice(b, 0)`, `func(*byte, int) []byte`}, + + {"SliceData", "var s []int; _ = unsafe.SliceData(s)", `func([]int) *int`}, + {"SliceData", "type S []int; var s S; _ = unsafe.SliceData(s)", `func([]int) *int`}, + + {"String", `var p *byte; _ = unsafe.String(p, 1)`, `func(*byte, int) string`}, + {"String", `type B *byte; var b B; _ = unsafe.String(b, 0)`, `func(*byte, int) string`}, + + {"StringData", `var s string; _ = unsafe.StringData(s)`, `func(string) *byte`}, + {"StringData", `_ = unsafe.StringData("foo")`, `func(string) *byte`}, + + {"assert", `assert(true)`, `invalid type`}, // constant + {"assert", `type B bool; const pred B = 1 < 2; assert(pred)`, `invalid type`}, // constant + + // no tests for trace since it produces output as a side-effect +} + +func TestBuiltinSignatures(t *testing.T) { + DefPredeclaredTestFuncs() + + seen := map[string]bool{"trace": true} // no test for trace built-in; add it manually + for _, call := range builtinCalls { + testBuiltinSignature(t, call.name, call.src, call.sig) + seen[call.name] = true + } + + // make sure we didn't miss one + for _, 
name := range Universe.Names() { + if _, ok := Universe.Lookup(name).(*Builtin); ok && !seen[name] { + t.Errorf("missing test for %s", name) + } + } + for _, name := range Unsafe.Scope().Names() { + if _, ok := Unsafe.Scope().Lookup(name).(*Builtin); ok && !seen[name] { + t.Errorf("missing test for unsafe.%s", name) + } + } +} + +func testBuiltinSignature(t *testing.T, name, src0, want string) { + src := fmt.Sprintf(`package p; import "unsafe"; type _ unsafe.Pointer /* use unsafe */; func _[P ~[]byte]() { %s }`, src0) + + uses := make(map[*syntax.Name]Object) + types := make(map[syntax.Expr]TypeAndValue) + mustTypecheck(src, nil, &Info{Uses: uses, Types: types}) + + // find called function + n := 0 + var fun syntax.Expr + for x := range types { + if call, _ := x.(*syntax.CallExpr); call != nil { + fun = call.Fun + n++ + } + } + if n != 1 { + t.Errorf("%s: got %d CallExprs; want 1", src0, n) + return + } + + // check recorded types for fun and descendents (may be parenthesized) + for { + // the recorded type for the built-in must match the wanted signature + typ := types[fun].Type + if typ == nil { + t.Errorf("%s: no type recorded for %s", src0, syntax.String(fun)) + return + } + if got := typ.String(); got != want { + t.Errorf("%s: got type %s; want %s", src0, got, want) + return + } + + // called function must be a (possibly parenthesized, qualified) + // identifier denoting the expected built-in + switch p := fun.(type) { + case *syntax.Name: + obj := uses[p] + if obj == nil { + t.Errorf("%s: no object found for %s", src0, p.Value) + return + } + bin, _ := obj.(*Builtin) + if bin == nil { + t.Errorf("%s: %s does not denote a built-in", src0, p.Value) + return + } + if bin.Name() != name { + t.Errorf("%s: got built-in %s; want %s", src0, bin.Name(), name) + return + } + return // we're done + + case *syntax.ParenExpr: + fun = p.X // unpack + + case *syntax.SelectorExpr: + // built-in from package unsafe - ignore details + return // we're done + + default: + 
t.Errorf("%s: invalid function call", src0) + return + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/call.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/call.go new file mode 100644 index 0000000000000000000000000000000000000000..db7d86e3d380f31e96170c018381def8b49edb41 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/call.go @@ -0,0 +1,999 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements typechecking of call and selector expressions. + +package types2 + +import ( + "cmd/compile/internal/syntax" + . "internal/types/errors" + "strings" + "unicode" +) + +// funcInst type-checks a function instantiation. +// The incoming x must be a generic function. +// If inst != nil, it provides some or all of the type arguments (inst.Index). +// If target != nil, it may be used to infer missing type arguments of x, if any. +// At least one of T or inst must be provided. +// +// There are two modes of operation: +// +// 1. If infer == true, funcInst infers missing type arguments as needed and +// instantiates the function x. The returned results are nil. +// +// 2. If infer == false and inst provides all type arguments, funcInst +// instantiates the function x. The returned results are nil. +// If inst doesn't provide enough type arguments, funcInst returns the +// available arguments and the corresponding expression list; x remains +// unchanged. +// +// If an error (other than a version error) occurs in any case, it is reported +// and x.mode is set to invalid. 
+func (check *Checker) funcInst(T *target, pos syntax.Pos, x *operand, inst *syntax.IndexExpr, infer bool) ([]Type, []syntax.Expr) { + assert(T != nil || inst != nil) + + var instErrPos poser + if inst != nil { + instErrPos = inst.Pos() + } else { + instErrPos = pos + } + versionErr := !check.verifyVersionf(instErrPos, go1_18, "function instantiation") + + // targs and xlist are the type arguments and corresponding type expressions, or nil. + var targs []Type + var xlist []syntax.Expr + if inst != nil { + xlist = syntax.UnpackListExpr(inst.Index) + targs = check.typeList(xlist) + if targs == nil { + x.mode = invalid + x.expr = inst + return nil, nil + } + assert(len(targs) == len(xlist)) + } + + // Check the number of type arguments (got) vs number of type parameters (want). + // Note that x is a function value, not a type expression, so we don't need to + // call under below. + sig := x.typ.(*Signature) + got, want := len(targs), sig.TypeParams().Len() + if got > want { + // Providing too many type arguments is always an error. 
+ check.errorf(xlist[got-1], WrongTypeArgCount, "got %d type arguments but want %d", got, want) + x.mode = invalid + x.expr = inst + return nil, nil + } + + if got < want { + if !infer { + return targs, xlist + } + + // If the uninstantiated or partially instantiated function x is used in + // an assignment (tsig != nil), infer missing type arguments by treating + // the assignment + // + // var tvar tsig = x + // + // like a call g(tvar) of the synthetic generic function g + // + // func g[type_parameters_of_x](func_type_of_x) + // + var args []*operand + var params []*Var + var reverse bool + if T != nil && sig.tparams != nil { + if !versionErr && !check.allowVersion(check.pkg, instErrPos, go1_21) { + if inst != nil { + check.versionErrorf(instErrPos, go1_21, "partially instantiated function in assignment") + } else { + check.versionErrorf(instErrPos, go1_21, "implicitly instantiated function in assignment") + } + } + gsig := NewSignatureType(nil, nil, nil, sig.params, sig.results, sig.variadic) + params = []*Var{NewVar(x.Pos(), check.pkg, "", gsig)} + // The type of the argument operand is tsig, which is the type of the LHS in an assignment + // or the result type in a return statement. Create a pseudo-expression for that operand + // that makes sense when reported in error messages from infer, below. + expr := syntax.NewName(x.Pos(), T.desc) + args = []*operand{{mode: value, expr: expr, typ: T.sig}} + reverse = true + } + + // Rename type parameters to avoid problems with recursive instantiations. + // Note that NewTuple(params...) below is (*Tuple)(nil) if len(params) == 0, as desired. 
+ tparams, params2 := check.renameTParams(pos, sig.TypeParams().list(), NewTuple(params...)) + + targs = check.infer(pos, tparams, targs, params2.(*Tuple), args, reverse) + if targs == nil { + // error was already reported + x.mode = invalid + x.expr = inst + return nil, nil + } + got = len(targs) + } + assert(got == want) + + // instantiate function signature + expr := x.expr // if we don't have an index expression, keep the existing expression of x + if inst != nil { + expr = inst + } + sig = check.instantiateSignature(x.Pos(), expr, sig, targs, xlist) + + x.typ = sig + x.mode = value + x.expr = expr + return nil, nil +} + +func (check *Checker) instantiateSignature(pos syntax.Pos, expr syntax.Expr, typ *Signature, targs []Type, xlist []syntax.Expr) (res *Signature) { + assert(check != nil) + assert(len(targs) == typ.TypeParams().Len()) + + if check.conf.Trace { + check.trace(pos, "-- instantiating signature %s with %s", typ, targs) + check.indent++ + defer func() { + check.indent-- + check.trace(pos, "=> %s (under = %s)", res, res.Underlying()) + }() + } + + inst := check.instance(pos, typ, targs, nil, check.context()).(*Signature) + assert(inst.TypeParams().Len() == 0) // signature is not generic anymore + check.recordInstance(expr, targs, inst) + assert(len(xlist) <= len(targs)) + + // verify instantiation lazily (was go.dev/issue/50450) + check.later(func() { + tparams := typ.TypeParams().list() + if i, err := check.verify(pos, tparams, targs, check.context()); err != nil { + // best position for error reporting + pos := pos + if i < len(xlist) { + pos = syntax.StartPos(xlist[i]) + } + check.softErrorf(pos, InvalidTypeArg, "%s", err) + } else { + check.mono.recordInstance(check.pkg, pos, tparams, targs, xlist) + } + }).describef(pos, "verify instantiation") + + return inst +} + +func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind { + var inst *syntax.IndexExpr // function instantiation, if any + if iexpr, _ := 
call.Fun.(*syntax.IndexExpr); iexpr != nil { + if check.indexExpr(x, iexpr) { + // Delay function instantiation to argument checking, + // where we combine type and value arguments for type + // inference. + assert(x.mode == value) + inst = iexpr + } + x.expr = iexpr + check.record(x) + } else { + check.exprOrType(x, call.Fun, true) + } + // x.typ may be generic + + switch x.mode { + case invalid: + check.use(call.ArgList...) + x.expr = call + return statement + + case typexpr: + // conversion + check.nonGeneric(nil, x) + if x.mode == invalid { + return conversion + } + T := x.typ + x.mode = invalid + switch n := len(call.ArgList); n { + case 0: + check.errorf(call, WrongArgCount, "missing argument in conversion to %s", T) + case 1: + check.expr(nil, x, call.ArgList[0]) + if x.mode != invalid { + if t, _ := under(T).(*Interface); t != nil && !isTypeParam(T) { + if !t.IsMethodSet() { + check.errorf(call, MisplacedConstraintIface, "cannot use interface %s in conversion (contains specific type constraints or is comparable)", T) + break + } + } + if call.HasDots { + check.errorf(call.ArgList[0], BadDotDotDotSyntax, "invalid use of ... in conversion to %s", T) + break + } + check.conversion(x, T) + } + default: + check.use(call.ArgList...) 
+ check.errorf(call.ArgList[n-1], WrongArgCount, "too many arguments in conversion to %s", T) + } + x.expr = call + return conversion + + case builtin: + // no need to check for non-genericity here + id := x.id + if !check.builtin(x, call, id) { + x.mode = invalid + } + x.expr = call + // a non-constant result implies a function call + if x.mode != invalid && x.mode != constant_ { + check.hasCallOrRecv = true + } + return predeclaredFuncs[id].kind + } + + // ordinary function/method call + // signature may be generic + cgocall := x.mode == cgofunc + + // a type parameter may be "called" if all types have the same signature + sig, _ := coreType(x.typ).(*Signature) + if sig == nil { + check.errorf(x, InvalidCall, invalidOp+"cannot call non-function %s", x) + x.mode = invalid + x.expr = call + return statement + } + + // Capture wasGeneric before sig is potentially instantiated below. + wasGeneric := sig.TypeParams().Len() > 0 + + // evaluate type arguments, if any + var xlist []syntax.Expr + var targs []Type + if inst != nil { + xlist = syntax.UnpackListExpr(inst.Index) + targs = check.typeList(xlist) + if targs == nil { + check.use(call.ArgList...) + x.mode = invalid + x.expr = call + return statement + } + assert(len(targs) == len(xlist)) + + // check number of type arguments (got) vs number of type parameters (want) + got, want := len(targs), sig.TypeParams().Len() + if got > want { + check.errorf(xlist[want], WrongTypeArgCount, "got %d type arguments but want %d", got, want) + check.use(call.ArgList...) + x.mode = invalid + x.expr = call + return statement + } + + // If sig is generic and all type arguments are provided, preempt function + // argument type inference by explicitly instantiating the signature. This + // ensures that we record accurate type information for sig, even if there + // is an error checking its arguments (for example, if an incorrect number + // of arguments is supplied). 
+ if got == want && want > 0 { + check.verifyVersionf(inst, go1_18, "function instantiation") + sig = check.instantiateSignature(inst.Pos(), inst, sig, targs, xlist) + // targs have been consumed; proceed with checking arguments of the + // non-generic signature. + targs = nil + xlist = nil + } + } + + // evaluate arguments + args, atargs, atxlist := check.genericExprList(call.ArgList) + sig = check.arguments(call, sig, targs, xlist, args, atargs, atxlist) + + if wasGeneric && sig.TypeParams().Len() == 0 { + // update the recorded type of call.Fun to its instantiated type + check.recordTypeAndValue(call.Fun, value, sig, nil) + } + + // determine result + switch sig.results.Len() { + case 0: + x.mode = novalue + case 1: + if cgocall { + x.mode = commaerr + } else { + x.mode = value + } + x.typ = sig.results.vars[0].typ // unpack tuple + default: + x.mode = value + x.typ = sig.results + } + x.expr = call + check.hasCallOrRecv = true + + // if type inference failed, a parameterized result must be invalidated + // (operands cannot have a parameterized type) + if x.mode == value && sig.TypeParams().Len() > 0 && isParameterized(sig.TypeParams().list(), x.typ) { + x.mode = invalid + } + + return statement +} + +// exprList evaluates a list of expressions and returns the corresponding operands. +// A single-element expression list may evaluate to multiple operands. +func (check *Checker) exprList(elist []syntax.Expr) (xlist []*operand) { + if n := len(elist); n == 1 { + xlist, _ = check.multiExpr(elist[0], false) + } else if n > 1 { + // multiple (possibly invalid) values + xlist = make([]*operand, n) + for i, e := range elist { + var x operand + check.expr(nil, &x, e) + xlist[i] = &x + } + } + return +} + +// genericExprList is like exprList but result operands may be uninstantiated or partially +// instantiated generic functions (where constraint information is insufficient to infer +// the missing type arguments) for Go 1.21 and later. 
+// For each non-generic or uninstantiated generic operand, the corresponding targsList and +// xlistList elements do not exist (targsList and xlistList are nil) or the elements are nil. +// For each partially instantiated generic function operand, the corresponding targsList and +// xlistList elements are the operand's partial type arguments and type expression lists. +func (check *Checker) genericExprList(elist []syntax.Expr) (resList []*operand, targsList [][]Type, xlistList [][]syntax.Expr) { + if debug { + defer func() { + // targsList and xlistList must have matching lengths + assert(len(targsList) == len(xlistList)) + // type arguments must only exist for partially instantiated functions + for i, x := range resList { + if i < len(targsList) { + if n := len(targsList[i]); n > 0 { + // x must be a partially instantiated function + assert(n < x.typ.(*Signature).TypeParams().Len()) + } + } + } + }() + } + + // Before Go 1.21, uninstantiated or partially instantiated argument functions are + // nor permitted. Checker.funcInst must infer missing type arguments in that case. + infer := true // for -lang < go1.21 + n := len(elist) + if n > 0 && check.allowVersion(check.pkg, elist[0], go1_21) { + infer = false + } + + if n == 1 { + // single value (possibly a partially instantiated function), or a multi-valued expression + e := elist[0] + var x operand + if inst, _ := e.(*syntax.IndexExpr); inst != nil && check.indexExpr(&x, inst) { + // x is a generic function. + targs, xlist := check.funcInst(nil, x.Pos(), &x, inst, infer) + if targs != nil { + // x was not instantiated: collect the (partial) type arguments. + targsList = [][]Type{targs} + xlistList = [][]syntax.Expr{xlist} + // Update x.expr so that we can record the partially instantiated function. + x.expr = inst + } else { + // x was instantiated: we must record it here because we didn't + // use the usual expression evaluators. 
+ check.record(&x) + } + resList = []*operand{&x} + } else { + // x is not a function instantiation (it may still be a generic function). + check.rawExpr(nil, &x, e, nil, true) + check.exclude(&x, 1< 1 { + // multiple values + resList = make([]*operand, n) + targsList = make([][]Type, n) + xlistList = make([][]syntax.Expr, n) + for i, e := range elist { + var x operand + if inst, _ := e.(*syntax.IndexExpr); inst != nil && check.indexExpr(&x, inst) { + // x is a generic function. + targs, xlist := check.funcInst(nil, x.Pos(), &x, inst, infer) + if targs != nil { + // x was not instantiated: collect the (partial) type arguments. + targsList[i] = targs + xlistList[i] = xlist + // Update x.expr so that we can record the partially instantiated function. + x.expr = inst + } else { + // x was instantiated: we must record it here because we didn't + // use the usual expression evaluators. + check.record(&x) + } + } else { + // x is exactly one value (possibly invalid or uninstantiated generic function). + check.genericExpr(&x, e) + } + resList[i] = &x + } + } + + return +} + +// arguments type-checks arguments passed to a function call with the given signature. +// The function and its arguments may be generic, and possibly partially instantiated. +// targs and xlist are the function's type arguments (and corresponding expressions). +// args are the function arguments. If an argument args[i] is a partially instantiated +// generic function, atargs[i] and atxlist[i] are the corresponding type arguments +// (and corresponding expressions). +// If the callee is variadic, arguments adjusts its signature to match the provided +// arguments. The type parameters and arguments of the callee and all its arguments +// are used together to infer any missing type arguments, and the callee and argument +// functions are instantiated as necessary. +// The result signature is the (possibly adjusted and instantiated) function signature. 
+// If an error occurred, the result signature is the incoming sig. +func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []Type, xlist []syntax.Expr, args []*operand, atargs [][]Type, atxlist [][]syntax.Expr) (rsig *Signature) { + rsig = sig + + // Function call argument/parameter count requirements + // + // | standard call | dotdotdot call | + // --------------+------------------+----------------+ + // standard func | nargs == npars | invalid | + // --------------+------------------+----------------+ + // variadic func | nargs >= npars-1 | nargs == npars | + // --------------+------------------+----------------+ + + nargs := len(args) + npars := sig.params.Len() + ddd := call.HasDots + + // set up parameters + sigParams := sig.params // adjusted for variadic functions (may be nil for empty parameter lists!) + adjusted := false // indicates if sigParams is different from sig.params + if sig.variadic { + if ddd { + // variadic_func(a, b, c...) + if len(call.ArgList) == 1 && nargs > 1 { + // f()... is not permitted if f() is multi-valued + //check.errorf(call.Ellipsis, "cannot use ... with %d-valued %s", nargs, call.ArgList[0]) + check.errorf(call, InvalidDotDotDot, "cannot use ... with %d-valued %s", nargs, call.ArgList[0]) + return + } + } else { + // variadic_func(a, b, c) + if nargs >= npars-1 { + // Create custom parameters for arguments: keep + // the first npars-1 parameters and add one for + // each argument mapping to the ... parameter. + vars := make([]*Var, npars-1) // npars > 0 for variadic functions + copy(vars, sig.params.vars) + last := sig.params.vars[npars-1] + typ := last.typ.(*Slice).elem + for len(vars) < nargs { + vars = append(vars, NewParam(last.pos, last.pkg, last.name, typ)) + } + sigParams = NewTuple(vars...) // possibly nil! + adjusted = true + npars = nargs + } else { + // nargs < npars-1 + npars-- // for correct error message below + } + } + } else { + if ddd { + // standard_func(a, b, c...) 
+ //check.errorf(call.Ellipsis, "cannot use ... in call to non-variadic %s", call.Fun) + check.errorf(call, NonVariadicDotDotDot, "cannot use ... in call to non-variadic %s", call.Fun) + return + } + // standard_func(a, b, c) + } + + // check argument count + if nargs != npars { + var at poser = call + qualifier := "not enough" + if nargs > npars { + at = args[npars].expr // report at first extra argument + qualifier = "too many" + } else if nargs > 0 { + at = args[nargs-1].expr // report at last argument + } + // take care of empty parameter lists represented by nil tuples + var params []*Var + if sig.params != nil { + params = sig.params.vars + } + var err error_ + err.code = WrongArgCount + err.errorf(at, "%s arguments in call to %s", qualifier, call.Fun) + err.errorf(nopos, "have %s", check.typesSummary(operandTypes(args), false)) + err.errorf(nopos, "want %s", check.typesSummary(varTypes(params), sig.variadic)) + check.report(&err) + return + } + + // collect type parameters of callee and generic function arguments + var tparams []*TypeParam + + // collect type parameters of callee + n := sig.TypeParams().Len() + if n > 0 { + if !check.allowVersion(check.pkg, call.Pos(), go1_18) { + if iexpr, _ := call.Fun.(*syntax.IndexExpr); iexpr != nil { + check.versionErrorf(iexpr, go1_18, "function instantiation") + } else { + check.versionErrorf(call, go1_18, "implicit function instantiation") + } + } + // rename type parameters to avoid problems with recursive calls + var tmp Type + tparams, tmp = check.renameTParams(call.Pos(), sig.TypeParams().list(), sigParams) + sigParams = tmp.(*Tuple) + // make sure targs and tparams have the same length + for len(targs) < len(tparams) { + targs = append(targs, nil) + } + } + assert(len(tparams) == len(targs)) + + // collect type parameters from generic function arguments + var genericArgs []int // indices of generic function arguments + if enableReverseTypeInference { + for i, arg := range args { + // generic arguments cannot 
have a defined (*Named) type - no need for underlying type below + if asig, _ := arg.typ.(*Signature); asig != nil && asig.TypeParams().Len() > 0 { + // The argument type is a generic function signature. This type is + // pointer-identical with (it's copied from) the type of the generic + // function argument and thus the function object. + // Before we change the type (type parameter renaming, below), make + // a clone of it as otherwise we implicitly modify the object's type + // (go.dev/issues/63260). + asig = clone(asig) + // Rename type parameters for cases like f(g, g); this gives each + // generic function argument a unique type identity (go.dev/issues/59956). + // TODO(gri) Consider only doing this if a function argument appears + // multiple times, which is rare (possible optimization). + atparams, tmp := check.renameTParams(call.Pos(), asig.TypeParams().list(), asig) + asig = tmp.(*Signature) + asig.tparams = &TypeParamList{atparams} // renameTParams doesn't touch associated type parameters + arg.typ = asig // new type identity for the function argument + tparams = append(tparams, atparams...) + // add partial list of type arguments, if any + if i < len(atargs) { + targs = append(targs, atargs[i]...) + } + // make sure targs and tparams have the same length + for len(targs) < len(tparams) { + targs = append(targs, nil) + } + genericArgs = append(genericArgs, i) + } + } + } + assert(len(tparams) == len(targs)) + + // at the moment we only support implicit instantiations of argument functions + _ = len(genericArgs) > 0 && check.verifyVersionf(args[genericArgs[0]], go1_21, "implicitly instantiated function as argument") + + // tparams holds the type parameters of the callee and generic function arguments, if any: + // the first n type parameters belong to the callee, followed by mi type parameters for each + // of the generic function arguments, where mi = args[i].typ.(*Signature).TypeParams().Len(). 
+ + // infer missing type arguments of callee and function arguments + if len(tparams) > 0 { + targs = check.infer(call.Pos(), tparams, targs, sigParams, args, false) + if targs == nil { + // TODO(gri) If infer inferred the first targs[:n], consider instantiating + // the call signature for better error messages/gopls behavior. + // Perhaps instantiate as much as we can, also for arguments. + // This will require changes to how infer returns its results. + return // error already reported + } + + // update result signature: instantiate if needed + if n > 0 { + rsig = check.instantiateSignature(call.Pos(), call.Fun, sig, targs[:n], xlist) + // If the callee's parameter list was adjusted we need to update (instantiate) + // it separately. Otherwise we can simply use the result signature's parameter + // list. + if adjusted { + sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(tparams[:n], targs[:n]), nil, check.context()).(*Tuple) + } else { + sigParams = rsig.params + } + } + + // compute argument signatures: instantiate if needed + j := n + for _, i := range genericArgs { + arg := args[i] + asig := arg.typ.(*Signature) + k := j + asig.TypeParams().Len() + // targs[j:k] are the inferred type arguments for asig + arg.typ = check.instantiateSignature(call.Pos(), arg.expr, asig, targs[j:k], nil) // TODO(gri) provide xlist if possible (partial instantiations) + check.record(arg) // record here because we didn't use the usual expr evaluators + j = k + } + } + + // check arguments + if len(args) > 0 { + context := check.sprintf("argument to %s", call.Fun) + for i, a := range args { + check.assignment(a, sigParams.vars[i].typ, context) + } + } + + return +} + +var cgoPrefixes = [...]string{ + "_Ciconst_", + "_Cfconst_", + "_Csconst_", + "_Ctype_", + "_Cvar_", // actually a pointer to the var + "_Cfpvar_fp_", + "_Cfunc_", + "_Cmacro_", // function to evaluate the expanded expression +} + +func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def 
*TypeName, wantType bool) { + // these must be declared before the "goto Error" statements + var ( + obj Object + index []int + indirect bool + ) + + sel := e.Sel.Value + // If the identifier refers to a package, handle everything here + // so we don't need a "package" mode for operands: package names + // can only appear in qualified identifiers which are mapped to + // selector expressions. + if ident, ok := e.X.(*syntax.Name); ok { + obj := check.lookup(ident.Value) + if pname, _ := obj.(*PkgName); pname != nil { + assert(pname.pkg == check.pkg) + check.recordUse(ident, pname) + pname.used = true + pkg := pname.imported + + var exp Object + funcMode := value + if pkg.cgo { + // cgo special cases C.malloc: it's + // rewritten to _CMalloc and does not + // support two-result calls. + if sel == "malloc" { + sel = "_CMalloc" + } else { + funcMode = cgofunc + } + for _, prefix := range cgoPrefixes { + // cgo objects are part of the current package (in file + // _cgo_gotypes.go). Use regular lookup. 
+ _, exp = check.scope.LookupParent(prefix+sel, check.pos) + if exp != nil { + break + } + } + if exp == nil { + check.errorf(e.Sel, UndeclaredImportedName, "undefined: %s", syntax.Expr(e)) // cast to syntax.Expr to silence vet + goto Error + } + check.objDecl(exp, nil) + } else { + exp = pkg.scope.Lookup(sel) + if exp == nil { + if !pkg.fake { + check.errorf(e.Sel, UndeclaredImportedName, "undefined: %s", syntax.Expr(e)) + } + goto Error + } + if !exp.Exported() { + check.errorf(e.Sel, UnexportedName, "%s not exported by package %s", sel, pkg.name) + // ok to continue + } + } + check.recordUse(e.Sel, exp) + + // Simplified version of the code for *syntax.Names: + // - imported objects are always fully initialized + switch exp := exp.(type) { + case *Const: + assert(exp.Val() != nil) + x.mode = constant_ + x.typ = exp.typ + x.val = exp.val + case *TypeName: + x.mode = typexpr + x.typ = exp.typ + case *Var: + x.mode = variable + x.typ = exp.typ + if pkg.cgo && strings.HasPrefix(exp.name, "_Cvar_") { + x.typ = x.typ.(*Pointer).base + } + case *Func: + x.mode = funcMode + x.typ = exp.typ + if pkg.cgo && strings.HasPrefix(exp.name, "_Cmacro_") { + x.mode = value + x.typ = x.typ.(*Signature).results.vars[0].typ + } + case *Builtin: + x.mode = builtin + x.typ = exp.typ + x.id = exp.id + default: + check.dump("%v: unexpected object %v", atPos(e.Sel), exp) + unreachable() + } + x.expr = e + return + } + } + + check.exprOrType(x, e.X, false) + switch x.mode { + case typexpr: + // don't crash for "type T T.x" (was go.dev/issue/51509) + if def != nil && def.typ == x.typ { + check.cycleError([]Object{def}) + goto Error + } + case builtin: + check.errorf(e.Pos(), UncalledBuiltin, "cannot select on %s", x) + goto Error + case invalid: + goto Error + } + + // Avoid crashing when checking an invalid selector in a method declaration + // (i.e., where def is not set): + // + // type S[T any] struct{} + // type V = S[any] + // func (fs *S[T]) M(x V.M) {} + // + // All codepaths below 
return a non-type expression. If we get here while + // expecting a type expression, it is an error. + // + // See go.dev/issue/57522 for more details. + // + // TODO(rfindley): We should do better by refusing to check selectors in all cases where + // x.typ is incomplete. + if wantType { + check.errorf(e.Sel, NotAType, "%s is not a type", syntax.Expr(e)) + goto Error + } + + obj, index, indirect = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel) + if obj == nil { + // Don't report another error if the underlying type was invalid (go.dev/issue/49541). + if !isValid(under(x.typ)) { + goto Error + } + + if index != nil { + // TODO(gri) should provide actual type where the conflict happens + check.errorf(e.Sel, AmbiguousSelector, "ambiguous selector %s.%s", x.expr, sel) + goto Error + } + + if indirect { + if x.mode == typexpr { + check.errorf(e.Sel, InvalidMethodExpr, "invalid method expression %s.%s (needs pointer receiver (*%s).%s)", x.typ, sel, x.typ, sel) + } else { + check.errorf(e.Sel, InvalidMethodExpr, "cannot call pointer method %s on %s", sel, x.typ) + } + goto Error + } + + var why string + if isInterfacePtr(x.typ) { + why = check.interfacePtrError(x.typ) + } else { + why = check.sprintf("type %s has no field or method %s", x.typ, sel) + // Check if capitalization of sel matters and provide better error message in that case. + // TODO(gri) This code only looks at the first character but LookupFieldOrMethod has an + // (internal) mechanism for case-insensitive lookup. Should use that instead. 
+ if len(sel) > 0 { + var changeCase string + if r := rune(sel[0]); unicode.IsUpper(r) { + changeCase = string(unicode.ToLower(r)) + sel[1:] + } else { + changeCase = string(unicode.ToUpper(r)) + sel[1:] + } + if obj, _, _ = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, changeCase); obj != nil { + why += ", but does have " + changeCase + } + } + } + check.errorf(e.Sel, MissingFieldOrMethod, "%s.%s undefined (%s)", x.expr, sel, why) + goto Error + } + + // methods may not have a fully set up signature yet + if m, _ := obj.(*Func); m != nil { + check.objDecl(m, nil) + } + + if x.mode == typexpr { + // method expression + m, _ := obj.(*Func) + if m == nil { + // TODO(gri) should check if capitalization of sel matters and provide better error message in that case + check.errorf(e.Sel, MissingFieldOrMethod, "%s.%s undefined (type %s has no method %s)", x.expr, sel, x.typ, sel) + goto Error + } + + check.recordSelection(e, MethodExpr, x.typ, m, index, indirect) + + sig := m.typ.(*Signature) + if sig.recv == nil { + check.error(e, InvalidDeclCycle, "illegal cycle in method declaration") + goto Error + } + + // The receiver type becomes the type of the first function + // argument of the method expression's function type. + var params []*Var + if sig.params != nil { + params = sig.params.vars + } + // Be consistent about named/unnamed parameters. This is not needed + // for type-checking, but the newly constructed signature may appear + // in an error message and then have mixed named/unnamed parameters. + // (An alternative would be to not print parameter names in errors, + // but it's useful to see them; this is cheap and method expressions + // are rare.) + name := "" + if len(params) > 0 && params[0].name != "" { + // name needed + name = sig.recv.name + if name == "" { + name = "_" + } + } + params = append([]*Var{NewVar(sig.recv.pos, sig.recv.pkg, name, x.typ)}, params...) 
+ x.mode = value + x.typ = &Signature{ + tparams: sig.tparams, + params: NewTuple(params...), + results: sig.results, + variadic: sig.variadic, + } + + check.addDeclDep(m) + + } else { + // regular selector + switch obj := obj.(type) { + case *Var: + check.recordSelection(e, FieldVal, x.typ, obj, index, indirect) + if x.mode == variable || indirect { + x.mode = variable + } else { + x.mode = value + } + x.typ = obj.typ + + case *Func: + // TODO(gri) If we needed to take into account the receiver's + // addressability, should we report the type &(x.typ) instead? + check.recordSelection(e, MethodVal, x.typ, obj, index, indirect) + + x.mode = value + + // remove receiver + sig := *obj.typ.(*Signature) + sig.recv = nil + x.typ = &sig + + check.addDeclDep(obj) + + default: + unreachable() + } + } + + // everything went well + x.expr = e + return + +Error: + x.mode = invalid + x.expr = e +} + +// use type-checks each argument. +// Useful to make sure expressions are evaluated +// (and variables are "used") in the presence of +// other errors. Arguments may be nil. +// Reports if all arguments evaluated without error. +func (check *Checker) use(args ...syntax.Expr) bool { return check.useN(args, false) } + +// useLHS is like use, but doesn't "use" top-level identifiers. +// It should be called instead of use if the arguments are +// expressions on the lhs of an assignment. 
+func (check *Checker) useLHS(args ...syntax.Expr) bool { return check.useN(args, true) } + +func (check *Checker) useN(args []syntax.Expr, lhs bool) bool { + ok := true + for _, e := range args { + if !check.use1(e, lhs) { + ok = false + } + } + return ok +} + +func (check *Checker) use1(e syntax.Expr, lhs bool) bool { + var x operand + x.mode = value // anything but invalid + switch n := syntax.Unparen(e).(type) { + case nil: + // nothing to do + case *syntax.Name: + // don't report an error evaluating blank + if n.Value == "_" { + break + } + // If the lhs is an identifier denoting a variable v, this assignment + // is not a 'use' of v. Remember current value of v.used and restore + // after evaluating the lhs via check.rawExpr. + var v *Var + var v_used bool + if lhs { + if _, obj := check.scope.LookupParent(n.Value, nopos); obj != nil { + // It's ok to mark non-local variables, but ignore variables + // from other packages to avoid potential race conditions with + // dot-imported variables. + if w, _ := obj.(*Var); w != nil && w.pkg == check.pkg { + v = w + v_used = v.used + } + } + } + check.exprOrType(&x, n, true) + if v != nil { + v.used = v_used // restore v.used + } + case *syntax.ListExpr: + return check.useN(n.ElemList, lhs) + default: + check.rawExpr(nil, &x, e, nil, true) + } + return x.mode != invalid +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/chan.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/chan.go new file mode 100644 index 0000000000000000000000000000000000000000..77650dfb09daad6b56df0138063676b381b84fcc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/chan.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +// A Chan represents a channel type. 
+type Chan struct { + dir ChanDir + elem Type +} + +// A ChanDir value indicates a channel direction. +type ChanDir int + +// The direction of a channel is indicated by one of these constants. +const ( + SendRecv ChanDir = iota + SendOnly + RecvOnly +) + +// NewChan returns a new channel type for the given direction and element type. +func NewChan(dir ChanDir, elem Type) *Chan { + return &Chan{dir: dir, elem: elem} +} + +// Dir returns the direction of channel c. +func (c *Chan) Dir() ChanDir { return c.dir } + +// Elem returns the element type of channel c. +func (c *Chan) Elem() Type { return c.elem } + +func (c *Chan) Underlying() Type { return c } +func (c *Chan) String() string { return TypeString(c, nil) } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/check.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/check.go new file mode 100644 index 0000000000000000000000000000000000000000..058236708329df1a435c4923a329b9067f2513b6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/check.go @@ -0,0 +1,704 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements the Check function, which drives type-checking. + +package types2 + +import ( + "cmd/compile/internal/syntax" + "errors" + "fmt" + "go/constant" + "internal/godebug" + . "internal/types/errors" +) + +// nopos indicates an unknown position +var nopos syntax.Pos + +// debugging/development support +const debug = false // leave on during development + +// gotypesalias controls the use of Alias types. +var gotypesalias = godebug.New("#gotypesalias") + +// exprInfo stores information about an untyped expression. 
+type exprInfo struct { + isLhs bool // expression is lhs operand of a shift with delayed type-check + mode operandMode + typ *Basic + val constant.Value // constant value; or nil (if not a constant) +} + +// An environment represents the environment within which an object is +// type-checked. +type environment struct { + decl *declInfo // package-level declaration whose init expression/function body is checked + scope *Scope // top-most scope for lookups + pos syntax.Pos // if valid, identifiers are looked up as if at position pos (used by Eval) + iota constant.Value // value of iota in a constant declaration; nil otherwise + errpos syntax.Pos // if valid, identifier position of a constant with inherited initializer + inTParamList bool // set if inside a type parameter list + sig *Signature // function signature if inside a function; nil otherwise + isPanic map[*syntax.CallExpr]bool // set of panic call expressions (used for termination check) + hasLabel bool // set if a function makes use of labels (only ~1% of functions); unused outside functions + hasCallOrRecv bool // set if an expression contains a function call or channel receive operation +} + +// lookup looks up name in the current environment and returns the matching object, or nil. +func (env *environment) lookup(name string) Object { + _, obj := env.scope.LookupParent(name, env.pos) + return obj +} + +// An importKey identifies an imported package by import path and source directory +// (directory containing the file containing the import). In practice, the directory +// may always be the same, or may not matter. Given an (import path, directory), an +// importer must always return the same package (but given two different import paths, +// an importer may still return the same package by mapping them to the same package +// paths). +type importKey struct { + path, dir string +} + +// A dotImportKey describes a dot-imported object in the given scope. 
+type dotImportKey struct { + scope *Scope + name string +} + +// An action describes a (delayed) action. +type action struct { + f func() // action to be executed + desc *actionDesc // action description; may be nil, requires debug to be set +} + +// If debug is set, describef sets a printf-formatted description for action a. +// Otherwise, it is a no-op. +func (a *action) describef(pos poser, format string, args ...interface{}) { + if debug { + a.desc = &actionDesc{pos, format, args} + } +} + +// An actionDesc provides information on an action. +// For debugging only. +type actionDesc struct { + pos poser + format string + args []interface{} +} + +// A Checker maintains the state of the type checker. +// It must be created with NewChecker. +type Checker struct { + // package information + // (initialized by NewChecker, valid for the life-time of checker) + + // If enableAlias is set, alias declarations produce an Alias type. + // Otherwise the alias information is only in the type name, which + // points directly to the actual (aliased) type. + enableAlias bool + + conf *Config + ctxt *Context // context for de-duplicating instances + pkg *Package + *Info + version goVersion // accepted language version + nextID uint64 // unique Id for type parameters (first valid Id is 1) + objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info + impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package + valids instanceLookup // valid *Named (incl. instantiated) types per the validType check + + // pkgPathMap maps package names to the set of distinct import paths we've + // seen for that name, anywhere in the import graph. It is used for + // disambiguating package names in error messages. + // + // pkgPathMap is allocated lazily, so that we don't pay the price of building + // it on the happy path. seenPkgMap tracks the packages that we've already + // walked. 
+ pkgPathMap map[string]map[string]bool + seenPkgMap map[*Package]bool + + // information collected during type-checking of a set of package files + // (initialized by Files, valid only for the duration of check.Files; + // maps and lists are allocated on demand) + files []*syntax.File // list of package files + versions map[*syntax.PosBase]string // maps file bases to version strings (each file has an entry) + imports []*PkgName // list of imported packages + dotImportMap map[dotImportKey]*PkgName // maps dot-imported objects to the package they were dot-imported through + recvTParamMap map[*syntax.Name]*TypeParam // maps blank receiver type parameters to their type + brokenAliases map[*TypeName]bool // set of aliases with broken (not yet determined) types + unionTypeSets map[*Union]*_TypeSet // computed type sets for union types + mono monoGraph // graph for detecting non-monomorphizable instantiation loops + + firstErr error // first error encountered + methods map[*TypeName][]*Func // maps package scope type names to associated non-blank (non-interface) methods + untyped map[syntax.Expr]exprInfo // map of expressions without final type + delayed []action // stack of delayed action segments; segments are processed in FIFO order + objPath []Object // path of object dependencies during type inference (for cycle reporting) + cleaners []cleaner // list of types that may need a final cleanup at the end of type-checking + + // environment within which the current object is type-checked (valid only + // for the duration of type-checking a specific object) + environment + + // debugging + indent int // indentation for tracing +} + +// addDeclDep adds the dependency edge (check.decl -> to) if check.decl exists +func (check *Checker) addDeclDep(to Object) { + from := check.decl + if from == nil { + return // not in a package-level init expression + } + if _, found := check.objMap[to]; !found { + return // to is not a package-level object + } + from.addDep(to) +} + +// 
Note: The following three alias-related functions are only used +// when Alias types are not enabled. + +// brokenAlias records that alias doesn't have a determined type yet. +// It also sets alias.typ to Typ[Invalid]. +// Not used if check.enableAlias is set. +func (check *Checker) brokenAlias(alias *TypeName) { + assert(!check.enableAlias) + if check.brokenAliases == nil { + check.brokenAliases = make(map[*TypeName]bool) + } + check.brokenAliases[alias] = true + alias.typ = Typ[Invalid] +} + +// validAlias records that alias has the valid type typ (possibly Typ[Invalid]). +func (check *Checker) validAlias(alias *TypeName, typ Type) { + assert(!check.enableAlias) + delete(check.brokenAliases, alias) + alias.typ = typ +} + +// isBrokenAlias reports whether alias doesn't have a determined type yet. +func (check *Checker) isBrokenAlias(alias *TypeName) bool { + assert(!check.enableAlias) + return check.brokenAliases[alias] +} + +func (check *Checker) rememberUntyped(e syntax.Expr, lhs bool, mode operandMode, typ *Basic, val constant.Value) { + m := check.untyped + if m == nil { + m = make(map[syntax.Expr]exprInfo) + check.untyped = m + } + m[e] = exprInfo{lhs, mode, typ, val} +} + +// later pushes f on to the stack of actions that will be processed later; +// either at the end of the current statement, or in case of a local constant +// or variable declaration, before the constant or variable is in scope +// (so that f still sees the scope before any new declarations). +// later returns the pushed action so one can provide a description +// via action.describef for debugging, if desired. +func (check *Checker) later(f func()) *action { + i := len(check.delayed) + check.delayed = append(check.delayed, action{f: f}) + return &check.delayed[i] +} + +// push pushes obj onto the object path and returns its index in the path. 
+func (check *Checker) push(obj Object) int { + check.objPath = append(check.objPath, obj) + return len(check.objPath) - 1 +} + +// pop pops and returns the topmost object from the object path. +func (check *Checker) pop() Object { + i := len(check.objPath) - 1 + obj := check.objPath[i] + check.objPath[i] = nil + check.objPath = check.objPath[:i] + return obj +} + +type cleaner interface { + cleanup() +} + +// needsCleanup records objects/types that implement the cleanup method +// which will be called at the end of type-checking. +func (check *Checker) needsCleanup(c cleaner) { + check.cleaners = append(check.cleaners, c) +} + +// NewChecker returns a new Checker instance for a given package. +// Package files may be added incrementally via checker.Files. +func NewChecker(conf *Config, pkg *Package, info *Info) *Checker { + // make sure we have a configuration + if conf == nil { + conf = new(Config) + } + + // make sure we have an info struct + if info == nil { + info = new(Info) + } + + // Note: clients may call NewChecker with the Unsafe package, which is + // globally shared and must not be mutated. Therefore NewChecker must not + // mutate *pkg. + // + // (previously, pkg.goVersion was mutated here: go.dev/issue/61212) + + return &Checker{ + enableAlias: gotypesalias.Value() == "1", + conf: conf, + ctxt: conf.Context, + pkg: pkg, + Info: info, + version: asGoVersion(conf.GoVersion), + objMap: make(map[Object]*declInfo), + impMap: make(map[importKey]*Package), + } +} + +// initFiles initializes the files-specific portion of checker. +// The provided files must all belong to the same package. 
+func (check *Checker) initFiles(files []*syntax.File) { + // start with a clean slate (check.Files may be called multiple times) + check.files = nil + check.imports = nil + check.dotImportMap = nil + + check.firstErr = nil + check.methods = nil + check.untyped = nil + check.delayed = nil + check.objPath = nil + check.cleaners = nil + + // determine package name and collect valid files + pkg := check.pkg + for _, file := range files { + switch name := file.PkgName.Value; pkg.name { + case "": + if name != "_" { + pkg.name = name + } else { + check.error(file.PkgName, BlankPkgName, "invalid package name _") + } + fallthrough + + case name: + check.files = append(check.files, file) + + default: + check.errorf(file, MismatchedPkgName, "package %s; expected %s", name, pkg.name) + // ignore this file + } + } + + // reuse Info.FileVersions if provided + versions := check.Info.FileVersions + if versions == nil { + versions = make(map[*syntax.PosBase]string) + } + check.versions = versions + + pkgVersionOk := check.version.isValid() + downgradeOk := check.version.cmp(go1_21) >= 0 + + // determine Go version for each file + for _, file := range check.files { + // use unaltered Config.GoVersion by default + // (This version string may contain dot-release numbers as in go1.20.1, + // unlike file versions which are Go language versions only, if valid.) + v := check.conf.GoVersion + // use the file version, if applicable + // (file versions are either the empty string or of the form go1.dd) + if pkgVersionOk { + fileVersion := asGoVersion(file.GoVersion) + if fileVersion.isValid() { + cmp := fileVersion.cmp(check.version) + // Go 1.21 introduced the feature of setting the go.mod + // go line to an early version of Go and allowing //go:build lines + // to “upgrade” (cmp > 0) the Go version in a given file. + // We can do that backwards compatibly. 
+ // + // Go 1.21 also introduced the feature of allowing //go:build lines + // to “downgrade” (cmp < 0) the Go version in a given file. + // That can't be done compatibly in general, since before the + // build lines were ignored and code got the module's Go version. + // To work around this, downgrades are only allowed when the + // module's Go version is Go 1.21 or later. + // + // If there is no valid check.version, then we don't really know what + // Go version to apply. + // Legacy tools may do this, and they historically have accepted everything. + // Preserve that behavior by ignoring //go:build constraints entirely in that + // case (!pkgVersionOk). + if cmp > 0 || cmp < 0 && downgradeOk { + v = file.GoVersion + } + } + } + versions[base(file.Pos())] = v // base(file.Pos()) may be nil for tests + } +} + +// A bailout panic is used for early termination. +type bailout struct{} + +func (check *Checker) handleBailout(err *error) { + switch p := recover().(type) { + case nil, bailout: + // normal return or early exit + *err = check.firstErr + default: + // re-panic + panic(p) + } +} + +// Files checks the provided files as part of the checker's package. +func (check *Checker) Files(files []*syntax.File) error { return check.checkFiles(files) } + +var errBadCgo = errors.New("cannot use FakeImportC and go115UsesCgo together") + +func (check *Checker) checkFiles(files []*syntax.File) (err error) { + if check.pkg == Unsafe { + // Defensive handling for Unsafe, which cannot be type checked, and must + // not be mutated. See https://go.dev/issue/61212 for an example of where + // Unsafe is passed to NewChecker. + return nil + } + + // Note: NewChecker doesn't return an error, so we need to check the version here. 
+ if check.version.cmp(go_current) > 0 { + return fmt.Errorf("package requires newer Go version %v", check.version) + } + if check.conf.FakeImportC && check.conf.go115UsesCgo { + return errBadCgo + } + + defer check.handleBailout(&err) + + print := func(msg string) { + if check.conf.Trace { + fmt.Println() + fmt.Println(msg) + } + } + + print("== initFiles ==") + check.initFiles(files) + + print("== collectObjects ==") + check.collectObjects() + + print("== packageObjects ==") + check.packageObjects() + + print("== processDelayed ==") + check.processDelayed(0) // incl. all functions + + print("== cleanup ==") + check.cleanup() + + print("== initOrder ==") + check.initOrder() + + if !check.conf.DisableUnusedImportCheck { + print("== unusedImports ==") + check.unusedImports() + } + + print("== recordUntyped ==") + check.recordUntyped() + + if check.firstErr == nil { + // TODO(mdempsky): Ensure monomorph is safe when errors exist. + check.monomorph() + } + + check.pkg.goVersion = check.conf.GoVersion + check.pkg.complete = true + + // no longer needed - release memory + check.imports = nil + check.dotImportMap = nil + check.pkgPathMap = nil + check.seenPkgMap = nil + check.recvTParamMap = nil + check.brokenAliases = nil + check.unionTypeSets = nil + check.ctxt = nil + + // TODO(gri) There's more memory we should release at this point. + + return +} + +// processDelayed processes all delayed actions pushed after top. +func (check *Checker) processDelayed(top int) { + // If each delayed action pushes a new action, the + // stack will continue to grow during this loop. + // However, it is only processing functions (which + // are processed in a delayed fashion) that may + // add more actions (such as nested functions), so + // this is a sufficiently bounded process. + for i := top; i < len(check.delayed); i++ { + a := &check.delayed[i] + if check.conf.Trace { + if a.desc != nil { + check.trace(a.desc.pos.Pos(), "-- "+a.desc.format, a.desc.args...) 
+ } else { + check.trace(nopos, "-- delayed %p", a.f) + } + } + a.f() // may append to check.delayed + if check.conf.Trace { + fmt.Println() + } + } + assert(top <= len(check.delayed)) // stack must not have shrunk + check.delayed = check.delayed[:top] +} + +// cleanup runs cleanup for all collected cleaners. +func (check *Checker) cleanup() { + // Don't use a range clause since Named.cleanup may add more cleaners. + for i := 0; i < len(check.cleaners); i++ { + check.cleaners[i].cleanup() + } + check.cleaners = nil +} + +func (check *Checker) record(x *operand) { + // convert x into a user-friendly set of values + // TODO(gri) this code can be simplified + var typ Type + var val constant.Value + switch x.mode { + case invalid: + typ = Typ[Invalid] + case novalue: + typ = (*Tuple)(nil) + case constant_: + typ = x.typ + val = x.val + default: + typ = x.typ + } + assert(x.expr != nil && typ != nil) + + if isUntyped(typ) { + // delay type and value recording until we know the type + // or until the end of type checking + check.rememberUntyped(x.expr, false, x.mode, typ.(*Basic), val) + } else { + check.recordTypeAndValue(x.expr, x.mode, typ, val) + } +} + +func (check *Checker) recordUntyped() { + if !debug && !check.recordTypes() { + return // nothing to do + } + + for x, info := range check.untyped { + if debug && isTyped(info.typ) { + check.dump("%v: %s (type %s) is typed", atPos(x), x, info.typ) + unreachable() + } + check.recordTypeAndValue(x, info.mode, info.typ, info.val) + } +} + +func (check *Checker) recordTypeAndValue(x syntax.Expr, mode operandMode, typ Type, val constant.Value) { + assert(x != nil) + assert(typ != nil) + if mode == invalid { + return // omit + } + if mode == constant_ { + assert(val != nil) + // We check allBasic(typ, IsConstType) here as constant expressions may be + // recorded as type parameters. 
+ assert(!isValid(typ) || allBasic(typ, IsConstType)) + } + if m := check.Types; m != nil { + m[x] = TypeAndValue{mode, typ, val} + } + if check.StoreTypesInSyntax { + tv := TypeAndValue{mode, typ, val} + stv := syntax.TypeAndValue{Type: typ, Value: val} + if tv.IsVoid() { + stv.SetIsVoid() + } + if tv.IsType() { + stv.SetIsType() + } + if tv.IsBuiltin() { + stv.SetIsBuiltin() + } + if tv.IsValue() { + stv.SetIsValue() + } + if tv.IsNil() { + stv.SetIsNil() + } + if tv.Addressable() { + stv.SetAddressable() + } + if tv.Assignable() { + stv.SetAssignable() + } + if tv.HasOk() { + stv.SetHasOk() + } + x.SetTypeInfo(stv) + } +} + +func (check *Checker) recordBuiltinType(f syntax.Expr, sig *Signature) { + // f must be a (possibly parenthesized, possibly qualified) + // identifier denoting a built-in (including unsafe's non-constant + // functions Add and Slice): record the signature for f and possible + // children. + for { + check.recordTypeAndValue(f, builtin, sig, nil) + switch p := f.(type) { + case *syntax.Name, *syntax.SelectorExpr: + return // we're done + case *syntax.ParenExpr: + f = p.X + default: + unreachable() + } + } +} + +// recordCommaOkTypes updates recorded types to reflect that x is used in a commaOk context +// (and therefore has tuple type). 
+func (check *Checker) recordCommaOkTypes(x syntax.Expr, a []*operand) { + assert(x != nil) + assert(len(a) == 2) + if a[0].mode == invalid { + return + } + t0, t1 := a[0].typ, a[1].typ + assert(isTyped(t0) && isTyped(t1) && (isBoolean(t1) || t1 == universeError)) + if m := check.Types; m != nil { + for { + tv := m[x] + assert(tv.Type != nil) // should have been recorded already + pos := x.Pos() + tv.Type = NewTuple( + NewVar(pos, check.pkg, "", t0), + NewVar(pos, check.pkg, "", t1), + ) + m[x] = tv + // if x is a parenthesized expression (p.X), update p.X + p, _ := x.(*syntax.ParenExpr) + if p == nil { + break + } + x = p.X + } + } + if check.StoreTypesInSyntax { + // Note: this loop is duplicated because the type of tv is different. + // Above it is types2.TypeAndValue, here it is syntax.TypeAndValue. + for { + tv := x.GetTypeInfo() + assert(tv.Type != nil) // should have been recorded already + pos := x.Pos() + tv.Type = NewTuple( + NewVar(pos, check.pkg, "", t0), + NewVar(pos, check.pkg, "", t1), + ) + x.SetTypeInfo(tv) + p, _ := x.(*syntax.ParenExpr) + if p == nil { + break + } + x = p.X + } + } +} + +// recordInstance records instantiation information into check.Info, if the +// Instances map is non-nil. The given expr must be an ident, selector, or +// index (list) expr with ident or selector operand. +// +// TODO(rfindley): the expr parameter is fragile. See if we can access the +// instantiated identifier in some other way. 
+func (check *Checker) recordInstance(expr syntax.Expr, targs []Type, typ Type) { + ident := instantiatedIdent(expr) + assert(ident != nil) + assert(typ != nil) + if m := check.Instances; m != nil { + m[ident] = Instance{newTypeList(targs), typ} + } +} + +func instantiatedIdent(expr syntax.Expr) *syntax.Name { + var selOrIdent syntax.Expr + switch e := expr.(type) { + case *syntax.IndexExpr: + selOrIdent = e.X + case *syntax.SelectorExpr, *syntax.Name: + selOrIdent = e + } + switch x := selOrIdent.(type) { + case *syntax.Name: + return x + case *syntax.SelectorExpr: + return x.Sel + } + panic("instantiated ident not found") +} + +func (check *Checker) recordDef(id *syntax.Name, obj Object) { + assert(id != nil) + if m := check.Defs; m != nil { + m[id] = obj + } +} + +func (check *Checker) recordUse(id *syntax.Name, obj Object) { + assert(id != nil) + assert(obj != nil) + if m := check.Uses; m != nil { + m[id] = obj + } +} + +func (check *Checker) recordImplicit(node syntax.Node, obj Object) { + assert(node != nil) + assert(obj != nil) + if m := check.Implicits; m != nil { + m[node] = obj + } +} + +func (check *Checker) recordSelection(x *syntax.SelectorExpr, kind SelectionKind, recv Type, obj Object, index []int, indirect bool) { + assert(obj != nil && (recv == nil || len(index) > 0)) + check.recordUse(x.Sel, obj) + if m := check.Selections; m != nil { + m[x] = &Selection{kind, recv, obj, index, indirect} + } +} + +func (check *Checker) recordScope(node syntax.Node, scope *Scope) { + assert(node != nil) + assert(scope != nil) + if m := check.Scopes; m != nil { + m[node] = scope + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/check_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/check_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a9d6202a336c5aaaf49896546d3cae6c370e53ff --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/check_test.go @@ -0,0 +1,461 
@@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements a typechecker test harness. The packages specified +// in tests are typechecked. Error messages reported by the typechecker are +// compared against the errors expected in the test files. +// +// Expected errors are indicated in the test files by putting comments +// of the form /* ERROR pattern */ or /* ERRORx pattern */ (or a similar +// //-style line comment) immediately following the tokens where errors +// are reported. There must be exactly one blank before and after the +// ERROR/ERRORx indicator, and the pattern must be a properly quoted Go +// string. +// +// The harness will verify that each ERROR pattern is a substring of the +// error reported at that source position, and that each ERRORx pattern +// is a regular expression matching the respective error. +// Consecutive comments may be used to indicate multiple errors reported +// at the same position. +// +// For instance, the following test source indicates that an "undeclared" +// error should be reported for the undeclared variable x: +// +// package p +// func f() { +// _ = x /* ERROR "undeclared" */ + 1 +// } + +package types2_test + +import ( + "bytes" + "cmd/compile/internal/syntax" + "flag" + "fmt" + "internal/buildcfg" + "internal/testenv" + "os" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "testing" + + . 
"cmd/compile/internal/types2" +) + +var ( + haltOnError = flag.Bool("halt", false, "halt on error") + verifyErrors = flag.Bool("verify", false, "verify errors (rather than list them) in TestManual") +) + +func parseFiles(t *testing.T, filenames []string, srcs [][]byte, mode syntax.Mode) ([]*syntax.File, []error) { + var files []*syntax.File + var errlist []error + errh := func(err error) { errlist = append(errlist, err) } + for i, filename := range filenames { + base := syntax.NewFileBase(filename) + r := bytes.NewReader(srcs[i]) + file, err := syntax.Parse(base, r, errh, nil, mode) + if file == nil { + t.Fatalf("%s: %s", filename, err) + } + files = append(files, file) + } + return files, errlist +} + +func unpackError(err error) (syntax.Pos, string) { + switch err := err.(type) { + case syntax.Error: + return err.Pos, err.Msg + case Error: + return err.Pos, err.Msg + default: + return nopos, err.Error() + } +} + +// absDiff returns the absolute difference between x and y. +func absDiff(x, y uint) uint { + if x < y { + return y - x + } + return x - y +} + +// parseFlags parses flags from the first line of the given source if the line +// starts with "//" (line comment) followed by "-" (possibly with spaces +// between). Otherwise the line is ignored. 
+func parseFlags(src []byte, flags *flag.FlagSet) error { + // we must have a line comment that starts with a "-" + const prefix = "//" + if !bytes.HasPrefix(src, []byte(prefix)) { + return nil // first line is not a line comment + } + src = src[len(prefix):] + if i := bytes.Index(src, []byte("-")); i < 0 || len(bytes.TrimSpace(src[:i])) != 0 { + return nil // comment doesn't start with a "-" + } + end := bytes.Index(src, []byte("\n")) + const maxLen = 256 + if end < 0 || end > maxLen { + return fmt.Errorf("flags comment line too long") + } + + return flags.Parse(strings.Fields(string(src[:end]))) +} + +// testFiles type-checks the package consisting of the given files, and +// compares the resulting errors with the ERROR annotations in the source. +// Except for manual tests, each package is type-checked twice, once without +// use of Alias types, and once with Alias types. +// +// The srcs slice contains the file content for the files named in the +// filenames slice. The colDelta parameter specifies the tolerance for position +// mismatch when comparing errors. The manual parameter specifies whether this +// is a 'manual' test. +// +// If provided, opts may be used to mutate the Config before type-checking. +func testFiles(t *testing.T, filenames []string, srcs [][]byte, colDelta uint, manual bool, opts ...func(*Config)) { + // Alias types are disabled by default + testFilesImpl(t, filenames, srcs, colDelta, manual, opts...) + if !manual { + t.Setenv("GODEBUG", "gotypesalias=1") + testFilesImpl(t, filenames, srcs, colDelta, manual, opts...) 
+ } +} + +func testFilesImpl(t *testing.T, filenames []string, srcs [][]byte, colDelta uint, manual bool, opts ...func(*Config)) { + if len(filenames) == 0 { + t.Fatal("no source files") + } + + // parse files + files, errlist := parseFiles(t, filenames, srcs, 0) + pkgName := "" + if len(files) > 0 { + pkgName = files[0].PkgName.Value + } + listErrors := manual && !*verifyErrors + if listErrors && len(errlist) > 0 { + t.Errorf("--- %s:", pkgName) + for _, err := range errlist { + t.Error(err) + } + } + + // set up typechecker + var conf Config + conf.Trace = manual && testing.Verbose() + conf.Importer = defaultImporter() + conf.Error = func(err error) { + if *haltOnError { + defer panic(err) + } + if listErrors { + t.Error(err) + return + } + errlist = append(errlist, err) + } + + // apply custom configuration + for _, opt := range opts { + opt(&conf) + } + + // apply flag setting (overrides custom configuration) + var goexperiment, gotypesalias string + flags := flag.NewFlagSet("", flag.PanicOnError) + flags.StringVar(&conf.GoVersion, "lang", "", "") + flags.StringVar(&goexperiment, "goexperiment", "", "") + flags.BoolVar(&conf.FakeImportC, "fakeImportC", false, "") + flags.StringVar(&gotypesalias, "gotypesalias", "", "") + if err := parseFlags(srcs[0], flags); err != nil { + t.Fatal(err) + } + + exp, err := buildcfg.ParseGOEXPERIMENT(runtime.GOOS, runtime.GOARCH, goexperiment) + if err != nil { + t.Fatal(err) + } + old := buildcfg.Experiment + defer func() { + buildcfg.Experiment = old + }() + buildcfg.Experiment = *exp + + // By default, gotypesalias is not set. + if gotypesalias != "" { + t.Setenv("GODEBUG", "gotypesalias="+gotypesalias) + } + + // Provide Config.Info with all maps so that info recording is tested. 
+ info := Info{ + Types: make(map[syntax.Expr]TypeAndValue), + Instances: make(map[*syntax.Name]Instance), + Defs: make(map[*syntax.Name]Object), + Uses: make(map[*syntax.Name]Object), + Implicits: make(map[syntax.Node]Object), + Selections: make(map[*syntax.SelectorExpr]*Selection), + Scopes: make(map[syntax.Node]*Scope), + FileVersions: make(map[*syntax.PosBase]string), + } + + // typecheck + conf.Check(pkgName, files, &info) + if listErrors { + return + } + + // collect expected errors + errmap := make(map[string]map[uint][]syntax.Error) + for i, filename := range filenames { + if m := syntax.CommentMap(bytes.NewReader(srcs[i]), regexp.MustCompile("^ ERRORx? ")); len(m) > 0 { + errmap[filename] = m + } + } + + // match against found errors + var indices []int // list indices of matching errors, reused for each error + for _, err := range errlist { + gotPos, gotMsg := unpackError(err) + + // find list of errors for the respective error line + filename := gotPos.Base().Filename() + filemap := errmap[filename] + line := gotPos.Line() + var errList []syntax.Error + if filemap != nil { + errList = filemap[line] + } + + // At least one of the errors in errList should match the current error. 
+ indices = indices[:0] + for i, want := range errList { + pattern, substr := strings.CutPrefix(want.Msg, " ERROR ") + if !substr { + var found bool + pattern, found = strings.CutPrefix(want.Msg, " ERRORx ") + if !found { + panic("unreachable") + } + } + pattern, err := strconv.Unquote(strings.TrimSpace(pattern)) + if err != nil { + t.Errorf("%s:%d:%d: %v", filename, line, want.Pos.Col(), err) + continue + } + if substr { + if !strings.Contains(gotMsg, pattern) { + continue + } + } else { + rx, err := regexp.Compile(pattern) + if err != nil { + t.Errorf("%s:%d:%d: %v", filename, line, want.Pos.Col(), err) + continue + } + if !rx.MatchString(gotMsg) { + continue + } + } + indices = append(indices, i) + } + if len(indices) == 0 { + t.Errorf("%s: no error expected: %q", gotPos, gotMsg) + continue + } + // len(indices) > 0 + + // If there are multiple matching errors, select the one with the closest column position. + index := -1 // index of matching error + var delta uint + for _, i := range indices { + if d := absDiff(gotPos.Col(), errList[i].Pos.Col()); index < 0 || d < delta { + index, delta = i, d + } + } + + // The closest column position must be within expected colDelta. 
+ if delta > colDelta { + t.Errorf("%s: got col = %d; want %d", gotPos, gotPos.Col(), errList[index].Pos.Col()) + } + + // eliminate from errList + if n := len(errList) - 1; n > 0 { + // not the last entry - slide entries down (don't reorder) + copy(errList[index:], errList[index+1:]) + filemap[line] = errList[:n] + } else { + // last entry - remove errList from filemap + delete(filemap, line) + } + + // if filemap is empty, eliminate from errmap + if len(filemap) == 0 { + delete(errmap, filename) + } + } + + // there should be no expected errors left + if len(errmap) > 0 { + t.Errorf("--- %s: unreported errors:", pkgName) + for filename, filemap := range errmap { + for line, errList := range filemap { + for _, err := range errList { + t.Errorf("%s:%d:%d: %s", filename, line, err.Pos.Col(), err.Msg) + } + } + } + } +} + +// boolFieldAddr(conf, name) returns the address of the boolean field conf.. +// For accessing unexported fields. +func boolFieldAddr(conf *Config, name string) *bool { + v := reflect.Indirect(reflect.ValueOf(conf)) + return (*bool)(v.FieldByName(name).Addr().UnsafePointer()) +} + +// TestManual is for manual testing of a package - either provided +// as a list of filenames belonging to the package, or a directory +// name containing the package files - after the test arguments +// (and a separating "--"). For instance, to test the package made +// of the files foo.go and bar.go, use: +// +// go test -run Manual -- foo.go bar.go +// +// If no source arguments are provided, the file testdata/manual.go +// is used instead. +// Provide the -verify flag to verify errors against ERROR comments +// in the input files rather than having a list of errors reported. +// The accepted Go language version can be controlled with the -lang +// flag. 
+func TestManual(t *testing.T) { + testenv.MustHaveGoBuild(t) + + filenames := flag.Args() + if len(filenames) == 0 { + filenames = []string{filepath.FromSlash("testdata/manual.go")} + } + + info, err := os.Stat(filenames[0]) + if err != nil { + t.Fatalf("TestManual: %v", err) + } + + DefPredeclaredTestFuncs() + if info.IsDir() { + if len(filenames) > 1 { + t.Fatal("TestManual: must have only one directory argument") + } + testDir(t, filenames[0], 0, true) + } else { + testPkg(t, filenames, 0, true) + } +} + +func TestLongConstants(t *testing.T) { + format := `package longconst; const _ = %s /* ERROR "constant overflow" */; const _ = %s // ERROR "excessively long constant"` + src := fmt.Sprintf(format, strings.Repeat("1", 9999), strings.Repeat("1", 10001)) + testFiles(t, []string{"longconst.go"}, [][]byte{[]byte(src)}, 0, false) +} + +func withSizes(sizes Sizes) func(*Config) { + return func(cfg *Config) { + cfg.Sizes = sizes + } +} + +// TestIndexRepresentability tests that constant index operands must +// be representable as int even if they already have a type that can +// represent larger values. +func TestIndexRepresentability(t *testing.T) { + const src = `package index; var s []byte; var _ = s[int64 /* ERRORx "int64\\(1\\) << 40 \\(.*\\) overflows int" */ (1) << 40]` + testFiles(t, []string{"index.go"}, [][]byte{[]byte(src)}, 0, false, withSizes(&StdSizes{4, 4})) +} + +func TestIssue47243_TypedRHS(t *testing.T) { + // The RHS of the shift expression below overflows uint on 32bit platforms, + // but this is OK as it is explicitly typed. 
+ const src = `package issue47243; var a uint64; var _ = a << uint64(4294967296)` // uint64(1<<32) + testFiles(t, []string{"p.go"}, [][]byte{[]byte(src)}, 0, false, withSizes(&StdSizes{4, 4})) +} + +func TestCheck(t *testing.T) { + old := buildcfg.Experiment.RangeFunc + defer func() { + buildcfg.Experiment.RangeFunc = old + }() + buildcfg.Experiment.RangeFunc = true + + DefPredeclaredTestFuncs() + testDirFiles(t, "../../../../internal/types/testdata/check", 50, false) // TODO(gri) narrow column tolerance +} +func TestSpec(t *testing.T) { testDirFiles(t, "../../../../internal/types/testdata/spec", 0, false) } +func TestExamples(t *testing.T) { + testDirFiles(t, "../../../../internal/types/testdata/examples", 125, false) +} // TODO(gri) narrow column tolerance +func TestFixedbugs(t *testing.T) { + testDirFiles(t, "../../../../internal/types/testdata/fixedbugs", 100, false) +} // TODO(gri) narrow column tolerance +func TestLocal(t *testing.T) { testDirFiles(t, "testdata/local", 0, false) } + +func testDirFiles(t *testing.T, dir string, colDelta uint, manual bool) { + testenv.MustHaveGoBuild(t) + dir = filepath.FromSlash(dir) + + fis, err := os.ReadDir(dir) + if err != nil { + t.Error(err) + return + } + + for _, fi := range fis { + path := filepath.Join(dir, fi.Name()) + + // If fi is a directory, its files make up a single package. 
+ if fi.IsDir() { + testDir(t, path, colDelta, manual) + } else { + t.Run(filepath.Base(path), func(t *testing.T) { + testPkg(t, []string{path}, colDelta, manual) + }) + } + } +} + +func testDir(t *testing.T, dir string, colDelta uint, manual bool) { + fis, err := os.ReadDir(dir) + if err != nil { + t.Error(err) + return + } + + var filenames []string + for _, fi := range fis { + filenames = append(filenames, filepath.Join(dir, fi.Name())) + } + + t.Run(filepath.Base(dir), func(t *testing.T) { + testPkg(t, filenames, colDelta, manual) + }) +} + +func testPkg(t *testing.T, filenames []string, colDelta uint, manual bool) { + srcs := make([][]byte, len(filenames)) + for i, filename := range filenames { + src, err := os.ReadFile(filename) + if err != nil { + t.Fatalf("could not read %s: %v", filename, err) + } + srcs[i] = src + } + testFiles(t, filenames, srcs, colDelta, manual) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/compilersupport.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/compilersupport.go new file mode 100644 index 0000000000000000000000000000000000000000..33dd8e8baace43271c656efcdcd2db74dd89b4d8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/compilersupport.go @@ -0,0 +1,30 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Helper functions exported for the compiler. +// Do not use internally. + +package types2 + +// If t is a pointer, AsPointer returns that type, otherwise it returns nil. +func AsPointer(t Type) *Pointer { + u, _ := t.Underlying().(*Pointer) + return u +} + +// If t is a signature, AsSignature returns that type, otherwise it returns nil. 
+func AsSignature(t Type) *Signature { + u, _ := t.Underlying().(*Signature) + return u +} + +// If typ is a type parameter, CoreType returns the single underlying +// type of all types in the corresponding type constraint if it exists, or +// nil otherwise. If the type set contains only unrestricted and restricted +// channel types (with identical element types), the single underlying type +// is the restricted channel type if the restrictions are always the same. +// If typ is not a type parameter, CoreType returns the underlying type. +func CoreType(t Type) Type { + return coreType(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/const.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/const.go new file mode 100644 index 0000000000000000000000000000000000000000..af27c727dd2e70d68a3ddbad2e9ee69875d0f7c9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/const.go @@ -0,0 +1,306 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements functions for untyped constant operands. + +package types2 + +import ( + "cmd/compile/internal/syntax" + "go/constant" + "go/token" + . "internal/types/errors" + "math" +) + +// overflow checks that the constant x is representable by its type. +// For untyped constants, it checks that the value doesn't become +// arbitrarily large. +func (check *Checker) overflow(x *operand, opPos syntax.Pos) { + assert(x.mode == constant_) + + if x.val.Kind() == constant.Unknown { + // TODO(gri) We should report exactly what went wrong. At the + // moment we don't have the (go/constant) API for that. + // See also TODO in go/constant/value.go. + check.error(atPos(opPos), InvalidConstVal, "constant result is not representable") + return + } + + // Typed constants must be representable in + // their type after each constant operation. 
+ // x.typ cannot be a type parameter (type + // parameters cannot be constant types). + if isTyped(x.typ) { + check.representable(x, under(x.typ).(*Basic)) + return + } + + // Untyped integer values must not grow arbitrarily. + const prec = 512 // 512 is the constant precision + if x.val.Kind() == constant.Int && constant.BitLen(x.val) > prec { + op := opName(x.expr) + if op != "" { + op += " " + } + check.errorf(atPos(opPos), InvalidConstVal, "constant %soverflow", op) + x.val = constant.MakeUnknown() + } +} + +// representableConst reports whether x can be represented as +// value of the given basic type and for the configuration +// provided (only needed for int/uint sizes). +// +// If rounded != nil, *rounded is set to the rounded value of x for +// representable floating-point and complex values, and to an Int +// value for integer values; it is left alone otherwise. +// It is ok to provide the addressof the first argument for rounded. +// +// The check parameter may be nil if representableConst is invoked +// (indirectly) through an exported API call (AssignableTo, ConvertibleTo) +// because we don't need the Checker's config for those calls. 
+func representableConst(x constant.Value, check *Checker, typ *Basic, rounded *constant.Value) bool { + if x.Kind() == constant.Unknown { + return true // avoid follow-up errors + } + + var conf *Config + if check != nil { + conf = check.conf + } + + sizeof := func(T Type) int64 { + s := conf.sizeof(T) + return s + } + + switch { + case isInteger(typ): + x := constant.ToInt(x) + if x.Kind() != constant.Int { + return false + } + if rounded != nil { + *rounded = x + } + if x, ok := constant.Int64Val(x); ok { + switch typ.kind { + case Int: + var s = uint(sizeof(typ)) * 8 + return int64(-1)<<(s-1) <= x && x <= int64(1)<<(s-1)-1 + case Int8: + const s = 8 + return -1<<(s-1) <= x && x <= 1<<(s-1)-1 + case Int16: + const s = 16 + return -1<<(s-1) <= x && x <= 1<<(s-1)-1 + case Int32: + const s = 32 + return -1<<(s-1) <= x && x <= 1<<(s-1)-1 + case Int64, UntypedInt: + return true + case Uint, Uintptr: + if s := uint(sizeof(typ)) * 8; s < 64 { + return 0 <= x && x <= int64(1)<= 0 && n <= int(s) + case Uint64: + return constant.Sign(x) >= 0 && n <= 64 + case UntypedInt: + return true + } + + case isFloat(typ): + x := constant.ToFloat(x) + if x.Kind() != constant.Float { + return false + } + switch typ.kind { + case Float32: + if rounded == nil { + return fitsFloat32(x) + } + r := roundFloat32(x) + if r != nil { + *rounded = r + return true + } + case Float64: + if rounded == nil { + return fitsFloat64(x) + } + r := roundFloat64(x) + if r != nil { + *rounded = r + return true + } + case UntypedFloat: + return true + default: + unreachable() + } + + case isComplex(typ): + x := constant.ToComplex(x) + if x.Kind() != constant.Complex { + return false + } + switch typ.kind { + case Complex64: + if rounded == nil { + return fitsFloat32(constant.Real(x)) && fitsFloat32(constant.Imag(x)) + } + re := roundFloat32(constant.Real(x)) + im := roundFloat32(constant.Imag(x)) + if re != nil && im != nil { + *rounded = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + return true 
+ } + case Complex128: + if rounded == nil { + return fitsFloat64(constant.Real(x)) && fitsFloat64(constant.Imag(x)) + } + re := roundFloat64(constant.Real(x)) + im := roundFloat64(constant.Imag(x)) + if re != nil && im != nil { + *rounded = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + return true + } + case UntypedComplex: + return true + default: + unreachable() + } + + case isString(typ): + return x.Kind() == constant.String + + case isBoolean(typ): + return x.Kind() == constant.Bool + } + + return false +} + +func fitsFloat32(x constant.Value) bool { + f32, _ := constant.Float32Val(x) + f := float64(f32) + return !math.IsInf(f, 0) +} + +func roundFloat32(x constant.Value) constant.Value { + f32, _ := constant.Float32Val(x) + f := float64(f32) + if !math.IsInf(f, 0) { + return constant.MakeFloat64(f) + } + return nil +} + +func fitsFloat64(x constant.Value) bool { + f, _ := constant.Float64Val(x) + return !math.IsInf(f, 0) +} + +func roundFloat64(x constant.Value) constant.Value { + f, _ := constant.Float64Val(x) + if !math.IsInf(f, 0) { + return constant.MakeFloat64(f) + } + return nil +} + +// representable checks that a constant operand is representable in the given +// basic type. +func (check *Checker) representable(x *operand, typ *Basic) { + v, code := check.representation(x, typ) + if code != 0 { + check.invalidConversion(code, x, typ) + x.mode = invalid + return + } + assert(v != nil) + x.val = v +} + +// representation returns the representation of the constant operand x as the +// basic type typ. +// +// If no such representation is possible, it returns a non-zero error code. 
+func (check *Checker) representation(x *operand, typ *Basic) (constant.Value, Code) { + assert(x.mode == constant_) + v := x.val + if !representableConst(x.val, check, typ, &v) { + if isNumeric(x.typ) && isNumeric(typ) { + // numeric conversion : error msg + // + // integer -> integer : overflows + // integer -> float : overflows (actually not possible) + // float -> integer : truncated + // float -> float : overflows + // + if !isInteger(x.typ) && isInteger(typ) { + return nil, TruncatedFloat + } else { + return nil, NumericOverflow + } + } + return nil, InvalidConstVal + } + return v, 0 +} + +func (check *Checker) invalidConversion(code Code, x *operand, target Type) { + msg := "cannot convert %s to type %s" + switch code { + case TruncatedFloat: + msg = "%s truncated to %s" + case NumericOverflow: + msg = "%s overflows %s" + } + check.errorf(x, code, msg, x, target) +} + +// convertUntyped attempts to set the type of an untyped value to the target type. +func (check *Checker) convertUntyped(x *operand, target Type) { + newType, val, code := check.implicitTypeAndValue(x, target) + if code != 0 { + t := target + if !isTypeParam(target) { + t = safeUnderlying(target) + } + check.invalidConversion(code, x, t) + x.mode = invalid + return + } + if val != nil { + x.val = val + check.updateExprVal(x.expr, val) + } + if newType != x.typ { + x.typ = newType + check.updateExprType(x.expr, newType, false) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/context.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/context.go new file mode 100644 index 0000000000000000000000000000000000000000..772312463e5fbae6007b6fb70a1791998760198b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/context.go @@ -0,0 +1,144 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package types2 + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "sync" +) + +// This file contains a definition of the type-checking context; an opaque type +// that may be supplied by users during instantiation. +// +// Contexts serve two purposes: +// - reduce the duplication of identical instances +// - short-circuit instantiation cycles +// +// For the latter purpose, we must always have a context during instantiation, +// whether or not it is supplied by the user. For both purposes, it must be the +// case that hashing a pointer-identical type produces consistent results +// (somewhat obviously). +// +// However, neither of these purposes require that our hash is perfect, and so +// this was not an explicit design goal of the context type. In fact, due to +// concurrent use it is convenient not to guarantee de-duplication. +// +// Nevertheless, in the future it could be helpful to allow users to leverage +// contexts to canonicalize instances, and it would probably be possible to +// achieve such a guarantee. + +// A Context is an opaque type checking context. It may be used to share +// identical type instances across type-checked packages or calls to +// Instantiate. Contexts are safe for concurrent use. +// +// The use of a shared context does not guarantee that identical instances are +// deduplicated in all cases. +type Context struct { + mu sync.Mutex + typeMap map[string][]ctxtEntry // type hash -> instances entries + nextID int // next unique ID + originIDs map[Type]int // origin type -> unique ID +} + +type ctxtEntry struct { + orig Type + targs []Type + instance Type // = orig[targs] +} + +// NewContext creates a new Context. +func NewContext() *Context { + return &Context{ + typeMap: make(map[string][]ctxtEntry), + originIDs: make(map[Type]int), + } +} + +// instanceHash returns a string representation of typ instantiated with targs. +// The hash should be a perfect hash, though out of caution the type checker +// does not assume this. 
The result is guaranteed to not contain blanks. +func (ctxt *Context) instanceHash(orig Type, targs []Type) string { + assert(ctxt != nil) + assert(orig != nil) + var buf bytes.Buffer + + h := newTypeHasher(&buf, ctxt) + h.string(strconv.Itoa(ctxt.getID(orig))) + // Because we've already written the unique origin ID this call to h.typ is + // unnecessary, but we leave it for hash readability. It can be removed later + // if performance is an issue. + h.typ(orig) + if len(targs) > 0 { + // TODO(rfindley): consider asserting on isGeneric(typ) here, if and when + // isGeneric handles *Signature types. + h.typeList(targs) + } + + return strings.ReplaceAll(buf.String(), " ", "#") +} + +// lookup returns an existing instantiation of orig with targs, if it exists. +// Otherwise, it returns nil. +func (ctxt *Context) lookup(h string, orig Type, targs []Type) Type { + ctxt.mu.Lock() + defer ctxt.mu.Unlock() + + for _, e := range ctxt.typeMap[h] { + if identicalInstance(orig, targs, e.orig, e.targs) { + return e.instance + } + if debug { + // Panic during development to surface any imperfections in our hash. + panic(fmt.Sprintf("non-identical instances: (orig: %s, targs: %v) and %s", orig, targs, e.instance)) + } + } + + return nil +} + +// update de-duplicates n against previously seen types with the hash h. If an +// identical type is found with the type hash h, the previously seen type is +// returned. Otherwise, n is returned, and recorded in the Context for the hash +// h. +func (ctxt *Context) update(h string, orig Type, targs []Type, inst Type) Type { + assert(inst != nil) + + ctxt.mu.Lock() + defer ctxt.mu.Unlock() + + for _, e := range ctxt.typeMap[h] { + if inst == nil || Identical(inst, e.instance) { + return e.instance + } + if debug { + // Panic during development to surface any imperfections in our hash. 
+ panic(fmt.Sprintf("%s and %s are not identical", inst, e.instance)) + } + } + + ctxt.typeMap[h] = append(ctxt.typeMap[h], ctxtEntry{ + orig: orig, + targs: targs, + instance: inst, + }) + + return inst +} + +// getID returns a unique ID for the type t. +func (ctxt *Context) getID(t Type) int { + ctxt.mu.Lock() + defer ctxt.mu.Unlock() + id, ok := ctxt.originIDs[t] + if !ok { + id = ctxt.nextID + ctxt.originIDs[t] = id + ctxt.nextID++ + } + return id +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/context_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/context_test.go new file mode 100644 index 0000000000000000000000000000000000000000..aa649b14481f76100988eef817ff1720a9ebd540 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/context_test.go @@ -0,0 +1,69 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import ( + "testing" +) + +func TestContextHashCollisions(t *testing.T) { + if debug { + t.Skip("hash collisions are expected, and would fail debug assertions") + } + // Unit test the de-duplication fall-back logic in Context. + // + // We can't test this via Instantiate because this is only a fall-back in + // case our hash is imperfect. + // + // These lookups and updates use reasonable looking types in an attempt to + // make them robust to internal type assertions, but could equally well use + // arbitrary types. + + // Create some distinct origin types. nullaryP and nullaryQ have no + // parameters and are identical (but have different type parameter names). + // unaryP has a parameter. 
+ var nullaryP, nullaryQ, unaryP Type + { + // type nullaryP = func[P any]() + tparam := NewTypeParam(NewTypeName(nopos, nil, "P", nil), &emptyInterface) + nullaryP = NewSignatureType(nil, nil, []*TypeParam{tparam}, nil, nil, false) + } + { + // type nullaryQ = func[Q any]() + tparam := NewTypeParam(NewTypeName(nopos, nil, "Q", nil), &emptyInterface) + nullaryQ = NewSignatureType(nil, nil, []*TypeParam{tparam}, nil, nil, false) + } + { + // type unaryP = func[P any](_ P) + tparam := NewTypeParam(NewTypeName(nopos, nil, "P", nil), &emptyInterface) + params := NewTuple(NewVar(nopos, nil, "_", tparam)) + unaryP = NewSignatureType(nil, nil, []*TypeParam{tparam}, params, nil, false) + } + + ctxt := NewContext() + + // Update the context with an instantiation of nullaryP. + inst := NewSignatureType(nil, nil, nil, nil, nil, false) + if got := ctxt.update("", nullaryP, []Type{Typ[Int]}, inst); got != inst { + t.Error("bad") + } + + // unaryP is not identical to nullaryP, so we should not get inst when + // instantiated with identical type arguments. + if got := ctxt.lookup("", unaryP, []Type{Typ[Int]}); got != nil { + t.Error("bad") + } + + // nullaryQ is identical to nullaryP, so we *should* get inst when + // instantiated with identical type arguments. + if got := ctxt.lookup("", nullaryQ, []Type{Typ[Int]}); got != inst { + t.Error("bad") + } + + // ...but verify we don't get inst with different type arguments. + if got := ctxt.lookup("", nullaryQ, []Type{Typ[String]}); got != nil { + t.Error("bad") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/conversions.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/conversions.go new file mode 100644 index 0000000000000000000000000000000000000000..8027092c6cd6d2a4595834a982b26bf6d6fa5647 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/conversions.go @@ -0,0 +1,311 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements typechecking of conversions. + +package types2 + +import ( + "go/constant" + . "internal/types/errors" + "unicode" +) + +// conversion type-checks the conversion T(x). +// The result is in x. +func (check *Checker) conversion(x *operand, T Type) { + constArg := x.mode == constant_ + + constConvertibleTo := func(T Type, val *constant.Value) bool { + switch t, _ := under(T).(*Basic); { + case t == nil: + // nothing to do + case representableConst(x.val, check, t, val): + return true + case isInteger(x.typ) && isString(t): + codepoint := unicode.ReplacementChar + if i, ok := constant.Uint64Val(x.val); ok && i <= unicode.MaxRune { + codepoint = rune(i) + } + if val != nil { + *val = constant.MakeString(string(codepoint)) + } + return true + } + return false + } + + var ok bool + var cause string + switch { + case constArg && isConstType(T): + // constant conversion + ok = constConvertibleTo(T, &x.val) + // A conversion from an integer constant to an integer type + // can only fail if there's overflow. Give a concise error. + // (go.dev/issue/63563) + if !ok && isInteger(x.typ) && isInteger(T) { + check.errorf(x, InvalidConversion, "constant %s overflows %s", x.val, T) + x.mode = invalid + return + } + case constArg && isTypeParam(T): + // x is convertible to T if it is convertible + // to each specific type in the type set of T. + // If T's type set is empty, or if it doesn't + // have specific types, constant x cannot be + // converted. 
+ ok = T.(*TypeParam).underIs(func(u Type) bool { + // u is nil if there are no specific type terms + if u == nil { + cause = check.sprintf("%s does not contain specific types", T) + return false + } + if isString(x.typ) && isBytesOrRunes(u) { + return true + } + if !constConvertibleTo(u, nil) { + if isInteger(x.typ) && isInteger(u) { + // see comment above on constant conversion + cause = check.sprintf("constant %s overflows %s (in %s)", x.val, u, T) + } else { + cause = check.sprintf("cannot convert %s to type %s (in %s)", x, u, T) + } + return false + } + return true + }) + x.mode = value // type parameters are not constants + case x.convertibleTo(check, T, &cause): + // non-constant conversion + ok = true + x.mode = value + } + + if !ok { + if cause != "" { + check.errorf(x, InvalidConversion, "cannot convert %s to type %s: %s", x, T, cause) + } else { + check.errorf(x, InvalidConversion, "cannot convert %s to type %s", x, T) + } + x.mode = invalid + return + } + + // The conversion argument types are final. For untyped values the + // conversion provides the type, per the spec: "A constant may be + // given a type explicitly by a constant declaration or conversion,...". + if isUntyped(x.typ) { + final := T + // - For conversions to interfaces, except for untyped nil arguments, + // use the argument's default type. + // - For conversions of untyped constants to non-constant types, also + // use the default type (e.g., []byte("foo") should report string + // not []byte as type for the constant "foo"). + // - For constant integer to string conversions, keep the argument type. + // (See also the TODO below.) + if x.typ == Typ[UntypedNil] { + // ok + } else if isNonTypeParamInterface(T) || constArg && !isConstType(T) { + final = Default(x.typ) + } else if x.mode == constant_ && isInteger(x.typ) && allString(T) { + final = x.typ + } + check.updateExprType(x.expr, final, true) + } + + x.typ = T +} + +// TODO(gri) convertibleTo checks if T(x) is valid. 
It assumes that the type +// of x is fully known, but that's not the case for say string(1<b-> ... ->g for a path [a, b, ... g]. +func pathString(path []Object) string { + var s string + for i, p := range path { + if i > 0 { + s += "->" + } + s += p.Name() + } + return s +} + +// objDecl type-checks the declaration of obj in its respective (file) environment. +// For the meaning of def, see Checker.definedType, in typexpr.go. +func (check *Checker) objDecl(obj Object, def *TypeName) { + if check.conf.Trace && obj.Type() == nil { + if check.indent == 0 { + fmt.Println() // empty line between top-level objects for readability + } + check.trace(obj.Pos(), "-- checking %s (%s, objPath = %s)", obj, obj.color(), pathString(check.objPath)) + check.indent++ + defer func() { + check.indent-- + check.trace(obj.Pos(), "=> %s (%s)", obj, obj.color()) + }() + } + + // Checking the declaration of obj means inferring its type + // (and possibly its value, for constants). + // An object's type (and thus the object) may be in one of + // three states which are expressed by colors: + // + // - an object whose type is not yet known is painted white (initial color) + // - an object whose type is in the process of being inferred is painted grey + // - an object whose type is fully inferred is painted black + // + // During type inference, an object's color changes from white to grey + // to black (pre-declared objects are painted black from the start). + // A black object (i.e., its type) can only depend on (refer to) other black + // ones. White and grey objects may depend on white and black objects. + // A dependency on a grey object indicates a cycle which may or may not be + // valid. + // + // When objects turn grey, they are pushed on the object path (a stack); + // they are popped again when they turn black. Thus, if a grey object (a + // cycle) is encountered, it is on the object path, and all the objects + // it depends on are the remaining objects on that path. 
Color encoding + // is such that the color value of a grey object indicates the index of + // that object in the object path. + + // During type-checking, white objects may be assigned a type without + // traversing through objDecl; e.g., when initializing constants and + // variables. Update the colors of those objects here (rather than + // everywhere where we set the type) to satisfy the color invariants. + if obj.color() == white && obj.Type() != nil { + obj.setColor(black) + return + } + + switch obj.color() { + case white: + assert(obj.Type() == nil) + // All color values other than white and black are considered grey. + // Because black and white are < grey, all values >= grey are grey. + // Use those values to encode the object's index into the object path. + obj.setColor(grey + color(check.push(obj))) + defer func() { + check.pop().setColor(black) + }() + + case black: + assert(obj.Type() != nil) + return + + default: + // Color values other than white or black are considered grey. + fallthrough + + case grey: + // We have a (possibly invalid) cycle. + // In the existing code, this is marked by a non-nil type + // for the object except for constants and variables whose + // type may be non-nil (known), or nil if it depends on the + // not-yet known initialization value. + // In the former case, set the type to Typ[Invalid] because + // we have an initialization cycle. The cycle error will be + // reported later, when determining initialization order. + // TODO(gri) Report cycle here and simplify initialization + // order code. 
+ switch obj := obj.(type) { + case *Const: + if !check.validCycle(obj) || obj.typ == nil { + obj.typ = Typ[Invalid] + } + + case *Var: + if !check.validCycle(obj) || obj.typ == nil { + obj.typ = Typ[Invalid] + } + + case *TypeName: + if !check.validCycle(obj) { + // break cycle + // (without this, calling underlying() + // below may lead to an endless loop + // if we have a cycle for a defined + // (*Named) type) + obj.typ = Typ[Invalid] + } + + case *Func: + if !check.validCycle(obj) { + // Don't set obj.typ to Typ[Invalid] here + // because plenty of code type-asserts that + // functions have a *Signature type. Grey + // functions have their type set to an empty + // signature which makes it impossible to + // initialize a variable with the function. + } + + default: + unreachable() + } + assert(obj.Type() != nil) + return + } + + d := check.objMap[obj] + if d == nil { + check.dump("%v: %s should have been declared", obj.Pos(), obj) + unreachable() + } + + // save/restore current environment and set up object environment + defer func(env environment) { + check.environment = env + }(check.environment) + check.environment = environment{ + scope: d.file, + } + + // Const and var declarations must not have initialization + // cycles. We track them by remembering the current declaration + // in check.decl. Initialization expressions depending on other + // consts, vars, or functions, add dependencies to the current + // check.decl. 
+ switch obj := obj.(type) { + case *Const: + check.decl = d // new package-level const decl + check.constDecl(obj, d.vtyp, d.init, d.inherited) + case *Var: + check.decl = d // new package-level var decl + check.varDecl(obj, d.lhs, d.vtyp, d.init) + case *TypeName: + // invalid recursive types are detected via path + check.typeDecl(obj, d.tdecl, def) + check.collectMethods(obj) // methods can only be added to top-level types + case *Func: + // functions may be recursive - no need to track dependencies + check.funcDecl(obj, d) + default: + unreachable() + } +} + +// validCycle reports whether the cycle starting with obj is valid and +// reports an error if it is not. +func (check *Checker) validCycle(obj Object) (valid bool) { + // The object map contains the package scope objects and the non-interface methods. + if debug { + info := check.objMap[obj] + inObjMap := info != nil && (info.fdecl == nil || info.fdecl.Recv == nil) // exclude methods + isPkgObj := obj.Parent() == check.pkg.scope + if isPkgObj != inObjMap { + check.dump("%v: inconsistent object map for %s (isPkgObj = %v, inObjMap = %v)", obj.Pos(), obj, isPkgObj, inObjMap) + unreachable() + } + } + + // Count cycle objects. + assert(obj.color() >= grey) + start := obj.color() - grey // index of obj in objPath + cycle := check.objPath[start:] + tparCycle := false // if set, the cycle is through a type parameter list + nval := 0 // number of (constant or variable) values in the cycle; valid if !generic + ndef := 0 // number of type definitions in the cycle; valid if !generic +loop: + for _, obj := range cycle { + switch obj := obj.(type) { + case *Const, *Var: + nval++ + case *TypeName: + // If we reach a generic type that is part of a cycle + // and we are in a type parameter list, we have a cycle + // through a type parameter list, which is invalid. + if check.inTParamList && isGeneric(obj.typ) { + tparCycle = true + break loop + } + + // Determine if the type name is an alias or not. 
For + // package-level objects, use the object map which + // provides syntactic information (which doesn't rely + // on the order in which the objects are set up). For + // local objects, we can rely on the order, so use + // the object's predicate. + // TODO(gri) It would be less fragile to always access + // the syntactic information. We should consider storing + // this information explicitly in the object. + var alias bool + if check.enableAlias { + alias = obj.IsAlias() + } else { + if d := check.objMap[obj]; d != nil { + alias = d.tdecl.Alias // package-level object + } else { + alias = obj.IsAlias() // function local object + } + } + if !alias { + ndef++ + } + case *Func: + // ignored for now + default: + unreachable() + } + } + + if check.conf.Trace { + check.trace(obj.Pos(), "## cycle detected: objPath = %s->%s (len = %d)", pathString(cycle), obj.Name(), len(cycle)) + if tparCycle { + check.trace(obj.Pos(), "## cycle contains: generic type in a type parameter list") + } else { + check.trace(obj.Pos(), "## cycle contains: %d values, %d type definitions", nval, ndef) + } + defer func() { + if valid { + check.trace(obj.Pos(), "=> cycle is valid") + } else { + check.trace(obj.Pos(), "=> error: cycle is invalid") + } + }() + } + + if !tparCycle { + // A cycle involving only constants and variables is invalid but we + // ignore them here because they are reported via the initialization + // cycle check. + if nval == len(cycle) { + return true + } + + // A cycle involving only types (and possibly functions) must have at least + // one type definition to be permitted: If there is no type definition, we + // have a sequence of alias type names which will expand ad infinitum. + if nval == 0 && ndef > 0 { + return true + } + } + + check.cycleError(cycle) + return false +} + +// cycleError reports a declaration cycle starting with +// the object in cycle that is "first" in the source. 
+func (check *Checker) cycleError(cycle []Object) { + // name returns the (possibly qualified) object name. + // This is needed because with generic types, cycles + // may refer to imported types. See go.dev/issue/50788. + // TODO(gri) This functionality is used elsewhere. Factor it out. + name := func(obj Object) string { + return packagePrefix(obj.Pkg(), check.qualifier) + obj.Name() + } + + // TODO(gri) Should we start with the last (rather than the first) object in the cycle + // since that is the earliest point in the source where we start seeing the + // cycle? That would be more consistent with other error messages. + i := firstInSrc(cycle) + obj := cycle[i] + objName := name(obj) + // If obj is a type alias, mark it as valid (not broken) in order to avoid follow-on errors. + tname, _ := obj.(*TypeName) + if tname != nil && tname.IsAlias() { + // If we use Alias nodes, it is initialized with Typ[Invalid]. + // TODO(gri) Adjust this code if we initialize with nil. + if !check.enableAlias { + check.validAlias(tname, Typ[Invalid]) + } + } + + // report a more concise error for self references + if len(cycle) == 1 { + if tname != nil { + check.errorf(obj, InvalidDeclCycle, "invalid recursive type: %s refers to itself", objName) + } else { + check.errorf(obj, InvalidDeclCycle, "invalid cycle in declaration: %s refers to itself", objName) + } + return + } + + var err error_ + err.code = InvalidDeclCycle + if tname != nil { + err.errorf(obj, "invalid recursive type %s", objName) + } else { + err.errorf(obj, "invalid cycle in declaration of %s", objName) + } + for range cycle { + err.errorf(obj, "%s refers to", objName) + i++ + if i >= len(cycle) { + i = 0 + } + obj = cycle[i] + objName = name(obj) + } + err.errorf(obj, "%s", objName) + check.report(&err) +} + +// firstInSrc reports the index of the object with the "smallest" +// source position in path. path must not be empty. 
+func firstInSrc(path []Object) int { + fst, pos := 0, path[0].Pos() + for i, t := range path[1:] { + if cmpPos(t.Pos(), pos) < 0 { + fst, pos = i+1, t.Pos() + } + } + return fst +} + +func (check *Checker) constDecl(obj *Const, typ, init syntax.Expr, inherited bool) { + assert(obj.typ == nil) + + // use the correct value of iota and errpos + defer func(iota constant.Value, errpos syntax.Pos) { + check.iota = iota + check.errpos = errpos + }(check.iota, check.errpos) + check.iota = obj.val + check.errpos = nopos + + // provide valid constant value under all circumstances + obj.val = constant.MakeUnknown() + + // determine type, if any + if typ != nil { + t := check.typ(typ) + if !isConstType(t) { + // don't report an error if the type is an invalid C (defined) type + // (go.dev/issue/22090) + if isValid(under(t)) { + check.errorf(typ, InvalidConstType, "invalid constant type %s", t) + } + obj.typ = Typ[Invalid] + return + } + obj.typ = t + } + + // check initialization + var x operand + if init != nil { + if inherited { + // The initialization expression is inherited from a previous + // constant declaration, and (error) positions refer to that + // expression and not the current constant declaration. Use + // the constant identifier position for any errors during + // init expression evaluation since that is all we have + // (see issues go.dev/issue/42991, go.dev/issue/42992). + check.errpos = obj.pos + } + check.expr(nil, &x, init) + } + check.initConst(obj, &x) +} + +func (check *Checker) varDecl(obj *Var, lhs []*Var, typ, init syntax.Expr) { + assert(obj.typ == nil) + + // determine type, if any + if typ != nil { + obj.typ = check.varType(typ) + // We cannot spread the type to all lhs variables if there + // are more than one since that would mark them as checked + // (see Checker.objDecl) and the assignment of init exprs, + // if any, would not be checked. 
+ // + // TODO(gri) If we have no init expr, we should distribute + // a given type otherwise we need to re-evalate the type + // expr for each lhs variable, leading to duplicate work. + } + + // check initialization + if init == nil { + if typ == nil { + // error reported before by arityMatch + obj.typ = Typ[Invalid] + } + return + } + + if lhs == nil || len(lhs) == 1 { + assert(lhs == nil || lhs[0] == obj) + var x operand + check.expr(newTarget(obj.typ, obj.name), &x, init) + check.initVar(obj, &x, "variable declaration") + return + } + + if debug { + // obj must be one of lhs + found := false + for _, lhs := range lhs { + if obj == lhs { + found = true + break + } + } + if !found { + panic("inconsistent lhs") + } + } + + // We have multiple variables on the lhs and one init expr. + // Make sure all variables have been given the same type if + // one was specified, otherwise they assume the type of the + // init expression values (was go.dev/issue/15755). + if typ != nil { + for _, lhs := range lhs { + lhs.typ = obj.typ + } + } + + check.initVars(lhs, []syntax.Expr{init}, nil) +} + +// isImportedConstraint reports whether typ is an imported type constraint. +func (check *Checker) isImportedConstraint(typ Type) bool { + named := asNamed(typ) + if named == nil || named.obj.pkg == check.pkg || named.obj.pkg == nil { + return false + } + u, _ := named.under().(*Interface) + return u != nil && !u.IsMethodSet() +} + +func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeName) { + assert(obj.typ == nil) + + var rhs Type + check.later(func() { + if t := asNamed(obj.typ); t != nil { // type may be invalid + check.validType(t) + } + // If typ is local, an error was already reported where typ is specified/defined. 
+ _ = check.isImportedConstraint(rhs) && check.verifyVersionf(tdecl.Type, go1_18, "using type constraint %s", rhs) + }).describef(obj, "validType(%s)", obj.Name()) + + aliasDecl := tdecl.Alias + if aliasDecl && tdecl.TParamList != nil { + // The parser will ensure this but we may still get an invalid AST. + // Complain and continue as regular type definition. + check.error(tdecl, BadDecl, "generic type cannot be alias") + aliasDecl = false + } + + // alias declaration + if aliasDecl { + check.verifyVersionf(tdecl, go1_9, "type aliases") + if check.enableAlias { + // TODO(gri) Should be able to use nil instead of Typ[Invalid] to mark + // the alias as incomplete. Currently this causes problems + // with certain cycles. Investigate. + alias := check.newAlias(obj, Typ[Invalid]) + setDefType(def, alias) + rhs = check.definedType(tdecl.Type, obj) + assert(rhs != nil) + alias.fromRHS = rhs + Unalias(alias) // resolve alias.actual + } else { + check.brokenAlias(obj) + rhs = check.typ(tdecl.Type) + check.validAlias(obj, rhs) + } + return + } + + // type definition or generic type declaration + named := check.newNamed(obj, nil, nil) + setDefType(def, named) + + if tdecl.TParamList != nil { + check.openScope(tdecl, "type parameters") + defer check.closeScope() + check.collectTypeParams(&named.tparams, tdecl.TParamList) + } + + // determine underlying type of named + rhs = check.definedType(tdecl.Type, obj) + assert(rhs != nil) + named.fromRHS = rhs + + // If the underlying type was not set while type-checking the right-hand + // side, it is invalid and an error should have been reported elsewhere. + if named.underlying == nil { + named.underlying = Typ[Invalid] + } + + // Disallow a lone type parameter as the RHS of a type declaration (go.dev/issue/45639). 
+ // We don't need this restriction anymore if we make the underlying type of a type + // parameter its constraint interface: if the RHS is a lone type parameter, we will + // use its underlying type (like we do for any RHS in a type declaration), and its + // underlying type is an interface and the type declaration is well defined. + if isTypeParam(rhs) { + check.error(tdecl.Type, MisplacedTypeParam, "cannot use a type parameter as RHS in type declaration") + named.underlying = Typ[Invalid] + } +} + +func (check *Checker) collectTypeParams(dst **TypeParamList, list []*syntax.Field) { + tparams := make([]*TypeParam, len(list)) + + // Declare type parameters up-front. + // The scope of type parameters starts at the beginning of the type parameter + // list (so we can have mutually recursive parameterized type bounds). + if len(list) > 0 { + scopePos := list[0].Pos() + for i, f := range list { + tparams[i] = check.declareTypeParam(f.Name, scopePos) + } + } + + // Set the type parameters before collecting the type constraints because + // the parameterized type may be used by the constraints (go.dev/issue/47887). + // Example: type T[P T[P]] interface{} + *dst = bindTParams(tparams) + + // Signal to cycle detection that we are in a type parameter list. + // We can only be inside one type parameter list at any given time: + // function closures may appear inside a type parameter list but they + // cannot be generic, and their bodies are processed in delayed and + // sequential fashion. Note that with each new declaration, we save + // the existing environment and restore it when done; thus inTParamList + // is true exactly only when we are in a specific type parameter list. + assert(!check.inTParamList) + check.inTParamList = true + defer func() { + check.inTParamList = false + }() + + // Keep track of bounds for later validation. + var bound Type + for i, f := range list { + // Optimization: Re-use the previous type bound if it hasn't changed. 
+ // This also preserves the grouped output of type parameter lists + // when printing type strings. + if i == 0 || f.Type != list[i-1].Type { + bound = check.bound(f.Type) + if isTypeParam(bound) { + // We may be able to allow this since it is now well-defined what + // the underlying type and thus type set of a type parameter is. + // But we may need some additional form of cycle detection within + // type parameter lists. + check.error(f.Type, MisplacedTypeParam, "cannot use a type parameter as constraint") + bound = Typ[Invalid] + } + } + tparams[i].bound = bound + } +} + +func (check *Checker) bound(x syntax.Expr) Type { + // A type set literal of the form ~T and A|B may only appear as constraint; + // embed it in an implicit interface so that only interface type-checking + // needs to take care of such type expressions. + if op, _ := x.(*syntax.Operation); op != nil && (op.Op == syntax.Tilde || op.Op == syntax.Or) { + t := check.typ(&syntax.InterfaceType{MethodList: []*syntax.Field{{Type: x}}}) + // mark t as implicit interface if all went well + if t, _ := t.(*Interface); t != nil { + t.implicit = true + } + return t + } + return check.typ(x) +} + +func (check *Checker) declareTypeParam(name *syntax.Name, scopePos syntax.Pos) *TypeParam { + // Use Typ[Invalid] for the type constraint to ensure that a type + // is present even if the actual constraint has not been assigned + // yet. + // TODO(gri) Need to systematically review all uses of type parameter + // constraints to make sure we don't rely on them if they + // are not properly set yet. 
+ tname := NewTypeName(name.Pos(), check.pkg, name.Value, nil) + tpar := check.newTypeParam(tname, Typ[Invalid]) // assigns type to tname as a side-effect + check.declare(check.scope, name, tname, scopePos) + return tpar +} + +func (check *Checker) collectMethods(obj *TypeName) { + // get associated methods + // (Checker.collectObjects only collects methods with non-blank names; + // Checker.resolveBaseTypeName ensures that obj is not an alias name + // if it has attached methods.) + methods := check.methods[obj] + if methods == nil { + return + } + delete(check.methods, obj) + assert(!check.objMap[obj].tdecl.Alias) // don't use TypeName.IsAlias (requires fully set up object) + + // use an objset to check for name conflicts + var mset objset + + // spec: "If the base type is a struct type, the non-blank method + // and field names must be distinct." + base := asNamed(obj.typ) // shouldn't fail but be conservative + if base != nil { + assert(base.TypeArgs().Len() == 0) // collectMethods should not be called on an instantiated type + + // See go.dev/issue/52529: we must delay the expansion of underlying here, as + // base may not be fully set-up. + check.later(func() { + check.checkFieldUniqueness(base) + }).describef(obj, "verifying field uniqueness for %v", base) + + // Checker.Files may be called multiple times; additional package files + // may add methods to already type-checked types. Add pre-existing methods + // so that we can detect redeclarations. + for i := 0; i < base.NumMethods(); i++ { + m := base.Method(i) + assert(m.name != "_") + assert(mset.insert(m) == nil) + } + } + + // add valid methods + for _, m := range methods { + // spec: "For a base type, the non-blank names of methods bound + // to it must be unique." 
+ assert(m.name != "_") + if alt := mset.insert(m); alt != nil { + if alt.Pos().IsKnown() { + check.errorf(m.pos, DuplicateMethod, "method %s.%s already declared at %s", obj.Name(), m.name, alt.Pos()) + } else { + check.errorf(m.pos, DuplicateMethod, "method %s.%s already declared", obj.Name(), m.name) + } + continue + } + + if base != nil { + base.AddMethod(m) + } + } +} + +func (check *Checker) checkFieldUniqueness(base *Named) { + if t, _ := base.under().(*Struct); t != nil { + var mset objset + for i := 0; i < base.NumMethods(); i++ { + m := base.Method(i) + assert(m.name != "_") + assert(mset.insert(m) == nil) + } + + // Check that any non-blank field names of base are distinct from its + // method names. + for _, fld := range t.fields { + if fld.name != "_" { + if alt := mset.insert(fld); alt != nil { + // Struct fields should already be unique, so we should only + // encounter an alternate via collision with a method name. + _ = alt.(*Func) + + // For historical consistency, we report the primary error on the + // method, and the alt decl on the field. + var err error_ + err.code = DuplicateFieldAndMethod + err.errorf(alt, "field and method with the same name %s", fld.name) + err.recordAltDecl(fld) + check.report(&err) + } + } + } + } +} + +func (check *Checker) funcDecl(obj *Func, decl *declInfo) { + assert(obj.typ == nil) + + // func declarations cannot use iota + assert(check.iota == nil) + + sig := new(Signature) + obj.typ = sig // guard against cycles + + // Avoid cycle error when referring to method while type-checking the signature. + // This avoids a nuisance in the best case (non-parameterized receiver type) and + // since the method is not a type, we get an error. If we have a parameterized + // receiver type, instantiating the receiver type leads to the instantiation of + // its methods, and we don't want a cycle error in that case. + // TODO(gri) review if this is correct and/or whether we still need this? 
+ saved := obj.color_ + obj.color_ = black + fdecl := decl.fdecl + check.funcType(sig, fdecl.Recv, fdecl.TParamList, fdecl.Type) + obj.color_ = saved + + // Set the scope's extent to the complete "func (...) { ... }" + // so that Scope.Innermost works correctly. + sig.scope.pos = fdecl.Pos() + sig.scope.end = syntax.EndPos(fdecl) + + if len(fdecl.TParamList) > 0 && fdecl.Body == nil { + check.softErrorf(fdecl, BadDecl, "generic function is missing function body") + } + + // function body must be type-checked after global declarations + // (functions implemented elsewhere have no body) + if !check.conf.IgnoreFuncBodies && fdecl.Body != nil { + check.later(func() { + check.funcBody(decl, obj.name, sig, fdecl.Body, nil) + }).describef(obj, "func %s", obj.name) + } +} + +func (check *Checker) declStmt(list []syntax.Decl) { + pkg := check.pkg + + first := -1 // index of first ConstDecl in the current group, or -1 + var last *syntax.ConstDecl // last ConstDecl with init expressions, or nil + for index, decl := range list { + if _, ok := decl.(*syntax.ConstDecl); !ok { + first = -1 // we're not in a constant declaration + } + + switch s := decl.(type) { + case *syntax.ConstDecl: + top := len(check.delayed) + + // iota is the index of the current constDecl within the group + if first < 0 || s.Group == nil || list[index-1].(*syntax.ConstDecl).Group != s.Group { + first = index + last = nil + } + iota := constant.MakeInt64(int64(index - first)) + + // determine which initialization expressions to use + inherited := true + switch { + case s.Type != nil || s.Values != nil: + last = s + inherited = false + case last == nil: + last = new(syntax.ConstDecl) // make sure last exists + inherited = false + } + + // declare all constants + lhs := make([]*Const, len(s.NameList)) + values := syntax.UnpackListExpr(last.Values) + for i, name := range s.NameList { + obj := NewConst(name.Pos(), pkg, name.Value, nil, iota) + lhs[i] = obj + + var init syntax.Expr + if i < len(values) { + init 
= values[i] + } + + check.constDecl(obj, last.Type, init, inherited) + } + + // Constants must always have init values. + check.arity(s.Pos(), s.NameList, values, true, inherited) + + // process function literals in init expressions before scope changes + check.processDelayed(top) + + // spec: "The scope of a constant or variable identifier declared + // inside a function begins at the end of the ConstSpec or VarSpec + // (ShortVarDecl for short variable declarations) and ends at the + // end of the innermost containing block." + scopePos := syntax.EndPos(s) + for i, name := range s.NameList { + check.declare(check.scope, name, lhs[i], scopePos) + } + + case *syntax.VarDecl: + top := len(check.delayed) + + lhs0 := make([]*Var, len(s.NameList)) + for i, name := range s.NameList { + lhs0[i] = NewVar(name.Pos(), pkg, name.Value, nil) + } + + // initialize all variables + values := syntax.UnpackListExpr(s.Values) + for i, obj := range lhs0 { + var lhs []*Var + var init syntax.Expr + switch len(values) { + case len(s.NameList): + // lhs and rhs match + init = values[i] + case 1: + // rhs is expected to be a multi-valued expression + lhs = lhs0 + init = values[0] + default: + if i < len(values) { + init = values[i] + } + } + check.varDecl(obj, lhs, s.Type, init) + if len(values) == 1 { + // If we have a single lhs variable we are done either way. + // If we have a single rhs expression, it must be a multi- + // valued expression, in which case handling the first lhs + // variable will cause all lhs variables to have a type + // assigned, and we are done as well. + if debug { + for _, obj := range lhs0 { + assert(obj.typ != nil) + } + } + break + } + } + + // If we have no type, we must have values. 
+ if s.Type == nil || values != nil { + check.arity(s.Pos(), s.NameList, values, false, false) + } + + // process function literals in init expressions before scope changes + check.processDelayed(top) + + // declare all variables + // (only at this point are the variable scopes (parents) set) + scopePos := syntax.EndPos(s) // see constant declarations + for i, name := range s.NameList { + // see constant declarations + check.declare(check.scope, name, lhs0[i], scopePos) + } + + case *syntax.TypeDecl: + obj := NewTypeName(s.Name.Pos(), pkg, s.Name.Value, nil) + // spec: "The scope of a type identifier declared inside a function + // begins at the identifier in the TypeSpec and ends at the end of + // the innermost containing block." + scopePos := s.Name.Pos() + check.declare(check.scope, s.Name, obj, scopePos) + // mark and unmark type before calling typeDecl; its type is still nil (see Checker.objDecl) + obj.setColor(grey + color(check.push(obj))) + check.typeDecl(obj, s, nil) + check.pop().setColor(black) + + default: + check.errorf(s, InvalidSyntaxTree, "unknown syntax.Decl node %T", s) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/errorcalls_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/errorcalls_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ba4dc87b6ae8b8a55a146f1424e9c40d4c361416 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/errorcalls_test.go @@ -0,0 +1,95 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package types2_test + +import ( + "cmd/compile/internal/syntax" + "strconv" + "testing" +) + +const ( + errorfMinArgCount = 4 + errorfFormatIndex = 2 +) + +// TestErrorCalls makes sure that check.errorf calls have at least +// errorfMinArgCount arguments (otherwise we should use check.error) +// and use balanced parentheses/brackets. +func TestErrorCalls(t *testing.T) { + files, err := pkgFiles(".") + if err != nil { + t.Fatal(err) + } + + for _, file := range files { + syntax.Inspect(file, func(n syntax.Node) bool { + call, _ := n.(*syntax.CallExpr) + if call == nil { + return true + } + selx, _ := call.Fun.(*syntax.SelectorExpr) + if selx == nil { + return true + } + if !(isName(selx.X, "check") && isName(selx.Sel, "errorf")) { + return true + } + // check.errorf calls should have at least errorfMinArgCount arguments: + // position, code, format string, and arguments to format + if n := len(call.ArgList); n < errorfMinArgCount { + t.Errorf("%s: got %d arguments, want at least %d", call.Pos(), n, errorfMinArgCount) + return false + } + format := call.ArgList[errorfFormatIndex] + syntax.Inspect(format, func(n syntax.Node) bool { + if lit, _ := n.(*syntax.BasicLit); lit != nil && lit.Kind == syntax.StringLit { + if s, err := strconv.Unquote(lit.Value); err == nil { + if !balancedParentheses(s) { + t.Errorf("%s: unbalanced parentheses/brackets", lit.Pos()) + } + } + return false + } + return true + }) + return false + }) + } +} + +func isName(n syntax.Node, name string) bool { + if n, ok := n.(*syntax.Name); ok { + return n.Value == name + } + return false +} + +func balancedParentheses(s string) bool { + var stack []byte + for _, ch := range s { + var open byte + switch ch { + case '(', '[', '{': + stack = append(stack, byte(ch)) + continue + case ')': + open = '(' + case ']': + open = '[' + case '}': + open = '{' + default: + continue + } + // closing parenthesis/bracket must have matching opening + top := len(stack) - 1 + if top < 0 || stack[top] != open { + 
return false + } + stack = stack[:top] + } + return len(stack) == 0 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/errors.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/errors.go new file mode 100644 index 0000000000000000000000000000000000000000..b8414b48498f242b088f5f8d1517156de31db1e4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/errors.go @@ -0,0 +1,332 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements various error reporters. + +package types2 + +import ( + "bytes" + "cmd/compile/internal/syntax" + "fmt" + . "internal/types/errors" + "runtime" + "strconv" + "strings" +) + +func assert(p bool) { + if !p { + msg := "assertion failed" + // Include information about the assertion location. Due to panic recovery, + // this location is otherwise buried in the middle of the panicking stack. + if _, file, line, ok := runtime.Caller(1); ok { + msg = fmt.Sprintf("%s:%d: %s", file, line, msg) + } + panic(msg) + } +} + +func unreachable() { + panic("unreachable") +} + +// An error_ represents a type-checking error. +// To report an error_, call Checker.report. +type error_ struct { + desc []errorDesc + code Code + soft bool // TODO(gri) eventually determine this from an error code +} + +// An errorDesc describes part of a type-checking error. 
+type errorDesc struct { + pos syntax.Pos + format string + args []interface{} +} + +func (err *error_) empty() bool { + return err.desc == nil +} + +func (err *error_) pos() syntax.Pos { + if err.empty() { + return nopos + } + return err.desc[0].pos +} + +func (err *error_) msg(qf Qualifier) string { + if err.empty() { + return "no error" + } + var buf strings.Builder + for i := range err.desc { + p := &err.desc[i] + if i > 0 { + fmt.Fprint(&buf, "\n\t") + if p.pos.IsKnown() { + fmt.Fprintf(&buf, "%s: ", p.pos) + } + } + buf.WriteString(sprintf(qf, false, p.format, p.args...)) + } + return buf.String() +} + +// String is for testing. +func (err *error_) String() string { + if err.empty() { + return "no error" + } + return fmt.Sprintf("%s: %s", err.pos(), err.msg(nil)) +} + +// errorf adds formatted error information to err. +// It may be called multiple times to provide additional information. +func (err *error_) errorf(at poser, format string, args ...interface{}) { + err.desc = append(err.desc, errorDesc{atPos(at), format, args}) +} + +func sprintf(qf Qualifier, tpSubscripts bool, format string, args ...interface{}) string { + for i, arg := range args { + switch a := arg.(type) { + case nil: + arg = "" + case operand: + panic("got operand instead of *operand") + case *operand: + arg = operandString(a, qf) + case syntax.Pos: + arg = a.String() + case syntax.Expr: + arg = syntax.String(a) + case []syntax.Expr: + var buf strings.Builder + buf.WriteByte('[') + for i, x := range a { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString(syntax.String(x)) + } + buf.WriteByte(']') + arg = buf.String() + case Object: + arg = ObjectString(a, qf) + case Type: + var buf bytes.Buffer + w := newTypeWriter(&buf, qf) + w.tpSubscripts = tpSubscripts + w.typ(a) + arg = buf.String() + case []Type: + var buf bytes.Buffer + w := newTypeWriter(&buf, qf) + w.tpSubscripts = tpSubscripts + buf.WriteByte('[') + for i, x := range a { + if i > 0 { + buf.WriteString(", ") + } + 
w.typ(x) + } + buf.WriteByte(']') + arg = buf.String() + case []*TypeParam: + var buf bytes.Buffer + w := newTypeWriter(&buf, qf) + w.tpSubscripts = tpSubscripts + buf.WriteByte('[') + for i, x := range a { + if i > 0 { + buf.WriteString(", ") + } + w.typ(x) + } + buf.WriteByte(']') + arg = buf.String() + } + args[i] = arg + } + return fmt.Sprintf(format, args...) +} + +func (check *Checker) qualifier(pkg *Package) string { + // Qualify the package unless it's the package being type-checked. + if pkg != check.pkg { + if check.pkgPathMap == nil { + check.pkgPathMap = make(map[string]map[string]bool) + check.seenPkgMap = make(map[*Package]bool) + check.markImports(check.pkg) + } + // If the same package name was used by multiple packages, display the full path. + if len(check.pkgPathMap[pkg.name]) > 1 { + return strconv.Quote(pkg.path) + } + return pkg.name + } + return "" +} + +// markImports recursively walks pkg and its imports, to record unique import +// paths in pkgPathMap. +func (check *Checker) markImports(pkg *Package) { + if check.seenPkgMap[pkg] { + return + } + check.seenPkgMap[pkg] = true + + forName, ok := check.pkgPathMap[pkg.name] + if !ok { + forName = make(map[string]bool) + check.pkgPathMap[pkg.name] = forName + } + forName[pkg.path] = true + + for _, imp := range pkg.imports { + check.markImports(imp) + } +} + +// check may be nil. +func (check *Checker) sprintf(format string, args ...interface{}) string { + var qf Qualifier + if check != nil { + qf = check.qualifier + } + return sprintf(qf, false, format, args...) +} + +func (check *Checker) report(err *error_) { + if err.empty() { + panic("no error to report") + } + check.err(err.pos(), err.code, err.msg(check.qualifier), err.soft) +} + +func (check *Checker) trace(pos syntax.Pos, format string, args ...interface{}) { + fmt.Printf("%s:\t%s%s\n", + pos, + strings.Repeat(". 
", check.indent), + sprintf(check.qualifier, true, format, args...), + ) +} + +// dump is only needed for debugging +func (check *Checker) dump(format string, args ...interface{}) { + fmt.Println(sprintf(check.qualifier, true, format, args...)) +} + +func (check *Checker) err(at poser, code Code, msg string, soft bool) { + switch code { + case InvalidSyntaxTree: + msg = "invalid syntax tree: " + msg + case 0: + panic("no error code provided") + } + + // Cheap trick: Don't report errors with messages containing + // "invalid operand" or "invalid type" as those tend to be + // follow-on errors which don't add useful information. Only + // exclude them if these strings are not at the beginning, + // and only if we have at least one error already reported. + if check.firstErr != nil && (strings.Index(msg, "invalid operand") > 0 || strings.Index(msg, "invalid type") > 0) { + return + } + + pos := atPos(at) + + // If we are encountering an error while evaluating an inherited + // constant initialization expression, pos is the position of in + // the original expression, and not of the currently declared + // constant identifier. Use the provided errpos instead. + // TODO(gri) We may also want to augment the error message and + // refer to the position (pos) in the original expression. + if check.errpos.IsKnown() { + assert(check.iota != nil) + pos = check.errpos + } + + // If we have a URL for error codes, add a link to the first line. 
+ if code != 0 && check.conf.ErrorURL != "" { + u := fmt.Sprintf(check.conf.ErrorURL, code) + if i := strings.Index(msg, "\n"); i >= 0 { + msg = msg[:i] + u + msg[i:] + } else { + msg += u + } + } + + err := Error{pos, stripAnnotations(msg), msg, soft, code} + if check.firstErr == nil { + check.firstErr = err + } + + if check.conf.Trace { + check.trace(pos, "ERROR: %s", msg) + } + + f := check.conf.Error + if f == nil { + panic(bailout{}) // report only first error + } + f(err) +} + +const ( + invalidArg = "invalid argument: " + invalidOp = "invalid operation: " +) + +type poser interface { + Pos() syntax.Pos +} + +func (check *Checker) error(at poser, code Code, msg string) { + check.err(at, code, msg, false) +} + +func (check *Checker) errorf(at poser, code Code, format string, args ...interface{}) { + check.err(at, code, check.sprintf(format, args...), false) +} + +func (check *Checker) softErrorf(at poser, code Code, format string, args ...interface{}) { + check.err(at, code, check.sprintf(format, args...), true) +} + +func (check *Checker) versionErrorf(at poser, v goVersion, format string, args ...interface{}) { + msg := check.sprintf(format, args...) + msg = fmt.Sprintf("%s requires %s or later", msg, v) + check.err(at, UnsupportedFeature, msg, true) +} + +// atPos reports the left (= start) position of at. +func atPos(at poser) syntax.Pos { + switch x := at.(type) { + case *operand: + if x.expr != nil { + return syntax.StartPos(x.expr) + } + case syntax.Node: + return syntax.StartPos(x) + } + return at.Pos() +} + +// stripAnnotations removes internal (type) annotations from s. 
+func stripAnnotations(s string) string { + var buf strings.Builder + for _, r := range s { + // strip #'s and subscript digits + if r < '₀' || '₀'+10 <= r { // '₀' == U+2080 + buf.WriteRune(r) + } + } + if buf.Len() < len(s) { + return buf.String() + } + return s +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/errors_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/errors_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ac73ca4650dc4a54852ad20092a7b6df99a4e4ce --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/errors_test.go @@ -0,0 +1,44 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import "testing" + +func TestError(t *testing.T) { + var err error_ + want := "no error" + if got := err.String(); got != want { + t.Errorf("empty error: got %q, want %q", got, want) + } + + want = ": foo 42" + err.errorf(nopos, "foo %d", 42) + if got := err.String(); got != want { + t.Errorf("simple error: got %q, want %q", got, want) + } + + want = ": foo 42\n\tbar 43" + err.errorf(nopos, "bar %d", 43) + if got := err.String(); got != want { + t.Errorf("simple error: got %q, want %q", got, want) + } +} + +func TestStripAnnotations(t *testing.T) { + for _, test := range []struct { + in, want string + }{ + {"", ""}, + {" ", " "}, + {"foo", "foo"}, + {"foo₀", "foo"}, + {"foo(T₀)", "foo(T)"}, + } { + got := stripAnnotations(test.in) + if got != test.want { + t.Errorf("%q: got %q; want %q", test.in, got, test.want) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/example_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/example_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7031fdb1ada63880e35a888e429b126024afaf0a --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/example_test.go @@ -0,0 +1,252 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Only run where builders (build.golang.org) have +// access to compiled packages for import. +// +//go:build !android && !ios && !js && !wasip1 + +package types2_test + +// This file shows examples of basic usage of the go/types API. +// +// To locate a Go package, use (*go/build.Context).Import. +// To load, parse, and type-check a complete Go program +// from source, use golang.org/x/tools/go/loader. + +import ( + "cmd/compile/internal/syntax" + "cmd/compile/internal/types2" + "fmt" + "log" + "regexp" + "sort" + "strings" +) + +// ExampleScope prints the tree of Scopes of a package created from a +// set of parsed files. +func ExampleScope() { + // Parse the source files for a package. + var files []*syntax.File + for _, src := range []string{ + `package main +import "fmt" +func main() { + freezing := FToC(-18) + fmt.Println(freezing, Boiling) } +`, + `package main +import "fmt" +type Celsius float64 +func (c Celsius) String() string { return fmt.Sprintf("%g°C", c) } +func FToC(f float64) Celsius { return Celsius(f - 32 / 9 * 5) } +const Boiling Celsius = 100 +func Unused() { {}; {{ var x int; _ = x }} } // make sure empty block scopes get printed +`, + } { + files = append(files, mustParse(src)) + } + + // Type-check a package consisting of these files. + // Type information for the imported "fmt" package + // comes from $GOROOT/pkg/$GOOS_$GOOARCH/fmt.a. + conf := types2.Config{Importer: defaultImporter()} + pkg, err := conf.Check("temperature", files, nil) + if err != nil { + log.Fatal(err) + } + + // Print the tree of scopes. + // For determinism, we redact addresses. 
+ var buf strings.Builder + pkg.Scope().WriteTo(&buf, 0, true) + rx := regexp.MustCompile(` 0x[a-fA-F0-9]*`) + fmt.Println(rx.ReplaceAllString(buf.String(), "")) + + // Output: + // package "temperature" scope { + // . const temperature.Boiling temperature.Celsius + // . type temperature.Celsius float64 + // . func temperature.FToC(f float64) temperature.Celsius + // . func temperature.Unused() + // . func temperature.main() + // . main scope { + // . . package fmt + // . . function scope { + // . . . var freezing temperature.Celsius + // . . } + // . } + // . main scope { + // . . package fmt + // . . function scope { + // . . . var c temperature.Celsius + // . . } + // . . function scope { + // . . . var f float64 + // . . } + // . . function scope { + // . . . block scope { + // . . . } + // . . . block scope { + // . . . . block scope { + // . . . . . var x int + // . . . . } + // . . . } + // . . } + // . } + // } +} + +// ExampleInfo prints various facts recorded by the type checker in a +// types2.Info struct: definitions of and references to each named object, +// and the type, value, and mode of every expression in the package. +func ExampleInfo() { + // Parse a single source file. + const input = ` +package fib + +type S string + +var a, b, c = len(b), S(c), "hello" + +func fib(x int) int { + if x < 2 { + return x + } + return fib(x-1) - fib(x-2) +}` + // Type-check the package. + // We create an empty map for each kind of input + // we're interested in, and Check populates them. + info := types2.Info{ + Types: make(map[syntax.Expr]types2.TypeAndValue), + Defs: make(map[*syntax.Name]types2.Object), + Uses: make(map[*syntax.Name]types2.Object), + } + pkg := mustTypecheck(input, nil, &info) + + // Print package-level variables in initialization order. + fmt.Printf("InitOrder: %v\n\n", info.InitOrder) + + // For each named object, print the line and + // column of its definition and each of its uses. 
+ fmt.Println("Defs and Uses of each named object:") + usesByObj := make(map[types2.Object][]string) + for id, obj := range info.Uses { + posn := id.Pos() + lineCol := fmt.Sprintf("%d:%d", posn.Line(), posn.Col()) + usesByObj[obj] = append(usesByObj[obj], lineCol) + } + var items []string + for obj, uses := range usesByObj { + sort.Strings(uses) + item := fmt.Sprintf("%s:\n defined at %s\n used at %s", + types2.ObjectString(obj, types2.RelativeTo(pkg)), + obj.Pos(), + strings.Join(uses, ", ")) + items = append(items, item) + } + sort.Strings(items) // sort by line:col, in effect + fmt.Println(strings.Join(items, "\n")) + fmt.Println() + + // TODO(gri) Enable once positions are updated/verified + // fmt.Println("Types and Values of each expression:") + // items = nil + // for expr, tv := range info.Types { + // var buf strings.Builder + // posn := expr.Pos() + // tvstr := tv.Type.String() + // if tv.Value != nil { + // tvstr += " = " + tv.Value.String() + // } + // // line:col | expr | mode : type = value + // fmt.Fprintf(&buf, "%2d:%2d | %-19s | %-7s : %s", + // posn.Line(), posn.Col(), types2.ExprString(expr), + // mode(tv), tvstr) + // items = append(items, buf.String()) + // } + // sort.Strings(items) + // fmt.Println(strings.Join(items, "\n")) + + // Output: + // InitOrder: [c = "hello" b = S(c) a = len(b)] + // + // Defs and Uses of each named object: + // builtin len: + // defined at + // used at 6:15 + // func fib(x int) int: + // defined at fib:8:6 + // used at 12:20, 12:9 + // type S string: + // defined at fib:4:6 + // used at 6:23 + // type int: + // defined at + // used at 8:12, 8:17 + // type string: + // defined at + // used at 4:8 + // var b S: + // defined at fib:6:8 + // used at 6:19 + // var c string: + // defined at fib:6:11 + // used at 6:25 + // var x int: + // defined at fib:8:10 + // used at 10:10, 12:13, 12:24, 9:5 +} + +// TODO(gri) Enable once positions are updated/verified +// Types and Values of each expression: +// 4: 8 | string | type 
: string +// 6:15 | len | builtin : func(string) int +// 6:15 | len(b) | value : int +// 6:19 | b | var : fib.S +// 6:23 | S | type : fib.S +// 6:23 | S(c) | value : fib.S +// 6:25 | c | var : string +// 6:29 | "hello" | value : string = "hello" +// 8:12 | int | type : int +// 8:17 | int | type : int +// 9: 5 | x | var : int +// 9: 5 | x < 2 | value : untyped bool +// 9: 9 | 2 | value : int = 2 +// 10:10 | x | var : int +// 12: 9 | fib | value : func(x int) int +// 12: 9 | fib(x - 1) | value : int +// 12: 9 | fib(x - 1) - fib(x - 2) | value : int +// 12:13 | x | var : int +// 12:13 | x - 1 | value : int +// 12:15 | 1 | value : int = 1 +// 12:20 | fib | value : func(x int) int +// 12:20 | fib(x - 2) | value : int +// 12:24 | x | var : int +// 12:24 | x - 2 | value : int +// 12:26 | 2 | value : int = 2 + +func mode(tv types2.TypeAndValue) string { + switch { + case tv.IsVoid(): + return "void" + case tv.IsType(): + return "type" + case tv.IsBuiltin(): + return "builtin" + case tv.IsNil(): + return "nil" + case tv.Assignable(): + if tv.Addressable() { + return "var" + } + return "mapindex" + case tv.IsValue(): + return "value" + default: + return "unknown" + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/expr.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/expr.go new file mode 100644 index 0000000000000000000000000000000000000000..124d9701d60e9f59dbbe9e3c15673421d61b6721 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/expr.go @@ -0,0 +1,1699 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements typechecking of expressions. + +package types2 + +import ( + "cmd/compile/internal/syntax" + "fmt" + "go/constant" + "go/token" + . "internal/types/errors" +) + +/* +Basic algorithm: + +Expressions are checked recursively, top down. 
Expression checker functions +are generally of the form: + + func f(x *operand, e *syntax.Expr, ...) + +where e is the expression to be checked, and x is the result of the check. +The check performed by f may fail in which case x.mode == invalid, and +related error messages will have been issued by f. + +If a hint argument is present, it is the composite literal element type +of an outer composite literal; it is used to type-check composite literal +elements that have no explicit type specification in the source +(e.g.: []T{{...}, {...}}, the hint is the type T in this case). + +All expressions are checked via rawExpr, which dispatches according +to expression kind. Upon returning, rawExpr is recording the types and +constant values for all expressions that have an untyped type (those types +may change on the way up in the expression tree). Usually these are constants, +but the results of comparisons or non-constant shifts of untyped constants +may also be untyped, but not constant. + +Untyped expressions may eventually become fully typed (i.e., not untyped), +typically when the value is assigned to a variable, or is used otherwise. +The updateExprType method is used to record this final type and update +the recorded types: the type-checked expression tree is again traversed down, +and the new type is propagated as needed. Untyped constant expression values +that become fully typed must now be representable by the full type (constant +sub-expression trees are left alone except for their roots). This mechanism +ensures that a client sees the actual (run-time) type an untyped value would +have. It also permits type-checking of lhs shift operands "as if the shift +were not present": when updateExprType visits an untyped lhs shift operand +and assigns it it's final type, that type must be an integer type, and a +constant lhs must be representable as an integer. 
+ +When an expression gets its final type, either on the way out from rawExpr, +on the way down in updateExprType, or at the end of the type checker run, +the type (and constant value, if any) is recorded via Info.Types, if present. +*/ + +type opPredicates map[syntax.Operator]func(Type) bool + +var unaryOpPredicates opPredicates + +func init() { + // Setting unaryOpPredicates in init avoids declaration cycles. + unaryOpPredicates = opPredicates{ + syntax.Add: allNumeric, + syntax.Sub: allNumeric, + syntax.Xor: allInteger, + syntax.Not: allBoolean, + } +} + +func (check *Checker) op(m opPredicates, x *operand, op syntax.Operator) bool { + if pred := m[op]; pred != nil { + if !pred(x.typ) { + check.errorf(x, UndefinedOp, invalidOp+"operator %s not defined on %s", op, x) + return false + } + } else { + check.errorf(x, InvalidSyntaxTree, "unknown operator %s", op) + return false + } + return true +} + +// opPos returns the position of the operator if x is an operation; +// otherwise it returns the start position of x. +func opPos(x syntax.Expr) syntax.Pos { + switch op := x.(type) { + case nil: + return nopos // don't crash + case *syntax.Operation: + return op.Pos() + default: + return syntax.StartPos(x) + } +} + +// opName returns the name of the operation if x is an operation +// that might overflow; otherwise it returns the empty string. +func opName(x syntax.Expr) string { + if e, _ := x.(*syntax.Operation); e != nil { + op := int(e.Op) + if e.Y == nil { + if op < len(op2str1) { + return op2str1[op] + } + } else { + if op < len(op2str2) { + return op2str2[op] + } + } + } + return "" +} + +var op2str1 = [...]string{ + syntax.Xor: "bitwise complement", +} + +// This is only used for operations that may cause overflow. +var op2str2 = [...]string{ + syntax.Add: "addition", + syntax.Sub: "subtraction", + syntax.Xor: "bitwise XOR", + syntax.Mul: "multiplication", + syntax.Shl: "shift", +} + +// If typ is a type parameter, underIs returns the result of typ.underIs(f). 
+// Otherwise, underIs returns the result of f(under(typ)). +func underIs(typ Type, f func(Type) bool) bool { + if tpar, _ := typ.(*TypeParam); tpar != nil { + return tpar.underIs(f) + } + return f(under(typ)) +} + +func (check *Checker) unary(x *operand, e *syntax.Operation) { + check.expr(nil, x, e.X) + if x.mode == invalid { + return + } + + op := e.Op + switch op { + case syntax.And: + // spec: "As an exception to the addressability + // requirement x may also be a composite literal." + if _, ok := syntax.Unparen(e.X).(*syntax.CompositeLit); !ok && x.mode != variable { + check.errorf(x, UnaddressableOperand, invalidOp+"cannot take address of %s", x) + x.mode = invalid + return + } + x.mode = value + x.typ = &Pointer{base: x.typ} + return + + case syntax.Recv: + u := coreType(x.typ) + if u == nil { + check.errorf(x, InvalidReceive, invalidOp+"cannot receive from %s (no core type)", x) + x.mode = invalid + return + } + ch, _ := u.(*Chan) + if ch == nil { + check.errorf(x, InvalidReceive, invalidOp+"cannot receive from non-channel %s", x) + x.mode = invalid + return + } + if ch.dir == SendOnly { + check.errorf(x, InvalidReceive, invalidOp+"cannot receive from send-only channel %s", x) + x.mode = invalid + return + } + x.mode = commaok + x.typ = ch.elem + check.hasCallOrRecv = true + return + + case syntax.Tilde: + // Provide a better error position and message than what check.op below would do. 
+ if !allInteger(x.typ) { + check.error(e, UndefinedOp, "cannot use ~ outside of interface or type constraint") + x.mode = invalid + return + } + check.error(e, UndefinedOp, "cannot use ~ outside of interface or type constraint (use ^ for bitwise complement)") + op = syntax.Xor + } + + if !check.op(unaryOpPredicates, x, op) { + x.mode = invalid + return + } + + if x.mode == constant_ { + if x.val.Kind() == constant.Unknown { + // nothing to do (and don't cause an error below in the overflow check) + return + } + var prec uint + if isUnsigned(x.typ) { + prec = uint(check.conf.sizeof(x.typ) * 8) + } + x.val = constant.UnaryOp(op2tok[op], x.val, prec) + x.expr = e + check.overflow(x, opPos(x.expr)) + return + } + + x.mode = value + // x.typ remains unchanged +} + +func isShift(op syntax.Operator) bool { + return op == syntax.Shl || op == syntax.Shr +} + +func isComparison(op syntax.Operator) bool { + // Note: tokens are not ordered well to make this much easier + switch op { + case syntax.Eql, syntax.Neq, syntax.Lss, syntax.Leq, syntax.Gtr, syntax.Geq: + return true + } + return false +} + +// updateExprType updates the type of x to typ and invokes itself +// recursively for the operands of x, depending on expression kind. +// If typ is still an untyped and not the final type, updateExprType +// only updates the recorded untyped type for x and possibly its +// operands. Otherwise (i.e., typ is not an untyped type anymore, +// or it is the final type for x), the type and value are recorded. +// Also, if x is a constant, it must be representable as a value of typ, +// and if x is the (formerly untyped) lhs operand of a non-constant +// shift, it must be an integer value. 
+func (check *Checker) updateExprType(x syntax.Expr, typ Type, final bool) { + check.updateExprType0(nil, x, typ, final) +} + +func (check *Checker) updateExprType0(parent, x syntax.Expr, typ Type, final bool) { + old, found := check.untyped[x] + if !found { + return // nothing to do + } + + // update operands of x if necessary + switch x := x.(type) { + case *syntax.BadExpr, + *syntax.FuncLit, + *syntax.CompositeLit, + *syntax.IndexExpr, + *syntax.SliceExpr, + *syntax.AssertExpr, + *syntax.ListExpr, + //*syntax.StarExpr, + *syntax.KeyValueExpr, + *syntax.ArrayType, + *syntax.StructType, + *syntax.FuncType, + *syntax.InterfaceType, + *syntax.MapType, + *syntax.ChanType: + // These expression are never untyped - nothing to do. + // The respective sub-expressions got their final types + // upon assignment or use. + if debug { + check.dump("%v: found old type(%s): %s (new: %s)", atPos(x), x, old.typ, typ) + unreachable() + } + return + + case *syntax.CallExpr: + // Resulting in an untyped constant (e.g., built-in complex). + // The respective calls take care of calling updateExprType + // for the arguments if necessary. + + case *syntax.Name, *syntax.BasicLit, *syntax.SelectorExpr: + // An identifier denoting a constant, a constant literal, + // or a qualified identifier (imported untyped constant). + // No operands to take care of. + + case *syntax.ParenExpr: + check.updateExprType0(x, x.X, typ, final) + + // case *syntax.UnaryExpr: + // // If x is a constant, the operands were constants. + // // The operands don't need to be updated since they + // // never get "materialized" into a typed value. If + // // left in the untyped map, they will be processed + // // at the end of the type check. 
+ // if old.val != nil { + // break + // } + // check.updateExprType0(x, x.X, typ, final) + + case *syntax.Operation: + if x.Y == nil { + // unary expression + if x.Op == syntax.Mul { + // see commented out code for StarExpr above + // TODO(gri) needs cleanup + if debug { + panic("unimplemented") + } + return + } + // If x is a constant, the operands were constants. + // The operands don't need to be updated since they + // never get "materialized" into a typed value. If + // left in the untyped map, they will be processed + // at the end of the type check. + if old.val != nil { + break + } + check.updateExprType0(x, x.X, typ, final) + break + } + + // binary expression + if old.val != nil { + break // see comment for unary expressions + } + if isComparison(x.Op) { + // The result type is independent of operand types + // and the operand types must have final types. + } else if isShift(x.Op) { + // The result type depends only on lhs operand. + // The rhs type was updated when checking the shift. + check.updateExprType0(x, x.X, typ, final) + } else { + // The operand types match the result type. + check.updateExprType0(x, x.X, typ, final) + check.updateExprType0(x, x.Y, typ, final) + } + + default: + unreachable() + } + + // If the new type is not final and still untyped, just + // update the recorded type. + if !final && isUntyped(typ) { + old.typ = under(typ).(*Basic) + check.untyped[x] = old + return + } + + // Otherwise we have the final (typed or untyped type). + // Remove it from the map of yet untyped expressions. + delete(check.untyped, x) + + if old.isLhs { + // If x is the lhs of a shift, its final type must be integer. + // We already know from the shift check that it is representable + // as an integer if it is a constant. 
+ if !allInteger(typ) { + check.errorf(x, InvalidShiftOperand, invalidOp+"shifted operand %s (type %s) must be integer", x, typ) + return + } + // Even if we have an integer, if the value is a constant we + // still must check that it is representable as the specific + // int type requested (was go.dev/issue/22969). Fall through here. + } + if old.val != nil { + // If x is a constant, it must be representable as a value of typ. + c := operand{old.mode, x, old.typ, old.val, 0} + check.convertUntyped(&c, typ) + if c.mode == invalid { + return + } + } + + // Everything's fine, record final type and value for x. + check.recordTypeAndValue(x, old.mode, typ, old.val) +} + +// updateExprVal updates the value of x to val. +func (check *Checker) updateExprVal(x syntax.Expr, val constant.Value) { + if info, ok := check.untyped[x]; ok { + info.val = val + check.untyped[x] = info + } +} + +// implicitTypeAndValue returns the implicit type of x when used in a context +// where the target type is expected. If no such implicit conversion is +// possible, it returns a nil Type and non-zero error code. +// +// If x is a constant operand, the returned constant.Value will be the +// representation of x in this context. 
+func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, constant.Value, Code) { + if x.mode == invalid || isTyped(x.typ) || !isValid(target) { + return x.typ, nil, 0 + } + // x is untyped + + if isUntyped(target) { + // both x and target are untyped + if m := maxType(x.typ, target); m != nil { + return m, nil, 0 + } + return nil, nil, InvalidUntypedConversion + } + + if x.isNil() { + assert(isUntyped(x.typ)) + if hasNil(target) { + return target, nil, 0 + } + return nil, nil, InvalidUntypedConversion + } + + switch u := under(target).(type) { + case *Basic: + if x.mode == constant_ { + v, code := check.representation(x, u) + if code != 0 { + return nil, nil, code + } + return target, v, code + } + // Non-constant untyped values may appear as the + // result of comparisons (untyped bool), intermediate + // (delayed-checked) rhs operands of shifts, and as + // the value nil. + switch x.typ.(*Basic).kind { + case UntypedBool: + if !isBoolean(target) { + return nil, nil, InvalidUntypedConversion + } + case UntypedInt, UntypedRune, UntypedFloat, UntypedComplex: + if !isNumeric(target) { + return nil, nil, InvalidUntypedConversion + } + case UntypedString: + // Non-constant untyped string values are not permitted by the spec and + // should not occur during normal typechecking passes, but this path is + // reachable via the AssignableTo API. + if !isString(target) { + return nil, nil, InvalidUntypedConversion + } + default: + return nil, nil, InvalidUntypedConversion + } + case *Interface: + if isTypeParam(target) { + if !u.typeSet().underIs(func(u Type) bool { + if u == nil { + return false + } + t, _, _ := check.implicitTypeAndValue(x, u) + return t != nil + }) { + return nil, nil, InvalidUntypedConversion + } + break + } + // Update operand types to the default type rather than the target + // (interface) type: values must have concrete dynamic types. + // Untyped nil was handled upfront. 
+ if !u.Empty() { + return nil, nil, InvalidUntypedConversion // cannot assign untyped values to non-empty interfaces + } + return Default(x.typ), nil, 0 // default type for nil is nil + default: + return nil, nil, InvalidUntypedConversion + } + return target, nil, 0 +} + +// If switchCase is true, the operator op is ignored. +func (check *Checker) comparison(x, y *operand, op syntax.Operator, switchCase bool) { + // Avoid spurious errors if any of the operands has an invalid type (go.dev/issue/54405). + if !isValid(x.typ) || !isValid(y.typ) { + x.mode = invalid + return + } + + if switchCase { + op = syntax.Eql + } + + errOp := x // operand for which error is reported, if any + cause := "" // specific error cause, if any + + // spec: "In any comparison, the first operand must be assignable + // to the type of the second operand, or vice versa." + code := MismatchedTypes + ok, _ := x.assignableTo(check, y.typ, nil) + if !ok { + ok, _ = y.assignableTo(check, x.typ, nil) + } + if !ok { + // Report the error on the 2nd operand since we only + // know after seeing the 2nd operand whether we have + // a type mismatch. + errOp = y + cause = check.sprintf("mismatched types %s and %s", x.typ, y.typ) + goto Error + } + + // check if comparison is defined for operands + code = UndefinedOp + switch op { + case syntax.Eql, syntax.Neq: + // spec: "The equality operators == and != apply to operands that are comparable." + switch { + case x.isNil() || y.isNil(): + // Comparison against nil requires that the other operand type has nil. + typ := x.typ + if x.isNil() { + typ = y.typ + } + if !hasNil(typ) { + // This case should only be possible for "nil == nil". + // Report the error on the 2nd operand since we only + // know after seeing the 2nd operand whether we have + // an invalid comparison. 
+ errOp = y + goto Error + } + + case !Comparable(x.typ): + errOp = x + cause = check.incomparableCause(x.typ) + goto Error + + case !Comparable(y.typ): + errOp = y + cause = check.incomparableCause(y.typ) + goto Error + } + + case syntax.Lss, syntax.Leq, syntax.Gtr, syntax.Geq: + // spec: The ordering operators <, <=, >, and >= apply to operands that are ordered." + switch { + case !allOrdered(x.typ): + errOp = x + goto Error + case !allOrdered(y.typ): + errOp = y + goto Error + } + + default: + unreachable() + } + + // comparison is ok + if x.mode == constant_ && y.mode == constant_ { + x.val = constant.MakeBool(constant.Compare(x.val, op2tok[op], y.val)) + // The operands are never materialized; no need to update + // their types. + } else { + x.mode = value + // The operands have now their final types, which at run- + // time will be materialized. Update the expression trees. + // If the current types are untyped, the materialized type + // is the respective default type. + check.updateExprType(x.expr, Default(x.typ), true) + check.updateExprType(y.expr, Default(y.typ), true) + } + + // spec: "Comparison operators compare two operands and yield + // an untyped boolean value." + x.typ = Typ[UntypedBool] + return + +Error: + // We have an offending operand errOp and possibly an error cause. 
+ if cause == "" { + if isTypeParam(x.typ) || isTypeParam(y.typ) { + // TODO(gri) should report the specific type causing the problem, if any + if !isTypeParam(x.typ) { + errOp = y + } + cause = check.sprintf("type parameter %s is not comparable with %s", errOp.typ, op) + } else { + cause = check.sprintf("operator %s not defined on %s", op, check.kindString(errOp.typ)) // catch-all + } + } + if switchCase { + check.errorf(x, code, "invalid case %s in switch on %s (%s)", x.expr, y.expr, cause) // error position always at 1st operand + } else { + check.errorf(errOp, code, invalidOp+"%s %s %s (%s)", x.expr, op, y.expr, cause) + } + x.mode = invalid +} + +// incomparableCause returns a more specific cause why typ is not comparable. +// If there is no more specific cause, the result is "". +func (check *Checker) incomparableCause(typ Type) string { + switch under(typ).(type) { + case *Slice, *Signature, *Map: + return check.kindString(typ) + " can only be compared to nil" + } + // see if we can extract a more specific error + var cause string + comparable(typ, true, nil, func(format string, args ...interface{}) { + cause = check.sprintf(format, args...) + }) + return cause +} + +// kindString returns the type kind as a string. +func (check *Checker) kindString(typ Type) string { + switch under(typ).(type) { + case *Array: + return "array" + case *Slice: + return "slice" + case *Struct: + return "struct" + case *Pointer: + return "pointer" + case *Signature: + return "func" + case *Interface: + if isTypeParam(typ) { + return check.sprintf("type parameter %s", typ) + } + return "interface" + case *Map: + return "map" + case *Chan: + return "chan" + default: + return check.sprintf("%s", typ) // catch-all + } +} + +// If e != nil, it must be the shift expression; it may be nil for non-constant shifts. +func (check *Checker) shift(x, y *operand, e syntax.Expr, op syntax.Operator) { + // TODO(gri) This function seems overly complex. Revisit. 
+ + var xval constant.Value + if x.mode == constant_ { + xval = constant.ToInt(x.val) + } + + if allInteger(x.typ) || isUntyped(x.typ) && xval != nil && xval.Kind() == constant.Int { + // The lhs is of integer type or an untyped constant representable + // as an integer. Nothing to do. + } else { + // shift has no chance + check.errorf(x, InvalidShiftOperand, invalidOp+"shifted operand %s must be integer", x) + x.mode = invalid + return + } + + // spec: "The right operand in a shift expression must have integer type + // or be an untyped constant representable by a value of type uint." + + // Check that constants are representable by uint, but do not convert them + // (see also go.dev/issue/47243). + var yval constant.Value + if y.mode == constant_ { + // Provide a good error message for negative shift counts. + yval = constant.ToInt(y.val) // consider -1, 1.0, but not -1.1 + if yval.Kind() == constant.Int && constant.Sign(yval) < 0 { + check.errorf(y, InvalidShiftCount, invalidOp+"negative shift count %s", y) + x.mode = invalid + return + } + + if isUntyped(y.typ) { + // Caution: Check for representability here, rather than in the switch + // below, because isInteger includes untyped integers (was bug go.dev/issue/43697). + check.representable(y, Typ[Uint]) + if y.mode == invalid { + x.mode = invalid + return + } + } + } else { + // Check that RHS is otherwise at least of integer type. + switch { + case allInteger(y.typ): + if !allUnsigned(y.typ) && !check.verifyVersionf(y, go1_13, invalidOp+"signed shift count %s", y) { + x.mode = invalid + return + } + case isUntyped(y.typ): + // This is incorrect, but preserves pre-existing behavior. + // See also go.dev/issue/47410. 
+ check.convertUntyped(y, Typ[Uint]) + if y.mode == invalid { + x.mode = invalid + return + } + default: + check.errorf(y, InvalidShiftCount, invalidOp+"shift count %s must be integer", y) + x.mode = invalid + return + } + } + + if x.mode == constant_ { + if y.mode == constant_ { + // if either x or y has an unknown value, the result is unknown + if x.val.Kind() == constant.Unknown || y.val.Kind() == constant.Unknown { + x.val = constant.MakeUnknown() + // ensure the correct type - see comment below + if !isInteger(x.typ) { + x.typ = Typ[UntypedInt] + } + return + } + // rhs must be within reasonable bounds in constant shifts + const shiftBound = 1023 - 1 + 52 // so we can express smallestFloat64 (see go.dev/issue/44057) + s, ok := constant.Uint64Val(yval) + if !ok || s > shiftBound { + check.errorf(y, InvalidShiftCount, invalidOp+"invalid shift count %s", y) + x.mode = invalid + return + } + // The lhs is representable as an integer but may not be an integer + // (e.g., 2.0, an untyped float) - this can only happen for untyped + // non-integer numeric constants. Correct the type so that the shift + // result is of integer type. + if !isInteger(x.typ) { + x.typ = Typ[UntypedInt] + } + // x is a constant so xval != nil and it must be of Int kind. + x.val = constant.Shift(xval, op2tok[op], uint(s)) + x.expr = e + check.overflow(x, opPos(x.expr)) + return + } + + // non-constant shift with constant lhs + if isUntyped(x.typ) { + // spec: "If the left operand of a non-constant shift + // expression is an untyped constant, the type of the + // constant is what it would be if the shift expression + // were replaced by its left operand alone.". + // + // Delay operand checking until we know the final type + // by marking the lhs expression as lhs shift operand. + // + // Usually (in correct programs), the lhs expression + // is in the untyped map. 
However, it is possible to + // create incorrect programs where the same expression + // is evaluated twice (via a declaration cycle) such + // that the lhs expression type is determined in the + // first round and thus deleted from the map, and then + // not found in the second round (double insertion of + // the same expr node still just leads to one entry for + // that node, and it can only be deleted once). + // Be cautious and check for presence of entry. + // Example: var e, f = int(1<<""[f]) // go.dev/issue/11347 + if info, found := check.untyped[x.expr]; found { + info.isLhs = true + check.untyped[x.expr] = info + } + // keep x's type + x.mode = value + return + } + } + + // non-constant shift - lhs must be an integer + if !allInteger(x.typ) { + check.errorf(x, InvalidShiftOperand, invalidOp+"shifted operand %s must be integer", x) + x.mode = invalid + return + } + + x.mode = value +} + +var binaryOpPredicates opPredicates + +func init() { + // Setting binaryOpPredicates in init avoids declaration cycles. + binaryOpPredicates = opPredicates{ + syntax.Add: allNumericOrString, + syntax.Sub: allNumeric, + syntax.Mul: allNumeric, + syntax.Div: allNumeric, + syntax.Rem: allInteger, + + syntax.And: allInteger, + syntax.Or: allInteger, + syntax.Xor: allInteger, + syntax.AndNot: allInteger, + + syntax.AndAnd: allBoolean, + syntax.OrOr: allBoolean, + } +} + +// If e != nil, it must be the binary expression; it may be nil for non-constant expressions +// (when invoked for an assignment operation where the binary expression is implicit). 
+func (check *Checker) binary(x *operand, e syntax.Expr, lhs, rhs syntax.Expr, op syntax.Operator) { + var y operand + + check.expr(nil, x, lhs) + check.expr(nil, &y, rhs) + + if x.mode == invalid { + return + } + if y.mode == invalid { + x.mode = invalid + x.expr = y.expr + return + } + + if isShift(op) { + check.shift(x, &y, e, op) + return + } + + check.matchTypes(x, &y) + if x.mode == invalid { + return + } + + if isComparison(op) { + check.comparison(x, &y, op, false) + return + } + + if !Identical(x.typ, y.typ) { + // only report an error if we have valid types + // (otherwise we had an error reported elsewhere already) + if isValid(x.typ) && isValid(y.typ) { + if e != nil { + check.errorf(x, MismatchedTypes, invalidOp+"%s (mismatched types %s and %s)", e, x.typ, y.typ) + } else { + check.errorf(x, MismatchedTypes, invalidOp+"%s %s= %s (mismatched types %s and %s)", lhs, op, rhs, x.typ, y.typ) + } + } + x.mode = invalid + return + } + + if !check.op(binaryOpPredicates, x, op) { + x.mode = invalid + return + } + + if op == syntax.Div || op == syntax.Rem { + // check for zero divisor + if (x.mode == constant_ || allInteger(x.typ)) && y.mode == constant_ && constant.Sign(y.val) == 0 { + check.error(&y, DivByZero, invalidOp+"division by zero") + x.mode = invalid + return + } + + // check for divisor underflow in complex division (see go.dev/issue/20227) + if x.mode == constant_ && y.mode == constant_ && isComplex(x.typ) { + re, im := constant.Real(y.val), constant.Imag(y.val) + re2, im2 := constant.BinaryOp(re, token.MUL, re), constant.BinaryOp(im, token.MUL, im) + if constant.Sign(re2) == 0 && constant.Sign(im2) == 0 { + check.error(&y, DivByZero, invalidOp+"division by zero") + x.mode = invalid + return + } + } + } + + if x.mode == constant_ && y.mode == constant_ { + // if either x or y has an unknown value, the result is unknown + if x.val.Kind() == constant.Unknown || y.val.Kind() == constant.Unknown { + x.val = constant.MakeUnknown() + // x.typ is unchanged 
+ return + } + // force integer division for integer operands + tok := op2tok[op] + if op == syntax.Div && isInteger(x.typ) { + tok = token.QUO_ASSIGN + } + x.val = constant.BinaryOp(x.val, tok, y.val) + x.expr = e + check.overflow(x, opPos(x.expr)) + return + } + + x.mode = value + // x.typ is unchanged +} + +// matchTypes attempts to convert any untyped types x and y such that they match. +// If an error occurs, x.mode is set to invalid. +func (check *Checker) matchTypes(x, y *operand) { + // mayConvert reports whether the operands x and y may + // possibly have matching types after converting one + // untyped operand to the type of the other. + // If mayConvert returns true, we try to convert the + // operands to each other's types, and if that fails + // we report a conversion failure. + // If mayConvert returns false, we continue without an + // attempt at conversion, and if the operand types are + // not compatible, we report a type mismatch error. + mayConvert := func(x, y *operand) bool { + // If both operands are typed, there's no need for an implicit conversion. + if isTyped(x.typ) && isTyped(y.typ) { + return false + } + // An untyped operand may convert to its default type when paired with an empty interface + // TODO(gri) This should only matter for comparisons (the only binary operation that is + // valid with interfaces), but in that case the assignability check should take + // care of the conversion. Verify and possibly eliminate this extra test. + if isNonTypeParamInterface(x.typ) || isNonTypeParamInterface(y.typ) { + return true + } + // A boolean type can only convert to another boolean type. + if allBoolean(x.typ) != allBoolean(y.typ) { + return false + } + // A string type can only convert to another string type. + if allString(x.typ) != allString(y.typ) { + return false + } + // Untyped nil can only convert to a type that has a nil. 
+ if x.isNil() { + return hasNil(y.typ) + } + if y.isNil() { + return hasNil(x.typ) + } + // An untyped operand cannot convert to a pointer. + // TODO(gri) generalize to type parameters + if isPointer(x.typ) || isPointer(y.typ) { + return false + } + return true + } + + if mayConvert(x, y) { + check.convertUntyped(x, y.typ) + if x.mode == invalid { + return + } + check.convertUntyped(y, x.typ) + if y.mode == invalid { + x.mode = invalid + return + } + } +} + +// exprKind describes the kind of an expression; the kind +// determines if an expression is valid in 'statement context'. +type exprKind int + +const ( + conversion exprKind = iota + expression + statement +) + +// target represent the (signature) type and description of the LHS +// variable of an assignment, or of a function result variable. +type target struct { + sig *Signature + desc string +} + +// newTarget creates a new target for the given type and description. +// The result is nil if typ is not a signature. +func newTarget(typ Type, desc string) *target { + if typ != nil { + if sig, _ := under(typ).(*Signature); sig != nil { + return &target{sig, desc} + } + } + return nil +} + +// rawExpr typechecks expression e and initializes x with the expression +// value or type. If an error occurred, x.mode is set to invalid. +// If a non-nil target T is given and e is a generic function, +// T is used to infer the type arguments for e. +// If hint != nil, it is the type of a composite literal element. +// If allowGeneric is set, the operand type may be an uninstantiated +// parameterized type or function value. 
+func (check *Checker) rawExpr(T *target, x *operand, e syntax.Expr, hint Type, allowGeneric bool) exprKind { + if check.conf.Trace { + check.trace(e.Pos(), "-- expr %s", e) + check.indent++ + defer func() { + check.indent-- + check.trace(e.Pos(), "=> %s", x) + }() + } + + kind := check.exprInternal(T, x, e, hint) + + if !allowGeneric { + check.nonGeneric(T, x) + } + + check.record(x) + + return kind +} + +// If x is a generic type, or a generic function whose type arguments cannot be inferred +// from a non-nil target T, nonGeneric reports an error and invalidates x.mode and x.typ. +// Otherwise it leaves x alone. +func (check *Checker) nonGeneric(T *target, x *operand) { + if x.mode == invalid || x.mode == novalue { + return + } + var what string + switch t := x.typ.(type) { + case *Named: + if isGeneric(t) { + what = "type" + } + case *Signature: + if t.tparams != nil { + if enableReverseTypeInference && T != nil { + check.funcInst(T, x.Pos(), x, nil, true) + return + } + what = "function" + } + } + if what != "" { + check.errorf(x.expr, WrongTypeArgCount, "cannot use generic %s %s without instantiation", what, x.expr) + x.mode = invalid + x.typ = Typ[Invalid] + } +} + +// exprInternal contains the core of type checking of expressions. +// Must only be called by rawExpr. +// (See rawExpr for an explanation of the parameters.) 
+func (check *Checker) exprInternal(T *target, x *operand, e syntax.Expr, hint Type) exprKind { + // make sure x has a valid state in case of bailout + // (was go.dev/issue/5770) + x.mode = invalid + x.typ = Typ[Invalid] + + switch e := e.(type) { + case nil: + unreachable() + + case *syntax.BadExpr: + goto Error // error was reported before + + case *syntax.Name: + check.ident(x, e, nil, false) + + case *syntax.DotsType: + // dots are handled explicitly where they are legal + // (array composite literals and parameter lists) + check.error(e, BadDotDotDotSyntax, "invalid use of '...'") + goto Error + + case *syntax.BasicLit: + if e.Bad { + goto Error // error reported during parsing + } + switch e.Kind { + case syntax.IntLit, syntax.FloatLit, syntax.ImagLit: + check.langCompat(e) + // The max. mantissa precision for untyped numeric values + // is 512 bits, or 4048 bits for each of the two integer + // parts of a fraction for floating-point numbers that are + // represented accurately in the go/constant package. + // Constant literals that are longer than this many bits + // are not meaningful; and excessively long constants may + // consume a lot of space and time for a useless conversion. + // Cap constant length with a generous upper limit that also + // allows for separators between all digits. + const limit = 10000 + if len(e.Value) > limit { + check.errorf(e, InvalidConstVal, "excessively long constant: %s... (%d chars)", e.Value[:10], len(e.Value)) + goto Error + } + } + x.setConst(e.Kind, e.Value) + if x.mode == invalid { + // The parser already establishes syntactic correctness. + // If we reach here it's because of number under-/overflow. + // TODO(gri) setConst (and in turn the go/constant package) + // should return an error describing the issue. + check.errorf(e, InvalidConstVal, "malformed constant: %s", e.Value) + goto Error + } + // Ensure that integer values don't overflow (go.dev/issue/54280). 
+ x.expr = e // make sure that check.overflow below has an error position + check.overflow(x, opPos(x.expr)) + + case *syntax.FuncLit: + if sig, ok := check.typ(e.Type).(*Signature); ok { + // Set the Scope's extent to the complete "func (...) {...}" + // so that Scope.Innermost works correctly. + sig.scope.pos = e.Pos() + sig.scope.end = syntax.EndPos(e) + if !check.conf.IgnoreFuncBodies && e.Body != nil { + // Anonymous functions are considered part of the + // init expression/func declaration which contains + // them: use existing package-level declaration info. + decl := check.decl // capture for use in closure below + iota := check.iota // capture for use in closure below (go.dev/issue/22345) + // Don't type-check right away because the function may + // be part of a type definition to which the function + // body refers. Instead, type-check as soon as possible, + // but before the enclosing scope contents changes (go.dev/issue/22992). + check.later(func() { + check.funcBody(decl, "", sig, e.Body, iota) + }).describef(e, "func literal") + } + x.mode = value + x.typ = sig + } else { + check.errorf(e, InvalidSyntaxTree, "invalid function literal %v", e) + goto Error + } + + case *syntax.CompositeLit: + var typ, base Type + + switch { + case e.Type != nil: + // composite literal type present - use it + // [...]T array types may only appear with composite literals. + // Check for them here so we don't have to handle ... in general. + if atyp, _ := e.Type.(*syntax.ArrayType); atyp != nil && atyp.Len == nil { + // We have an "open" [...]T array type. + // Create a new ArrayType with unknown length (-1) + // and finish setting it up after analyzing the literal. 
+ typ = &Array{len: -1, elem: check.varType(atyp.Elem)} + base = typ + break + } + typ = check.typ(e.Type) + base = typ + + case hint != nil: + // no composite literal type present - use hint (element type of enclosing type) + typ = hint + base, _ = deref(coreType(typ)) // *T implies &T{} + if base == nil { + check.errorf(e, InvalidLit, "invalid composite literal element type %s (no core type)", typ) + goto Error + } + + default: + // TODO(gri) provide better error messages depending on context + check.error(e, UntypedLit, "missing type in composite literal") + goto Error + } + + switch utyp := coreType(base).(type) { + case *Struct: + // Prevent crash if the struct referred to is not yet set up. + // See analogous comment for *Array. + if utyp.fields == nil { + check.error(e, InvalidTypeCycle, "invalid recursive type") + goto Error + } + if len(e.ElemList) == 0 { + break + } + // Convention for error messages on invalid struct literals: + // we mention the struct type only if it clarifies the error + // (e.g., a duplicate field error doesn't need the struct type). 
+ fields := utyp.fields + if _, ok := e.ElemList[0].(*syntax.KeyValueExpr); ok { + // all elements must have keys + visited := make([]bool, len(fields)) + for _, e := range e.ElemList { + kv, _ := e.(*syntax.KeyValueExpr) + if kv == nil { + check.error(e, MixedStructLit, "mixture of field:value and value elements in struct literal") + continue + } + key, _ := kv.Key.(*syntax.Name) + // do all possible checks early (before exiting due to errors) + // so we don't drop information on the floor + check.expr(nil, x, kv.Value) + if key == nil { + check.errorf(kv, InvalidLitField, "invalid field name %s in struct literal", kv.Key) + continue + } + i := fieldIndex(utyp.fields, check.pkg, key.Value) + if i < 0 { + check.errorf(kv.Key, MissingLitField, "unknown field %s in struct literal of type %s", key.Value, base) + continue + } + fld := fields[i] + check.recordUse(key, fld) + etyp := fld.typ + check.assignment(x, etyp, "struct literal") + // 0 <= i < len(fields) + if visited[i] { + check.errorf(kv, DuplicateLitField, "duplicate field name %s in struct literal", key.Value) + continue + } + visited[i] = true + } + } else { + // no element must have a key + for i, e := range e.ElemList { + if kv, _ := e.(*syntax.KeyValueExpr); kv != nil { + check.error(kv, MixedStructLit, "mixture of field:value and value elements in struct literal") + continue + } + check.expr(nil, x, e) + if i >= len(fields) { + check.errorf(x, InvalidStructLit, "too many values in struct literal of type %s", base) + break // cannot continue + } + // i < len(fields) + fld := fields[i] + if !fld.Exported() && fld.pkg != check.pkg { + check.errorf(x, UnexportedLitField, "implicit assignment to unexported field %s in struct literal of type %s", fld.name, base) + continue + } + etyp := fld.typ + check.assignment(x, etyp, "struct literal") + } + if len(e.ElemList) < len(fields) { + check.errorf(e.Rbrace, InvalidStructLit, "too few values in struct literal of type %s", base) + // ok to continue + } + } + + case 
*Array: + // Prevent crash if the array referred to is not yet set up. Was go.dev/issue/18643. + // This is a stop-gap solution. Should use Checker.objPath to report entire + // path starting with earliest declaration in the source. TODO(gri) fix this. + if utyp.elem == nil { + check.error(e, InvalidTypeCycle, "invalid recursive type") + goto Error + } + n := check.indexedElts(e.ElemList, utyp.elem, utyp.len) + // If we have an array of unknown length (usually [...]T arrays, but also + // arrays [n]T where n is invalid) set the length now that we know it and + // record the type for the array (usually done by check.typ which is not + // called for [...]T). We handle [...]T arrays and arrays with invalid + // length the same here because it makes sense to "guess" the length for + // the latter if we have a composite literal; e.g. for [n]int{1, 2, 3} + // where n is invalid for some reason, it seems fair to assume it should + // be 3 (see also Checked.arrayLength and go.dev/issue/27346). + if utyp.len < 0 { + utyp.len = n + // e.Type is missing if we have a composite literal element + // that is itself a composite literal with omitted type. In + // that case there is nothing to record (there is no type in + // the source at that point). + if e.Type != nil { + check.recordTypeAndValue(e.Type, typexpr, utyp, nil) + } + } + + case *Slice: + // Prevent crash if the slice referred to is not yet set up. + // See analogous comment for *Array. + if utyp.elem == nil { + check.error(e, InvalidTypeCycle, "invalid recursive type") + goto Error + } + check.indexedElts(e.ElemList, utyp.elem, -1) + + case *Map: + // Prevent crash if the map referred to is not yet set up. + // See analogous comment for *Array. + if utyp.key == nil || utyp.elem == nil { + check.error(e, InvalidTypeCycle, "invalid recursive type") + goto Error + } + // If the map key type is an interface (but not a type parameter), + // the type of a constant key must be considered when checking for + // duplicates. 
+ keyIsInterface := isNonTypeParamInterface(utyp.key) + visited := make(map[interface{}][]Type, len(e.ElemList)) + for _, e := range e.ElemList { + kv, _ := e.(*syntax.KeyValueExpr) + if kv == nil { + check.error(e, MissingLitKey, "missing key in map literal") + continue + } + check.exprWithHint(x, kv.Key, utyp.key) + check.assignment(x, utyp.key, "map literal") + if x.mode == invalid { + continue + } + if x.mode == constant_ { + duplicate := false + xkey := keyVal(x.val) + if keyIsInterface { + for _, vtyp := range visited[xkey] { + if Identical(vtyp, x.typ) { + duplicate = true + break + } + } + visited[xkey] = append(visited[xkey], x.typ) + } else { + _, duplicate = visited[xkey] + visited[xkey] = nil + } + if duplicate { + check.errorf(x, DuplicateLitKey, "duplicate key %s in map literal", x.val) + continue + } + } + check.exprWithHint(x, kv.Value, utyp.elem) + check.assignment(x, utyp.elem, "map literal") + } + + default: + // when "using" all elements unpack KeyValueExpr + // explicitly because check.use doesn't accept them + for _, e := range e.ElemList { + if kv, _ := e.(*syntax.KeyValueExpr); kv != nil { + // Ideally, we should also "use" kv.Key but we can't know + // if it's an externally defined struct key or not. Going + // forward anyway can lead to other errors. Give up instead. 
+ e = kv.Value + } + check.use(e) + } + // if utyp is invalid, an error was reported before + if isValid(utyp) { + check.errorf(e, InvalidLit, "invalid composite literal type %s", typ) + goto Error + } + } + + x.mode = value + x.typ = typ + + case *syntax.ParenExpr: + // type inference doesn't go past parentheses (targe type T = nil) + kind := check.rawExpr(nil, x, e.X, nil, false) + x.expr = e + return kind + + case *syntax.SelectorExpr: + check.selector(x, e, nil, false) + + case *syntax.IndexExpr: + if check.indexExpr(x, e) { + if !enableReverseTypeInference { + T = nil + } + check.funcInst(T, e.Pos(), x, e, true) + } + if x.mode == invalid { + goto Error + } + + case *syntax.SliceExpr: + check.sliceExpr(x, e) + if x.mode == invalid { + goto Error + } + + case *syntax.AssertExpr: + check.expr(nil, x, e.X) + if x.mode == invalid { + goto Error + } + // x.(type) expressions are encoded via TypeSwitchGuards + if e.Type == nil { + check.error(e, InvalidSyntaxTree, "invalid use of AssertExpr") + goto Error + } + if isTypeParam(x.typ) { + check.errorf(x, InvalidAssert, invalidOp+"cannot use type assertion on type parameter value %s", x) + goto Error + } + if _, ok := under(x.typ).(*Interface); !ok { + check.errorf(x, InvalidAssert, invalidOp+"%s is not an interface", x) + goto Error + } + T := check.varType(e.Type) + if !isValid(T) { + goto Error + } + check.typeAssertion(e, x, T, false) + x.mode = commaok + x.typ = T + + case *syntax.TypeSwitchGuard: + // x.(type) expressions are handled explicitly in type switches + check.error(e, InvalidSyntaxTree, "use of .(type) outside type switch") + check.use(e.X) + goto Error + + case *syntax.CallExpr: + return check.callExpr(x, e) + + case *syntax.ListExpr: + // catch-all for unexpected expression lists + check.error(e, InvalidSyntaxTree, "unexpected list of expressions") + goto Error + + // case *syntax.UnaryExpr: + // check.expr(x, e.X) + // if x.mode == invalid { + // goto Error + // } + // check.unary(x, e, e.Op) + // if 
x.mode == invalid { + // goto Error + // } + // if e.Op == token.ARROW { + // x.expr = e + // return statement // receive operations may appear in statement context + // } + + // case *syntax.BinaryExpr: + // check.binary(x, e, e.X, e.Y, e.Op) + // if x.mode == invalid { + // goto Error + // } + + case *syntax.Operation: + if e.Y == nil { + // unary expression + if e.Op == syntax.Mul { + // pointer indirection + check.exprOrType(x, e.X, false) + switch x.mode { + case invalid: + goto Error + case typexpr: + check.validVarType(e.X, x.typ) + x.typ = &Pointer{base: x.typ} + default: + var base Type + if !underIs(x.typ, func(u Type) bool { + p, _ := u.(*Pointer) + if p == nil { + check.errorf(x, InvalidIndirection, invalidOp+"cannot indirect %s", x) + return false + } + if base != nil && !Identical(p.base, base) { + check.errorf(x, InvalidIndirection, invalidOp+"pointers of %s must have identical base types", x) + return false + } + base = p.base + return true + }) { + goto Error + } + x.mode = variable + x.typ = base + } + break + } + + check.unary(x, e) + if x.mode == invalid { + goto Error + } + if e.Op == syntax.Recv { + x.expr = e + return statement // receive operations may appear in statement context + } + break + } + + // binary expression + check.binary(x, e, e.X, e.Y, e.Op) + if x.mode == invalid { + goto Error + } + + case *syntax.KeyValueExpr: + // key:value expressions are handled in composite literals + check.error(e, InvalidSyntaxTree, "no key:value expected") + goto Error + + case *syntax.ArrayType, *syntax.SliceType, *syntax.StructType, *syntax.FuncType, + *syntax.InterfaceType, *syntax.MapType, *syntax.ChanType: + x.mode = typexpr + x.typ = check.typ(e) + // Note: rawExpr (caller of exprInternal) will call check.recordTypeAndValue + // even though check.typ has already called it. This is fine as both + // times the same expression and type are recorded. 
It is also not a + // performance issue because we only reach here for composite literal + // types, which are comparatively rare. + + default: + panic(fmt.Sprintf("%s: unknown expression type %T", atPos(e), e)) + } + + // everything went well + x.expr = e + return expression + +Error: + x.mode = invalid + x.expr = e + return statement // avoid follow-up errors +} + +// keyVal maps a complex, float, integer, string or boolean constant value +// to the corresponding complex128, float64, int64, uint64, string, or bool +// Go value if possible; otherwise it returns x. +// A complex constant that can be represented as a float (such as 1.2 + 0i) +// is returned as a floating point value; if a floating point value can be +// represented as an integer (such as 1.0) it is returned as an integer value. +// This ensures that constants of different kind but equal value (such as +// 1.0 + 0i, 1.0, 1) result in the same value. +func keyVal(x constant.Value) interface{} { + switch x.Kind() { + case constant.Complex: + f := constant.ToFloat(x) + if f.Kind() != constant.Float { + r, _ := constant.Float64Val(constant.Real(x)) + i, _ := constant.Float64Val(constant.Imag(x)) + return complex(r, i) + } + x = f + fallthrough + case constant.Float: + i := constant.ToInt(x) + if i.Kind() != constant.Int { + v, _ := constant.Float64Val(x) + return v + } + x = i + fallthrough + case constant.Int: + if v, ok := constant.Int64Val(x); ok { + return v + } + if v, ok := constant.Uint64Val(x); ok { + return v + } + case constant.String: + return constant.StringVal(x) + case constant.Bool: + return constant.BoolVal(x) + } + return x +} + +// typeAssertion checks x.(T). The type of x must be an interface. 
+func (check *Checker) typeAssertion(e syntax.Expr, x *operand, T Type, typeSwitch bool) { + var cause string + if check.assertableTo(x.typ, T, &cause) { + return // success + } + + if typeSwitch { + check.errorf(e, ImpossibleAssert, "impossible type switch case: %s\n\t%s cannot have dynamic type %s %s", e, x, T, cause) + return + } + + check.errorf(e, ImpossibleAssert, "impossible type assertion: %s\n\t%s does not implement %s %s", e, T, x.typ, cause) +} + +// expr typechecks expression e and initializes x with the expression value. +// If a non-nil target T is given and e is a generic function or +// a function call, T is used to infer the type arguments for e. +// The result must be a single value. +// If an error occurred, x.mode is set to invalid. +func (check *Checker) expr(T *target, x *operand, e syntax.Expr) { + check.rawExpr(T, x, e, nil, false) + check.exclude(x, 1<= 4 (32bits) + MaxAlign int64 // maximum alignment in bytes - must be >= 1 +} + +func (s *gcSizes) Alignof(T Type) (result int64) { + defer func() { + assert(result >= 1) + }() + + // For arrays and structs, alignment is defined in terms + // of alignment of the elements and fields, respectively. + switch t := under(T).(type) { + case *Array: + // spec: "For a variable x of array type: unsafe.Alignof(x) + // is the same as unsafe.Alignof(x[0]), but at least 1." + return s.Alignof(t.elem) + case *Struct: + if len(t.fields) == 0 && IsSyncAtomicAlign64(T) { + // Special case: sync/atomic.align64 is an + // empty struct we recognize as a signal that + // the struct it contains must be + // 64-bit-aligned. + // + // This logic is equivalent to the logic in + // cmd/compile/internal/types/size.go:calcStructOffset + return 8 + } + + // spec: "For a variable x of struct type: unsafe.Alignof(x) + // is the largest of the values unsafe.Alignof(x.f) for each + // field f of x, but at least 1." 
+ max := int64(1) + for _, f := range t.fields { + if a := s.Alignof(f.typ); a > max { + max = a + } + } + return max + case *Slice, *Interface: + // Multiword data structures are effectively structs + // in which each element has size WordSize. + // Type parameters lead to variable sizes/alignments; + // StdSizes.Alignof won't be called for them. + assert(!isTypeParam(T)) + return s.WordSize + case *Basic: + // Strings are like slices and interfaces. + if t.Info()&IsString != 0 { + return s.WordSize + } + case *TypeParam, *Union: + unreachable() + } + a := s.Sizeof(T) // may be 0 or negative + // spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1." + if a < 1 { + return 1 + } + // complex{64,128} are aligned like [2]float{32,64}. + if isComplex(T) { + a /= 2 + } + if a > s.MaxAlign { + return s.MaxAlign + } + return a +} + +func (s *gcSizes) Offsetsof(fields []*Var) []int64 { + offsets := make([]int64, len(fields)) + var offs int64 + for i, f := range fields { + if offs < 0 { + // all remaining offsets are too large + offsets[i] = -1 + continue + } + // offs >= 0 + a := s.Alignof(f.typ) + offs = align(offs, a) // possibly < 0 if align overflows + offsets[i] = offs + if d := s.Sizeof(f.typ); d >= 0 && offs >= 0 { + offs += d // ok to overflow to < 0 + } else { + offs = -1 // f.typ or offs is too large + } + } + return offsets +} + +func (s *gcSizes) Sizeof(T Type) int64 { + switch t := under(T).(type) { + case *Basic: + assert(isTyped(T)) + k := t.kind + if int(k) < len(basicSizes) { + if s := basicSizes[k]; s > 0 { + return int64(s) + } + } + if k == String { + return s.WordSize * 2 + } + case *Array: + n := t.len + if n <= 0 { + return 0 + } + // n > 0 + esize := s.Sizeof(t.elem) + if esize < 0 { + return -1 // element too large + } + if esize == 0 { + return 0 // 0-size element + } + // esize > 0 + // Final size is esize * n; and size must be <= maxInt64. 
+ const maxInt64 = 1<<63 - 1 + if esize > maxInt64/n { + return -1 // esize * n overflows + } + return esize * n + case *Slice: + return s.WordSize * 3 + case *Struct: + n := t.NumFields() + if n == 0 { + return 0 + } + offsets := s.Offsetsof(t.fields) + offs := offsets[n-1] + size := s.Sizeof(t.fields[n-1].typ) + if offs < 0 || size < 0 { + return -1 // type too large + } + // gc: The last field of a non-zero-sized struct is not allowed to + // have size 0. + if offs > 0 && size == 0 { + size = 1 + } + // gc: Size includes alignment padding. + return align(offs+size, s.Alignof(t)) // may overflow to < 0 which is ok + case *Interface: + // Type parameters lead to variable sizes/alignments; + // StdSizes.Sizeof won't be called for them. + assert(!isTypeParam(T)) + return s.WordSize * 2 + case *TypeParam, *Union: + unreachable() + } + return s.WordSize // catch-all +} + +// gcSizesFor returns the Sizes used by gc for an architecture. +// The result is a nil *gcSizes pointer (which is not a valid types.Sizes) +// if a compiler/architecture pair is not known. +func gcSizesFor(compiler, arch string) *gcSizes { + if compiler != "gc" { + return nil + } + return gcArchSizes[arch] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/hilbert_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/hilbert_test.go new file mode 100644 index 0000000000000000000000000000000000000000..df8a3e7d78af57e4a384838a17bd5bbca4055cbb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/hilbert_test.go @@ -0,0 +1,206 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2_test + +import ( + "bytes" + "flag" + "fmt" + "os" + "testing" + + . 
"cmd/compile/internal/types2" +) + +var ( + H = flag.Int("H", 5, "Hilbert matrix size") + out = flag.String("out", "", "write generated program to out") +) + +func TestHilbert(t *testing.T) { + // generate source + src := program(*H, *out) + if *out != "" { + os.WriteFile(*out, src, 0666) + return + } + + DefPredeclaredTestFuncs() // declare assert (used by code generated by verify) + mustTypecheck(string(src), nil, nil) +} + +func program(n int, out string) []byte { + var g gen + + g.p(`// Code generated by: go test -run=Hilbert -H=%d -out=%q. DO NOT EDIT. + +// +`+`build ignore + +// This program tests arbitrary precision constant arithmetic +// by generating the constant elements of a Hilbert matrix H, +// its inverse I, and the product P = H*I. The product should +// be the identity matrix. +package main + +func main() { + if !ok { + printProduct() + return + } + println("PASS") +} + +`, n, out) + g.hilbert(n) + g.inverse(n) + g.product(n) + g.verify(n) + g.printProduct(n) + g.binomials(2*n - 1) + g.factorials(2*n - 1) + + return g.Bytes() +} + +type gen struct { + bytes.Buffer +} + +func (g *gen) p(format string, args ...interface{}) { + fmt.Fprintf(&g.Buffer, format, args...) 
+} + +func (g *gen) hilbert(n int) { + g.p(`// Hilbert matrix, n = %d +const ( +`, n) + for i := 0; i < n; i++ { + g.p("\t") + for j := 0; j < n; j++ { + if j > 0 { + g.p(", ") + } + g.p("h%d_%d", i, j) + } + if i == 0 { + g.p(" = ") + for j := 0; j < n; j++ { + if j > 0 { + g.p(", ") + } + g.p("1.0/(iota + %d)", j+1) + } + } + g.p("\n") + } + g.p(")\n\n") +} + +func (g *gen) inverse(n int) { + g.p(`// Inverse Hilbert matrix +const ( +`) + for i := 0; i < n; i++ { + for j := 0; j < n; j++ { + s := "+" + if (i+j)&1 != 0 { + s = "-" + } + g.p("\ti%d_%d = %s%d * b%d_%d * b%d_%d * b%d_%d * b%d_%d\n", + i, j, s, i+j+1, n+i, n-j-1, n+j, n-i-1, i+j, i, i+j, i) + } + g.p("\n") + } + g.p(")\n\n") +} + +func (g *gen) product(n int) { + g.p(`// Product matrix +const ( +`) + for i := 0; i < n; i++ { + for j := 0; j < n; j++ { + g.p("\tp%d_%d = ", i, j) + for k := 0; k < n; k++ { + if k > 0 { + g.p(" + ") + } + g.p("h%d_%d*i%d_%d", i, k, k, j) + } + g.p("\n") + } + g.p("\n") + } + g.p(")\n\n") +} + +func (g *gen) verify(n int) { + g.p(`// Verify that product is the identity matrix +const ok = +`) + for i := 0; i < n; i++ { + for j := 0; j < n; j++ { + if j == 0 { + g.p("\t") + } else { + g.p(" && ") + } + v := 0 + if i == j { + v = 1 + } + g.p("p%d_%d == %d", i, j, v) + } + g.p(" &&\n") + } + g.p("\ttrue\n\n") + + // verify ok at type-check time + if *out == "" { + g.p("const _ = assert(ok)\n\n") + } +} + +func (g *gen) printProduct(n int) { + g.p("func printProduct() {\n") + for i := 0; i < n; i++ { + g.p("\tprintln(") + for j := 0; j < n; j++ { + if j > 0 { + g.p(", ") + } + g.p("p%d_%d", i, j) + } + g.p(")\n") + } + g.p("}\n\n") +} + +func (g *gen) binomials(n int) { + g.p(`// Binomials +const ( +`) + for j := 0; j <= n; j++ { + if j > 0 { + g.p("\n") + } + for k := 0; k <= j; k++ { + g.p("\tb%d_%d = f%d / (f%d*f%d)\n", j, k, j, k, j-k) + } + } + g.p(")\n\n") +} + +func (g *gen) factorials(n int) { + g.p(`// Factorials +const ( + f0 = 1 + f1 = 1 +`) + for i := 2; i <= n; i++ 
{ + g.p("\tf%d = f%d * %d\n", i, i-1, i) + } + g.p(")\n\n") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/importer_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/importer_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6b9b5009186cfb100d6bbb8112ba19c1ddcda451 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/importer_test.go @@ -0,0 +1,35 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements the (temporary) plumbing to get importing to work. + +package types2_test + +import ( + gcimporter "cmd/compile/internal/importer" + "cmd/compile/internal/types2" + "io" +) + +func defaultImporter() types2.Importer { + return &gcimports{ + packages: make(map[string]*types2.Package), + } +} + +type gcimports struct { + packages map[string]*types2.Package + lookup func(path string) (io.ReadCloser, error) +} + +func (m *gcimports) Import(path string) (*types2.Package, error) { + return m.ImportFrom(path, "" /* no vendoring */, 0) +} + +func (m *gcimports) ImportFrom(path, srcDir string, mode types2.ImportMode) (*types2.Package, error) { + if mode != 0 { + panic("mode must be 0") + } + return gcimporter.Import(m.packages, path, srcDir, m.lookup) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/index.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/index.go new file mode 100644 index 0000000000000000000000000000000000000000..4db2213086444e4cd974f4a3e12f9ae2fa83a684 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/index.go @@ -0,0 +1,464 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This file implements typechecking of index/slice expressions. + +package types2 + +import ( + "cmd/compile/internal/syntax" + "go/constant" + . "internal/types/errors" +) + +// If e is a valid function instantiation, indexExpr returns true. +// In that case x represents the uninstantiated function value and +// it is the caller's responsibility to instantiate the function. +func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst bool) { + check.exprOrType(x, e.X, true) + // x may be generic + + switch x.mode { + case invalid: + check.use(e.Index) + return false + + case typexpr: + // type instantiation + x.mode = invalid + // TODO(gri) here we re-evaluate e.X - try to avoid this + x.typ = check.varType(e) + if isValid(x.typ) { + x.mode = typexpr + } + return false + + case value: + if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 { + // function instantiation + return true + } + } + + // x should not be generic at this point, but be safe and check + check.nonGeneric(nil, x) + if x.mode == invalid { + return false + } + + // ordinary index expression + valid := false + length := int64(-1) // valid if >= 0 + switch typ := under(x.typ).(type) { + case *Basic: + if isString(typ) { + valid = true + if x.mode == constant_ { + length = int64(len(constant.StringVal(x.val))) + } + // an indexed string always yields a byte value + // (not a constant) even if the string and the + // index are constant + x.mode = value + x.typ = universeByte // use 'byte' name + } + + case *Array: + valid = true + length = typ.len + if x.mode != variable { + x.mode = value + } + x.typ = typ.elem + + case *Pointer: + if typ, _ := under(typ.base).(*Array); typ != nil { + valid = true + length = typ.len + x.mode = variable + x.typ = typ.elem + } + + case *Slice: + valid = true + x.mode = variable + x.typ = typ.elem + + case *Map: + index := check.singleIndex(e) + if index == nil { + x.mode = invalid + return false + } + var key operand + 
check.expr(nil, &key, index) + check.assignment(&key, typ.key, "map index") + // ok to continue even if indexing failed - map element type is known + x.mode = mapindex + x.typ = typ.elem + x.expr = e + return false + + case *Interface: + if !isTypeParam(x.typ) { + break + } + // TODO(gri) report detailed failure cause for better error messages + var key, elem Type // key != nil: we must have all maps + mode := variable // non-maps result mode + // TODO(gri) factor out closure and use it for non-typeparam cases as well + if typ.typeSet().underIs(func(u Type) bool { + l := int64(-1) // valid if >= 0 + var k, e Type // k is only set for maps + switch t := u.(type) { + case *Basic: + if isString(t) { + e = universeByte + mode = value + } + case *Array: + l = t.len + e = t.elem + if x.mode != variable { + mode = value + } + case *Pointer: + if t, _ := under(t.base).(*Array); t != nil { + l = t.len + e = t.elem + } + case *Slice: + e = t.elem + case *Map: + k = t.key + e = t.elem + } + if e == nil { + return false + } + if elem == nil { + // first type + length = l + key, elem = k, e + return true + } + // all map keys must be identical (incl. all nil) + // (that is, we cannot mix maps with other types) + if !Identical(key, k) { + return false + } + // all element types must be identical + if !Identical(elem, e) { + return false + } + // track the minimal length for arrays, if any + if l >= 0 && l < length { + length = l + } + return true + }) { + // For maps, the index expression must be assignable to the map key type. 
+ if key != nil { + index := check.singleIndex(e) + if index == nil { + x.mode = invalid + return false + } + var k operand + check.expr(nil, &k, index) + check.assignment(&k, key, "map index") + // ok to continue even if indexing failed - map element type is known + x.mode = mapindex + x.typ = elem + x.expr = e + return false + } + + // no maps + valid = true + x.mode = mode + x.typ = elem + } + } + + if !valid { + check.errorf(e.Pos(), NonSliceableOperand, invalidOp+"cannot index %s", x) + check.use(e.Index) + x.mode = invalid + return false + } + + index := check.singleIndex(e) + if index == nil { + x.mode = invalid + return false + } + + // In pathological (invalid) cases (e.g.: type T1 [][[]T1{}[0][0]]T0) + // the element type may be accessed before it's set. Make sure we have + // a valid type. + if x.typ == nil { + x.typ = Typ[Invalid] + } + + check.index(index, length) + return false +} + +func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) { + check.expr(nil, x, e.X) + if x.mode == invalid { + check.use(e.Index[:]...) + return + } + + valid := false + length := int64(-1) // valid if >= 0 + switch u := coreString(x.typ).(type) { + case nil: + check.errorf(x, NonSliceableOperand, invalidOp+"cannot slice %s: %s has no core type", x, x.typ) + x.mode = invalid + return + + case *Basic: + if isString(u) { + if e.Full { + at := e.Index[2] + if at == nil { + at = e // e.Index[2] should be present but be careful + } + check.error(at, InvalidSliceExpr, invalidOp+"3-index slice of string") + x.mode = invalid + return + } + valid = true + if x.mode == constant_ { + length = int64(len(constant.StringVal(x.val))) + } + // spec: "For untyped string operands the result + // is a non-constant value of type string." 
+ if isUntyped(x.typ) { + x.typ = Typ[String] + } + } + + case *Array: + valid = true + length = u.len + if x.mode != variable { + check.errorf(x, NonSliceableOperand, invalidOp+"%s (slice of unaddressable value)", x) + x.mode = invalid + return + } + x.typ = &Slice{elem: u.elem} + + case *Pointer: + if u, _ := under(u.base).(*Array); u != nil { + valid = true + length = u.len + x.typ = &Slice{elem: u.elem} + } + + case *Slice: + valid = true + // x.typ doesn't change + } + + if !valid { + check.errorf(x, NonSliceableOperand, invalidOp+"cannot slice %s", x) + x.mode = invalid + return + } + + x.mode = value + + // spec: "Only the first index may be omitted; it defaults to 0." + if e.Full && (e.Index[1] == nil || e.Index[2] == nil) { + check.error(e, InvalidSyntaxTree, "2nd and 3rd index required in 3-index slice") + x.mode = invalid + return + } + + // check indices + var ind [3]int64 + for i, expr := range e.Index { + x := int64(-1) + switch { + case expr != nil: + // The "capacity" is only known statically for strings, arrays, + // and pointers to arrays, and it is the same as the length for + // those types. + max := int64(-1) + if length >= 0 { + max = length + 1 + } + if _, v := check.index(expr, max); v >= 0 { + x = v + } + case i == 0: + // default is 0 for the first index + x = 0 + case length >= 0: + // default is length (== capacity) otherwise + x = length + } + ind[i] = x + } + + // constant indices must be in range + // (check.index already checks that existing indices >= 0) +L: + for i, x := range ind[:len(ind)-1] { + if x > 0 { + for j, y := range ind[i+1:] { + if y >= 0 && y < x { + // The value y corresponds to the expression e.Index[i+1+j]. + // Because y >= 0, it must have been set from the expression + // when checking indices and thus e.Index[i+1+j] is not nil. 
+ check.errorf(e.Index[i+1+j], SwappedSliceIndices, "invalid slice indices: %d < %d", y, x) + break L // only report one error, ok to continue + } + } + } + } +} + +// singleIndex returns the (single) index from the index expression e. +// If the index is missing, or if there are multiple indices, an error +// is reported and the result is nil. +func (check *Checker) singleIndex(e *syntax.IndexExpr) syntax.Expr { + index := e.Index + if index == nil { + check.errorf(e, InvalidSyntaxTree, "missing index for %s", e.X) + return nil + } + if l, _ := index.(*syntax.ListExpr); l != nil { + if n := len(l.ElemList); n <= 1 { + check.errorf(e, InvalidSyntaxTree, "invalid use of ListExpr for index expression %v with %d indices", e, n) + return nil + } + // len(l.ElemList) > 1 + check.error(l.ElemList[1], InvalidIndex, invalidOp+"more than one index") + index = l.ElemList[0] // continue with first index + } + return index +} + +// index checks an index expression for validity. +// If max >= 0, it is the upper bound for index. +// If the result typ is != Typ[Invalid], index is valid and typ is its (possibly named) integer type. +// If the result val >= 0, index is valid and val is its constant int value. +func (check *Checker) index(index syntax.Expr, max int64) (typ Type, val int64) { + typ = Typ[Invalid] + val = -1 + + var x operand + check.expr(nil, &x, index) + if !check.isValidIndex(&x, InvalidIndex, "index", false) { + return + } + + if x.mode != constant_ { + return x.typ, -1 + } + + if x.val.Kind() == constant.Unknown { + return + } + + v, ok := constant.Int64Val(x.val) + assert(ok) + if max >= 0 && v >= max { + check.errorf(&x, InvalidIndex, invalidArg+"index %s out of bounds [0:%d]", x.val.String(), max) + return + } + + // 0 <= v [ && v < max ] + return x.typ, v +} + +// isValidIndex checks whether operand x satisfies the criteria for integer +// index values. If allowNegative is set, a constant operand may be negative. 
+// If the operand is not valid, an error is reported (using what as context) +// and the result is false. +func (check *Checker) isValidIndex(x *operand, code Code, what string, allowNegative bool) bool { + if x.mode == invalid { + return false + } + + // spec: "a constant index that is untyped is given type int" + check.convertUntyped(x, Typ[Int]) + if x.mode == invalid { + return false + } + + // spec: "the index x must be of integer type or an untyped constant" + if !allInteger(x.typ) { + check.errorf(x, code, invalidArg+"%s %s must be integer", what, x) + return false + } + + if x.mode == constant_ { + // spec: "a constant index must be non-negative ..." + if !allowNegative && constant.Sign(x.val) < 0 { + check.errorf(x, code, invalidArg+"%s %s must not be negative", what, x) + return false + } + + // spec: "... and representable by a value of type int" + if !representableConst(x.val, check, Typ[Int], &x.val) { + check.errorf(x, code, invalidArg+"%s %s overflows int", what, x) + return false + } + } + + return true +} + +// indexedElts checks the elements (elts) of an array or slice composite literal +// against the literal's element type (typ), and the element indices against +// the literal length if known (length >= 0). It returns the length of the +// literal (maximum index value + 1). 
+func (check *Checker) indexedElts(elts []syntax.Expr, typ Type, length int64) int64 { + visited := make(map[int64]bool, len(elts)) + var index, max int64 + for _, e := range elts { + // determine and check index + validIndex := false + eval := e + if kv, _ := e.(*syntax.KeyValueExpr); kv != nil { + if typ, i := check.index(kv.Key, length); isValid(typ) { + if i >= 0 { + index = i + validIndex = true + } else { + check.errorf(e, InvalidLitIndex, "index %s must be integer constant", kv.Key) + } + } + eval = kv.Value + } else if length >= 0 && index >= length { + check.errorf(e, OversizeArrayLit, "index %d is out of bounds (>= %d)", index, length) + } else { + validIndex = true + } + + // if we have a valid index, check for duplicate entries + if validIndex { + if visited[index] { + check.errorf(e, DuplicateLitKey, "duplicate index %d in array or slice literal", index) + } + visited[index] = true + } + index++ + if index > max { + max = index + } + + // check element against composite literal element type + var x operand + check.exprWithHint(&x, eval, typ) + check.assignment(&x, typ, "array or slice literal") + } + return max +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/infer.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/infer.go new file mode 100644 index 0000000000000000000000000000000000000000..a520f7025343ba6744de7f98b510ac5000993aa7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/infer.go @@ -0,0 +1,790 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements type parameter inference. + +package types2 + +import ( + "cmd/compile/internal/syntax" + "fmt" + . "internal/types/errors" + "strings" +) + +// If enableReverseTypeInference is set, uninstantiated and +// partially instantiated generic functions may be assigned +// (incl. 
returned) to variables of function type and type +// inference will attempt to infer the missing type arguments. +// Available with go1.21. +const enableReverseTypeInference = true // disable for debugging + +// infer attempts to infer the complete set of type arguments for generic function instantiation/call +// based on the given type parameters tparams, type arguments targs, function parameters params, and +// function arguments args, if any. There must be at least one type parameter, no more type arguments +// than type parameters, and params and args must match in number (incl. zero). +// If reverse is set, an error message's contents are reversed for a better error message for some +// errors related to reverse type inference (where the function call is synthetic). +// If successful, infer returns the complete list of given and inferred type arguments, one for each +// type parameter. Otherwise the result is nil and appropriate errors will be reported. +func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, params *Tuple, args []*operand, reverse bool) (inferred []Type) { + // Don't verify result conditions if there's no error handler installed: + // in that case, an error leads to an exit panic and the result value may + // be incorrect. But in that case it doesn't matter because callers won't + // be able to use it either. + if check.conf.Error != nil { + defer func() { + assert(inferred == nil || len(inferred) == len(tparams) && !containsNil(inferred)) + }() + } + + if traceInference { + check.dump("== infer : %s%s ➞ %s", tparams, params, targs) // aligned with rename print below + defer func() { + check.dump("=> %s ➞ %s\n", tparams, inferred) + }() + } + + // There must be at least one type parameter, and no more type arguments than type parameters. + n := len(tparams) + assert(n > 0 && len(targs) <= n) + + // Parameters and arguments must match in number. 
+ assert(params.Len() == len(args)) + + // If we already have all type arguments, we're done. + if len(targs) == n && !containsNil(targs) { + return targs + } + + // If we have invalid (ordinary) arguments, an error was reported before. + // Avoid additional inference errors and exit early (go.dev/issue/60434). + for _, arg := range args { + if arg.mode == invalid { + return nil + } + } + + // Make sure we have a "full" list of type arguments, some of which may + // be nil (unknown). Make a copy so as to not clobber the incoming slice. + if len(targs) < n { + targs2 := make([]Type, n) + copy(targs2, targs) + targs = targs2 + } + // len(targs) == n + + // Continue with the type arguments we have. Avoid matching generic + // parameters that already have type arguments against function arguments: + // It may fail because matching uses type identity while parameter passing + // uses assignment rules. Instantiate the parameter list with the type + // arguments we have, and continue with that parameter list. + + // Substitute type arguments for their respective type parameters in params, + // if any. Note that nil targs entries are ignored by check.subst. + // We do this for better error messages; it's not needed for correctness. + // For instance, given: + // + // func f[P, Q any](P, Q) {} + // + // func _(s string) { + // f[int](s, s) // ERROR + // } + // + // With substitution, we get the error: + // "cannot use s (variable of type string) as int value in argument to f[int]" + // + // Without substitution we get the (worse) error: + // "type string of s does not match inferred type int for P" + // even though the type int was provided (not inferred) for P. + // + // TODO(gri) We might be able to finesse this in the error message reporting + // (which only happens in case of an error) and then avoid doing + // the substitution (which always happens). 
+ if params.Len() > 0 { + smap := makeSubstMap(tparams, targs) + params = check.subst(nopos, params, smap, nil, check.context()).(*Tuple) + } + + // Unify parameter and argument types for generic parameters with typed arguments + // and collect the indices of generic parameters with untyped arguments. + // Terminology: generic parameter = function parameter with a type-parameterized type + u := newUnifier(tparams, targs, check.allowVersion(check.pkg, pos, go1_21)) + + errorf := func(tpar, targ Type, arg *operand) { + // provide a better error message if we can + targs := u.inferred(tparams) + if targs[0] == nil { + // The first type parameter couldn't be inferred. + // If none of them could be inferred, don't try + // to provide the inferred type in the error msg. + allFailed := true + for _, targ := range targs { + if targ != nil { + allFailed = false + break + } + } + if allFailed { + check.errorf(arg, CannotInferTypeArgs, "type %s of %s does not match %s (cannot infer %s)", targ, arg.expr, tpar, typeParamsString(tparams)) + return + } + } + smap := makeSubstMap(tparams, targs) + // TODO(gri): pass a poser here, rather than arg.Pos(). + inferred := check.subst(arg.Pos(), tpar, smap, nil, check.context()) + // CannotInferTypeArgs indicates a failure of inference, though the actual + // error may be better attributed to a user-provided type argument (hence + // InvalidTypeArg). We can't differentiate these cases, so fall back on + // the more general CannotInferTypeArgs. 
+ if inferred != tpar { + if reverse { + check.errorf(arg, CannotInferTypeArgs, "inferred type %s for %s does not match type %s of %s", inferred, tpar, targ, arg.expr) + } else { + check.errorf(arg, CannotInferTypeArgs, "type %s of %s does not match inferred type %s for %s", targ, arg.expr, inferred, tpar) + } + } else { + check.errorf(arg, CannotInferTypeArgs, "type %s of %s does not match %s", targ, arg.expr, tpar) + } + } + + // indices of generic parameters with untyped arguments, for later use + var untyped []int + + // --- 1 --- + // use information from function arguments + + if traceInference { + u.tracef("== function parameters: %s", params) + u.tracef("-- function arguments : %s", args) + } + + for i, arg := range args { + if arg.mode == invalid { + // An error was reported earlier. Ignore this arg + // and continue, we may still be able to infer all + // targs resulting in fewer follow-on errors. + // TODO(gri) determine if we still need this check + continue + } + par := params.At(i) + if isParameterized(tparams, par.typ) || isParameterized(tparams, arg.typ) { + // Function parameters are always typed. Arguments may be untyped. + // Collect the indices of untyped arguments and handle them later. + if isTyped(arg.typ) { + if !u.unify(par.typ, arg.typ, assign) { + errorf(par.typ, arg.typ, arg) + return nil + } + } else if _, ok := par.typ.(*TypeParam); ok && !arg.isNil() { + // Since default types are all basic (i.e., non-composite) types, an + // untyped argument will never match a composite parameter type; the + // only parameter type it can possibly match against is a *TypeParam. + // Thus, for untyped arguments we only need to look at parameter types + // that are single type parameters. + // Also, untyped nils don't have a default type and can be ignored. 
+ untyped = append(untyped, i) + } + } + } + + if traceInference { + inferred := u.inferred(tparams) + u.tracef("=> %s ➞ %s\n", tparams, inferred) + } + + // --- 2 --- + // use information from type parameter constraints + + if traceInference { + u.tracef("== type parameters: %s", tparams) + } + + // Unify type parameters with their constraints as long + // as progress is being made. + // + // This is an O(n^2) algorithm where n is the number of + // type parameters: if there is progress, at least one + // type argument is inferred per iteration, and we have + // a doubly nested loop. + // + // In practice this is not a problem because the number + // of type parameters tends to be very small (< 5 or so). + // (It should be possible for unification to efficiently + // signal newly inferred type arguments; then the loops + // here could handle the respective type parameters only, + // but that will come at a cost of extra complexity which + // may not be worth it.) + for i := 0; ; i++ { + nn := u.unknowns() + if traceInference { + if i > 0 { + fmt.Println() + } + u.tracef("-- iteration %d", i) + } + + for _, tpar := range tparams { + tx := u.at(tpar) + core, single := coreTerm(tpar) + if traceInference { + u.tracef("-- type parameter %s = %s: core(%s) = %s, single = %v", tpar, tx, tpar, core, single) + } + + // If there is a core term (i.e., a core type with tilde information) + // unify the type parameter with the core type. + if core != nil { + // A type parameter can be unified with its core type in two cases. + switch { + case tx != nil: + // The corresponding type argument tx is known. There are 2 cases: + // 1) If the core type has a tilde, per spec requirement for tilde + // elements, the core type is an underlying (literal) type. + // And because of the tilde, the underlying type of tx must match + // against the core type. 
+ // But because unify automatically matches a defined type against + // an underlying literal type, we can simply unify tx with the + // core type. + // 2) If the core type doesn't have a tilde, we also must unify tx + // with the core type. + if !u.unify(tx, core.typ, 0) { + // TODO(gri) Type parameters that appear in the constraint and + // for which we have type arguments inferred should + // use those type arguments for a better error message. + check.errorf(pos, CannotInferTypeArgs, "%s (type %s) does not satisfy %s", tpar, tx, tpar.Constraint()) + return nil + } + case single && !core.tilde: + // The corresponding type argument tx is unknown and there's a single + // specific type and no tilde. + // In this case the type argument must be that single type; set it. + u.set(tpar, core.typ) + } + } else { + if tx != nil { + // We don't have a core type, but the type argument tx is known. + // It must have (at least) all the methods of the type constraint, + // and the method signatures must unify; otherwise tx cannot satisfy + // the constraint. + // TODO(gri) Now that unification handles interfaces, this code can + // be reduced to calling u.unify(tx, tpar.iface(), assign) + // (which will compare signatures exactly as we do below). + // We leave it as is for now because missingMethod provides + // a failure cause which allows for a better error message. + // Eventually, unify should return an error with cause. 
+ var cause string + constraint := tpar.iface() + if m, _ := check.missingMethod(tx, constraint, true, func(x, y Type) bool { return u.unify(x, y, exact) }, &cause); m != nil { + // TODO(gri) better error message (see TODO above) + check.errorf(pos, CannotInferTypeArgs, "%s (type %s) does not satisfy %s %s", tpar, tx, tpar.Constraint(), cause) + return nil + } + } + } + } + + if u.unknowns() == nn { + break // no progress + } + } + + if traceInference { + inferred := u.inferred(tparams) + u.tracef("=> %s ➞ %s\n", tparams, inferred) + } + + // --- 3 --- + // use information from untyped constants + + if traceInference { + u.tracef("== untyped arguments: %v", untyped) + } + + // Some generic parameters with untyped arguments may have been given a type by now. + // Collect all remaining parameters that don't have a type yet and determine the + // maximum untyped type for each of those parameters, if possible. + var maxUntyped map[*TypeParam]Type // lazily allocated (we may not need it) + for _, index := range untyped { + tpar := params.At(index).typ.(*TypeParam) // is type parameter by construction of untyped + if u.at(tpar) == nil { + arg := args[index] // arg corresponding to tpar + if maxUntyped == nil { + maxUntyped = make(map[*TypeParam]Type) + } + max := maxUntyped[tpar] + if max == nil { + max = arg.typ + } else { + m := maxType(max, arg.typ) + if m == nil { + check.errorf(arg, CannotInferTypeArgs, "mismatched types %s and %s (cannot infer %s)", max, arg.typ, tpar) + return nil + } + max = m + } + maxUntyped[tpar] = max + } + } + // maxUntyped contains the maximum untyped type for each type parameter + // which doesn't have a type yet. Set the respective default types. + for tpar, typ := range maxUntyped { + d := Default(typ) + assert(isTyped(d)) + u.set(tpar, d) + } + + // --- simplify --- + + // u.inferred(tparams) now contains the incoming type arguments plus any additional type + // arguments which were inferred. 
The inferred non-nil entries may still contain + // references to other type parameters found in constraints. + // For instance, for [A any, B interface{ []C }, C interface{ *A }], if A == int + // was given, unification produced the type list [int, []C, *A]. We eliminate the + // remaining type parameters by substituting the type parameters in this type list + // until nothing changes anymore. + inferred = u.inferred(tparams) + if debug { + for i, targ := range targs { + assert(targ == nil || inferred[i] == targ) + } + } + + // The data structure of each (provided or inferred) type represents a graph, where + // each node corresponds to a type and each (directed) vertex points to a component + // type. The substitution process described above repeatedly replaces type parameter + // nodes in these graphs with the graphs of the types the type parameters stand for, + // which creates a new (possibly bigger) graph for each type. + // The substitution process will not stop if the replacement graph for a type parameter + // also contains that type parameter. + // For instance, for [A interface{ *A }], without any type argument provided for A, + // unification produces the type list [*A]. Substituting A in *A with the value for + // A will lead to infinite expansion by producing [**A], [****A], [********A], etc., + // because the graph A -> *A has a cycle through A. + // Generally, cycles may occur across multiple type parameters and inferred types + // (for instance, consider [P interface{ *Q }, Q interface{ func(P) }]). + // We eliminate cycles by walking the graphs for all type parameters. If a cycle + // through a type parameter is detected, killCycles nils out the respective type + // (in the inferred list) which kills the cycle, and marks the corresponding type + // parameter as not inferred. + // + // TODO(gri) If useful, we could report the respective cycle as an error. 
We don't + // do this now because type inference will fail anyway, and furthermore, + // constraints with cycles of this kind cannot currently be satisfied by + // any user-supplied type. But should that change, reporting an error + // would be wrong. + killCycles(tparams, inferred) + + // dirty tracks the indices of all types that may still contain type parameters. + // We know that nil type entries and entries corresponding to provided (non-nil) + // type arguments are clean, so exclude them from the start. + var dirty []int + for i, typ := range inferred { + if typ != nil && (i >= len(targs) || targs[i] == nil) { + dirty = append(dirty, i) + } + } + + for len(dirty) > 0 { + if traceInference { + u.tracef("-- simplify %s ➞ %s", tparams, inferred) + } + // TODO(gri) Instead of creating a new substMap for each iteration, + // provide an update operation for substMaps and only change when + // needed. Optimization. + smap := makeSubstMap(tparams, inferred) + n := 0 + for _, index := range dirty { + t0 := inferred[index] + if t1 := check.subst(nopos, t0, smap, nil, check.context()); t1 != t0 { + // t0 was simplified to t1. + // If t0 was a generic function, but the simplified signature t1 does + // not contain any type parameters anymore, the function is not generic + // anymore. Remove it's type parameters. (go.dev/issue/59953) + // Note that if t0 was a signature, t1 must be a signature, and t1 + // can only be a generic signature if it originated from a generic + // function argument. Those signatures are never defined types and + // thus there is no need to call under below. + // TODO(gri) Consider doing this in Checker.subst. + // Then this would fall out automatically here and also + // in instantiation (where we also explicitly nil out + // type parameters). See the *Signature TODO in subst. 
+ if sig, _ := t1.(*Signature); sig != nil && sig.TypeParams().Len() > 0 && !isParameterized(tparams, sig) { + sig.tparams = nil + } + inferred[index] = t1 + dirty[n] = index + n++ + } + } + dirty = dirty[:n] + } + + // Once nothing changes anymore, we may still have type parameters left; + // e.g., a constraint with core type *P may match a type parameter Q but + // we don't have any type arguments to fill in for *P or Q (go.dev/issue/45548). + // Don't let such inferences escape; instead treat them as unresolved. + for i, typ := range inferred { + if typ == nil || isParameterized(tparams, typ) { + obj := tparams[i].obj + check.errorf(pos, CannotInferTypeArgs, "cannot infer %s (%s)", obj.name, obj.pos) + return nil + } + } + + return +} + +// containsNil reports whether list contains a nil entry. +func containsNil(list []Type) bool { + for _, t := range list { + if t == nil { + return true + } + } + return false +} + +// renameTParams renames the type parameters in the given type such that each type +// parameter is given a new identity. renameTParams returns the new type parameters +// and updated type. If the result type is unchanged from the argument type, none +// of the type parameters in tparams occurred in the type. +// If typ is a generic function, type parameters held with typ are not changed and +// must be updated separately if desired. +// The positions is only used for debug traces. 
func (check *Checker) renameTParams(pos syntax.Pos, tparams []*TypeParam, typ Type) ([]*TypeParam, Type) {
	// For the purpose of type inference we must differentiate type parameters
	// occurring in explicit type or value function arguments from the type
	// parameters we are solving for via unification because they may be the
	// same in self-recursive calls:
	//
	//   func f[P constraint](x P) {
	//           f(x)
	//   }
	//
	// In this example, without type parameter renaming, the P used in the
	// instantiation f[P] has the same pointer identity as the P we are trying
	// to solve for through type inference. This causes problems for type
	// unification. Because any such self-recursive call is equivalent to
	// a mutually recursive call, type parameter renaming can be used to
	// create separate, disentangled type parameters. The above example
	// can be rewritten into the following equivalent code:
	//
	//   func f[P constraint](x P) {
	//           f2(x)
	//   }
	//
	//   func f2[P2 constraint](x P2) {
	//           f(x)
	//   }
	//
	// Type parameter renaming turns the first example into the second
	// example by renaming the type parameter P into P2.
	if len(tparams) == 0 {
		return nil, typ // nothing to do
	}

	// Create fresh type parameters with new identities but the same
	// names, positions, and indices as the originals; bounds are filled
	// in below, after the rename map exists.
	tparams2 := make([]*TypeParam, len(tparams))
	for i, tparam := range tparams {
		tname := NewTypeName(tparam.Obj().Pos(), tparam.Obj().Pkg(), tparam.Obj().Name(), nil)
		tparams2[i] = NewTypeParam(tname, nil)
		tparams2[i].index = tparam.index // == i
	}

	// Bounds may refer to the (old) type parameters themselves, so each
	// bound must be substituted through the rename map as well.
	renameMap := makeRenameMap(tparams, tparams2)
	for i, tparam := range tparams {
		tparams2[i].bound = check.subst(pos, tparam.bound, renameMap, nil, check.context())
	}

	return tparams2, check.subst(pos, typ, renameMap, nil, check.context())
}

// typeParamsString produces a string containing all the type parameter names
// in list suitable for human consumption.
func typeParamsString(list []*TypeParam) string {
	// common cases
	n := len(list)
	switch n {
	case 0:
		return ""
	case 1:
		return list[0].obj.name
	case 2:
		return list[0].obj.name + " and " + list[1].obj.name
	}

	// general case (n > 2): "a, b, ..., and z" (Oxford comma)
	var buf strings.Builder
	for i, tname := range list[:n-1] {
		if i > 0 {
			buf.WriteString(", ")
		}
		buf.WriteString(tname.obj.name)
	}
	buf.WriteString(", and ")
	buf.WriteString(list[n-1].obj.name)
	return buf.String()
}

// isParameterized reports whether typ contains any of the type parameters of tparams.
// If typ is a generic function, isParameterized ignores the type parameter declarations;
// it only considers the signature proper (incoming and result parameters).
func isParameterized(tparams []*TypeParam, typ Type) bool {
	w := tpWalker{
		tparams: tparams,
		seen:    make(map[Type]bool),
	}
	return w.isParameterized(typ)
}

// tpWalker walks a type graph looking for occurrences of any of tparams.
type tpWalker struct {
	tparams []*TypeParam  // the type parameters we are looking for
	seen    map[Type]bool // cycle detection; memoizes per-type results
}

func (w *tpWalker) isParameterized(typ Type) (res bool) {
	// detect cycles
	if x, ok := w.seen[typ]; ok {
		return x
	}
	// Mark typ as in-progress (false) so a cycle back to typ terminates;
	// the deferred assignment records the final answer for memoization.
	w.seen[typ] = false
	defer func() {
		w.seen[typ] = res
	}()

	switch t := typ.(type) {
	case *Basic:
		// nothing to do

	case *Alias:
		return w.isParameterized(Unalias(t))

	case *Array:
		return w.isParameterized(t.elem)

	case *Slice:
		return w.isParameterized(t.elem)

	case *Struct:
		return w.varList(t.fields)

	case *Pointer:
		return w.isParameterized(t.base)

	case *Tuple:
		// This case does not occur from within isParameterized
		// because tuples only appear in signatures where they
		// are handled explicitly. But isParameterized is also
		// called by Checker.callExpr with a function result tuple
		// if instantiation failed (go.dev/issue/59890).
		return t != nil && w.varList(t.vars)

	case *Signature:
		// t.tparams may not be nil if we are looking at a signature
		// of a generic function type (or an interface method) that is
		// part of the type we're testing. We don't care about these type
		// parameters.
		// Similarly, the receiver of a method may declare (rather than
		// use) type parameters, we don't care about those either.
		// Thus, we only need to look at the input and result parameters.
		return t.params != nil && w.varList(t.params.vars) || t.results != nil && w.varList(t.results.vars)

	case *Interface:
		tset := t.typeSet()
		for _, m := range tset.methods {
			if w.isParameterized(m.typ) {
				return true
			}
		}
		return tset.is(func(t *term) bool {
			return t != nil && w.isParameterized(t.typ)
		})

	case *Map:
		return w.isParameterized(t.key) || w.isParameterized(t.elem)

	case *Chan:
		return w.isParameterized(t.elem)

	case *Named:
		for _, t := range t.TypeArgs().list() {
			if w.isParameterized(t) {
				return true
			}
		}

	case *TypeParam:
		return tparamIndex(w.tparams, t) >= 0

	default:
		panic(fmt.Sprintf("unexpected %T", typ))
	}

	return false
}

// varList reports whether any variable in list has a parameterized type.
func (w *tpWalker) varList(list []*Var) bool {
	for _, v := range list {
		if w.isParameterized(v.typ) {
			return true
		}
	}
	return false
}

// If the type parameter has a single specific type S, coreTerm returns (S, true).
// Otherwise, if tpar has a core type T, it returns a term corresponding to that
// core type and false. In that case, if any term of tpar has a tilde, the core
// term has a tilde. In all other cases coreTerm returns (nil, false).
func coreTerm(tpar *TypeParam) (*term, bool) {
	// Count the terms of tpar's type set; remember the last (and, if
	// n == 1, only) term and whether any term carries a tilde.
	n := 0
	var single *term // valid if n == 1
	var tilde bool
	tpar.is(func(t *term) bool {
		if t == nil {
			assert(n == 0)
			return false // no terms
		}
		n++
		single = t
		if t.tilde {
			tilde = true
		}
		return true
	})
	if n == 1 {
		if debug {
			assert(debug && under(single.typ) == coreType(tpar))
		}
		return single, true
	}
	if typ := coreType(tpar); typ != nil {
		// A core type is always an underlying type.
		// If any term of tpar has a tilde, we don't
		// have a precise core type and we must return
		// a tilde as well.
		return &term{tilde, typ}, false
	}
	return nil, false
}

// killCycles walks through the given type parameters and looks for cycles
// created by type parameters whose inferred types refer back to that type
// parameter, either directly or indirectly. If such a cycle is detected,
// it is killed by setting the corresponding inferred type to nil.
//
// TODO(gri) Determine if we can simply abort inference as soon as we have
// found a single cycle.
func killCycles(tparams []*TypeParam, inferred []Type) {
	w := cycleFinder{tparams, inferred, make(map[Type]bool)}
	for _, t := range tparams {
		w.typ(t) // t != nil
	}
}

// cycleFinder walks inferred types looking for cycles through tparams.
type cycleFinder struct {
	tparams  []*TypeParam   // type parameters being inferred
	inferred []Type         // inferred types, parallel to tparams; entries may be nil'ed out
	seen     map[Type]bool  // types on the current walk path (cycle detection)
}

func (w *cycleFinder) typ(typ Type) {
	if w.seen[typ] {
		// We have seen typ before. If it is one of the type parameters
		// in w.tparams, iterative substitution will lead to infinite expansion.
		// Nil out the corresponding type which effectively kills the cycle.
		if tpar, _ := typ.(*TypeParam); tpar != nil {
			if i := tparamIndex(w.tparams, tpar); i >= 0 {
				// cycle through tpar
				w.inferred[i] = nil
			}
		}
		// If we don't have one of our type parameters, the cycle is due
		// to an ordinary recursive type and we can just stop walking it.
		return
	}
	// Track typ only for the duration of this walk path: the deferred
	// delete allows typ to be revisited along a different path.
	w.seen[typ] = true
	defer delete(w.seen, typ)

	switch t := typ.(type) {
	case *Basic:
		// nothing to do

	case *Alias:
		w.typ(Unalias(t))

	case *Array:
		w.typ(t.elem)

	case *Slice:
		w.typ(t.elem)

	case *Struct:
		w.varList(t.fields)

	case *Pointer:
		w.typ(t.base)

	// case *Tuple:
	//      This case should not occur because tuples only appear
	//      in signatures where they are handled explicitly.

	case *Signature:
		if t.params != nil {
			w.varList(t.params.vars)
		}
		if t.results != nil {
			w.varList(t.results.vars)
		}

	case *Union:
		for _, t := range t.terms {
			w.typ(t.typ)
		}

	case *Interface:
		for _, m := range t.methods {
			w.typ(m.typ)
		}
		for _, t := range t.embeddeds {
			w.typ(t)
		}

	case *Map:
		w.typ(t.key)
		w.typ(t.elem)

	case *Chan:
		w.typ(t.elem)

	case *Named:
		for _, tpar := range t.TypeArgs().list() {
			w.typ(tpar)
		}

	case *TypeParam:
		// Follow the inferred type for this type parameter, if any;
		// a path back to t itself is caught by the w.seen check above.
		if i := tparamIndex(w.tparams, t); i >= 0 && w.inferred[i] != nil {
			w.typ(w.inferred[i])
		}

	default:
		panic(fmt.Sprintf("unexpected %T", typ))
	}
}

// varList walks the types of all variables in list.
func (w *cycleFinder) varList(list []*Var) {
	for _, v := range list {
		w.typ(v.typ)
	}
}

// If tpar is a type parameter in list, tparamIndex returns the index
// of the type parameter in list. Otherwise the result is < 0.
func tparamIndex(list []*TypeParam, tpar *TypeParam) int {
	for i, p := range list {
		if p == tpar {
			return i
		}
	}
	return -1
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/initorder.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/initorder.go
new file mode 100644
index 0000000000000000000000000000000000000000..841b725b17160a9dfad143814102c958b838deb8
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/initorder.go
@@ -0,0 +1,336 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package types2

import (
	"container/heap"
	"fmt"
	. "internal/types/errors"
	"sort"
)

// initOrder computes the Info.InitOrder for package variables.
func (check *Checker) initOrder() {
	// An InitOrder may already have been computed if a package is
	// built from several calls to (*Checker).Files. Clear it.
	check.Info.InitOrder = check.Info.InitOrder[:0]

	// Compute the object dependency graph and initialize
	// a priority queue with the list of graph nodes.
	pq := nodeQueue(dependencyGraph(check.objMap))
	heap.Init(&pq)

	const debug = false
	if debug {
		fmt.Printf("Computing initialization order for %s\n\n", check.pkg)
		fmt.Println("Object dependency graph:")
		for obj, d := range check.objMap {
			// only print objects that may appear in the dependency graph
			if obj, _ := obj.(dependency); obj != nil {
				if len(d.deps) > 0 {
					fmt.Printf("\t%s depends on\n", obj.Name())
					for dep := range d.deps {
						fmt.Printf("\t\t%s\n", dep.Name())
					}
				} else {
					fmt.Printf("\t%s has no dependencies\n", obj.Name())
				}
			}
		}
		fmt.Println()

		fmt.Println("Transposed object dependency graph (functions eliminated):")
		for _, n := range pq {
			fmt.Printf("\t%s depends on %d nodes\n", n.obj.Name(), n.ndeps)
			for p := range n.pred {
				fmt.Printf("\t\t%s is dependent\n", p.obj.Name())
			}
		}
		fmt.Println()

		fmt.Println("Processing nodes:")
	}

	// Determine initialization order by removing the highest priority node
	// (the one with the fewest dependencies) and its edges from the graph,
	// repeatedly, until there are no nodes left.
	// In a valid Go program, those nodes always have zero dependencies (after
	// removing all incoming dependencies), otherwise there are initialization
	// cycles.
	emitted := make(map[*declInfo]bool)
	for len(pq) > 0 {
		// get the next node
		n := heap.Pop(&pq).(*graphNode)

		if debug {
			fmt.Printf("\t%s (src pos %d) depends on %d nodes now\n",
				n.obj.Name(), n.obj.order(), n.ndeps)
		}

		// if n still depends on other nodes, we have a cycle
		if n.ndeps > 0 {
			cycle := findPath(check.objMap, n.obj, n.obj, make(map[Object]bool))
			// If n.obj is not part of the cycle (e.g., n.obj->b->c->d->c),
			// cycle will be nil. Don't report anything in that case since
			// the cycle is reported when the algorithm gets to an object
			// in the cycle.
			// Furthermore, once an object in the cycle is encountered,
			// the cycle will be broken (dependency count will be reduced
			// below), and so the remaining nodes in the cycle don't trigger
			// another error (unless they are part of multiple cycles).
			if cycle != nil {
				check.reportCycle(cycle)
			}
			// Ok to continue, but the variable initialization order
			// will be incorrect at this point since it assumes no
			// cycle errors.
		}

		// reduce dependency count of all dependent nodes
		// and update priority queue
		for p := range n.pred {
			p.ndeps--
			heap.Fix(&pq, p.index)
		}

		// record the init order for variables with initializers only
		// (n.obj may be a constant or function node; the nil check below
		// short-circuits before info.hasInitializer is called in that case)
		v, _ := n.obj.(*Var)
		info := check.objMap[v]
		if v == nil || !info.hasInitializer() {
			continue
		}

		// n:1 variable declarations such as: a, b = f()
		// introduce a node for each lhs variable (here: a, b);
		// but they all have the same initializer - emit only
		// one, for the first variable seen
		if emitted[info] {
			continue // initializer already emitted, if any
		}
		emitted[info] = true

		infoLhs := info.lhs // possibly nil (see declInfo.lhs field comment)
		if infoLhs == nil {
			infoLhs = []*Var{v}
		}
		init := &Initializer{infoLhs, info.init}
		check.Info.InitOrder = append(check.Info.InitOrder, init)
	}

	if debug {
		fmt.Println()
		fmt.Println("Initialization order:")
		for _, init := range check.Info.InitOrder {
			fmt.Printf("\t%s\n", init)
		}
		fmt.Println()
	}
}

// findPath returns the (reversed) list of objects []Object{to, ... from}
// such that there is a path of object dependencies from 'from' to 'to'.
// If there is no such path, the result is nil.
func findPath(objMap map[Object]*declInfo, from, to Object, seen map[Object]bool) []Object {
	if seen[from] {
		return nil
	}
	seen[from] = true

	for d := range objMap[from].deps {
		if d == to {
			return []Object{d}
		}
		if P := findPath(objMap, d, to, seen); P != nil {
			return append(P, d)
		}
	}

	return nil
}

// reportCycle reports an error for the given cycle.
func (check *Checker) reportCycle(cycle []Object) {
	obj := cycle[0]

	// report a more concise error for self references
	if len(cycle) == 1 {
		check.errorf(obj, InvalidInitCycle, "initialization cycle: %s refers to itself", obj.Name())
		return
	}

	var err error_
	err.code = InvalidInitCycle
	err.errorf(obj, "initialization cycle for %s", obj.Name())
	// subtle loop: print cycle[i] for i = 0, n-1, n-2, ... 1 for len(cycle) = n
	for i := len(cycle) - 1; i >= 0; i-- {
		err.errorf(obj, "%s refers to", obj.Name())
		obj = cycle[i]
	}
	// print cycle[0] again to close the cycle
	err.errorf(obj, "%s", obj.Name())
	check.report(&err)
}

// ----------------------------------------------------------------------------
// Object dependency graph

// A dependency is an object that may be a dependency in an initialization
// expression. Only constants, variables, and functions can be dependencies.
// Constants are here because constant expression cycles are reported during
// initialization order computation.
type dependency interface {
	Object
	isDependency()
}

// A graphNode represents a node in the object dependency graph.
// Each node p in n.pred represents an edge p->n, and each node
// s in n.succ represents an edge n->s; with a->b indicating that
// a depends on b.
type graphNode struct {
	obj        dependency // object represented by this node
	pred, succ nodeSet    // consumers and dependencies of this node (lazily initialized)
	index      int        // node index in graph slice/priority queue
	ndeps      int        // number of outstanding dependencies before this object can be initialized
}

// cost returns the cost of removing this node, which involves copying each
// predecessor to each successor (and vice-versa).
+func (n *graphNode) cost() int { + return len(n.pred) * len(n.succ) +} + +type nodeSet map[*graphNode]bool + +func (s *nodeSet) add(p *graphNode) { + if *s == nil { + *s = make(nodeSet) + } + (*s)[p] = true +} + +// dependencyGraph computes the object dependency graph from the given objMap, +// with any function nodes removed. The resulting graph contains only constants +// and variables. +func dependencyGraph(objMap map[Object]*declInfo) []*graphNode { + // M is the dependency (Object) -> graphNode mapping + M := make(map[dependency]*graphNode) + for obj := range objMap { + // only consider nodes that may be an initialization dependency + if obj, _ := obj.(dependency); obj != nil { + M[obj] = &graphNode{obj: obj} + } + } + + // compute edges for graph M + // (We need to include all nodes, even isolated ones, because they still need + // to be scheduled for initialization in correct order relative to other nodes.) + for obj, n := range M { + // for each dependency obj -> d (= deps[i]), create graph edges n->s and s->n + for d := range objMap[obj].deps { + // only consider nodes that may be an initialization dependency + if d, _ := d.(dependency); d != nil { + d := M[d] + n.succ.add(d) + d.pred.add(n) + } + } + } + + var G, funcG []*graphNode // separate non-functions and functions + for _, n := range M { + if _, ok := n.obj.(*Func); ok { + funcG = append(funcG, n) + } else { + G = append(G, n) + } + } + + // remove function nodes and collect remaining graph nodes in G + // (Mutually recursive functions may introduce cycles among themselves + // which are permitted. Yet such cycles may incorrectly inflate the dependency + // count for variables which in turn may not get scheduled for initialization + // in correct order.) + // + // Note that because we recursively copy predecessors and successors + // throughout the function graph, the cost of removing a function at + // position X is proportional to cost * (len(funcG)-X). 
Therefore, we should + // remove high-cost functions last. + sort.Slice(funcG, func(i, j int) bool { + return funcG[i].cost() < funcG[j].cost() + }) + for _, n := range funcG { + // connect each predecessor p of n with each successor s + // and drop the function node (don't collect it in G) + for p := range n.pred { + // ignore self-cycles + if p != n { + // Each successor s of n becomes a successor of p, and + // each predecessor p of n becomes a predecessor of s. + for s := range n.succ { + // ignore self-cycles + if s != n { + p.succ.add(s) + s.pred.add(p) + } + } + delete(p.succ, n) // remove edge to n + } + } + for s := range n.succ { + delete(s.pred, n) // remove edge to n + } + } + + // fill in index and ndeps fields + for i, n := range G { + n.index = i + n.ndeps = len(n.succ) + } + + return G +} + +// ---------------------------------------------------------------------------- +// Priority queue + +// nodeQueue implements the container/heap interface; +// a nodeQueue may be used as a priority queue. +type nodeQueue []*graphNode + +func (a nodeQueue) Len() int { return len(a) } + +func (a nodeQueue) Swap(i, j int) { + x, y := a[i], a[j] + a[i], a[j] = y, x + x.index, y.index = j, i +} + +func (a nodeQueue) Less(i, j int) bool { + x, y := a[i], a[j] + + // Prioritize all constants before non-constants. See go.dev/issue/66575/. 
+ _, xConst := x.obj.(*Const) + _, yConst := y.obj.(*Const) + if xConst != yConst { + return xConst + } + + // nodes are prioritized by number of incoming dependencies (1st key) + // and source order (2nd key) + return x.ndeps < y.ndeps || x.ndeps == y.ndeps && x.obj.order() < y.obj.order() +} + +func (a *nodeQueue) Push(x interface{}) { + panic("unreachable") +} + +func (a *nodeQueue) Pop() interface{} { + n := len(*a) + x := (*a)[n-1] + x.index = -1 // for safety + *a = (*a)[:n-1] + return x +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/instantiate.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/instantiate.go new file mode 100644 index 0000000000000000000000000000000000000000..e33d4b41c2716b238fc224f3092c4a3520dcb287 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/instantiate.go @@ -0,0 +1,366 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements instantiation of generic types +// through substitution of type parameters by type arguments. + +package types2 + +import ( + "cmd/compile/internal/syntax" + "errors" + "fmt" + . "internal/types/errors" +) + +// Instantiate instantiates the type orig with the given type arguments targs. +// orig must be a *Named or a *Signature type. If there is no error, the +// resulting Type is an instantiated type of the same kind (either a *Named or +// a *Signature). Methods attached to a *Named type are also instantiated, and +// associated with a new *Func that has the same position as the original +// method, but nil function scope. +// +// If ctxt is non-nil, it may be used to de-duplicate the instance against +// previous instances with the same identity. 
// As a special case, generic
// *Signature origin types are only considered identical if they are pointer
// equivalent, so that instantiating distinct (but possibly identical)
// signatures will yield different instances. The use of a shared context does
// not guarantee that identical instances are deduplicated in all cases.
//
// If validate is set, Instantiate verifies that the number of type arguments
// and parameters match, and that the type arguments satisfy their
// corresponding type constraints. If verification fails, the resulting error
// may wrap an *ArgumentError indicating which type argument did not satisfy
// its corresponding type parameter constraint, and why.
//
// If validate is not set, Instantiate does not verify the type argument count
// or whether the type arguments satisfy their constraints. Instantiate is
// guaranteed to not return an error, but may panic. Specifically, for
// *Signature types, Instantiate will panic immediately if the type argument
// count is incorrect; for *Named types, a panic may occur later inside the
// *Named API.
func Instantiate(ctxt *Context, orig Type, targs []Type, validate bool) (Type, error) {
	if ctxt == nil {
		ctxt = NewContext()
	}
	if validate {
		// Collect the type parameters of orig; for any other kind of
		// orig, tparams stays nil and a non-empty targs is rejected below.
		var tparams []*TypeParam
		switch t := orig.(type) {
		case *Named:
			tparams = t.TypeParams().list()
		case *Signature:
			tparams = t.TypeParams().list()
		}
		if len(targs) != len(tparams) {
			return nil, fmt.Errorf("got %d type arguments but %s has %d type parameters", len(targs), orig, len(tparams))
		}
		// Explicit nil *Checker receiver: Instantiate runs outside any
		// type-checking pass (verify/instance tolerate a nil receiver).
		if i, err := (*Checker)(nil).verify(nopos, tparams, targs, ctxt); err != nil {
			return nil, &ArgumentError{i, err}
		}
	}

	inst := (*Checker)(nil).instance(nopos, orig, targs, nil, ctxt)
	return inst, nil
}

// instance instantiates the given original (generic) function or type with the
// provided type arguments and returns the resulting instance.
If an identical +// instance exists already in the given contexts, it returns that instance, +// otherwise it creates a new one. +// +// If expanding is non-nil, it is the Named instance type currently being +// expanded. If ctxt is non-nil, it is the context associated with the current +// type-checking pass or call to Instantiate. At least one of expanding or ctxt +// must be non-nil. +// +// For Named types the resulting instance may be unexpanded. +func (check *Checker) instance(pos syntax.Pos, orig Type, targs []Type, expanding *Named, ctxt *Context) (res Type) { + // The order of the contexts below matters: we always prefer instances in the + // expanding instance context in order to preserve reference cycles. + // + // Invariant: if expanding != nil, the returned instance will be the instance + // recorded in expanding.inst.ctxt. + var ctxts []*Context + if expanding != nil { + ctxts = append(ctxts, expanding.inst.ctxt) + } + if ctxt != nil { + ctxts = append(ctxts, ctxt) + } + assert(len(ctxts) > 0) + + // Compute all hashes; hashes may differ across contexts due to different + // unique IDs for Named types within the hasher. + hashes := make([]string, len(ctxts)) + for i, ctxt := range ctxts { + hashes[i] = ctxt.instanceHash(orig, targs) + } + + // If local is non-nil, updateContexts return the type recorded in + // local. + updateContexts := func(res Type) Type { + for i := len(ctxts) - 1; i >= 0; i-- { + res = ctxts[i].update(hashes[i], orig, targs, res) + } + return res + } + + // typ may already have been instantiated with identical type arguments. In + // that case, re-use the existing instance. 
+ for i, ctxt := range ctxts { + if inst := ctxt.lookup(hashes[i], orig, targs); inst != nil { + return updateContexts(inst) + } + } + + switch orig := orig.(type) { + case *Named: + res = check.newNamedInstance(pos, orig, targs, expanding) // substituted lazily + + case *Signature: + assert(expanding == nil) // function instances cannot be reached from Named types + + tparams := orig.TypeParams() + // TODO(gri) investigate if this is needed (type argument and parameter count seem to be correct here) + if !check.validateTArgLen(pos, orig.String(), tparams.Len(), len(targs)) { + return Typ[Invalid] + } + if tparams.Len() == 0 { + return orig // nothing to do (minor optimization) + } + sig := check.subst(pos, orig, makeSubstMap(tparams.list(), targs), nil, ctxt).(*Signature) + // If the signature doesn't use its type parameters, subst + // will not make a copy. In that case, make a copy now (so + // we can set tparams to nil w/o causing side-effects). + if sig == orig { + copy := *sig + sig = © + } + // After instantiating a generic signature, it is not generic + // anymore; we need to set tparams to nil. + sig.tparams = nil + res = sig + + default: + // only types and functions can be generic + panic(fmt.Sprintf("%v: cannot instantiate %v", pos, orig)) + } + + // Update all contexts; it's possible that we've lost a race. + return updateContexts(res) +} + +// validateTArgLen checks that the number of type arguments (got) matches the +// number of type parameters (want); if they don't match an error is reported. +// If validation fails and check is nil, validateTArgLen panics. 
func (check *Checker) validateTArgLen(pos syntax.Pos, name string, want, got int) bool {
	// Classify the mismatch; on an exact match there is nothing to report.
	var qual string
	switch {
	case got < want:
		qual = "not enough"
	case got > want:
		qual = "too many"
	default:
		return true
	}

	// check.sprintf is safe on a nil receiver (exported API entry points
	// call this with check == nil — see Instantiate).
	msg := check.sprintf("%s type arguments for type %s: have %d, want %d", qual, name, got, want)
	if check != nil {
		check.error(atPos(pos), WrongTypeArgCount, msg)
		return false
	}

	panic(fmt.Sprintf("%v: %s", pos, msg))
}

// verify checks that each type argument in targs satisfies the constraint of
// the corresponding type parameter in tparams. On failure it returns the index
// of the offending type argument and an error describing the cause; on success
// it returns (-1, nil).
func (check *Checker) verify(pos syntax.Pos, tparams []*TypeParam, targs []Type, ctxt *Context) (int, error) {
	smap := makeSubstMap(tparams, targs)
	for i, tpar := range tparams {
		// Ensure that we have a (possibly implicit) interface as type bound (go.dev/issue/51048).
		tpar.iface()
		// The type parameter bound is parameterized with the same type parameters
		// as the instantiated type; before we can use it for bounds checking we
		// need to instantiate it with the type arguments with which we instantiated
		// the parameterized type.
		bound := check.subst(pos, tpar.bound, smap, nil, ctxt)
		var cause string
		if !check.implements(pos, targs[i], bound, true, &cause) {
			return i, errors.New(cause)
		}
	}
	return -1, nil
}

// implements checks if V implements T. The receiver may be nil if implements
// is called through an exported API call such as AssignableTo. If constraint
// is set, T is a type constraint.
//
// If the provided cause is non-nil, it may be set to an error string
// explaining why V does not implement (or satisfy, for constraints) T.
+func (check *Checker) implements(pos syntax.Pos, V, T Type, constraint bool, cause *string) bool { + Vu := under(V) + Tu := under(T) + if !isValid(Vu) || !isValid(Tu) { + return true // avoid follow-on errors + } + if p, _ := Vu.(*Pointer); p != nil && !isValid(under(p.base)) { + return true // avoid follow-on errors (see go.dev/issue/49541 for an example) + } + + verb := "implement" + if constraint { + verb = "satisfy" + } + + Ti, _ := Tu.(*Interface) + if Ti == nil { + if cause != nil { + var detail string + if isInterfacePtr(Tu) { + detail = check.sprintf("type %s is pointer to interface, not interface", T) + } else { + detail = check.sprintf("%s is not an interface", T) + } + *cause = check.sprintf("%s does not %s %s (%s)", V, verb, T, detail) + } + return false + } + + // Every type satisfies the empty interface. + if Ti.Empty() { + return true + } + // T is not the empty interface (i.e., the type set of T is restricted) + + // An interface V with an empty type set satisfies any interface. + // (The empty set is a subset of any set.) + Vi, _ := Vu.(*Interface) + if Vi != nil && Vi.typeSet().IsEmpty() { + return true + } + // type set of V is not empty + + // No type with non-empty type set satisfies the empty type set. + if Ti.typeSet().IsEmpty() { + if cause != nil { + *cause = check.sprintf("cannot %s %s (empty type set)", verb, T) + } + return false + } + + // V must implement T's methods, if any. + if m, _ := check.missingMethod(V, T, true, Identical, cause); m != nil /* !Implements(V, T) */ { + if cause != nil { + *cause = check.sprintf("%s does not %s %s %s", V, verb, T, *cause) + } + return false + } + + // Only check comparability if we don't have a more specific error. + checkComparability := func() bool { + if !Ti.IsComparable() { + return true + } + // If T is comparable, V must be comparable. + // If V is strictly comparable, we're done. 
+ if comparable(V, false /* strict comparability */, nil, nil) { + return true + } + // For constraint satisfaction, use dynamic (spec) comparability + // so that ordinary, non-type parameter interfaces implement comparable. + if constraint && comparable(V, true /* spec comparability */, nil, nil) { + // V is comparable if we are at Go 1.20 or higher. + if check == nil || check.allowVersion(check.pkg, atPos(pos), go1_20) { // atPos needed so that go/types generate passes + return true + } + if cause != nil { + *cause = check.sprintf("%s to %s comparable requires go1.20 or later", V, verb) + } + return false + } + if cause != nil { + *cause = check.sprintf("%s does not %s comparable", V, verb) + } + return false + } + + // V must also be in the set of types of T, if any. + // Constraints with empty type sets were already excluded above. + if !Ti.typeSet().hasTerms() { + return checkComparability() // nothing to do + } + + // If V is itself an interface, each of its possible types must be in the set + // of T types (i.e., the V type set must be a subset of the T type set). + // Interfaces V with empty type sets were already excluded above. + if Vi != nil { + if !Vi.typeSet().subsetOf(Ti.typeSet()) { + // TODO(gri) report which type is missing + if cause != nil { + *cause = check.sprintf("%s does not %s %s", V, verb, T) + } + return false + } + return checkComparability() + } + + // Otherwise, V's type must be included in the iface type set. + var alt Type + if Ti.typeSet().is(func(t *term) bool { + if !t.includes(V) { + // If V ∉ t.typ but V ∈ ~t.typ then remember this type + // so we can suggest it as an alternative in the error + // message. 
+ if alt == nil && !t.tilde && Identical(t.typ, under(t.typ)) { + tt := *t + tt.tilde = true + if tt.includes(V) { + alt = t.typ + } + } + return true + } + return false + }) { + if cause != nil { + var detail string + switch { + case alt != nil: + detail = check.sprintf("possibly missing ~ for %s in %s", alt, T) + case mentions(Ti, V): + detail = check.sprintf("%s mentions %s, but %s is not in the type set of %s", T, V, V, T) + default: + detail = check.sprintf("%s missing in %s", V, Ti.typeSet().terms) + } + *cause = check.sprintf("%s does not %s %s (%s)", V, verb, T, detail) + } + return false + } + + return checkComparability() +} + +// mentions reports whether type T "mentions" typ in an (embedded) element or term +// of T (whether typ is in the type set of T or not). For better error messages. +func mentions(T, typ Type) bool { + switch T := T.(type) { + case *Interface: + for _, e := range T.embeddeds { + if mentions(e, typ) { + return true + } + } + case *Union: + for _, t := range T.terms { + if mentions(t.typ, typ) { + return true + } + } + default: + if Identical(T, typ) { + return true + } + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/instantiate_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/instantiate_test.go new file mode 100644 index 0000000000000000000000000000000000000000..af772b993ce1b2e232bd61fa923755bc81f785a3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/instantiate_test.go @@ -0,0 +1,232 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package types2_test + +import ( + . 
"cmd/compile/internal/types2" + "strings" + "testing" +) + +func TestInstantiateEquality(t *testing.T) { + emptySignature := NewSignatureType(nil, nil, nil, nil, nil, false) + tests := []struct { + src string + name1 string + targs1 []Type + name2 string + targs2 []Type + wantEqual bool + }{ + { + "package basictype; type T[P any] int", + "T", []Type{Typ[Int]}, + "T", []Type{Typ[Int]}, + true, + }, + { + "package differenttypeargs; type T[P any] int", + "T", []Type{Typ[Int]}, + "T", []Type{Typ[String]}, + false, + }, + { + "package typeslice; type T[P any] int", + "T", []Type{NewSlice(Typ[Int])}, + "T", []Type{NewSlice(Typ[Int])}, + true, + }, + { + // interface{interface{...}} is equivalent to interface{...} + "package equivalentinterfaces; type T[P any] int", + "T", []Type{ + NewInterfaceType([]*Func{NewFunc(nopos, nil, "M", emptySignature)}, nil), + }, + "T", []Type{ + NewInterfaceType( + nil, + []Type{ + NewInterfaceType([]*Func{NewFunc(nopos, nil, "M", emptySignature)}, nil), + }, + ), + }, + true, + }, + { + // int|string is equivalent to string|int + "package equivalenttypesets; type T[P any] int", + "T", []Type{ + NewInterfaceType(nil, []Type{ + NewUnion([]*Term{NewTerm(false, Typ[Int]), NewTerm(false, Typ[String])}), + }), + }, + "T", []Type{ + NewInterfaceType(nil, []Type{ + NewUnion([]*Term{NewTerm(false, Typ[String]), NewTerm(false, Typ[Int])}), + }), + }, + true, + }, + { + "package basicfunc; func F[P any]() {}", + "F", []Type{Typ[Int]}, + "F", []Type{Typ[Int]}, + true, + }, + { + "package funcslice; func F[P any]() {}", + "F", []Type{NewSlice(Typ[Int])}, + "F", []Type{NewSlice(Typ[Int])}, + true, + }, + { + "package funcwithparams; func F[P any](x string) float64 { return 0 }", + "F", []Type{Typ[Int]}, + "F", []Type{Typ[Int]}, + true, + }, + { + "package differentfuncargs; func F[P any](x string) float64 { return 0 }", + "F", []Type{Typ[Int]}, + "F", []Type{Typ[String]}, + false, + }, + { + "package funcequality; func F1[P any](x int) {}; func F2[Q 
any](x int) {}", + "F1", []Type{Typ[Int]}, + "F2", []Type{Typ[Int]}, + false, + }, + { + "package funcsymmetry; func F1[P any](x P) {}; func F2[Q any](x Q) {}", + "F1", []Type{Typ[Int]}, + "F2", []Type{Typ[Int]}, + false, + }, + } + + for _, test := range tests { + pkg := mustTypecheck(test.src, nil, nil) + + t.Run(pkg.Name(), func(t *testing.T) { + ctxt := NewContext() + + T1 := pkg.Scope().Lookup(test.name1).Type() + res1, err := Instantiate(ctxt, T1, test.targs1, false) + if err != nil { + t.Fatal(err) + } + + T2 := pkg.Scope().Lookup(test.name2).Type() + res2, err := Instantiate(ctxt, T2, test.targs2, false) + if err != nil { + t.Fatal(err) + } + + if gotEqual := res1 == res2; gotEqual != test.wantEqual { + t.Errorf("%s == %s: %t, want %t", res1, res2, gotEqual, test.wantEqual) + } + }) + } +} + +func TestInstantiateNonEquality(t *testing.T) { + const src = "package p; type T[P any] int" + pkg1 := mustTypecheck(src, nil, nil) + pkg2 := mustTypecheck(src, nil, nil) + // We consider T1 and T2 to be distinct types, so their instances should not + // be deduplicated by the context. 
+ T1 := pkg1.Scope().Lookup("T").Type().(*Named) + T2 := pkg2.Scope().Lookup("T").Type().(*Named) + ctxt := NewContext() + res1, err := Instantiate(ctxt, T1, []Type{Typ[Int]}, false) + if err != nil { + t.Fatal(err) + } + res2, err := Instantiate(ctxt, T2, []Type{Typ[Int]}, false) + if err != nil { + t.Fatal(err) + } + if res1 == res2 { + t.Errorf("instance from pkg1 (%s) is pointer-equivalent to instance from pkg2 (%s)", res1, res2) + } + if Identical(res1, res2) { + t.Errorf("instance from pkg1 (%s) is identical to instance from pkg2 (%s)", res1, res2) + } +} + +func TestMethodInstantiation(t *testing.T) { + const prefix = `package p + +type T[P any] struct{} + +var X T[int] + +` + tests := []struct { + decl string + want string + }{ + {"func (r T[P]) m() P", "func (T[int]).m() int"}, + {"func (r T[P]) m(P)", "func (T[int]).m(int)"}, + {"func (r *T[P]) m(P)", "func (*T[int]).m(int)"}, + {"func (r T[P]) m() T[P]", "func (T[int]).m() T[int]"}, + {"func (r T[P]) m(T[P])", "func (T[int]).m(T[int])"}, + {"func (r T[P]) m(T[P], P, string)", "func (T[int]).m(T[int], int, string)"}, + {"func (r T[P]) m(T[P], T[string], T[int])", "func (T[int]).m(T[int], T[string], T[int])"}, + } + + for _, test := range tests { + src := prefix + test.decl + pkg := mustTypecheck(src, nil, nil) + typ := NewPointer(pkg.Scope().Lookup("X").Type()) + obj, _, _ := LookupFieldOrMethod(typ, false, pkg, "m") + m, _ := obj.(*Func) + if m == nil { + t.Fatalf(`LookupFieldOrMethod(%s, "m") = %v, want func m`, typ, obj) + } + if got := ObjectString(m, RelativeTo(pkg)); got != test.want { + t.Errorf("instantiated %q, want %q", got, test.want) + } + } +} + +func TestImmutableSignatures(t *testing.T) { + const src = `package p + +type T[P any] struct{} + +func (T[P]) m() {} + +var _ T[int] +` + pkg := mustTypecheck(src, nil, nil) + typ := pkg.Scope().Lookup("T").Type().(*Named) + obj, _, _ := LookupFieldOrMethod(typ, false, pkg, "m") + if obj == nil { + t.Fatalf(`LookupFieldOrMethod(%s, "m") = %v, want 
func m`, typ, obj) + } + + // Verify that the original method is not mutated by instantiating T (this + // bug manifested when subst did not return a new signature). + want := "func (T[P]).m()" + if got := stripAnnotations(ObjectString(obj, RelativeTo(pkg))); got != want { + t.Errorf("instantiated %q, want %q", got, want) + } +} + +// Copied from errors.go. +func stripAnnotations(s string) string { + var buf strings.Builder + for _, r := range s { + // strip #'s and subscript digits + if r < '₀' || '₀'+10 <= r { // '₀' == U+2080 + buf.WriteRune(r) + } + } + if buf.Len() < len(s) { + return buf.String() + } + return s +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/interface.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/interface.go new file mode 100644 index 0000000000000000000000000000000000000000..4072098e05234d94fe071f02f2d8e38863778f7c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/interface.go @@ -0,0 +1,186 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import ( + "cmd/compile/internal/syntax" + . "internal/types/errors" +) + +// ---------------------------------------------------------------------------- +// API + +// An Interface represents an interface type. 
+type Interface struct { + check *Checker // for error reporting; nil once type set is computed + methods []*Func // ordered list of explicitly declared methods + embeddeds []Type // ordered list of explicitly embedded elements + embedPos *[]syntax.Pos // positions of embedded elements; or nil (for error messages) - use pointer to save space + implicit bool // interface is wrapper for type set literal (non-interface T, ~T, or A|B) + complete bool // indicates that all fields (except for tset) are set up + + tset *_TypeSet // type set described by this interface, computed lazily +} + +// typeSet returns the type set for interface t. +func (t *Interface) typeSet() *_TypeSet { return computeInterfaceTypeSet(t.check, nopos, t) } + +// emptyInterface represents the empty interface +var emptyInterface = Interface{complete: true, tset: &topTypeSet} + +// NewInterfaceType returns a new interface for the given methods and embedded types. +// NewInterfaceType takes ownership of the provided methods and may modify their types +// by setting missing receivers. +func NewInterfaceType(methods []*Func, embeddeds []Type) *Interface { + if len(methods) == 0 && len(embeddeds) == 0 { + return &emptyInterface + } + + // set method receivers if necessary + typ := (*Checker)(nil).newInterface() + for _, m := range methods { + if sig := m.typ.(*Signature); sig.recv == nil { + sig.recv = NewVar(m.pos, m.pkg, "", typ) + } + } + + // sort for API stability + sortMethods(methods) + + typ.methods = methods + typ.embeddeds = embeddeds + typ.complete = true + + return typ +} + +// check may be nil +func (check *Checker) newInterface() *Interface { + typ := &Interface{check: check} + if check != nil { + check.needsCleanup(typ) + } + return typ +} + +// MarkImplicit marks the interface t as implicit, meaning this interface +// corresponds to a constraint literal such as ~T or A|B without explicit +// interface embedding. 
MarkImplicit should be called before any concurrent use +// of implicit interfaces. +func (t *Interface) MarkImplicit() { + t.implicit = true +} + +// NumExplicitMethods returns the number of explicitly declared methods of interface t. +func (t *Interface) NumExplicitMethods() int { return len(t.methods) } + +// ExplicitMethod returns the i'th explicitly declared method of interface t for 0 <= i < t.NumExplicitMethods(). +// The methods are ordered by their unique Id. +func (t *Interface) ExplicitMethod(i int) *Func { return t.methods[i] } + +// NumEmbeddeds returns the number of embedded types in interface t. +func (t *Interface) NumEmbeddeds() int { return len(t.embeddeds) } + +// EmbeddedType returns the i'th embedded type of interface t for 0 <= i < t.NumEmbeddeds(). +func (t *Interface) EmbeddedType(i int) Type { return t.embeddeds[i] } + +// NumMethods returns the total number of methods of interface t. +func (t *Interface) NumMethods() int { return t.typeSet().NumMethods() } + +// Method returns the i'th method of interface t for 0 <= i < t.NumMethods(). +// The methods are ordered by their unique Id. +func (t *Interface) Method(i int) *Func { return t.typeSet().Method(i) } + +// Empty reports whether t is the empty interface. +func (t *Interface) Empty() bool { return t.typeSet().IsAll() } + +// IsComparable reports whether each type in interface t's type set is comparable. +func (t *Interface) IsComparable() bool { return t.typeSet().IsComparable(nil) } + +// IsMethodSet reports whether the interface t is fully described by its method set. +func (t *Interface) IsMethodSet() bool { return t.typeSet().IsMethodSet() } + +// IsImplicit reports whether the interface t is a wrapper for a type set literal. 
+func (t *Interface) IsImplicit() bool { return t.implicit } + +func (t *Interface) Underlying() Type { return t } +func (t *Interface) String() string { return TypeString(t, nil) } + +// ---------------------------------------------------------------------------- +// Implementation + +func (t *Interface) cleanup() { + t.typeSet() // any interface that escapes type checking must be safe for concurrent use + t.check = nil + t.embedPos = nil +} + +func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType, def *TypeName) { + addEmbedded := func(pos syntax.Pos, typ Type) { + ityp.embeddeds = append(ityp.embeddeds, typ) + if ityp.embedPos == nil { + ityp.embedPos = new([]syntax.Pos) + } + *ityp.embedPos = append(*ityp.embedPos, pos) + } + + for _, f := range iface.MethodList { + if f.Name == nil { + addEmbedded(atPos(f.Type), parseUnion(check, f.Type)) + continue + } + // f.Name != nil + + // We have a method with name f.Name. + name := f.Name.Value + if name == "_" { + check.error(f.Name, BlankIfaceMethod, "methods must have a unique non-blank name") + continue // ignore + } + + typ := check.typ(f.Type) + sig, _ := typ.(*Signature) + if sig == nil { + if isValid(typ) { + check.errorf(f.Type, InvalidSyntaxTree, "%s is not a method signature", typ) + } + continue // ignore + } + + // use named receiver type if available (for better error messages) + var recvTyp Type = ityp + if def != nil { + if named := asNamed(def.typ); named != nil { + recvTyp = named + } + } + sig.recv = NewVar(f.Name.Pos(), check.pkg, "", recvTyp) + + m := NewFunc(f.Name.Pos(), check.pkg, name, sig) + check.recordDef(f.Name, m) + ityp.methods = append(ityp.methods, m) + } + + // All methods and embedded elements for this interface are collected; + // i.e., this interface may be used in a type set computation. 
+ ityp.complete = true + + if len(ityp.methods) == 0 && len(ityp.embeddeds) == 0 { + // empty interface + ityp.tset = &topTypeSet + return + } + + // sort for API stability + // (don't sort embeddeds: they must correspond to *embedPos entries) + sortMethods(ityp.methods) + + // Compute type set as soon as possible to report any errors. + // Subsequent uses of type sets will use this computed type + // set and won't need to pass in a *Checker. + check.later(func() { + computeInterfaceTypeSet(check, iface.Pos(), ityp) + }).describef(iface, "compute type set for %s", ityp) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/issues_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/issues_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0117571f7b043c3f027626d663079743be92f82f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/issues_test.go @@ -0,0 +1,1095 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements tests for various issues. + +package types2_test + +import ( + "cmd/compile/internal/syntax" + "fmt" + "internal/testenv" + "regexp" + "sort" + "strings" + "testing" + + . 
"cmd/compile/internal/types2" +) + +func TestIssue5770(t *testing.T) { + _, err := typecheck(`package p; type S struct{T}`, nil, nil) + const want = "undefined: T" + if err == nil || !strings.Contains(err.Error(), want) { + t.Errorf("got: %v; want: %s", err, want) + } +} + +func TestIssue5849(t *testing.T) { + src := ` +package p +var ( + s uint + _ = uint8(8) + _ = uint16(16) << s + _ = uint32(32 << s) + _ = uint64(64 << s + s) + _ = (interface{})("foo") + _ = (interface{})(nil) +)` + types := make(map[syntax.Expr]TypeAndValue) + mustTypecheck(src, nil, &Info{Types: types}) + + for x, tv := range types { + var want Type + switch x := x.(type) { + case *syntax.BasicLit: + switch x.Value { + case `8`: + want = Typ[Uint8] + case `16`: + want = Typ[Uint16] + case `32`: + want = Typ[Uint32] + case `64`: + want = Typ[Uint] // because of "+ s", s is of type uint + case `"foo"`: + want = Typ[String] + } + case *syntax.Name: + if x.Value == "nil" { + want = NewInterfaceType(nil, nil) // interface{} (for now, go/types types this as "untyped nil") + } + } + if want != nil && !Identical(tv.Type, want) { + t.Errorf("got %s; want %s", tv.Type, want) + } + } +} + +func TestIssue6413(t *testing.T) { + src := ` +package p +func f() int { + defer f() + go f() + return 0 +} +` + types := make(map[syntax.Expr]TypeAndValue) + mustTypecheck(src, nil, &Info{Types: types}) + + want := Typ[Int] + n := 0 + for x, tv := range types { + if _, ok := x.(*syntax.CallExpr); ok { + if tv.Type != want { + t.Errorf("%s: got %s; want %s", x.Pos(), tv.Type, want) + } + n++ + } + } + + if n != 2 { + t.Errorf("got %d CallExprs; want 2", n) + } +} + +func TestIssue7245(t *testing.T) { + src := ` +package p +func (T) m() (res bool) { return } +type T struct{} // receiver type after method declaration +` + f := mustParse(src) + + var conf Config + defs := make(map[*syntax.Name]Object) + _, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, &Info{Defs: defs}) + if err != nil { + t.Fatal(err) + } + + m 
:= f.DeclList[0].(*syntax.FuncDecl) + res1 := defs[m.Name].(*Func).Type().(*Signature).Results().At(0) + res2 := defs[m.Type.ResultList[0].Name].(*Var) + + if res1 != res2 { + t.Errorf("got %s (%p) != %s (%p)", res1, res2, res1, res2) + } +} + +// This tests that uses of existing vars on the LHS of an assignment +// are Uses, not Defs; and also that the (illegal) use of a non-var on +// the LHS of an assignment is a Use nonetheless. +func TestIssue7827(t *testing.T) { + const src = ` +package p +func _() { + const w = 1 // defs w + x, y := 2, 3 // defs x, y + w, x, z := 4, 5, 6 // uses w, x, defs z; error: cannot assign to w + _, _, _ = x, y, z // uses x, y, z +} +` + const want = `L3 defs func p._() +L4 defs const w untyped int +L5 defs var x int +L5 defs var y int +L6 defs var z int +L6 uses const w untyped int +L6 uses var x int +L7 uses var x int +L7 uses var y int +L7 uses var z int` + + // don't abort at the first error + conf := Config{Error: func(err error) { t.Log(err) }} + defs := make(map[*syntax.Name]Object) + uses := make(map[*syntax.Name]Object) + _, err := typecheck(src, &conf, &Info{Defs: defs, Uses: uses}) + if s := err.Error(); !strings.HasSuffix(s, "cannot assign to w") { + t.Errorf("Check: unexpected error: %s", s) + } + + var facts []string + for id, obj := range defs { + if obj != nil { + fact := fmt.Sprintf("L%d defs %s", id.Pos().Line(), obj) + facts = append(facts, fact) + } + } + for id, obj := range uses { + fact := fmt.Sprintf("L%d uses %s", id.Pos().Line(), obj) + facts = append(facts, fact) + } + sort.Strings(facts) + + got := strings.Join(facts, "\n") + if got != want { + t.Errorf("Unexpected defs/uses\ngot:\n%s\nwant:\n%s", got, want) + } +} + +// This tests that the package associated with the types2.Object.Pkg method +// is the type's package independent of the order in which the imports are +// listed in the sources src1, src2 below. 
+// The actual issue is in go/internal/gcimporter which has a corresponding +// test; we leave this test here to verify correct behavior at the go/types +// level. +func TestIssue13898(t *testing.T) { + testenv.MustHaveGoBuild(t) + + const src0 = ` +package main + +import "go/types" + +func main() { + var info types.Info + for _, obj := range info.Uses { + _ = obj.Pkg() + } +} +` + // like src0, but also imports go/importer + const src1 = ` +package main + +import ( + "go/types" + _ "go/importer" +) + +func main() { + var info types.Info + for _, obj := range info.Uses { + _ = obj.Pkg() + } +} +` + // like src1 but with different import order + // (used to fail with this issue) + const src2 = ` +package main + +import ( + _ "go/importer" + "go/types" +) + +func main() { + var info types.Info + for _, obj := range info.Uses { + _ = obj.Pkg() + } +} +` + f := func(test, src string) { + info := &Info{Uses: make(map[*syntax.Name]Object)} + mustTypecheck(src, nil, info) + + var pkg *Package + count := 0 + for id, obj := range info.Uses { + if id.Value == "Pkg" { + pkg = obj.Pkg() + count++ + } + } + if count != 1 { + t.Fatalf("%s: got %d entries named Pkg; want 1", test, count) + } + if pkg.Name() != "types" { + t.Fatalf("%s: got %v; want package types2", test, pkg) + } + } + + f("src0", src0) + f("src1", src1) + f("src2", src2) +} + +func TestIssue22525(t *testing.T) { + const src = `package p; func f() { var a, b, c, d, e int }` + + got := "\n" + conf := Config{Error: func(err error) { got += err.Error() + "\n" }} + typecheck(src, &conf, nil) // do not crash + want := ` +p:1:27: a declared and not used +p:1:30: b declared and not used +p:1:33: c declared and not used +p:1:36: d declared and not used +p:1:39: e declared and not used +` + if got != want { + t.Errorf("got: %swant: %s", got, want) + } +} + +func TestIssue25627(t *testing.T) { + const prefix = `package p; import "unsafe"; type P *struct{}; type I interface{}; type T ` + // The src strings (without prefix) 
are constructed such that the number of semicolons + // plus one corresponds to the number of fields expected in the respective struct. + for _, src := range []string{ + `struct { x Missing }`, + `struct { Missing }`, + `struct { *Missing }`, + `struct { unsafe.Pointer }`, + `struct { P }`, + `struct { *I }`, + `struct { a int; b Missing; *Missing }`, + } { + f := mustParse(prefix + src) + + conf := Config{Importer: defaultImporter(), Error: func(err error) {}} + info := &Info{Types: make(map[syntax.Expr]TypeAndValue)} + _, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, info) + if err != nil { + if _, ok := err.(Error); !ok { + t.Fatal(err) + } + } + + syntax.Inspect(f, func(n syntax.Node) bool { + if decl, _ := n.(*syntax.TypeDecl); decl != nil { + if tv, ok := info.Types[decl.Type]; ok && decl.Name.Value == "T" { + want := strings.Count(src, ";") + 1 + if got := tv.Type.(*Struct).NumFields(); got != want { + t.Errorf("%s: got %d fields; want %d", src, got, want) + } + } + } + return true + }) + } +} + +func TestIssue28005(t *testing.T) { + // method names must match defining interface name for this test + // (see last comment in this function) + sources := [...]string{ + "package p; type A interface{ A() }", + "package p; type B interface{ B() }", + "package p; type X interface{ A; B }", + } + + // compute original file ASTs + var orig [len(sources)]*syntax.File + for i, src := range sources { + orig[i] = mustParse(src) + } + + // run the test for all order permutations of the incoming files + for _, perm := range [][len(sources)]int{ + {0, 1, 2}, + {0, 2, 1}, + {1, 0, 2}, + {1, 2, 0}, + {2, 0, 1}, + {2, 1, 0}, + } { + // create file order permutation + files := make([]*syntax.File, len(sources)) + for i := range perm { + files[i] = orig[perm[i]] + } + + // type-check package with given file order permutation + var conf Config + info := &Info{Defs: make(map[*syntax.Name]Object)} + _, err := conf.Check("", files, info) + if err != nil { + t.Fatal(err) + } + 
+ // look for interface object X + var obj Object + for name, def := range info.Defs { + if name.Value == "X" { + obj = def + break + } + } + if obj == nil { + t.Fatal("object X not found") + } + iface := obj.Type().Underlying().(*Interface) // object X must be an interface + + // Each iface method m is embedded; and m's receiver base type name + // must match the method's name per the choice in the source file. + for i := 0; i < iface.NumMethods(); i++ { + m := iface.Method(i) + recvName := m.Type().(*Signature).Recv().Type().(*Named).Obj().Name() + if recvName != m.Name() { + t.Errorf("perm %v: got recv %s; want %s", perm, recvName, m.Name()) + } + } + } +} + +func TestIssue28282(t *testing.T) { + // create type interface { error } + et := Universe.Lookup("error").Type() + it := NewInterfaceType(nil, []Type{et}) + // verify that after completing the interface, the embedded method remains unchanged + // (interfaces are "completed" lazily now, so the completion happens implicitly when + // accessing Method(0)) + want := et.Underlying().(*Interface).Method(0) + got := it.Method(0) + if got != want { + t.Fatalf("%s.Method(0): got %q (%p); want %q (%p)", it, got, got, want, want) + } + // verify that lookup finds the same method in both interfaces (redundant check) + obj, _, _ := LookupFieldOrMethod(et, false, nil, "Error") + if obj != want { + t.Fatalf("%s.Lookup: got %q (%p); want %q (%p)", et, obj, obj, want, want) + } + obj, _, _ = LookupFieldOrMethod(it, false, nil, "Error") + if obj != want { + t.Fatalf("%s.Lookup: got %q (%p); want %q (%p)", it, obj, obj, want, want) + } +} + +func TestIssue29029(t *testing.T) { + f1 := mustParse(`package p; type A interface { M() }`) + f2 := mustParse(`package p; var B interface { A }`) + + // printInfo prints the *Func definitions recorded in info, one *Func per line. 
+ printInfo := func(info *Info) string { + var buf strings.Builder + for _, obj := range info.Defs { + if fn, ok := obj.(*Func); ok { + fmt.Fprintln(&buf, fn) + } + } + return buf.String() + } + + // The *Func (method) definitions for package p must be the same + // independent on whether f1 and f2 are type-checked together, or + // incrementally. + + // type-check together + var conf Config + info := &Info{Defs: make(map[*syntax.Name]Object)} + check := NewChecker(&conf, NewPackage("", "p"), info) + if err := check.Files([]*syntax.File{f1, f2}); err != nil { + t.Fatal(err) + } + want := printInfo(info) + + // type-check incrementally + info = &Info{Defs: make(map[*syntax.Name]Object)} + check = NewChecker(&conf, NewPackage("", "p"), info) + if err := check.Files([]*syntax.File{f1}); err != nil { + t.Fatal(err) + } + if err := check.Files([]*syntax.File{f2}); err != nil { + t.Fatal(err) + } + got := printInfo(info) + + if got != want { + t.Errorf("\ngot : %swant: %s", got, want) + } +} + +func TestIssue34151(t *testing.T) { + const asrc = `package a; type I interface{ M() }; type T struct { F interface { I } }` + const bsrc = `package b; import "a"; type T struct { F interface { a.I } }; var _ = a.T(T{})` + + a := mustTypecheck(asrc, nil, nil) + + conf := Config{Importer: importHelper{pkg: a}} + mustTypecheck(bsrc, &conf, nil) +} + +type importHelper struct { + pkg *Package + fallback Importer +} + +func (h importHelper) Import(path string) (*Package, error) { + if path == h.pkg.Path() { + return h.pkg, nil + } + if h.fallback == nil { + return nil, fmt.Errorf("got package path %q; want %q", path, h.pkg.Path()) + } + return h.fallback.Import(path) +} + +// TestIssue34921 verifies that we don't update an imported type's underlying +// type when resolving an underlying type. 
Specifically, when determining the +// underlying type of b.T (which is the underlying type of a.T, which is int) +// we must not set the underlying type of a.T again since that would lead to +// a race condition if package b is imported elsewhere, in a package that is +// concurrently type-checked. +func TestIssue34921(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Error(r) + } + }() + + var sources = []string{ + `package a; type T int`, + `package b; import "a"; type T a.T`, + } + + var pkg *Package + for _, src := range sources { + conf := Config{Importer: importHelper{pkg: pkg}} + pkg = mustTypecheck(src, &conf, nil) // pkg imported by the next package in this test + } +} + +func TestIssue43088(t *testing.T) { + // type T1 struct { + // _ T2 + // } + // + // type T2 struct { + // _ struct { + // _ T2 + // } + // } + n1 := NewTypeName(nopos, nil, "T1", nil) + T1 := NewNamed(n1, nil, nil) + n2 := NewTypeName(nopos, nil, "T2", nil) + T2 := NewNamed(n2, nil, nil) + s1 := NewStruct([]*Var{NewField(nopos, nil, "_", T2, false)}, nil) + T1.SetUnderlying(s1) + s2 := NewStruct([]*Var{NewField(nopos, nil, "_", T2, false)}, nil) + s3 := NewStruct([]*Var{NewField(nopos, nil, "_", s2, false)}, nil) + T2.SetUnderlying(s3) + + // These calls must terminate (no endless recursion). + Comparable(T1) + Comparable(T2) +} + +func TestIssue44515(t *testing.T) { + typ := Unsafe.Scope().Lookup("Pointer").Type() + + got := TypeString(typ, nil) + want := "unsafe.Pointer" + if got != want { + t.Errorf("got %q; want %q", got, want) + } + + qf := func(pkg *Package) string { + if pkg == Unsafe { + return "foo" + } + return "" + } + got = TypeString(typ, qf) + want = "foo.Pointer" + if got != want { + t.Errorf("got %q; want %q", got, want) + } +} + +func TestIssue43124(t *testing.T) { + // TODO(rFindley) move this to testdata by enhancing support for importing. 
+ + testenv.MustHaveGoBuild(t) // The go command is needed for the importer to determine the locations of stdlib .a files. + + // All involved packages have the same name (template). Error messages should + // disambiguate between text/template and html/template by printing the full + // path. + const ( + asrc = `package a; import "text/template"; func F(template.Template) {}; func G(int) {}` + bsrc = ` +package b + +import ( + "a" + "html/template" +) + +func _() { + // Packages should be fully qualified when there is ambiguity within the + // error string itself. + a.F(template /* ERRORx "cannot use.*html/template.* as .*text/template" */ .Template{}) +} +` + csrc = ` +package c + +import ( + "a" + "fmt" + "html/template" +) + +// go.dev/issue/46905: make sure template is not the first package qualified. +var _ fmt.Stringer = 1 // ERRORx "cannot use 1.*as fmt\\.Stringer" + +// Packages should be fully qualified when there is ambiguity in reachable +// packages. In this case both a (and for that matter html/template) import +// text/template. +func _() { a.G(template /* ERRORx "cannot use .*html/template.*Template" */ .Template{}) } +` + + tsrc = ` +package template + +import "text/template" + +type T int + +// Verify that the current package name also causes disambiguation. 
+var _ T = template /* ERRORx "cannot use.*text/template.* as T value" */.Template{} +` + ) + + a := mustTypecheck(asrc, nil, nil) + imp := importHelper{pkg: a, fallback: defaultImporter()} + + withImporter := func(cfg *Config) { + cfg.Importer = imp + } + + testFiles(t, []string{"b.go"}, [][]byte{[]byte(bsrc)}, 0, false, withImporter) + testFiles(t, []string{"c.go"}, [][]byte{[]byte(csrc)}, 0, false, withImporter) + testFiles(t, []string{"t.go"}, [][]byte{[]byte(tsrc)}, 0, false, withImporter) +} + +func TestIssue50646(t *testing.T) { + anyType := Universe.Lookup("any").Type() + comparableType := Universe.Lookup("comparable").Type() + + if !Comparable(anyType) { + t.Error("any is not a comparable type") + } + if !Comparable(comparableType) { + t.Error("comparable is not a comparable type") + } + + if Implements(anyType, comparableType.Underlying().(*Interface)) { + t.Error("any implements comparable") + } + if !Implements(comparableType, anyType.(*Interface)) { + t.Error("comparable does not implement any") + } + + if AssignableTo(anyType, comparableType) { + t.Error("any assignable to comparable") + } + if !AssignableTo(comparableType, anyType) { + t.Error("comparable not assignable to any") + } +} + +func TestIssue55030(t *testing.T) { + // makeSig makes the signature func(typ...) 
+ makeSig := func(typ Type) { + par := NewVar(nopos, nil, "", typ) + params := NewTuple(par) + NewSignatureType(nil, nil, nil, params, nil, true) + } + + // makeSig must not panic for the following (example) types: + // []int + makeSig(NewSlice(Typ[Int])) + + // string + makeSig(Typ[String]) + + // P where P's core type is string + { + P := NewTypeName(nopos, nil, "P", nil) // [P string] + makeSig(NewTypeParam(P, NewInterfaceType(nil, []Type{Typ[String]}))) + } + + // P where P's core type is an (unnamed) slice + { + P := NewTypeName(nopos, nil, "P", nil) // [P []int] + makeSig(NewTypeParam(P, NewInterfaceType(nil, []Type{NewSlice(Typ[Int])}))) + } + + // P where P's core type is bytestring (i.e., string or []byte) + { + t1 := NewTerm(true, Typ[String]) // ~string + t2 := NewTerm(false, NewSlice(Typ[Byte])) // []byte + u := NewUnion([]*Term{t1, t2}) // ~string | []byte + P := NewTypeName(nopos, nil, "P", nil) // [P ~string | []byte] + makeSig(NewTypeParam(P, NewInterfaceType(nil, []Type{u}))) + } +} + +func TestIssue51093(t *testing.T) { + // Each test stands for a conversion of the form P(val) + // where P is a type parameter with typ as constraint. + // The test ensures that P(val) has the correct type P + // and is not a constant. 
+ var tests = []struct { + typ string + val string + }{ + {"bool", "false"}, + {"int", "-1"}, + {"uint", "1.0"}, + {"rune", "'a'"}, + {"float64", "3.5"}, + {"complex64", "1.25"}, + {"string", "\"foo\""}, + + // some more complex constraints + {"~byte", "1"}, + {"~int | ~float64 | complex128", "1"}, + {"~uint64 | ~rune", "'X'"}, + } + + for _, test := range tests { + src := fmt.Sprintf("package p; func _[P %s]() { _ = P(%s) }", test.typ, test.val) + types := make(map[syntax.Expr]TypeAndValue) + mustTypecheck(src, nil, &Info{Types: types}) + + var n int + for x, tv := range types { + if x, _ := x.(*syntax.CallExpr); x != nil { + // there must be exactly one CallExpr which is the P(val) conversion + n++ + tpar, _ := tv.Type.(*TypeParam) + if tpar == nil { + t.Fatalf("%s: got type %s, want type parameter", syntax.String(x), tv.Type) + } + if name := tpar.Obj().Name(); name != "P" { + t.Fatalf("%s: got type parameter name %s, want P", syntax.String(x), name) + } + // P(val) must not be constant + if tv.Value != nil { + t.Errorf("%s: got constant value %s (%s), want no constant", syntax.String(x), tv.Value, tv.Value.String()) + } + } + } + + if n != 1 { + t.Fatalf("%s: got %d CallExpr nodes; want 1", src, 1) + } + } +} + +func TestIssue54258(t *testing.T) { + tests := []struct{ main, b, want string }{ + { //--------------------------------------------------------------- + `package main +import "b" +type I0 interface { + M0(w struct{ f string }) +} +var _ I0 = b.S{} +`, + `package b +type S struct{} +func (S) M0(struct{ f string }) {} +`, + `6:12: cannot use b[.]S{} [(]value of type b[.]S[)] as I0 value in variable declaration: b[.]S does not implement I0 [(]wrong type for method M0[)] +.*have M0[(]struct{f string /[*] package b [*]/ }[)] +.*want M0[(]struct{f string /[*] package main [*]/ }[)]`}, + + { //--------------------------------------------------------------- + `package main +import "b" +type I1 interface { + M1(struct{ string }) +} +var _ I1 = b.S{} +`, + 
`package b +type S struct{} +func (S) M1(struct{ string }) {} +`, + `6:12: cannot use b[.]S{} [(]value of type b[.]S[)] as I1 value in variable declaration: b[.]S does not implement I1 [(]wrong type for method M1[)] +.*have M1[(]struct{string /[*] package b [*]/ }[)] +.*want M1[(]struct{string /[*] package main [*]/ }[)]`}, + + { //--------------------------------------------------------------- + `package main +import "b" +type I2 interface { + M2(y struct{ f struct{ f string } }) +} +var _ I2 = b.S{} +`, + `package b +type S struct{} +func (S) M2(struct{ f struct{ f string } }) {} +`, + `6:12: cannot use b[.]S{} [(]value of type b[.]S[)] as I2 value in variable declaration: b[.]S does not implement I2 [(]wrong type for method M2[)] +.*have M2[(]struct{f struct{f string} /[*] package b [*]/ }[)] +.*want M2[(]struct{f struct{f string} /[*] package main [*]/ }[)]`}, + + { //--------------------------------------------------------------- + `package main +import "b" +type I3 interface { + M3(z struct{ F struct{ f string } }) +} +var _ I3 = b.S{} +`, + `package b +type S struct{} +func (S) M3(struct{ F struct{ f string } }) {} +`, + `6:12: cannot use b[.]S{} [(]value of type b[.]S[)] as I3 value in variable declaration: b[.]S does not implement I3 [(]wrong type for method M3[)] +.*have M3[(]struct{F struct{f string /[*] package b [*]/ }}[)] +.*want M3[(]struct{F struct{f string /[*] package main [*]/ }}[)]`}, + + { //--------------------------------------------------------------- + `package main +import "b" +type I4 interface { + M4(_ struct { *string }) +} +var _ I4 = b.S{} +`, + `package b +type S struct{} +func (S) M4(struct { *string }) {} +`, + `6:12: cannot use b[.]S{} [(]value of type b[.]S[)] as I4 value in variable declaration: b[.]S does not implement I4 [(]wrong type for method M4[)] +.*have M4[(]struct{[*]string /[*] package b [*]/ }[)] +.*want M4[(]struct{[*]string /[*] package main [*]/ }[)]`}, + + { 
//--------------------------------------------------------------- + `package main +import "b" +type t struct{ A int } +type I5 interface { + M5(_ struct {b.S;t}) +} +var _ I5 = b.S{} +`, + `package b +type S struct{} +type t struct{ A int } +func (S) M5(struct {S;t}) {} +`, + `7:12: cannot use b[.]S{} [(]value of type b[.]S[)] as I5 value in variable declaration: b[.]S does not implement I5 [(]wrong type for method M5[)] +.*have M5[(]struct{b[.]S; b[.]t}[)] +.*want M5[(]struct{b[.]S; t}[)]`}, + } + + test := func(main, b, want string) { + re := regexp.MustCompile(want) + bpkg := mustTypecheck(b, nil, nil) + mast := mustParse(main) + conf := Config{Importer: importHelper{pkg: bpkg}} + _, err := conf.Check(mast.PkgName.Value, []*syntax.File{mast}, nil) + if err == nil { + t.Error("Expected failure, but it did not") + } else if got := err.Error(); !re.MatchString(got) { + t.Errorf("Wanted match for\n\t%s\n but got\n\t%s", want, got) + } else if testing.Verbose() { + t.Logf("Saw expected\n\t%s", err.Error()) + } + } + for _, t := range tests { + test(t.main, t.b, t.want) + } +} + +func TestIssue59944(t *testing.T) { + testenv.MustHaveCGO(t) + + // The typechecker should resolve methods declared on aliases of cgo types. + const src = ` +package p + +/* +struct layout { + int field; +}; +*/ +import "C" + +type Layout = C.struct_layout + +func (l *Layout) Binding() {} + +func _() { + _ = (*Layout).Binding +} +` + + // code generated by cmd/cgo for the above source. + const cgoTypes = ` +// Code generated by cmd/cgo; DO NOT EDIT. 
+ +package p + +import "unsafe" + +import "syscall" + +import _cgopackage "runtime/cgo" + +type _ _cgopackage.Incomplete +var _ syscall.Errno +func _Cgo_ptr(ptr unsafe.Pointer) unsafe.Pointer { return ptr } + +//go:linkname _Cgo_always_false runtime.cgoAlwaysFalse +var _Cgo_always_false bool +//go:linkname _Cgo_use runtime.cgoUse +func _Cgo_use(interface{}) +type _Ctype_int int32 + +type _Ctype_struct_layout struct { + field _Ctype_int +} + +type _Ctype_void [0]byte + +//go:linkname _cgo_runtime_cgocall runtime.cgocall +func _cgo_runtime_cgocall(unsafe.Pointer, uintptr) int32 + +//go:linkname _cgoCheckPointer runtime.cgoCheckPointer +func _cgoCheckPointer(interface{}, interface{}) + +//go:linkname _cgoCheckResult runtime.cgoCheckResult +func _cgoCheckResult(interface{}) +` + testFiles(t, []string{"p.go", "_cgo_gotypes.go"}, [][]byte{[]byte(src), []byte(cgoTypes)}, 0, false, func(cfg *Config) { + *boolFieldAddr(cfg, "go115UsesCgo") = true + }) +} + +func TestIssue61931(t *testing.T) { + const src = ` +package p + +func A(func(any), ...any) {} +func B[T any](T) {} + +func _() { + A(B, nil // syntax error: missing ',' before newline in argument list +} +` + f, err := syntax.Parse(syntax.NewFileBase(pkgName(src)), strings.NewReader(src), func(error) {}, nil, 0) + if err == nil { + t.Fatal("expected syntax error") + } + + var conf Config + conf.Check(f.PkgName.Value, []*syntax.File{f}, nil) // must not panic +} + +func TestIssue61938(t *testing.T) { + const src = ` +package p + +func f[T any]() {} +func _() { f() } +` + // no error handler provided (this issue) + var conf Config + typecheck(src, &conf, nil) // must not panic + + // with error handler (sanity check) + conf.Error = func(error) {} + typecheck(src, &conf, nil) // must not panic +} + +func TestIssue63260(t *testing.T) { + const src = ` +package p + +func _() { + use(f[*string]) +} + +func use(func()) {} + +func f[I *T, T any]() { + var v T + _ = v +}` + + info := Info{ + Defs: make(map[*syntax.Name]Object), 
+ } + pkg := mustTypecheck(src, nil, &info) + + // get type parameter T in signature of f + T := pkg.Scope().Lookup("f").Type().(*Signature).TypeParams().At(1) + if T.Obj().Name() != "T" { + t.Fatalf("got type parameter %s, want T", T) + } + + // get type of variable v in body of f + var v Object + for name, obj := range info.Defs { + if name.Value == "v" { + v = obj + break + } + } + if v == nil { + t.Fatal("variable v not found") + } + + // type of v and T must be pointer-identical + if v.Type() != T { + t.Fatalf("types of v and T are not pointer-identical: %p != %p", v.Type().(*TypeParam), T) + } +} + +func TestIssue44410(t *testing.T) { + const src = ` +package p + +type A = []int +type S struct{ A } +` + + t.Setenv("GODEBUG", "gotypesalias=1") + pkg := mustTypecheck(src, nil, nil) + + S := pkg.Scope().Lookup("S") + if S == nil { + t.Fatal("object S not found") + } + + got := S.String() + const want = "type p.S struct{p.A}" + if got != want { + t.Fatalf("got %q; want %q", got, want) + } +} + +func TestIssue59831(t *testing.T) { + // Package a exports a type S with an unexported method m; + // the tests check the error messages when m is not found. + const asrc = `package a; type S struct{}; func (S) m() {}` + apkg := mustTypecheck(asrc, nil, nil) + + // Package b exports a type S with an exported method m; + // the tests check the error messages when M is not found. 
+ const bsrc = `package b; type S struct{}; func (S) M() {}` + bpkg := mustTypecheck(bsrc, nil, nil) + + tests := []struct { + imported *Package + src, err string + }{ + // tests importing a (or nothing) + {apkg, `package a1; import "a"; var _ interface { M() } = a.S{}`, + "a.S does not implement interface{M()} (missing method M) have m() want M()"}, + + {apkg, `package a2; import "a"; var _ interface { m() } = a.S{}`, + "a.S does not implement interface{m()} (unexported method m)"}, // test for issue + + {nil, `package a3; type S struct{}; func (S) m(); var _ interface { M() } = S{}`, + "S does not implement interface{M()} (missing method M) have m() want M()"}, + + {nil, `package a4; type S struct{}; func (S) m(); var _ interface { m() } = S{}`, + ""}, // no error expected + + {nil, `package a5; type S struct{}; func (S) m(); var _ interface { n() } = S{}`, + "S does not implement interface{n()} (missing method n)"}, + + // tests importing b (or nothing) + {bpkg, `package b1; import "b"; var _ interface { m() } = b.S{}`, + "b.S does not implement interface{m()} (missing method m) have M() want m()"}, + + {bpkg, `package b2; import "b"; var _ interface { M() } = b.S{}`, + ""}, // no error expected + + {nil, `package b3; type S struct{}; func (S) M(); var _ interface { M() } = S{}`, + ""}, // no error expected + + {nil, `package b4; type S struct{}; func (S) M(); var _ interface { m() } = S{}`, + "S does not implement interface{m()} (missing method m) have M() want m()"}, + + {nil, `package b5; type S struct{}; func (S) M(); var _ interface { n() } = S{}`, + "S does not implement interface{n()} (missing method n)"}, + } + + for _, test := range tests { + // typecheck test source + conf := Config{Importer: importHelper{pkg: test.imported}} + pkg, err := typecheck(test.src, &conf, nil) + if err == nil { + if test.err != "" { + t.Errorf("package %s: got no error, want %q", pkg.Name(), test.err) + } + continue + } + if test.err == "" { + t.Errorf("package %s: got %q, 
want not error", pkg.Name(), err.Error()) + } + + // flatten reported error message + errmsg := strings.ReplaceAll(err.Error(), "\n", " ") + errmsg = strings.ReplaceAll(errmsg, "\t", "") + + // verify error message + if !strings.Contains(errmsg, test.err) { + t.Errorf("package %s: got %q, want %q", pkg.Name(), errmsg, test.err) + } + } +} + +func TestIssue64759(t *testing.T) { + const src = ` +//go:build go1.18 +package p + +func f[S ~[]E, E any](S) {} + +func _() { + f([]string{}) +} +` + // Per the go:build directive, the source must typecheck + // even though the (module) Go version is set to go1.17. + conf := Config{GoVersion: "go1.17"} + mustTypecheck(src, &conf, nil) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/labels.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/labels.go new file mode 100644 index 0000000000000000000000000000000000000000..ffb37004cee87e7be34020ecf4010b6f628b5bea --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/labels.go @@ -0,0 +1,269 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import ( + "cmd/compile/internal/syntax" + . "internal/types/errors" +) + +// labels checks correct label use in body. +func (check *Checker) labels(body *syntax.BlockStmt) { + // set of all labels in this body + all := NewScope(nil, body.Pos(), syntax.EndPos(body), "label") + + fwdJumps := check.blockBranches(all, nil, nil, body.List) + + // If there are any forward jumps left, no label was found for + // the corresponding goto statements. Either those labels were + // never defined, or they are inside blocks and not reachable + // for the respective gotos. 
+ for _, jmp := range fwdJumps { + var msg string + var code Code + name := jmp.Label.Value + if alt := all.Lookup(name); alt != nil { + msg = "goto %s jumps into block" + alt.(*Label).used = true // avoid another error + code = JumpIntoBlock + } else { + msg = "label %s not declared" + code = UndeclaredLabel + } + check.errorf(jmp.Label, code, msg, name) + } + + // spec: "It is illegal to define a label that is never used." + for name, obj := range all.elems { + obj = resolve(name, obj) + if lbl := obj.(*Label); !lbl.used { + check.softErrorf(lbl.pos, UnusedLabel, "label %s declared and not used", lbl.name) + } + } +} + +// A block tracks label declarations in a block and its enclosing blocks. +type block struct { + parent *block // enclosing block + lstmt *syntax.LabeledStmt // labeled statement to which this block belongs, or nil + labels map[string]*syntax.LabeledStmt // allocated lazily +} + +// insert records a new label declaration for the current block. +// The label must not have been declared before in any block. +func (b *block) insert(s *syntax.LabeledStmt) { + name := s.Label.Value + if debug { + assert(b.gotoTarget(name) == nil) + } + labels := b.labels + if labels == nil { + labels = make(map[string]*syntax.LabeledStmt) + b.labels = labels + } + labels[name] = s +} + +// gotoTarget returns the labeled statement in the current +// or an enclosing block with the given label name, or nil. +func (b *block) gotoTarget(name string) *syntax.LabeledStmt { + for s := b; s != nil; s = s.parent { + if t := s.labels[name]; t != nil { + return t + } + } + return nil +} + +// enclosingTarget returns the innermost enclosing labeled +// statement with the given label name, or nil. 
+func (b *block) enclosingTarget(name string) *syntax.LabeledStmt { + for s := b; s != nil; s = s.parent { + if t := s.lstmt; t != nil && t.Label.Value == name { + return t + } + } + return nil +} + +// blockBranches processes a block's statement list and returns the set of outgoing forward jumps. +// all is the scope of all declared labels, parent the set of labels declared in the immediately +// enclosing block, and lstmt is the labeled statement this block is associated with (or nil). +func (check *Checker) blockBranches(all *Scope, parent *block, lstmt *syntax.LabeledStmt, list []syntax.Stmt) []*syntax.BranchStmt { + b := &block{parent, lstmt, nil} + + var ( + varDeclPos syntax.Pos + fwdJumps, badJumps []*syntax.BranchStmt + ) + + // All forward jumps jumping over a variable declaration are possibly + // invalid (they may still jump out of the block and be ok). + // recordVarDecl records them for the given position. + recordVarDecl := func(pos syntax.Pos) { + varDeclPos = pos + badJumps = append(badJumps[:0], fwdJumps...) 
// copy fwdJumps to badJumps + } + + jumpsOverVarDecl := func(jmp *syntax.BranchStmt) bool { + if varDeclPos.IsKnown() { + for _, bad := range badJumps { + if jmp == bad { + return true + } + } + } + return false + } + + var stmtBranches func(syntax.Stmt) + stmtBranches = func(s syntax.Stmt) { + switch s := s.(type) { + case *syntax.DeclStmt: + for _, d := range s.DeclList { + if d, _ := d.(*syntax.VarDecl); d != nil { + recordVarDecl(d.Pos()) + } + } + + case *syntax.LabeledStmt: + // declare non-blank label + if name := s.Label.Value; name != "_" { + lbl := NewLabel(s.Label.Pos(), check.pkg, name) + if alt := all.Insert(lbl); alt != nil { + var err error_ + err.code = DuplicateLabel + err.soft = true + err.errorf(lbl.pos, "label %s already declared", name) + err.recordAltDecl(alt) + check.report(&err) + // ok to continue + } else { + b.insert(s) + check.recordDef(s.Label, lbl) + } + // resolve matching forward jumps and remove them from fwdJumps + i := 0 + for _, jmp := range fwdJumps { + if jmp.Label.Value == name { + // match + lbl.used = true + check.recordUse(jmp.Label, lbl) + if jumpsOverVarDecl(jmp) { + check.softErrorf( + jmp.Label, + JumpOverDecl, + "goto %s jumps over variable declaration at line %d", + name, + varDeclPos.Line(), + ) + // ok to continue + } + } else { + // no match - record new forward jump + fwdJumps[i] = jmp + i++ + } + } + fwdJumps = fwdJumps[:i] + lstmt = s + } + stmtBranches(s.Stmt) + + case *syntax.BranchStmt: + if s.Label == nil { + return // checked in 1st pass (check.stmt) + } + + // determine and validate target + name := s.Label.Value + switch s.Tok { + case syntax.Break: + // spec: "If there is a label, it must be that of an enclosing + // "for", "switch", or "select" statement, and that is the one + // whose execution terminates." 
+ valid := false + if t := b.enclosingTarget(name); t != nil { + switch t.Stmt.(type) { + case *syntax.SwitchStmt, *syntax.SelectStmt, *syntax.ForStmt: + valid = true + } + } + if !valid { + check.errorf(s.Label, MisplacedLabel, "invalid break label %s", name) + return + } + + case syntax.Continue: + // spec: "If there is a label, it must be that of an enclosing + // "for" statement, and that is the one whose execution advances." + valid := false + if t := b.enclosingTarget(name); t != nil { + switch t.Stmt.(type) { + case *syntax.ForStmt: + valid = true + } + } + if !valid { + check.errorf(s.Label, MisplacedLabel, "invalid continue label %s", name) + return + } + + case syntax.Goto: + if b.gotoTarget(name) == nil { + // label may be declared later - add branch to forward jumps + fwdJumps = append(fwdJumps, s) + return + } + + default: + check.errorf(s, InvalidSyntaxTree, "branch statement: %s %s", s.Tok, name) + return + } + + // record label use + obj := all.Lookup(name) + obj.(*Label).used = true + check.recordUse(s.Label, obj) + + case *syntax.AssignStmt: + if s.Op == syntax.Def { + recordVarDecl(s.Pos()) + } + + case *syntax.BlockStmt: + // Unresolved forward jumps inside the nested block + // become forward jumps in the current block. + fwdJumps = append(fwdJumps, check.blockBranches(all, b, lstmt, s.List)...) + + case *syntax.IfStmt: + stmtBranches(s.Then) + if s.Else != nil { + stmtBranches(s.Else) + } + + case *syntax.SwitchStmt: + b := &block{b, lstmt, nil} + for _, s := range s.Body { + fwdJumps = append(fwdJumps, check.blockBranches(all, b, nil, s.Body)...) + } + + case *syntax.SelectStmt: + b := &block{b, lstmt, nil} + for _, s := range s.Body { + fwdJumps = append(fwdJumps, check.blockBranches(all, b, nil, s.Body)...) 
+ } + + case *syntax.ForStmt: + stmtBranches(s.Body) + } + } + + for _, s := range list { + stmtBranches(s) + } + + return fwdJumps +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/lookup.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/lookup.go new file mode 100644 index 0000000000000000000000000000000000000000..bc47c150607635dbb0a310670ef0b711729d35f4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/lookup.go @@ -0,0 +1,603 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements various field and method lookup functions. + +package types2 + +import ( + "bytes" + "cmd/compile/internal/syntax" + "strings" +) + +// Internal use of LookupFieldOrMethod: If the obj result is a method +// associated with a concrete (non-interface) type, the method's signature +// may not be fully set up. Call Checker.objDecl(obj, nil) before accessing +// the method's type. + +// LookupFieldOrMethod looks up a field or method with given package and name +// in T and returns the corresponding *Var or *Func, an index sequence, and a +// bool indicating if there were any pointer indirections on the path to the +// field or method. If addressable is set, T is the type of an addressable +// variable (only matters for method lookups). T must not be nil. +// +// The last index entry is the field or method index in the (possibly embedded) +// type where the entry was found, either: +// +// 1. the list of declared methods of a named type; or +// 2. the list of all methods (method set) of an interface type; or +// 3. the list of fields of a struct type. +// +// The earlier index entries are the indices of the embedded struct fields +// traversed to get to the found entry, starting at depth 0. +// +// If no entry is found, a nil object is returned. 
In this case, the returned +// index and indirect values have the following meaning: +// +// - If index != nil, the index sequence points to an ambiguous entry +// (the same name appeared more than once at the same embedding level). +// +// - If indirect is set, a method with a pointer receiver type was found +// but there was no pointer on the path from the actual receiver type to +// the method's formal receiver base type, nor was the receiver addressable. +func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) { + if T == nil { + panic("LookupFieldOrMethod on nil type") + } + + // Methods cannot be associated to a named pointer type. + // (spec: "The type denoted by T is called the receiver base type; + // it must not be a pointer or interface type and it must be declared + // in the same package as the method."). + // Thus, if we have a named pointer type, proceed with the underlying + // pointer type but discard the result if it is a method since we would + // not have found it for T (see also go.dev/issue/8590). + if t := asNamed(T); t != nil { + if p, _ := t.Underlying().(*Pointer); p != nil { + obj, index, indirect = lookupFieldOrMethodImpl(p, false, pkg, name, false) + if _, ok := obj.(*Func); ok { + return nil, nil, false + } + return + } + } + + obj, index, indirect = lookupFieldOrMethodImpl(T, addressable, pkg, name, false) + + // If we didn't find anything and if we have a type parameter with a core type, + // see if there is a matching field (but not a method, those need to be declared + // explicitly in the constraint). If the constraint is a named pointer type (see + // above), we are ok here because only fields are accepted as results. 
+ const enableTParamFieldLookup = false // see go.dev/issue/51576 + if enableTParamFieldLookup && obj == nil && isTypeParam(T) { + if t := coreType(T); t != nil { + obj, index, indirect = lookupFieldOrMethodImpl(t, addressable, pkg, name, false) + if _, ok := obj.(*Var); !ok { + obj, index, indirect = nil, nil, false // accept fields (variables) only + } + } + } + return +} + +// lookupFieldOrMethodImpl is the implementation of LookupFieldOrMethod. +// Notably, in contrast to LookupFieldOrMethod, it won't find struct fields +// in base types of defined (*Named) pointer types T. For instance, given +// the declaration: +// +// type T *struct{f int} +// +// lookupFieldOrMethodImpl won't find the field f in the defined (*Named) type T +// (methods on T are not permitted in the first place). +// +// Thus, lookupFieldOrMethodImpl should only be called by LookupFieldOrMethod +// and missingMethod (the latter doesn't care about struct fields). +// +// If foldCase is true, method names are considered equal if they are equal +// with case folding, irrespective of which package they are in. +// +// The resulting object may not be fully type-checked. +func lookupFieldOrMethodImpl(T Type, addressable bool, pkg *Package, name string, foldCase bool) (obj Object, index []int, indirect bool) { + // WARNING: The code in this function is extremely subtle - do not modify casually! + + if name == "_" { + return // blank fields/methods are never found + } + + // Importantly, we must not call under before the call to deref below (nor + // does deref call under), as doing so could incorrectly result in finding + // methods of the pointer base type when T is a (*Named) pointer type. + typ, isPtr := deref(T) + + // *typ where typ is an interface (incl. a type parameter) has no methods. + if isPtr { + if _, ok := under(typ).(*Interface); ok { + return + } + } + + // Start with typ as single entry at shallowest depth. 
+ current := []embeddedType{{typ, nil, isPtr, false}} + + // seen tracks named types that we have seen already, allocated lazily. + // Used to avoid endless searches in case of recursive types. + // + // We must use a lookup on identity rather than a simple map[*Named]bool as + // instantiated types may be identical but not equal. + var seen instanceLookup + + // search current depth + for len(current) > 0 { + var next []embeddedType // embedded types found at current depth + + // look for (pkg, name) in all types at current depth + for _, e := range current { + typ := e.typ + + // If we have a named type, we may have associated methods. + // Look for those first. + if named := asNamed(typ); named != nil { + if alt := seen.lookup(named); alt != nil { + // We have seen this type before, at a more shallow depth + // (note that multiples of this type at the current depth + // were consolidated before). The type at that depth shadows + // this same type at the current depth, so we can ignore + // this one. 
+ continue + } + seen.add(named) + + // look for a matching attached method + if i, m := named.lookupMethod(pkg, name, foldCase); m != nil { + // potential match + // caution: method may not have a proper signature yet + index = concat(e.index, i) + if obj != nil || e.multiples { + return nil, index, false // collision + } + obj = m + indirect = e.indirect + continue // we can't have a matching field or interface method + } + } + + switch t := under(typ).(type) { + case *Struct: + // look for a matching field and collect embedded types + for i, f := range t.fields { + if f.sameId(pkg, name) { + assert(f.typ != nil) + index = concat(e.index, i) + if obj != nil || e.multiples { + return nil, index, false // collision + } + obj = f + indirect = e.indirect + continue // we can't have a matching interface method + } + // Collect embedded struct fields for searching the next + // lower depth, but only if we have not seen a match yet + // (if we have a match it is either the desired field or + // we have a name collision on the same depth; in either + // case we don't need to look further). + // Embedded fields are always of the form T or *T where + // T is a type name. If e.typ appeared multiple times at + // this depth, f.typ appears multiple times at the next + // depth. + if obj == nil && f.embedded { + typ, isPtr := deref(f.typ) + // TODO(gri) optimization: ignore types that can't + // have fields or methods (only Named, Struct, and + // Interface types need to be considered). 
+ next = append(next, embeddedType{typ, concat(e.index, i), e.indirect || isPtr, e.multiples}) + } + } + + case *Interface: + // look for a matching method (interface may be a type parameter) + if i, m := t.typeSet().LookupMethod(pkg, name, foldCase); m != nil { + assert(m.typ != nil) + index = concat(e.index, i) + if obj != nil || e.multiples { + return nil, index, false // collision + } + obj = m + indirect = e.indirect + } + } + } + + if obj != nil { + // found a potential match + // spec: "A method call x.m() is valid if the method set of (the type of) x + // contains m and the argument list can be assigned to the parameter + // list of m. If x is addressable and &x's method set contains m, x.m() + // is shorthand for (&x).m()". + if f, _ := obj.(*Func); f != nil { + // determine if method has a pointer receiver + if f.hasPtrRecv() && !indirect && !addressable { + return nil, nil, true // pointer/addressable receiver required + } + } + return + } + + current = consolidateMultiples(next) + } + + return nil, nil, false // not found +} + +// embeddedType represents an embedded type +type embeddedType struct { + typ Type + index []int // embedded field indices, starting with index at depth 0 + indirect bool // if set, there was a pointer indirection on the path to this field + multiples bool // if set, typ appears multiple times at this depth +} + +// consolidateMultiples collects multiple list entries with the same type +// into a single entry marked as containing multiples. The result is the +// consolidated list. 
+func consolidateMultiples(list []embeddedType) []embeddedType { + if len(list) <= 1 { + return list // at most one entry - nothing to do + } + + n := 0 // number of entries w/ unique type + prev := make(map[Type]int) // index at which type was previously seen + for _, e := range list { + if i, found := lookupType(prev, e.typ); found { + list[i].multiples = true + // ignore this entry + } else { + prev[e.typ] = n + list[n] = e + n++ + } + } + return list[:n] +} + +func lookupType(m map[Type]int, typ Type) (int, bool) { + // fast path: maybe the types are equal + if i, found := m[typ]; found { + return i, true + } + + for t, i := range m { + if Identical(t, typ) { + return i, true + } + } + + return 0, false +} + +type instanceLookup struct { + // buf is used to avoid allocating the map m in the common case of a small + // number of instances. + buf [3]*Named + m map[*Named][]*Named +} + +func (l *instanceLookup) lookup(inst *Named) *Named { + for _, t := range l.buf { + if t != nil && Identical(inst, t) { + return t + } + } + for _, t := range l.m[inst.Origin()] { + if Identical(inst, t) { + return t + } + } + return nil +} + +func (l *instanceLookup) add(inst *Named) { + for i, t := range l.buf { + if t == nil { + l.buf[i] = inst + return + } + } + if l.m == nil { + l.m = make(map[*Named][]*Named) + } + insts := l.m[inst.Origin()] + l.m[inst.Origin()] = append(insts, inst) +} + +// MissingMethod returns (nil, false) if V implements T, otherwise it +// returns a missing method required by T and whether it is missing or +// just has the wrong type: either a pointer receiver or wrong signature. +// +// For non-interface types V, or if static is set, V implements T if all +// methods of T are present in V. Otherwise (V is an interface and static +// is not set), MissingMethod only checks that methods of T which are also +// present in V have matching types (e.g., for a type assertion x.(T) where +// x is of interface type V). 
+func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType bool) { + return (*Checker)(nil).missingMethod(V, T, static, Identical, nil) +} + +// missingMethod is like MissingMethod but accepts a *Checker as receiver, +// a comparator equivalent for type comparison, and a *string for error causes. +// The receiver may be nil if missingMethod is invoked through an exported +// API call (such as MissingMethod), i.e., when all methods have been type- +// checked. +// The underlying type of T must be an interface; T (rather than its under- +// lying type) is used for better error messages (reported through *cause). +// The comparator is used to compare signatures. +// If a method is missing and cause is not nil, *cause describes the error. +func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y Type) bool, cause *string) (method *Func, wrongType bool) { + methods := under(T).(*Interface).typeSet().methods // T must be an interface + if len(methods) == 0 { + return nil, false + } + + const ( + ok = iota + notFound + wrongName + unexported + wrongSig + ambigSel + ptrRecv + field + ) + + state := ok + var m *Func // method on T we're trying to implement + var f *Func // method on V, if found (state is one of ok, wrongName, wrongSig) + + if u, _ := under(V).(*Interface); u != nil { + tset := u.typeSet() + for _, m = range methods { + _, f = tset.LookupMethod(m.pkg, m.name, false) + + if f == nil { + if !static { + continue + } + state = notFound + break + } + + if !equivalent(f.typ, m.typ) { + state = wrongSig + break + } + } + } else { + for _, m = range methods { + obj, index, indirect := lookupFieldOrMethodImpl(V, false, m.pkg, m.name, false) + + // check if m is ambiguous, on *V, or on V with case-folding + if obj == nil { + switch { + case index != nil: + state = ambigSel + case indirect: + state = ptrRecv + default: + state = notFound + obj, _, _ = lookupFieldOrMethodImpl(V, false, m.pkg, m.name, true /* fold case */) + 
f, _ = obj.(*Func) + if f != nil { + state = wrongName + if f.name == m.name { + // If the names are equal, f must be unexported + // (otherwise the package wouldn't matter). + state = unexported + } + } + } + break + } + + // we must have a method (not a struct field) + f, _ = obj.(*Func) + if f == nil { + state = field + break + } + + // methods may not have a fully set up signature yet + if check != nil { + check.objDecl(f, nil) + } + + if !equivalent(f.typ, m.typ) { + state = wrongSig + break + } + } + } + + if state == ok { + return nil, false + } + + if cause != nil { + if f != nil { + // This method may be formatted in funcString below, so must have a fully + // set up signature. + if check != nil { + check.objDecl(f, nil) + } + } + switch state { + case notFound: + switch { + case isInterfacePtr(V): + *cause = "(" + check.interfacePtrError(V) + ")" + case isInterfacePtr(T): + *cause = "(" + check.interfacePtrError(T) + ")" + default: + *cause = check.sprintf("(missing method %s)", m.Name()) + } + case wrongName: + fs, ms := check.funcString(f, false), check.funcString(m, false) + *cause = check.sprintf("(missing method %s)\n\t\thave %s\n\t\twant %s", m.Name(), fs, ms) + case unexported: + *cause = check.sprintf("(unexported method %s)", m.Name()) + case wrongSig: + fs, ms := check.funcString(f, false), check.funcString(m, false) + if fs == ms { + // Don't report "want Foo, have Foo". + // Add package information to disambiguate (go.dev/issue/54258). + fs, ms = check.funcString(f, true), check.funcString(m, true) + } + if fs == ms { + // We still have "want Foo, have Foo". + // This is most likely due to different type parameters with + // the same name appearing in the instantiated signatures + // (go.dev/issue/61685). + // Rather than reporting this misleading error cause, for now + // just point out that the method signature is incorrect. 
+ // TODO(gri) should find a good way to report the root cause + *cause = check.sprintf("(wrong type for method %s)", m.Name()) + break + } + *cause = check.sprintf("(wrong type for method %s)\n\t\thave %s\n\t\twant %s", m.Name(), fs, ms) + case ambigSel: + *cause = check.sprintf("(ambiguous selector %s.%s)", V, m.Name()) + case ptrRecv: + *cause = check.sprintf("(method %s has pointer receiver)", m.Name()) + case field: + *cause = check.sprintf("(%s.%s is a field, not a method)", V, m.Name()) + default: + unreachable() + } + } + + return m, state == wrongSig || state == ptrRecv +} + +func isInterfacePtr(T Type) bool { + p, _ := under(T).(*Pointer) + return p != nil && IsInterface(p.base) +} + +// check may be nil. +func (check *Checker) interfacePtrError(T Type) string { + assert(isInterfacePtr(T)) + if p, _ := under(T).(*Pointer); isTypeParam(p.base) { + return check.sprintf("type %s is pointer to type parameter, not type parameter", T) + } + return check.sprintf("type %s is pointer to interface, not interface", T) +} + +// funcString returns a string of the form name + signature for f. +// check may be nil. +func (check *Checker) funcString(f *Func, pkgInfo bool) string { + buf := bytes.NewBufferString(f.name) + var qf Qualifier + if check != nil && !pkgInfo { + qf = check.qualifier + } + w := newTypeWriter(buf, qf) + w.pkgInfo = pkgInfo + w.paramNames = false + w.signature(f.typ.(*Signature)) + return buf.String() +} + +// assertableTo reports whether a value of type V can be asserted to have type T. +// The receiver may be nil if assertableTo is invoked through an exported API call +// (such as AssertableTo), i.e., when all methods have been type-checked. +// The underlying type of V must be an interface. +// If the result is false and cause is not nil, *cause describes the error. +// TODO(gri) replace calls to this function with calls to newAssertableTo. 
+func (check *Checker) assertableTo(V, T Type, cause *string) bool { + // no static check is required if T is an interface + // spec: "If T is an interface type, x.(T) asserts that the + // dynamic type of x implements the interface T." + if IsInterface(T) { + return true + } + // TODO(gri) fix this for generalized interfaces + m, _ := check.missingMethod(T, V, false, Identical, cause) + return m == nil +} + +// newAssertableTo reports whether a value of type V can be asserted to have type T. +// It also implements behavior for interfaces that currently are only permitted +// in constraint position (we have not yet defined that behavior in the spec). +// The underlying type of V must be an interface. +// If the result is false and cause is not nil, *cause is set to the error cause. +func (check *Checker) newAssertableTo(pos syntax.Pos, V, T Type, cause *string) bool { + // no static check is required if T is an interface + // spec: "If T is an interface type, x.(T) asserts that the + // dynamic type of x implements the interface T." + if IsInterface(T) { + return true + } + return check.implements(pos, T, V, false, cause) +} + +// deref dereferences typ if it is a *Pointer (but not a *Named type +// with an underlying pointer type!) and returns its base and true. +// Otherwise it returns (typ, false). +func deref(typ Type) (Type, bool) { + if p, _ := Unalias(typ).(*Pointer); p != nil { + // p.base should never be nil, but be conservative + if p.base == nil { + if debug { + panic("pointer with nil base type (possibly due to an invalid cyclic declaration)") + } + return Typ[Invalid], true + } + return p.base, true + } + return typ, false +} + +// derefStructPtr dereferences typ if it is a (named or unnamed) pointer to a +// (named or unnamed) struct and returns its base. Otherwise it returns typ. 
+func derefStructPtr(typ Type) Type { + if p, _ := under(typ).(*Pointer); p != nil { + if _, ok := under(p.base).(*Struct); ok { + return p.base + } + } + return typ +} + +// concat returns the result of concatenating list and i. +// The result does not share its underlying array with list. +func concat(list []int, i int) []int { + var t []int + t = append(t, list...) + return append(t, i) +} + +// fieldIndex returns the index for the field with matching package and name, or a value < 0. +func fieldIndex(fields []*Var, pkg *Package, name string) int { + if name != "_" { + for i, f := range fields { + if f.sameId(pkg, name) { + return i + } + } + } + return -1 +} + +// lookupMethod returns the index of and method with matching package and name, or (-1, nil). +// If foldCase is true, method names are considered equal if they are equal with case folding +// and their packages are ignored (e.g., pkg1.m, pkg1.M, pkg2.m, and pkg2.M are all equal). +func lookupMethod(methods []*Func, pkg *Package, name string, foldCase bool) (int, *Func) { + if name != "_" { + for i, m := range methods { + if m.sameId(pkg, name) || foldCase && strings.EqualFold(m.name, name) { + return i, m + } + } + } + return -1, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/lookup_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/lookup_test.go new file mode 100644 index 0000000000000000000000000000000000000000..56fe48c1e2db5f85d931ea9e28abf3b8ea625815 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/lookup_test.go @@ -0,0 +1,55 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2_test + +import ( + "path/filepath" + "runtime" + "testing" + + . "cmd/compile/internal/types2" +) + +// BenchmarkLookupFieldOrMethod measures types.LookupFieldOrMethod performance. 
+// LookupFieldOrMethod is a performance hotspot for both type-checking and +// external API calls. +func BenchmarkLookupFieldOrMethod(b *testing.B) { + // Choose an arbitrary, large package. + path := filepath.Join(runtime.GOROOT(), "src", "net", "http") + + files, err := pkgFiles(path) + if err != nil { + b.Fatal(err) + } + + conf := Config{ + Importer: defaultImporter(), + } + + pkg, err := conf.Check("http", files, nil) + if err != nil { + b.Fatal(err) + } + + scope := pkg.Scope() + names := scope.Names() + + // Look up an arbitrary name for each type referenced in the package scope. + lookup := func() { + for _, name := range names { + typ := scope.Lookup(name).Type() + LookupFieldOrMethod(typ, true, pkg, "m") + } + } + + // Perform a lookup once, to ensure that any lazily-evaluated state is + // complete. + lookup() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + lookup() + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/main_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/main_test.go new file mode 100644 index 0000000000000000000000000000000000000000..42d26943c40c29e1a2a10eb668e8d43999433ef0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/main_test.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package types2_test + +import ( + "go/build" + "internal/testenv" + "os" + "testing" +) + +func TestMain(m *testing.M) { + build.Default.GOROOT = testenv.GOROOT(nil) + os.Exit(m.Run()) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/map.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/map.go new file mode 100644 index 0000000000000000000000000000000000000000..0d3464caae728b7b0207365264170337666953ad --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/map.go @@ -0,0 +1,24 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +// A Map represents a map type. +type Map struct { + key, elem Type +} + +// NewMap returns a new map for the given key and element types. +func NewMap(key, elem Type) *Map { + return &Map{key: key, elem: elem} +} + +// Key returns the key type of map m. +func (m *Map) Key() Type { return m.key } + +// Elem returns the element type of map m. +func (m *Map) Elem() Type { return m.elem } + +func (t *Map) Underlying() Type { return t } +func (t *Map) String() string { return TypeString(t, nil) } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/mono.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/mono.go new file mode 100644 index 0000000000000000000000000000000000000000..dae9230252692726a9907ab20d07a9b1928761d2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/mono.go @@ -0,0 +1,339 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import ( + "cmd/compile/internal/syntax" + . 
"internal/types/errors" +) + +// This file implements a check to validate that a Go package doesn't +// have unbounded recursive instantiation, which is not compatible +// with compilers using static instantiation (such as +// monomorphization). +// +// It implements a sort of "type flow" analysis by detecting which +// type parameters are instantiated with other type parameters (or +// types derived thereof). A package cannot be statically instantiated +// if the graph has any cycles involving at least one derived type. +// +// Concretely, we construct a directed, weighted graph. Vertices are +// used to represent type parameters as well as some defined +// types. Edges are used to represent how types depend on each other: +// +// * Everywhere a type-parameterized function or type is instantiated, +// we add edges to each type parameter from the vertices (if any) +// representing each type parameter or defined type referenced by +// the type argument. If the type argument is just the referenced +// type itself, then the edge has weight 0, otherwise 1. +// +// * For every defined type declared within a type-parameterized +// function or method, we add an edge of weight 1 to the defined +// type from each ambient type parameter. +// +// For example, given: +// +// func f[A, B any]() { +// type T int +// f[T, map[A]B]() +// } +// +// we construct vertices representing types A, B, and T. Because of +// declaration "type T int", we construct edges T<-A and T<-B with +// weight 1; and because of instantiation "f[T, map[A]B]" we construct +// edges A<-T with weight 0, and B<-A and B<-B with weight 1. +// +// Finally, we look for any positive-weight cycles. Zero-weight cycles +// are allowed because static instantiation will reach a fixed point. + +type monoGraph struct { + vertices []monoVertex + edges []monoEdge + + // canon maps method receiver type parameters to their respective + // receiver type's type parameters. 
+ canon map[*TypeParam]*TypeParam + + // nameIdx maps a defined type or (canonical) type parameter to its + // vertex index. + nameIdx map[*TypeName]int +} + +type monoVertex struct { + weight int // weight of heaviest known path to this vertex + pre int // previous edge (if any) in the above path + len int // length of the above path + + // obj is the defined type or type parameter represented by this + // vertex. + obj *TypeName +} + +type monoEdge struct { + dst, src int + weight int + + pos syntax.Pos + typ Type +} + +func (check *Checker) monomorph() { + // We detect unbounded instantiation cycles using a variant of + // Bellman-Ford's algorithm. Namely, instead of always running |V| + // iterations, we run until we either reach a fixed point or we've + // found a path of length |V|. This allows us to terminate earlier + // when there are no cycles, which should be the common case. + + again := true + for again { + again = false + + for i, edge := range check.mono.edges { + src := &check.mono.vertices[edge.src] + dst := &check.mono.vertices[edge.dst] + + // N.B., we're looking for the greatest weight paths, unlike + // typical Bellman-Ford. + w := src.weight + edge.weight + if w <= dst.weight { + continue + } + + dst.pre = i + dst.len = src.len + 1 + if dst.len == len(check.mono.vertices) { + check.reportInstanceLoop(edge.dst) + return + } + + dst.weight = w + again = true + } + } +} + +func (check *Checker) reportInstanceLoop(v int) { + var stack []int + seen := make([]bool, len(check.mono.vertices)) + + // We have a path that contains a cycle and ends at v, but v may + // only be reachable from the cycle, not on the cycle itself. We + // start by walking backwards along the path until we find a vertex + // that appears twice. + for !seen[v] { + stack = append(stack, v) + seen[v] = true + v = check.mono.edges[check.mono.vertices[v].pre].src + } + + // Trim any vertices we visited before visiting v the first + // time. 
Since v is the first vertex we found within the cycle, any + // vertices we visited earlier cannot be part of the cycle. + for stack[0] != v { + stack = stack[1:] + } + + // TODO(mdempsky): Pivot stack so we report the cycle from the top? + + var err error_ + err.code = InvalidInstanceCycle + obj0 := check.mono.vertices[v].obj + err.errorf(obj0, "instantiation cycle:") + + qf := RelativeTo(check.pkg) + for _, v := range stack { + edge := check.mono.edges[check.mono.vertices[v].pre] + obj := check.mono.vertices[edge.dst].obj + + switch obj.Type().(type) { + default: + panic("unexpected type") + case *Named: + err.errorf(edge.pos, "%s implicitly parameterized by %s", obj.Name(), TypeString(edge.typ, qf)) // secondary error, \t indented + case *TypeParam: + err.errorf(edge.pos, "%s instantiated as %s", obj.Name(), TypeString(edge.typ, qf)) // secondary error, \t indented + } + } + check.report(&err) +} + +// recordCanon records that tpar is the canonical type parameter +// corresponding to method type parameter mpar. +func (w *monoGraph) recordCanon(mpar, tpar *TypeParam) { + if w.canon == nil { + w.canon = make(map[*TypeParam]*TypeParam) + } + w.canon[mpar] = tpar +} + +// recordInstance records that the given type parameters were +// instantiated with the corresponding type arguments. +func (w *monoGraph) recordInstance(pkg *Package, pos syntax.Pos, tparams []*TypeParam, targs []Type, xlist []syntax.Expr) { + for i, tpar := range tparams { + pos := pos + if i < len(xlist) { + pos = syntax.StartPos(xlist[i]) + } + w.assign(pkg, pos, tpar, targs[i]) + } +} + +// assign records that tpar was instantiated as targ at pos. +func (w *monoGraph) assign(pkg *Package, pos syntax.Pos, tpar *TypeParam, targ Type) { + // Go generics do not have an analog to C++`s template-templates, + // where a template parameter can itself be an instantiable + // template. So any instantiation cycles must occur within a single + // package. 
Accordingly, we can ignore instantiations of imported + // type parameters. + // + // TODO(mdempsky): Push this check up into recordInstance? All type + // parameters in a list will appear in the same package. + if tpar.Obj().Pkg() != pkg { + return + } + + // flow adds an edge from vertex src representing that typ flows to tpar. + flow := func(src int, typ Type) { + weight := 1 + if typ == targ { + weight = 0 + } + + w.addEdge(w.typeParamVertex(tpar), src, weight, pos, targ) + } + + // Recursively walk the type argument to find any defined types or + // type parameters. + var do func(typ Type) + do = func(typ Type) { + switch typ := Unalias(typ).(type) { + default: + panic("unexpected type") + + case *TypeParam: + assert(typ.Obj().Pkg() == pkg) + flow(w.typeParamVertex(typ), typ) + + case *Named: + if src := w.localNamedVertex(pkg, typ.Origin()); src >= 0 { + flow(src, typ) + } + + targs := typ.TypeArgs() + for i := 0; i < targs.Len(); i++ { + do(targs.At(i)) + } + + case *Array: + do(typ.Elem()) + case *Basic: + // ok + case *Chan: + do(typ.Elem()) + case *Map: + do(typ.Key()) + do(typ.Elem()) + case *Pointer: + do(typ.Elem()) + case *Slice: + do(typ.Elem()) + + case *Interface: + for i := 0; i < typ.NumMethods(); i++ { + do(typ.Method(i).Type()) + } + case *Signature: + tuple := func(tup *Tuple) { + for i := 0; i < tup.Len(); i++ { + do(tup.At(i).Type()) + } + } + tuple(typ.Params()) + tuple(typ.Results()) + case *Struct: + for i := 0; i < typ.NumFields(); i++ { + do(typ.Field(i).Type()) + } + } + } + do(targ) +} + +// localNamedVertex returns the index of the vertex representing +// named, or -1 if named doesn't need representation. 
+func (w *monoGraph) localNamedVertex(pkg *Package, named *Named) int { + obj := named.Obj() + if obj.Pkg() != pkg { + return -1 // imported type + } + + root := pkg.Scope() + if obj.Parent() == root { + return -1 // package scope, no ambient type parameters + } + + if idx, ok := w.nameIdx[obj]; ok { + return idx + } + + idx := -1 + + // Walk the type definition's scope to find any ambient type + // parameters that it's implicitly parameterized by. + for scope := obj.Parent(); scope != root; scope = scope.Parent() { + for _, elem := range scope.elems { + if elem, ok := elem.(*TypeName); ok && !elem.IsAlias() && cmpPos(elem.Pos(), obj.Pos()) < 0 { + if tpar, ok := elem.Type().(*TypeParam); ok { + if idx < 0 { + idx = len(w.vertices) + w.vertices = append(w.vertices, monoVertex{obj: obj}) + } + + w.addEdge(idx, w.typeParamVertex(tpar), 1, obj.Pos(), tpar) + } + } + } + } + + if w.nameIdx == nil { + w.nameIdx = make(map[*TypeName]int) + } + w.nameIdx[obj] = idx + return idx +} + +// typeParamVertex returns the index of the vertex representing tpar. +func (w *monoGraph) typeParamVertex(tpar *TypeParam) int { + if x, ok := w.canon[tpar]; ok { + tpar = x + } + + obj := tpar.Obj() + + if idx, ok := w.nameIdx[obj]; ok { + return idx + } + + if w.nameIdx == nil { + w.nameIdx = make(map[*TypeName]int) + } + + idx := len(w.vertices) + w.vertices = append(w.vertices, monoVertex{obj: obj}) + w.nameIdx[obj] = idx + return idx +} + +func (w *monoGraph) addEdge(dst, src, weight int, pos syntax.Pos, typ Type) { + // TODO(mdempsky): Deduplicate redundant edges? 
+ w.edges = append(w.edges, monoEdge{ + dst: dst, + src: src, + weight: weight, + + pos: pos, + typ: typ, + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/mono_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/mono_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c2955a282818b46df420b8878c71a54d0f2e3c37 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/mono_test.go @@ -0,0 +1,82 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2_test + +import ( + "cmd/compile/internal/types2" + "errors" + "fmt" + "strings" + "testing" +) + +func checkMono(t *testing.T, body string) error { + src := "package x; import `unsafe`; var _ unsafe.Pointer;\n" + body + + var buf strings.Builder + conf := types2.Config{ + Error: func(err error) { fmt.Fprintln(&buf, err) }, + Importer: defaultImporter(), + } + typecheck(src, &conf, nil) + if buf.Len() == 0 { + return nil + } + return errors.New(strings.TrimRight(buf.String(), "\n")) +} + +func TestMonoGood(t *testing.T) { + for i, good := range goods { + if err := checkMono(t, good); err != nil { + t.Errorf("%d: unexpected failure: %v", i, err) + } + } +} + +func TestMonoBad(t *testing.T) { + for i, bad := range bads { + if err := checkMono(t, bad); err == nil { + t.Errorf("%d: unexpected success", i) + } else { + t.Log(err) + } + } +} + +var goods = []string{ + "func F[T any](x T) { F(x) }", + "func F[T, U, V any]() { F[U, V, T](); F[V, T, U]() }", + "type Ring[A, B, C any] struct { L *Ring[B, C, A]; R *Ring[C, A, B] }", + "func F[T any]() { type U[T any] [unsafe.Sizeof(F[*T])]byte }", + "func F[T any]() { type U[T any] [unsafe.Sizeof(F[*T])]byte; var _ U[int] }", + "type U[T any] [unsafe.Sizeof(F[*T])]byte; func F[T any]() { var _ U[U[int]] }", + "func F[T any]() { type A = int; F[A]() }", +} + 
+// TODO(mdempsky): Validate specific error messages and positioning. + +var bads = []string{ + "func F[T any](x T) { F(&x) }", + "func F[T any]() { F[*T]() }", + "func F[T any]() { F[[]T]() }", + "func F[T any]() { F[[1]T]() }", + "func F[T any]() { F[chan T]() }", + "func F[T any]() { F[map[*T]int]() }", + "func F[T any]() { F[map[error]T]() }", + "func F[T any]() { F[func(T)]() }", + "func F[T any]() { F[func() T]() }", + "func F[T any]() { F[struct{ t T }]() }", + "func F[T any]() { F[interface{ t() T }]() }", + "type U[_ any] int; func F[T any]() { F[U[T]]() }", + "func F[T any]() { type U int; F[U]() }", + "func F[T any]() { type U int; F[*U]() }", + "type U[T any] int; func (U[T]) m() { var _ U[*T] }", + "type U[T any] int; func (*U[T]) m() { var _ U[*T] }", + "type U[T1 any] [unsafe.Sizeof(F[*T1])]byte; func F[T2 any]() { var _ U[T2] }", + "func F[A, B, C, D, E any]() { F[B, C, D, E, *A]() }", + "type U[_ any] int; const X = unsafe.Sizeof(func() { type A[T any] U[A[*T]] })", + "func F[T any]() { type A = *T; F[A]() }", + "type A[T any] struct { _ A[*T] }", +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/named.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/named.go new file mode 100644 index 0000000000000000000000000000000000000000..893247de35b671e063990fb70586ca9db7447d01 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/named.go @@ -0,0 +1,658 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import ( + "cmd/compile/internal/syntax" + "sync" + "sync/atomic" +) + +// Type-checking Named types is subtle, because they may be recursively +// defined, and because their full details may be spread across multiple +// declarations (via methods). 
For this reason they are type-checked lazily, +// to avoid information being accessed before it is complete. +// +// Conceptually, it is helpful to think of named types as having two distinct +// sets of information: +// - "LHS" information, defining their identity: Obj() and TypeArgs() +// - "RHS" information, defining their details: TypeParams(), Underlying(), +// and methods. +// +// In this taxonomy, LHS information is available immediately, but RHS +// information is lazy. Specifically, a named type N may be constructed in any +// of the following ways: +// 1. type-checked from the source +// 2. loaded eagerly from export data +// 3. loaded lazily from export data (when using unified IR) +// 4. instantiated from a generic type +// +// In cases 1, 3, and 4, it is possible that the underlying type or methods of +// N may not be immediately available. +// - During type-checking, we allocate N before type-checking its underlying +// type or methods, so that we may resolve recursive references. +// - When loading from export data, we may load its methods and underlying +// type lazily using a provided load function. +// - After instantiating, we lazily expand the underlying type and methods +// (note that instances may be created while still in the process of +// type-checking the original type declaration). +// +// In cases 3 and 4 this lazy construction may also occur concurrently, due to +// concurrent use of the type checker API (after type checking or importing has +// finished). It is critical that we keep track of state, so that Named types +// are constructed exactly once and so that we do not access their details too +// soon. +// +// We achieve this by tracking state with an atomic state variable, and +// guarding potentially concurrent calculations with a mutex. At any point in +// time this state variable determines which data on N may be accessed. 
As +// state monotonically progresses, any data available at state M may be +// accessed without acquiring the mutex at state N, provided N >= M. +// +// GLOSSARY: Here are a few terms used in this file to describe Named types: +// - We say that a Named type is "instantiated" if it has been constructed by +// instantiating a generic named type with type arguments. +// - We say that a Named type is "declared" if it corresponds to a type +// declaration in the source. Instantiated named types correspond to a type +// instantiation in the source, not a declaration. But their Origin type is +// a declared type. +// - We say that a Named type is "resolved" if its RHS information has been +// loaded or fully type-checked. For Named types constructed from export +// data, this may involve invoking a loader function to extract information +// from export data. For instantiated named types this involves reading +// information from their origin. +// - We say that a Named type is "expanded" if it is an instantiated type and +// type parameters in its underlying type and methods have been substituted +// with the type arguments from the instantiation. A type may be partially +// expanded if some but not all of these details have been substituted. +// Similarly, we refer to these individual details (underlying type or +// method) as being "expanded". +// - When all information is known for a named type, we say it is "complete". +// +// Some invariants to keep in mind: each declared Named type has a single +// corresponding object, and that object's type is the (possibly generic) Named +// type. Declared Named types are identical if and only if their pointers are +// identical. On the other hand, multiple instantiated Named types may be +// identical even though their pointers are not identical. One has to use +// Identical to compare them. 
For instantiated named types, their obj is a +// synthetic placeholder that records their position of the corresponding +// instantiation in the source (if they were constructed during type checking). +// +// To prevent infinite expansion of named instances that are created outside of +// type-checking, instances share a Context with other instances created during +// their expansion. Via the pidgeonhole principle, this guarantees that in the +// presence of a cycle of named types, expansion will eventually find an +// existing instance in the Context and short-circuit the expansion. +// +// Once an instance is complete, we can nil out this shared Context to unpin +// memory, though this Context may still be held by other incomplete instances +// in its "lineage". + +// A Named represents a named (defined) type. +type Named struct { + check *Checker // non-nil during type-checking; nil otherwise + obj *TypeName // corresponding declared object for declared types; see above for instantiated types + + // fromRHS holds the type (on RHS of declaration) this *Named type is derived + // from (for cycle reporting). Only used by validType, and therefore does not + // require synchronization. + fromRHS Type + + // information for instantiated types; nil otherwise + inst *instance + + mu sync.Mutex // guards all fields below + state_ uint32 // the current state of this type; must only be accessed atomically + underlying Type // possibly a *Named during setup; never a *Named once set up completely + tparams *TypeParamList // type parameters, or nil + + // methods declared for this type (not the method set of this type) + // Signatures are type-checked lazily. + // For non-instantiated types, this is a fully populated list of methods. For + // instantiated types, methods are individually expanded when they are first + // accessed. + methods []*Func + + // loader may be provided to lazily load type parameters, underlying type, and methods. 
+ loader func(*Named) (tparams []*TypeParam, underlying Type, methods []*Func) +} + +// instance holds information that is only necessary for instantiated named +// types. +type instance struct { + orig *Named // original, uninstantiated type + targs *TypeList // type arguments + expandedMethods int // number of expanded methods; expandedMethods <= len(orig.methods) + ctxt *Context // local Context; set to nil after full expansion +} + +// namedState represents the possible states that a named type may assume. +type namedState uint32 + +const ( + unresolved namedState = iota // tparams, underlying type and methods might be unavailable + resolved // resolve has run; methods might be incomplete (for instances) + complete // all data is known +) + +// NewNamed returns a new named type for the given type name, underlying type, and associated methods. +// If the given type name obj doesn't have a type yet, its type is set to the returned named type. +// The underlying type must not be a *Named. +func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named { + if asNamed(underlying) != nil { + panic("underlying type must not be *Named") + } + return (*Checker)(nil).newNamed(obj, underlying, methods) +} + +// resolve resolves the type parameters, methods, and underlying type of n. +// This information may be loaded from a provided loader function, or computed +// from an origin type (in the case of instances). +// +// After resolution, the type parameters, methods, and underlying type of n are +// accessible; but if n is an instantiated type, its methods may still be +// unexpanded. +func (n *Named) resolve() *Named { + if n.state() >= resolved { // avoid locking below + return n + } + + // TODO(rfindley): if n.check is non-nil we can avoid locking here, since + // type-checking is not concurrent. Evaluate if this is worth doing. 
+ n.mu.Lock() + defer n.mu.Unlock() + + if n.state() >= resolved { + return n + } + + if n.inst != nil { + assert(n.underlying == nil) // n is an unresolved instance + assert(n.loader == nil) // instances are created by instantiation, in which case n.loader is nil + + orig := n.inst.orig + orig.resolve() + underlying := n.expandUnderlying() + + n.tparams = orig.tparams + n.underlying = underlying + n.fromRHS = orig.fromRHS // for cycle detection + + if len(orig.methods) == 0 { + n.setState(complete) // nothing further to do + n.inst.ctxt = nil + } else { + n.setState(resolved) + } + return n + } + + // TODO(mdempsky): Since we're passing n to the loader anyway + // (necessary because types2 expects the receiver type for methods + // on defined interface types to be the Named rather than the + // underlying Interface), maybe it should just handle calling + // SetTypeParams, SetUnderlying, and AddMethod instead? Those + // methods would need to support reentrant calls though. It would + // also make the API more future-proof towards further extensions. + if n.loader != nil { + assert(n.underlying == nil) + assert(n.TypeArgs().Len() == 0) // instances are created by instantiation, in which case n.loader is nil + + tparams, underlying, methods := n.loader(n) + + n.tparams = bindTParams(tparams) + n.underlying = underlying + n.fromRHS = underlying // for cycle detection + n.methods = methods + n.loader = nil + } + + n.setState(complete) + return n +} + +// state atomically accesses the current state of the receiver. +func (n *Named) state() namedState { + return namedState(atomic.LoadUint32(&n.state_)) +} + +// setState atomically stores the given state for n. +// Must only be called while holding n.mu. +func (n *Named) setState(state namedState) { + atomic.StoreUint32(&n.state_, uint32(state)) +} + +// newNamed is like NewNamed but with a *Checker receiver. 
+func (check *Checker) newNamed(obj *TypeName, underlying Type, methods []*Func) *Named { + typ := &Named{check: check, obj: obj, fromRHS: underlying, underlying: underlying, methods: methods} + if obj.typ == nil { + obj.typ = typ + } + // Ensure that typ is always sanity-checked. + if check != nil { + check.needsCleanup(typ) + } + return typ +} + +// newNamedInstance creates a new named instance for the given origin and type +// arguments, recording pos as the position of its synthetic object (for error +// reporting). +// +// If set, expanding is the named type instance currently being expanded, that +// led to the creation of this instance. +func (check *Checker) newNamedInstance(pos syntax.Pos, orig *Named, targs []Type, expanding *Named) *Named { + assert(len(targs) > 0) + + obj := NewTypeName(pos, orig.obj.pkg, orig.obj.name, nil) + inst := &instance{orig: orig, targs: newTypeList(targs)} + + // Only pass the expanding context to the new instance if their packages + // match. Since type reference cycles are only possible within a single + // package, this is sufficient for the purposes of short-circuiting cycles. + // Avoiding passing the context in other cases prevents unnecessary coupling + // of types across packages. + if expanding != nil && expanding.Obj().pkg == obj.pkg { + inst.ctxt = expanding.inst.ctxt + } + typ := &Named{check: check, obj: obj, inst: inst} + obj.typ = typ + // Ensure that typ is always sanity-checked. + if check != nil { + check.needsCleanup(typ) + } + return typ +} + +func (t *Named) cleanup() { + assert(t.inst == nil || t.inst.orig.inst == nil) + // Ensure that every defined type created in the course of type-checking has + // either non-*Named underlying type, or is unexpanded. + // + // This guarantees that we don't leak any types whose underlying type is + // *Named, because any unexpanded instances will lazily compute their + // underlying type by substituting in the underlying type of their origin. 
+ // The origin must have either been imported or type-checked and expanded + // here, and in either case its underlying type will be fully expanded. + switch t.underlying.(type) { + case nil: + if t.TypeArgs().Len() == 0 { + panic("nil underlying") + } + case *Named: + t.under() // t.under may add entries to check.cleaners + } + t.check = nil +} + +// Obj returns the type name for the declaration defining the named type t. For +// instantiated types, this is same as the type name of the origin type. +func (t *Named) Obj() *TypeName { + if t.inst == nil { + return t.obj + } + return t.inst.orig.obj +} + +// Origin returns the generic type from which the named type t is +// instantiated. If t is not an instantiated type, the result is t. +func (t *Named) Origin() *Named { + if t.inst == nil { + return t + } + return t.inst.orig +} + +// TypeParams returns the type parameters of the named type t, or nil. +// The result is non-nil for an (originally) generic type even if it is instantiated. +func (t *Named) TypeParams() *TypeParamList { return t.resolve().tparams } + +// SetTypeParams sets the type parameters of the named type t. +// t must not have type arguments. +func (t *Named) SetTypeParams(tparams []*TypeParam) { + assert(t.inst == nil) + t.resolve().tparams = bindTParams(tparams) +} + +// TypeArgs returns the type arguments used to instantiate the named type t. +func (t *Named) TypeArgs() *TypeList { + if t.inst == nil { + return nil + } + return t.inst.targs +} + +// NumMethods returns the number of explicit methods defined for t. +func (t *Named) NumMethods() int { + return len(t.Origin().resolve().methods) +} + +// Method returns the i'th method of named type t for 0 <= i < t.NumMethods(). +// +// For an ordinary or instantiated type t, the receiver base type of this +// method is the named type t. For an uninstantiated generic type t, each +// method receiver is instantiated with its receiver type parameters. 
+func (t *Named) Method(i int) *Func { + t.resolve() + + if t.state() >= complete { + return t.methods[i] + } + + assert(t.inst != nil) // only instances should have incomplete methods + orig := t.inst.orig + + t.mu.Lock() + defer t.mu.Unlock() + + if len(t.methods) != len(orig.methods) { + assert(len(t.methods) == 0) + t.methods = make([]*Func, len(orig.methods)) + } + + if t.methods[i] == nil { + assert(t.inst.ctxt != nil) // we should still have a context remaining from the resolution phase + t.methods[i] = t.expandMethod(i) + t.inst.expandedMethods++ + + // Check if we've created all methods at this point. If we have, mark the + // type as fully expanded. + if t.inst.expandedMethods == len(orig.methods) { + t.setState(complete) + t.inst.ctxt = nil // no need for a context anymore + } + } + + return t.methods[i] +} + +// expandMethod substitutes type arguments in the i'th method for an +// instantiated receiver. +func (t *Named) expandMethod(i int) *Func { + // t.orig.methods is not lazy. origm is the method instantiated with its + // receiver type parameters (the "origin" method). + origm := t.inst.orig.Method(i) + assert(origm != nil) + + check := t.check + // Ensure that the original method is type-checked. + if check != nil { + check.objDecl(origm, nil) + } + + origSig := origm.typ.(*Signature) + rbase, _ := deref(origSig.Recv().Type()) + + // If rbase is t, then origm is already the instantiated method we're looking + // for. In this case, we return origm to preserve the invariant that + // traversing Method->Receiver Type->Method should get back to the same + // method. + // + // This occurs if t is instantiated with the receiver type parameters, as in + // the use of m in func (r T[_]) m() { r.m() }. + if rbase == t { + return origm + } + + sig := origSig + // We can only substitute if we have a correspondence between type arguments + // and type parameters. This check is necessary in the presence of invalid + // code. 
+ if origSig.RecvTypeParams().Len() == t.inst.targs.Len() { + smap := makeSubstMap(origSig.RecvTypeParams().list(), t.inst.targs.list()) + var ctxt *Context + if check != nil { + ctxt = check.context() + } + sig = check.subst(origm.pos, origSig, smap, t, ctxt).(*Signature) + } + + if sig == origSig { + // No substitution occurred, but we still need to create a new signature to + // hold the instantiated receiver. + copy := *origSig + sig = © + } + + var rtyp Type + if origm.hasPtrRecv() { + rtyp = NewPointer(t) + } else { + rtyp = t + } + + sig.recv = substVar(origSig.recv, rtyp) + return substFunc(origm, sig) +} + +// SetUnderlying sets the underlying type and marks t as complete. +// t must not have type arguments. +func (t *Named) SetUnderlying(underlying Type) { + assert(t.inst == nil) + if underlying == nil { + panic("underlying type must not be nil") + } + if asNamed(underlying) != nil { + panic("underlying type must not be *Named") + } + t.resolve().underlying = underlying + if t.fromRHS == nil { + t.fromRHS = underlying // for cycle detection + } +} + +// AddMethod adds method m unless it is already in the method list. +// t must not have type arguments. +func (t *Named) AddMethod(m *Func) { + assert(t.inst == nil) + t.resolve() + if i, _ := lookupMethod(t.methods, m.pkg, m.name, false); i < 0 { + t.methods = append(t.methods, m) + } +} + +// TODO(gri) Investigate if Unalias can be moved to where underlying is set. +func (t *Named) Underlying() Type { return Unalias(t.resolve().underlying) } +func (t *Named) String() string { return TypeString(t, nil) } + +// ---------------------------------------------------------------------------- +// Implementation +// +// TODO(rfindley): reorganize the loading and expansion methods under this +// heading. + +// under returns the expanded underlying type of n0; possibly by following +// forward chains of named types. 
If an underlying type is found, resolve +// the chain by setting the underlying type for each defined type in the +// chain before returning it. If no underlying type is found or a cycle +// is detected, the result is Typ[Invalid]. If a cycle is detected and +// n0.check != nil, the cycle is reported. +// +// This is necessary because the underlying type of named may be itself a +// named type that is incomplete: +// +// type ( +// A B +// B *C +// C A +// ) +// +// The type of C is the (named) type of A which is incomplete, +// and which has as its underlying type the named type B. +func (n0 *Named) under() Type { + u := n0.Underlying() + + // If the underlying type of a defined type is not a defined + // (incl. instance) type, then that is the desired underlying + // type. + var n1 *Named + switch u1 := u.(type) { + case nil: + // After expansion via Underlying(), we should never encounter a nil + // underlying. + panic("nil underlying") + default: + // common case + return u + case *Named: + // handled below + n1 = u1 + } + + if n0.check == nil { + panic("Named.check == nil but type is incomplete") + } + + // Invariant: after this point n0 as well as any named types in its + // underlying chain should be set up when this function exits. + check := n0.check + n := n0 + + seen := make(map[*Named]int) // types that need their underlying type resolved + var path []Object // objects encountered, for cycle reporting + +loop: + for { + seen[n] = len(seen) + path = append(path, n.obj) + n = n1 + if i, ok := seen[n]; ok { + // cycle + check.cycleError(path[i:]) + u = Typ[Invalid] + break + } + u = n.Underlying() + switch u1 := u.(type) { + case nil: + u = Typ[Invalid] + break loop + default: + break loop + case *Named: + // Continue collecting *Named types in the chain. + n1 = u1 + } + } + + for n := range seen { + // We should never have to update the underlying type of an imported type; + // those underlying types should have been resolved during the import. 
+ // Also, doing so would lead to a race condition (was go.dev/issue/31749). + // Do this check always, not just in debug mode (it's cheap). + if n.obj.pkg != check.pkg { + panic("imported type with unresolved underlying type") + } + n.underlying = u + } + + return u +} + +func (n *Named) lookupMethod(pkg *Package, name string, foldCase bool) (int, *Func) { + n.resolve() + // If n is an instance, we may not have yet instantiated all of its methods. + // Look up the method index in orig, and only instantiate method at the + // matching index (if any). + i, _ := lookupMethod(n.Origin().methods, pkg, name, foldCase) + if i < 0 { + return -1, nil + } + // For instances, m.Method(i) will be different from the orig method. + return i, n.Method(i) +} + +// context returns the type-checker context. +func (check *Checker) context() *Context { + if check.ctxt == nil { + check.ctxt = NewContext() + } + return check.ctxt +} + +// expandUnderlying substitutes type arguments in the underlying type n.orig, +// returning the result. Returns Typ[Invalid] if there was an error. +func (n *Named) expandUnderlying() Type { + check := n.check + if check != nil && check.conf.Trace { + check.trace(n.obj.pos, "-- Named.expandUnderlying %s", n) + check.indent++ + defer func() { + check.indent-- + check.trace(n.obj.pos, "=> %s (tparams = %s, under = %s)", n, n.tparams.list(), n.underlying) + }() + } + + assert(n.inst.orig.underlying != nil) + if n.inst.ctxt == nil { + n.inst.ctxt = NewContext() + } + + orig := n.inst.orig + targs := n.inst.targs + + if asNamed(orig.underlying) != nil { + // We should only get a Named underlying type here during type checking + // (for example, in recursive type declarations). + assert(check != nil) + } + + if orig.tparams.Len() != targs.Len() { + // Mismatching arg and tparam length may be checked elsewhere. + return Typ[Invalid] + } + + // Ensure that an instance is recorded before substituting, so that we + // resolve n for any recursive references. 
+ h := n.inst.ctxt.instanceHash(orig, targs.list()) + n2 := n.inst.ctxt.update(h, orig, n.TypeArgs().list(), n) + assert(n == n2) + + smap := makeSubstMap(orig.tparams.list(), targs.list()) + var ctxt *Context + if check != nil { + ctxt = check.context() + } + underlying := n.check.subst(n.obj.pos, orig.underlying, smap, n, ctxt) + // If the underlying type of n is an interface, we need to set the receiver of + // its methods accurately -- we set the receiver of interface methods on + // the RHS of a type declaration to the defined type. + if iface, _ := underlying.(*Interface); iface != nil { + if methods, copied := replaceRecvType(iface.methods, orig, n); copied { + // If the underlying type doesn't actually use type parameters, it's + // possible that it wasn't substituted. In this case we need to create + // a new *Interface before modifying receivers. + if iface == orig.underlying { + old := iface + iface = check.newInterface() + iface.embeddeds = old.embeddeds + assert(old.complete) // otherwise we are copying incomplete data + iface.complete = old.complete + iface.implicit = old.implicit // should be false but be conservative + underlying = iface + } + iface.methods = methods + iface.tset = nil // recompute type set with new methods + + // If check != nil, check.newInterface will have saved the interface for later completion. + if check == nil { // golang/go#61561: all newly created interfaces must be fully evaluated + iface.typeSet() + } + } + } + + return underlying +} + +// safeUnderlying returns the underlying type of typ without expanding +// instances, to avoid infinite recursion. +// +// TODO(rfindley): eliminate this function or give it a better name. 
+func safeUnderlying(typ Type) Type { + if t := asNamed(typ); t != nil { + return t.underlying + } + return typ.Underlying() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/named_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/named_test.go new file mode 100644 index 0000000000000000000000000000000000000000..705dcaee27dbf6d6bbd1688e86e267e76a091ce6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/named_test.go @@ -0,0 +1,114 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2_test + +import ( + "testing" + + "cmd/compile/internal/syntax" + . "cmd/compile/internal/types2" +) + +func BenchmarkNamed(b *testing.B) { + const src = ` +package p + +type T struct { + P int +} + +func (T) M(int) {} +func (T) N() (i int) { return } + +type G[P any] struct { + F P +} + +func (G[P]) M(P) {} +func (G[P]) N() (p P) { return } + +type Inst = G[int] + ` + pkg := mustTypecheck(src, nil, nil) + + var ( + T = pkg.Scope().Lookup("T").Type() + G = pkg.Scope().Lookup("G").Type() + SrcInst = pkg.Scope().Lookup("Inst").Type() + UserInst = mustInstantiate(b, G, Typ[Int]) + ) + + tests := []struct { + name string + typ Type + }{ + {"nongeneric", T}, + {"generic", G}, + {"src instance", SrcInst}, + {"user instance", UserInst}, + } + + b.Run("Underlying", func(b *testing.B) { + for _, test := range tests { + b.Run(test.name, func(b *testing.B) { + // Access underlying once, to trigger any lazy calculation. + _ = test.typ.Underlying() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = test.typ.Underlying() + } + }) + } + }) +} + +func mustInstantiate(tb testing.TB, orig Type, targs ...Type) Type { + inst, err := Instantiate(nil, orig, targs, true) + if err != nil { + tb.Fatal(err) + } + return inst +} + +// Test that types do not expand infinitely, as in go.dev/issue/52715. 
+func TestFiniteTypeExpansion(t *testing.T) { + const src = ` +package p + +type Tree[T any] struct { + *Node[T] +} + +func (*Tree[R]) N(r R) R { return r } + +type Node[T any] struct { + *Tree[T] +} + +func (Node[Q]) M(Q) {} + +type Inst = *Tree[int] +` + + f := mustParse(src) + pkg := NewPackage("p", f.PkgName.Value) + if err := NewChecker(nil, pkg, nil).Files([]*syntax.File{f}); err != nil { + t.Fatal(err) + } + + firstFieldType := func(n *Named) *Named { + return n.Underlying().(*Struct).Field(0).Type().(*Pointer).Elem().(*Named) + } + + Inst := pkg.Scope().Lookup("Inst").Type().(*Pointer).Elem().(*Named) + Node := firstFieldType(Inst) + Tree := firstFieldType(Node) + if !Identical(Inst, Tree) { + t.Fatalf("Not a cycle: got %v, want %v", Tree, Inst) + } + if Inst != Tree { + t.Errorf("Duplicate instances in cycle: %s (%p) -> %s (%p) -> %s (%p)", Inst, Inst, Node, Node, Tree, Tree) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/object.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/object.go new file mode 100644 index 0000000000000000000000000000000000000000..251587224b825451499be0849a96037a5b68e526 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/object.go @@ -0,0 +1,619 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import ( + "bytes" + "cmd/compile/internal/syntax" + "fmt" + "go/constant" + "unicode" + "unicode/utf8" +) + +// An Object describes a named language entity such as a package, +// constant, type, variable, function (incl. methods), or label. +// All objects implement the Object interface. 
+type Object interface { + Parent() *Scope // scope in which this object is declared; nil for methods and struct fields + Pos() syntax.Pos // position of object identifier in declaration + Pkg() *Package // package to which this object belongs; nil for labels and objects in the Universe scope + Name() string // package local object name + Type() Type // object type + Exported() bool // reports whether the name starts with a capital letter + Id() string // object name if exported, qualified name if not exported (see func Id) + + // String returns a human-readable string of the object. + String() string + + // order reflects a package-level object's source order: if object + // a is before object b in the source, then a.order() < b.order(). + // order returns a value > 0 for package-level objects; it returns + // 0 for all other objects (including objects in file scopes). + order() uint32 + + // color returns the object's color. + color() color + + // setType sets the type of the object. + setType(Type) + + // setOrder sets the order number of the object. It must be > 0. + setOrder(uint32) + + // setColor sets the object's color. It must not be white. + setColor(color color) + + // setParent sets the parent scope of the object. + setParent(*Scope) + + // sameId reports whether obj.Id() and Id(pkg, name) are the same. + sameId(pkg *Package, name string) bool + + // scopePos returns the start position of the scope of this Object + scopePos() syntax.Pos + + // setScopePos sets the start position of the scope for this Object. + setScopePos(pos syntax.Pos) +} + +func isExported(name string) bool { + ch, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(ch) +} + +// Id returns name if it is exported, otherwise it +// returns the name qualified with the package path. 
+func Id(pkg *Package, name string) string { + if isExported(name) { + return name + } + // unexported names need the package path for differentiation + // (if there's no package, make sure we don't start with '.' + // as that may change the order of methods between a setup + // inside a package and outside a package - which breaks some + // tests) + path := "_" + // pkg is nil for objects in Universe scope and possibly types + // introduced via Eval (see also comment in object.sameId) + if pkg != nil && pkg.path != "" { + path = pkg.path + } + return path + "." + name +} + +// An object implements the common parts of an Object. +type object struct { + parent *Scope + pos syntax.Pos + pkg *Package + name string + typ Type + order_ uint32 + color_ color + scopePos_ syntax.Pos +} + +// color encodes the color of an object (see Checker.objDecl for details). +type color uint32 + +// An object may be painted in one of three colors. +// Color values other than white or black are considered grey. +const ( + white color = iota + black + grey // must be > white and black +) + +func (c color) String() string { + switch c { + case white: + return "white" + case black: + return "black" + default: + return "grey" + } +} + +// colorFor returns the (initial) color for an object depending on +// whether its type t is known or not. +func colorFor(t Type) color { + if t != nil { + return black + } + return white +} + +// Parent returns the scope in which the object is declared. +// The result is nil for methods and struct fields. +func (obj *object) Parent() *Scope { return obj.parent } + +// Pos returns the declaration position of the object's identifier. +func (obj *object) Pos() syntax.Pos { return obj.pos } + +// Pkg returns the package to which the object belongs. +// The result is nil for labels and objects in the Universe scope. +func (obj *object) Pkg() *Package { return obj.pkg } + +// Name returns the object's (package-local, unqualified) name. 
+func (obj *object) Name() string { return obj.name } + +// Type returns the object's type. +func (obj *object) Type() Type { return obj.typ } + +// Exported reports whether the object is exported (starts with a capital letter). +// It doesn't take into account whether the object is in a local (function) scope +// or not. +func (obj *object) Exported() bool { return isExported(obj.name) } + +// Id is a wrapper for Id(obj.Pkg(), obj.Name()). +func (obj *object) Id() string { return Id(obj.pkg, obj.name) } + +func (obj *object) String() string { panic("abstract") } +func (obj *object) order() uint32 { return obj.order_ } +func (obj *object) color() color { return obj.color_ } +func (obj *object) scopePos() syntax.Pos { return obj.scopePos_ } + +func (obj *object) setParent(parent *Scope) { obj.parent = parent } +func (obj *object) setType(typ Type) { obj.typ = typ } +func (obj *object) setOrder(order uint32) { assert(order > 0); obj.order_ = order } +func (obj *object) setColor(color color) { assert(color != white); obj.color_ = color } +func (obj *object) setScopePos(pos syntax.Pos) { obj.scopePos_ = pos } + +func (obj *object) sameId(pkg *Package, name string) bool { + // spec: + // "Two identifiers are different if they are spelled differently, + // or if they appear in different packages and are not exported. + // Otherwise, they are the same." + if name != obj.name { + return false + } + // obj.Name == name + if obj.Exported() { + return true + } + // not exported, so packages must be the same (pkg == nil for + // fields in Universe scope; this can only happen for types + // introduced via Eval) + if pkg == nil || obj.pkg == nil { + return pkg == obj.pkg + } + // pkg != nil && obj.pkg != nil + return pkg.path == obj.pkg.path +} + +// less reports whether object a is ordered before object b. +// +// Objects are ordered nil before non-nil, exported before +// non-exported, then by name, and finally (for non-exported +// functions) by package path. 
+func (a *object) less(b *object) bool { + if a == b { + return false + } + + // Nil before non-nil. + if a == nil { + return true + } + if b == nil { + return false + } + + // Exported functions before non-exported. + ea := isExported(a.name) + eb := isExported(b.name) + if ea != eb { + return ea + } + + // Order by name and then (for non-exported names) by package. + if a.name != b.name { + return a.name < b.name + } + if !ea { + return a.pkg.path < b.pkg.path + } + + return false +} + +// A PkgName represents an imported Go package. +// PkgNames don't have a type. +type PkgName struct { + object + imported *Package + used bool // set if the package was used +} + +// NewPkgName returns a new PkgName object representing an imported package. +// The remaining arguments set the attributes found with all Objects. +func NewPkgName(pos syntax.Pos, pkg *Package, name string, imported *Package) *PkgName { + return &PkgName{object{nil, pos, pkg, name, Typ[Invalid], 0, black, nopos}, imported, false} +} + +// Imported returns the package that was imported. +// It is distinct from Pkg(), which is the package containing the import statement. +func (obj *PkgName) Imported() *Package { return obj.imported } + +// A Const represents a declared constant. +type Const struct { + object + val constant.Value +} + +// NewConst returns a new constant with value val. +// The remaining arguments set the attributes found with all Objects. +func NewConst(pos syntax.Pos, pkg *Package, name string, typ Type, val constant.Value) *Const { + return &Const{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, val} +} + +// Val returns the constant's value. +func (obj *Const) Val() constant.Value { return obj.val } + +func (*Const) isDependency() {} // a constant may be a dependency of an initialization expression + +// A TypeName represents a name for a (defined or alias) type. +type TypeName struct { + object +} + +// NewTypeName returns a new type name denoting the given typ. 
+// The remaining arguments set the attributes found with all Objects. +// +// The typ argument may be a defined (Named) type or an alias type. +// It may also be nil such that the returned TypeName can be used as +// argument for NewNamed, which will set the TypeName's type as a side- +// effect. +func NewTypeName(pos syntax.Pos, pkg *Package, name string, typ Type) *TypeName { + return &TypeName{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}} +} + +// NewTypeNameLazy returns a new defined type like NewTypeName, but it +// lazily calls resolve to finish constructing the Named object. +func NewTypeNameLazy(pos syntax.Pos, pkg *Package, name string, load func(named *Named) (tparams []*TypeParam, underlying Type, methods []*Func)) *TypeName { + obj := NewTypeName(pos, pkg, name, nil) + NewNamed(obj, nil, nil).loader = load + return obj +} + +// IsAlias reports whether obj is an alias name for a type. +func (obj *TypeName) IsAlias() bool { + switch t := obj.typ.(type) { + case nil: + return false + // case *Alias: + // handled by default case + case *Basic: + // unsafe.Pointer is not an alias. + if obj.pkg == Unsafe { + return false + } + // Any user-defined type name for a basic type is an alias for a + // basic type (because basic types are pre-declared in the Universe + // scope, outside any package scope), and so is any type name with + // a different name than the name of the basic type it refers to. + // Additionally, we need to look for "byte" and "rune" because they + // are aliases but have the same names (for better error messages). + return obj.pkg != nil || t.name != obj.name || t == universeByte || t == universeRune + case *Named: + return obj != t.obj + case *TypeParam: + return obj != t.obj + default: + return true + } +} + +// A Variable represents a declared variable (including function parameters and results, and struct fields). 
+type Var struct { + object + embedded bool // if set, the variable is an embedded struct field, and name is the type name + isField bool // var is struct field + used bool // set if the variable was used + origin *Var // if non-nil, the Var from which this one was instantiated +} + +// NewVar returns a new variable. +// The arguments set the attributes found with all Objects. +func NewVar(pos syntax.Pos, pkg *Package, name string, typ Type) *Var { + return &Var{object: object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}} +} + +// NewParam returns a new variable representing a function parameter. +func NewParam(pos syntax.Pos, pkg *Package, name string, typ Type) *Var { + return &Var{object: object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, used: true} // parameters are always 'used' +} + +// NewField returns a new variable representing a struct field. +// For embedded fields, the name is the unqualified type name +// under which the field is accessible. +func NewField(pos syntax.Pos, pkg *Package, name string, typ Type, embedded bool) *Var { + return &Var{object: object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, embedded: embedded, isField: true} +} + +// Anonymous reports whether the variable is an embedded field. +// Same as Embedded; only present for backward-compatibility. +func (obj *Var) Anonymous() bool { return obj.embedded } + +// Embedded reports whether the variable is an embedded field. +func (obj *Var) Embedded() bool { return obj.embedded } + +// IsField reports whether the variable is a struct field. +func (obj *Var) IsField() bool { return obj.isField } + +// Origin returns the canonical Var for its receiver, i.e. the Var object +// recorded in Info.Defs. +// +// For synthetic Vars created during instantiation (such as struct fields or +// function parameters that depend on type arguments), this will be the +// corresponding Var on the generic (uninstantiated) type. For all other Vars +// Origin returns the receiver. 
+func (obj *Var) Origin() *Var { + if obj.origin != nil { + return obj.origin + } + return obj +} + +func (*Var) isDependency() {} // a variable may be a dependency of an initialization expression + +// A Func represents a declared function, concrete method, or abstract +// (interface) method. Its Type() is always a *Signature. +// An abstract method may belong to many interfaces due to embedding. +type Func struct { + object + hasPtrRecv_ bool // only valid for methods that don't have a type yet; use hasPtrRecv() to read + origin *Func // if non-nil, the Func from which this one was instantiated +} + +// NewFunc returns a new function with the given signature, representing +// the function's type. +func NewFunc(pos syntax.Pos, pkg *Package, name string, sig *Signature) *Func { + // don't store a (typed) nil signature + var typ Type + if sig != nil { + typ = sig + } + return &Func{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, false, nil} +} + +// FullName returns the package- or receiver-type-qualified name of +// function or method obj. +func (obj *Func) FullName() string { + var buf bytes.Buffer + writeFuncName(&buf, obj, nil) + return buf.String() +} + +// Scope returns the scope of the function's body block. +// The result is nil for imported or instantiated functions and methods +// (but there is also no mechanism to get to an instantiated function). +func (obj *Func) Scope() *Scope { return obj.typ.(*Signature).scope } + +// Origin returns the canonical Func for its receiver, i.e. the Func object +// recorded in Info.Defs. +// +// For synthetic functions created during instantiation (such as methods on an +// instantiated Named type or interface methods that depend on type arguments), +// this will be the corresponding Func on the generic (uninstantiated) type. +// For all other Funcs Origin returns the receiver. 
+func (obj *Func) Origin() *Func { + if obj.origin != nil { + return obj.origin + } + return obj +} + +// Pkg returns the package to which the function belongs. +// +// The result is nil for methods of types in the Universe scope, +// like method Error of the error built-in interface type. +func (obj *Func) Pkg() *Package { return obj.object.Pkg() } + +// hasPtrRecv reports whether the receiver is of the form *T for the given method obj. +func (obj *Func) hasPtrRecv() bool { + // If a method's receiver type is set, use that as the source of truth for the receiver. + // Caution: Checker.funcDecl (decl.go) marks a function by setting its type to an empty + // signature. We may reach here before the signature is fully set up: we must explicitly + // check if the receiver is set (we cannot just look for non-nil obj.typ). + if sig, _ := obj.typ.(*Signature); sig != nil && sig.recv != nil { + _, isPtr := deref(sig.recv.typ) + return isPtr + } + + // If a method's type is not set it may be a method/function that is: + // 1) client-supplied (via NewFunc with no signature), or + // 2) internally created but not yet type-checked. + // For case 1) we can't do anything; the client must know what they are doing. + // For case 2) we can use the information gathered by the resolver. + return obj.hasPtrRecv_ +} + +func (*Func) isDependency() {} // a function may be a dependency of an initialization expression + +// A Label represents a declared label. +// Labels don't have a type. +type Label struct { + object + used bool // set if the label was used +} + +// NewLabel returns a new label. +func NewLabel(pos syntax.Pos, pkg *Package, name string) *Label { + return &Label{object{pos: pos, pkg: pkg, name: name, typ: Typ[Invalid], color_: black}, false} +} + +// A Builtin represents a built-in function. +// Builtins don't have a valid type. 
+type Builtin struct { + object + id builtinId +} + +func newBuiltin(id builtinId) *Builtin { + return &Builtin{object{name: predeclaredFuncs[id].name, typ: Typ[Invalid], color_: black}, id} +} + +// Nil represents the predeclared value nil. +type Nil struct { + object +} + +func writeObject(buf *bytes.Buffer, obj Object, qf Qualifier) { + var tname *TypeName + typ := obj.Type() + + switch obj := obj.(type) { + case *PkgName: + fmt.Fprintf(buf, "package %s", obj.Name()) + if path := obj.imported.path; path != "" && path != obj.name { + fmt.Fprintf(buf, " (%q)", path) + } + return + + case *Const: + buf.WriteString("const") + + case *TypeName: + tname = obj + buf.WriteString("type") + if isTypeParam(typ) { + buf.WriteString(" parameter") + } + + case *Var: + if obj.isField { + buf.WriteString("field") + } else { + buf.WriteString("var") + } + + case *Func: + buf.WriteString("func ") + writeFuncName(buf, obj, qf) + if typ != nil { + WriteSignature(buf, typ.(*Signature), qf) + } + return + + case *Label: + buf.WriteString("label") + typ = nil + + case *Builtin: + buf.WriteString("builtin") + typ = nil + + case *Nil: + buf.WriteString("nil") + return + + default: + panic(fmt.Sprintf("writeObject(%T)", obj)) + } + + buf.WriteByte(' ') + + // For package-level objects, qualify the name. + if obj.Pkg() != nil && obj.Pkg().scope.Lookup(obj.Name()) == obj { + buf.WriteString(packagePrefix(obj.Pkg(), qf)) + } + buf.WriteString(obj.Name()) + + if typ == nil { + return + } + + if tname != nil { + switch t := typ.(type) { + case *Basic: + // Don't print anything more for basic types since there's + // no more information. + return + case *Named: + if t.TypeParams().Len() > 0 { + newTypeWriter(buf, qf).tParamList(t.TypeParams().list()) + } + } + if tname.IsAlias() { + buf.WriteString(" =") + } else if t, _ := typ.(*TypeParam); t != nil { + typ = t.bound + } else { + // TODO(gri) should this be fromRHS for *Named? 
+ typ = under(typ) + } + } + + // Special handling for any: because WriteType will format 'any' as 'any', + // resulting in the object string `type any = any` rather than `type any = + // interface{}`. To avoid this, swap in a different empty interface. + if obj == universeAny { + assert(Identical(typ, &emptyInterface)) + typ = &emptyInterface + } + + buf.WriteByte(' ') + WriteType(buf, typ, qf) +} + +func packagePrefix(pkg *Package, qf Qualifier) string { + if pkg == nil { + return "" + } + var s string + if qf != nil { + s = qf(pkg) + } else { + s = pkg.Path() + } + if s != "" { + s += "." + } + return s +} + +// ObjectString returns the string form of obj. +// The Qualifier controls the printing of +// package-level objects, and may be nil. +func ObjectString(obj Object, qf Qualifier) string { + var buf bytes.Buffer + writeObject(&buf, obj, qf) + return buf.String() +} + +func (obj *PkgName) String() string { return ObjectString(obj, nil) } +func (obj *Const) String() string { return ObjectString(obj, nil) } +func (obj *TypeName) String() string { return ObjectString(obj, nil) } +func (obj *Var) String() string { return ObjectString(obj, nil) } +func (obj *Func) String() string { return ObjectString(obj, nil) } +func (obj *Label) String() string { return ObjectString(obj, nil) } +func (obj *Builtin) String() string { return ObjectString(obj, nil) } +func (obj *Nil) String() string { return ObjectString(obj, nil) } + +func writeFuncName(buf *bytes.Buffer, f *Func, qf Qualifier) { + if f.typ != nil { + sig := f.typ.(*Signature) + if recv := sig.Recv(); recv != nil { + buf.WriteByte('(') + if _, ok := recv.Type().(*Interface); ok { + // gcimporter creates abstract methods of + // named interfaces using the interface type + // (not the named type) as the receiver. + // Don't print it in full. 
+ buf.WriteString("interface") + } else { + WriteType(buf, recv.Type(), qf) + } + buf.WriteByte(')') + buf.WriteByte('.') + } else if f.pkg != nil { + buf.WriteString(packagePrefix(f.pkg, qf)) + } + } + buf.WriteString(f.name) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/object_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/object_test.go new file mode 100644 index 0000000000000000000000000000000000000000..ef1a864ec98bfe97e5cf751925e38d2840b1de0f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/object_test.go @@ -0,0 +1,156 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2_test + +import ( + "internal/testenv" + "strings" + "testing" + + . "cmd/compile/internal/types2" +) + +func TestIsAlias(t *testing.T) { + check := func(obj *TypeName, want bool) { + if got := obj.IsAlias(); got != want { + t.Errorf("%v: got IsAlias = %v; want %v", obj, got, want) + } + } + + // predeclared types + check(Unsafe.Scope().Lookup("Pointer").(*TypeName), false) + for _, name := range Universe.Names() { + if obj, _ := Universe.Lookup(name).(*TypeName); obj != nil { + check(obj, name == "any" || name == "byte" || name == "rune") + } + } + + // various other types + pkg := NewPackage("p", "p") + t1 := NewTypeName(nopos, pkg, "t1", nil) + n1 := NewNamed(t1, new(Struct), nil) + t5 := NewTypeName(nopos, pkg, "t5", nil) + NewTypeParam(t5, nil) + for _, test := range []struct { + name *TypeName + alias bool + }{ + {NewTypeName(nopos, nil, "t0", nil), false}, // no type yet + {NewTypeName(nopos, pkg, "t0", nil), false}, // no type yet + {t1, false}, // type name refers to named type and vice versa + {NewTypeName(nopos, nil, "t2", NewInterfaceType(nil, nil)), true}, // type name refers to unnamed type + {NewTypeName(nopos, pkg, "t3", n1), true}, // type name refers to named 
type with different type name + {NewTypeName(nopos, nil, "t4", Typ[Int32]), true}, // type name refers to basic type with different name + {NewTypeName(nopos, nil, "int32", Typ[Int32]), false}, // type name refers to basic type with same name + {NewTypeName(nopos, pkg, "int32", Typ[Int32]), true}, // type name is declared in user-defined package (outside Universe) + {NewTypeName(nopos, nil, "rune", Typ[Rune]), true}, // type name refers to basic type rune which is an alias already + {t5, false}, // type name refers to type parameter and vice versa + } { + check(test.name, test.alias) + } +} + +// TestEmbeddedMethod checks that an embedded method is represented by +// the same Func Object as the original method. See also go.dev/issue/34421. +func TestEmbeddedMethod(t *testing.T) { + const src = `package p; type I interface { error }` + pkg := mustTypecheck(src, nil, nil) + + // get original error.Error method + eface := Universe.Lookup("error") + orig, _, _ := LookupFieldOrMethod(eface.Type(), false, nil, "Error") + if orig == nil { + t.Fatalf("original error.Error not found") + } + + // get embedded error.Error method + iface := pkg.Scope().Lookup("I") + embed, _, _ := LookupFieldOrMethod(iface.Type(), false, nil, "Error") + if embed == nil { + t.Fatalf("embedded error.Error not found") + } + + // original and embedded Error object should be identical + if orig != embed { + t.Fatalf("%s (%p) != %s (%p)", orig, orig, embed, embed) + } +} + +var testObjects = []struct { + src string + obj string + want string +}{ + {"import \"io\"; var r io.Reader", "r", "var p.r io.Reader"}, + + {"const c = 1.2", "c", "const p.c untyped float"}, + {"const c float64 = 3.14", "c", "const p.c float64"}, + + {"type t struct{f int}", "t", "type p.t struct{f int}"}, + {"type t func(int)", "t", "type p.t func(int)"}, + {"type t[P any] struct{f P}", "t", "type p.t[P any] struct{f P}"}, + {"type t[P any] struct{f P}", "t.P", "type parameter P any"}, + {"type C interface{m()}; type t[P C] 
struct{}", "t.P", "type parameter P p.C"}, + + {"type t = struct{f int}", "t", "type p.t = struct{f int}"}, + {"type t = func(int)", "t", "type p.t = func(int)"}, + + {"var v int", "v", "var p.v int"}, + + {"func f(int) string", "f", "func p.f(int) string"}, + {"func g[P any](x P){}", "g", "func p.g[P any](x P)"}, + {"func g[P interface{~int}](x P){}", "g.P", "type parameter P interface{~int}"}, + {"", "any", "type any = interface{}"}, +} + +func TestObjectString(t *testing.T) { + testenv.MustHaveGoBuild(t) + + for _, test := range testObjects { + src := "package p; " + test.src + pkg, err := typecheck(src, nil, nil) + if err != nil { + t.Errorf("%s: %s", src, err) + continue + } + + names := strings.Split(test.obj, ".") + if len(names) != 1 && len(names) != 2 { + t.Errorf("%s: invalid object path %s", test.src, test.obj) + continue + } + _, obj := pkg.Scope().LookupParent(names[0], nopos) + if obj == nil { + t.Errorf("%s: %s not found", test.src, names[0]) + continue + } + if len(names) == 2 { + if typ, ok := obj.Type().(interface{ TypeParams() *TypeParamList }); ok { + obj = lookupTypeParamObj(typ.TypeParams(), names[1]) + if obj == nil { + t.Errorf("%s: %s not found", test.src, test.obj) + continue + } + } else { + t.Errorf("%s: %s has no type parameters", test.src, names[0]) + continue + } + } + + if got := obj.String(); got != test.want { + t.Errorf("%s: got %s, want %s", test.src, got, test.want) + } + } +} + +func lookupTypeParamObj(list *TypeParamList, name string) Object { + for i := 0; i < list.Len(); i++ { + tpar := list.At(i) + if tpar.Obj().Name() == name { + return tpar.Obj() + } + } + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/objset.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/objset.go new file mode 100644 index 0000000000000000000000000000000000000000..88ff0af9cab8c06fa44b7c41e1c02c9b0f2d6d14 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/objset.go @@ -0,0 +1,31 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements objsets. +// +// An objset is similar to a Scope but objset elements +// are identified by their unique id, instead of their +// object name. + +package types2 + +// An objset is a set of objects identified by their unique id. +// The zero value for objset is a ready-to-use empty objset. +type objset map[string]Object // initialized lazily + +// insert attempts to insert an object obj into objset s. +// If s already contains an alternative object alt with +// the same name, insert leaves s unchanged and returns alt. +// Otherwise it inserts obj and returns nil. +func (s *objset) insert(obj Object) Object { + id := obj.Id() + if alt := (*s)[id]; alt != nil { + return alt + } + if *s == nil { + *s = make(map[string]Object) + } + (*s)[id] = obj + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/operand.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/operand.go new file mode 100644 index 0000000000000000000000000000000000000000..3f151007e577beb4b8df409c7114b0f3b4d55438 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/operand.go @@ -0,0 +1,396 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file defines operands and associated operations. + +package types2 + +import ( + "bytes" + "cmd/compile/internal/syntax" + "fmt" + "go/constant" + "go/token" + . "internal/types/errors" +) + +// An operandMode specifies the (addressing) mode of an operand. 
+type operandMode byte + +const ( + invalid operandMode = iota // operand is invalid + novalue // operand represents no value (result of a function call w/o result) + builtin // operand is a built-in function + typexpr // operand is a type + constant_ // operand is a constant; the operand's typ is a Basic type + variable // operand is an addressable variable + mapindex // operand is a map index expression (acts like a variable on lhs, commaok on rhs of an assignment) + value // operand is a computed value + nilvalue // operand is the nil value + commaok // like value, but operand may be used in a comma,ok expression + commaerr // like commaok, but second value is error, not boolean + cgofunc // operand is a cgo function +) + +var operandModeString = [...]string{ + invalid: "invalid operand", + novalue: "no value", + builtin: "built-in", + typexpr: "type", + constant_: "constant", + variable: "variable", + mapindex: "map index expression", + value: "value", + nilvalue: "nil", + commaok: "comma, ok expression", + commaerr: "comma, error expression", + cgofunc: "cgo function", +} + +// An operand represents an intermediate value during type checking. +// Operands have an (addressing) mode, the expression evaluating to +// the operand, the operand's type, a value for constants, and an id +// for built-in functions. +// The zero value of operand is a ready to use invalid operand. +type operand struct { + mode operandMode + expr syntax.Expr + typ Type + val constant.Value + id builtinId +} + +// Pos returns the position of the expression corresponding to x. +// If x is invalid the position is nopos. 
+func (x *operand) Pos() syntax.Pos { + // x.expr may not be set if x is invalid + if x.expr == nil { + return nopos + } + return x.expr.Pos() +} + +// Operand string formats +// (not all "untyped" cases can appear due to the type system, +// but they fall out naturally here) +// +// mode format +// +// invalid ( ) +// novalue ( ) +// builtin ( ) +// typexpr ( ) +// +// constant ( ) +// constant ( of type ) +// constant ( ) +// constant ( of type ) +// +// variable ( ) +// variable ( of type ) +// +// mapindex ( ) +// mapindex ( of type ) +// +// value ( ) +// value ( of type ) +// +// nilvalue untyped nil +// nilvalue nil ( of type ) +// +// commaok ( ) +// commaok ( of type ) +// +// commaerr ( ) +// commaerr ( of type ) +// +// cgofunc ( ) +// cgofunc ( of type ) +func operandString(x *operand, qf Qualifier) string { + // special-case nil + if x.mode == nilvalue { + switch x.typ { + case nil, Typ[Invalid]: + return "nil (with invalid type)" + case Typ[UntypedNil]: + return "nil" + default: + return fmt.Sprintf("nil (of type %s)", TypeString(x.typ, qf)) + } + } + + var buf bytes.Buffer + + var expr string + if x.expr != nil { + expr = syntax.String(x.expr) + } else { + switch x.mode { + case builtin: + expr = predeclaredFuncs[x.id].name + case typexpr: + expr = TypeString(x.typ, qf) + case constant_: + expr = x.val.String() + } + } + + // ( + if expr != "" { + buf.WriteString(expr) + buf.WriteString(" (") + } + + // + hasType := false + switch x.mode { + case invalid, novalue, builtin, typexpr: + // no type + default: + // should have a type, but be cautious (don't crash during printing) + if x.typ != nil { + if isUntyped(x.typ) { + buf.WriteString(x.typ.(*Basic).name) + buf.WriteByte(' ') + break + } + hasType = true + } + } + + // + buf.WriteString(operandModeString[x.mode]) + + // + if x.mode == constant_ { + if s := x.val.String(); s != expr { + buf.WriteByte(' ') + buf.WriteString(s) + } + } + + // + if hasType { + if isValid(x.typ) { + var intro string + if 
isGeneric(x.typ) { + intro = " of generic type " + } else { + intro = " of type " + } + buf.WriteString(intro) + WriteType(&buf, x.typ, qf) + if tpar, _ := x.typ.(*TypeParam); tpar != nil { + buf.WriteString(" constrained by ") + WriteType(&buf, tpar.bound, qf) // do not compute interface type sets here + // If we have the type set and it's empty, say so for better error messages. + if hasEmptyTypeset(tpar) { + buf.WriteString(" with empty type set") + } + } + } else { + buf.WriteString(" with invalid type") + } + } + + // ) + if expr != "" { + buf.WriteByte(')') + } + + return buf.String() +} + +func (x *operand) String() string { + return operandString(x, nil) +} + +// setConst sets x to the untyped constant for literal lit. +func (x *operand) setConst(k syntax.LitKind, lit string) { + var kind BasicKind + switch k { + case syntax.IntLit: + kind = UntypedInt + case syntax.FloatLit: + kind = UntypedFloat + case syntax.ImagLit: + kind = UntypedComplex + case syntax.RuneLit: + kind = UntypedRune + case syntax.StringLit: + kind = UntypedString + default: + unreachable() + } + + val := constant.MakeFromLiteral(lit, kind2tok[k], 0) + if val.Kind() == constant.Unknown { + x.mode = invalid + x.typ = Typ[Invalid] + return + } + x.mode = constant_ + x.typ = Typ[kind] + x.val = val +} + +// isNil reports whether x is the (untyped) nil value. +func (x *operand) isNil() bool { return x.mode == nilvalue } + +// assignableTo reports whether x is assignable to a variable of type T. If the +// result is false and a non-nil cause is provided, it may be set to a more +// detailed explanation of the failure (result != ""). The returned error code +// is only valid if the (first) result is false. The check parameter may be nil +// if assignableTo is invoked through an exported API call, i.e., when all +// methods have been type-checked. 
+func (x *operand) assignableTo(check *Checker, T Type, cause *string) (bool, Code) { + if x.mode == invalid || !isValid(T) { + return true, 0 // avoid spurious errors + } + + V := x.typ + + // x's type is identical to T + if Identical(V, T) { + return true, 0 + } + + Vu := under(V) + Tu := under(T) + Vp, _ := V.(*TypeParam) + Tp, _ := T.(*TypeParam) + + // x is an untyped value representable by a value of type T. + if isUntyped(Vu) { + assert(Vp == nil) + if Tp != nil { + // T is a type parameter: x is assignable to T if it is + // representable by each specific type in the type set of T. + return Tp.is(func(t *term) bool { + if t == nil { + return false + } + // A term may be a tilde term but the underlying + // type of an untyped value doesn't change so we + // don't need to do anything special. + newType, _, _ := check.implicitTypeAndValue(x, t.typ) + return newType != nil + }), IncompatibleAssign + } + newType, _, _ := check.implicitTypeAndValue(x, T) + return newType != nil, IncompatibleAssign + } + // Vu is typed + + // x's type V and T have identical underlying types + // and at least one of V or T is not a named type + // and neither V nor T is a type parameter. + if Identical(Vu, Tu) && (!hasName(V) || !hasName(T)) && Vp == nil && Tp == nil { + return true, 0 + } + + // T is an interface type, but not a type parameter, and V implements T. + // Also handle the case where T is a pointer to an interface so that we get + // the Checker.implements error cause. + if _, ok := Tu.(*Interface); ok && Tp == nil || isInterfacePtr(Tu) { + if check.implements(x.Pos(), V, T, false, cause) { + return true, 0 + } + // V doesn't implement T but V may still be assignable to T if V + // is a type parameter; do not report an error in that case yet. + if Vp == nil { + return false, InvalidIfaceAssign + } + if cause != nil { + *cause = "" + } + } + + // If V is an interface, check if a missing type assertion is the problem. 
+ if Vi, _ := Vu.(*Interface); Vi != nil && Vp == nil { + if check.implements(x.Pos(), T, V, false, nil) { + // T implements V, so give hint about type assertion. + if cause != nil { + *cause = "need type assertion" + } + return false, IncompatibleAssign + } + } + + // x is a bidirectional channel value, T is a channel + // type, x's type V and T have identical element types, + // and at least one of V or T is not a named type. + if Vc, ok := Vu.(*Chan); ok && Vc.dir == SendRecv { + if Tc, ok := Tu.(*Chan); ok && Identical(Vc.elem, Tc.elem) { + return !hasName(V) || !hasName(T), InvalidChanAssign + } + } + + // optimization: if we don't have type parameters, we're done + if Vp == nil && Tp == nil { + return false, IncompatibleAssign + } + + errorf := func(format string, args ...interface{}) { + if check != nil && cause != nil { + msg := check.sprintf(format, args...) + if *cause != "" { + msg += "\n\t" + *cause + } + *cause = msg + } + } + + // x's type V is not a named type and T is a type parameter, and + // x is assignable to each specific type in T's type set. + if !hasName(V) && Tp != nil { + ok := false + code := IncompatibleAssign + Tp.is(func(T *term) bool { + if T == nil { + return false // no specific types + } + ok, code = x.assignableTo(check, T.typ, cause) + if !ok { + errorf("cannot assign %s to %s (in %s)", x.typ, T.typ, Tp) + return false + } + return true + }) + return ok, code + } + + // x's type V is a type parameter and T is not a named type, + // and values x' of each specific type in V's type set are + // assignable to T. 
+ if Vp != nil && !hasName(T) { + x := *x // don't clobber outer x + ok := false + code := IncompatibleAssign + Vp.is(func(V *term) bool { + if V == nil { + return false // no specific types + } + x.typ = V.typ + ok, code = x.assignableTo(check, T, cause) + if !ok { + errorf("cannot assign %s (in %s) to %s", V.typ, Vp, T) + return false + } + return true + }) + return ok, code + } + + return false, IncompatibleAssign +} + +// kind2tok translates syntax.LitKinds into token.Tokens. +var kind2tok = [...]token.Token{ + syntax.IntLit: token.INT, + syntax.FloatLit: token.FLOAT, + syntax.ImagLit: token.IMAG, + syntax.RuneLit: token.CHAR, + syntax.StringLit: token.STRING, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/package.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/package.go new file mode 100644 index 0000000000000000000000000000000000000000..e08099d81f078b9f44cdea7c3f1dbe3a10671a0f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/package.go @@ -0,0 +1,80 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import ( + "fmt" +) + +// A Package describes a Go package. +type Package struct { + path string + name string + scope *Scope + imports []*Package + complete bool + fake bool // scope lookup errors are silently dropped if package is fake (internal use only) + cgo bool // uses of this package will be rewritten into uses of declarations from _cgo_gotypes.go + goVersion string // minimum Go version required for package (by Config.GoVersion, typically from go.mod) +} + +// NewPackage returns a new Package for the given package path and name. +// The package is not complete and contains no explicit imports. 
+func NewPackage(path, name string) *Package { + scope := NewScope(Universe, nopos, nopos, fmt.Sprintf("package %q", path)) + return &Package{path: path, name: name, scope: scope} +} + +// Path returns the package path. +func (pkg *Package) Path() string { return pkg.path } + +// Name returns the package name. +func (pkg *Package) Name() string { return pkg.name } + +// SetName sets the package name. +func (pkg *Package) SetName(name string) { pkg.name = name } + +// GoVersion returns the minimum Go version required by this package. +// If the minimum version is unknown, GoVersion returns the empty string. +// Individual source files may specify a different minimum Go version, +// as reported in the [go/ast.File.GoVersion] field. +func (pkg *Package) GoVersion() string { return pkg.goVersion } + +// Scope returns the (complete or incomplete) package scope +// holding the objects declared at package level (TypeNames, +// Consts, Vars, and Funcs). +// For a nil pkg receiver, Scope returns the Universe scope. +func (pkg *Package) Scope() *Scope { + if pkg != nil { + return pkg.scope + } + return Universe +} + +// A package is complete if its scope contains (at least) all +// exported objects; otherwise it is incomplete. +func (pkg *Package) Complete() bool { return pkg.complete } + +// MarkComplete marks a package as complete. +func (pkg *Package) MarkComplete() { pkg.complete = true } + +// Imports returns the list of packages directly imported by +// pkg; the list is in source order. +// +// If pkg was loaded from export data, Imports includes packages that +// provide package-level objects referenced by pkg. This may be more or +// less than the set of packages directly imported by pkg's source code. +// +// If pkg uses cgo and the FakeImportC configuration option +// was enabled, the imports list may contain a fake "C" package. +func (pkg *Package) Imports() []*Package { return pkg.imports } + +// SetImports sets the list of explicitly imported packages to list. 
+// It is the caller's responsibility to make sure list elements are unique. +func (pkg *Package) SetImports(list []*Package) { pkg.imports = list } + +func (pkg *Package) String() string { + return fmt.Sprintf("package %s (%q)", pkg.name, pkg.path) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/pointer.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/pointer.go new file mode 100644 index 0000000000000000000000000000000000000000..63055fc6b056a76822d261a2913b502f9fab2476 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/pointer.go @@ -0,0 +1,19 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +// A Pointer represents a pointer type. +type Pointer struct { + base Type // element type +} + +// NewPointer returns a new pointer type for the given element (base) type. +func NewPointer(elem Type) *Pointer { return &Pointer{base: elem} } + +// Elem returns the element type for the given pointer p. +func (p *Pointer) Elem() Type { return p.base } + +func (p *Pointer) Underlying() Type { return p } +func (p *Pointer) String() string { return TypeString(p, nil) } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/predicates.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/predicates.go new file mode 100644 index 0000000000000000000000000000000000000000..7a096e3d97c0e436902c5645bbb0ab6178d4b3dd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/predicates.go @@ -0,0 +1,546 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements commonly used type predicates. + +package types2 + +// isValid reports whether t is a valid type. 
+func isValid(t Type) bool { return Unalias(t) != Typ[Invalid] } + +// The isX predicates below report whether t is an X. +// If t is a type parameter the result is false; i.e., +// these predicates don't look inside a type parameter. + +func isBoolean(t Type) bool { return isBasic(t, IsBoolean) } +func isInteger(t Type) bool { return isBasic(t, IsInteger) } +func isUnsigned(t Type) bool { return isBasic(t, IsUnsigned) } +func isFloat(t Type) bool { return isBasic(t, IsFloat) } +func isComplex(t Type) bool { return isBasic(t, IsComplex) } +func isNumeric(t Type) bool { return isBasic(t, IsNumeric) } +func isString(t Type) bool { return isBasic(t, IsString) } +func isIntegerOrFloat(t Type) bool { return isBasic(t, IsInteger|IsFloat) } +func isConstType(t Type) bool { return isBasic(t, IsConstType) } + +// isBasic reports whether under(t) is a basic type with the specified info. +// If t is a type parameter the result is false; i.e., +// isBasic does not look inside a type parameter. +func isBasic(t Type, info BasicInfo) bool { + u, _ := under(t).(*Basic) + return u != nil && u.info&info != 0 +} + +// The allX predicates below report whether t is an X. +// If t is a type parameter the result is true if isX is true +// for all specified types of the type parameter's type set. +// allX is an optimized version of isX(coreType(t)) (which +// is the same as underIs(t, isX)). + +func allBoolean(t Type) bool { return allBasic(t, IsBoolean) } +func allInteger(t Type) bool { return allBasic(t, IsInteger) } +func allUnsigned(t Type) bool { return allBasic(t, IsUnsigned) } +func allNumeric(t Type) bool { return allBasic(t, IsNumeric) } +func allString(t Type) bool { return allBasic(t, IsString) } +func allOrdered(t Type) bool { return allBasic(t, IsOrdered) } +func allNumericOrString(t Type) bool { return allBasic(t, IsNumeric|IsString) } + +// allBasic reports whether under(t) is a basic type with the specified info. 
+// If t is a type parameter, the result is true if isBasic(t, info) is true +// for all specific types of the type parameter's type set. +// allBasic(t, info) is an optimized version of isBasic(coreType(t), info). +func allBasic(t Type, info BasicInfo) bool { + if tpar, _ := Unalias(t).(*TypeParam); tpar != nil { + return tpar.is(func(t *term) bool { return t != nil && isBasic(t.typ, info) }) + } + return isBasic(t, info) +} + +// hasName reports whether t has a name. This includes +// predeclared types, defined types, and type parameters. +// hasName may be called with types that are not fully set up. +func hasName(t Type) bool { + switch Unalias(t).(type) { + case *Basic, *Named, *TypeParam: + return true + } + return false +} + +// isTypeLit reports whether t is a type literal. +// This includes all non-defined types, but also basic types. +// isTypeLit may be called with types that are not fully set up. +func isTypeLit(t Type) bool { + switch Unalias(t).(type) { + case *Named, *TypeParam: + return false + } + return true +} + +// isTyped reports whether t is typed; i.e., not an untyped +// constant or boolean. isTyped may be called with types that +// are not fully set up. +func isTyped(t Type) bool { + // Alias or Named types cannot denote untyped types, + // thus we don't need to call Unalias or under + // (which would be unsafe to do for types that are + // not fully set up). + b, _ := t.(*Basic) + return b == nil || b.info&IsUntyped == 0 +} + +// isUntyped(t) is the same as !isTyped(t). +func isUntyped(t Type) bool { + return !isTyped(t) +} + +// IsInterface reports whether t is an interface type. +func IsInterface(t Type) bool { + _, ok := under(t).(*Interface) + return ok +} + +// isNonTypeParamInterface reports whether t is an interface type but not a type parameter. +func isNonTypeParamInterface(t Type) bool { + return !isTypeParam(t) && IsInterface(t) +} + +// isTypeParam reports whether t is a type parameter. 
+func isTypeParam(t Type) bool { + _, ok := Unalias(t).(*TypeParam) + return ok +} + +// hasEmptyTypeset reports whether t is a type parameter with an empty type set. +// The function does not force the computation of the type set and so is safe to +// use anywhere, but it may report a false negative if the type set has not been +// computed yet. +func hasEmptyTypeset(t Type) bool { + if tpar, _ := Unalias(t).(*TypeParam); tpar != nil && tpar.bound != nil { + iface, _ := safeUnderlying(tpar.bound).(*Interface) + return iface != nil && iface.tset != nil && iface.tset.IsEmpty() + } + return false +} + +// isGeneric reports whether a type is a generic, uninstantiated type +// (generic signatures are not included). +// TODO(gri) should we include signatures or assert that they are not present? +func isGeneric(t Type) bool { + // A parameterized type is only generic if it doesn't have an instantiation already. + named := asNamed(t) + return named != nil && named.obj != nil && named.inst == nil && named.TypeParams().Len() > 0 +} + +// Comparable reports whether values of type T are comparable. +func Comparable(T Type) bool { + return comparable(T, true, nil, nil) +} + +// If dynamic is set, non-type parameter interfaces are always comparable. +// If reportf != nil, it may be used to report why T is not comparable. 
+func comparable(T Type, dynamic bool, seen map[Type]bool, reportf func(string, ...interface{})) bool { + if seen[T] { + return true + } + if seen == nil { + seen = make(map[Type]bool) + } + seen[T] = true + + switch t := under(T).(type) { + case *Basic: + // assume invalid types to be comparable + // to avoid follow-up errors + return t.kind != UntypedNil + case *Pointer, *Chan: + return true + case *Struct: + for _, f := range t.fields { + if !comparable(f.typ, dynamic, seen, nil) { + if reportf != nil { + reportf("struct containing %s cannot be compared", f.typ) + } + return false + } + } + return true + case *Array: + if !comparable(t.elem, dynamic, seen, nil) { + if reportf != nil { + reportf("%s cannot be compared", t) + } + return false + } + return true + case *Interface: + if dynamic && !isTypeParam(T) || t.typeSet().IsComparable(seen) { + return true + } + if reportf != nil { + if t.typeSet().IsEmpty() { + reportf("empty type set") + } else { + reportf("incomparable types in type set") + } + } + // fallthrough + } + return false +} + +// hasNil reports whether type t includes the nil value. +func hasNil(t Type) bool { + switch u := under(t).(type) { + case *Basic: + return u.kind == UnsafePointer + case *Slice, *Pointer, *Signature, *Map, *Chan: + return true + case *Interface: + return !isTypeParam(t) || u.typeSet().underIs(func(u Type) bool { + return u != nil && hasNil(u) + }) + } + return false +} + +// An ifacePair is a node in a stack of interface type pairs compared for identity. +type ifacePair struct { + x, y *Interface + prev *ifacePair +} + +func (p *ifacePair) identical(q *ifacePair) bool { + return p.x == q.x && p.y == q.y || p.x == q.y && p.y == q.x +} + +// A comparer is used to compare types. 
+type comparer struct { + ignoreTags bool // if set, identical ignores struct tags + ignoreInvalids bool // if set, identical treats an invalid type as identical to any type +} + +// For changes to this code the corresponding changes should be made to unifier.nify. +func (c *comparer) identical(x, y Type, p *ifacePair) bool { + x = Unalias(x) + y = Unalias(y) + + if x == y { + return true + } + + if c.ignoreInvalids && (!isValid(x) || !isValid(y)) { + return true + } + + switch x := x.(type) { + case *Basic: + // Basic types are singletons except for the rune and byte + // aliases, thus we cannot solely rely on the x == y check + // above. See also comment in TypeName.IsAlias. + if y, ok := y.(*Basic); ok { + return x.kind == y.kind + } + + case *Array: + // Two array types are identical if they have identical element types + // and the same array length. + if y, ok := y.(*Array); ok { + // If one or both array lengths are unknown (< 0) due to some error, + // assume they are the same to avoid spurious follow-on errors. + return (x.len < 0 || y.len < 0 || x.len == y.len) && c.identical(x.elem, y.elem, p) + } + + case *Slice: + // Two slice types are identical if they have identical element types. + if y, ok := y.(*Slice); ok { + return c.identical(x.elem, y.elem, p) + } + + case *Struct: + // Two struct types are identical if they have the same sequence of fields, + // and if corresponding fields have the same names, and identical types, + // and identical tags. Two embedded fields are considered to have the same + // name. Lower-case field names from different packages are always different. 
+ if y, ok := y.(*Struct); ok { + if x.NumFields() == y.NumFields() { + for i, f := range x.fields { + g := y.fields[i] + if f.embedded != g.embedded || + !c.ignoreTags && x.Tag(i) != y.Tag(i) || + !f.sameId(g.pkg, g.name) || + !c.identical(f.typ, g.typ, p) { + return false + } + } + return true + } + } + + case *Pointer: + // Two pointer types are identical if they have identical base types. + if y, ok := y.(*Pointer); ok { + return c.identical(x.base, y.base, p) + } + + case *Tuple: + // Two tuples types are identical if they have the same number of elements + // and corresponding elements have identical types. + if y, ok := y.(*Tuple); ok { + if x.Len() == y.Len() { + if x != nil { + for i, v := range x.vars { + w := y.vars[i] + if !c.identical(v.typ, w.typ, p) { + return false + } + } + } + return true + } + } + + case *Signature: + y, _ := y.(*Signature) + if y == nil { + return false + } + + // Two function types are identical if they have the same number of + // parameters and result values, corresponding parameter and result types + // are identical, and either both functions are variadic or neither is. + // Parameter and result names are not required to match, and type + // parameters are considered identical modulo renaming. + + if x.TypeParams().Len() != y.TypeParams().Len() { + return false + } + + // In the case of generic signatures, we will substitute in yparams and + // yresults. + yparams := y.params + yresults := y.results + + if x.TypeParams().Len() > 0 { + // We must ignore type parameter names when comparing x and y. The + // easiest way to do this is to substitute x's type parameters for y's. 
+ xtparams := x.TypeParams().list() + ytparams := y.TypeParams().list() + + var targs []Type + for i := range xtparams { + targs = append(targs, x.TypeParams().At(i)) + } + smap := makeSubstMap(ytparams, targs) + + var check *Checker // ok to call subst on a nil *Checker + ctxt := NewContext() // need a non-nil Context for the substitution below + + // Constraints must be pair-wise identical, after substitution. + for i, xtparam := range xtparams { + ybound := check.subst(nopos, ytparams[i].bound, smap, nil, ctxt) + if !c.identical(xtparam.bound, ybound, p) { + return false + } + } + + yparams = check.subst(nopos, y.params, smap, nil, ctxt).(*Tuple) + yresults = check.subst(nopos, y.results, smap, nil, ctxt).(*Tuple) + } + + return x.variadic == y.variadic && + c.identical(x.params, yparams, p) && + c.identical(x.results, yresults, p) + + case *Union: + if y, _ := y.(*Union); y != nil { + // TODO(rfindley): can this be reached during type checking? If so, + // consider passing a type set map. + unionSets := make(map[*Union]*_TypeSet) + xset := computeUnionTypeSet(nil, unionSets, nopos, x) + yset := computeUnionTypeSet(nil, unionSets, nopos, y) + return xset.terms.equal(yset.terms) + } + + case *Interface: + // Two interface types are identical if they describe the same type sets. + // With the existing implementation restriction, this simplifies to: + // + // Two interface types are identical if they have the same set of methods with + // the same names and identical function types, and if any type restrictions + // are the same. Lower-case method names from different packages are always + // different. The order of the methods is irrelevant. 
+ if y, ok := y.(*Interface); ok { + xset := x.typeSet() + yset := y.typeSet() + if xset.comparable != yset.comparable { + return false + } + if !xset.terms.equal(yset.terms) { + return false + } + a := xset.methods + b := yset.methods + if len(a) == len(b) { + // Interface types are the only types where cycles can occur + // that are not "terminated" via named types; and such cycles + // can only be created via method parameter types that are + // anonymous interfaces (directly or indirectly) embedding + // the current interface. Example: + // + // type T interface { + // m() interface{T} + // } + // + // If two such (differently named) interfaces are compared, + // endless recursion occurs if the cycle is not detected. + // + // If x and y were compared before, they must be equal + // (if they were not, the recursion would have stopped); + // search the ifacePair stack for the same pair. + // + // This is a quadratic algorithm, but in practice these stacks + // are extremely short (bounded by the nesting depth of interface + // type declarations that recur via parameter types, an extremely + // rare occurrence). An alternative implementation might use a + // "visited" map, but that is probably less efficient overall. + q := &ifacePair{x, y, p} + for p != nil { + if p.identical(q) { + return true // same pair was compared before + } + p = p.prev + } + if debug { + assertSortedMethods(a) + assertSortedMethods(b) + } + for i, f := range a { + g := b[i] + if f.Id() != g.Id() || !c.identical(f.typ, g.typ, q) { + return false + } + } + return true + } + } + + case *Map: + // Two map types are identical if they have identical key and value types. + if y, ok := y.(*Map); ok { + return c.identical(x.key, y.key, p) && c.identical(x.elem, y.elem, p) + } + + case *Chan: + // Two channel types are identical if they have identical value types + // and the same direction. 
+ if y, ok := y.(*Chan); ok { + return x.dir == y.dir && c.identical(x.elem, y.elem, p) + } + + case *Named: + // Two named types are identical if their type names originate + // in the same type declaration; if they are instantiated they + // must have identical type argument lists. + if y := asNamed(y); y != nil { + // check type arguments before origins to match unifier + // (for correct source code we need to do all checks so + // order doesn't matter) + xargs := x.TypeArgs().list() + yargs := y.TypeArgs().list() + if len(xargs) != len(yargs) { + return false + } + for i, xarg := range xargs { + if !Identical(xarg, yargs[i]) { + return false + } + } + return identicalOrigin(x, y) + } + + case *TypeParam: + // nothing to do (x and y being equal is caught in the very beginning of this function) + + case nil: + // avoid a crash in case of nil type + + default: + unreachable() + } + + return false +} + +// identicalOrigin reports whether x and y originated in the same declaration. +func identicalOrigin(x, y *Named) bool { + // TODO(gri) is this correct? + return x.Origin().obj == y.Origin().obj +} + +// identicalInstance reports if two type instantiations are identical. +// Instantiations are identical if their origin and type arguments are +// identical. +func identicalInstance(xorig Type, xargs []Type, yorig Type, yargs []Type) bool { + if len(xargs) != len(yargs) { + return false + } + + for i, xa := range xargs { + if !Identical(xa, yargs[i]) { + return false + } + } + + return Identical(xorig, yorig) +} + +// Default returns the default "typed" type for an "untyped" type; +// it returns the incoming type for all other types. The default type +// for untyped nil is untyped nil. 
+func Default(t Type) Type { + if t, ok := Unalias(t).(*Basic); ok { + switch t.kind { + case UntypedBool: + return Typ[Bool] + case UntypedInt: + return Typ[Int] + case UntypedRune: + return universeRune // use 'rune' name + case UntypedFloat: + return Typ[Float64] + case UntypedComplex: + return Typ[Complex128] + case UntypedString: + return Typ[String] + } + } + return t +} + +// maxType returns the "largest" type that encompasses both x and y. +// If x and y are different untyped numeric types, the result is the type of x or y +// that appears later in this list: integer, rune, floating-point, complex. +// Otherwise, if x != y, the result is nil. +func maxType(x, y Type) Type { + // We only care about untyped types (for now), so == is good enough. + // TODO(gri) investigate generalizing this function to simplify code elsewhere + if x == y { + return x + } + if isUntyped(x) && isUntyped(y) && isNumeric(x) && isNumeric(y) { + // untyped types are basic types + if x.(*Basic).kind > y.(*Basic).kind { + return x + } + return y + } + return nil +} + +// clone makes a "flat copy" of *p and returns a pointer to the copy. +func clone[P *T, T any](p P) P { + c := *p + return &c +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/resolver.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/resolver.go new file mode 100644 index 0000000000000000000000000000000000000000..0cf7c9142e4c4e746030c60fe2d8306967ba712f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/resolver.go @@ -0,0 +1,776 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import ( + "cmd/compile/internal/syntax" + "fmt" + "go/constant" + . "internal/types/errors" + "sort" + "strconv" + "strings" + "unicode" +) + +// A declInfo describes a package-level const, type, var, or func declaration. 
+type declInfo struct { + file *Scope // scope of file containing this declaration + lhs []*Var // lhs of n:1 variable declarations, or nil + vtyp syntax.Expr // type, or nil (for const and var declarations only) + init syntax.Expr // init/orig expression, or nil (for const and var declarations only) + inherited bool // if set, the init expression is inherited from a previous constant declaration + tdecl *syntax.TypeDecl // type declaration, or nil + fdecl *syntax.FuncDecl // func declaration, or nil + + // The deps field tracks initialization expression dependencies. + deps map[Object]bool // lazily initialized +} + +// hasInitializer reports whether the declared object has an initialization +// expression or function body. +func (d *declInfo) hasInitializer() bool { + return d.init != nil || d.fdecl != nil && d.fdecl.Body != nil +} + +// addDep adds obj to the set of objects d's init expression depends on. +func (d *declInfo) addDep(obj Object) { + m := d.deps + if m == nil { + m = make(map[Object]bool) + d.deps = m + } + m[obj] = true +} + +// arity checks that the lhs and rhs of a const or var decl +// have a matching number of names and initialization values. +// If inherited is set, the initialization values are from +// another (constant) declaration. 
+func (check *Checker) arity(pos syntax.Pos, names []*syntax.Name, inits []syntax.Expr, constDecl, inherited bool) { + l := len(names) + r := len(inits) + + const code = WrongAssignCount + switch { + case l < r: + n := inits[l] + if inherited { + check.errorf(pos, code, "extra init expr at %s", n.Pos()) + } else { + check.errorf(n, code, "extra init expr %s", n) + } + case l > r && (constDecl || r != 1): // if r == 1 it may be a multi-valued function and we can't say anything yet + n := names[r] + check.errorf(n, code, "missing init expr for %s", n.Value) + } +} + +func validatedImportPath(path string) (string, error) { + s, err := strconv.Unquote(path) + if err != nil { + return "", err + } + if s == "" { + return "", fmt.Errorf("empty string") + } + const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD" + for _, r := range s { + if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) { + return s, fmt.Errorf("invalid character %#U", r) + } + } + return s, nil +} + +// declarePkgObj declares obj in the package scope, records its ident -> obj mapping, +// and updates check.objMap. The object must not be a function or method. +func (check *Checker) declarePkgObj(ident *syntax.Name, obj Object, d *declInfo) { + assert(ident.Value == obj.Name()) + + // spec: "A package-scope or file-scope identifier with name init + // may only be declared to be a function with this (func()) signature." + if ident.Value == "init" { + check.error(ident, InvalidInitDecl, "cannot declare init - must be func") + return + } + + // spec: "The main package must have package name main and declare + // a function main that takes no arguments and returns no value." 
+ if ident.Value == "main" && check.pkg.name == "main" { + check.error(ident, InvalidMainDecl, "cannot declare main - must be func") + return + } + + check.declare(check.pkg.scope, ident, obj, nopos) + check.objMap[obj] = d + obj.setOrder(uint32(len(check.objMap))) +} + +// filename returns a filename suitable for debugging output. +func (check *Checker) filename(fileNo int) string { + file := check.files[fileNo] + if pos := file.Pos(); pos.IsKnown() { + // return check.fset.File(pos).Name() + // TODO(gri) do we need the actual file name here? + return pos.RelFilename() + } + return fmt.Sprintf("file[%d]", fileNo) +} + +func (check *Checker) importPackage(pos syntax.Pos, path, dir string) *Package { + // If we already have a package for the given (path, dir) + // pair, use it instead of doing a full import. + // Checker.impMap only caches packages that are marked Complete + // or fake (dummy packages for failed imports). Incomplete but + // non-fake packages do require an import to complete them. 
+ key := importKey{path, dir} + imp := check.impMap[key] + if imp != nil { + return imp + } + + // no package yet => import it + if path == "C" && (check.conf.FakeImportC || check.conf.go115UsesCgo) { + imp = NewPackage("C", "C") + imp.fake = true // package scope is not populated + imp.cgo = check.conf.go115UsesCgo + } else { + // ordinary import + var err error + if importer := check.conf.Importer; importer == nil { + err = fmt.Errorf("Config.Importer not installed") + } else if importerFrom, ok := importer.(ImporterFrom); ok { + imp, err = importerFrom.ImportFrom(path, dir, 0) + if imp == nil && err == nil { + err = fmt.Errorf("Config.Importer.ImportFrom(%s, %s, 0) returned nil but no error", path, dir) + } + } else { + imp, err = importer.Import(path) + if imp == nil && err == nil { + err = fmt.Errorf("Config.Importer.Import(%s) returned nil but no error", path) + } + } + // make sure we have a valid package name + // (errors here can only happen through manipulation of packages after creation) + if err == nil && imp != nil && (imp.name == "_" || imp.name == "") { + err = fmt.Errorf("invalid package name: %q", imp.name) + imp = nil // create fake package below + } + if err != nil { + check.errorf(pos, BrokenImport, "could not import %s (%s)", path, err) + if imp == nil { + // create a new fake package + // come up with a sensible package name (heuristic) + name := path + if i := len(name); i > 0 && name[i-1] == '/' { + name = name[:i-1] + } + if i := strings.LastIndex(name, "/"); i >= 0 { + name = name[i+1:] + } + imp = NewPackage(path, name) + } + // continue to use the package as best as we can + imp.fake = true // avoid follow-up lookup failures + } + } + + // package should be complete or marked fake, but be cautious + if imp.complete || imp.fake { + check.impMap[key] = imp + // Once we've formatted an error message, keep the pkgPathMap + // up-to-date on subsequent imports. It is used for package + // qualification in error messages. 
+ if check.pkgPathMap != nil { + check.markImports(imp) + } + return imp + } + + // something went wrong (importer may have returned incomplete package without error) + return nil +} + +// collectObjects collects all file and package objects and inserts them +// into their respective scopes. It also performs imports and associates +// methods with receiver base type names. +func (check *Checker) collectObjects() { + pkg := check.pkg + + // pkgImports is the set of packages already imported by any package file seen + // so far. Used to avoid duplicate entries in pkg.imports. Allocate and populate + // it (pkg.imports may not be empty if we are checking test files incrementally). + // Note that pkgImports is keyed by package (and thus package path), not by an + // importKey value. Two different importKey values may map to the same package + // which is why we cannot use the check.impMap here. + var pkgImports = make(map[*Package]bool) + for _, imp := range pkg.imports { + pkgImports[imp] = true + } + + type methodInfo struct { + obj *Func // method + ptr bool // true if pointer receiver + recv *syntax.Name // receiver type name + } + var methods []methodInfo // collected methods with valid receivers and non-blank _ names + var fileScopes []*Scope + for fileNo, file := range check.files { + // The package identifier denotes the current package, + // but there is no corresponding package object. + check.recordDef(file.PkgName, nil) + + fileScope := NewScope(pkg.scope, syntax.StartPos(file), syntax.EndPos(file), check.filename(fileNo)) + fileScopes = append(fileScopes, fileScope) + check.recordScope(file, fileScope) + + // determine file directory, necessary to resolve imports + // FileName may be "" (typically for tests) in which case + // we get "." as the directory which is what we would want. + fileDir := dir(file.PkgName.Pos().RelFilename()) // TODO(gri) should this be filename? 
+ + first := -1 // index of first ConstDecl in the current group, or -1 + var last *syntax.ConstDecl // last ConstDecl with init expressions, or nil + for index, decl := range file.DeclList { + if _, ok := decl.(*syntax.ConstDecl); !ok { + first = -1 // we're not in a constant declaration + } + + switch s := decl.(type) { + case *syntax.ImportDecl: + // import package + if s.Path == nil || s.Path.Bad { + continue // error reported during parsing + } + path, err := validatedImportPath(s.Path.Value) + if err != nil { + check.errorf(s.Path, BadImportPath, "invalid import path (%s)", err) + continue + } + + imp := check.importPackage(s.Path.Pos(), path, fileDir) + if imp == nil { + continue + } + + // local name overrides imported package name + name := imp.name + if s.LocalPkgName != nil { + name = s.LocalPkgName.Value + if path == "C" { + // match 1.17 cmd/compile (not prescribed by spec) + check.error(s.LocalPkgName, ImportCRenamed, `cannot rename import "C"`) + continue + } + } + + if name == "init" { + check.error(s, InvalidInitDecl, "cannot import package as init - init must be a func") + continue + } + + // add package to list of explicit imports + // (this functionality is provided as a convenience + // for clients; it is not needed for type-checking) + if !pkgImports[imp] { + pkgImports[imp] = true + pkg.imports = append(pkg.imports, imp) + } + + pkgName := NewPkgName(s.Pos(), pkg, name, imp) + if s.LocalPkgName != nil { + // in a dot-import, the dot represents the package + check.recordDef(s.LocalPkgName, pkgName) + } else { + check.recordImplicit(s, pkgName) + } + + if imp.fake { + // match 1.17 cmd/compile (not prescribed by spec) + pkgName.used = true + } + + // add import to file scope + check.imports = append(check.imports, pkgName) + if name == "." 
{ + // dot-import + if check.dotImportMap == nil { + check.dotImportMap = make(map[dotImportKey]*PkgName) + } + // merge imported scope with file scope + for name, obj := range imp.scope.elems { + // Note: Avoid eager resolve(name, obj) here, so we only + // resolve dot-imported objects as needed. + + // A package scope may contain non-exported objects, + // do not import them! + if isExported(name) { + // declare dot-imported object + // (Do not use check.declare because it modifies the object + // via Object.setScopePos, which leads to a race condition; + // the object may be imported into more than one file scope + // concurrently. See go.dev/issue/32154.) + if alt := fileScope.Lookup(name); alt != nil { + var err error_ + err.code = DuplicateDecl + err.errorf(s.LocalPkgName, "%s redeclared in this block", alt.Name()) + err.recordAltDecl(alt) + check.report(&err) + } else { + fileScope.insert(name, obj) + check.dotImportMap[dotImportKey{fileScope, name}] = pkgName + } + } + } + } else { + // declare imported package object in file scope + // (no need to provide s.LocalPkgName since we called check.recordDef earlier) + check.declare(fileScope, nil, pkgName, nopos) + } + + case *syntax.ConstDecl: + // iota is the index of the current constDecl within the group + if first < 0 || s.Group == nil || file.DeclList[index-1].(*syntax.ConstDecl).Group != s.Group { + first = index + last = nil + } + iota := constant.MakeInt64(int64(index - first)) + + // determine which initialization expressions to use + inherited := true + switch { + case s.Type != nil || s.Values != nil: + last = s + inherited = false + case last == nil: + last = new(syntax.ConstDecl) // make sure last exists + inherited = false + } + + // declare all constants + values := syntax.UnpackListExpr(last.Values) + for i, name := range s.NameList { + obj := NewConst(name.Pos(), pkg, name.Value, nil, iota) + + var init syntax.Expr + if i < len(values) { + init = values[i] + } + + d := &declInfo{file: 
fileScope, vtyp: last.Type, init: init, inherited: inherited} + check.declarePkgObj(name, obj, d) + } + + // Constants must always have init values. + check.arity(s.Pos(), s.NameList, values, true, inherited) + + case *syntax.VarDecl: + lhs := make([]*Var, len(s.NameList)) + // If there's exactly one rhs initializer, use + // the same declInfo d1 for all lhs variables + // so that each lhs variable depends on the same + // rhs initializer (n:1 var declaration). + var d1 *declInfo + if _, ok := s.Values.(*syntax.ListExpr); !ok { + // The lhs elements are only set up after the for loop below, + // but that's ok because declarePkgObj only collects the declInfo + // for a later phase. + d1 = &declInfo{file: fileScope, lhs: lhs, vtyp: s.Type, init: s.Values} + } + + // declare all variables + values := syntax.UnpackListExpr(s.Values) + for i, name := range s.NameList { + obj := NewVar(name.Pos(), pkg, name.Value, nil) + lhs[i] = obj + + d := d1 + if d == nil { + // individual assignments + var init syntax.Expr + if i < len(values) { + init = values[i] + } + d = &declInfo{file: fileScope, vtyp: s.Type, init: init} + } + + check.declarePkgObj(name, obj, d) + } + + // If we have no type, we must have values. 
+ if s.Type == nil || values != nil { + check.arity(s.Pos(), s.NameList, values, false, false) + } + + case *syntax.TypeDecl: + _ = len(s.TParamList) != 0 && check.verifyVersionf(s.TParamList[0], go1_18, "type parameter") + obj := NewTypeName(s.Name.Pos(), pkg, s.Name.Value, nil) + check.declarePkgObj(s.Name, obj, &declInfo{file: fileScope, tdecl: s}) + + case *syntax.FuncDecl: + name := s.Name.Value + obj := NewFunc(s.Name.Pos(), pkg, name, nil) + hasTParamError := false // avoid duplicate type parameter errors + if s.Recv == nil { + // regular function + if name == "init" || name == "main" && pkg.name == "main" { + code := InvalidInitDecl + if name == "main" { + code = InvalidMainDecl + } + if len(s.TParamList) != 0 { + check.softErrorf(s.TParamList[0], code, "func %s must have no type parameters", name) + hasTParamError = true + } + if t := s.Type; len(t.ParamList) != 0 || len(t.ResultList) != 0 { + check.softErrorf(s.Name, code, "func %s must have no arguments and no return values", name) + } + } + // don't declare init functions in the package scope - they are invisible + if name == "init" { + obj.parent = pkg.scope + check.recordDef(s.Name, obj) + // init functions must have a body + if s.Body == nil { + // TODO(gri) make this error message consistent with the others above + check.softErrorf(obj.pos, MissingInitBody, "missing function body") + } + } else { + check.declare(pkg.scope, s.Name, obj, nopos) + } + } else { + // method + // d.Recv != nil + ptr, recv, _ := check.unpackRecv(s.Recv.Type, false) + // Methods with invalid receiver cannot be associated to a type, and + // methods with blank _ names are never found; no need to collect any + // of them. They will still be type-checked with all the other functions. 
+ if recv != nil && name != "_" { + methods = append(methods, methodInfo{obj, ptr, recv}) + } + check.recordDef(s.Name, obj) + } + _ = len(s.TParamList) != 0 && !hasTParamError && check.verifyVersionf(s.TParamList[0], go1_18, "type parameter") + info := &declInfo{file: fileScope, fdecl: s} + // Methods are not package-level objects but we still track them in the + // object map so that we can handle them like regular functions (if the + // receiver is invalid); also we need their fdecl info when associating + // them with their receiver base type, below. + check.objMap[obj] = info + obj.setOrder(uint32(len(check.objMap))) + + default: + check.errorf(s, InvalidSyntaxTree, "unknown syntax.Decl node %T", s) + } + } + } + + // verify that objects in package and file scopes have different names + for _, scope := range fileScopes { + for name, obj := range scope.elems { + if alt := pkg.scope.Lookup(name); alt != nil { + obj = resolve(name, obj) + var err error_ + err.code = DuplicateDecl + if pkg, ok := obj.(*PkgName); ok { + err.errorf(alt, "%s already declared through import of %s", alt.Name(), pkg.Imported()) + err.recordAltDecl(pkg) + } else { + err.errorf(alt, "%s already declared through dot-import of %s", alt.Name(), obj.Pkg()) + // TODO(gri) dot-imported objects don't have a position; recordAltDecl won't print anything + err.recordAltDecl(obj) + } + check.report(&err) + } + } + } + + // Now that we have all package scope objects and all methods, + // associate methods with receiver base type name where possible. + // Ignore methods that have an invalid receiver. They will be + // type-checked later, with regular functions. + if methods != nil { + check.methods = make(map[*TypeName][]*Func) + for i := range methods { + m := &methods[i] + // Determine the receiver base type and associate m with it. 
+ ptr, base := check.resolveBaseTypeName(m.ptr, m.recv, fileScopes) + if base != nil { + m.obj.hasPtrRecv_ = ptr + check.methods[base] = append(check.methods[base], m.obj) + } + } + } +} + +// unpackRecv unpacks a receiver type and returns its components: ptr indicates whether +// rtyp is a pointer receiver, rname is the receiver type name, and tparams are its +// type parameters, if any. The type parameters are only unpacked if unpackParams is +// set. If rname is nil, the receiver is unusable (i.e., the source has a bug which we +// cannot easily work around). +func (check *Checker) unpackRecv(rtyp syntax.Expr, unpackParams bool) (ptr bool, rname *syntax.Name, tparams []*syntax.Name) { +L: // unpack receiver type + // This accepts invalid receivers such as ***T and does not + // work for other invalid receivers, but we don't care. The + // validity of receiver expressions is checked elsewhere. + for { + switch t := rtyp.(type) { + case *syntax.ParenExpr: + rtyp = t.X + // case *ast.StarExpr: + // ptr = true + // rtyp = t.X + case *syntax.Operation: + if t.Op != syntax.Mul || t.Y != nil { + break + } + ptr = true + rtyp = t.X + default: + break L + } + } + + // unpack type parameters, if any + if ptyp, _ := rtyp.(*syntax.IndexExpr); ptyp != nil { + rtyp = ptyp.X + if unpackParams { + for _, arg := range syntax.UnpackListExpr(ptyp.Index) { + var par *syntax.Name + switch arg := arg.(type) { + case *syntax.Name: + par = arg + case *syntax.BadExpr: + // ignore - error already reported by parser + case nil: + check.error(ptyp, InvalidSyntaxTree, "parameterized receiver contains nil parameters") + default: + check.errorf(arg, BadDecl, "receiver type parameter %s must be an identifier", arg) + } + if par == nil { + par = syntax.NewName(arg.Pos(), "_") + } + tparams = append(tparams, par) + } + + } + } + + // unpack receiver name + if name, _ := rtyp.(*syntax.Name); name != nil { + rname = name + } + + return +} + +// resolveBaseTypeName returns the non-alias base type 
name for typ, and whether +// there was a pointer indirection to get to it. The base type name must be declared +// in package scope, and there can be at most one pointer indirection. If no such type +// name exists, the returned base is nil. +func (check *Checker) resolveBaseTypeName(seenPtr bool, typ syntax.Expr, fileScopes []*Scope) (ptr bool, base *TypeName) { + // Algorithm: Starting from a type expression, which may be a name, + // we follow that type through alias declarations until we reach a + // non-alias type name. If we encounter anything but pointer types or + // parentheses we're done. If we encounter more than one pointer type + // we're done. + ptr = seenPtr + var seen map[*TypeName]bool + for { + // check if we have a pointer type + // if pexpr, _ := typ.(*ast.StarExpr); pexpr != nil { + if pexpr, _ := typ.(*syntax.Operation); pexpr != nil && pexpr.Op == syntax.Mul && pexpr.Y == nil { + // if we've already seen a pointer, we're done + if ptr { + return false, nil + } + ptr = true + typ = syntax.Unparen(pexpr.X) // continue with pointer base type + } + + // typ must be a name, or a C.name cgo selector. + var name string + switch typ := typ.(type) { + case *syntax.Name: + name = typ.Value + case *syntax.SelectorExpr: + // C.struct_foo is a valid type name for packages using cgo. + // + // Detect this case, and adjust name so that the correct TypeName is + // resolved below. + if ident, _ := typ.X.(*syntax.Name); ident != nil && ident.Value == "C" { + // Check whether "C" actually resolves to an import of "C", by looking + // in the appropriate file scope. + var obj Object + for _, scope := range fileScopes { + if scope.Contains(ident.Pos()) { + obj = scope.Lookup(ident.Value) + } + } + // If Config.go115UsesCgo is set, the typechecker will resolve Cgo + // selectors to their cgo name. We must do the same here. 
+ if pname, _ := obj.(*PkgName); pname != nil { + if pname.imported.cgo { // only set if Config.go115UsesCgo is set + name = "_Ctype_" + typ.Sel.Value + } + } + } + if name == "" { + return false, nil + } + default: + return false, nil + } + + // name must denote an object found in the current package scope + // (note that dot-imported objects are not in the package scope!) + obj := check.pkg.scope.Lookup(name) + if obj == nil { + return false, nil + } + + // the object must be a type name... + tname, _ := obj.(*TypeName) + if tname == nil { + return false, nil + } + + // ... which we have not seen before + if seen[tname] { + return false, nil + } + + // we're done if tdecl defined tname as a new type + // (rather than an alias) + tdecl := check.objMap[tname].tdecl // must exist for objects in package scope + if !tdecl.Alias { + return ptr, tname + } + + // otherwise, continue resolving + typ = tdecl.Type + if seen == nil { + seen = make(map[*TypeName]bool) + } + seen[tname] = true + } +} + +// packageObjects typechecks all package objects, but not function bodies. +func (check *Checker) packageObjects() { + // process package objects in source order for reproducible results + objList := make([]Object, len(check.objMap)) + i := 0 + for obj := range check.objMap { + objList[i] = obj + i++ + } + sort.Sort(inSourceOrder(objList)) + + // add new methods to already type-checked types (from a prior Checker.Files call) + for _, obj := range objList { + if obj, _ := obj.(*TypeName); obj != nil && obj.typ != nil { + check.collectMethods(obj) + } + } + + if check.enableAlias { + // With Alias nodes we can process declarations in any order. + for _, obj := range objList { + check.objDecl(obj, nil) + } + } else { + // Without Alias nodes, we process non-alias type declarations first, followed by + // alias declarations, and then everything else. This appears to avoid most situations + // where the type of an alias is needed before it is available. 
+ // There may still be cases where this is not good enough (see also go.dev/issue/25838). + // In those cases Checker.ident will report an error ("invalid use of type alias"). + var aliasList []*TypeName + var othersList []Object // everything that's not a type + // phase 1: non-alias type declarations + for _, obj := range objList { + if tname, _ := obj.(*TypeName); tname != nil { + if check.objMap[tname].tdecl.Alias { + aliasList = append(aliasList, tname) + } else { + check.objDecl(obj, nil) + } + } else { + othersList = append(othersList, obj) + } + } + // phase 2: alias type declarations + for _, obj := range aliasList { + check.objDecl(obj, nil) + } + // phase 3: all other declarations + for _, obj := range othersList { + check.objDecl(obj, nil) + } + } + + // At this point we may have a non-empty check.methods map; this means that not all + // entries were deleted at the end of typeDecl because the respective receiver base + // types were not found. In that case, an error was reported when declaring those + // methods. We can now safely discard this map. + check.methods = nil +} + +// inSourceOrder implements the sort.Sort interface. +type inSourceOrder []Object + +func (a inSourceOrder) Len() int { return len(a) } +func (a inSourceOrder) Less(i, j int) bool { return a[i].order() < a[j].order() } +func (a inSourceOrder) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// unusedImports checks for unused imports. +func (check *Checker) unusedImports() { + // If function bodies are not checked, packages' uses are likely missing - don't check. + if check.conf.IgnoreFuncBodies { + return + } + + // spec: "It is illegal (...) to directly import a package without referring to + // any of its exported identifiers. To import a package solely for its side-effects + // (initialization), use the blank identifier as explicit package name." 
+ + for _, obj := range check.imports { + if !obj.used && obj.name != "_" { + check.errorUnusedPkg(obj) + } + } +} + +func (check *Checker) errorUnusedPkg(obj *PkgName) { + // If the package was imported with a name other than the final + // import path element, show it explicitly in the error message. + // Note that this handles both renamed imports and imports of + // packages containing unconventional package declarations. + // Note that this uses / always, even on Windows, because Go import + // paths always use forward slashes. + path := obj.imported.path + elem := path + if i := strings.LastIndex(elem, "/"); i >= 0 { + elem = elem[i+1:] + } + if obj.name == "" || obj.name == "." || obj.name == elem { + check.softErrorf(obj, UnusedImport, "%q imported and not used", path) + } else { + check.softErrorf(obj, UnusedImport, "%q imported as %s and not used", path, obj.name) + } +} + +// dir makes a good-faith attempt to return the directory +// portion of path. If path is empty, the result is ".". +// (Per the go/build package dependency tests, we cannot import +// path/filepath and simply use filepath.Dir.) +func dir(path string) string { + if i := strings.LastIndexAny(path, `/\`); i > 0 { + return path[:i] + } + // i <= 0 + return "." +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/resolver_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/resolver_test.go new file mode 100644 index 0000000000000000000000000000000000000000..8105d8af42cdaef4aae001b9295ba9c460c8263e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/resolver_test.go @@ -0,0 +1,218 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2_test + +import ( + "cmd/compile/internal/syntax" + "fmt" + "internal/testenv" + "sort" + "testing" + + . 
"cmd/compile/internal/types2" +) + +type resolveTestImporter struct { + importer ImporterFrom + imported map[string]bool +} + +func (imp *resolveTestImporter) Import(string) (*Package, error) { + panic("should not be called") +} + +func (imp *resolveTestImporter) ImportFrom(path, srcDir string, mode ImportMode) (*Package, error) { + if mode != 0 { + panic("mode must be 0") + } + if imp.importer == nil { + imp.importer = defaultImporter().(ImporterFrom) + imp.imported = make(map[string]bool) + } + pkg, err := imp.importer.ImportFrom(path, srcDir, mode) + if err != nil { + return nil, err + } + imp.imported[path] = true + return pkg, nil +} + +func TestResolveIdents(t *testing.T) { + testenv.MustHaveGoBuild(t) + + sources := []string{ + ` + package p + import "fmt" + import "math" + const pi = math.Pi + func sin(x float64) float64 { + return math.Sin(x) + } + var Println = fmt.Println + `, + ` + package p + import "fmt" + type errorStringer struct { fmt.Stringer; error } + func f() string { + _ = "foo" + return fmt.Sprintf("%d", g()) + } + func g() (x int) { return } + `, + ` + package p + import . 
"go/parser" + import "sync" + func h() Mode { return ImportsOnly } + var _, x int = 1, 2 + func init() {} + type T struct{ *sync.Mutex; a, b, c int} + type I interface{ m() } + var _ = T{a: 1, b: 2, c: 3} + func (_ T) m() {} + func (T) _() {} + var i I + var _ = i.m + func _(s []int) { for i, x := range s { _, _ = i, x } } + func _(x interface{}) { + switch x := x.(type) { + case int: + _ = x + } + switch {} // implicit 'true' tag + } + `, + ` + package p + type S struct{} + func (T) _() {} + func (T) _() {} + `, + ` + package p + func _() { + L0: + L1: + goto L0 + for { + goto L1 + } + if true { + goto L2 + } + L2: + } + `, + } + + pkgnames := []string{ + "fmt", + "math", + } + + // parse package files + var files []*syntax.File + for _, src := range sources { + files = append(files, mustParse(src)) + } + + // resolve and type-check package AST + importer := new(resolveTestImporter) + conf := Config{Importer: importer} + uses := make(map[*syntax.Name]Object) + defs := make(map[*syntax.Name]Object) + _, err := conf.Check("testResolveIdents", files, &Info{Defs: defs, Uses: uses}) + if err != nil { + t.Fatal(err) + } + + // check that all packages were imported + for _, name := range pkgnames { + if !importer.imported[name] { + t.Errorf("package %s not imported", name) + } + } + + // check that qualified identifiers are resolved + for _, f := range files { + syntax.Inspect(f, func(n syntax.Node) bool { + if s, ok := n.(*syntax.SelectorExpr); ok { + if x, ok := s.X.(*syntax.Name); ok { + obj := uses[x] + if obj == nil { + t.Errorf("%s: unresolved qualified identifier %s", x.Pos(), x.Value) + return false + } + if _, ok := obj.(*PkgName); ok && uses[s.Sel] == nil { + t.Errorf("%s: unresolved selector %s", s.Sel.Pos(), s.Sel.Value) + return false + } + return false + } + return true + } + return true + }) + } + + for id, obj := range uses { + if obj == nil { + t.Errorf("%s: Uses[%s] == nil", id.Pos(), id.Value) + } + } + + // Check that each identifier in the source is 
found in uses or defs or both. + // We need the foundUses/Defs maps (rather than just deleting the found objects + // from the uses and defs maps) because syntax.Walk traverses shared nodes multiple + // times (e.g. types in field lists such as "a, b, c int"). + foundUses := make(map[*syntax.Name]bool) + foundDefs := make(map[*syntax.Name]bool) + var both []string + for _, f := range files { + syntax.Inspect(f, func(n syntax.Node) bool { + if x, ok := n.(*syntax.Name); ok { + var objects int + if _, found := uses[x]; found { + objects |= 1 + foundUses[x] = true + } + if _, found := defs[x]; found { + objects |= 2 + foundDefs[x] = true + } + switch objects { + case 0: + t.Errorf("%s: unresolved identifier %s", x.Pos(), x.Value) + case 3: + both = append(both, x.Value) + } + return false + } + return true + }) + } + + // check the expected set of idents that are simultaneously uses and defs + sort.Strings(both) + if got, want := fmt.Sprint(both), "[Mutex Stringer error]"; got != want { + t.Errorf("simultaneous uses/defs = %s, want %s", got, want) + } + + // any left-over identifiers didn't exist in the source + for x := range uses { + if !foundUses[x] { + t.Errorf("%s: identifier %s not present in source", x.Pos(), x.Value) + } + } + for x := range defs { + if !foundDefs[x] { + t.Errorf("%s: identifier %s not present in source", x.Pos(), x.Value) + } + } + + // TODO(gri) add tests to check ImplicitObj callbacks +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/return.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/return.go new file mode 100644 index 0000000000000000000000000000000000000000..01988b012e1356c0210e4f9a24246a98457063c3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/return.go @@ -0,0 +1,184 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This file implements isTerminating. + +package types2 + +import ( + "cmd/compile/internal/syntax" +) + +// isTerminating reports if s is a terminating statement. +// If s is labeled, label is the label name; otherwise s +// is "". +func (check *Checker) isTerminating(s syntax.Stmt, label string) bool { + switch s := s.(type) { + default: + unreachable() + + case *syntax.DeclStmt, *syntax.EmptyStmt, *syntax.SendStmt, + *syntax.AssignStmt, *syntax.CallStmt: + // no chance + + case *syntax.LabeledStmt: + return check.isTerminating(s.Stmt, s.Label.Value) + + case *syntax.ExprStmt: + // calling the predeclared (possibly parenthesized) panic() function is terminating + if call, ok := syntax.Unparen(s.X).(*syntax.CallExpr); ok && check.isPanic[call] { + return true + } + + case *syntax.ReturnStmt: + return true + + case *syntax.BranchStmt: + if s.Tok == syntax.Goto || s.Tok == syntax.Fallthrough { + return true + } + + case *syntax.BlockStmt: + return check.isTerminatingList(s.List, "") + + case *syntax.IfStmt: + if s.Else != nil && + check.isTerminating(s.Then, "") && + check.isTerminating(s.Else, "") { + return true + } + + case *syntax.SwitchStmt: + return check.isTerminatingSwitch(s.Body, label) + + case *syntax.SelectStmt: + for _, cc := range s.Body { + if !check.isTerminatingList(cc.Body, "") || hasBreakList(cc.Body, label, true) { + return false + } + + } + return true + + case *syntax.ForStmt: + if _, ok := s.Init.(*syntax.RangeClause); ok { + // Range clauses guarantee that the loop terminates, + // so the loop is not a terminating statement. See go.dev/issue/49003. 
+ break + } + if s.Cond == nil && !hasBreak(s.Body, label, true) { + return true + } + } + + return false +} + +func (check *Checker) isTerminatingList(list []syntax.Stmt, label string) bool { + // trailing empty statements are permitted - skip them + for i := len(list) - 1; i >= 0; i-- { + if _, ok := list[i].(*syntax.EmptyStmt); !ok { + return check.isTerminating(list[i], label) + } + } + return false // all statements are empty +} + +func (check *Checker) isTerminatingSwitch(body []*syntax.CaseClause, label string) bool { + hasDefault := false + for _, cc := range body { + if cc.Cases == nil { + hasDefault = true + } + if !check.isTerminatingList(cc.Body, "") || hasBreakList(cc.Body, label, true) { + return false + } + } + return hasDefault +} + +// TODO(gri) For nested breakable statements, the current implementation of hasBreak +// will traverse the same subtree repeatedly, once for each label. Replace +// with a single-pass label/break matching phase. + +// hasBreak reports if s is or contains a break statement +// referring to the label-ed statement or implicit-ly the +// closest outer breakable statement. 
+func hasBreak(s syntax.Stmt, label string, implicit bool) bool { + switch s := s.(type) { + default: + unreachable() + + case *syntax.DeclStmt, *syntax.EmptyStmt, *syntax.ExprStmt, + *syntax.SendStmt, *syntax.AssignStmt, *syntax.CallStmt, + *syntax.ReturnStmt: + // no chance + + case *syntax.LabeledStmt: + return hasBreak(s.Stmt, label, implicit) + + case *syntax.BranchStmt: + if s.Tok == syntax.Break { + if s.Label == nil { + return implicit + } + if s.Label.Value == label { + return true + } + } + + case *syntax.BlockStmt: + return hasBreakList(s.List, label, implicit) + + case *syntax.IfStmt: + if hasBreak(s.Then, label, implicit) || + s.Else != nil && hasBreak(s.Else, label, implicit) { + return true + } + + case *syntax.SwitchStmt: + if label != "" && hasBreakCaseList(s.Body, label, false) { + return true + } + + case *syntax.SelectStmt: + if label != "" && hasBreakCommList(s.Body, label, false) { + return true + } + + case *syntax.ForStmt: + if label != "" && hasBreak(s.Body, label, false) { + return true + } + } + + return false +} + +func hasBreakList(list []syntax.Stmt, label string, implicit bool) bool { + for _, s := range list { + if hasBreak(s, label, implicit) { + return true + } + } + return false +} + +func hasBreakCaseList(list []*syntax.CaseClause, label string, implicit bool) bool { + for _, s := range list { + if hasBreakList(s.Body, label, implicit) { + return true + } + } + return false +} + +func hasBreakCommList(list []*syntax.CommClause, label string, implicit bool) bool { + for _, s := range list { + if hasBreakList(s.Body, label, implicit) { + return true + } + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/scope.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/scope.go new file mode 100644 index 0000000000000000000000000000000000000000..25bde6a794fbac52b3751047e0844b41d4a9e9b1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/scope.go @@ 
-0,0 +1,292 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements Scopes. + +package types2 + +import ( + "cmd/compile/internal/syntax" + "fmt" + "io" + "sort" + "strings" + "sync" +) + +// A Scope maintains a set of objects and links to its containing +// (parent) and contained (children) scopes. Objects may be inserted +// and looked up by name. The zero value for Scope is a ready-to-use +// empty scope. +type Scope struct { + parent *Scope + children []*Scope + number int // parent.children[number-1] is this scope; 0 if there is no parent + elems map[string]Object // lazily allocated + pos, end syntax.Pos // scope extent; may be invalid + comment string // for debugging only + isFunc bool // set if this is a function scope (internal use only) +} + +// NewScope returns a new, empty scope contained in the given parent +// scope, if any. The comment is for debugging only. +func NewScope(parent *Scope, pos, end syntax.Pos, comment string) *Scope { + s := &Scope{parent, nil, 0, nil, pos, end, comment, false} + // don't add children to Universe scope! + if parent != nil && parent != Universe { + parent.children = append(parent.children, s) + s.number = len(parent.children) + } + return s +} + +// Parent returns the scope's containing (parent) scope. +func (s *Scope) Parent() *Scope { return s.parent } + +// Len returns the number of scope elements. +func (s *Scope) Len() int { return len(s.elems) } + +// Names returns the scope's element names in sorted order. +func (s *Scope) Names() []string { + names := make([]string, len(s.elems)) + i := 0 + for name := range s.elems { + names[i] = name + i++ + } + sort.Strings(names) + return names +} + +// NumChildren returns the number of scopes nested in s. +func (s *Scope) NumChildren() int { return len(s.children) } + +// Child returns the i'th child scope for 0 <= i < NumChildren(). 
+func (s *Scope) Child(i int) *Scope { return s.children[i] } + +// Lookup returns the object in scope s with the given name if such an +// object exists; otherwise the result is nil. +func (s *Scope) Lookup(name string) Object { + return resolve(name, s.elems[name]) +} + +// LookupParent follows the parent chain of scopes starting with s until +// it finds a scope where Lookup(name) returns a non-nil object, and then +// returns that scope and object. If a valid position pos is provided, +// only objects that were declared at or before pos are considered. +// If no such scope and object exists, the result is (nil, nil). +// +// Note that obj.Parent() may be different from the returned scope if the +// object was inserted into the scope and already had a parent at that +// time (see Insert). This can only happen for dot-imported objects +// whose scope is the scope of the package that exported them. +func (s *Scope) LookupParent(name string, pos syntax.Pos) (*Scope, Object) { + for ; s != nil; s = s.parent { + if obj := s.Lookup(name); obj != nil && (!pos.IsKnown() || cmpPos(obj.scopePos(), pos) <= 0) { + return s, obj + } + } + return nil, nil +} + +// Insert attempts to insert an object obj into scope s. +// If s already contains an alternative object alt with +// the same name, Insert leaves s unchanged and returns alt. +// Otherwise it inserts obj, sets the object's parent scope +// if not already set, and returns nil. +func (s *Scope) Insert(obj Object) Object { + name := obj.Name() + if alt := s.Lookup(name); alt != nil { + return alt + } + s.insert(name, obj) + if obj.Parent() == nil { + obj.setParent(s) + } + return nil +} + +// InsertLazy is like Insert, but allows deferring construction of the +// inserted object until it's accessed with Lookup. The Object +// returned by resolve must have the same name as given to InsertLazy. +// If s already contains an alternative object with the same name, +// InsertLazy leaves s unchanged and returns false. 
Otherwise it +// records the binding and returns true. The object's parent scope +// will be set to s after resolve is called. +func (s *Scope) InsertLazy(name string, resolve func() Object) bool { + if s.elems[name] != nil { + return false + } + s.insert(name, &lazyObject{parent: s, resolve: resolve}) + return true +} + +func (s *Scope) insert(name string, obj Object) { + if s.elems == nil { + s.elems = make(map[string]Object) + } + s.elems[name] = obj +} + +// Squash merges s with its parent scope p by adding all +// objects of s to p, adding all children of s to the +// children of p, and removing s from p's children. +// The function f is called for each object obj in s which +// has an object alt in p. s should be discarded after +// having been squashed. +func (s *Scope) Squash(err func(obj, alt Object)) { + p := s.parent + assert(p != nil) + for name, obj := range s.elems { + obj = resolve(name, obj) + obj.setParent(nil) + if alt := p.Insert(obj); alt != nil { + err(obj, alt) + } + } + + j := -1 // index of s in p.children + for i, ch := range p.children { + if ch == s { + j = i + break + } + } + assert(j >= 0) + k := len(p.children) - 1 + p.children[j] = p.children[k] + p.children = p.children[:k] + + p.children = append(p.children, s.children...) + + s.children = nil + s.elems = nil +} + +// Pos and End describe the scope's source code extent [pos, end). +// The results are guaranteed to be valid only if the type-checked +// AST has complete position information. The extent is undefined +// for Universe and package scopes. +func (s *Scope) Pos() syntax.Pos { return s.pos } +func (s *Scope) End() syntax.Pos { return s.end } + +// Contains reports whether pos is within the scope's extent. +// The result is guaranteed to be valid only if the type-checked +// AST has complete position information. 
+func (s *Scope) Contains(pos syntax.Pos) bool { + return cmpPos(s.pos, pos) <= 0 && cmpPos(pos, s.end) < 0 +} + +// Innermost returns the innermost (child) scope containing +// pos. If pos is not within any scope, the result is nil. +// The result is also nil for the Universe scope. +// The result is guaranteed to be valid only if the type-checked +// AST has complete position information. +func (s *Scope) Innermost(pos syntax.Pos) *Scope { + // Package scopes do not have extents since they may be + // discontiguous, so iterate over the package's files. + if s.parent == Universe { + for _, s := range s.children { + if inner := s.Innermost(pos); inner != nil { + return inner + } + } + } + + if s.Contains(pos) { + for _, s := range s.children { + if s.Contains(pos) { + return s.Innermost(pos) + } + } + return s + } + return nil +} + +// WriteTo writes a string representation of the scope to w, +// with the scope elements sorted by name. +// The level of indentation is controlled by n >= 0, with +// n == 0 for no indentation. +// If recurse is set, it also writes nested (children) scopes. +func (s *Scope) WriteTo(w io.Writer, n int, recurse bool) { + const ind = ". " + indn := strings.Repeat(ind, n) + + fmt.Fprintf(w, "%s%s scope %p {\n", indn, s.comment, s) + + indn1 := indn + ind + for _, name := range s.Names() { + fmt.Fprintf(w, "%s%s\n", indn1, s.Lookup(name)) + } + + if recurse { + for _, s := range s.children { + s.WriteTo(w, n+1, recurse) + } + } + + fmt.Fprintf(w, "%s}\n", indn) +} + +// String returns a string representation of the scope, for debugging. +func (s *Scope) String() string { + var buf strings.Builder + s.WriteTo(&buf, 0, false) + return buf.String() +} + +// A lazyObject represents an imported Object that has not been fully +// resolved yet by its importer. 
+type lazyObject struct { + parent *Scope + resolve func() Object + obj Object + once sync.Once +} + +// resolve returns the Object represented by obj, resolving lazy +// objects as appropriate. +func resolve(name string, obj Object) Object { + if lazy, ok := obj.(*lazyObject); ok { + lazy.once.Do(func() { + obj := lazy.resolve() + + if _, ok := obj.(*lazyObject); ok { + panic("recursive lazy object") + } + if obj.Name() != name { + panic("lazy object has unexpected name") + } + + if obj.Parent() == nil { + obj.setParent(lazy.parent) + } + lazy.obj = obj + }) + + obj = lazy.obj + } + return obj +} + +// stub implementations so *lazyObject implements Object and we can +// store them directly into Scope.elems. +func (*lazyObject) Parent() *Scope { panic("unreachable") } +func (*lazyObject) Pos() syntax.Pos { panic("unreachable") } +func (*lazyObject) Pkg() *Package { panic("unreachable") } +func (*lazyObject) Name() string { panic("unreachable") } +func (*lazyObject) Type() Type { panic("unreachable") } +func (*lazyObject) Exported() bool { panic("unreachable") } +func (*lazyObject) Id() string { panic("unreachable") } +func (*lazyObject) String() string { panic("unreachable") } +func (*lazyObject) order() uint32 { panic("unreachable") } +func (*lazyObject) color() color { panic("unreachable") } +func (*lazyObject) setType(Type) { panic("unreachable") } +func (*lazyObject) setOrder(uint32) { panic("unreachable") } +func (*lazyObject) setColor(color color) { panic("unreachable") } +func (*lazyObject) setParent(*Scope) { panic("unreachable") } +func (*lazyObject) sameId(pkg *Package, name string) bool { panic("unreachable") } +func (*lazyObject) scopePos() syntax.Pos { panic("unreachable") } +func (*lazyObject) setScopePos(pos syntax.Pos) { panic("unreachable") } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/selection.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/selection.go new file mode 100644 index 
0000000000000000000000000000000000000000..dfbf3a0191c1e457f6434c9dc519b68cada06687 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/selection.go @@ -0,0 +1,180 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements Selections. + +package types2 + +import ( + "bytes" + "fmt" +) + +// SelectionKind describes the kind of a selector expression x.f +// (excluding qualified identifiers). +// +// If x is a struct or *struct, a selector expression x.f may denote a +// sequence of selection operations x.a.b.c.f. The SelectionKind +// describes the kind of the final (explicit) operation; all the +// previous (implicit) operations are always field selections. +// Each element of Indices specifies an implicit field (a, b, c) +// by its index in the struct type of the field selection operand. +// +// For a FieldVal operation, the final selection refers to the field +// specified by Selection.Obj. +// +// For a MethodVal operation, the final selection refers to a method. +// If the "pointerness" of the method's declared receiver does not +// match that of the effective receiver after implicit field +// selection, then an & or * operation is implicitly applied to the +// receiver variable or value. +// So, x.f denotes (&x.a.b.c).f when f requires a pointer receiver but +// x.a.b.c is a non-pointer variable; and it denotes (*x.a.b.c).f when +// f requires a non-pointer receiver but x.a.b.c is a pointer value. +// +// All pointer indirections, whether due to implicit or explicit field +// selections or * operations inserted for "pointerness", panic if +// applied to a nil pointer, so a method call x.f() may panic even +// before the function call. 
+// +// By contrast, a MethodExpr operation T.f is essentially equivalent +// to a function literal of the form: +// +// func(x T, args) (results) { return x.f(args) } +// +// Consequently, any implicit field selections and * operations +// inserted for "pointerness" are not evaluated until the function is +// called, so a T.f or (*T).f expression never panics. +type SelectionKind int + +const ( + FieldVal SelectionKind = iota // x.f is a struct field selector + MethodVal // x.f is a method selector + MethodExpr // x.f is a method expression +) + +// A Selection describes a selector expression x.f. +// For the declarations: +// +// type T struct{ x int; E } +// type E struct{} +// func (e E) m() {} +// var p *T +// +// the following relations exist: +// +// Selector Kind Recv Obj Type Index Indirect +// +// p.x FieldVal T x int {0} true +// p.m MethodVal *T m func() {1, 0} true +// T.m MethodExpr T m func(T) {1, 0} false +type Selection struct { + kind SelectionKind + recv Type // type of x + obj Object // object denoted by x.f + index []int // path from x to x.f + indirect bool // set if there was any pointer indirection on the path +} + +// Kind returns the selection kind. +func (s *Selection) Kind() SelectionKind { return s.kind } + +// Recv returns the type of x in x.f. +func (s *Selection) Recv() Type { return s.recv } + +// Obj returns the object denoted by x.f; a *Var for +// a field selection, and a *Func in all other cases. +func (s *Selection) Obj() Object { return s.obj } + +// Type returns the type of x.f, which may be different from the type of f. +// See Selection for more information. +func (s *Selection) Type() Type { + switch s.kind { + case MethodVal: + // The type of x.f is a method with its receiver type set + // to the type of x. 
+ sig := *s.obj.(*Func).typ.(*Signature) + recv := *sig.recv + recv.typ = s.recv + sig.recv = &recv + return &sig + + case MethodExpr: + // The type of x.f is a function (without receiver) + // and an additional first argument with the same type as x. + // TODO(gri) Similar code is already in call.go - factor! + // TODO(gri) Compute this eagerly to avoid allocations. + sig := *s.obj.(*Func).typ.(*Signature) + arg0 := *sig.recv + sig.recv = nil + arg0.typ = s.recv + var params []*Var + if sig.params != nil { + params = sig.params.vars + } + sig.params = NewTuple(append([]*Var{&arg0}, params...)...) + return &sig + } + + // In all other cases, the type of x.f is the type of x. + return s.obj.Type() +} + +// Index describes the path from x to f in x.f. +// The last index entry is the field or method index of the type declaring f; +// either: +// +// 1. the list of declared methods of a named type; or +// 2. the list of methods of an interface type; or +// 3. the list of fields of a struct type. +// +// The earlier index entries are the indices of the embedded fields implicitly +// traversed to get from (the type of) x to f, starting at embedding depth 0. +func (s *Selection) Index() []int { return s.index } + +// Indirect reports whether any pointer indirection was required to get from +// x to f in x.f. +// +// Beware: Indirect spuriously returns true (Go issue #8353) for a +// MethodVal selection in which the receiver argument and parameter +// both have type *T so there is no indirection. +// Unfortunately, a fix is too risky. +func (s *Selection) Indirect() bool { return s.indirect } + +func (s *Selection) String() string { return SelectionString(s, nil) } + +// SelectionString returns the string form of s. +// The Qualifier controls the printing of +// package-level objects, and may be nil. 
+// +// Examples: +// +// "field (T) f int" +// "method (T) f(X) Y" +// "method expr (T) f(X) Y" +func SelectionString(s *Selection, qf Qualifier) string { + var k string + switch s.kind { + case FieldVal: + k = "field " + case MethodVal: + k = "method " + case MethodExpr: + k = "method expr " + default: + unreachable() + } + var buf bytes.Buffer + buf.WriteString(k) + buf.WriteByte('(') + WriteType(&buf, s.Recv(), qf) + fmt.Fprintf(&buf, ") %s", s.obj.Name()) + if T := s.Type(); s.kind == FieldVal { + buf.WriteByte(' ') + WriteType(&buf, T, qf) + } else { + WriteSignature(&buf, T.(*Signature), qf) + } + return buf.String() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/self_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/self_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3c8bec1c45a458c1de810fea2abf837574c66695 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/self_test.go @@ -0,0 +1,118 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2_test + +import ( + "cmd/compile/internal/syntax" + "internal/testenv" + "path" + "path/filepath" + "runtime" + "testing" + "time" + + . "cmd/compile/internal/types2" +) + +func TestSelf(t *testing.T) { + testenv.MustHaveGoBuild(t) // The Go command is needed for the importer to determine the locations of stdlib .a files. + + files, err := pkgFiles(".") + if err != nil { + t.Fatal(err) + } + + conf := Config{Importer: defaultImporter()} + _, err = conf.Check("cmd/compile/internal/types2", files, nil) + if err != nil { + t.Fatal(err) + } +} + +func BenchmarkCheck(b *testing.B) { + testenv.MustHaveGoBuild(b) // The Go command is needed for the importer to determine the locations of stdlib .a files. 
+ + for _, p := range []string{ + filepath.Join("src", "net", "http"), + filepath.Join("src", "go", "parser"), + filepath.Join("src", "go", "constant"), + filepath.Join("src", "runtime"), + filepath.Join("src", "go", "internal", "gcimporter"), + } { + b.Run(path.Base(p), func(b *testing.B) { + path := filepath.Join(runtime.GOROOT(), p) + for _, ignoreFuncBodies := range []bool{false, true} { + name := "funcbodies" + if ignoreFuncBodies { + name = "nofuncbodies" + } + b.Run(name, func(b *testing.B) { + b.Run("info", func(b *testing.B) { + runbench(b, path, ignoreFuncBodies, true) + }) + b.Run("noinfo", func(b *testing.B) { + runbench(b, path, ignoreFuncBodies, false) + }) + }) + } + }) + } +} + +func runbench(b *testing.B, path string, ignoreFuncBodies, writeInfo bool) { + files, err := pkgFiles(path) + if err != nil { + b.Fatal(err) + } + + // determine line count + var lines uint + for _, f := range files { + lines += f.EOF.Line() + } + + b.ResetTimer() + start := time.Now() + for i := 0; i < b.N; i++ { + conf := Config{ + IgnoreFuncBodies: ignoreFuncBodies, + Importer: defaultImporter(), + } + var info *Info + if writeInfo { + info = &Info{ + Types: make(map[syntax.Expr]TypeAndValue), + Defs: make(map[*syntax.Name]Object), + Uses: make(map[*syntax.Name]Object), + Implicits: make(map[syntax.Node]Object), + Selections: make(map[*syntax.SelectorExpr]*Selection), + Scopes: make(map[syntax.Node]*Scope), + } + } + if _, err := conf.Check(path, files, info); err != nil { + b.Fatal(err) + } + } + b.StopTimer() + b.ReportMetric(float64(lines)*float64(b.N)/time.Since(start).Seconds(), "lines/s") +} + +func pkgFiles(path string) ([]*syntax.File, error) { + filenames, err := pkgFilenames(path, true) // from stdlib_test.go + if err != nil { + return nil, err + } + + var files []*syntax.File + for _, filename := range filenames { + file, err := syntax.ParseFile(filename, nil, nil, 0) + if err != nil { + return nil, err + } + files = append(files, file) + } + + return files, 
nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/signature.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/signature.go new file mode 100644 index 0000000000000000000000000000000000000000..18a64ec1a0a9d3943d28ecca17916e26fa50c840 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/signature.go @@ -0,0 +1,332 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import ( + "cmd/compile/internal/syntax" + "fmt" + . "internal/types/errors" +) + +// ---------------------------------------------------------------------------- +// API + +// A Signature represents a (non-builtin) function or method type. +// The receiver is ignored when comparing signatures for identity. +type Signature struct { + // We need to keep the scope in Signature (rather than passing it around + // and store it in the Func Object) because when type-checking a function + // literal we call the general type checker which returns a general Type. + // We then unpack the *Signature and use the scope for the literal body. + rparams *TypeParamList // receiver type parameters from left to right, or nil + tparams *TypeParamList // type parameters from left to right, or nil + scope *Scope // function scope for package-local and non-instantiated signatures; nil otherwise + recv *Var // nil if not a method + params *Tuple // (incoming) parameters from left to right; or nil + results *Tuple // (outgoing) results from left to right; or nil + variadic bool // true if the last parameter's type is of the form ...T (or string, for append built-in only) +} + +// NewSignatureType creates a new function type for the given receiver, +// receiver type parameters, type parameters, parameters, and results. 
If +// variadic is set, params must hold at least one parameter and the last +// parameter's core type must be of unnamed slice or bytestring type. +// If recv is non-nil, typeParams must be empty. If recvTypeParams is +// non-empty, recv must be non-nil. +func NewSignatureType(recv *Var, recvTypeParams, typeParams []*TypeParam, params, results *Tuple, variadic bool) *Signature { + if variadic { + n := params.Len() + if n == 0 { + panic("variadic function must have at least one parameter") + } + core := coreString(params.At(n - 1).typ) + if _, ok := core.(*Slice); !ok && !isString(core) { + panic(fmt.Sprintf("got %s, want variadic parameter with unnamed slice type or string as core type", core.String())) + } + } + sig := &Signature{recv: recv, params: params, results: results, variadic: variadic} + if len(recvTypeParams) != 0 { + if recv == nil { + panic("function with receiver type parameters must have a receiver") + } + sig.rparams = bindTParams(recvTypeParams) + } + if len(typeParams) != 0 { + if recv != nil { + panic("function with type parameters cannot have a receiver") + } + sig.tparams = bindTParams(typeParams) + } + return sig +} + +// Recv returns the receiver of signature s (if a method), or nil if a +// function. It is ignored when comparing signatures for identity. +// +// For an abstract method, Recv returns the enclosing interface either +// as a *Named or an *Interface. Due to embedding, an interface may +// contain methods whose receiver type is a different interface. +func (s *Signature) Recv() *Var { return s.recv } + +// TypeParams returns the type parameters of signature s, or nil. +func (s *Signature) TypeParams() *TypeParamList { return s.tparams } + +// SetTypeParams sets the type parameters of signature s. +func (s *Signature) SetTypeParams(tparams []*TypeParam) { s.tparams = bindTParams(tparams) } + +// RecvTypeParams returns the receiver type parameters of signature s, or nil. 
+func (s *Signature) RecvTypeParams() *TypeParamList { return s.rparams } + +// Params returns the parameters of signature s, or nil. +func (s *Signature) Params() *Tuple { return s.params } + +// Results returns the results of signature s, or nil. +func (s *Signature) Results() *Tuple { return s.results } + +// Variadic reports whether the signature s is variadic. +func (s *Signature) Variadic() bool { return s.variadic } + +func (s *Signature) Underlying() Type { return s } +func (s *Signature) String() string { return TypeString(s, nil) } + +// ---------------------------------------------------------------------------- +// Implementation + +// funcType type-checks a function or method type. +func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []*syntax.Field, ftyp *syntax.FuncType) { + check.openScope(ftyp, "function") + check.scope.isFunc = true + check.recordScope(ftyp, check.scope) + sig.scope = check.scope + defer check.closeScope() + + if recvPar != nil { + // collect generic receiver type parameters, if any + // - a receiver type parameter is like any other type parameter, except that it is declared implicitly + // - the receiver specification acts as local declaration for its type parameters, which may be blank + _, rname, rparams := check.unpackRecv(recvPar.Type, true) + if len(rparams) > 0 { + // The scope of the type parameter T in "func (r T[T]) f()" + // starts after f, not at "r"; see #52038. + scopePos := ftyp.Pos() + tparams := make([]*TypeParam, len(rparams)) + for i, rparam := range rparams { + tparams[i] = check.declareTypeParam(rparam, scopePos) + } + sig.rparams = bindTParams(tparams) + // Blank identifiers don't get declared, so naive type-checking of the + // receiver type expression would fail in Checker.collectParams below, + // when Checker.ident cannot resolve the _ to a type. 
+ // + // Checker.recvTParamMap maps these blank identifiers to their type parameter + // types, so that they may be resolved in Checker.ident when they fail + // lookup in the scope. + for i, p := range rparams { + if p.Value == "_" { + if check.recvTParamMap == nil { + check.recvTParamMap = make(map[*syntax.Name]*TypeParam) + } + check.recvTParamMap[p] = tparams[i] + } + } + // determine receiver type to get its type parameters + // and the respective type parameter bounds + var recvTParams []*TypeParam + if rname != nil { + // recv should be a Named type (otherwise an error is reported elsewhere) + // Also: Don't report an error via genericType since it will be reported + // again when we type-check the signature. + // TODO(gri) maybe the receiver should be marked as invalid instead? + if recv := asNamed(check.genericType(rname, nil)); recv != nil { + recvTParams = recv.TypeParams().list() + } + } + // provide type parameter bounds + if len(tparams) == len(recvTParams) { + smap := makeRenameMap(recvTParams, tparams) + for i, tpar := range tparams { + recvTPar := recvTParams[i] + check.mono.recordCanon(tpar, recvTPar) + // recvTPar.bound is (possibly) parameterized in the context of the + // receiver type declaration. Substitute parameters for the current + // context. + tpar.bound = check.subst(tpar.obj.pos, recvTPar.bound, smap, nil, check.context()) + } + } else if len(tparams) < len(recvTParams) { + // Reporting an error here is a stop-gap measure to avoid crashes in the + // compiler when a type parameter/argument cannot be inferred later. It + // may lead to follow-on errors (see issues go.dev/issue/51339, go.dev/issue/51343). + // TODO(gri) find a better solution + got := measure(len(tparams), "type parameter") + check.errorf(recvPar, BadRecv, "got %s, but receiver base type declares %d", got, len(recvTParams)) + } + } + } + + if tparams != nil { + // The parser will complain about invalid type parameters for methods. 
+ check.collectTypeParams(&sig.tparams, tparams) + } + + // Use a temporary scope for all parameter declarations and then + // squash that scope into the parent scope (and report any + // redeclarations at that time). + // + // TODO(adonovan): now that each declaration has the correct + // scopePos, there should be no need for scope squashing. + // Audit to ensure all lookups honor scopePos and simplify. + scope := NewScope(check.scope, nopos, nopos, "function body (temp. scope)") + scopePos := syntax.EndPos(ftyp) // all parameters' scopes start after the signature + var recvList []*Var // TODO(gri) remove the need for making a list here + if recvPar != nil { + recvList, _ = check.collectParams(scope, []*syntax.Field{recvPar}, false, scopePos) // use rewritten receiver type, if any + } + params, variadic := check.collectParams(scope, ftyp.ParamList, true, scopePos) + results, _ := check.collectParams(scope, ftyp.ResultList, false, scopePos) + scope.Squash(func(obj, alt Object) { + var err error_ + err.code = DuplicateDecl + err.errorf(obj, "%s redeclared in this block", obj.Name()) + err.recordAltDecl(alt) + check.report(&err) + }) + + if recvPar != nil { + // recv parameter list present (may be empty) + // spec: "The receiver is specified via an extra parameter section preceding the + // method name. That parameter section must declare a single parameter, the receiver." + var recv *Var + switch len(recvList) { + case 0: + // error reported by resolver + recv = NewParam(nopos, nil, "", Typ[Invalid]) // ignore recv below + default: + // more than one receiver + check.error(recvList[len(recvList)-1].Pos(), InvalidRecv, "method must have exactly one receiver") + fallthrough // continue with first receiver + case 1: + recv = recvList[0] + } + sig.recv = recv + + // Delay validation of receiver type as it may cause premature expansion + // of types the receiver type is dependent on (see issues go.dev/issue/51232, go.dev/issue/51233). 
+ check.later(func() { + // spec: "The receiver type must be of the form T or *T where T is a type name." + rtyp, _ := deref(recv.typ) + atyp := Unalias(rtyp) + if !isValid(atyp) { + return // error was reported before + } + // spec: "The type denoted by T is called the receiver base type; it must not + // be a pointer or interface type and it must be declared in the same package + // as the method." + switch T := atyp.(type) { + case *Named: + // The receiver type may be an instantiated type referred to + // by an alias (which cannot have receiver parameters for now). + if T.TypeArgs() != nil && sig.RecvTypeParams() == nil { + check.errorf(recv, InvalidRecv, "cannot define new methods on instantiated type %s", rtyp) + break + } + if T.obj.pkg != check.pkg { + check.errorf(recv, InvalidRecv, "cannot define new methods on non-local type %s", rtyp) + break + } + var cause string + switch u := T.under().(type) { + case *Basic: + // unsafe.Pointer is treated like a regular pointer + if u.kind == UnsafePointer { + cause = "unsafe.Pointer" + } + case *Pointer, *Interface: + cause = "pointer or interface type" + case *TypeParam: + // The underlying type of a receiver base type cannot be a + // type parameter: "type T[P any] P" is not a valid declaration. + unreachable() + } + if cause != "" { + check.errorf(recv, InvalidRecv, "invalid receiver type %s (%s)", rtyp, cause) + } + case *Basic: + check.errorf(recv, InvalidRecv, "cannot define new methods on non-local type %s", rtyp) + default: + check.errorf(recv, InvalidRecv, "invalid receiver type %s", recv.typ) + } + }).describef(recv, "validate receiver %s", recv) + } + + sig.params = NewTuple(params...) + sig.results = NewTuple(results...) + sig.variadic = variadic +} + +// collectParams declares the parameters of list in scope and returns the corresponding +// variable list. 
+func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, variadicOk bool, scopePos syntax.Pos) (params []*Var, variadic bool) { + if list == nil { + return + } + + var named, anonymous bool + + var typ Type + var prev syntax.Expr + for i, field := range list { + ftype := field.Type + // type-check type of grouped fields only once + if ftype != prev { + prev = ftype + if t, _ := ftype.(*syntax.DotsType); t != nil { + ftype = t.Elem + if variadicOk && i == len(list)-1 { + variadic = true + } else { + check.softErrorf(t, MisplacedDotDotDot, "can only use ... with final parameter in list") + // ignore ... and continue + } + } + typ = check.varType(ftype) + } + // The parser ensures that f.Tag is nil and we don't + // care if a constructed AST contains a non-nil tag. + if field.Name != nil { + // named parameter + name := field.Name.Value + if name == "" { + check.error(field.Name, InvalidSyntaxTree, "anonymous parameter") + // ok to continue + } + par := NewParam(field.Name.Pos(), check.pkg, name, typ) + check.declare(scope, field.Name, par, scopePos) + params = append(params, par) + named = true + } else { + // anonymous parameter + par := NewParam(field.Pos(), check.pkg, "", typ) + check.recordImplicit(field, par) + params = append(params, par) + anonymous = true + } + } + + if named && anonymous { + check.error(list[0], InvalidSyntaxTree, "list contains both named and anonymous parameters") + // ok to continue + } + + // For a variadic function, change the last parameter's type from T to []T. + // Since we type-checked T rather than ...T, we also need to retro-actively + // record the type for ...T. 
+ if variadic { + last := params[len(params)-1] + last.typ = &Slice{elem: last.typ} + check.recordTypeAndValue(list[len(list)-1].Type, typexpr, last.typ, nil) + } + + return +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/sizeof_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/sizeof_test.go new file mode 100644 index 0000000000000000000000000000000000000000..740dbc92762aff8b6419660ca2c29d27eba3411b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/sizeof_test.go @@ -0,0 +1,64 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import ( + "reflect" + "testing" +) + +// Signal size changes of important structures. + +func TestSizeof(t *testing.T) { + const _64bit = ^uint(0)>>32 != 0 + + var tests = []struct { + val interface{} // type as a value + _32bit uintptr // size on 32bit platforms + _64bit uintptr // size on 64bit platforms + }{ + // Types + {Basic{}, 16, 32}, + {Array{}, 16, 24}, + {Slice{}, 8, 16}, + {Struct{}, 24, 48}, + {Pointer{}, 8, 16}, + {Tuple{}, 12, 24}, + {Signature{}, 28, 56}, + {Union{}, 12, 24}, + {Interface{}, 40, 80}, + {Map{}, 16, 32}, + {Chan{}, 12, 24}, + {Named{}, 60, 112}, + {TypeParam{}, 28, 48}, + {term{}, 12, 24}, + + // Objects + {PkgName{}, 64, 104}, + {Const{}, 64, 104}, + {TypeName{}, 56, 88}, + {Var{}, 64, 104}, + {Func{}, 64, 104}, + {Label{}, 60, 96}, + {Builtin{}, 60, 96}, + {Nil{}, 56, 88}, + + // Misc + {Scope{}, 60, 104}, + {Package{}, 44, 88}, + {_TypeSet{}, 28, 56}, + } + + for _, test := range tests { + got := reflect.TypeOf(test.val).Size() + want := test._32bit + if _64bit { + want = test._64bit + } + if got != want { + t.Errorf("unsafe.Sizeof(%T) = %d, want %d", test.val, got, want) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/sizes.go 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/sizes.go new file mode 100644 index 0000000000000000000000000000000000000000..486c05c61c7490bd495f25ced951b450d7129afd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/sizes.go @@ -0,0 +1,340 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements Sizes. + +package types2 + +// Sizes defines the sizing functions for package unsafe. +type Sizes interface { + // Alignof returns the alignment of a variable of type T. + // Alignof must implement the alignment guarantees required by the spec. + // The result must be >= 1. + Alignof(T Type) int64 + + // Offsetsof returns the offsets of the given struct fields, in bytes. + // Offsetsof must implement the offset guarantees required by the spec. + // A negative entry in the result indicates that the struct is too large. + Offsetsof(fields []*Var) []int64 + + // Sizeof returns the size of a variable of type T. + // Sizeof must implement the size guarantees required by the spec. + // A negative result indicates that T is too large. + Sizeof(T Type) int64 +} + +// StdSizes is a convenience type for creating commonly used Sizes. +// It makes the following simplifying assumptions: +// +// - The size of explicitly sized basic types (int16, etc.) is the +// specified size. +// - The size of strings and interfaces is 2*WordSize. +// - The size of slices is 3*WordSize. +// - The size of an array of n elements corresponds to the size of +// a struct of n consecutive fields of the array's element type. +// - The size of a struct is the offset of the last field plus that +// field's size. As with all element types, if the struct is used +// in an array its size must first be aligned to a multiple of the +// struct's alignment. +// - All other types have size WordSize. 
+// - Arrays and structs are aligned per spec definition; all other +// types are naturally aligned with a maximum alignment MaxAlign. +// +// *StdSizes implements Sizes. +type StdSizes struct { + WordSize int64 // word size in bytes - must be >= 4 (32bits) + MaxAlign int64 // maximum alignment in bytes - must be >= 1 +} + +func (s *StdSizes) Alignof(T Type) (result int64) { + defer func() { + assert(result >= 1) + }() + + // For arrays and structs, alignment is defined in terms + // of alignment of the elements and fields, respectively. + switch t := under(T).(type) { + case *Array: + // spec: "For a variable x of array type: unsafe.Alignof(x) + // is the same as unsafe.Alignof(x[0]), but at least 1." + return s.Alignof(t.elem) + case *Struct: + if len(t.fields) == 0 && IsSyncAtomicAlign64(T) { + // Special case: sync/atomic.align64 is an + // empty struct we recognize as a signal that + // the struct it contains must be + // 64-bit-aligned. + // + // This logic is equivalent to the logic in + // cmd/compile/internal/types/size.go:calcStructOffset + return 8 + } + + // spec: "For a variable x of struct type: unsafe.Alignof(x) + // is the largest of the values unsafe.Alignof(x.f) for each + // field f of x, but at least 1." + max := int64(1) + for _, f := range t.fields { + if a := s.Alignof(f.typ); a > max { + max = a + } + } + return max + case *Slice, *Interface: + // Multiword data structures are effectively structs + // in which each element has size WordSize. + // Type parameters lead to variable sizes/alignments; + // StdSizes.Alignof won't be called for them. + assert(!isTypeParam(T)) + return s.WordSize + case *Basic: + // Strings are like slices and interfaces. + if t.Info()&IsString != 0 { + return s.WordSize + } + case *TypeParam, *Union: + unreachable() + } + a := s.Sizeof(T) // may be 0 or negative + // spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1." 
+ if a < 1 { + return 1 + } + // complex{64,128} are aligned like [2]float{32,64}. + if isComplex(T) { + a /= 2 + } + if a > s.MaxAlign { + return s.MaxAlign + } + return a +} + +func IsSyncAtomicAlign64(T Type) bool { + named := asNamed(T) + if named == nil { + return false + } + obj := named.Obj() + return obj.Name() == "align64" && + obj.Pkg() != nil && + (obj.Pkg().Path() == "sync/atomic" || + obj.Pkg().Path() == "runtime/internal/atomic") +} + +func (s *StdSizes) Offsetsof(fields []*Var) []int64 { + offsets := make([]int64, len(fields)) + var offs int64 + for i, f := range fields { + if offs < 0 { + // all remaining offsets are too large + offsets[i] = -1 + continue + } + // offs >= 0 + a := s.Alignof(f.typ) + offs = align(offs, a) // possibly < 0 if align overflows + offsets[i] = offs + if d := s.Sizeof(f.typ); d >= 0 && offs >= 0 { + offs += d // ok to overflow to < 0 + } else { + offs = -1 // f.typ or offs is too large + } + } + return offsets +} + +var basicSizes = [...]byte{ + Bool: 1, + Int8: 1, + Int16: 2, + Int32: 4, + Int64: 8, + Uint8: 1, + Uint16: 2, + Uint32: 4, + Uint64: 8, + Float32: 4, + Float64: 8, + Complex64: 8, + Complex128: 16, +} + +func (s *StdSizes) Sizeof(T Type) int64 { + switch t := under(T).(type) { + case *Basic: + assert(isTyped(T)) + k := t.kind + if int(k) < len(basicSizes) { + if s := basicSizes[k]; s > 0 { + return int64(s) + } + } + if k == String { + return s.WordSize * 2 + } + case *Array: + n := t.len + if n <= 0 { + return 0 + } + // n > 0 + esize := s.Sizeof(t.elem) + if esize < 0 { + return -1 // element too large + } + if esize == 0 { + return 0 // 0-size element + } + // esize > 0 + a := s.Alignof(t.elem) + ea := align(esize, a) // possibly < 0 if align overflows + if ea < 0 { + return -1 + } + // ea >= 1 + n1 := n - 1 // n1 >= 0 + // Final size is ea*n1 + esize; and size must be <= maxInt64. 
+ const maxInt64 = 1<<63 - 1 + if n1 > 0 && ea > maxInt64/n1 { + return -1 // ea*n1 overflows + } + return ea*n1 + esize // may still overflow to < 0 which is ok + case *Slice: + return s.WordSize * 3 + case *Struct: + n := t.NumFields() + if n == 0 { + return 0 + } + offsets := s.Offsetsof(t.fields) + offs := offsets[n-1] + size := s.Sizeof(t.fields[n-1].typ) + if offs < 0 || size < 0 { + return -1 // type too large + } + return offs + size // may overflow to < 0 which is ok + case *Interface: + // Type parameters lead to variable sizes/alignments; + // StdSizes.Sizeof won't be called for them. + assert(!isTypeParam(T)) + return s.WordSize * 2 + case *TypeParam, *Union: + unreachable() + } + return s.WordSize // catch-all +} + +// common architecture word sizes and alignments +var gcArchSizes = map[string]*gcSizes{ + "386": {4, 4}, + "amd64": {8, 8}, + "amd64p32": {4, 8}, + "arm": {4, 4}, + "arm64": {8, 8}, + "loong64": {8, 8}, + "mips": {4, 4}, + "mipsle": {4, 4}, + "mips64": {8, 8}, + "mips64le": {8, 8}, + "ppc64": {8, 8}, + "ppc64le": {8, 8}, + "riscv64": {8, 8}, + "s390x": {8, 8}, + "sparc64": {8, 8}, + "wasm": {8, 8}, + // When adding more architectures here, + // update the doc string of SizesFor below. +} + +// SizesFor returns the Sizes used by a compiler for an architecture. +// The result is nil if a compiler/architecture pair is not known. +// +// Supported architectures for compiler "gc": +// "386", "amd64", "amd64p32", "arm", "arm64", "loong64", "mips", "mipsle", +// "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "sparc64", "wasm". +func SizesFor(compiler, arch string) Sizes { + switch compiler { + case "gc": + if s := gcSizesFor(compiler, arch); s != nil { + return Sizes(s) + } + case "gccgo": + if s, ok := gccgoArchSizes[arch]; ok { + return Sizes(s) + } + } + return nil +} + +// stdSizes is used if Config.Sizes == nil. 
+var stdSizes = SizesFor("gc", "amd64") + +func (conf *Config) alignof(T Type) int64 { + f := stdSizes.Alignof + if conf.Sizes != nil { + f = conf.Sizes.Alignof + } + if a := f(T); a >= 1 { + return a + } + panic("implementation of alignof returned an alignment < 1") +} + +func (conf *Config) offsetsof(T *Struct) []int64 { + var offsets []int64 + if T.NumFields() > 0 { + // compute offsets on demand + f := stdSizes.Offsetsof + if conf.Sizes != nil { + f = conf.Sizes.Offsetsof + } + offsets = f(T.fields) + // sanity checks + if len(offsets) != T.NumFields() { + panic("implementation of offsetsof returned the wrong number of offsets") + } + } + return offsets +} + +// offsetof returns the offset of the field specified via +// the index sequence relative to T. All embedded fields +// must be structs (rather than pointers to structs). +// If the offset is too large (because T is too large), +// the result is negative. +func (conf *Config) offsetof(T Type, index []int) int64 { + var offs int64 + for _, i := range index { + s := under(T).(*Struct) + d := conf.offsetsof(s)[i] + if d < 0 { + return -1 + } + offs += d + if offs < 0 { + return -1 + } + T = s.fields[i].typ + } + return offs +} + +// sizeof returns the size of T. +// If T is too large, the result is negative. +func (conf *Config) sizeof(T Type) int64 { + f := stdSizes.Sizeof + if conf.Sizes != nil { + f = conf.Sizes.Sizeof + } + return f(T) +} + +// align returns the smallest y >= x such that y % a == 0. +// a must be within 1 and 8 and it must be a power of 2. +// The result may be negative due to overflow. 
+func align(x, a int64) int64 { + assert(x >= 0 && 1 <= a && a <= 8 && a&(a-1) == 0) + return (x + a - 1) &^ (a - 1) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/sizes_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/sizes_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9a772f4b1523fe23e604ad0980a1af3fd99be097 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/sizes_test.go @@ -0,0 +1,194 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains tests for sizes. + +package types2_test + +import ( + "cmd/compile/internal/syntax" + "cmd/compile/internal/types2" + "internal/testenv" + "testing" +) + +// findStructType typechecks src and returns the first struct type encountered. +func findStructType(t *testing.T, src string) *types2.Struct { + return findStructTypeConfig(t, src, &types2.Config{}) +} + +func findStructTypeConfig(t *testing.T, src string, conf *types2.Config) *types2.Struct { + types := make(map[syntax.Expr]types2.TypeAndValue) + mustTypecheck(src, nil, &types2.Info{Types: types}) + for _, tv := range types { + if ts, ok := tv.Type.(*types2.Struct); ok { + return ts + } + } + t.Fatalf("failed to find a struct type in src:\n%s\n", src) + return nil +} + +// go.dev/issue/16316 +func TestMultipleSizeUse(t *testing.T) { + const src = ` +package main + +type S struct { + i int + b bool + s string + n int +} +` + ts := findStructType(t, src) + sizes := types2.StdSizes{WordSize: 4, MaxAlign: 4} + if got := sizes.Sizeof(ts); got != 20 { + t.Errorf("Sizeof(%v) with WordSize 4 = %d want 20", ts, got) + } + sizes = types2.StdSizes{WordSize: 8, MaxAlign: 8} + if got := sizes.Sizeof(ts); got != 40 { + t.Errorf("Sizeof(%v) with WordSize 8 = %d want 40", ts, got) + } +} + +// go.dev/issue/16464 +func TestAlignofNaclSlice(t 
*testing.T) { + const src = ` +package main + +var s struct { + x *int + y []byte +} +` + ts := findStructType(t, src) + sizes := &types2.StdSizes{WordSize: 4, MaxAlign: 8} + var fields []*types2.Var + // Make a copy manually :( + for i := 0; i < ts.NumFields(); i++ { + fields = append(fields, ts.Field(i)) + } + offsets := sizes.Offsetsof(fields) + if offsets[0] != 0 || offsets[1] != 4 { + t.Errorf("OffsetsOf(%v) = %v want %v", ts, offsets, []int{0, 4}) + } +} + +func TestIssue16902(t *testing.T) { + const src = ` +package a + +import "unsafe" + +const _ = unsafe.Offsetof(struct{ x int64 }{}.x) +` + info := types2.Info{Types: make(map[syntax.Expr]types2.TypeAndValue)} + conf := types2.Config{ + Importer: defaultImporter(), + Sizes: &types2.StdSizes{WordSize: 8, MaxAlign: 8}, + } + mustTypecheck(src, &conf, &info) + for _, tv := range info.Types { + _ = conf.Sizes.Sizeof(tv.Type) + _ = conf.Sizes.Alignof(tv.Type) + } +} + +// go.dev/issue/53884. +func TestAtomicAlign(t *testing.T) { + testenv.MustHaveGoBuild(t) // The Go command is needed for the importer to determine the locations of stdlib .a files. 
+ + const src = ` +package main + +import "sync/atomic" + +var s struct { + x int32 + y atomic.Int64 + z int64 +} +` + + want := []int64{0, 8, 16} + for _, arch := range []string{"386", "amd64"} { + t.Run(arch, func(t *testing.T) { + conf := types2.Config{ + Importer: defaultImporter(), + Sizes: types2.SizesFor("gc", arch), + } + ts := findStructTypeConfig(t, src, &conf) + var fields []*types2.Var + // Make a copy manually :( + for i := 0; i < ts.NumFields(); i++ { + fields = append(fields, ts.Field(i)) + } + + offsets := conf.Sizes.Offsetsof(fields) + if offsets[0] != want[0] || offsets[1] != want[1] || offsets[2] != want[2] { + t.Errorf("OffsetsOf(%v) = %v want %v", ts, offsets, want) + } + }) + } +} + +type gcSizeTest struct { + name string + src string +} + +var gcSizesTests = []gcSizeTest{ + { + "issue60431", + ` +package main + +import "unsafe" + +// The foo struct size is expected to be rounded up to 16 bytes. +type foo struct { + a int64 + b bool +} + +func main() { + assert(unsafe.Sizeof(foo{}) == 16) +}`, + }, + { + "issue60734", + ` +package main + +import ( + "unsafe" +) + +// The Data struct size is expected to be rounded up to 16 bytes. 
+type Data struct { + Value uint32 // 4 bytes + Label [10]byte // 10 bytes + Active bool // 1 byte + // padded with 1 byte to make it align +} + +func main() { + assert(unsafe.Sizeof(Data{}) == 16) +} +`, + }, +} + +func TestGCSizes(t *testing.T) { + types2.DefPredeclaredTestFuncs() + for _, tc := range gcSizesTests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + conf := types2.Config{Importer: defaultImporter(), Sizes: types2.SizesFor("gc", "amd64")} + mustTypecheck(tc.src, &conf, nil) + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/slice.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/slice.go new file mode 100644 index 0000000000000000000000000000000000000000..9c22a6fb1b6853f06a03fa0c0f7967aeed1b1aeb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/slice.go @@ -0,0 +1,19 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +// A Slice represents a slice type. +type Slice struct { + elem Type +} + +// NewSlice returns a new slice type for the given element type. +func NewSlice(elem Type) *Slice { return &Slice{elem: elem} } + +// Elem returns the element type of slice s. +func (s *Slice) Elem() Type { return s.elem } + +func (s *Slice) Underlying() Type { return s } +func (s *Slice) String() string { return TypeString(s, nil) } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/stdlib_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/stdlib_test.go new file mode 100644 index 0000000000000000000000000000000000000000..405af78572bbab0fcbac47de6927e49c0801131a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/stdlib_test.go @@ -0,0 +1,488 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file tests types2.Check by using it to +// typecheck the standard library and tests. + +package types2_test + +import ( + "bytes" + "cmd/compile/internal/syntax" + "errors" + "fmt" + "go/build" + "internal/testenv" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "testing" + "time" + + . "cmd/compile/internal/types2" +) + +var stdLibImporter = defaultImporter() + +func TestStdlib(t *testing.T) { + if testing.Short() { + t.Skip("skipping in short mode") + } + + testenv.MustHaveGoBuild(t) + + // Collect non-test files. + dirFiles := make(map[string][]string) + root := filepath.Join(testenv.GOROOT(t), "src") + walkPkgDirs(root, func(dir string, filenames []string) { + dirFiles[dir] = filenames + }, t.Error) + + c := &stdlibChecker{ + dirFiles: dirFiles, + pkgs: make(map[string]*futurePackage), + } + + start := time.Now() + + // Though we read files while parsing, type-checking is otherwise CPU bound. + // + // This doesn't achieve great CPU utilization as many packages may block + // waiting for a common import, but in combination with the non-deterministic + // map iteration below this should provide decent coverage of concurrent + // type-checking (see golang/go#47729). + cpulimit := make(chan struct{}, runtime.GOMAXPROCS(0)) + var wg sync.WaitGroup + + for dir := range dirFiles { + dir := dir + + cpulimit <- struct{}{} + wg.Add(1) + go func() { + defer func() { + wg.Done() + <-cpulimit + }() + + _, err := c.getDirPackage(dir) + if err != nil { + t.Errorf("error checking %s: %v", dir, err) + } + }() + } + + wg.Wait() + + if testing.Verbose() { + fmt.Println(len(dirFiles), "packages typechecked in", time.Since(start)) + } +} + +// stdlibChecker implements concurrent type-checking of the packages defined by +// dirFiles, which must define a closed set of packages (such as GOROOT/src). 
+type stdlibChecker struct { + dirFiles map[string][]string // non-test files per directory; must be pre-populated + + mu sync.Mutex + pkgs map[string]*futurePackage // future cache of type-checking results +} + +// A futurePackage is a future result of type-checking. +type futurePackage struct { + done chan struct{} // guards pkg and err + pkg *Package + err error +} + +func (c *stdlibChecker) Import(path string) (*Package, error) { + panic("unimplemented: use ImportFrom") +} + +func (c *stdlibChecker) ImportFrom(path, dir string, _ ImportMode) (*Package, error) { + if path == "unsafe" { + // unsafe cannot be type checked normally. + return Unsafe, nil + } + + p, err := build.Default.Import(path, dir, build.FindOnly) + if err != nil { + return nil, err + } + + pkg, err := c.getDirPackage(p.Dir) + if pkg != nil { + // As long as pkg is non-nil, avoid redundant errors related to failed + // imports. TestStdlib will collect errors once for each package. + return pkg, nil + } + return nil, err +} + +// getDirPackage gets the package defined in dir from the future cache. +// +// If this is the first goroutine requesting the package, getDirPackage +// type-checks. +func (c *stdlibChecker) getDirPackage(dir string) (*Package, error) { + c.mu.Lock() + fut, ok := c.pkgs[dir] + if !ok { + // First request for this package dir; type check. + fut = &futurePackage{ + done: make(chan struct{}), + } + c.pkgs[dir] = fut + files, ok := c.dirFiles[dir] + c.mu.Unlock() + if !ok { + fut.err = fmt.Errorf("no files for %s", dir) + } else { + // Using dir as the package path here may be inconsistent with the behavior + // of a normal importer, but is sufficient as dir is by construction unique + // to this package. + fut.pkg, fut.err = typecheckFiles(dir, files, c) + } + close(fut.done) + } else { + // Otherwise, await the result. 
+ c.mu.Unlock() + <-fut.done + } + return fut.pkg, fut.err +} + +// firstComment returns the contents of the first non-empty comment in +// the given file, "skip", or the empty string. No matter the present +// comments, if any of them contains a build tag, the result is always +// "skip". Only comments within the first 4K of the file are considered. +// TODO(gri) should only read until we see "package" token. +func firstComment(filename string) (first string) { + f, err := os.Open(filename) + if err != nil { + return "" + } + defer f.Close() + + // read at most 4KB + var buf [4 << 10]byte + n, _ := f.Read(buf[:]) + src := bytes.NewBuffer(buf[:n]) + + // TODO(gri) we need a better way to terminate CommentsDo + defer func() { + if p := recover(); p != nil { + if s, ok := p.(string); ok { + first = s + } + } + }() + + syntax.CommentsDo(src, func(_, _ uint, text string) { + if text[0] != '/' { + return // not a comment + } + + // extract comment text + if text[1] == '*' { + text = text[:len(text)-2] + } + text = strings.TrimSpace(text[2:]) + + if strings.HasPrefix(text, "go:build ") { + panic("skip") + } + if first == "" { + first = text // text may be "" but that's ok + } + // continue as we may still see build tags + }) + + return +} + +func testTestDir(t *testing.T, path string, ignore ...string) { + files, err := os.ReadDir(path) + if err != nil { + // cmd/distpack deletes GOROOT/test, so skip the test if it isn't present. + // cmd/distpack also requires GOROOT/VERSION to exist, so use that to + // suppress false-positive skips. 
+ if _, err := os.Stat(filepath.Join(testenv.GOROOT(t), "test")); os.IsNotExist(err) { + if _, err := os.Stat(filepath.Join(testenv.GOROOT(t), "VERSION")); err == nil { + t.Skipf("skipping: GOROOT/test not present") + } + } + t.Fatal(err) + } + + excluded := make(map[string]bool) + for _, filename := range ignore { + excluded[filename] = true + } + + for _, f := range files { + // filter directory contents + if f.IsDir() || !strings.HasSuffix(f.Name(), ".go") || excluded[f.Name()] { + continue + } + + // get per-file instructions + expectErrors := false + filename := filepath.Join(path, f.Name()) + goVersion := "" + if comment := firstComment(filename); comment != "" { + if strings.Contains(comment, "-goexperiment") { + continue // ignore this file + } + fields := strings.Fields(comment) + switch fields[0] { + case "skip", "compiledir": + continue // ignore this file + case "errorcheck": + expectErrors = true + for _, arg := range fields[1:] { + if arg == "-0" || arg == "-+" || arg == "-std" { + // Marked explicitly as not expecting errors (-0), + // or marked as compiling runtime/stdlib, which is only done + // to trigger runtime/stdlib-only error output. + // In both cases, the code should typecheck. 
+ expectErrors = false + break + } + const prefix = "-lang=" + if strings.HasPrefix(arg, prefix) { + goVersion = arg[len(prefix):] + } + } + } + } + + // parse and type-check file + if testing.Verbose() { + fmt.Println("\t", filename) + } + file, err := syntax.ParseFile(filename, nil, nil, 0) + if err == nil { + conf := Config{ + GoVersion: goVersion, + Importer: stdLibImporter, + } + _, err = conf.Check(filename, []*syntax.File{file}, nil) + } + + if expectErrors { + if err == nil { + t.Errorf("expected errors but found none in %s", filename) + } + } else { + if err != nil { + t.Error(err) + } + } + } +} + +func TestStdTest(t *testing.T) { + testenv.MustHaveGoBuild(t) + + if testing.Short() && testenv.Builder() == "" { + t.Skip("skipping in short mode") + } + + testTestDir(t, filepath.Join(testenv.GOROOT(t), "test"), + "cmplxdivide.go", // also needs file cmplxdivide1.go - ignore + "directive.go", // tests compiler rejection of bad directive placement - ignore + "directive2.go", // tests compiler rejection of bad directive placement - ignore + "embedfunc.go", // tests //go:embed + "embedvers.go", // tests //go:embed + "linkname2.go", // types2 doesn't check validity of //go:xxx directives + "linkname3.go", // types2 doesn't check validity of //go:xxx directives + ) +} + +func TestStdFixed(t *testing.T) { + testenv.MustHaveGoBuild(t) + + if testing.Short() && testenv.Builder() == "" { + t.Skip("skipping in short mode") + } + + testTestDir(t, filepath.Join(testenv.GOROOT(t), "test", "fixedbugs"), + "bug248.go", "bug302.go", "bug369.go", // complex test instructions - ignore + "bug398.go", // types2 doesn't check for anonymous interface cycles (go.dev/issue/56103) + "issue6889.go", // gc-specific test + "issue11362.go", // canonical import path check + "issue16369.go", // types2 handles this correctly - not an issue + "issue18459.go", // types2 doesn't check validity of //go:xxx directives + "issue18882.go", // types2 doesn't check validity of //go:xxx directives + 
"issue20529.go", // types2 does not have constraints on stack size
+		"issue22200.go",  // types2 does not have constraints on stack size
+		"issue22200b.go", // types2 does not have constraints on stack size
+		"issue25507.go",  // types2 does not have constraints on stack size
+		"issue20780.go",  // types2 does not have constraints on stack size
+		"issue42058a.go", // types2 does not have constraints on channel element size
+		"issue42058b.go", // types2 does not have constraints on channel element size
+		"issue48097.go",  // types2 doesn't check validity of //go:xxx directives, and non-init bodyless function
+		"issue48230.go",  // types2 doesn't check validity of //go:xxx directives
+		"issue49767.go",  // types2 does not have constraints on channel element size
+		"issue49814.go",  // types2 does not have constraints on array size
+		"issue56103.go",  // anonymous interface cycles; will be a type checker error in 1.22
+		"issue52697.go",  // types2 does not have constraints on stack size
+
+		// These tests require runtime/cgo.Incomplete, which is only available on some platforms.
+		// However, types2 does not know about build constraints.
+		"bug514.go",
+		"issue40954.go",
+		"issue42032.go",
+		"issue42076.go",
+		"issue46903.go",
+		"issue51733.go",
+		"notinheap2.go",
+		"notinheap3.go",
+	)
+}
+
+func TestStdKen(t *testing.T) {
+	testenv.MustHaveGoBuild(t)
+
+	testTestDir(t, filepath.Join(testenv.GOROOT(t), "test", "ken"))
+}
+
+// Package paths of excluded packages.
+var excluded = map[string]bool{
+	"builtin": true,
+
+	// go.dev/issue/46027: some imports are missing for this submodule.
+	"crypto/internal/edwards25519/field/_asm": true,
+	"crypto/internal/bigmod/_asm":             true,
+}
+
+// printPackageMu synchronizes the printing of type-checked package files in
+// the typecheckFiles function.
+//
+// Without synchronization, package files may be interleaved during concurrent
+// type-checking.
+var printPackageMu sync.Mutex
+
+// typecheckFiles typechecks the given package files.
+func typecheckFiles(path string, filenames []string, importer Importer) (*Package, error) {
+	// Parse package files.
+	var files []*syntax.File
+	for _, filename := range filenames {
+		// Collect all parse errors via errh rather than stopping at the first;
+		// the returned err only signals that at least one error occurred.
+		var errs []error
+		errh := func(err error) { errs = append(errs, err) }
+		file, err := syntax.ParseFile(filename, errh, nil, 0)
+		if err != nil {
+			return nil, errors.Join(errs...)
+		}
+
+		files = append(files, file)
+	}
+
+	if testing.Verbose() {
+		printPackageMu.Lock()
+		fmt.Println("package", files[0].PkgName.Value)
+		for _, filename := range filenames {
+			fmt.Println("\t", filename)
+		}
+		printPackageMu.Unlock()
+	}
+
+	// Typecheck package files.
+	var errs []error
+	conf := Config{
+		Error: func(err error) {
+			errs = append(errs, err)
+		},
+		Importer: importer,
+	}
+	info := Info{Uses: make(map[*syntax.Name]Object)}
+	// The error result of Check is ignored: with conf.Error set, all errors
+	// are accumulated in errs and joined below.
+	pkg, _ := conf.Check(path, files, &info)
+	err := errors.Join(errs...)
+	if err != nil {
+		return pkg, err
+	}
+
+	// Perform checks of API invariants.
+
+	// All Objects have a package, except predeclared ones.
+	// Invariant checked below: an object has a non-nil Pkg() iff it is
+	// not predeclared (universe-scope objects and (error).Error have none).
+	errorError := Universe.Lookup("error").Type().Underlying().(*Interface).ExplicitMethod(0) // (error).Error
+	for id, obj := range info.Uses {
+		predeclared := obj == Universe.Lookup(obj.Name()) || obj == errorError
+		if predeclared == (obj.Pkg() != nil) {
+			posn := id.Pos()
+			if predeclared {
+				return nil, fmt.Errorf("%s: predeclared object with package: %s", posn, obj)
+			} else {
+				return nil, fmt.Errorf("%s: user-defined object without package: %s", posn, obj)
+			}
+		}
+	}
+
+	return pkg, nil
+}
+
+// pkgFilenames returns the list of package filenames for the given directory.
+func pkgFilenames(dir string, includeTest bool) ([]string, error) { + ctxt := build.Default + ctxt.CgoEnabled = false + pkg, err := ctxt.ImportDir(dir, 0) + if err != nil { + if _, nogo := err.(*build.NoGoError); nogo { + return nil, nil // no *.go files, not an error + } + return nil, err + } + if excluded[pkg.ImportPath] { + return nil, nil + } + var filenames []string + for _, name := range pkg.GoFiles { + filenames = append(filenames, filepath.Join(pkg.Dir, name)) + } + if includeTest { + for _, name := range pkg.TestGoFiles { + filenames = append(filenames, filepath.Join(pkg.Dir, name)) + } + } + return filenames, nil +} + +func walkPkgDirs(dir string, pkgh func(dir string, filenames []string), errh func(args ...interface{})) { + w := walker{pkgh, errh} + w.walk(dir) +} + +type walker struct { + pkgh func(dir string, filenames []string) + errh func(args ...any) +} + +func (w *walker) walk(dir string) { + files, err := os.ReadDir(dir) + if err != nil { + w.errh(err) + return + } + + // apply pkgh to the files in directory dir + + // Don't get test files as these packages are imported. + pkgFiles, err := pkgFilenames(dir, false) + if err != nil { + w.errh(err) + return + } + if pkgFiles != nil { + w.pkgh(dir, pkgFiles) + } + + // traverse subdirectories, but don't walk into testdata + for _, f := range files { + if f.IsDir() && f.Name() != "testdata" { + w.walk(filepath.Join(dir, f.Name())) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/stmt.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/stmt.go new file mode 100644 index 0000000000000000000000000000000000000000..d519657b6b153ebe9111478fd6323a5b90e7f9a5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/stmt.go @@ -0,0 +1,1082 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This file implements typechecking of statements. + +package types2 + +import ( + "cmd/compile/internal/syntax" + "go/constant" + "internal/buildcfg" + . "internal/types/errors" + "sort" +) + +func (check *Checker) funcBody(decl *declInfo, name string, sig *Signature, body *syntax.BlockStmt, iota constant.Value) { + if check.conf.IgnoreFuncBodies { + panic("function body not ignored") + } + + if check.conf.Trace { + check.trace(body.Pos(), "-- %s: %s", name, sig) + } + + // save/restore current environment and set up function environment + // (and use 0 indentation at function start) + defer func(env environment, indent int) { + check.environment = env + check.indent = indent + }(check.environment, check.indent) + check.environment = environment{ + decl: decl, + scope: sig.scope, + iota: iota, + sig: sig, + } + check.indent = 0 + + check.stmtList(0, body.List) + + if check.hasLabel && !check.conf.IgnoreBranchErrors { + check.labels(body) + } + + if sig.results.Len() > 0 && !check.isTerminating(body, "") { + check.error(body.Rbrace, MissingReturn, "missing return") + } + + // spec: "Implementation restriction: A compiler may make it illegal to + // declare a variable inside a function body if the variable is never used." + check.usage(sig.scope) +} + +func (check *Checker) usage(scope *Scope) { + var unused []*Var + for name, elem := range scope.elems { + elem = resolve(name, elem) + if v, _ := elem.(*Var); v != nil && !v.used { + unused = append(unused, v) + } + } + sort.Slice(unused, func(i, j int) bool { + return cmpPos(unused[i].pos, unused[j].pos) < 0 + }) + for _, v := range unused { + check.softErrorf(v.pos, UnusedVar, "%s declared and not used", v.name) + } + + for _, scope := range scope.children { + // Don't go inside function literal scopes a second time; + // they are handled explicitly by funcBody. 
+ if !scope.isFunc { + check.usage(scope) + } + } +} + +// stmtContext is a bitset describing which +// control-flow statements are permissible, +// and provides additional context information +// for better error messages. +type stmtContext uint + +const ( + // permissible control-flow statements + breakOk stmtContext = 1 << iota + continueOk + fallthroughOk + + // additional context information + finalSwitchCase + inTypeSwitch +) + +func (check *Checker) simpleStmt(s syntax.Stmt) { + if s != nil { + check.stmt(0, s) + } +} + +func trimTrailingEmptyStmts(list []syntax.Stmt) []syntax.Stmt { + for i := len(list); i > 0; i-- { + if _, ok := list[i-1].(*syntax.EmptyStmt); !ok { + return list[:i] + } + } + return nil +} + +func (check *Checker) stmtList(ctxt stmtContext, list []syntax.Stmt) { + ok := ctxt&fallthroughOk != 0 + inner := ctxt &^ fallthroughOk + list = trimTrailingEmptyStmts(list) // trailing empty statements are "invisible" to fallthrough analysis + for i, s := range list { + inner := inner + if ok && i+1 == len(list) { + inner |= fallthroughOk + } + check.stmt(inner, s) + } +} + +func (check *Checker) multipleSwitchDefaults(list []*syntax.CaseClause) { + var first *syntax.CaseClause + for _, c := range list { + if c.Cases == nil { + if first != nil { + check.errorf(c, DuplicateDefault, "multiple defaults (first at %s)", first.Pos()) + // TODO(gri) probably ok to bail out after first error (and simplify this code) + } else { + first = c + } + } + } +} + +func (check *Checker) multipleSelectDefaults(list []*syntax.CommClause) { + var first *syntax.CommClause + for _, c := range list { + if c.Comm == nil { + if first != nil { + check.errorf(c, DuplicateDefault, "multiple defaults (first at %s)", first.Pos()) + // TODO(gri) probably ok to bail out after first error (and simplify this code) + } else { + first = c + } + } + } +} + +func (check *Checker) openScope(node syntax.Node, comment string) { + check.openScopeUntil(node, syntax.EndPos(node), comment) +} 
+ +func (check *Checker) openScopeUntil(node syntax.Node, end syntax.Pos, comment string) { + scope := NewScope(check.scope, node.Pos(), end, comment) + check.recordScope(node, scope) + check.scope = scope +} + +func (check *Checker) closeScope() { + check.scope = check.scope.Parent() +} + +func (check *Checker) suspendedCall(keyword string, call syntax.Expr) { + code := InvalidDefer + if keyword == "go" { + code = InvalidGo + } + + if _, ok := call.(*syntax.CallExpr); !ok { + check.errorf(call, code, "expression in %s must be function call", keyword) + check.use(call) + return + } + + var x operand + var msg string + switch check.rawExpr(nil, &x, call, nil, false) { + case conversion: + msg = "requires function call, not conversion" + case expression: + msg = "discards result of" + code = UnusedResults + case statement: + return + default: + unreachable() + } + check.errorf(&x, code, "%s %s %s", keyword, msg, &x) +} + +// goVal returns the Go value for val, or nil. +func goVal(val constant.Value) interface{} { + // val should exist, but be conservative and check + if val == nil { + return nil + } + // Match implementation restriction of other compilers. + // gc only checks duplicates for integer, floating-point + // and string values, so only create Go values for these + // types. + switch val.Kind() { + case constant.Int: + if x, ok := constant.Int64Val(val); ok { + return x + } + if x, ok := constant.Uint64Val(val); ok { + return x + } + case constant.Float: + if x, ok := constant.Float64Val(val); ok { + return x + } + case constant.String: + return constant.StringVal(val) + } + return nil +} + +// A valueMap maps a case value (of a basic Go type) to a list of positions +// where the same case value appeared, together with the corresponding case +// types. +// Since two case values may have the same "underlying" value but different +// types we need to also check the value's types (e.g., byte(1) vs myByte(1)) +// when the switch expression is of interface type. 
+type ( + valueMap map[interface{}][]valueType // underlying Go value -> valueType + valueType struct { + pos syntax.Pos + typ Type + } +) + +func (check *Checker) caseValues(x *operand, values []syntax.Expr, seen valueMap) { +L: + for _, e := range values { + var v operand + check.expr(nil, &v, e) + if x.mode == invalid || v.mode == invalid { + continue L + } + check.convertUntyped(&v, x.typ) + if v.mode == invalid { + continue L + } + // Order matters: By comparing v against x, error positions are at the case values. + res := v // keep original v unchanged + check.comparison(&res, x, syntax.Eql, true) + if res.mode == invalid { + continue L + } + if v.mode != constant_ { + continue L // we're done + } + // look for duplicate values + if val := goVal(v.val); val != nil { + // look for duplicate types for a given value + // (quadratic algorithm, but these lists tend to be very short) + for _, vt := range seen[val] { + if Identical(v.typ, vt.typ) { + var err error_ + err.code = DuplicateCase + err.errorf(&v, "duplicate case %s in expression switch", &v) + err.errorf(vt.pos, "previous case") + check.report(&err) + continue L + } + } + seen[val] = append(seen[val], valueType{v.Pos(), v.typ}) + } + } +} + +// isNil reports whether the expression e denotes the predeclared value nil. +func (check *Checker) isNil(e syntax.Expr) bool { + // The only way to express the nil value is by literally writing nil (possibly in parentheses). + if name, _ := syntax.Unparen(e).(*syntax.Name); name != nil { + _, ok := check.lookup(name.Value).(*Nil) + return ok + } + return false +} + +// If the type switch expression is invalid, x is nil. +func (check *Checker) caseTypes(x *operand, types []syntax.Expr, seen map[Type]syntax.Expr) (T Type) { + var dummy operand +L: + for _, e := range types { + // The spec allows the value nil instead of a type. 
+ if check.isNil(e) { + T = nil + check.expr(nil, &dummy, e) // run e through expr so we get the usual Info recordings + } else { + T = check.varType(e) + if !isValid(T) { + continue L + } + } + // look for duplicate types + // (quadratic algorithm, but type switches tend to be reasonably small) + for t, other := range seen { + if T == nil && t == nil || T != nil && t != nil && Identical(T, t) { + // talk about "case" rather than "type" because of nil case + Ts := "nil" + if T != nil { + Ts = TypeString(T, check.qualifier) + } + var err error_ + err.code = DuplicateCase + err.errorf(e, "duplicate case %s in type switch", Ts) + err.errorf(other, "previous case") + check.report(&err) + continue L + } + } + seen[T] = e + if x != nil && T != nil { + check.typeAssertion(e, x, T, true) + } + } + return +} + +// TODO(gri) Once we are certain that typeHash is correct in all situations, use this version of caseTypes instead. +// (Currently it may be possible that different types have identical names and import paths due to ImporterFrom.) +// +// func (check *Checker) caseTypes(x *operand, xtyp *Interface, types []syntax.Expr, seen map[string]syntax.Expr) (T Type) { +// var dummy operand +// L: +// for _, e := range types { +// // The spec allows the value nil instead of a type. 
+// var hash string +// if check.isNil(e) { +// check.expr(nil, &dummy, e) // run e through expr so we get the usual Info recordings +// T = nil +// hash = "" // avoid collision with a type named nil +// } else { +// T = check.varType(e) +// if !isValid(T) { +// continue L +// } +// hash = typeHash(T, nil) +// } +// // look for duplicate types +// if other := seen[hash]; other != nil { +// // talk about "case" rather than "type" because of nil case +// Ts := "nil" +// if T != nil { +// Ts = TypeString(T, check.qualifier) +// } +// var err error_ +// err.code = _DuplicateCase +// err.errorf(e, "duplicate case %s in type switch", Ts) +// err.errorf(other, "previous case") +// check.report(&err) +// continue L +// } +// seen[hash] = e +// if T != nil { +// check.typeAssertion(e, x, xtyp, T, true) +// } +// } +// return +// } + +// stmt typechecks statement s. +func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) { + // statements must end with the same top scope as they started with + if debug { + defer func(scope *Scope) { + // don't check if code is panicking + if p := recover(); p != nil { + panic(p) + } + assert(scope == check.scope) + }(check.scope) + } + + // process collected function literals before scope changes + defer check.processDelayed(len(check.delayed)) + + // reset context for statements of inner blocks + inner := ctxt &^ (fallthroughOk | finalSwitchCase | inTypeSwitch) + + switch s := s.(type) { + case *syntax.EmptyStmt: + // ignore + + case *syntax.DeclStmt: + check.declStmt(s.DeclList) + + case *syntax.LabeledStmt: + check.hasLabel = true + check.stmt(ctxt, s.Stmt) + + case *syntax.ExprStmt: + // spec: "With the exception of specific built-in functions, + // function and method calls and receive operations can appear + // in statement context. Such statements may be parenthesized." 
+ var x operand + kind := check.rawExpr(nil, &x, s.X, nil, false) + var msg string + var code Code + switch x.mode { + default: + if kind == statement { + return + } + msg = "is not used" + code = UnusedExpr + case builtin: + msg = "must be called" + code = UncalledBuiltin + case typexpr: + msg = "is not an expression" + code = NotAnExpr + } + check.errorf(&x, code, "%s %s", &x, msg) + + case *syntax.SendStmt: + var ch, val operand + check.expr(nil, &ch, s.Chan) + check.expr(nil, &val, s.Value) + if ch.mode == invalid || val.mode == invalid { + return + } + u := coreType(ch.typ) + if u == nil { + check.errorf(s, InvalidSend, invalidOp+"cannot send to %s: no core type", &ch) + return + } + uch, _ := u.(*Chan) + if uch == nil { + check.errorf(s, InvalidSend, invalidOp+"cannot send to non-channel %s", &ch) + return + } + if uch.dir == RecvOnly { + check.errorf(s, InvalidSend, invalidOp+"cannot send to receive-only channel %s", &ch) + return + } + check.assignment(&val, uch.elem, "send") + + case *syntax.AssignStmt: + if s.Rhs == nil { + // x++ or x-- + // (no need to call unpackExpr as s.Lhs must be single-valued) + var x operand + check.expr(nil, &x, s.Lhs) + if x.mode == invalid { + return + } + if !allNumeric(x.typ) { + check.errorf(s.Lhs, NonNumericIncDec, invalidOp+"%s%s%s (non-numeric type %s)", s.Lhs, s.Op, s.Op, x.typ) + return + } + check.assignVar(s.Lhs, nil, &x, "assignment") + return + } + + lhs := syntax.UnpackListExpr(s.Lhs) + rhs := syntax.UnpackListExpr(s.Rhs) + switch s.Op { + case 0: + check.assignVars(lhs, rhs) + return + case syntax.Def: + check.shortVarDecl(s.Pos(), lhs, rhs) + return + } + + // assignment operations + if len(lhs) != 1 || len(rhs) != 1 { + check.errorf(s, MultiValAssignOp, "assignment operation %s requires single-valued expressions", s.Op) + return + } + + var x operand + check.binary(&x, nil, lhs[0], rhs[0], s.Op) + check.assignVar(lhs[0], nil, &x, "assignment") + + case *syntax.CallStmt: + kind := "go" + if s.Tok == syntax.Defer 
{ + kind = "defer" + } + check.suspendedCall(kind, s.Call) + + case *syntax.ReturnStmt: + res := check.sig.results + // Return with implicit results allowed for function with named results. + // (If one is named, all are named.) + results := syntax.UnpackListExpr(s.Results) + if len(results) == 0 && res.Len() > 0 && res.vars[0].name != "" { + // spec: "Implementation restriction: A compiler may disallow an empty expression + // list in a "return" statement if a different entity (constant, type, or variable) + // with the same name as a result parameter is in scope at the place of the return." + for _, obj := range res.vars { + if alt := check.lookup(obj.name); alt != nil && alt != obj { + var err error_ + err.code = OutOfScopeResult + err.errorf(s, "result parameter %s not in scope at return", obj.name) + err.errorf(alt, "inner declaration of %s", obj) + check.report(&err) + // ok to continue + } + } + } else { + var lhs []*Var + if res.Len() > 0 { + lhs = res.vars + } + check.initVars(lhs, results, s) + } + + case *syntax.BranchStmt: + if s.Label != nil { + check.hasLabel = true + break // checked in 2nd pass (check.labels) + } + if check.conf.IgnoreBranchErrors { + break + } + switch s.Tok { + case syntax.Break: + if ctxt&breakOk == 0 { + check.error(s, MisplacedBreak, "break not in for, switch, or select statement") + } + case syntax.Continue: + if ctxt&continueOk == 0 { + check.error(s, MisplacedContinue, "continue not in for statement") + } + case syntax.Fallthrough: + if ctxt&fallthroughOk == 0 { + var msg string + switch { + case ctxt&finalSwitchCase != 0: + msg = "cannot fallthrough final case in switch" + case ctxt&inTypeSwitch != 0: + msg = "cannot fallthrough in type switch" + default: + msg = "fallthrough statement out of place" + } + check.error(s, MisplacedFallthrough, msg) + } + case syntax.Goto: + // goto's must have labels, should have been caught above + fallthrough + default: + check.errorf(s, InvalidSyntaxTree, "branch statement: %s", s.Tok) + } 
+ + case *syntax.BlockStmt: + check.openScope(s, "block") + defer check.closeScope() + + check.stmtList(inner, s.List) + + case *syntax.IfStmt: + check.openScope(s, "if") + defer check.closeScope() + + check.simpleStmt(s.Init) + var x operand + check.expr(nil, &x, s.Cond) + if x.mode != invalid && !allBoolean(x.typ) { + check.error(s.Cond, InvalidCond, "non-boolean condition in if statement") + } + check.stmt(inner, s.Then) + // The parser produces a correct AST but if it was modified + // elsewhere the else branch may be invalid. Check again. + switch s.Else.(type) { + case nil: + // valid or error already reported + case *syntax.IfStmt, *syntax.BlockStmt: + check.stmt(inner, s.Else) + default: + check.error(s.Else, InvalidSyntaxTree, "invalid else branch in if statement") + } + + case *syntax.SwitchStmt: + inner |= breakOk + check.openScope(s, "switch") + defer check.closeScope() + + check.simpleStmt(s.Init) + + if g, _ := s.Tag.(*syntax.TypeSwitchGuard); g != nil { + check.typeSwitchStmt(inner|inTypeSwitch, s, g) + } else { + check.switchStmt(inner, s) + } + + case *syntax.SelectStmt: + inner |= breakOk + + check.multipleSelectDefaults(s.Body) + + for i, clause := range s.Body { + if clause == nil { + continue // error reported before + } + + // clause.Comm must be a SendStmt, RecvStmt, or default case + valid := false + var rhs syntax.Expr // rhs of RecvStmt, or nil + switch s := clause.Comm.(type) { + case nil, *syntax.SendStmt: + valid = true + case *syntax.AssignStmt: + if _, ok := s.Rhs.(*syntax.ListExpr); !ok { + rhs = s.Rhs + } + case *syntax.ExprStmt: + rhs = s.X + } + + // if present, rhs must be a receive operation + if rhs != nil { + if x, _ := syntax.Unparen(rhs).(*syntax.Operation); x != nil && x.Y == nil && x.Op == syntax.Recv { + valid = true + } + } + + if !valid { + check.error(clause.Comm, InvalidSelectCase, "select case must be send or receive (possibly with assignment)") + continue + } + end := s.Rbrace + if i+1 < len(s.Body) { + end = 
s.Body[i+1].Pos() + } + check.openScopeUntil(clause, end, "case") + if clause.Comm != nil { + check.stmt(inner, clause.Comm) + } + check.stmtList(inner, clause.Body) + check.closeScope() + } + + case *syntax.ForStmt: + inner |= breakOk | continueOk + + if rclause, _ := s.Init.(*syntax.RangeClause); rclause != nil { + check.rangeStmt(inner, s, rclause) + break + } + + check.openScope(s, "for") + defer check.closeScope() + + check.simpleStmt(s.Init) + if s.Cond != nil { + var x operand + check.expr(nil, &x, s.Cond) + if x.mode != invalid && !allBoolean(x.typ) { + check.error(s.Cond, InvalidCond, "non-boolean condition in for statement") + } + } + check.simpleStmt(s.Post) + // spec: "The init statement may be a short variable + // declaration, but the post statement must not." + if s, _ := s.Post.(*syntax.AssignStmt); s != nil && s.Op == syntax.Def { + // The parser already reported an error. + check.use(s.Lhs) // avoid follow-up errors + } + check.stmt(inner, s.Body) + + default: + check.error(s, InvalidSyntaxTree, "invalid statement") + } +} + +func (check *Checker) switchStmt(inner stmtContext, s *syntax.SwitchStmt) { + // init statement already handled + + var x operand + if s.Tag != nil { + check.expr(nil, &x, s.Tag) + // By checking assignment of x to an invisible temporary + // (as a compiler would), we get all the relevant checks. + check.assignment(&x, nil, "switch expression") + if x.mode != invalid && !Comparable(x.typ) && !hasNil(x.typ) { + check.errorf(&x, InvalidExprSwitch, "cannot switch on %s (%s is not comparable)", &x, x.typ) + x.mode = invalid + } + } else { + // spec: "A missing switch expression is + // equivalent to the boolean value true." 
+ x.mode = constant_ + x.typ = Typ[Bool] + x.val = constant.MakeBool(true) + // TODO(gri) should have a better position here + pos := s.Rbrace + if len(s.Body) > 0 { + pos = s.Body[0].Pos() + } + x.expr = syntax.NewName(pos, "true") + } + + check.multipleSwitchDefaults(s.Body) + + seen := make(valueMap) // map of seen case values to positions and types + for i, clause := range s.Body { + if clause == nil { + check.error(clause, InvalidSyntaxTree, "incorrect expression switch case") + continue + } + end := s.Rbrace + inner := inner + if i+1 < len(s.Body) { + end = s.Body[i+1].Pos() + inner |= fallthroughOk + } else { + inner |= finalSwitchCase + } + check.caseValues(&x, syntax.UnpackListExpr(clause.Cases), seen) + check.openScopeUntil(clause, end, "case") + check.stmtList(inner, clause.Body) + check.closeScope() + } +} + +func (check *Checker) typeSwitchStmt(inner stmtContext, s *syntax.SwitchStmt, guard *syntax.TypeSwitchGuard) { + // init statement already handled + + // A type switch guard must be of the form: + // + // TypeSwitchGuard = [ identifier ":=" ] PrimaryExpr "." "(" "type" ")" . 
+	//       \__lhs__/        \___rhs___/
+
+	// check lhs, if any
+	lhs := guard.Lhs
+	if lhs != nil {
+		if lhs.Value == "_" {
+			// _ := x.(type) is an invalid short variable declaration
+			check.softErrorf(lhs, NoNewVar, "no new variable on left side of :=")
+			lhs = nil // avoid declared and not used error below
+		} else {
+			check.recordDef(lhs, nil) // lhs variable is implicitly declared in each case clause
+		}
+	}
+
+	// check rhs
+	var x operand
+	check.expr(nil, &x, guard.X)
+	if x.mode == invalid {
+		return
+	}
+
+	// TODO(gri) we may want to permit type switches on type parameter values at some point
+	var sx *operand // switch expression against which cases are compared; nil if invalid
+	if isTypeParam(x.typ) {
+		check.errorf(&x, InvalidTypeSwitch, "cannot use type switch on type parameter value %s", &x)
+	} else {
+		if _, ok := under(x.typ).(*Interface); ok {
+			sx = &x
+		} else {
+			check.errorf(&x, InvalidTypeSwitch, "%s is not an interface", &x)
+		}
+	}
+
+	check.multipleSwitchDefaults(s.Body)
+
+	var lhsVars []*Var                 // list of implicitly declared lhs variables
+	seen := make(map[Type]syntax.Expr) // map of seen types to positions
+	for i, clause := range s.Body {
+		if clause == nil {
+			check.error(s, InvalidSyntaxTree, "incorrect type switch case")
+			continue
+		}
+		end := s.Rbrace
+		if i+1 < len(s.Body) {
+			end = s.Body[i+1].Pos()
+		}
+		// Check each type in this type switch case.
+		cases := syntax.UnpackListExpr(clause.Cases)
+		T := check.caseTypes(sx, cases, seen)
+		check.openScopeUntil(clause, end, "case")
+		// If lhs exists, declare a corresponding variable in the case-local scope.
+		if lhs != nil {
+			// spec: "The TypeSwitchGuard may include a short variable declaration.
+			// When that form is used, the variable is declared at the beginning of
+			// the implicit block in each clause. In clauses with a case listing
+			// exactly one type, the variable has that type; otherwise, the variable
+			// has the type of the expression in the TypeSwitchGuard."
+ if len(cases) != 1 || T == nil { + T = x.typ + } + obj := NewVar(lhs.Pos(), check.pkg, lhs.Value, T) + // TODO(mdempsky): Just use clause.Colon? Why did I even suggest + // "at the end of the TypeSwitchCase" in go.dev/issue/16794 instead? + scopePos := clause.Pos() // for default clause (len(List) == 0) + if n := len(cases); n > 0 { + scopePos = syntax.EndPos(cases[n-1]) + } + check.declare(check.scope, nil, obj, scopePos) + check.recordImplicit(clause, obj) + // For the "declared and not used" error, all lhs variables act as + // one; i.e., if any one of them is 'used', all of them are 'used'. + // Collect them for later analysis. + lhsVars = append(lhsVars, obj) + } + check.stmtList(inner, clause.Body) + check.closeScope() + } + + // If lhs exists, we must have at least one lhs variable that was used. + // (We can't use check.usage because that only looks at one scope; and + // we don't want to use the same variable for all scopes and change the + // variable type underfoot.) + if lhs != nil { + var used bool + for _, v := range lhsVars { + if v.used { + used = true + } + v.used = true // avoid usage error when checking entire function + } + if !used { + check.softErrorf(lhs, UnusedVar, "%s declared and not used", lhs.Value) + } + } +} + +func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *syntax.RangeClause) { + // Convert syntax form to local variables. 
+ type Expr = syntax.Expr + type identType = syntax.Name + identName := func(n *identType) string { return n.Value } + sKey := rclause.Lhs // possibly nil + var sValue, sExtra syntax.Expr + if p, _ := sKey.(*syntax.ListExpr); p != nil { + if len(p.ElemList) < 2 { + check.error(s, InvalidSyntaxTree, "invalid lhs in range clause") + return + } + // len(p.ElemList) >= 2 + sKey = p.ElemList[0] + sValue = p.ElemList[1] + if len(p.ElemList) > 2 { + // delay error reporting until we know more + sExtra = p.ElemList[2] + } + } + isDef := rclause.Def + rangeVar := rclause.X + noNewVarPos := s + + // Do not use rclause anymore. + rclause = nil + + // Everything from here on is shared between cmd/compile/internal/types2 and go/types. + + // check expression to iterate over + var x operand + check.expr(nil, &x, rangeVar) + + // determine key/value types + var key, val Type + if x.mode != invalid { + // Ranging over a type parameter is permitted if it has a core type. + k, v, cause, isFunc, ok := rangeKeyVal(x.typ, func(v goVersion) bool { + return check.allowVersion(check.pkg, x.expr, v) + }) + switch { + case !ok && cause != "": + check.softErrorf(&x, InvalidRangeExpr, "cannot range over %s: %s", &x, cause) + case !ok: + check.softErrorf(&x, InvalidRangeExpr, "cannot range over %s", &x) + case k == nil && sKey != nil: + check.softErrorf(sKey, InvalidIterVar, "range over %s permits no iteration variables", &x) + case v == nil && sValue != nil: + check.softErrorf(sValue, InvalidIterVar, "range over %s permits only one iteration variable", &x) + case sExtra != nil: + check.softErrorf(sExtra, InvalidIterVar, "range clause permits at most two iteration variables") + case isFunc && ((k == nil) != (sKey == nil) || (v == nil) != (sValue == nil)): + var count string + switch { + case k == nil: + count = "no iteration variables" + case v == nil: + count = "one iteration variable" + default: + count = "two iteration variables" + } + check.softErrorf(&x, InvalidIterVar, "range over %s 
must have %s", &x, count) + } + key, val = k, v + } + + // Open the for-statement block scope now, after the range clause. + // Iteration variables declared with := need to go in this scope (was go.dev/issue/51437). + check.openScope(s, "range") + defer check.closeScope() + + // check assignment to/declaration of iteration variables + // (irregular assignment, cannot easily map to existing assignment checks) + + // lhs expressions and initialization value (rhs) types + lhs := [2]Expr{sKey, sValue} // sKey, sValue may be nil + rhs := [2]Type{key, val} // key, val may be nil + + rangeOverInt := isInteger(x.typ) + + if isDef { + // short variable declaration + var vars []*Var + for i, lhs := range lhs { + if lhs == nil { + continue + } + + // determine lhs variable + var obj *Var + if ident, _ := lhs.(*identType); ident != nil { + // declare new variable + name := identName(ident) + obj = NewVar(ident.Pos(), check.pkg, name, nil) + check.recordDef(ident, obj) + // _ variables don't count as new variables + if name != "_" { + vars = append(vars, obj) + } + } else { + check.errorf(lhs, InvalidSyntaxTree, "cannot declare %s", lhs) + obj = NewVar(lhs.Pos(), check.pkg, "_", nil) // dummy variable + } + assert(obj.typ == nil) + + // initialize lhs iteration variable, if any + typ := rhs[i] + if typ == nil { + obj.typ = Typ[Invalid] + obj.used = true // don't complain about unused variable + continue + } + + if rangeOverInt { + assert(i == 0) // at most one iteration variable (rhs[1] == nil for rangeOverInt) + check.initVar(obj, &x, "range clause") + } else { + var y operand + y.mode = value + y.expr = lhs // we don't have a better rhs expression to use here + y.typ = typ + check.initVar(obj, &y, "assignment") // error is on variable, use "assignment" not "range clause" + } + assert(obj.typ != nil) + } + + // declare variables + if len(vars) > 0 { + scopePos := s.Body.Pos() + for _, obj := range vars { + check.declare(check.scope, nil /* recordDef already called */, obj, 
scopePos) + } + } else { + check.error(noNewVarPos, NoNewVar, "no new variables on left side of :=") + } + } else if sKey != nil /* lhs[0] != nil */ { + // ordinary assignment + for i, lhs := range lhs { + if lhs == nil { + continue + } + + // assign to lhs iteration variable, if any + typ := rhs[i] + if typ == nil { + continue + } + + if rangeOverInt { + assert(i == 0) // at most one iteration variable (rhs[1] == nil for rangeOverInt) + check.assignVar(lhs, nil, &x, "range clause") + // If the assignment succeeded, if x was untyped before, it now + // has a type inferred via the assignment. It must be an integer. + // (go.dev/issues/67027) + if x.mode != invalid && !isInteger(x.typ) { + check.softErrorf(lhs, InvalidRangeExpr, "cannot use iteration variable of type %s", x.typ) + } + } else { + var y operand + y.mode = value + y.expr = lhs // we don't have a better rhs expression to use here + y.typ = typ + check.assignVar(lhs, nil, &y, "assignment") // error is on variable, use "assignment" not "range clause" + } + } + } else if rangeOverInt { + // If we don't have any iteration variables, we still need to + // check that a (possibly untyped) integer range expression x + // is valid. + // We do this by checking the assignment _ = x. This ensures + // that an untyped x can be converted to a value of its default + // type (rune or int). + check.assignment(&x, nil, "range clause") + } + + check.stmt(inner, s.Body) +} + +// RangeKeyVal returns the key and value types for a range over typ. +// Exported for use by the compiler (does not exist in go/types). +func RangeKeyVal(typ Type) (Type, Type) { + key, val, _, _, _ := rangeKeyVal(typ, nil) + return key, val +} + +// rangeKeyVal returns the key and value type produced by a range clause +// over an expression of type typ. +// If allowVersion != nil, it is used to check the required language version. +// If the range clause is not permitted, rangeKeyVal returns ok = false. 
+// When ok = false, rangeKeyVal may also return a reason in cause. +func rangeKeyVal(typ Type, allowVersion func(goVersion) bool) (key, val Type, cause string, isFunc, ok bool) { + bad := func(cause string) (Type, Type, string, bool, bool) { + return Typ[Invalid], Typ[Invalid], cause, false, false + } + toSig := func(t Type) *Signature { + sig, _ := coreType(t).(*Signature) + return sig + } + + orig := typ + switch typ := arrayPtrDeref(coreType(typ)).(type) { + case nil: + return bad("no core type") + case *Basic: + if isString(typ) { + return Typ[Int], universeRune, "", false, true // use 'rune' name + } + if isInteger(typ) { + if allowVersion != nil && !allowVersion(go1_22) { + return bad("requires go1.22 or later") + } + return orig, nil, "", false, true + } + case *Array: + return Typ[Int], typ.elem, "", false, true + case *Slice: + return Typ[Int], typ.elem, "", false, true + case *Map: + return typ.key, typ.elem, "", false, true + case *Chan: + if typ.dir == SendOnly { + return bad("receive from send-only channel") + } + return typ.elem, nil, "", false, true + case *Signature: + // TODO(gri) when this becomes enabled permanently, add version check + if !buildcfg.Experiment.RangeFunc { + break + } + assert(typ.Recv() == nil) + switch { + case typ.Params().Len() != 1: + return bad("func must be func(yield func(...) bool): wrong argument count") + case toSig(typ.Params().At(0).Type()) == nil: + return bad("func must be func(yield func(...) bool): argument is not func") + case typ.Results().Len() != 0: + return bad("func must be func(yield func(...) bool): unexpected results") + } + cb := toSig(typ.Params().At(0).Type()) + assert(cb.Recv() == nil) + switch { + case cb.Params().Len() > 2: + return bad("func must be func(yield func(...) bool): yield func has too many parameters") + case cb.Results().Len() != 1 || !isBoolean(cb.Results().At(0).Type()): + return bad("func must be func(yield func(...) 
bool): yield func does not return bool") + } + if cb.Params().Len() >= 1 { + key = cb.Params().At(0).Type() + } + if cb.Params().Len() >= 2 { + val = cb.Params().At(1).Type() + } + return key, val, "", true, true + } + return +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/struct.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/struct.go new file mode 100644 index 0000000000000000000000000000000000000000..9e46b349a3920d186beca11d5bbd8f2764ad63f8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/struct.go @@ -0,0 +1,230 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import ( + "cmd/compile/internal/syntax" + . "internal/types/errors" + "strconv" +) + +// ---------------------------------------------------------------------------- +// API + +// A Struct represents a struct type. +type Struct struct { + fields []*Var // fields != nil indicates the struct is set up (possibly with len(fields) == 0) + tags []string // field tags; nil if there are no tags +} + +// NewStruct returns a new struct with the given fields and corresponding field tags. +// If a field with index i has a tag, tags[i] must be that tag, but len(tags) may be +// only as long as required to hold the tag with the largest index i. Consequently, +// if no field has a tag, tags may be nil. +func NewStruct(fields []*Var, tags []string) *Struct { + var fset objset + for _, f := range fields { + if f.name != "_" && fset.insert(f) != nil { + panic("multiple fields with the same name") + } + } + if len(tags) > len(fields) { + panic("more tags than fields") + } + s := &Struct{fields: fields, tags: tags} + s.markComplete() + return s +} + +// NumFields returns the number of fields in the struct (including blank and embedded fields). 
+func (s *Struct) NumFields() int { return len(s.fields) } + +// Field returns the i'th field for 0 <= i < NumFields(). +func (s *Struct) Field(i int) *Var { return s.fields[i] } + +// Tag returns the i'th field tag for 0 <= i < NumFields(). +func (s *Struct) Tag(i int) string { + if i < len(s.tags) { + return s.tags[i] + } + return "" +} + +func (s *Struct) Underlying() Type { return s } +func (s *Struct) String() string { return TypeString(s, nil) } + +// ---------------------------------------------------------------------------- +// Implementation + +func (s *Struct) markComplete() { + if s.fields == nil { + s.fields = make([]*Var, 0) + } +} + +func (check *Checker) structType(styp *Struct, e *syntax.StructType) { + if e.FieldList == nil { + styp.markComplete() + return + } + + // struct fields and tags + var fields []*Var + var tags []string + + // for double-declaration checks + var fset objset + + // current field typ and tag + var typ Type + var tag string + add := func(ident *syntax.Name, embedded bool, pos syntax.Pos) { + if tag != "" && tags == nil { + tags = make([]string, len(fields)) + } + if tags != nil { + tags = append(tags, tag) + } + + name := ident.Value + fld := NewField(pos, check.pkg, name, typ, embedded) + // spec: "Within a struct, non-blank field names must be unique." + if name == "_" || check.declareInSet(&fset, pos, fld) { + fields = append(fields, fld) + check.recordDef(ident, fld) + } + } + + // addInvalid adds an embedded field of invalid type to the struct for + // fields with errors; this keeps the number of struct fields in sync + // with the source as long as the fields are _ or have different names + // (go.dev/issue/25627). + addInvalid := func(ident *syntax.Name, pos syntax.Pos) { + typ = Typ[Invalid] + tag = "" + add(ident, true, pos) + } + + var prev syntax.Expr + for i, f := range e.FieldList { + // Fields declared syntactically with the same type (e.g.: a, b, c T) + // share the same type expression. 
Only check type if it's a new type. + if i == 0 || f.Type != prev { + typ = check.varType(f.Type) + prev = f.Type + } + tag = "" + if i < len(e.TagList) { + tag = check.tag(e.TagList[i]) + } + if f.Name != nil { + // named field + add(f.Name, false, f.Name.Pos()) + } else { + // embedded field + // spec: "An embedded type must be specified as a type name T or as a + // pointer to a non-interface type name *T, and T itself may not be a + // pointer type." + pos := syntax.StartPos(f.Type) // position of type, for errors + name := embeddedFieldIdent(f.Type) + if name == nil { + check.errorf(pos, InvalidSyntaxTree, "invalid embedded field type %s", f.Type) + name = &syntax.Name{Value: "_"} // TODO(gri) need to set position to pos + addInvalid(name, pos) + continue + } + add(name, true, name.Pos()) // struct{p.T} field has position of T + + // Because we have a name, typ must be of the form T or *T, where T is the name + // of a (named or alias) type, and t (= deref(typ)) must be the type of T. + // We must delay this check to the end because we don't want to instantiate + // (via under(t)) a possibly incomplete type. + embeddedTyp := typ // for closure below + embeddedPos := pos + check.later(func() { + t, isPtr := deref(embeddedTyp) + switch u := under(t).(type) { + case *Basic: + if !isValid(t) { + // error was reported before + return + } + // unsafe.Pointer is treated like a regular pointer + if u.kind == UnsafePointer { + check.error(embeddedPos, InvalidPtrEmbed, "embedded field type cannot be unsafe.Pointer") + } + case *Pointer: + check.error(embeddedPos, InvalidPtrEmbed, "embedded field type cannot be a pointer") + case *Interface: + if isTypeParam(t) { + // The error code here is inconsistent with other error codes for + // invalid embedding, because this restriction may be relaxed in the + // future, and so it did not warrant a new error code. 
+ check.error(embeddedPos, MisplacedTypeParam, "embedded field type cannot be a (pointer to a) type parameter") + break + } + if isPtr { + check.error(embeddedPos, InvalidPtrEmbed, "embedded field type cannot be a pointer to an interface") + } + } + }).describef(embeddedPos, "check embedded type %s", embeddedTyp) + } + } + + styp.fields = fields + styp.tags = tags + styp.markComplete() +} + +func embeddedFieldIdent(e syntax.Expr) *syntax.Name { + switch e := e.(type) { + case *syntax.Name: + return e + case *syntax.Operation: + if base := ptrBase(e); base != nil { + // *T is valid, but **T is not + if op, _ := base.(*syntax.Operation); op == nil || ptrBase(op) == nil { + return embeddedFieldIdent(e.X) + } + } + case *syntax.SelectorExpr: + return e.Sel + case *syntax.IndexExpr: + return embeddedFieldIdent(e.X) + } + return nil // invalid embedded field +} + +func (check *Checker) declareInSet(oset *objset, pos syntax.Pos, obj Object) bool { + if alt := oset.insert(obj); alt != nil { + var err error_ + err.code = DuplicateDecl + err.errorf(pos, "%s redeclared", obj.Name()) + err.recordAltDecl(alt) + check.report(&err) + return false + } + return true +} + +func (check *Checker) tag(t *syntax.BasicLit) string { + // If t.Bad, an error was reported during parsing. 
+ if t != nil && !t.Bad { + if t.Kind == syntax.StringLit { + if val, err := strconv.Unquote(t.Value); err == nil { + return val + } + } + check.errorf(t, InvalidSyntaxTree, "incorrect tag syntax: %q", t.Value) + } + return "" +} + +func ptrBase(x *syntax.Operation) syntax.Expr { + if x.Op == syntax.Mul && x.Y == nil { + return x.X + } + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/subst.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/subst.go new file mode 100644 index 0000000000000000000000000000000000000000..1ad73c41ce1f08df4e91adf5d864880d703a7211 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/subst.go @@ -0,0 +1,440 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements type parameter substitution. + +package types2 + +import ( + "cmd/compile/internal/syntax" +) + +type substMap map[*TypeParam]Type + +// makeSubstMap creates a new substitution map mapping tpars[i] to targs[i]. +// If targs[i] is nil, tpars[i] is not substituted. +func makeSubstMap(tpars []*TypeParam, targs []Type) substMap { + assert(len(tpars) == len(targs)) + proj := make(substMap, len(tpars)) + for i, tpar := range tpars { + proj[tpar] = targs[i] + } + return proj +} + +// makeRenameMap is like makeSubstMap, but creates a map used to rename type +// parameters in from with the type parameters in to. 
+func makeRenameMap(from, to []*TypeParam) substMap { + assert(len(from) == len(to)) + proj := make(substMap, len(from)) + for i, tpar := range from { + proj[tpar] = to[i] + } + return proj +} + +func (m substMap) empty() bool { + return len(m) == 0 +} + +func (m substMap) lookup(tpar *TypeParam) Type { + if t := m[tpar]; t != nil { + return t + } + return tpar +} + +// subst returns the type typ with its type parameters tpars replaced by the +// corresponding type arguments targs, recursively. subst doesn't modify the +// incoming type. If a substitution took place, the result type is different +// from the incoming type. +// +// If expanding is non-nil, it is the instance type currently being expanded. +// One of expanding or ctxt must be non-nil. +func (check *Checker) subst(pos syntax.Pos, typ Type, smap substMap, expanding *Named, ctxt *Context) Type { + assert(expanding != nil || ctxt != nil) + + if smap.empty() { + return typ + } + + // common cases + switch t := typ.(type) { + case *Basic: + return typ // nothing to do + case *TypeParam: + return smap.lookup(t) + } + + // general case + subst := subster{ + pos: pos, + smap: smap, + check: check, + expanding: expanding, + ctxt: ctxt, + } + return subst.typ(typ) +} + +type subster struct { + pos syntax.Pos + smap substMap + check *Checker // nil if called via Instantiate + expanding *Named // if non-nil, the instance that is being expanded + ctxt *Context +} + +func (subst *subster) typ(typ Type) Type { + switch t := typ.(type) { + case nil: + // Call typOrNil if it's possible that typ is nil. + panic("nil typ") + + case *Basic: + // nothing to do + + case *Alias: + rhs := subst.typ(t.fromRHS) + if rhs != t.fromRHS { + // This branch cannot be reached because the RHS of an alias + // may only contain type parameters of an enclosing function. + // Such function bodies are never "instantiated" and thus + // substitution is not called on locally declared alias types. 
+ // TODO(gri) adjust once parameterized aliases are supported + panic("unreachable for unparameterized aliases") + // return subst.check.newAlias(t.obj, rhs) + } + + case *Array: + elem := subst.typOrNil(t.elem) + if elem != t.elem { + return &Array{len: t.len, elem: elem} + } + + case *Slice: + elem := subst.typOrNil(t.elem) + if elem != t.elem { + return &Slice{elem: elem} + } + + case *Struct: + if fields, copied := subst.varList(t.fields); copied { + s := &Struct{fields: fields, tags: t.tags} + s.markComplete() + return s + } + + case *Pointer: + base := subst.typ(t.base) + if base != t.base { + return &Pointer{base: base} + } + + case *Tuple: + return subst.tuple(t) + + case *Signature: + // Preserve the receiver: it is handled during *Interface and *Named type + // substitution. + // + // Naively doing the substitution here can lead to an infinite recursion in + // the case where the receiver is an interface. For example, consider the + // following declaration: + // + // type T[A any] struct { f interface{ m() } } + // + // In this case, the type of f is an interface that is itself the receiver + // type of all of its methods. Because we have no type name to break + // cycles, substituting in the recv results in an infinite loop of + // recv->interface->recv->interface->... + recv := t.recv + + params := subst.tuple(t.params) + results := subst.tuple(t.results) + if params != t.params || results != t.results { + return &Signature{ + rparams: t.rparams, + // TODO(gri) why can't we nil out tparams here, rather than in instantiate? + tparams: t.tparams, + // instantiated signatures have a nil scope + recv: recv, + params: params, + results: results, + variadic: t.variadic, + } + } + + case *Union: + terms, copied := subst.termlist(t.terms) + if copied { + // term list substitution may introduce duplicate terms (unlikely but possible). + // This is ok; lazy type set computation will determine the actual type set + // in normal form. 
+ return &Union{terms} + } + + case *Interface: + methods, mcopied := subst.funcList(t.methods) + embeddeds, ecopied := subst.typeList(t.embeddeds) + if mcopied || ecopied { + iface := subst.check.newInterface() + iface.embeddeds = embeddeds + iface.embedPos = t.embedPos + iface.implicit = t.implicit + assert(t.complete) // otherwise we are copying incomplete data + iface.complete = t.complete + // If we've changed the interface type, we may need to replace its + // receiver if the receiver type is the original interface. Receivers of + // *Named type are replaced during named type expansion. + // + // Notably, it's possible to reach here and not create a new *Interface, + // even though the receiver type may be parameterized. For example: + // + // type T[P any] interface{ m() } + // + // In this case the interface will not be substituted here, because its + // method signatures do not depend on the type parameter P, but we still + // need to create new interface methods to hold the instantiated + // receiver. This is handled by Named.expandUnderlying. + iface.methods, _ = replaceRecvType(methods, t, iface) + + // If check != nil, check.newInterface will have saved the interface for later completion. + if subst.check == nil { // golang/go#61561: all newly created interfaces must be completed + iface.typeSet() + } + return iface + } + + case *Map: + key := subst.typ(t.key) + elem := subst.typ(t.elem) + if key != t.key || elem != t.elem { + return &Map{key: key, elem: elem} + } + + case *Chan: + elem := subst.typ(t.elem) + if elem != t.elem { + return &Chan{dir: t.dir, elem: elem} + } + + case *Named: + // dump is for debugging + dump := func(string, ...interface{}) {} + if subst.check != nil && subst.check.conf.Trace { + subst.check.indent++ + defer func() { + subst.check.indent-- + }() + dump = func(format string, args ...interface{}) { + subst.check.trace(subst.pos, format, args...) 
+ } + } + + // subst is called during expansion, so in this function we need to be + // careful not to call any methods that would cause t to be expanded: doing + // so would result in deadlock. + // + // So we call t.Origin().TypeParams() rather than t.TypeParams(). + orig := t.Origin() + n := orig.TypeParams().Len() + if n == 0 { + dump(">>> %s is not parameterized", t) + return t // type is not parameterized + } + + var newTArgs []Type + if t.TypeArgs().Len() != n { + return Typ[Invalid] // error reported elsewhere + } + + // already instantiated + dump(">>> %s already instantiated", t) + // For each (existing) type argument targ, determine if it needs + // to be substituted; i.e., if it is or contains a type parameter + // that has a type argument for it. + for i, targ := range t.TypeArgs().list() { + dump(">>> %d targ = %s", i, targ) + new_targ := subst.typ(targ) + if new_targ != targ { + dump(">>> substituted %d targ %s => %s", i, targ, new_targ) + if newTArgs == nil { + newTArgs = make([]Type, n) + copy(newTArgs, t.TypeArgs().list()) + } + newTArgs[i] = new_targ + } + } + + if newTArgs == nil { + dump(">>> nothing to substitute in %s", t) + return t // nothing to substitute + } + + // Create a new instance and populate the context to avoid endless + // recursion. The position used here is irrelevant because validation only + // occurs on t (we don't call validType on named), but we use subst.pos to + // help with debugging. + return subst.check.instance(subst.pos, orig, newTArgs, subst.expanding, subst.ctxt) + + case *TypeParam: + return subst.smap.lookup(t) + + default: + unreachable() + } + + return typ +} + +// typOrNil is like typ but if the argument is nil it is replaced with Typ[Invalid]. +// A nil type may appear in pathological cases such as type T[P any] []func(_ T([]_)) +// where an array/slice element is accessed before it is set up. 
+func (subst *subster) typOrNil(typ Type) Type { + if typ == nil { + return Typ[Invalid] + } + return subst.typ(typ) +} + +func (subst *subster) var_(v *Var) *Var { + if v != nil { + if typ := subst.typ(v.typ); typ != v.typ { + return substVar(v, typ) + } + } + return v +} + +func substVar(v *Var, typ Type) *Var { + copy := *v + copy.typ = typ + copy.origin = v.Origin() + return © +} + +func (subst *subster) tuple(t *Tuple) *Tuple { + if t != nil { + if vars, copied := subst.varList(t.vars); copied { + return &Tuple{vars: vars} + } + } + return t +} + +func (subst *subster) varList(in []*Var) (out []*Var, copied bool) { + out = in + for i, v := range in { + if w := subst.var_(v); w != v { + if !copied { + // first variable that got substituted => allocate new out slice + // and copy all variables + new := make([]*Var, len(in)) + copy(new, out) + out = new + copied = true + } + out[i] = w + } + } + return +} + +func (subst *subster) func_(f *Func) *Func { + if f != nil { + if typ := subst.typ(f.typ); typ != f.typ { + return substFunc(f, typ) + } + } + return f +} + +func substFunc(f *Func, typ Type) *Func { + copy := *f + copy.typ = typ + copy.origin = f.Origin() + return © +} + +func (subst *subster) funcList(in []*Func) (out []*Func, copied bool) { + out = in + for i, f := range in { + if g := subst.func_(f); g != f { + if !copied { + // first function that got substituted => allocate new out slice + // and copy all functions + new := make([]*Func, len(in)) + copy(new, out) + out = new + copied = true + } + out[i] = g + } + } + return +} + +func (subst *subster) typeList(in []Type) (out []Type, copied bool) { + out = in + for i, t := range in { + if u := subst.typ(t); u != t { + if !copied { + // first function that got substituted => allocate new out slice + // and copy all functions + new := make([]Type, len(in)) + copy(new, out) + out = new + copied = true + } + out[i] = u + } + } + return +} + +func (subst *subster) termlist(in []*Term) (out []*Term, copied 
bool) { + out = in + for i, t := range in { + if u := subst.typ(t.typ); u != t.typ { + if !copied { + // first function that got substituted => allocate new out slice + // and copy all functions + new := make([]*Term, len(in)) + copy(new, out) + out = new + copied = true + } + out[i] = NewTerm(t.tilde, u) + } + } + return +} + +// replaceRecvType updates any function receivers that have type old to have +// type new. It does not modify the input slice; if modifications are required, +// the input slice and any affected signatures will be copied before mutating. +// +// The resulting out slice contains the updated functions, and copied reports +// if anything was modified. +func replaceRecvType(in []*Func, old, new Type) (out []*Func, copied bool) { + out = in + for i, method := range in { + sig := method.Type().(*Signature) + if sig.recv != nil && sig.recv.Type() == old { + if !copied { + // Allocate a new methods slice before mutating for the first time. + // This is defensive, as we may share methods across instantiations of + // a given interface type if they do not get substituted. + out = make([]*Func, len(in)) + copy(out, in) + copied = true + } + newsig := *sig + newsig.recv = substVar(sig.recv, new) + out[i] = substFunc(method, &newsig) + } + } + return +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/termlist.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/termlist.go new file mode 100644 index 0000000000000000000000000000000000000000..196f8abf724f941ca7eb09cc42dddb9d06d01d22 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/termlist.go @@ -0,0 +1,161 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import "strings" + +// A termlist represents the type set represented by the union +// t1 ∪ y2 ∪ ... tn of the type sets of the terms t1 to tn. 
+// A termlist is in normal form if all terms are disjoint. +// termlist operations don't require the operands to be in +// normal form. +type termlist []*term + +// allTermlist represents the set of all types. +// It is in normal form. +var allTermlist = termlist{new(term)} + +// termSep is the separator used between individual terms. +const termSep = " | " + +// String prints the termlist exactly (without normalization). +func (xl termlist) String() string { + if len(xl) == 0 { + return "∅" + } + var buf strings.Builder + for i, x := range xl { + if i > 0 { + buf.WriteString(termSep) + } + buf.WriteString(x.String()) + } + return buf.String() +} + +// isEmpty reports whether the termlist xl represents the empty set of types. +func (xl termlist) isEmpty() bool { + // If there's a non-nil term, the entire list is not empty. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil { + return false + } + } + return true +} + +// isAll reports whether the termlist xl represents the set of all types. +func (xl termlist) isAll() bool { + // If there's a 𝓤 term, the entire list is 𝓤. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil && x.typ == nil { + return true + } + } + return false +} + +// norm returns the normal form of xl. +func (xl termlist) norm() termlist { + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + used := make([]bool, len(xl)) + var rl termlist + for i, xi := range xl { + if xi == nil || used[i] { + continue + } + for j := i + 1; j < len(xl); j++ { + xj := xl[j] + if xj == nil || used[j] { + continue + } + if u1, u2 := xi.union(xj); u2 == nil { + // If we encounter a 𝓤 term, the entire list is 𝓤. + // Exit early. 
+ // (Note that this is not just an optimization; + // if we continue, we may end up with a 𝓤 term + // and other terms and the result would not be + // in normal form.) + if u1.typ == nil { + return allTermlist + } + xi = u1 + used[j] = true // xj is now unioned into xi - ignore it in future iterations + } + } + rl = append(rl, xi) + } + return rl +} + +// union returns the union xl ∪ yl. +func (xl termlist) union(yl termlist) termlist { + return append(xl, yl...).norm() +} + +// intersect returns the intersection xl ∩ yl. +func (xl termlist) intersect(yl termlist) termlist { + if xl.isEmpty() || yl.isEmpty() { + return nil + } + + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + var rl termlist + for _, x := range xl { + for _, y := range yl { + if r := x.intersect(y); r != nil { + rl = append(rl, r) + } + } + } + return rl.norm() +} + +// equal reports whether xl and yl represent the same type set. +func (xl termlist) equal(yl termlist) bool { + // TODO(gri) this should be more efficient + return xl.subsetOf(yl) && yl.subsetOf(xl) +} + +// includes reports whether t ∈ xl. +func (xl termlist) includes(t Type) bool { + for _, x := range xl { + if x.includes(t) { + return true + } + } + return false +} + +// supersetOf reports whether y ⊆ xl. +func (xl termlist) supersetOf(y *term) bool { + for _, x := range xl { + if y.subsetOf(x) { + return true + } + } + return false +} + +// subsetOf reports whether xl ⊆ yl. 
+func (xl termlist) subsetOf(yl termlist) bool { + if yl.isEmpty() { + return xl.isEmpty() + } + + // each term x of xl must be a subset of yl + for _, x := range xl { + if !yl.supersetOf(x) { + return false // x is not a subset yl + } + } + return true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/termlist_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/termlist_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3005d0edea00ca40fd0e7962320bff9e0e780571 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/termlist_test.go @@ -0,0 +1,284 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import ( + "strings" + "testing" +) + +// maketl makes a term list from a string of the term list. +func maketl(s string) termlist { + s = strings.ReplaceAll(s, " ", "") + names := strings.Split(s, "|") + r := make(termlist, len(names)) + for i, n := range names { + r[i] = testTerm(n) + } + return r +} + +func TestTermlistAll(t *testing.T) { + if !allTermlist.isAll() { + t.Errorf("allTermlist is not the set of all types") + } +} + +func TestTermlistString(t *testing.T) { + for _, want := range []string{ + "∅", + "𝓤", + "int", + "~int", + "myInt", + "∅ | ∅", + "𝓤 | 𝓤", + "∅ | 𝓤 | int", + "∅ | 𝓤 | int | myInt", + } { + if got := maketl(want).String(); got != want { + t.Errorf("(%v).String() == %v", want, got) + } + } +} + +func TestTermlistIsEmpty(t *testing.T) { + for test, want := range map[string]bool{ + "∅": true, + "∅ | ∅": true, + "∅ | ∅ | 𝓤": false, + "∅ | ∅ | myInt": false, + "𝓤": false, + "𝓤 | int": false, + "𝓤 | myInt | ∅": false, + } { + xl := maketl(test) + got := xl.isEmpty() + if got != want { + t.Errorf("(%v).isEmpty() == %v; want %v", test, got, want) + } + } +} + +func TestTermlistIsAll(t *testing.T) { + for test, want := 
range map[string]bool{ + "∅": false, + "∅ | ∅": false, + "int | ~string": false, + "~int | myInt": false, + "∅ | ∅ | 𝓤": true, + "𝓤": true, + "𝓤 | int": true, + "myInt | 𝓤": true, + } { + xl := maketl(test) + got := xl.isAll() + if got != want { + t.Errorf("(%v).isAll() == %v; want %v", test, got, want) + } + } +} + +func TestTermlistNorm(t *testing.T) { + for _, test := range []struct { + xl, want string + }{ + {"∅", "∅"}, + {"∅ | ∅", "∅"}, + {"∅ | int", "int"}, + {"∅ | myInt", "myInt"}, + {"𝓤 | int", "𝓤"}, + {"𝓤 | myInt", "𝓤"}, + {"int | myInt", "int | myInt"}, + {"~int | int", "~int"}, + {"~int | myInt", "~int"}, + {"int | ~string | int", "int | ~string"}, + {"~int | string | 𝓤 | ~string | int", "𝓤"}, + {"~int | string | myInt | ~string | int", "~int | ~string"}, + } { + xl := maketl(test.xl) + got := maketl(test.xl).norm() + if got.String() != test.want { + t.Errorf("(%v).norm() = %v; want %v", xl, got, test.want) + } + } +} + +func TestTermlistUnion(t *testing.T) { + for _, test := range []struct { + xl, yl, want string + }{ + + {"∅", "∅", "∅"}, + {"∅", "𝓤", "𝓤"}, + {"∅", "int", "int"}, + {"𝓤", "~int", "𝓤"}, + {"int", "~int", "~int"}, + {"int", "string", "int | string"}, + {"int", "myInt", "int | myInt"}, + {"~int", "myInt", "~int"}, + {"int | string", "~string", "int | ~string"}, + {"~int | string", "~string | int", "~int | ~string"}, + {"~int | string | ∅", "~string | int", "~int | ~string"}, + {"~int | myInt | ∅", "~string | int", "~int | ~string"}, + {"~int | string | 𝓤", "~string | int", "𝓤"}, + {"~int | string | myInt", "~string | int", "~int | ~string"}, + } { + xl := maketl(test.xl) + yl := maketl(test.yl) + got := xl.union(yl).String() + if got != test.want { + t.Errorf("(%v).union(%v) = %v; want %v", test.xl, test.yl, got, test.want) + } + } +} + +func TestTermlistIntersect(t *testing.T) { + for _, test := range []struct { + xl, yl, want string + }{ + + {"∅", "∅", "∅"}, + {"∅", "𝓤", "∅"}, + {"∅", "int", "∅"}, + {"∅", "myInt", "∅"}, + {"𝓤", "~int", 
"~int"}, + {"𝓤", "myInt", "myInt"}, + {"int", "~int", "int"}, + {"int", "string", "∅"}, + {"int", "myInt", "∅"}, + {"~int", "myInt", "myInt"}, + {"int | string", "~string", "string"}, + {"~int | string", "~string | int", "int | string"}, + {"~int | string | ∅", "~string | int", "int | string"}, + {"~int | myInt | ∅", "~string | int", "int"}, + {"~int | string | 𝓤", "~string | int", "int | ~string"}, + {"~int | string | myInt", "~string | int", "int | string"}, + } { + xl := maketl(test.xl) + yl := maketl(test.yl) + got := xl.intersect(yl).String() + if got != test.want { + t.Errorf("(%v).intersect(%v) = %v; want %v", test.xl, test.yl, got, test.want) + } + } +} + +func TestTermlistEqual(t *testing.T) { + for _, test := range []struct { + xl, yl string + want bool + }{ + {"∅", "∅", true}, + {"∅", "𝓤", false}, + {"𝓤", "𝓤", true}, + {"𝓤 | int", "𝓤", true}, + {"𝓤 | int", "string | 𝓤", true}, + {"𝓤 | myInt", "string | 𝓤", true}, + {"int | ~string", "string | int", false}, + {"~int | string", "string | myInt", false}, + {"int | ~string | ∅", "string | int | ~string", true}, + } { + xl := maketl(test.xl) + yl := maketl(test.yl) + got := xl.equal(yl) + if got != test.want { + t.Errorf("(%v).equal(%v) = %v; want %v", test.xl, test.yl, got, test.want) + } + } +} + +func TestTermlistIncludes(t *testing.T) { + for _, test := range []struct { + xl, typ string + want bool + }{ + {"∅", "int", false}, + {"𝓤", "int", true}, + {"~int", "int", true}, + {"int", "string", false}, + {"~int", "string", false}, + {"~int", "myInt", true}, + {"int | string", "string", true}, + {"~int | string", "int", true}, + {"~int | string", "myInt", true}, + {"~int | myInt | ∅", "myInt", true}, + {"myInt | ∅ | 𝓤", "int", true}, + } { + xl := maketl(test.xl) + yl := testTerm(test.typ).typ + got := xl.includes(yl) + if got != test.want { + t.Errorf("(%v).includes(%v) = %v; want %v", test.xl, yl, got, test.want) + } + } +} + +func TestTermlistSupersetOf(t *testing.T) { + for _, test := range []struct { + 
xl, typ string + want bool + }{ + {"∅", "∅", true}, + {"∅", "𝓤", false}, + {"∅", "int", false}, + {"𝓤", "∅", true}, + {"𝓤", "𝓤", true}, + {"𝓤", "int", true}, + {"𝓤", "~int", true}, + {"𝓤", "myInt", true}, + {"~int", "int", true}, + {"~int", "~int", true}, + {"~int", "myInt", true}, + {"int", "~int", false}, + {"myInt", "~int", false}, + {"int", "string", false}, + {"~int", "string", false}, + {"int | string", "string", true}, + {"int | string", "~string", false}, + {"~int | string", "int", true}, + {"~int | string", "myInt", true}, + {"~int | string | ∅", "string", true}, + {"~string | ∅ | 𝓤", "myInt", true}, + } { + xl := maketl(test.xl) + y := testTerm(test.typ) + got := xl.supersetOf(y) + if got != test.want { + t.Errorf("(%v).supersetOf(%v) = %v; want %v", test.xl, y, got, test.want) + } + } +} + +func TestTermlistSubsetOf(t *testing.T) { + for _, test := range []struct { + xl, yl string + want bool + }{ + {"∅", "∅", true}, + {"∅", "𝓤", true}, + {"𝓤", "∅", false}, + {"𝓤", "𝓤", true}, + {"int", "int | string", true}, + {"~int", "int | string", false}, + {"~int", "myInt | string", false}, + {"myInt", "~int | string", true}, + {"~int", "string | string | int | ~int", true}, + {"myInt", "string | string | ~int", true}, + {"int | string", "string", false}, + {"int | string", "string | int", true}, + {"int | ~string", "string | int", false}, + {"myInt | ~string", "string | int | 𝓤", true}, + {"int | ~string", "string | int | ∅ | string", false}, + {"int | myInt", "string | ~int | ∅ | string", true}, + } { + xl := maketl(test.xl) + yl := maketl(test.yl) + got := xl.subsetOf(yl) + if got != test.want { + t.Errorf("(%v).subsetOf(%v) = %v; want %v", test.xl, test.yl, got, test.want) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/testdata/local/issue47996.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/testdata/local/issue47996.go new file mode 100644 index 
0000000000000000000000000000000000000000..375a931f77fec56cd4f1484deaba3f106e86f90d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/testdata/local/issue47996.go @@ -0,0 +1,8 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +// don't crash +func T /* ERROR "missing" */ [P] /* ERROR "missing" */ m /* ERROR "unexpected" */ () /* ERROR ")" */ { /* ERROR "{" */ } /* ERROR "}" */ diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/testdata/manual.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/testdata/manual.go new file mode 100644 index 0000000000000000000000000000000000000000..57dcc227a5980279cc165f7154b3f81cfde1cfd5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/testdata/manual.go @@ -0,0 +1,8 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is tested when running "go test -run Manual" +// without source arguments. Use for one-off debugging. + +package p diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/tuple.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/tuple.go new file mode 100644 index 0000000000000000000000000000000000000000..1356aae0b018a464ef6c9deed88138e5d7115d7c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/tuple.go @@ -0,0 +1,34 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +// A Tuple represents an ordered list of variables; a nil *Tuple is a valid (empty) tuple. 
+// Tuples are used as components of signatures and to represent the type of multiple +// assignments; they are not first class types of Go. +type Tuple struct { + vars []*Var +} + +// NewTuple returns a new tuple for the given variables. +func NewTuple(x ...*Var) *Tuple { + if len(x) > 0 { + return &Tuple{vars: x} + } + return nil +} + +// Len returns the number variables of tuple t. +func (t *Tuple) Len() int { + if t != nil { + return len(t.vars) + } + return 0 +} + +// At returns the i'th variable of tuple t. +func (t *Tuple) At(i int) *Var { return t.vars[i] } + +func (t *Tuple) Underlying() Type { return t } +func (t *Tuple) String() string { return TypeString(t, nil) } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/type.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/type.go new file mode 100644 index 0000000000000000000000000000000000000000..bd194213b21598a595fb15e070cfdbcf00182056 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/type.go @@ -0,0 +1,11 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import "cmd/compile/internal/syntax" + +// A Type represents a type of Go. +// All types implement the Type interface. +type Type = syntax.Type diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typelists.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typelists.go new file mode 100644 index 0000000000000000000000000000000000000000..a2aba4a9a553c9cdcd0895d747e0222afc14bd6d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typelists.go @@ -0,0 +1,69 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package types2 + +// TypeParamList holds a list of type parameters. +type TypeParamList struct{ tparams []*TypeParam } + +// Len returns the number of type parameters in the list. +// It is safe to call on a nil receiver. +func (l *TypeParamList) Len() int { return len(l.list()) } + +// At returns the i'th type parameter in the list. +func (l *TypeParamList) At(i int) *TypeParam { return l.tparams[i] } + +// list is for internal use where we expect a []*TypeParam. +// TODO(rfindley): list should probably be eliminated: we can pass around a +// TypeParamList instead. +func (l *TypeParamList) list() []*TypeParam { + if l == nil { + return nil + } + return l.tparams +} + +// TypeList holds a list of types. +type TypeList struct{ types []Type } + +// newTypeList returns a new TypeList with the types in list. +func newTypeList(list []Type) *TypeList { + if len(list) == 0 { + return nil + } + return &TypeList{list} +} + +// Len returns the number of types in the list. +// It is safe to call on a nil receiver. +func (l *TypeList) Len() int { return len(l.list()) } + +// At returns the i'th type in the list. +func (l *TypeList) At(i int) Type { return l.types[i] } + +// list is for internal use where we expect a []Type. +// TODO(rfindley): list should probably be eliminated: we can pass around a +// TypeList instead. 
+func (l *TypeList) list() []Type { + if l == nil { + return nil + } + return l.types +} + +// ---------------------------------------------------------------------------- +// Implementation + +func bindTParams(list []*TypeParam) *TypeParamList { + if len(list) == 0 { + return nil + } + for i, typ := range list { + if typ.index >= 0 { + panic("type parameter bound more than once") + } + typ.index = i + } + return &TypeParamList{tparams: list} +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typeparam.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typeparam.go new file mode 100644 index 0000000000000000000000000000000000000000..5c6030b3fbb57fdd123d71e0232467688fbf0516 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typeparam.go @@ -0,0 +1,156 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import "sync/atomic" + +// Note: This is a uint32 rather than a uint64 because the +// respective 64 bit atomic instructions are not available +// on all platforms. +var lastID atomic.Uint32 + +// nextID returns a value increasing monotonically by 1 with +// each call, starting with 1. It may be called concurrently. +func nextID() uint64 { return uint64(lastID.Add(1)) } + +// A TypeParam represents a type parameter type. +type TypeParam struct { + check *Checker // for lazy type bound completion + id uint64 // unique id, for debugging only + obj *TypeName // corresponding type name + index int // type parameter index in source order, starting at 0 + bound Type // any type, but underlying is eventually *Interface for correct programs (see TypeParam.iface) +} + +// NewTypeParam returns a new TypeParam. Type parameters may be set on a Named +// or Signature type by calling SetTypeParams. Setting a type parameter on more +// than one type will result in a panic. 
+// +// The constraint argument can be nil, and set later via SetConstraint. If the +// constraint is non-nil, it must be fully defined. +func NewTypeParam(obj *TypeName, constraint Type) *TypeParam { + return (*Checker)(nil).newTypeParam(obj, constraint) +} + +// check may be nil +func (check *Checker) newTypeParam(obj *TypeName, constraint Type) *TypeParam { + // Always increment lastID, even if it is not used. + id := nextID() + if check != nil { + check.nextID++ + id = check.nextID + } + typ := &TypeParam{check: check, id: id, obj: obj, index: -1, bound: constraint} + if obj.typ == nil { + obj.typ = typ + } + // iface may mutate typ.bound, so we must ensure that iface() is called + // at least once before the resulting TypeParam escapes. + if check != nil { + check.needsCleanup(typ) + } else if constraint != nil { + typ.iface() + } + return typ +} + +// Obj returns the type name for the type parameter t. +func (t *TypeParam) Obj() *TypeName { return t.obj } + +// Index returns the index of the type param within its param list, or -1 if +// the type parameter has not yet been bound to a type. +func (t *TypeParam) Index() int { + return t.index +} + +// Constraint returns the type constraint specified for t. +func (t *TypeParam) Constraint() Type { + return t.bound +} + +// SetConstraint sets the type constraint for t. +// +// It must be called by users of NewTypeParam after the bound's underlying is +// fully defined, and before using the type parameter in any way other than to +// form other types. Once SetConstraint returns the receiver, t is safe for +// concurrent use. +func (t *TypeParam) SetConstraint(bound Type) { + if bound == nil { + panic("nil constraint") + } + t.bound = bound + // iface may mutate t.bound (if bound is not an interface), so ensure that + // this is done before returning. 
+ t.iface() +} + +func (t *TypeParam) Underlying() Type { + return t.iface() +} + +func (t *TypeParam) String() string { return TypeString(t, nil) } + +// ---------------------------------------------------------------------------- +// Implementation + +func (t *TypeParam) cleanup() { + t.iface() + t.check = nil +} + +// iface returns the constraint interface of t. +func (t *TypeParam) iface() *Interface { + bound := t.bound + + // determine constraint interface + var ityp *Interface + switch u := under(bound).(type) { + case *Basic: + if !isValid(u) { + // error is reported elsewhere + return &emptyInterface + } + case *Interface: + if isTypeParam(bound) { + // error is reported in Checker.collectTypeParams + return &emptyInterface + } + ityp = u + } + + // If we don't have an interface, wrap constraint into an implicit interface. + if ityp == nil { + ityp = NewInterfaceType(nil, []Type{bound}) + ityp.implicit = true + t.bound = ityp // update t.bound for next time (optimization) + } + + // compute type set if necessary + if ityp.tset == nil { + // pos is used for tracing output; start with the type parameter position. + pos := t.obj.pos + // use the (original or possibly instantiated) type bound position if we have one + if n := asNamed(bound); n != nil { + pos = n.obj.pos + } + computeInterfaceTypeSet(t.check, pos, ityp) + } + + return ityp +} + +// is calls f with the specific type terms of t's constraint and reports whether +// all calls to f returned true. If there are no specific terms, is +// returns the result of f(nil). +func (t *TypeParam) is(f func(*term) bool) bool { + return t.iface().typeSet().is(f) +} + +// underIs calls f with the underlying types of the specific type terms +// of t's constraint and reports whether all calls to f returned true. +// If there are no specific terms, underIs returns the result of f(nil). 
+func (t *TypeParam) underIs(f func(Type) bool) bool { + return t.iface().typeSet().underIs(f) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typeset.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typeset.go new file mode 100644 index 0000000000000000000000000000000000000000..a6ccfdb80cc727744c92c44d4cd5f92c6fcdefa4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typeset.go @@ -0,0 +1,415 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import ( + "cmd/compile/internal/syntax" + . "internal/types/errors" + "sort" + "strings" +) + +// ---------------------------------------------------------------------------- +// API + +// A _TypeSet represents the type set of an interface. +// Because of existing language restrictions, methods can be "factored out" +// from the terms. The actual type set is the intersection of the type set +// implied by the methods and the type set described by the terms and the +// comparable bit. To test whether a type is included in a type set +// ("implements" relation), the type must implement all methods _and_ be +// an element of the type set described by the terms and the comparable bit. +// If the term list describes the set of all types and comparable is true, +// only comparable types are meant; in all other cases comparable is false. +type _TypeSet struct { + methods []*Func // all methods of the interface; sorted by unique ID + terms termlist // type terms of the type set + comparable bool // invariant: !comparable || terms.isAll() +} + +// IsEmpty reports whether type set s is the empty set. +func (s *_TypeSet) IsEmpty() bool { return s.terms.isEmpty() } + +// IsAll reports whether type set s is the set of all types (corresponding to the empty interface). 
+func (s *_TypeSet) IsAll() bool { return s.IsMethodSet() && len(s.methods) == 0 } + +// IsMethodSet reports whether the interface t is fully described by its method set. +func (s *_TypeSet) IsMethodSet() bool { return !s.comparable && s.terms.isAll() } + +// IsComparable reports whether each type in the set is comparable. +func (s *_TypeSet) IsComparable(seen map[Type]bool) bool { + if s.terms.isAll() { + return s.comparable + } + return s.is(func(t *term) bool { + return t != nil && comparable(t.typ, false, seen, nil) + }) +} + +// NumMethods returns the number of methods available. +func (s *_TypeSet) NumMethods() int { return len(s.methods) } + +// Method returns the i'th method of type set s for 0 <= i < s.NumMethods(). +// The methods are ordered by their unique ID. +func (s *_TypeSet) Method(i int) *Func { return s.methods[i] } + +// LookupMethod returns the index of and method with matching package and name, or (-1, nil). +func (s *_TypeSet) LookupMethod(pkg *Package, name string, foldCase bool) (int, *Func) { + return lookupMethod(s.methods, pkg, name, foldCase) +} + +func (s *_TypeSet) String() string { + switch { + case s.IsEmpty(): + return "∅" + case s.IsAll(): + return "𝓤" + } + + hasMethods := len(s.methods) > 0 + hasTerms := s.hasTerms() + + var buf strings.Builder + buf.WriteByte('{') + if s.comparable { + buf.WriteString("comparable") + if hasMethods || hasTerms { + buf.WriteString("; ") + } + } + for i, m := range s.methods { + if i > 0 { + buf.WriteString("; ") + } + buf.WriteString(m.String()) + } + if hasMethods && hasTerms { + buf.WriteString("; ") + } + if hasTerms { + buf.WriteString(s.terms.String()) + } + buf.WriteString("}") + return buf.String() +} + +// ---------------------------------------------------------------------------- +// Implementation + +// hasTerms reports whether the type set has specific type terms. 
+func (s *_TypeSet) hasTerms() bool { return !s.terms.isEmpty() && !s.terms.isAll() } + +// subsetOf reports whether s1 ⊆ s2. +func (s1 *_TypeSet) subsetOf(s2 *_TypeSet) bool { return s1.terms.subsetOf(s2.terms) } + +// TODO(gri) TypeSet.is and TypeSet.underIs should probably also go into termlist.go + +// is calls f with the specific type terms of s and reports whether +// all calls to f returned true. If there are no specific terms, is +// returns the result of f(nil). +func (s *_TypeSet) is(f func(*term) bool) bool { + if !s.hasTerms() { + return f(nil) + } + for _, t := range s.terms { + assert(t.typ != nil) + if !f(t) { + return false + } + } + return true +} + +// underIs calls f with the underlying types of the specific type terms +// of s and reports whether all calls to f returned true. If there are +// no specific terms, underIs returns the result of f(nil). +func (s *_TypeSet) underIs(f func(Type) bool) bool { + if !s.hasTerms() { + return f(nil) + } + for _, t := range s.terms { + assert(t.typ != nil) + // x == under(x) for ~x terms + u := t.typ + if !t.tilde { + u = under(u) + } + if debug { + assert(Identical(u, under(u))) + } + if !f(u) { + return false + } + } + return true +} + +// topTypeSet may be used as type set for the empty interface. +var topTypeSet = _TypeSet{terms: allTermlist} + +// computeInterfaceTypeSet may be called with check == nil. +func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_TypeSet { + if ityp.tset != nil { + return ityp.tset + } + + // If the interface is not fully set up yet, the type set will + // not be complete, which may lead to errors when using the + // type set (e.g. missing method). Don't compute a partial type + // set (and don't store it!), so that we still compute the full + // type set eventually. Instead, return the top type set and + // let any follow-on errors play out. 
+ if !ityp.complete { + return &topTypeSet + } + + if check != nil && check.conf.Trace { + // Types don't generally have position information. + // If we don't have a valid pos provided, try to use + // one close enough. + if !pos.IsKnown() && len(ityp.methods) > 0 { + pos = ityp.methods[0].pos + } + + check.trace(pos, "-- type set for %s", ityp) + check.indent++ + defer func() { + check.indent-- + check.trace(pos, "=> %s ", ityp.typeSet()) + }() + } + + // An infinitely expanding interface (due to a cycle) is detected + // elsewhere (Checker.validType), so here we simply assume we only + // have valid interfaces. Mark the interface as complete to avoid + // infinite recursion if the validType check occurs later for some + // reason. + ityp.tset = &_TypeSet{terms: allTermlist} // TODO(gri) is this sufficient? + + var unionSets map[*Union]*_TypeSet + if check != nil { + if check.unionTypeSets == nil { + check.unionTypeSets = make(map[*Union]*_TypeSet) + } + unionSets = check.unionTypeSets + } else { + unionSets = make(map[*Union]*_TypeSet) + } + + // Methods of embedded interfaces are collected unchanged; i.e., the identity + // of a method I.m's Func Object of an interface I is the same as that of + // the method m in an interface that embeds interface I. On the other hand, + // if a method is embedded via multiple overlapping embedded interfaces, we + // don't provide a guarantee which "original m" got chosen for the embedding + // interface. See also go.dev/issue/34421. + // + // If we don't care to provide this identity guarantee anymore, instead of + // reusing the original method in embeddings, we can clone the method's Func + // Object and give it the position of a corresponding embedded interface. Then + // we can get rid of the mpos map below and simply use the cloned method's + // position. 
+ + var seen objset + var allMethods []*Func + mpos := make(map[*Func]syntax.Pos) // method specification or method embedding position, for good error messages + addMethod := func(pos syntax.Pos, m *Func, explicit bool) { + switch other := seen.insert(m); { + case other == nil: + allMethods = append(allMethods, m) + mpos[m] = pos + case explicit: + if check != nil { + var err error_ + err.code = DuplicateDecl + err.errorf(pos, "duplicate method %s", m.name) + err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name) + check.report(&err) + } + default: + // We have a duplicate method name in an embedded (not explicitly declared) method. + // Check method signatures after all types are computed (go.dev/issue/33656). + // If we're pre-go1.14 (overlapping embeddings are not permitted), report that + // error here as well (even though we could do it eagerly) because it's the same + // error message. + if check != nil { + check.later(func() { + if !check.allowVersion(m.pkg, pos, go1_14) || !Identical(m.typ, other.Type()) { + var err error_ + err.code = DuplicateDecl + err.errorf(pos, "duplicate method %s", m.name) + err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name) + check.report(&err) + } + }).describef(pos, "duplicate method check for %s", m.name) + } + } + } + + for _, m := range ityp.methods { + addMethod(m.pos, m, true) + } + + // collect embedded elements + allTerms := allTermlist + allComparable := false + for i, typ := range ityp.embeddeds { + // The embedding position is nil for imported interfaces + // and also for interface copies after substitution (but + // in that case we don't need to report errors again). + var pos syntax.Pos // embedding position + if ityp.embedPos != nil { + pos = (*ityp.embedPos)[i] + } + var comparable bool + var terms termlist + switch u := under(typ).(type) { + case *Interface: + // For now we don't permit type parameters as constraints. 
+ assert(!isTypeParam(typ)) + tset := computeInterfaceTypeSet(check, pos, u) + // If typ is local, an error was already reported where typ is specified/defined. + if check != nil && check.isImportedConstraint(typ) && !check.verifyVersionf(pos, go1_18, "embedding constraint interface %s", typ) { + continue + } + comparable = tset.comparable + for _, m := range tset.methods { + addMethod(pos, m, false) // use embedding position pos rather than m.pos + } + terms = tset.terms + case *Union: + if check != nil && !check.verifyVersionf(pos, go1_18, "embedding interface element %s", u) { + continue + } + tset := computeUnionTypeSet(check, unionSets, pos, u) + if tset == &invalidTypeSet { + continue // ignore invalid unions + } + assert(!tset.comparable) + assert(len(tset.methods) == 0) + terms = tset.terms + default: + if !isValid(u) { + continue + } + if check != nil && !check.verifyVersionf(pos, go1_18, "embedding non-interface type %s", typ) { + continue + } + terms = termlist{{false, typ}} + } + + // The type set of an interface is the intersection of the type sets of all its elements. + // Due to language restrictions, only embedded interfaces can add methods, they are handled + // separately. Here we only need to intersect the term lists and comparable bits. + allTerms, allComparable = intersectTermLists(allTerms, allComparable, terms, comparable) + } + + ityp.tset.comparable = allComparable + if len(allMethods) != 0 { + sortMethods(allMethods) + ityp.tset.methods = allMethods + } + ityp.tset.terms = allTerms + + return ityp.tset +} + +// TODO(gri) The intersectTermLists function belongs to the termlist implementation. +// The comparable type set may also be best represented as a term (using +// a special type). + +// intersectTermLists computes the intersection of two term lists and respective comparable bits. +// xcomp, ycomp are valid only if xterms.isAll() and yterms.isAll() respectively. 
+func intersectTermLists(xterms termlist, xcomp bool, yterms termlist, ycomp bool) (termlist, bool) { + terms := xterms.intersect(yterms) + // If one of xterms or yterms is marked as comparable, + // the result must only include comparable types. + comp := xcomp || ycomp + if comp && !terms.isAll() { + // only keep comparable terms + i := 0 + for _, t := range terms { + assert(t.typ != nil) + if comparable(t.typ, false /* strictly comparable */, nil, nil) { + terms[i] = t + i++ + } + } + terms = terms[:i] + if !terms.isAll() { + comp = false + } + } + assert(!comp || terms.isAll()) // comparable invariant + return terms, comp +} + +func sortMethods(list []*Func) { + sort.Sort(byUniqueMethodName(list)) +} + +func assertSortedMethods(list []*Func) { + if !debug { + panic("assertSortedMethods called outside debug mode") + } + if !sort.IsSorted(byUniqueMethodName(list)) { + panic("methods not sorted") + } +} + +// byUniqueMethodName method lists can be sorted by their unique method names. +type byUniqueMethodName []*Func + +func (a byUniqueMethodName) Len() int { return len(a) } +func (a byUniqueMethodName) Less(i, j int) bool { return a[i].less(&a[j].object) } +func (a byUniqueMethodName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// invalidTypeSet is a singleton type set to signal an invalid type set +// due to an error. It's also a valid empty type set, so consumers of +// type sets may choose to ignore it. +var invalidTypeSet _TypeSet + +// computeUnionTypeSet may be called with check == nil. +// The result is &invalidTypeSet if the union overflows. 
+func computeUnionTypeSet(check *Checker, unionSets map[*Union]*_TypeSet, pos syntax.Pos, utyp *Union) *_TypeSet { + if tset, _ := unionSets[utyp]; tset != nil { + return tset + } + + // avoid infinite recursion (see also computeInterfaceTypeSet) + unionSets[utyp] = new(_TypeSet) + + var allTerms termlist + for _, t := range utyp.terms { + var terms termlist + u := under(t.typ) + if ui, _ := u.(*Interface); ui != nil { + // For now we don't permit type parameters as constraints. + assert(!isTypeParam(t.typ)) + terms = computeInterfaceTypeSet(check, pos, ui).terms + } else if !isValid(u) { + continue + } else { + if t.tilde && !Identical(t.typ, u) { + // There is no underlying type which is t.typ. + // The corresponding type set is empty. + t = nil // ∅ term + } + terms = termlist{(*term)(t)} + } + // The type set of a union expression is the union + // of the type sets of each term. + allTerms = allTerms.union(terms) + if len(allTerms) > maxTermCount { + if check != nil { + check.errorf(pos, InvalidUnion, "cannot handle more than %d union terms (implementation limitation)", maxTermCount) + } + unionSets[utyp] = &invalidTypeSet + return unionSets[utyp] + } + } + unionSets[utyp].terms = allTerms + + return unionSets[utyp] +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typeset_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typeset_test.go new file mode 100644 index 0000000000000000000000000000000000000000..40ca28e525feb5c9c62f8adf411d933837b83acc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typeset_test.go @@ -0,0 +1,80 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package types2 + +import ( + "cmd/compile/internal/syntax" + "strings" + "testing" +) + +func TestInvalidTypeSet(t *testing.T) { + if !invalidTypeSet.IsEmpty() { + t.Error("invalidTypeSet is not empty") + } +} + +func TestTypeSetString(t *testing.T) { + for body, want := range map[string]string{ + "{}": "𝓤", + "{int}": "{int}", + "{~int}": "{~int}", + "{int|string}": "{int | string}", + "{int; string}": "∅", + + "{comparable}": "{comparable}", + "{comparable; int}": "{int}", + "{~int; comparable}": "{~int}", + "{int|string; comparable}": "{int | string}", + "{comparable; int; string}": "∅", + + "{m()}": "{func (p.T).m()}", + "{m1(); m2() int }": "{func (p.T).m1(); func (p.T).m2() int}", + "{error}": "{func (error).Error() string}", + "{m(); comparable}": "{comparable; func (p.T).m()}", + "{m1(); comparable; m2() int }": "{comparable; func (p.T).m1(); func (p.T).m2() int}", + "{comparable; error}": "{comparable; func (error).Error() string}", + + "{m(); comparable; int|float32|string}": "{func (p.T).m(); int | float32 | string}", + "{m1(); int; m2(); comparable }": "{func (p.T).m1(); func (p.T).m2(); int}", + + "{E}; type E interface{}": "𝓤", + "{E}; type E interface{int;string}": "∅", + "{E}; type E interface{comparable}": "{comparable}", + } { + // parse + errh := func(error) {} // dummy error handler so that parsing continues in presence of errors + src := "package p; type T interface" + body + file, err := syntax.Parse(nil, strings.NewReader(src), errh, nil, 0) + if err != nil { + t.Fatalf("%s: %v (invalid test case)", body, err) + } + + // type check + var conf Config + pkg, err := conf.Check(file.PkgName.Value, []*syntax.File{file}, nil) + if err != nil { + t.Fatalf("%s: %v (invalid test case)", body, err) + } + + // lookup T + obj := pkg.scope.Lookup("T") + if obj == nil { + t.Fatalf("%s: T not found (invalid test case)", body) + } + T, ok := under(obj.Type()).(*Interface) + if !ok { + t.Fatalf("%s: %v is not an interface (invalid test case)", body, obj) + 
} + + // verify test case + got := T.typeSet().String() + if got != want { + t.Errorf("%s: got %s; want %s", body, got, want) + } + } +} + +// TODO(gri) add more tests diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typestring.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typestring.go new file mode 100644 index 0000000000000000000000000000000000000000..4b410af6b75a776ed1372e8b3387f1ee24096e54 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typestring.go @@ -0,0 +1,504 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements printing of types. + +package types2 + +import ( + "bytes" + "fmt" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +// A Qualifier controls how named package-level objects are printed in +// calls to TypeString, ObjectString, and SelectionString. +// +// These three formatting routines call the Qualifier for each +// package-level object O, and if the Qualifier returns a non-empty +// string p, the object is printed in the form p.O. +// If it returns an empty string, only the object name O is printed. +// +// Using a nil Qualifier is equivalent to using (*Package).Path: the +// object is qualified by the import path, e.g., "encoding/json.Marshal". +type Qualifier func(*Package) string + +// RelativeTo returns a Qualifier that fully qualifies members of +// all packages other than pkg. +func RelativeTo(pkg *Package) Qualifier { + if pkg == nil { + return nil + } + return func(other *Package) string { + if pkg == other { + return "" // same package; unqualified + } + return other.Path() + } +} + +// TypeString returns the string representation of typ. +// The Qualifier controls the printing of +// package-level objects, and may be nil. 
+func TypeString(typ Type, qf Qualifier) string { + var buf bytes.Buffer + WriteType(&buf, typ, qf) + return buf.String() +} + +// WriteType writes the string representation of typ to buf. +// The Qualifier controls the printing of +// package-level objects, and may be nil. +func WriteType(buf *bytes.Buffer, typ Type, qf Qualifier) { + newTypeWriter(buf, qf).typ(typ) +} + +// WriteSignature writes the representation of the signature sig to buf, +// without a leading "func" keyword. The Qualifier controls the printing +// of package-level objects, and may be nil. +func WriteSignature(buf *bytes.Buffer, sig *Signature, qf Qualifier) { + newTypeWriter(buf, qf).signature(sig) +} + +type typeWriter struct { + buf *bytes.Buffer + seen map[Type]bool + qf Qualifier + ctxt *Context // if non-nil, we are type hashing + tparams *TypeParamList // local type parameters + paramNames bool // if set, write function parameter names, otherwise, write types only + tpSubscripts bool // if set, write type parameter indices as subscripts + pkgInfo bool // package-annotate first unexported-type field to avoid confusing type description +} + +func newTypeWriter(buf *bytes.Buffer, qf Qualifier) *typeWriter { + return &typeWriter{buf, make(map[Type]bool), qf, nil, nil, true, false, false} +} + +func newTypeHasher(buf *bytes.Buffer, ctxt *Context) *typeWriter { + assert(ctxt != nil) + return &typeWriter{buf, make(map[Type]bool), nil, ctxt, nil, false, false, false} +} + +func (w *typeWriter) byte(b byte) { + if w.ctxt != nil { + if b == ' ' { + b = '#' + } + w.buf.WriteByte(b) + return + } + w.buf.WriteByte(b) + if b == ',' || b == ';' { + w.buf.WriteByte(' ') + } +} + +func (w *typeWriter) string(s string) { + w.buf.WriteString(s) +} + +func (w *typeWriter) error(msg string) { + if w.ctxt != nil { + panic(msg) + } + w.buf.WriteString("<" + msg + ">") +} + +func (w *typeWriter) typ(typ Type) { + if w.seen[typ] { + w.error("cycle to " + goTypeName(typ)) + return + } + w.seen[typ] = true + 
defer delete(w.seen, typ) + + switch t := typ.(type) { + case nil: + w.error("nil") + + case *Basic: + // exported basic types go into package unsafe + // (currently this is just unsafe.Pointer) + if isExported(t.name) { + if obj, _ := Unsafe.scope.Lookup(t.name).(*TypeName); obj != nil { + w.typeName(obj) + break + } + } + w.string(t.name) + + case *Array: + w.byte('[') + w.string(strconv.FormatInt(t.len, 10)) + w.byte(']') + w.typ(t.elem) + + case *Slice: + w.string("[]") + w.typ(t.elem) + + case *Struct: + w.string("struct{") + for i, f := range t.fields { + if i > 0 { + w.byte(';') + } + + // If disambiguating one struct for another, look for the first unexported field. + // Do this first in case of nested structs; tag the first-outermost field. + pkgAnnotate := false + if w.qf == nil && w.pkgInfo && !isExported(f.name) { + // note for embedded types, type name is field name, and "string" etc are lower case hence unexported. + pkgAnnotate = true + w.pkgInfo = false // only tag once + } + + // This doesn't do the right thing for embedded type + // aliases where we should print the alias name, not + // the aliased type (see go.dev/issue/44410). + if !f.embedded { + w.string(f.name) + w.byte(' ') + } + w.typ(f.typ) + if pkgAnnotate { + w.string(" /* package ") + w.string(f.pkg.Path()) + w.string(" */ ") + } + if tag := t.Tag(i); tag != "" { + w.byte(' ') + // TODO(gri) If tag contains blanks, replacing them with '#' + // in Context.TypeHash may produce another tag + // accidentally. + w.string(strconv.Quote(tag)) + } + } + w.byte('}') + + case *Pointer: + w.byte('*') + w.typ(t.base) + + case *Tuple: + w.tuple(t, false) + + case *Signature: + w.string("func") + w.signature(t) + + case *Union: + // Unions only appear as (syntactic) embedded elements + // in interfaces and syntactically cannot be empty. 
+ if t.Len() == 0 { + w.error("empty union") + break + } + for i, t := range t.terms { + if i > 0 { + w.string(termSep) + } + if t.tilde { + w.byte('~') + } + w.typ(t.typ) + } + + case *Interface: + if w.ctxt == nil { + if t == universeAny.Type() { + // When not hashing, we can try to improve type strings by writing "any" + // for a type that is pointer-identical to universeAny. This logic should + // be deprecated by more robust handling for aliases. + w.string("any") + break + } + if t == asNamed(universeComparable.Type()).underlying { + w.string("interface{comparable}") + break + } + } + if t.implicit { + if len(t.methods) == 0 && len(t.embeddeds) == 1 { + w.typ(t.embeddeds[0]) + break + } + // Something's wrong with the implicit interface. + // Print it as such and continue. + w.string("/* implicit */ ") + } + w.string("interface{") + first := true + if w.ctxt != nil { + w.typeSet(t.typeSet()) + } else { + for _, m := range t.methods { + if !first { + w.byte(';') + } + first = false + w.string(m.name) + w.signature(m.typ.(*Signature)) + } + for _, typ := range t.embeddeds { + if !first { + w.byte(';') + } + first = false + w.typ(typ) + } + } + w.byte('}') + + case *Map: + w.string("map[") + w.typ(t.key) + w.byte(']') + w.typ(t.elem) + + case *Chan: + var s string + var parens bool + switch t.dir { + case SendRecv: + s = "chan " + // chan (<-chan T) requires parentheses + if c, _ := t.elem.(*Chan); c != nil && c.dir == RecvOnly { + parens = true + } + case SendOnly: + s = "chan<- " + case RecvOnly: + s = "<-chan " + default: + w.error("unknown channel direction") + } + w.string(s) + if parens { + w.byte('(') + } + w.typ(t.elem) + if parens { + w.byte(')') + } + + case *Named: + // If hashing, write a unique prefix for t to represent its identity, since + // named type identity is pointer identity. 
+ if w.ctxt != nil { + w.string(strconv.Itoa(w.ctxt.getID(t))) + } + w.typeName(t.obj) // when hashing written for readability of the hash only + if t.inst != nil { + // instantiated type + w.typeList(t.inst.targs.list()) + } else if w.ctxt == nil && t.TypeParams().Len() != 0 { // For type hashing, don't need to format the TypeParams + // parameterized type + w.tParamList(t.TypeParams().list()) + } + + case *TypeParam: + if t.obj == nil { + w.error("unnamed type parameter") + break + } + if i := tparamIndex(w.tparams.list(), t); i >= 0 { + // The names of type parameters that are declared by the type being + // hashed are not part of the type identity. Replace them with a + // placeholder indicating their index. + w.string(fmt.Sprintf("$%d", i)) + } else { + w.string(t.obj.name) + if w.tpSubscripts || w.ctxt != nil { + w.string(subscript(t.id)) + } + // If the type parameter name is the same as a predeclared object + // (say int), point out where it is declared to avoid confusing + // error messages. This doesn't need to be super-elegant; we just + // need a clear indication that this is not a predeclared name. + if w.ctxt == nil && Universe.Lookup(t.obj.name) != nil { + w.string(fmt.Sprintf(" /* with %s declared at %s */", t.obj.name, t.obj.Pos())) + } + } + + case *Alias: + w.typeName(t.obj) + if w.ctxt != nil { + // TODO(gri) do we need to print the alias type name, too? + w.typ(Unalias(t.obj.typ)) + } + + default: + // For externally defined implementations of Type. + // Note: In this case cycles won't be caught. + w.string(t.String()) + } +} + +// typeSet writes a canonical hash for an interface type set. 
+func (w *typeWriter) typeSet(s *_TypeSet) { + assert(w.ctxt != nil) + first := true + for _, m := range s.methods { + if !first { + w.byte(';') + } + first = false + w.string(m.name) + w.signature(m.typ.(*Signature)) + } + switch { + case s.terms.isAll(): + // nothing to do + case s.terms.isEmpty(): + w.string(s.terms.String()) + default: + var termHashes []string + for _, term := range s.terms { + // terms are not canonically sorted, so we sort their hashes instead. + var buf bytes.Buffer + if term.tilde { + buf.WriteByte('~') + } + newTypeHasher(&buf, w.ctxt).typ(term.typ) + termHashes = append(termHashes, buf.String()) + } + sort.Strings(termHashes) + if !first { + w.byte(';') + } + w.string(strings.Join(termHashes, "|")) + } +} + +func (w *typeWriter) typeList(list []Type) { + w.byte('[') + for i, typ := range list { + if i > 0 { + w.byte(',') + } + w.typ(typ) + } + w.byte(']') +} + +func (w *typeWriter) tParamList(list []*TypeParam) { + w.byte('[') + var prev Type + for i, tpar := range list { + // Determine the type parameter and its constraint. + // list is expected to hold type parameter names, + // but don't crash if that's not the case. 
+ if tpar == nil { + w.error("nil type parameter") + continue + } + if i > 0 { + if tpar.bound != prev { + // bound changed - write previous one before advancing + w.byte(' ') + w.typ(prev) + } + w.byte(',') + } + prev = tpar.bound + w.typ(tpar) + } + if prev != nil { + w.byte(' ') + w.typ(prev) + } + w.byte(']') +} + +func (w *typeWriter) typeName(obj *TypeName) { + w.string(packagePrefix(obj.pkg, w.qf)) + w.string(obj.name) +} + +func (w *typeWriter) tuple(tup *Tuple, variadic bool) { + w.byte('(') + if tup != nil { + for i, v := range tup.vars { + if i > 0 { + w.byte(',') + } + // parameter names are ignored for type identity and thus type hashes + if w.ctxt == nil && v.name != "" && w.paramNames { + w.string(v.name) + w.byte(' ') + } + typ := v.typ + if variadic && i == len(tup.vars)-1 { + if s, ok := typ.(*Slice); ok { + w.string("...") + typ = s.elem + } else { + // special case: + // append(s, "foo"...) leads to signature func([]byte, string...) + if t, _ := under(typ).(*Basic); t == nil || t.kind != String { + w.error("expected string type") + continue + } + w.typ(typ) + w.string("...") + continue + } + } + w.typ(typ) + } + } + w.byte(')') +} + +func (w *typeWriter) signature(sig *Signature) { + if sig.TypeParams().Len() != 0 { + if w.ctxt != nil { + assert(w.tparams == nil) + w.tparams = sig.TypeParams() + defer func() { + w.tparams = nil + }() + } + w.tParamList(sig.TypeParams().list()) + } + + w.tuple(sig.params, sig.variadic) + + n := sig.results.Len() + if n == 0 { + // no result + return + } + + w.byte(' ') + if n == 1 && (w.ctxt != nil || sig.results.vars[0].name == "") { + // single unnamed result (if type hashing, name must be ignored) + w.typ(sig.results.vars[0].typ) + return + } + + // multiple or named result(s) + w.tuple(sig.results, false) +} + +// subscript returns the decimal (utf8) representation of x using subscript digits. 
+func subscript(x uint64) string { + const w = len("₀") // all digits 0...9 have the same utf8 width + var buf [32 * w]byte + i := len(buf) + for { + i -= w + utf8.EncodeRune(buf[i:], '₀'+rune(x%10)) // '₀' == U+2080 + x /= 10 + if x == 0 { + break + } + } + return string(buf[i:]) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typestring_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typestring_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c2be40da291d9dd0d59b833af595b0b18de00175 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typestring_test.go @@ -0,0 +1,166 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2_test + +import ( + "internal/testenv" + "testing" + + . "cmd/compile/internal/types2" +) + +const filename = "" + +type testEntry struct { + src, str string +} + +// dup returns a testEntry where both src and str are the same. 
+func dup(s string) testEntry { + return testEntry{s, s} +} + +// types that don't depend on any other type declarations +var independentTestTypes = []testEntry{ + // basic types + dup("int"), + dup("float32"), + dup("string"), + + // arrays + dup("[10]int"), + + // slices + dup("[]int"), + dup("[][]int"), + + // structs + dup("struct{}"), + dup("struct{x int}"), + {`struct { + x, y int + z float32 "foo" + }`, `struct{x int; y int; z float32 "foo"}`}, + {`struct { + string + elems []complex128 + }`, `struct{string; elems []complex128}`}, + + // pointers + dup("*int"), + dup("***struct{}"), + dup("*struct{a int; b float32}"), + + // functions + dup("func()"), + dup("func(x int)"), + {"func(x, y int)", "func(x int, y int)"}, + {"func(x, y int, z string)", "func(x int, y int, z string)"}, + dup("func(int)"), + {"func(int, string, byte)", "func(int, string, byte)"}, + + dup("func() int"), + {"func() (string)", "func() string"}, + dup("func() (u int)"), + {"func() (u, v int, w string)", "func() (u int, v int, w string)"}, + + dup("func(int) string"), + dup("func(x int) string"), + dup("func(x int) (u string)"), + {"func(x, y int) (u string)", "func(x int, y int) (u string)"}, + + dup("func(...int) string"), + dup("func(x ...int) string"), + dup("func(x ...int) (u string)"), + {"func(x int, y ...int) (u string)", "func(x int, y ...int) (u string)"}, + + // interfaces + dup("interface{}"), + dup("interface{m()}"), + dup(`interface{String() string; m(int) float32}`), + dup("interface{int | float32 | complex128}"), + dup("interface{int | ~float32 | ~complex128}"), + dup("any"), + dup("interface{comparable}"), + {"comparable", "interface{comparable}"}, + {"error", "interface{Error() string}"}, + + // maps + dup("map[string]int"), + {"map[struct{x, y int}][]byte", "map[struct{x int; y int}][]byte"}, + + // channels + dup("chan<- chan int"), + dup("chan<- <-chan int"), + dup("<-chan <-chan int"), + dup("chan (<-chan int)"), + dup("chan<- func()"), + dup("<-chan []func() int"), 
+} + +// types that depend on other type declarations (src in TestTypes) +var dependentTestTypes = []testEntry{ + // interfaces + dup(`interface{io.Reader; io.Writer}`), + dup(`interface{m() int; io.Writer}`), + {`interface{m() interface{T}}`, `interface{m() interface{generic_p.T}}`}, +} + +func TestTypeString(t *testing.T) { + // The Go command is needed for the importer to determine the locations of stdlib .a files. + testenv.MustHaveGoBuild(t) + + var tests []testEntry + tests = append(tests, independentTestTypes...) + tests = append(tests, dependentTestTypes...) + + for _, test := range tests { + src := `package generic_p; import "io"; type _ io.Writer; type T ` + test.src + pkg, err := typecheck(src, nil, nil) + if err != nil { + t.Errorf("%s: %s", src, err) + continue + } + obj := pkg.Scope().Lookup("T") + if obj == nil { + t.Errorf("%s: T not found", test.src) + continue + } + typ := obj.Type().Underlying() + if got := typ.String(); got != test.str { + t.Errorf("%s: got %s, want %s", test.src, got, test.str) + } + } +} + +func TestQualifiedTypeString(t *testing.T) { + p := mustTypecheck("package p; type T int", nil, nil) + q := mustTypecheck("package q", nil, nil) + + pT := p.Scope().Lookup("T").Type() + for _, test := range []struct { + typ Type + this *Package + want string + }{ + {nil, nil, ""}, + {pT, nil, "p.T"}, + {pT, p, "T"}, + {pT, q, "p.T"}, + {NewPointer(pT), p, "*T"}, + {NewPointer(pT), q, "*p.T"}, + } { + qualifier := func(pkg *Package) string { + if pkg != test.this { + return pkg.Name() + } + return "" + } + if got := TypeString(test.typ, qualifier); got != test.want { + t.Errorf("TypeString(%s, %s) = %s, want %s", + test.this, test.typ, got, test.want) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typeterm.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typeterm.go new file mode 100644 index 0000000000000000000000000000000000000000..97791324e1e75c668a1f5ce5cc0516e337ff6e96 --- 
/dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typeterm.go @@ -0,0 +1,165 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +// A term describes elementary type sets: +// +// ∅: (*term)(nil) == ∅ // set of no types (empty set) +// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) +// T: &term{false, T} == {T} // set of type T +// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t +type term struct { + tilde bool // valid if typ != nil + typ Type +} + +func (x *term) String() string { + switch { + case x == nil: + return "∅" + case x.typ == nil: + return "𝓤" + case x.tilde: + return "~" + x.typ.String() + default: + return x.typ.String() + } +} + +// equal reports whether x and y represent the same type set. +func (x *term) equal(y *term) bool { + // easy cases + switch { + case x == nil || y == nil: + return x == y + case x.typ == nil || y.typ == nil: + return x.typ == y.typ + } + // ∅ ⊂ x, y ⊂ 𝓤 + + return x.tilde == y.tilde && Identical(x.typ, y.typ) +} + +// union returns the union x ∪ y: zero, one, or two non-nil terms. +func (x *term) union(y *term) (_, _ *term) { + // easy cases + switch { + case x == nil && y == nil: + return nil, nil // ∅ ∪ ∅ == ∅ + case x == nil: + return y, nil // ∅ ∪ y == y + case y == nil: + return x, nil // x ∪ ∅ == x + case x.typ == nil: + return x, nil // 𝓤 ∪ y == 𝓤 + case y.typ == nil: + return y, nil // x ∪ 𝓤 == 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return x, y // x ∪ y == (x, y) if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ∪ ~t == ~t + // ~t ∪ T == ~t + // T ∪ ~t == ~t + // T ∪ T == T + if x.tilde || !y.tilde { + return x, nil + } + return y, nil +} + +// intersect returns the intersection x ∩ y. 
+func (x *term) intersect(y *term) *term { + // easy cases + switch { + case x == nil || y == nil: + return nil // ∅ ∩ y == ∅ and ∩ ∅ == ∅ + case x.typ == nil: + return y // 𝓤 ∩ y == y + case y.typ == nil: + return x // x ∩ 𝓤 == x + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return nil // x ∩ y == ∅ if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ∩ ~t == ~t + // ~t ∩ T == T + // T ∩ ~t == T + // T ∩ T == T + if !x.tilde || y.tilde { + return x + } + return y +} + +// includes reports whether t ∈ x. +func (x *term) includes(t Type) bool { + // easy cases + switch { + case x == nil: + return false // t ∈ ∅ == false + case x.typ == nil: + return true // t ∈ 𝓤 == true + } + // ∅ ⊂ x ⊂ 𝓤 + + u := t + if x.tilde { + u = under(u) + } + return Identical(x.typ, u) +} + +// subsetOf reports whether x ⊆ y. +func (x *term) subsetOf(y *term) bool { + // easy cases + switch { + case x == nil: + return true // ∅ ⊆ y == true + case y == nil: + return false // x ⊆ ∅ == false since x != ∅ + case y.typ == nil: + return true // x ⊆ 𝓤 == true + case x.typ == nil: + return false // 𝓤 ⊆ y == false since y != 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return false // x ⊆ y == false if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ⊆ ~t == true + // ~t ⊆ T == false + // T ⊆ ~t == true + // T ⊆ T == true + return !x.tilde || y.tilde +} + +// disjoint reports whether x ∩ y == ∅. +// x.typ and y.typ must not be nil. 
+func (x *term) disjoint(y *term) bool { + if debug && (x.typ == nil || y.typ == nil) { + panic("invalid argument(s)") + } + ux := x.typ + if y.tilde { + ux = under(ux) + } + uy := y.typ + if x.tilde { + uy = under(uy) + } + return !Identical(ux, uy) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typeterm_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typeterm_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6d9c8db0348ecffea365522fef9ee1b66ad00483 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typeterm_test.go @@ -0,0 +1,239 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import ( + "strings" + "testing" +) + +var myInt = func() Type { + tname := NewTypeName(nopos, nil, "myInt", nil) + return NewNamed(tname, Typ[Int], nil) +}() + +var testTerms = map[string]*term{ + "∅": nil, + "𝓤": {}, + "int": {false, Typ[Int]}, + "~int": {true, Typ[Int]}, + "string": {false, Typ[String]}, + "~string": {true, Typ[String]}, + "myInt": {false, myInt}, +} + +func TestTermString(t *testing.T) { + for want, x := range testTerms { + if got := x.String(); got != want { + t.Errorf("%v.String() == %v; want %v", x, got, want) + } + } +} + +func split(s string, n int) []string { + r := strings.Split(s, " ") + if len(r) != n { + panic("invalid test case: " + s) + } + return r +} + +func testTerm(name string) *term { + r, ok := testTerms[name] + if !ok { + panic("invalid test argument: " + name) + } + return r +} + +func TestTermEqual(t *testing.T) { + for _, test := range []string{ + "∅ ∅ T", + "𝓤 𝓤 T", + "int int T", + "~int ~int T", + "myInt myInt T", + "∅ 𝓤 F", + "∅ int F", + "∅ ~int F", + "𝓤 int F", + "𝓤 ~int F", + "𝓤 myInt F", + "int ~int F", + "int myInt F", + "~int myInt F", + } { + args := split(test, 3) + x := testTerm(args[0]) + 
y := testTerm(args[1]) + want := args[2] == "T" + if got := x.equal(y); got != want { + t.Errorf("%v.equal(%v) = %v; want %v", x, y, got, want) + } + // equal is symmetric + x, y = y, x + if got := x.equal(y); got != want { + t.Errorf("%v.equal(%v) = %v; want %v", x, y, got, want) + } + } +} + +func TestTermUnion(t *testing.T) { + for _, test := range []string{ + "∅ ∅ ∅ ∅", + "∅ 𝓤 𝓤 ∅", + "∅ int int ∅", + "∅ ~int ~int ∅", + "∅ myInt myInt ∅", + "𝓤 𝓤 𝓤 ∅", + "𝓤 int 𝓤 ∅", + "𝓤 ~int 𝓤 ∅", + "𝓤 myInt 𝓤 ∅", + "int int int ∅", + "int ~int ~int ∅", + "int string int string", + "int ~string int ~string", + "int myInt int myInt", + "~int ~string ~int ~string", + "~int myInt ~int ∅", + + // union is symmetric, but the result order isn't - repeat symmetric cases explicitly + "𝓤 ∅ 𝓤 ∅", + "int ∅ int ∅", + "~int ∅ ~int ∅", + "myInt ∅ myInt ∅", + "int 𝓤 𝓤 ∅", + "~int 𝓤 𝓤 ∅", + "myInt 𝓤 𝓤 ∅", + "~int int ~int ∅", + "string int string int", + "~string int ~string int", + "myInt int myInt int", + "~string ~int ~string ~int", + "myInt ~int ~int ∅", + } { + args := split(test, 4) + x := testTerm(args[0]) + y := testTerm(args[1]) + want1 := testTerm(args[2]) + want2 := testTerm(args[3]) + if got1, got2 := x.union(y); !got1.equal(want1) || !got2.equal(want2) { + t.Errorf("%v.union(%v) = %v, %v; want %v, %v", x, y, got1, got2, want1, want2) + } + } +} + +func TestTermIntersection(t *testing.T) { + for _, test := range []string{ + "∅ ∅ ∅", + "∅ 𝓤 ∅", + "∅ int ∅", + "∅ ~int ∅", + "∅ myInt ∅", + "𝓤 𝓤 𝓤", + "𝓤 int int", + "𝓤 ~int ~int", + "𝓤 myInt myInt", + "int int int", + "int ~int int", + "int string ∅", + "int ~string ∅", + "int string ∅", + "~int ~string ∅", + "~int myInt myInt", + } { + args := split(test, 3) + x := testTerm(args[0]) + y := testTerm(args[1]) + want := testTerm(args[2]) + if got := x.intersect(y); !got.equal(want) { + t.Errorf("%v.intersect(%v) = %v; want %v", x, y, got, want) + } + // intersect is symmetric + x, y = y, x + if got := x.intersect(y); !got.equal(want) { 
+ t.Errorf("%v.intersect(%v) = %v; want %v", x, y, got, want) + } + } +} + +func TestTermIncludes(t *testing.T) { + for _, test := range []string{ + "∅ int F", + "𝓤 int T", + "int int T", + "~int int T", + "~int myInt T", + "string int F", + "~string int F", + "myInt int F", + } { + args := split(test, 3) + x := testTerm(args[0]) + y := testTerm(args[1]).typ + want := args[2] == "T" + if got := x.includes(y); got != want { + t.Errorf("%v.includes(%v) = %v; want %v", x, y, got, want) + } + } +} + +func TestTermSubsetOf(t *testing.T) { + for _, test := range []string{ + "∅ ∅ T", + "𝓤 𝓤 T", + "int int T", + "~int ~int T", + "myInt myInt T", + "∅ 𝓤 T", + "∅ int T", + "∅ ~int T", + "∅ myInt T", + "𝓤 int F", + "𝓤 ~int F", + "𝓤 myInt F", + "int ~int T", + "int myInt F", + "~int myInt F", + "myInt int F", + "myInt ~int T", + } { + args := split(test, 3) + x := testTerm(args[0]) + y := testTerm(args[1]) + want := args[2] == "T" + if got := x.subsetOf(y); got != want { + t.Errorf("%v.subsetOf(%v) = %v; want %v", x, y, got, want) + } + } +} + +func TestTermDisjoint(t *testing.T) { + for _, test := range []string{ + "int int F", + "~int ~int F", + "int ~int F", + "int string T", + "int ~string T", + "int myInt T", + "~int ~string T", + "~int myInt F", + "string myInt T", + "~string myInt T", + } { + args := split(test, 3) + x := testTerm(args[0]) + y := testTerm(args[1]) + want := args[2] == "T" + if got := x.disjoint(y); got != want { + t.Errorf("%v.disjoint(%v) = %v; want %v", x, y, got, want) + } + // disjoint is symmetric + x, y = y, x + if got := x.disjoint(y); got != want { + t.Errorf("%v.disjoint(%v) = %v; want %v", x, y, got, want) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typexpr.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typexpr.go new file mode 100644 index 0000000000000000000000000000000000000000..81adcbd9cfd0105d26eff12c3bbdcf24d2780148 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/typexpr.go @@ -0,0 +1,551 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements type-checking of identifiers and type expressions. + +package types2 + +import ( + "cmd/compile/internal/syntax" + "fmt" + "go/constant" + . "internal/types/errors" + "strings" +) + +// ident type-checks identifier e and initializes x with the value or type of e. +// If an error occurred, x.mode is set to invalid. +// For the meaning of def, see Checker.definedType, below. +// If wantType is set, the identifier e is expected to denote a type. +func (check *Checker) ident(x *operand, e *syntax.Name, def *TypeName, wantType bool) { + x.mode = invalid + x.expr = e + + // Note that we cannot use check.lookup here because the returned scope + // may be different from obj.Parent(). See also Scope.LookupParent doc. + scope, obj := check.scope.LookupParent(e.Value, check.pos) + switch obj { + case nil: + if e.Value == "_" { + // Blank identifiers are never declared, but the current identifier may + // be a placeholder for a receiver type parameter. In this case we can + // resolve its type and object from Checker.recvTParamMap. + if tpar := check.recvTParamMap[e]; tpar != nil { + x.mode = typexpr + x.typ = tpar + } else { + check.error(e, InvalidBlank, "cannot use _ as value or type") + } + } else { + check.errorf(e, UndeclaredName, "undefined: %s", e.Value) + } + return + case universeAny, universeComparable: + if !check.verifyVersionf(e, go1_18, "predeclared %s", e.Value) { + return // avoid follow-on errors + } + } + check.recordUse(e, obj) + + // Type-check the object. 
+ // Only call Checker.objDecl if the object doesn't have a type yet + // (in which case we must actually determine it) or the object is a + // TypeName and we also want a type (in which case we might detect + // a cycle which needs to be reported). Otherwise we can skip the + // call and avoid a possible cycle error in favor of the more + // informative "not a type/value" error that this function's caller + // will issue (see go.dev/issue/25790). + typ := obj.Type() + if _, gotType := obj.(*TypeName); typ == nil || gotType && wantType { + check.objDecl(obj, def) + typ = obj.Type() // type must have been assigned by Checker.objDecl + } + assert(typ != nil) + + // The object may have been dot-imported. + // If so, mark the respective package as used. + // (This code is only needed for dot-imports. Without them, + // we only have to mark variables, see *Var case below). + if pkgName := check.dotImportMap[dotImportKey{scope, obj.Name()}]; pkgName != nil { + pkgName.used = true + } + + switch obj := obj.(type) { + case *PkgName: + check.errorf(e, InvalidPkgUse, "use of package %s not in selector", obj.name) + return + + case *Const: + check.addDeclDep(obj) + if !isValid(typ) { + return + } + if obj == universeIota { + if check.iota == nil { + check.error(e, InvalidIota, "cannot use iota outside constant declaration") + return + } + x.val = check.iota + } else { + x.val = obj.val + } + assert(x.val != nil) + x.mode = constant_ + + case *TypeName: + if !check.enableAlias && check.isBrokenAlias(obj) { + check.errorf(e, InvalidDeclCycle, "invalid use of type alias %s in recursive type (see go.dev/issue/50729)", obj.name) + return + } + x.mode = typexpr + + case *Var: + // It's ok to mark non-local variables, but ignore variables + // from other packages to avoid potential race conditions with + // dot-imported variables. 
+ if obj.pkg == check.pkg { + obj.used = true + } + check.addDeclDep(obj) + if !isValid(typ) { + return + } + x.mode = variable + + case *Func: + check.addDeclDep(obj) + x.mode = value + + case *Builtin: + x.id = obj.id + x.mode = builtin + + case *Nil: + x.mode = nilvalue + + default: + unreachable() + } + + x.typ = typ +} + +// typ type-checks the type expression e and returns its type, or Typ[Invalid]. +// The type must not be an (uninstantiated) generic type. +func (check *Checker) typ(e syntax.Expr) Type { + return check.definedType(e, nil) +} + +// varType type-checks the type expression e and returns its type, or Typ[Invalid]. +// The type must not be an (uninstantiated) generic type and it must not be a +// constraint interface. +func (check *Checker) varType(e syntax.Expr) Type { + typ := check.definedType(e, nil) + check.validVarType(e, typ) + return typ +} + +// validVarType reports an error if typ is a constraint interface. +// The expression e is used for error reporting, if any. +func (check *Checker) validVarType(e syntax.Expr, typ Type) { + // If we have a type parameter there's nothing to do. + if isTypeParam(typ) { + return + } + + // We don't want to call under() or complete interfaces while we are in + // the middle of type-checking parameter declarations that might belong + // to interface methods. Delay this check to the end of type-checking. + check.later(func() { + if t, _ := under(typ).(*Interface); t != nil { + pos := syntax.StartPos(e) + tset := computeInterfaceTypeSet(check, pos, t) // TODO(gri) is this the correct position? 
+ if !tset.IsMethodSet() { + if tset.comparable { + check.softErrorf(pos, MisplacedConstraintIface, "cannot use type %s outside a type constraint: interface is (or embeds) comparable", typ) + } else { + check.softErrorf(pos, MisplacedConstraintIface, "cannot use type %s outside a type constraint: interface contains type constraints", typ) + } + } + } + }).describef(e, "check var type %s", typ) +} + +// definedType is like typ but also accepts a type name def. +// If def != nil, e is the type specification for the type named def, declared +// in a type declaration, and def.typ.underlying will be set to the type of e +// before any components of e are type-checked. +func (check *Checker) definedType(e syntax.Expr, def *TypeName) Type { + typ := check.typInternal(e, def) + assert(isTyped(typ)) + if isGeneric(typ) { + check.errorf(e, WrongTypeArgCount, "cannot use generic type %s without instantiation", typ) + typ = Typ[Invalid] + } + check.recordTypeAndValue(e, typexpr, typ, nil) + return typ +} + +// genericType is like typ but the type must be an (uninstantiated) generic +// type. If cause is non-nil and the type expression was a valid type but not +// generic, cause will be populated with a message describing the error. +func (check *Checker) genericType(e syntax.Expr, cause *string) Type { + typ := check.typInternal(e, nil) + assert(isTyped(typ)) + if isValid(typ) && !isGeneric(typ) { + if cause != nil { + *cause = check.sprintf("%s is not a generic type", typ) + } + typ = Typ[Invalid] + } + // TODO(gri) what is the correct call below? + check.recordTypeAndValue(e, typexpr, typ, nil) + return typ +} + +// goTypeName returns the Go type name for typ and +// removes any occurrences of "types2." from that name. +func goTypeName(typ Type) string { + return strings.ReplaceAll(fmt.Sprintf("%T", typ), "types2.", "") +} + +// typInternal drives type checking of types. +// Must only be called by definedType or genericType. 
+func (check *Checker) typInternal(e0 syntax.Expr, def *TypeName) (T Type) { + if check.conf.Trace { + check.trace(e0.Pos(), "-- type %s", e0) + check.indent++ + defer func() { + check.indent-- + var under Type + if T != nil { + // Calling under() here may lead to endless instantiations. + // Test case: type T[P any] *T[P] + under = safeUnderlying(T) + } + if T == under { + check.trace(e0.Pos(), "=> %s // %s", T, goTypeName(T)) + } else { + check.trace(e0.Pos(), "=> %s (under = %s) // %s", T, under, goTypeName(T)) + } + }() + } + + switch e := e0.(type) { + case *syntax.BadExpr: + // ignore - error reported before + + case *syntax.Name: + var x operand + check.ident(&x, e, def, true) + + switch x.mode { + case typexpr: + typ := x.typ + setDefType(def, typ) + return typ + case invalid: + // ignore - error reported before + case novalue: + check.errorf(&x, NotAType, "%s used as type", &x) + default: + check.errorf(&x, NotAType, "%s is not a type", &x) + } + + case *syntax.SelectorExpr: + var x operand + check.selector(&x, e, def, true) + + switch x.mode { + case typexpr: + typ := x.typ + setDefType(def, typ) + return typ + case invalid: + // ignore - error reported before + case novalue: + check.errorf(&x, NotAType, "%s used as type", &x) + default: + check.errorf(&x, NotAType, "%s is not a type", &x) + } + + case *syntax.IndexExpr: + check.verifyVersionf(e, go1_18, "type instantiation") + return check.instantiatedType(e.X, syntax.UnpackListExpr(e.Index), def) + + case *syntax.ParenExpr: + // Generic types must be instantiated before they can be used in any form. + // Consequently, generic types cannot be parenthesized. + return check.definedType(e.X, def) + + case *syntax.ArrayType: + typ := new(Array) + setDefType(def, typ) + if e.Len != nil { + typ.len = check.arrayLength(e.Len) + } else { + // [...]array + check.error(e, BadDotDotDotSyntax, "invalid use of [...] 
array (outside a composite literal)") + typ.len = -1 + } + typ.elem = check.varType(e.Elem) + if typ.len >= 0 { + return typ + } + // report error if we encountered [...] + + case *syntax.SliceType: + typ := new(Slice) + setDefType(def, typ) + typ.elem = check.varType(e.Elem) + return typ + + case *syntax.DotsType: + // dots are handled explicitly where they are legal + // (array composite literals and parameter lists) + check.error(e, InvalidDotDotDot, "invalid use of '...'") + check.use(e.Elem) + + case *syntax.StructType: + typ := new(Struct) + setDefType(def, typ) + check.structType(typ, e) + return typ + + case *syntax.Operation: + if e.Op == syntax.Mul && e.Y == nil { + typ := new(Pointer) + typ.base = Typ[Invalid] // avoid nil base in invalid recursive type declaration + setDefType(def, typ) + typ.base = check.varType(e.X) + // If typ.base is invalid, it's unlikely that *base is particularly + // useful - even a valid dereferenciation will lead to an invalid + // type again, and in some cases we get unexpected follow-on errors + // (e.g., go.dev/issue/49005). Return an invalid type instead. + if !isValid(typ.base) { + return Typ[Invalid] + } + return typ + } + + check.errorf(e0, NotAType, "%s is not a type", e0) + check.use(e0) + + case *syntax.FuncType: + typ := new(Signature) + setDefType(def, typ) + check.funcType(typ, nil, nil, e) + return typ + + case *syntax.InterfaceType: + typ := check.newInterface() + setDefType(def, typ) + check.interfaceType(typ, e, def) + return typ + + case *syntax.MapType: + typ := new(Map) + setDefType(def, typ) + + typ.key = check.varType(e.Key) + typ.elem = check.varType(e.Value) + + // spec: "The comparison operators == and != must be fully defined + // for operands of the key type; thus the key type must not be a + // function, map, or slice." + // + // Delay this check because it requires fully setup types; + // it is safe to continue in any case (was go.dev/issue/6667). 
+ check.later(func() { + if !Comparable(typ.key) { + var why string + if isTypeParam(typ.key) { + why = " (missing comparable constraint)" + } + check.errorf(e.Key, IncomparableMapKey, "invalid map key type %s%s", typ.key, why) + } + }).describef(e.Key, "check map key %s", typ.key) + + return typ + + case *syntax.ChanType: + typ := new(Chan) + setDefType(def, typ) + + dir := SendRecv + switch e.Dir { + case 0: + // nothing to do + case syntax.SendOnly: + dir = SendOnly + case syntax.RecvOnly: + dir = RecvOnly + default: + check.errorf(e, InvalidSyntaxTree, "unknown channel direction %d", e.Dir) + // ok to continue + } + + typ.dir = dir + typ.elem = check.varType(e.Elem) + return typ + + default: + check.errorf(e0, NotAType, "%s is not a type", e0) + check.use(e0) + } + + typ := Typ[Invalid] + setDefType(def, typ) + return typ +} + +func setDefType(def *TypeName, typ Type) { + if def != nil { + switch t := def.typ.(type) { + case *Alias: + // t.fromRHS should always be set, either to an invalid type + // in the beginning, or to typ in certain cyclic declarations. + if t.fromRHS != Typ[Invalid] && t.fromRHS != typ { + panic(sprintf(nil, true, "t.fromRHS = %s, typ = %s\n", t.fromRHS, typ)) + } + t.fromRHS = typ + case *Basic: + assert(t == Typ[Invalid]) + case *Named: + t.underlying = typ + default: + panic(fmt.Sprintf("unexpected type %T", t)) + } + } +} + +func (check *Checker) instantiatedType(x syntax.Expr, xlist []syntax.Expr, def *TypeName) (res Type) { + if check.conf.Trace { + check.trace(x.Pos(), "-- instantiating type %s with %s", x, xlist) + check.indent++ + defer func() { + check.indent-- + // Don't format the underlying here. It will always be nil. 
+ check.trace(x.Pos(), "=> %s", res) + }() + } + + var cause string + gtyp := check.genericType(x, &cause) + if cause != "" { + check.errorf(x, NotAGenericType, invalidOp+"%s%s (%s)", x, xlist, cause) + } + if !isValid(gtyp) { + return gtyp // error already reported + } + + orig := asNamed(gtyp) + if orig == nil { + panic(fmt.Sprintf("%v: cannot instantiate %v", x.Pos(), gtyp)) + } + + // evaluate arguments + targs := check.typeList(xlist) + if targs == nil { + setDefType(def, Typ[Invalid]) // avoid errors later due to lazy instantiation + return Typ[Invalid] + } + + // create the instance + inst := asNamed(check.instance(x.Pos(), orig, targs, nil, check.context())) + setDefType(def, inst) + + // orig.tparams may not be set up, so we need to do expansion later. + check.later(func() { + // This is an instance from the source, not from recursive substitution, + // and so it must be resolved during type-checking so that we can report + // errors. + check.recordInstance(x, inst.TypeArgs().list(), inst) + + if check.validateTArgLen(x.Pos(), inst.obj.name, inst.TypeParams().Len(), inst.TypeArgs().Len()) { + if i, err := check.verify(x.Pos(), inst.TypeParams().list(), inst.TypeArgs().list(), check.context()); err != nil { + // best position for error reporting + pos := x.Pos() + if i < len(xlist) { + pos = syntax.StartPos(xlist[i]) + } + check.softErrorf(pos, InvalidTypeArg, "%s", err) + } else { + check.mono.recordInstance(check.pkg, x.Pos(), inst.TypeParams().list(), inst.TypeArgs().list(), xlist) + } + } + + // TODO(rfindley): remove this call: we don't need to call validType here, + // as cycles can only occur for types used inside a Named type declaration, + // and so it suffices to call validType from declared types. 
+ check.validType(inst) + }).describef(x, "resolve instance %s", inst) + + return inst +} + +// arrayLength type-checks the array length expression e +// and returns the constant length >= 0, or a value < 0 +// to indicate an error (and thus an unknown length). +func (check *Checker) arrayLength(e syntax.Expr) int64 { + // If e is an identifier, the array declaration might be an + // attempt at a parameterized type declaration with missing + // constraint. Provide an error message that mentions array + // length. + if name, _ := e.(*syntax.Name); name != nil { + obj := check.lookup(name.Value) + if obj == nil { + check.errorf(name, InvalidArrayLen, "undefined array length %s or missing type constraint", name.Value) + return -1 + } + if _, ok := obj.(*Const); !ok { + check.errorf(name, InvalidArrayLen, "invalid array length %s", name.Value) + return -1 + } + } + + var x operand + check.expr(nil, &x, e) + if x.mode != constant_ { + if x.mode != invalid { + check.errorf(&x, InvalidArrayLen, "array length %s must be constant", &x) + } + return -1 + } + + if isUntyped(x.typ) || isInteger(x.typ) { + if val := constant.ToInt(x.val); val.Kind() == constant.Int { + if representableConst(val, check, Typ[Int], nil) { + if n, ok := constant.Int64Val(val); ok && n >= 0 { + return n + } + } + } + } + + var msg string + if isInteger(x.typ) { + msg = "invalid array length %s" + } else { + msg = "array length %s must be integer" + } + check.errorf(&x, InvalidArrayLen, msg, &x) + return -1 +} + +// typeList provides the list of types corresponding to the incoming expression list. +// If an error occurred, the result is nil, but all list elements were type-checked. 
+func (check *Checker) typeList(list []syntax.Expr) []Type { + res := make([]Type, len(list)) // res != nil even if len(list) == 0 + for i, x := range list { + t := check.varType(x) + if !isValid(t) { + res = nil + } + if res != nil { + res[i] = t + } + } + return res +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/under.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/under.go new file mode 100644 index 0000000000000000000000000000000000000000..6b24399de43f0ec49d911653991fa55a6586b009 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/under.go @@ -0,0 +1,114 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +// under returns the true expanded underlying type. +// If it doesn't exist, the result is Typ[Invalid]. +// under must only be called when a type is known +// to be fully set up. +func under(t Type) Type { + if t := asNamed(t); t != nil { + return t.under() + } + return t.Underlying() +} + +// If t is not a type parameter, coreType returns the underlying type. +// If t is a type parameter, coreType returns the single underlying +// type of all types in its type set if it exists, or nil otherwise. If the +// type set contains only unrestricted and restricted channel types (with +// identical element types), the single underlying type is the restricted +// channel type if the restrictions are always the same, or nil otherwise. 
+func coreType(t Type) Type { + tpar, _ := t.(*TypeParam) + if tpar == nil { + return under(t) + } + + var su Type + if tpar.underIs(func(u Type) bool { + if u == nil { + return false + } + if su != nil { + u = match(su, u) + if u == nil { + return false + } + } + // su == nil || match(su, u) != nil + su = u + return true + }) { + return su + } + return nil +} + +// coreString is like coreType but also considers []byte +// and strings as identical. In this case, if successful and we saw +// a string, the result is of type (possibly untyped) string. +func coreString(t Type) Type { + tpar, _ := t.(*TypeParam) + if tpar == nil { + return under(t) // string or untyped string + } + + var su Type + hasString := false + if tpar.underIs(func(u Type) bool { + if u == nil { + return false + } + if isString(u) { + u = NewSlice(universeByte) + hasString = true + } + if su != nil { + u = match(su, u) + if u == nil { + return false + } + } + // su == nil || match(su, u) != nil + su = u + return true + }) { + if hasString { + return Typ[String] + } + return su + } + return nil +} + +// If x and y are identical, match returns x. +// If x and y are identical channels but for their direction +// and one of them is unrestricted, match returns the channel +// with the restricted direction. +// In all other cases, match returns nil. +func match(x, y Type) Type { + // Common case: we don't have channels. + if Identical(x, y) { + return x + } + + // We may have channels that differ in direction only. + if x, _ := x.(*Chan); x != nil { + if y, _ := y.(*Chan); y != nil && Identical(x.elem, y.elem) { + // We have channels that differ in direction only. + // If there's an unrestricted channel, select the restricted one. 
+ switch { + case x.dir == SendRecv: + return y + case y.dir == SendRecv: + return x + } + } + } + + // types are different + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/unify.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/unify.go new file mode 100644 index 0000000000000000000000000000000000000000..8218939b6834771987ef77e2eaa5c7e82c662f66 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/unify.go @@ -0,0 +1,796 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements type unification. +// +// Type unification attempts to make two types x and y structurally +// equivalent by determining the types for a given list of (bound) +// type parameters which may occur within x and y. If x and y are +// structurally different (say []T vs chan T), or conflicting +// types are determined for type parameters, unification fails. +// If unification succeeds, as a side-effect, the types of the +// bound type parameters may be determined. +// +// Unification typically requires multiple calls u.unify(x, y) to +// a given unifier u, with various combinations of types x and y. +// In each call, additional type parameter types may be determined +// as a side effect and recorded in u. +// If a call fails (returns false), unification fails. +// +// In the unification context, structural equivalence of two types +// ignores the difference between a defined type and its underlying +// type if one type is a defined type and the other one is not. +// It also ignores the difference between an (external, unbound) +// type parameter and its core type. +// If two types are not structurally equivalent, they cannot be Go +// identical types. 
On the other hand, if they are structurally +// equivalent, they may be Go identical or at least assignable, or +// they may be in the type set of a constraint. +// Whether they indeed are identical or assignable is determined +// upon instantiation and function argument passing. + +package types2 + +import ( + "bytes" + "fmt" + "sort" + "strings" +) + +const ( + // Upper limit for recursion depth. Used to catch infinite recursions + // due to implementation issues (e.g., see issues go.dev/issue/48619, go.dev/issue/48656). + unificationDepthLimit = 50 + + // Whether to panic when unificationDepthLimit is reached. + // If disabled, a recursion depth overflow results in a (quiet) + // unification failure. + panicAtUnificationDepthLimit = true + + // If enableCoreTypeUnification is set, unification will consider + // the core types, if any, of non-local (unbound) type parameters. + enableCoreTypeUnification = true + + // If traceInference is set, unification will print a trace of its operation. + // Interpretation of trace: + // x ≡ y attempt to unify types x and y + // p ➞ y type parameter p is set to type y (p is inferred to be y) + // p ⇄ q type parameters p and q match (p is inferred to be q and vice versa) + // x ≢ y types x and y cannot be unified + // [p, q, ...] ➞ [x, y, ...] mapping from type parameters to types + traceInference = false +) + +// A unifier maintains a list of type parameters and +// corresponding types inferred for each type parameter. +// A unifier is created by calling newUnifier. +type unifier struct { + // handles maps each type parameter to its inferred type through + // an indirection *Type called (inferred type) "handle". + // Initially, each type parameter has its own, separate handle, + // with a nil (i.e., not yet inferred) type. + // After a type parameter P is unified with a type parameter Q, + // P and Q share the same handle (and thus type). 
This ensures + // that inferring the type for a given type parameter P will + // automatically infer the same type for all other parameters + // unified (joined) with P. + handles map[*TypeParam]*Type + depth int // recursion depth during unification + enableInterfaceInference bool // use shared methods for better inference +} + +// newUnifier returns a new unifier initialized with the given type parameter +// and corresponding type argument lists. The type argument list may be shorter +// than the type parameter list, and it may contain nil types. Matching type +// parameters and arguments must have the same index. +func newUnifier(tparams []*TypeParam, targs []Type, enableInterfaceInference bool) *unifier { + assert(len(tparams) >= len(targs)) + handles := make(map[*TypeParam]*Type, len(tparams)) + // Allocate all handles up-front: in a correct program, all type parameters + // must be resolved and thus eventually will get a handle. + // Also, sharing of handles caused by unified type parameters is rare and + // so it's ok to not optimize for that case (and delay handle allocation). + for i, x := range tparams { + var t Type + if i < len(targs) { + t = targs[i] + } + handles[x] = &t + } + return &unifier{handles, 0, enableInterfaceInference} +} + +// unifyMode controls the behavior of the unifier. +type unifyMode uint + +const ( + // If assign is set, we are unifying types involved in an assignment: + // they may match inexactly at the top, but element types must match + // exactly. + assign unifyMode = 1 << iota + + // If exact is set, types unify if they are identical (or can be + // made identical with suitable arguments for type parameters). + // Otherwise, a named type and a type literal unify if their + // underlying types unify, channel directions are ignored, and + // if there is an interface, the other type must implement the + // interface. 
+ exact +) + +func (m unifyMode) String() string { + switch m { + case 0: + return "inexact" + case assign: + return "assign" + case exact: + return "exact" + case assign | exact: + return "assign, exact" + } + return fmt.Sprintf("mode %d", m) +} + +// unify attempts to unify x and y and reports whether it succeeded. +// As a side-effect, types may be inferred for type parameters. +// The mode parameter controls how types are compared. +func (u *unifier) unify(x, y Type, mode unifyMode) bool { + return u.nify(x, y, mode, nil) +} + +func (u *unifier) tracef(format string, args ...interface{}) { + fmt.Println(strings.Repeat(". ", u.depth) + sprintf(nil, true, format, args...)) +} + +// String returns a string representation of the current mapping +// from type parameters to types. +func (u *unifier) String() string { + // sort type parameters for reproducible strings + tparams := make(typeParamsById, len(u.handles)) + i := 0 + for tpar := range u.handles { + tparams[i] = tpar + i++ + } + sort.Sort(tparams) + + var buf bytes.Buffer + w := newTypeWriter(&buf, nil) + w.byte('[') + for i, x := range tparams { + if i > 0 { + w.string(", ") + } + w.typ(x) + w.string(": ") + w.typ(u.at(x)) + } + w.byte(']') + return buf.String() +} + +type typeParamsById []*TypeParam + +func (s typeParamsById) Len() int { return len(s) } +func (s typeParamsById) Less(i, j int) bool { return s[i].id < s[j].id } +func (s typeParamsById) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// join unifies the given type parameters x and y. +// If both type parameters already have a type associated with them +// and they are not joined, join fails and returns false. +func (u *unifier) join(x, y *TypeParam) bool { + if traceInference { + u.tracef("%s ⇄ %s", x, y) + } + switch hx, hy := u.handles[x], u.handles[y]; { + case hx == hy: + // Both type parameters already share the same handle. Nothing to do. + case *hx != nil && *hy != nil: + // Both type parameters have (possibly different) inferred types. 
Cannot join. + return false + case *hx != nil: + // Only type parameter x has an inferred type. Use handle of x. + u.setHandle(y, hx) + // This case is treated like the default case. + // case *hy != nil: + // // Only type parameter y has an inferred type. Use handle of y. + // u.setHandle(x, hy) + default: + // Neither type parameter has an inferred type. Use handle of y. + u.setHandle(x, hy) + } + return true +} + +// asTypeParam returns x.(*TypeParam) if x is a type parameter recorded with u. +// Otherwise, the result is nil. +func (u *unifier) asTypeParam(x Type) *TypeParam { + if x, _ := x.(*TypeParam); x != nil { + if _, found := u.handles[x]; found { + return x + } + } + return nil +} + +// setHandle sets the handle for type parameter x +// (and all its joined type parameters) to h. +func (u *unifier) setHandle(x *TypeParam, h *Type) { + hx := u.handles[x] + assert(hx != nil) + for y, hy := range u.handles { + if hy == hx { + u.handles[y] = h + } + } +} + +// at returns the (possibly nil) type for type parameter x. +func (u *unifier) at(x *TypeParam) Type { + return *u.handles[x] +} + +// set sets the type t for type parameter x; +// t must not be nil. +func (u *unifier) set(x *TypeParam, t Type) { + assert(t != nil) + if traceInference { + u.tracef("%s ➞ %s", x, t) + } + *u.handles[x] = t +} + +// unknowns returns the number of type parameters for which no type has been set yet. +func (u *unifier) unknowns() int { + n := 0 + for _, h := range u.handles { + if *h == nil { + n++ + } + } + return n +} + +// inferred returns the list of inferred types for the given type parameter list. +// The result is never nil and has the same length as tparams; result types that +// could not be inferred are nil. Corresponding type parameters and result types +// have identical indices. 
+func (u *unifier) inferred(tparams []*TypeParam) []Type { + list := make([]Type, len(tparams)) + for i, x := range tparams { + list[i] = u.at(x) + } + return list +} + +// asInterface returns the underlying type of x as an interface if +// it is a non-type parameter interface. Otherwise it returns nil. +func asInterface(x Type) (i *Interface) { + if _, ok := x.(*TypeParam); !ok { + i, _ = under(x).(*Interface) + } + return i +} + +// nify implements the core unification algorithm which is an +// adapted version of Checker.identical. For changes to that +// code the corresponding changes should be made here. +// Must not be called directly from outside the unifier. +func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) { + u.depth++ + if traceInference { + u.tracef("%s ≡ %s\t// %s", x, y, mode) + } + defer func() { + if traceInference && !result { + u.tracef("%s ≢ %s", x, y) + } + u.depth-- + }() + + x = Unalias(x) + y = Unalias(y) + + // nothing to do if x == y + if x == y { + return true + } + + // Stop gap for cases where unification fails. + if u.depth > unificationDepthLimit { + if traceInference { + u.tracef("depth %d >= %d", u.depth, unificationDepthLimit) + } + if panicAtUnificationDepthLimit { + panic("unification reached recursion depth limit") + } + return false + } + + // Unification is symmetric, so we can swap the operands. + // Ensure that if we have at least one + // - defined type, make sure one is in y + // - type parameter recorded with u, make sure one is in x + if asNamed(x) != nil || u.asTypeParam(y) != nil { + if traceInference { + u.tracef("%s ≡ %s\t// swap", y, x) + } + x, y = y, x + } + + // Unification will fail if we match a defined type against a type literal. + // If we are matching types in an assignment, at the top-level, types with + // the same type structure are permitted as long as at least one of them + // is not a defined type. 
To accommodate for that possibility, we continue + // unification with the underlying type of a defined type if the other type + // is a type literal. This is controlled by the exact unification mode. + // We also continue if the other type is a basic type because basic types + // are valid underlying types and may appear as core types of type constraints. + // If we exclude them, inferred defined types for type parameters may not + // match against the core types of their constraints (even though they might + // correctly match against some of the types in the constraint's type set). + // Finally, if unification (incorrectly) succeeds by matching the underlying + // type of a defined type against a basic type (because we include basic types + // as type literals here), and if that leads to an incorrectly inferred type, + // we will fail at function instantiation or argument assignment time. + // + // If we have at least one defined type, there is one in y. + if ny := asNamed(y); mode&exact == 0 && ny != nil && isTypeLit(x) && !(u.enableInterfaceInference && IsInterface(x)) { + if traceInference { + u.tracef("%s ≡ under %s", x, ny) + } + y = ny.under() + // Per the spec, a defined type cannot have an underlying type + // that is a type parameter. + assert(!isTypeParam(y)) + // x and y may be identical now + if x == y { + return true + } + } + + // Cases where at least one of x or y is a type parameter recorded with u. + // If we have at least one type parameter, there is one in x. + // If we have exactly one type parameter, because it is in x, + // isTypeLit(x) is false and y was not changed above. In other + // words, if y was a defined type, it is still a defined type + // (relevant for the logic below). 
+ switch px, py := u.asTypeParam(x), u.asTypeParam(y); { + case px != nil && py != nil: + // both x and y are type parameters + if u.join(px, py) { + return true + } + // both x and y have an inferred type - they must match + return u.nify(u.at(px), u.at(py), mode, p) + + case px != nil: + // x is a type parameter, y is not + if x := u.at(px); x != nil { + // x has an inferred type which must match y + if u.nify(x, y, mode, p) { + // We have a match, possibly through underlying types. + xi := asInterface(x) + yi := asInterface(y) + xn := asNamed(x) != nil + yn := asNamed(y) != nil + // If we have two interfaces, what to do depends on + // whether they are named and their method sets. + if xi != nil && yi != nil { + // Both types are interfaces. + // If both types are defined types, they must be identical + // because unification doesn't know which type has the "right" name. + if xn && yn { + return Identical(x, y) + } + // In all other cases, the method sets must match. + // The types unified so we know that corresponding methods + // match and we can simply compare the number of methods. + // TODO(gri) We may be able to relax this rule and select + // the more general interface. But if one of them is a defined + // type, it's not clear how to choose and whether we introduce + // an order dependency or not. Requiring the same method set + // is conservative. + if len(xi.typeSet().methods) != len(yi.typeSet().methods) { + return false + } + } else if xi != nil || yi != nil { + // One but not both of them are interfaces. + // In this case, either x or y could be viable matches for the corresponding + // type parameter, which means choosing either introduces an order dependence. + // Therefore, we must fail unification (go.dev/issue/60933). + return false + } + // If we have inexact unification and one of x or y is a defined type, select the + // defined type. 
This ensures that in a series of types, all matching against the + // same type parameter, we infer a defined type if there is one, independent of + // order. Type inference or assignment may fail, which is ok. + // Selecting a defined type, if any, ensures that we don't lose the type name; + // and since we have inexact unification, a value of equally named or matching + // undefined type remains assignable (go.dev/issue/43056). + // + // Similarly, if we have inexact unification and there are no defined types but + // channel types, select a directed channel, if any. This ensures that in a series + // of unnamed types, all matching against the same type parameter, we infer the + // directed channel if there is one, independent of order. + // Selecting a directional channel, if any, ensures that a value of another + // inexactly unifying channel type remains assignable (go.dev/issue/62157). + // + // If we have multiple defined channel types, they are either identical or we + // have assignment conflicts, so we can ignore directionality in this case. + // + // If we have defined and literal channel types, a defined type wins to avoid + // order dependencies. + if mode&exact == 0 { + switch { + case xn: + // x is a defined type: nothing to do. + case yn: + // x is not a defined type and y is a defined type: select y. + u.set(px, y) + default: + // Neither x nor y are defined types. + if yc, _ := under(y).(*Chan); yc != nil && yc.dir != SendRecv { + // y is a directed channel type: select y. + u.set(px, y) + } + } + } + return true + } + return false + } + // otherwise, infer type from y + u.set(px, y) + return true + } + + // x != y if we get here + assert(x != y) + + // If u.EnableInterfaceInference is set and we don't require exact unification, + // if both types are interfaces, one interface must have a subset of the + // methods of the other and corresponding method signatures must unify. 
+ // If only one type is an interface, all its methods must be present in the + // other type and corresponding method signatures must unify. + if u.enableInterfaceInference && mode&exact == 0 { + // One or both interfaces may be defined types. + // Look under the name, but not under type parameters (go.dev/issue/60564). + xi := asInterface(x) + yi := asInterface(y) + // If we have two interfaces, check the type terms for equivalence, + // and unify common methods if possible. + if xi != nil && yi != nil { + xset := xi.typeSet() + yset := yi.typeSet() + if xset.comparable != yset.comparable { + return false + } + // For now we require terms to be equal. + // We should be able to relax this as well, eventually. + if !xset.terms.equal(yset.terms) { + return false + } + // Interface types are the only types where cycles can occur + // that are not "terminated" via named types; and such cycles + // can only be created via method parameter types that are + // anonymous interfaces (directly or indirectly) embedding + // the current interface. Example: + // + // type T interface { + // m() interface{T} + // } + // + // If two such (differently named) interfaces are compared, + // endless recursion occurs if the cycle is not detected. + // + // If x and y were compared before, they must be equal + // (if they were not, the recursion would have stopped); + // search the ifacePair stack for the same pair. + // + // This is a quadratic algorithm, but in practice these stacks + // are extremely short (bounded by the nesting depth of interface + // type declarations that recur via parameter types, an extremely + // rare occurrence). An alternative implementation might use a + // "visited" map, but that is probably less efficient overall. 
+ q := &ifacePair{xi, yi, p} + for p != nil { + if p.identical(q) { + return true // same pair was compared before + } + p = p.prev + } + // The method set of x must be a subset of the method set + // of y or vice versa, and the common methods must unify. + xmethods := xset.methods + ymethods := yset.methods + // The smaller method set must be the subset, if it exists. + if len(xmethods) > len(ymethods) { + xmethods, ymethods = ymethods, xmethods + } + // len(xmethods) <= len(ymethods) + // Collect the ymethods in a map for quick lookup. + ymap := make(map[string]*Func, len(ymethods)) + for _, ym := range ymethods { + ymap[ym.Id()] = ym + } + // All xmethods must exist in ymethods and corresponding signatures must unify. + for _, xm := range xmethods { + if ym := ymap[xm.Id()]; ym == nil || !u.nify(xm.typ, ym.typ, exact, p) { + return false + } + } + return true + } + + // We don't have two interfaces. If we have one, make sure it's in xi. + if yi != nil { + xi = yi + y = x + } + + // If we have one interface, at a minimum each of the interface methods + // must be implemented and thus unify with a corresponding method from + // the non-interface type, otherwise unification fails. + if xi != nil { + // All xi methods must exist in y and corresponding signatures must unify. + xmethods := xi.typeSet().methods + for _, xm := range xmethods { + obj, _, _ := LookupFieldOrMethod(y, false, xm.pkg, xm.name) + if ym, _ := obj.(*Func); ym == nil || !u.nify(xm.typ, ym.typ, exact, p) { + return false + } + } + return true + } + } + + // Unless we have exact unification, neither x nor y are interfaces now. + // Except for unbound type parameters (see below), x and y must be structurally + // equivalent to unify. + + // If we get here and x or y is a type parameter, they are unbound + // (not recorded with the unifier). + // Ensure that if we have at least one type parameter, it is in x + // (the earlier swap checks for _recorded_ type parameters only). 
+ // This ensures that the switch switches on the type parameter. + // + // TODO(gri) Factor out type parameter handling from the switch. + if isTypeParam(y) { + if traceInference { + u.tracef("%s ≡ %s\t// swap", y, x) + } + x, y = y, x + } + + // Type elements (array, slice, etc. elements) use emode for unification. + // Element types must match exactly if the types are used in an assignment. + emode := mode + if mode&assign != 0 { + emode |= exact + } + + switch x := x.(type) { + case *Basic: + // Basic types are singletons except for the rune and byte + // aliases, thus we cannot solely rely on the x == y check + // above. See also comment in TypeName.IsAlias. + if y, ok := y.(*Basic); ok { + return x.kind == y.kind + } + + case *Array: + // Two array types unify if they have the same array length + // and their element types unify. + if y, ok := y.(*Array); ok { + // If one or both array lengths are unknown (< 0) due to some error, + // assume they are the same to avoid spurious follow-on errors. + return (x.len < 0 || y.len < 0 || x.len == y.len) && u.nify(x.elem, y.elem, emode, p) + } + + case *Slice: + // Two slice types unify if their element types unify. + if y, ok := y.(*Slice); ok { + return u.nify(x.elem, y.elem, emode, p) + } + + case *Struct: + // Two struct types unify if they have the same sequence of fields, + // and if corresponding fields have the same names, their (field) types unify, + // and they have identical tags. Two embedded fields are considered to have the same + // name. Lower-case field names from different packages are always different. + if y, ok := y.(*Struct); ok { + if x.NumFields() == y.NumFields() { + for i, f := range x.fields { + g := y.fields[i] + if f.embedded != g.embedded || + x.Tag(i) != y.Tag(i) || + !f.sameId(g.pkg, g.name) || + !u.nify(f.typ, g.typ, emode, p) { + return false + } + } + return true + } + } + + case *Pointer: + // Two pointer types unify if their base types unify. 
+ if y, ok := y.(*Pointer); ok { + return u.nify(x.base, y.base, emode, p) + } + + case *Tuple: + // Two tuples types unify if they have the same number of elements + // and the types of corresponding elements unify. + if y, ok := y.(*Tuple); ok { + if x.Len() == y.Len() { + if x != nil { + for i, v := range x.vars { + w := y.vars[i] + if !u.nify(v.typ, w.typ, mode, p) { + return false + } + } + } + return true + } + } + + case *Signature: + // Two function types unify if they have the same number of parameters + // and result values, corresponding parameter and result types unify, + // and either both functions are variadic or neither is. + // Parameter and result names are not required to match. + // TODO(gri) handle type parameters or document why we can ignore them. + if y, ok := y.(*Signature); ok { + return x.variadic == y.variadic && + u.nify(x.params, y.params, emode, p) && + u.nify(x.results, y.results, emode, p) + } + + case *Interface: + assert(!u.enableInterfaceInference || mode&exact != 0) // handled before this switch + + // Two interface types unify if they have the same set of methods with + // the same names, and corresponding function types unify. + // Lower-case method names from different packages are always different. + // The order of the methods is irrelevant. + if y, ok := y.(*Interface); ok { + xset := x.typeSet() + yset := y.typeSet() + if xset.comparable != yset.comparable { + return false + } + if !xset.terms.equal(yset.terms) { + return false + } + a := xset.methods + b := yset.methods + if len(a) == len(b) { + // Interface types are the only types where cycles can occur + // that are not "terminated" via named types; and such cycles + // can only be created via method parameter types that are + // anonymous interfaces (directly or indirectly) embedding + // the current interface. 
Example: + // + // type T interface { + // m() interface{T} + // } + // + // If two such (differently named) interfaces are compared, + // endless recursion occurs if the cycle is not detected. + // + // If x and y were compared before, they must be equal + // (if they were not, the recursion would have stopped); + // search the ifacePair stack for the same pair. + // + // This is a quadratic algorithm, but in practice these stacks + // are extremely short (bounded by the nesting depth of interface + // type declarations that recur via parameter types, an extremely + // rare occurrence). An alternative implementation might use a + // "visited" map, but that is probably less efficient overall. + q := &ifacePair{x, y, p} + for p != nil { + if p.identical(q) { + return true // same pair was compared before + } + p = p.prev + } + if debug { + assertSortedMethods(a) + assertSortedMethods(b) + } + for i, f := range a { + g := b[i] + if f.Id() != g.Id() || !u.nify(f.typ, g.typ, exact, q) { + return false + } + } + return true + } + } + + case *Map: + // Two map types unify if their key and value types unify. + if y, ok := y.(*Map); ok { + return u.nify(x.key, y.key, emode, p) && u.nify(x.elem, y.elem, emode, p) + } + + case *Chan: + // Two channel types unify if their value types unify + // and if they have the same direction. + // The channel direction is ignored for inexact unification. + if y, ok := y.(*Chan); ok { + return (mode&exact == 0 || x.dir == y.dir) && u.nify(x.elem, y.elem, emode, p) + } + + case *Named: + // Two named types unify if their type names originate in the same type declaration. + // If they are instantiated, their type argument lists must unify. + if y := asNamed(y); y != nil { + // Check type arguments before origins so they unify + // even if the origins don't match; for better error + // messages (see go.dev/issue/53692). 
+ xargs := x.TypeArgs().list() + yargs := y.TypeArgs().list() + if len(xargs) != len(yargs) { + return false + } + for i, xarg := range xargs { + if !u.nify(xarg, yargs[i], mode, p) { + return false + } + } + return identicalOrigin(x, y) + } + + case *TypeParam: + // x must be an unbound type parameter (see comment above). + if debug { + assert(u.asTypeParam(x) == nil) + } + // By definition, a valid type argument must be in the type set of + // the respective type constraint. Therefore, the type argument's + // underlying type must be in the set of underlying types of that + // constraint. If there is a single such underlying type, it's the + // constraint's core type. It must match the type argument's under- + // lying type, irrespective of whether the actual type argument, + // which may be a defined type, is actually in the type set (that + // will be determined at instantiation time). + // Thus, if we have the core type of an unbound type parameter, + // we know the structure of the possible types satisfying such + // parameters. Use that core type for further unification + // (see go.dev/issue/50755 for a test case). + if enableCoreTypeUnification { + // Because the core type is always an underlying type, + // unification will take care of matching against a + // defined or literal type automatically. + // If y is also an unbound type parameter, we will end + // up here again with x and y swapped, so we don't + // need to take care of that case separately. + if cx := coreType(x); cx != nil { + if traceInference { + u.tracef("core %s ≡ %s", x, y) + } + // If y is a defined type, it may not match against cx which + // is an underlying type (incl. int, string, etc.). Use assign + // mode here so that the unifier automatically takes under(y) + // if necessary. 
+ return u.nify(cx, y, assign, p) + } + } + // x != y and there's nothing to do + + case nil: + // avoid a crash in case of nil type + + default: + panic(sprintf(nil, true, "u.nify(%s, %s, %d)", x, y, mode)) + } + + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/union.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/union.go new file mode 100644 index 0000000000000000000000000000000000000000..1bf4353f264e34c7a6dc4c95e1f7bb4b48f3790e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/union.go @@ -0,0 +1,199 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import ( + "cmd/compile/internal/syntax" + . "internal/types/errors" +) + +// ---------------------------------------------------------------------------- +// API + +// A Union represents a union of terms embedded in an interface. +type Union struct { + terms []*Term // list of syntactical terms (not a canonicalized termlist) +} + +// NewUnion returns a new Union type with the given terms. +// It is an error to create an empty union; they are syntactically not possible. +func NewUnion(terms []*Term) *Union { + if len(terms) == 0 { + panic("empty union") + } + return &Union{terms} +} + +func (u *Union) Len() int { return len(u.terms) } +func (u *Union) Term(i int) *Term { return u.terms[i] } + +func (u *Union) Underlying() Type { return u } +func (u *Union) String() string { return TypeString(u, nil) } + +// A Term represents a term in a Union. +type Term term + +// NewTerm returns a new union term. 
+func NewTerm(tilde bool, typ Type) *Term { return &Term{tilde, typ} } + +func (t *Term) Tilde() bool { return t.tilde } +func (t *Term) Type() Type { return t.typ } +func (t *Term) String() string { return (*term)(t).String() } + +// ---------------------------------------------------------------------------- +// Implementation + +// Avoid excessive type-checking times due to quadratic termlist operations. +const maxTermCount = 100 + +// parseUnion parses uexpr as a union of expressions. +// The result is a Union type, or Typ[Invalid] for some errors. +func parseUnion(check *Checker, uexpr syntax.Expr) Type { + blist, tlist := flattenUnion(nil, uexpr) + assert(len(blist) == len(tlist)-1) + + var terms []*Term + + var u Type + for i, x := range tlist { + term := parseTilde(check, x) + if len(tlist) == 1 && !term.tilde { + // Single type. Ok to return early because all relevant + // checks have been performed in parseTilde (no need to + // run through term validity check below). + return term.typ // typ already recorded through check.typ in parseTilde + } + if len(terms) >= maxTermCount { + if isValid(u) { + check.errorf(x, InvalidUnion, "cannot handle more than %d union terms (implementation limitation)", maxTermCount) + u = Typ[Invalid] + } + } else { + terms = append(terms, term) + u = &Union{terms} + } + + if i > 0 { + check.recordTypeAndValue(blist[i-1], typexpr, u, nil) + } + } + + if !isValid(u) { + return u + } + + // Check validity of terms. + // Do this check later because it requires types to be set up. + // Note: This is a quadratic algorithm, but unions tend to be short. 
+ check.later(func() { + for i, t := range terms { + if !isValid(t.typ) { + continue + } + + u := under(t.typ) + f, _ := u.(*Interface) + if t.tilde { + if f != nil { + check.errorf(tlist[i], InvalidUnion, "invalid use of ~ (%s is an interface)", t.typ) + continue // don't report another error for t + } + + if !Identical(u, t.typ) { + check.errorf(tlist[i], InvalidUnion, "invalid use of ~ (underlying type of %s is %s)", t.typ, u) + continue + } + } + + // Stand-alone embedded interfaces are ok and are handled by the single-type case + // in the beginning. Embedded interfaces with tilde are excluded above. If we reach + // here, we must have at least two terms in the syntactic term list (but not necessarily + // in the term list of the union's type set). + if f != nil { + tset := f.typeSet() + switch { + case tset.NumMethods() != 0: + check.errorf(tlist[i], InvalidUnion, "cannot use %s in union (%s contains methods)", t, t) + case t.typ == universeComparable.Type(): + check.error(tlist[i], InvalidUnion, "cannot use comparable in union") + case tset.comparable: + check.errorf(tlist[i], InvalidUnion, "cannot use %s in union (%s embeds comparable)", t, t) + } + continue // terms with interface types are not subject to the no-overlap rule + } + + // Report overlapping (non-disjoint) terms such as + // a|a, a|~a, ~a|~a, and ~a|A (where under(A) == a). + if j := overlappingTerm(terms[:i], t); j >= 0 { + check.softErrorf(tlist[i], InvalidUnion, "overlapping terms %s and %s", t, terms[j]) + } + } + }).describef(uexpr, "check term validity %s", uexpr) + + return u +} + +func parseTilde(check *Checker, tx syntax.Expr) *Term { + x := tx + var tilde bool + if op, _ := x.(*syntax.Operation); op != nil && op.Op == syntax.Tilde { + x = op.X + tilde = true + } + typ := check.typ(x) + // Embedding stand-alone type parameters is not permitted (go.dev/issue/47127). 
+ // We don't need this restriction anymore if we make the underlying type of a type + // parameter its constraint interface: if we embed a lone type parameter, we will + // simply use its underlying type (like we do for other named, embedded interfaces), + // and since the underlying type is an interface the embedding is well defined. + if isTypeParam(typ) { + if tilde { + check.errorf(x, MisplacedTypeParam, "type in term %s cannot be a type parameter", tx) + } else { + check.error(x, MisplacedTypeParam, "term cannot be a type parameter") + } + typ = Typ[Invalid] + } + term := NewTerm(tilde, typ) + if tilde { + check.recordTypeAndValue(tx, typexpr, &Union{[]*Term{term}}, nil) + } + return term +} + +// overlappingTerm reports the index of the term x in terms which is +// overlapping (not disjoint) from y. The result is < 0 if there is no +// such term. The type of term y must not be an interface, and terms +// with an interface type are ignored in the terms list. +func overlappingTerm(terms []*Term, y *Term) int { + assert(!IsInterface(y.typ)) + for i, x := range terms { + if IsInterface(x.typ) { + continue + } + // disjoint requires non-nil, non-top arguments, + // and non-interface types as term types. + if debug { + if x == nil || x.typ == nil || y == nil || y.typ == nil { + panic("empty or top union term") + } + } + if !(*term)(x).disjoint((*term)(y)) { + return i + } + } + return -1 +} + +// flattenUnion walks a union type expression of the form A | B | C | ..., +// extracting both the binary exprs (blist) and leaf types (tlist). 
+func flattenUnion(list []syntax.Expr, x syntax.Expr) (blist, tlist []syntax.Expr) { + if o, _ := x.(*syntax.Operation); o != nil && o.Op == syntax.Or { + blist, tlist = flattenUnion(list, o.X) + blist = append(blist, o) + x = o.Y + } + return blist, append(tlist, x) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/universe.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/universe.go new file mode 100644 index 0000000000000000000000000000000000000000..c8be81b9ebe708889b8a594ca225695dcf5f0544 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/universe.go @@ -0,0 +1,288 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file sets up the universe scope and the unsafe package. + +package types2 + +import ( + "go/constant" + "strings" +) + +// The Universe scope contains all predeclared objects of Go. +// It is the outermost scope of any chain of nested scopes. +var Universe *Scope + +// The Unsafe package is the package returned by an importer +// for the import path "unsafe". +var Unsafe *Package + +var ( + universeIota Object + universeByte Type // uint8 alias, but has name "byte" + universeRune Type // int32 alias, but has name "rune" + universeAny Object + universeError Type + universeComparable Object +) + +// Typ contains the predeclared *Basic types indexed by their +// corresponding BasicKind. +// +// The *Basic type for Typ[Byte] will have the name "uint8". +// Use Universe.Lookup("byte").Type() to obtain the specific +// alias basic type named "byte" (and analogous for "rune"). 
+var Typ = [...]*Basic{ + Invalid: {Invalid, 0, "invalid type"}, + + Bool: {Bool, IsBoolean, "bool"}, + Int: {Int, IsInteger, "int"}, + Int8: {Int8, IsInteger, "int8"}, + Int16: {Int16, IsInteger, "int16"}, + Int32: {Int32, IsInteger, "int32"}, + Int64: {Int64, IsInteger, "int64"}, + Uint: {Uint, IsInteger | IsUnsigned, "uint"}, + Uint8: {Uint8, IsInteger | IsUnsigned, "uint8"}, + Uint16: {Uint16, IsInteger | IsUnsigned, "uint16"}, + Uint32: {Uint32, IsInteger | IsUnsigned, "uint32"}, + Uint64: {Uint64, IsInteger | IsUnsigned, "uint64"}, + Uintptr: {Uintptr, IsInteger | IsUnsigned, "uintptr"}, + Float32: {Float32, IsFloat, "float32"}, + Float64: {Float64, IsFloat, "float64"}, + Complex64: {Complex64, IsComplex, "complex64"}, + Complex128: {Complex128, IsComplex, "complex128"}, + String: {String, IsString, "string"}, + UnsafePointer: {UnsafePointer, 0, "Pointer"}, + + UntypedBool: {UntypedBool, IsBoolean | IsUntyped, "untyped bool"}, + UntypedInt: {UntypedInt, IsInteger | IsUntyped, "untyped int"}, + UntypedRune: {UntypedRune, IsInteger | IsUntyped, "untyped rune"}, + UntypedFloat: {UntypedFloat, IsFloat | IsUntyped, "untyped float"}, + UntypedComplex: {UntypedComplex, IsComplex | IsUntyped, "untyped complex"}, + UntypedString: {UntypedString, IsString | IsUntyped, "untyped string"}, + UntypedNil: {UntypedNil, IsUntyped, "untyped nil"}, +} + +var aliases = [...]*Basic{ + {Byte, IsInteger | IsUnsigned, "byte"}, + {Rune, IsInteger, "rune"}, +} + +func defPredeclaredTypes() { + for _, t := range Typ { + def(NewTypeName(nopos, nil, t.name, t)) + } + for _, t := range aliases { + def(NewTypeName(nopos, nil, t.name, t)) + } + + // type any = interface{} + // Note: don't use &emptyInterface for the type of any. Using a unique + // pointer allows us to detect any and format it as "any" rather than + // interface{}, which clarifies user-facing error messages significantly. 
+ def(NewTypeName(nopos, nil, "any", &Interface{complete: true, tset: &topTypeSet})) + + // type error interface{ Error() string } + { + obj := NewTypeName(nopos, nil, "error", nil) + obj.setColor(black) + typ := NewNamed(obj, nil, nil) + + // error.Error() string + recv := NewVar(nopos, nil, "", typ) + res := NewVar(nopos, nil, "", Typ[String]) + sig := NewSignatureType(recv, nil, nil, nil, NewTuple(res), false) + err := NewFunc(nopos, nil, "Error", sig) + + // interface{ Error() string } + ityp := &Interface{methods: []*Func{err}, complete: true} + computeInterfaceTypeSet(nil, nopos, ityp) // prevent races due to lazy computation of tset + + typ.SetUnderlying(ityp) + def(obj) + } + + // type comparable interface{} // marked as comparable + { + obj := NewTypeName(nopos, nil, "comparable", nil) + obj.setColor(black) + typ := NewNamed(obj, nil, nil) + + // interface{} // marked as comparable + ityp := &Interface{complete: true, tset: &_TypeSet{nil, allTermlist, true}} + + typ.SetUnderlying(ityp) + def(obj) + } +} + +var predeclaredConsts = [...]struct { + name string + kind BasicKind + val constant.Value +}{ + {"true", UntypedBool, constant.MakeBool(true)}, + {"false", UntypedBool, constant.MakeBool(false)}, + {"iota", UntypedInt, constant.MakeInt64(0)}, +} + +func defPredeclaredConsts() { + for _, c := range predeclaredConsts { + def(NewConst(nopos, nil, c.name, Typ[c.kind], c.val)) + } +} + +func defPredeclaredNil() { + def(&Nil{object{name: "nil", typ: Typ[UntypedNil], color_: black}}) +} + +// A builtinId is the id of a builtin function. 
+type builtinId int + +const ( + // universe scope + _Append builtinId = iota + _Cap + _Clear + _Close + _Complex + _Copy + _Delete + _Imag + _Len + _Make + _Max + _Min + _New + _Panic + _Print + _Println + _Real + _Recover + + // package unsafe + _Add + _Alignof + _Offsetof + _Sizeof + _Slice + _SliceData + _String + _StringData + + // testing support + _Assert + _Trace +) + +var predeclaredFuncs = [...]struct { + name string + nargs int + variadic bool + kind exprKind +}{ + _Append: {"append", 1, true, expression}, + _Cap: {"cap", 1, false, expression}, + _Clear: {"clear", 1, false, statement}, + _Close: {"close", 1, false, statement}, + _Complex: {"complex", 2, false, expression}, + _Copy: {"copy", 2, false, statement}, + _Delete: {"delete", 2, false, statement}, + _Imag: {"imag", 1, false, expression}, + _Len: {"len", 1, false, expression}, + _Make: {"make", 1, true, expression}, + // To disable max/min, remove the next two lines. + _Max: {"max", 1, true, expression}, + _Min: {"min", 1, true, expression}, + _New: {"new", 1, false, expression}, + _Panic: {"panic", 1, false, statement}, + _Print: {"print", 0, true, statement}, + _Println: {"println", 0, true, statement}, + _Real: {"real", 1, false, expression}, + _Recover: {"recover", 0, false, statement}, + + _Add: {"Add", 2, false, expression}, + _Alignof: {"Alignof", 1, false, expression}, + _Offsetof: {"Offsetof", 1, false, expression}, + _Sizeof: {"Sizeof", 1, false, expression}, + _Slice: {"Slice", 2, false, expression}, + _SliceData: {"SliceData", 1, false, expression}, + _String: {"String", 2, false, expression}, + _StringData: {"StringData", 1, false, expression}, + + _Assert: {"assert", 1, false, statement}, + _Trace: {"trace", 0, true, statement}, +} + +func defPredeclaredFuncs() { + for i := range predeclaredFuncs { + id := builtinId(i) + if id == _Assert || id == _Trace { + continue // only define these in testing environment + } + def(newBuiltin(id)) + } +} + +// DefPredeclaredTestFuncs defines the 
assert and trace built-ins. +// These built-ins are intended for debugging and testing of this +// package only. +func DefPredeclaredTestFuncs() { + if Universe.Lookup("assert") != nil { + return // already defined + } + def(newBuiltin(_Assert)) + def(newBuiltin(_Trace)) +} + +func init() { + Universe = NewScope(nil, nopos, nopos, "universe") + Unsafe = NewPackage("unsafe", "unsafe") + Unsafe.complete = true + + defPredeclaredTypes() + defPredeclaredConsts() + defPredeclaredNil() + defPredeclaredFuncs() + + universeIota = Universe.Lookup("iota") + universeByte = Universe.Lookup("byte").Type() + universeRune = Universe.Lookup("rune").Type() + universeAny = Universe.Lookup("any") + universeError = Universe.Lookup("error").Type() + universeComparable = Universe.Lookup("comparable") +} + +// Objects with names containing blanks are internal and not entered into +// a scope. Objects with exported names are inserted in the unsafe package +// scope; other objects are inserted in the universe scope. 
+func def(obj Object) { + assert(obj.color() == black) + name := obj.Name() + if strings.Contains(name, " ") { + return // nothing to do + } + // fix Obj link for named types + if typ := asNamed(obj.Type()); typ != nil { + typ.obj = obj.(*TypeName) + } + // exported identifiers go into package unsafe + scope := Universe + if obj.Exported() { + scope = Unsafe.scope + // set Pkg field + switch obj := obj.(type) { + case *TypeName: + obj.pkg = Unsafe + case *Builtin: + obj.pkg = Unsafe + default: + unreachable() + } + } + if scope.Insert(obj) != nil { + panic("double declaration of predeclared identifier") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/util.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/util.go new file mode 100644 index 0000000000000000000000000000000000000000..01da1c12ca341ad371dc4a3c67544a8dac47179f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/util.go @@ -0,0 +1,22 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains various functionality that is +// different between go/types and types2. Factoring +// out this code allows more of the rest of the code +// to be shared. + +package types2 + +import "cmd/compile/internal/syntax" + +// cmpPos compares the positions p and q and returns a result r as follows: +// +// r < 0: p is before q +// r == 0: p and q are the same position (but may not be identical) +// r > 0: p is after q +// +// If p and q are in different files, p is before q if the filename +// of p sorts lexicographically before the filename of q. 
+func cmpPos(p, q syntax.Pos) int { return p.Cmp(q) } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/util_test.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/util_test.go new file mode 100644 index 0000000000000000000000000000000000000000..70058aad84e5de5c7364aeed17244601349b9bee --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/util_test.go @@ -0,0 +1,17 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file exports various functionality of util.go +// so that it can be used in (package-external) tests. + +package types2 + +import ( + "cmd/compile/internal/syntax" +) + +func CmpPos(p, q syntax.Pos) int { return cmpPos(p, q) } + +func ScopeComment(s *Scope) string { return s.comment } +func ObjectScopePos(obj Object) syntax.Pos { return obj.scopePos() } diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/validtype.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/validtype.go new file mode 100644 index 0000000000000000000000000000000000000000..a880a3d93320123f3e62b9569c57990dad80f022 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/validtype.go @@ -0,0 +1,256 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +// validType verifies that the given type does not "expand" indefinitely +// producing a cycle in the type graph. +// (Cycles involving alias types, as in "type A = [10]A" are detected +// earlier, via the objDecl cycle detection mechanism.) +func (check *Checker) validType(typ *Named) { + check.validType0(typ, nil, nil) +} + +// validType0 checks if the given type is valid. 
If typ is a type parameter +// its value is looked up in the type argument list of the instantiated +// (enclosing) type, if it exists. Otherwise the type parameter must be from +// an enclosing function and can be ignored. +// The nest list describes the stack (the "nest in memory") of types which +// contain (or embed in the case of interfaces) other types. For instance, a +// struct named S which contains a field of named type F contains (the memory +// of) F in S, leading to the nest S->F. If a type appears in its own nest +// (say S->F->S) we have an invalid recursive type. The path list is the full +// path of named types in a cycle, it is only needed for error reporting. +func (check *Checker) validType0(typ Type, nest, path []*Named) bool { + switch t := Unalias(typ).(type) { + case nil: + // We should never see a nil type but be conservative and panic + // only in debug mode. + if debug { + panic("validType0(nil)") + } + + case *Array: + return check.validType0(t.elem, nest, path) + + case *Struct: + for _, f := range t.fields { + if !check.validType0(f.typ, nest, path) { + return false + } + } + + case *Union: + for _, t := range t.terms { + if !check.validType0(t.typ, nest, path) { + return false + } + } + + case *Interface: + for _, etyp := range t.embeddeds { + if !check.validType0(etyp, nest, path) { + return false + } + } + + case *Named: + // Exit early if we already know t is valid. + // This is purely an optimization but it prevents excessive computation + // times in pathological cases such as testdata/fixedbugs/issue6977.go. + // (Note: The valids map could also be allocated locally, once for each + // validType call.) + if check.valids.lookup(t) != nil { + break + } + + // Don't report a 2nd error if we already know the type is invalid + // (e.g., if a cycle was detected earlier, via under). + // Note: ensure that t.orig is fully resolved by calling Underlying(). 
+ if !isValid(t.Underlying()) { + return false + } + + // If the current type t is also found in nest, (the memory of) t is + // embedded in itself, indicating an invalid recursive type. + for _, e := range nest { + if Identical(e, t) { + // We have a cycle. If t != t.Origin() then t is an instance of + // the generic type t.Origin(). Because t is in the nest, t must + // occur within the definition (RHS) of the generic type t.Origin(), + // directly or indirectly, after expansion of the RHS. + // Therefore t.Origin() must be invalid, no matter how it is + // instantiated since the instantiation t of t.Origin() happens + // inside t.Origin()'s RHS and thus is always the same and always + // present. + // Therefore we can mark the underlying of both t and t.Origin() + // as invalid. If t is not an instance of a generic type, t and + // t.Origin() are the same. + // Furthermore, because we check all types in a package for validity + // before type checking is complete, any exported type that is invalid + // will have an invalid underlying type and we can't reach here with + // such a type (invalid types are excluded above). + // Thus, if we reach here with a type t, both t and t.Origin() (if + // different in the first place) must be from the current package; + // they cannot have been imported. + // Therefore it is safe to change their underlying types; there is + // no chance for a race condition (the types of the current package + // are not yet available to other goroutines). + assert(t.obj.pkg == check.pkg) + assert(t.Origin().obj.pkg == check.pkg) + t.underlying = Typ[Invalid] + t.Origin().underlying = Typ[Invalid] + + // Find the starting point of the cycle and report it. + // Because each type in nest must also appear in path (see invariant below), + // type t must be in path since it was found in nest. But not every type in path + // is in nest. Specifically t may appear in path with an earlier index than the + // index of t in nest. Search again. 
+ for start, p := range path { + if Identical(p, t) { + check.cycleError(makeObjList(path[start:])) + return false + } + } + panic("cycle start not found") + } + } + + // No cycle was found. Check the RHS of t. + // Every type added to nest is also added to path; thus every type that is in nest + // must also be in path (invariant). But not every type in path is in nest, since + // nest may be pruned (see below, *TypeParam case). + if !check.validType0(t.Origin().fromRHS, append(nest, t), append(path, t)) { + return false + } + + check.valids.add(t) // t is valid + + case *TypeParam: + // A type parameter stands for the type (argument) it was instantiated with. + // Check the corresponding type argument for validity if we are in an + // instantiated type. + if len(nest) > 0 { + inst := nest[len(nest)-1] // the type instance + // Find the corresponding type argument for the type parameter + // and proceed with checking that type argument. + for i, tparam := range inst.TypeParams().list() { + // The type parameter and type argument lists should + // match in length but be careful in case of errors. + if t == tparam && i < inst.TypeArgs().Len() { + targ := inst.TypeArgs().At(i) + // The type argument must be valid in the enclosing + // type (where inst was instantiated), hence we must + // check targ's validity in the type nest excluding + // the current (instantiated) type (see the example + // at the end of this file). + // For error reporting we keep the full path. + return check.validType0(targ, nest[:len(nest)-1], path) + } + } + } + } + + return true +} + +// makeObjList returns the list of type name objects for the given +// list of named types. +func makeObjList(tlist []*Named) []Object { + olist := make([]Object, len(tlist)) + for i, t := range tlist { + olist[i] = t.obj + } + return olist +} + +// Here is an example illustrating why we need to exclude the +// instantiated type from nest when evaluating the validity of +// a type parameter. 
Given the declarations +// +// var _ A[A[string]] +// +// type A[P any] struct { _ B[P] } +// type B[P any] struct { _ P } +// +// we want to determine if the type A[A[string]] is valid. +// We start evaluating A[A[string]] outside any type nest: +// +// A[A[string]] +// nest = +// path = +// +// The RHS of A is now evaluated in the A[A[string]] nest: +// +// struct{_ B[P₁]} +// nest = A[A[string]] +// path = A[A[string]] +// +// The struct has a single field of type B[P₁] with which +// we continue: +// +// B[P₁] +// nest = A[A[string]] +// path = A[A[string]] +// +// struct{_ P₂} +// nest = A[A[string]]->B[P] +// path = A[A[string]]->B[P] +// +// Eventually we reach the type parameter P of type B (P₂): +// +// P₂ +// nest = A[A[string]]->B[P] +// path = A[A[string]]->B[P] +// +// The type argument for P of B is the type parameter P of A (P₁). +// It must be evaluated in the type nest that existed when B was +// instantiated: +// +// P₁ +// nest = A[A[string]] <== type nest at B's instantiation time +// path = A[A[string]]->B[P] +// +// If we'd use the current nest it would correspond to the path +// which will be wrong as we will see shortly. P's type argument +// is A[string], which again must be evaluated in the type nest +// that existed when A was instantiated with A[string]. That type +// nest is empty: +// +// A[string] +// nest = <== type nest at A's instantiation time +// path = A[A[string]]->B[P] +// +// Evaluation then proceeds as before for A[string]: +// +// struct{_ B[P₁]} +// nest = A[string] +// path = A[A[string]]->B[P]->A[string] +// +// Now we reach B[P] again. If we had not adjusted nest, it would +// correspond to path, and we would find B[P] in nest, indicating +// a cycle, which would clearly be wrong since there's no cycle in +// A[string]: +// +// B[P₁] +// nest = A[string] +// path = A[A[string]]->B[P]->A[string] <== path contains B[P]! 
+// +// But because we use the correct type nest, evaluation proceeds without +// errors and we get the evaluation sequence: +// +// struct{_ P₂} +// nest = A[string]->B[P] +// path = A[A[string]]->B[P]->A[string]->B[P] +// P₂ +// nest = A[string]->B[P] +// path = A[A[string]]->B[P]->A[string]->B[P] +// P₁ +// nest = A[string] +// path = A[A[string]]->B[P]->A[string]->B[P] +// string +// nest = +// path = A[A[string]]->B[P]->A[string]->B[P] +// +// At this point we're done and A[A[string]] and is valid. diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/version.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/version.go new file mode 100644 index 0000000000000000000000000000000000000000..5aa3c803b54996b345f11c881c28d522b7821956 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/types2/version.go @@ -0,0 +1,126 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package types2 + +import ( + "cmd/compile/internal/syntax" + "fmt" + "go/version" + "internal/goversion" + "strings" +) + +// A goVersion is a Go language version string of the form "go1.%d" +// where d is the minor version number. goVersion strings don't +// contain release numbers ("go1.20.1" is not a valid goVersion). +type goVersion string + +// asGoVersion returns v as a goVersion (e.g., "go1.20.1" becomes "go1.20"). +// If v is not a valid Go version, the result is the empty string. +func asGoVersion(v string) goVersion { + return goVersion(version.Lang(v)) +} + +// isValid reports whether v is a valid Go version. +func (v goVersion) isValid() bool { + return v != "" +} + +// cmp returns -1, 0, or +1 depending on whether x < y, x == y, or x > y, +// interpreted as Go versions. 
+func (x goVersion) cmp(y goVersion) int { + return version.Compare(string(x), string(y)) +} + +var ( + // Go versions that introduced language changes + go1_9 = asGoVersion("go1.9") + go1_13 = asGoVersion("go1.13") + go1_14 = asGoVersion("go1.14") + go1_17 = asGoVersion("go1.17") + go1_18 = asGoVersion("go1.18") + go1_20 = asGoVersion("go1.20") + go1_21 = asGoVersion("go1.21") + go1_22 = asGoVersion("go1.22") + + // current (deployed) Go version + go_current = asGoVersion(fmt.Sprintf("go1.%d", goversion.Version)) +) + +// langCompat reports an error if the representation of a numeric +// literal is not compatible with the current language version. +func (check *Checker) langCompat(lit *syntax.BasicLit) { + s := lit.Value + if len(s) <= 2 || check.allowVersion(check.pkg, lit, go1_13) { + return + } + // len(s) > 2 + if strings.Contains(s, "_") { + check.versionErrorf(lit, go1_13, "underscores in numeric literals") + return + } + if s[0] != '0' { + return + } + radix := s[1] + if radix == 'b' || radix == 'B' { + check.versionErrorf(lit, go1_13, "binary literals") + return + } + if radix == 'o' || radix == 'O' { + check.versionErrorf(lit, go1_13, "0o/0O-style octal literals") + return + } + if lit.Kind != syntax.IntLit && (radix == 'x' || radix == 'X') { + check.versionErrorf(lit, go1_13, "hexadecimal floating-point literals") + } +} + +// allowVersion reports whether the given package is allowed to use version v. +func (check *Checker) allowVersion(pkg *Package, at poser, v goVersion) bool { + // We assume that imported packages have all been checked, + // so we only have to check for the local package. + if pkg != check.pkg { + return true + } + + // If no explicit file version is specified, + // fileVersion corresponds to the module version. + var fileVersion goVersion + if pos := at.Pos(); pos.IsKnown() { + // We need version.Lang below because file versions + // can be (unaltered) Config.GoVersion strings that + // may contain dot-release information. 
+ fileVersion = asGoVersion(check.versions[base(pos)]) + } + return !fileVersion.isValid() || fileVersion.cmp(v) >= 0 +} + +// verifyVersionf is like allowVersion but also accepts a format string and arguments +// which are used to report a version error if allowVersion returns false. It uses the +// current package. +func (check *Checker) verifyVersionf(at poser, v goVersion, format string, args ...interface{}) bool { + if !check.allowVersion(check.pkg, at, v) { + check.versionErrorf(at, v, format, args...) + return false + } + return true +} + +// base finds the underlying PosBase of the source file containing pos, +// skipping over intermediate PosBase layers created by //line directives. +// The positions must be known. +func base(pos syntax.Pos) *syntax.PosBase { + assert(pos.IsKnown()) + b := pos.Base() + for { + bb := b.Pos().Base() + if bb == nil || bb == b { + break + } + b = bb + } + return b +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/assign.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/assign.go new file mode 100644 index 0000000000000000000000000000000000000000..63b6a1d2c14163c216974daf9704e4f1c9dda8bb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/assign.go @@ -0,0 +1,742 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package walk + +import ( + "go/constant" + "internal/abi" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +// walkAssign walks an OAS (AssignExpr) or OASOP (AssignOpExpr) node. +func walkAssign(init *ir.Nodes, n ir.Node) ir.Node { + init.Append(ir.TakeInit(n)...) 
+ + var left, right ir.Node + switch n.Op() { + case ir.OAS: + n := n.(*ir.AssignStmt) + left, right = n.X, n.Y + case ir.OASOP: + n := n.(*ir.AssignOpStmt) + left, right = n.X, n.Y + } + + // Recognize m[k] = append(m[k], ...) so we can reuse + // the mapassign call. + var mapAppend *ir.CallExpr + if left.Op() == ir.OINDEXMAP && right.Op() == ir.OAPPEND { + left := left.(*ir.IndexExpr) + mapAppend = right.(*ir.CallExpr) + if !ir.SameSafeExpr(left, mapAppend.Args[0]) { + base.Fatalf("not same expressions: %v != %v", left, mapAppend.Args[0]) + } + } + + left = walkExpr(left, init) + left = safeExpr(left, init) + if mapAppend != nil { + mapAppend.Args[0] = left + } + + if n.Op() == ir.OASOP { + // Rewrite x op= y into x = x op y. + n = ir.NewAssignStmt(base.Pos, left, typecheck.Expr(ir.NewBinaryExpr(base.Pos, n.(*ir.AssignOpStmt).AsOp, left, right))) + } else { + n.(*ir.AssignStmt).X = left + } + as := n.(*ir.AssignStmt) + + if oaslit(as, init) { + return ir.NewBlockStmt(as.Pos(), nil) + } + + if as.Y == nil { + // TODO(austin): Check all "implicit zeroing" + return as + } + + if !base.Flag.Cfg.Instrumenting && ir.IsZero(as.Y) { + return as + } + + switch as.Y.Op() { + default: + as.Y = walkExpr(as.Y, init) + + case ir.ORECV: + // x = <-c; as.Left is x, as.Right.Left is c. + // order.stmt made sure x is addressable. + recv := as.Y.(*ir.UnaryExpr) + recv.X = walkExpr(recv.X, init) + + n1 := typecheck.NodAddr(as.X) + r := recv.X // the channel + return mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1) + + case ir.OAPPEND: + // x = append(...) + call := as.Y.(*ir.CallExpr) + if call.Type().Elem().NotInHeap() { + base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", call.Type().Elem()) + } + var r ir.Node + switch { + case isAppendOfMake(call): + // x = append(y, make([]T, y)...) + r = extendSlice(call, init) + case call.IsDDD: + r = appendSlice(call, init) // also works for append(slice, string). 
+ default: + r = walkAppend(call, init, as) + } + as.Y = r + if r.Op() == ir.OAPPEND { + r := r.(*ir.CallExpr) + // Left in place for back end. + // Do not add a new write barrier. + // Set up address of type for back end. + r.Fun = reflectdata.AppendElemRType(base.Pos, r) + return as + } + // Otherwise, lowered for race detector. + // Treat as ordinary assignment. + } + + if as.X != nil && as.Y != nil { + return convas(as, init) + } + return as +} + +// walkAssignDotType walks an OAS2DOTTYPE node. +func walkAssignDotType(n *ir.AssignListStmt, init *ir.Nodes) ir.Node { + walkExprListSafe(n.Lhs, init) + n.Rhs[0] = walkExpr(n.Rhs[0], init) + return n +} + +// walkAssignFunc walks an OAS2FUNC node. +func walkAssignFunc(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { + init.Append(ir.TakeInit(n)...) + + r := n.Rhs[0] + walkExprListSafe(n.Lhs, init) + r = walkExpr(r, init) + + if ir.IsIntrinsicCall(r.(*ir.CallExpr)) { + n.Rhs = []ir.Node{r} + return n + } + init.Append(r) + + ll := ascompatet(n.Lhs, r.Type()) + return ir.NewBlockStmt(src.NoXPos, ll) +} + +// walkAssignList walks an OAS2 node. +func walkAssignList(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { + init.Append(ir.TakeInit(n)...) + return ir.NewBlockStmt(src.NoXPos, ascompatee(ir.OAS, n.Lhs, n.Rhs)) +} + +// walkAssignMapRead walks an OAS2MAPR node. +func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { + init.Append(ir.TakeInit(n)...) 
+ + r := n.Rhs[0].(*ir.IndexExpr) + walkExprListSafe(n.Lhs, init) + r.X = walkExpr(r.X, init) + r.Index = walkExpr(r.Index, init) + t := r.X.Type() + + fast := mapfast(t) + key := mapKeyArg(fast, r, r.Index, false) + + // from: + // a,b = m[i] + // to: + // var,b = mapaccess2*(t, m, i) + // a = *var + a := n.Lhs[0] + + var call *ir.CallExpr + if w := t.Elem().Size(); w <= abi.ZeroValSize { + fn := mapfn(mapaccess2[fast], t, false) + call = mkcall1(fn, fn.Type().ResultsTuple(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key) + } else { + fn := mapfn("mapaccess2_fat", t, true) + z := reflectdata.ZeroAddr(w) + call = mkcall1(fn, fn.Type().ResultsTuple(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key, z) + } + + // mapaccess2* returns a typed bool, but due to spec changes, + // the boolean result of i.(T) is now untyped so we make it the + // same type as the variable on the lhs. + if ok := n.Lhs[1]; !ir.IsBlank(ok) && ok.Type().IsBoolean() { + call.Type().Field(1).Type = ok.Type() + } + n.Rhs = []ir.Node{call} + n.SetOp(ir.OAS2FUNC) + + // don't generate a = *var if a is _ + if ir.IsBlank(a) { + return walkExpr(typecheck.Stmt(n), init) + } + + var_ := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewPtr(t.Elem())) + var_.SetTypecheck(1) + var_.MarkNonNil() // mapaccess always returns a non-nil pointer + + n.Lhs[0] = var_ + init.Append(walkExpr(n, init)) + + as := ir.NewAssignStmt(base.Pos, a, ir.NewStarExpr(base.Pos, var_)) + return walkExpr(typecheck.Stmt(as), init) +} + +// walkAssignRecv walks an OAS2RECV node. +func walkAssignRecv(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { + init.Append(ir.TakeInit(n)...) 
+ + r := n.Rhs[0].(*ir.UnaryExpr) // recv + walkExprListSafe(n.Lhs, init) + r.X = walkExpr(r.X, init) + var n1 ir.Node + if ir.IsBlank(n.Lhs[0]) { + n1 = typecheck.NodNil() + } else { + n1 = typecheck.NodAddr(n.Lhs[0]) + } + fn := chanfn("chanrecv2", 2, r.X.Type()) + ok := n.Lhs[1] + call := mkcall1(fn, types.Types[types.TBOOL], init, r.X, n1) + return typecheck.Stmt(ir.NewAssignStmt(base.Pos, ok, call)) +} + +// walkReturn walks an ORETURN node. +func walkReturn(n *ir.ReturnStmt) ir.Node { + fn := ir.CurFunc + + fn.NumReturns++ + if len(n.Results) == 0 { + return n + } + + results := fn.Type().Results() + dsts := make([]ir.Node, len(results)) + for i, v := range results { + // TODO(mdempsky): typecheck should have already checked the result variables. + dsts[i] = typecheck.AssignExpr(v.Nname.(*ir.Name)) + } + + n.Results = ascompatee(n.Op(), dsts, n.Results) + return n +} + +// check assign type list to +// an expression list. called in +// +// expr-list = func() +func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node { + if len(nl) != nr.NumFields() { + base.Fatalf("ascompatet: assignment count mismatch: %d = %d", len(nl), nr.NumFields()) + } + + var nn ir.Nodes + for i, l := range nl { + if ir.IsBlank(l) { + continue + } + r := nr.Field(i) + + // Order should have created autotemps of the appropriate type for + // us to store results into. + if tmp, ok := l.(*ir.Name); !ok || !tmp.AutoTemp() || !types.Identical(tmp.Type(), r.Type) { + base.FatalfAt(l.Pos(), "assigning %v to %+v", r.Type, l) + } + + res := ir.NewResultExpr(base.Pos, nil, types.BADWIDTH) + res.Index = int64(i) + res.SetType(r.Type) + res.SetTypecheck(1) + + nn.Append(ir.NewAssignStmt(base.Pos, l, res)) + } + return nn +} + +// check assign expression list to +// an expression list. 
called in +// +// expr-list = expr-list +func ascompatee(op ir.Op, nl, nr []ir.Node) []ir.Node { + // cannot happen: should have been rejected during type checking + if len(nl) != len(nr) { + base.Fatalf("assignment operands mismatch: %+v / %+v", ir.Nodes(nl), ir.Nodes(nr)) + } + + var assigned ir.NameSet + var memWrite, deferResultWrite bool + + // affected reports whether expression n could be affected by + // the assignments applied so far. + affected := func(n ir.Node) bool { + if deferResultWrite { + return true + } + return ir.Any(n, func(n ir.Node) bool { + if n.Op() == ir.ONAME && assigned.Has(n.(*ir.Name)) { + return true + } + if memWrite && readsMemory(n) { + return true + } + return false + }) + } + + // If a needed expression may be affected by an + // earlier assignment, make an early copy of that + // expression and use the copy instead. + var early ir.Nodes + save := func(np *ir.Node) { + if n := *np; affected(n) { + *np = copyExpr(n, n.Type(), &early) + } + } + + var late ir.Nodes + for i, lorig := range nl { + l, r := lorig, nr[i] + + // Do not generate 'x = x' during return. See issue 4014. + if op == ir.ORETURN && ir.SameSafeExpr(l, r) { + continue + } + + // Save subexpressions needed on left side. + // Drill through non-dereferences. + for { + // If an expression has init statements, they must be evaluated + // before any of its saved sub-operands (#45706). + // TODO(mdempsky): Disallow init statements on lvalues. + init := ir.TakeInit(l) + walkStmtList(init) + early.Append(init...) 
+ + switch ll := l.(type) { + case *ir.IndexExpr: + if ll.X.Type().IsArray() { + save(&ll.Index) + l = ll.X + continue + } + case *ir.ParenExpr: + l = ll.X + continue + case *ir.SelectorExpr: + if ll.Op() == ir.ODOT { + l = ll.X + continue + } + } + break + } + + var name *ir.Name + switch l.Op() { + default: + base.Fatalf("unexpected lvalue %v", l.Op()) + case ir.ONAME: + name = l.(*ir.Name) + case ir.OINDEX, ir.OINDEXMAP: + l := l.(*ir.IndexExpr) + save(&l.X) + save(&l.Index) + case ir.ODEREF: + l := l.(*ir.StarExpr) + save(&l.X) + case ir.ODOTPTR: + l := l.(*ir.SelectorExpr) + save(&l.X) + } + + // Save expression on right side. + save(&r) + + appendWalkStmt(&late, convas(ir.NewAssignStmt(base.Pos, lorig, r), &late)) + + // Check for reasons why we may need to compute later expressions + // before this assignment happens. + + if name == nil { + // Not a direct assignment to a declared variable. + // Conservatively assume any memory access might alias. + memWrite = true + continue + } + + if name.Class == ir.PPARAMOUT && ir.CurFunc.HasDefer() { + // Assignments to a result parameter in a function with defers + // becomes visible early if evaluation of any later expression + // panics (#43835). + deferResultWrite = true + continue + } + + if ir.IsBlank(name) { + // We can ignore assignments to blank or anonymous result parameters. + // These can't appear in expressions anyway. + continue + } + + if name.Addrtaken() || !name.OnStack() { + // Global variable, heap escaped, or just addrtaken. + // Conservatively assume any memory access might alias. + memWrite = true + continue + } + + // Local, non-addrtaken variable. + // Assignments can only alias with direct uses of this variable. + assigned.Add(name) + } + + early.Append(late.Take()...) + return early +} + +// readsMemory reports whether the evaluation n directly reads from +// memory that might be written to indirectly. 
+func readsMemory(n ir.Node) bool { + switch n.Op() { + case ir.ONAME: + n := n.(*ir.Name) + if n.Class == ir.PFUNC { + return false + } + return n.Addrtaken() || !n.OnStack() + + case ir.OADD, + ir.OAND, + ir.OANDAND, + ir.OANDNOT, + ir.OBITNOT, + ir.OCONV, + ir.OCONVIFACE, + ir.OCONVNOP, + ir.ODIV, + ir.ODOT, + ir.ODOTTYPE, + ir.OLITERAL, + ir.OLSH, + ir.OMOD, + ir.OMUL, + ir.ONEG, + ir.ONIL, + ir.OOR, + ir.OOROR, + ir.OPAREN, + ir.OPLUS, + ir.ORSH, + ir.OSUB, + ir.OXOR: + return false + } + + // Be conservative. + return true +} + +// expand append(l1, l2...) to +// +// init { +// s := l1 +// newLen := s.len + l2.len +// // Compare as uint so growslice can panic on overflow. +// if uint(newLen) <= uint(s.cap) { +// s = s[:newLen] +// } else { +// s = growslice(s.ptr, s.len, s.cap, l2.len, T) +// } +// memmove(&s[s.len-l2.len], &l2[0], l2.len*sizeof(T)) +// } +// s +// +// l2 is allowed to be a string. +func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node { + walkAppendArgs(n, init) + + l1 := n.Args[0] + l2 := n.Args[1] + l2 = cheapExpr(l2, init) + n.Args[1] = l2 + + var nodes ir.Nodes + + // var s []T + s := typecheck.TempAt(base.Pos, ir.CurFunc, l1.Type()) + nodes.Append(ir.NewAssignStmt(base.Pos, s, l1)) // s = l1 + + elemtype := s.Type().Elem() + + // Decompose slice. 
+ oldPtr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, s) + oldLen := ir.NewUnaryExpr(base.Pos, ir.OLEN, s) + oldCap := ir.NewUnaryExpr(base.Pos, ir.OCAP, s) + + // Number of elements we are adding + num := ir.NewUnaryExpr(base.Pos, ir.OLEN, l2) + + // newLen := oldLen + num + newLen := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT]) + nodes.Append(ir.NewAssignStmt(base.Pos, newLen, ir.NewBinaryExpr(base.Pos, ir.OADD, oldLen, num))) + + // if uint(newLen) <= uint(oldCap) + nif := ir.NewIfStmt(base.Pos, nil, nil, nil) + nuint := typecheck.Conv(newLen, types.Types[types.TUINT]) + scapuint := typecheck.Conv(oldCap, types.Types[types.TUINT]) + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLE, nuint, scapuint) + nif.Likely = true + + // then { s = s[:newLen] } + slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, nil, newLen, nil) + slice.SetBounded(true) + nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, slice)} + + // else { s = growslice(oldPtr, newLen, oldCap, num, T) } + call := walkGrowslice(s, nif.PtrInit(), oldPtr, newLen, oldCap, num) + nif.Else = []ir.Node{ir.NewAssignStmt(base.Pos, s, call)} + + nodes.Append(nif) + + // Index to start copying into s. + // idx = newLen - len(l2) + // We use this expression instead of oldLen because it avoids + // a spill/restore of oldLen. + // Note: this doesn't work optimally currently because + // the compiler optimizer undoes this arithmetic. 
+ idx := ir.NewBinaryExpr(base.Pos, ir.OSUB, newLen, ir.NewUnaryExpr(base.Pos, ir.OLEN, l2)) + + var ncopy ir.Node + if elemtype.HasPointers() { + // copy(s[idx:], l2) + slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, idx, nil, nil) + slice.SetType(s.Type()) + slice.SetBounded(true) + + ir.CurFunc.SetWBPos(n.Pos()) + + // instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int + fn := typecheck.LookupRuntime("typedslicecopy", l1.Type().Elem(), l2.Type().Elem()) + ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes)) + ptr2, len2 := backingArrayPtrLen(l2) + ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, reflectdata.AppendElemRType(base.Pos, n), ptr1, len1, ptr2, len2) + } else if base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime { + // rely on runtime to instrument: + // copy(s[idx:], l2) + // l2 can be a slice or string. + slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, idx, nil, nil) + slice.SetType(s.Type()) + slice.SetBounded(true) + + ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes)) + ptr2, len2 := backingArrayPtrLen(l2) + + fn := typecheck.LookupRuntime("slicecopy", ptr1.Type().Elem(), ptr2.Type().Elem()) + ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, ir.NewInt(base.Pos, elemtype.Size())) + } else { + // memmove(&s[idx], &l2[0], len(l2)*sizeof(T)) + ix := ir.NewIndexExpr(base.Pos, s, idx) + ix.SetBounded(true) + addr := typecheck.NodAddr(ix) + + sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l2) + + nwid := cheapExpr(typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, l2), types.Types[types.TUINTPTR]), &nodes) + nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(base.Pos, elemtype.Size())) + + // instantiate func memmove(to *any, frm *any, length uintptr) + fn := typecheck.LookupRuntime("memmove", elemtype, elemtype) + ncopy = mkcall1(fn, nil, &nodes, addr, sptr, nwid) + } + ln := append(nodes, ncopy) + + typecheck.Stmts(ln) + walkStmtList(ln) + 
init.Append(ln...) + return s +} + +// isAppendOfMake reports whether n is of the form append(x, make([]T, y)...). +// isAppendOfMake assumes n has already been typechecked. +func isAppendOfMake(n ir.Node) bool { + if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting { + return false + } + + if n.Typecheck() == 0 { + base.Fatalf("missing typecheck: %+v", n) + } + + if n.Op() != ir.OAPPEND { + return false + } + call := n.(*ir.CallExpr) + if !call.IsDDD || len(call.Args) != 2 || call.Args[1].Op() != ir.OMAKESLICE { + return false + } + + mk := call.Args[1].(*ir.MakeExpr) + if mk.Cap != nil { + return false + } + + // y must be either an integer constant or the largest possible positive value + // of variable y needs to fit into a uint. + + // typecheck made sure that constant arguments to make are not negative and fit into an int. + + // The care of overflow of the len argument to make will be handled by an explicit check of int(len) < 0 during runtime. + y := mk.Len + if !ir.IsConst(y, constant.Int) && y.Type().Size() > types.Types[types.TUINT].Size() { + return false + } + + return true +} + +// extendSlice rewrites append(l1, make([]T, l2)...) to +// +// init { +// if l2 >= 0 { // Empty if block here for more meaningful node.SetLikely(true) +// } else { +// panicmakeslicelen() +// } +// s := l1 +// if l2 != 0 { +// n := len(s) + l2 +// // Compare n and s as uint so growslice can panic on overflow of len(s) + l2. +// // cap is a positive int and n can become negative when len(s) + l2 +// // overflows int. Interpreting n when negative as uint makes it larger +// // than cap(s). growslice will check the int n arg and panic if n is +// // negative. This prevents the overflow from being undetected. +// if uint(n) <= uint(cap(s)) { +// s = s[:n] +// } else { +// s = growslice(T, s.ptr, n, s.cap, l2, T) +// } +// // clear the new portion of the underlying array. 
+// hp := &s[len(s)-l2] +// hn := l2 * sizeof(T) +// memclr(hp, hn) +// } +// } +// s +// +// if T has pointers, the final memclr can go inside the "then" branch, as +// growslice will have done the clearing for us. + +func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node { + // isAppendOfMake made sure all possible positive values of l2 fit into a uint. + // The case of l2 overflow when converting from e.g. uint to int is handled by an explicit + // check of l2 < 0 at runtime which is generated below. + l2 := typecheck.Conv(n.Args[1].(*ir.MakeExpr).Len, types.Types[types.TINT]) + l2 = typecheck.Expr(l2) + n.Args[1] = l2 // walkAppendArgs expects l2 in n.List.Second(). + + walkAppendArgs(n, init) + + l1 := n.Args[0] + l2 = n.Args[1] // re-read l2, as it may have been updated by walkAppendArgs + + var nodes []ir.Node + + // if l2 >= 0 (likely happens), do nothing + nifneg := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGE, l2, ir.NewInt(base.Pos, 0)), nil, nil) + nifneg.Likely = true + + // else panicmakeslicelen() + nifneg.Else = []ir.Node{mkcall("panicmakeslicelen", nil, init)} + nodes = append(nodes, nifneg) + + // s := l1 + s := typecheck.TempAt(base.Pos, ir.CurFunc, l1.Type()) + nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, l1)) + + // if l2 != 0 { + // Avoid work if we're not appending anything. But more importantly, + // avoid allowing hp to be a past-the-end pointer when clearing. See issue 67255. 
+ nifnz := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.ONE, l2, ir.NewInt(base.Pos, 0)), nil, nil) + nifnz.Likely = true + nodes = append(nodes, nifnz) + + elemtype := s.Type().Elem() + + // n := s.len + l2 + nn := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT]) + nifnz.Body = append(nifnz.Body, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), l2))) + + // if uint(n) <= uint(s.cap) + nuint := typecheck.Conv(nn, types.Types[types.TUINT]) + capuint := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT]) + nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, nuint, capuint), nil, nil) + nif.Likely = true + + // then { s = s[:n] } + nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, nil, nn, nil) + nt.SetBounded(true) + nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, nt)} + + // else { s = growslice(s.ptr, n, s.cap, l2, T) } + nif.Else = []ir.Node{ + ir.NewAssignStmt(base.Pos, s, walkGrowslice(s, nif.PtrInit(), + ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), + nn, + ir.NewUnaryExpr(base.Pos, ir.OCAP, s), + l2)), + } + + nifnz.Body = append(nifnz.Body, nif) + + // hp := &s[s.len - l2] + // TODO: &s[s.len] - hn? 
+ ix := ir.NewIndexExpr(base.Pos, s, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), l2)) + ix.SetBounded(true) + hp := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR]) + + // hn := l2 * sizeof(elem(s)) + hn := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, l2, ir.NewInt(base.Pos, elemtype.Size())), types.Types[types.TUINTPTR]) + + clrname := "memclrNoHeapPointers" + hasPointers := elemtype.HasPointers() + if hasPointers { + clrname = "memclrHasPointers" + ir.CurFunc.SetWBPos(n.Pos()) + } + + var clr ir.Nodes + clrfn := mkcall(clrname, nil, &clr, hp, hn) + clr.Append(clrfn) + if hasPointers { + // growslice will have cleared the new entries, so only + // if growslice isn't called do we need to do the zeroing ourselves. + nif.Body = append(nif.Body, clr...) + } else { + nifnz.Body = append(nifnz.Body, clr...) + } + + typecheck.Stmts(nodes) + walkStmtList(nodes) + init.Append(nodes...) + return s +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/builtin.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/builtin.go new file mode 100644 index 0000000000000000000000000000000000000000..37143baa28ceaf3c2e25eb08b4783ee6f2ebab8e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/builtin.go @@ -0,0 +1,888 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package walk + +import ( + "fmt" + "go/constant" + "go/token" + "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/escape" + "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" +) + +// Rewrite append(src, x, y, z) so that any side effects in +// x, y, z (including runtime panics) are evaluated in +// initialization statements before the append. 
+// For normal code generation, stop there and leave the +// rest to ssagen. +// +// For race detector, expand append(src, a [, b]* ) to +// +// init { +// s := src +// const argc = len(args) - 1 +// newLen := s.len + argc +// if uint(newLen) <= uint(s.cap) { +// s = s[:newLen] +// } else { +// s = growslice(s.ptr, newLen, s.cap, argc, elemType) +// } +// s[s.len - argc] = a +// s[s.len - argc + 1] = b +// ... +// } +// s +func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node { + if !ir.SameSafeExpr(dst, n.Args[0]) { + n.Args[0] = safeExpr(n.Args[0], init) + n.Args[0] = walkExpr(n.Args[0], init) + } + walkExprListSafe(n.Args[1:], init) + + nsrc := n.Args[0] + + // walkExprListSafe will leave OINDEX (s[n]) alone if both s + // and n are name or literal, but those may index the slice we're + // modifying here. Fix explicitly. + // Using cheapExpr also makes sure that the evaluation + // of all arguments (and especially any panics) happen + // before we begin to modify the slice in a visible way. + ls := n.Args[1:] + for i, n := range ls { + n = cheapExpr(n, init) + if !types.Identical(n.Type(), nsrc.Type().Elem()) { + n = typecheck.AssignConv(n, nsrc.Type().Elem(), "append") + n = walkExpr(n, init) + } + ls[i] = n + } + + argc := len(n.Args) - 1 + if argc < 1 { + return nsrc + } + + // General case, with no function calls left as arguments. + // Leave for ssagen, except that instrumentation requires the old form. 
+ if !base.Flag.Cfg.Instrumenting || base.Flag.CompilingRuntime { + return n + } + + var l []ir.Node + + // s = slice to append to + s := typecheck.TempAt(base.Pos, ir.CurFunc, nsrc.Type()) + l = append(l, ir.NewAssignStmt(base.Pos, s, nsrc)) + + // num = number of things to append + num := ir.NewInt(base.Pos, int64(argc)) + + // newLen := s.len + num + newLen := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT]) + l = append(l, ir.NewAssignStmt(base.Pos, newLen, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), num))) + + // if uint(newLen) <= uint(s.cap) + nif := ir.NewIfStmt(base.Pos, nil, nil, nil) + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLE, typecheck.Conv(newLen, types.Types[types.TUINT]), typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT])) + nif.Likely = true + + // then { s = s[:n] } + slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, nil, newLen, nil) + slice.SetBounded(true) + nif.Body = []ir.Node{ + ir.NewAssignStmt(base.Pos, s, slice), + } + + // else { s = growslice(s.ptr, n, s.cap, a, T) } + nif.Else = []ir.Node{ + ir.NewAssignStmt(base.Pos, s, walkGrowslice(s, nif.PtrInit(), + ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), + newLen, + ir.NewUnaryExpr(base.Pos, ir.OCAP, s), + num)), + } + + l = append(l, nif) + + ls = n.Args[1:] + for i, n := range ls { + // s[s.len-argc+i] = arg + ix := ir.NewIndexExpr(base.Pos, s, ir.NewBinaryExpr(base.Pos, ir.OSUB, newLen, ir.NewInt(base.Pos, int64(argc-i)))) + ix.SetBounded(true) + l = append(l, ir.NewAssignStmt(base.Pos, ix, n)) + } + + typecheck.Stmts(l) + walkStmtList(l) + init.Append(l...) 
+ return s +} + +// growslice(ptr *T, newLen, oldCap, num int, ) (ret []T) +func walkGrowslice(slice *ir.Name, init *ir.Nodes, oldPtr, newLen, oldCap, num ir.Node) *ir.CallExpr { + elemtype := slice.Type().Elem() + fn := typecheck.LookupRuntime("growslice", elemtype, elemtype) + elemtypeptr := reflectdata.TypePtrAt(base.Pos, elemtype) + return mkcall1(fn, slice.Type(), init, oldPtr, newLen, oldCap, num, elemtypeptr) +} + +// walkClear walks an OCLEAR node. +func walkClear(n *ir.UnaryExpr) ir.Node { + typ := n.X.Type() + switch { + case typ.IsSlice(): + if n := arrayClear(n.X.Pos(), n.X, nil); n != nil { + return n + } + // If n == nil, we are clearing an array which takes zero memory, do nothing. + return ir.NewBlockStmt(n.Pos(), nil) + case typ.IsMap(): + return mapClear(n.X, reflectdata.TypePtrAt(n.X.Pos(), n.X.Type())) + } + panic("unreachable") +} + +// walkClose walks an OCLOSE node. +func walkClose(n *ir.UnaryExpr, init *ir.Nodes) ir.Node { + // cannot use chanfn - closechan takes any, not chan any + fn := typecheck.LookupRuntime("closechan", n.X.Type()) + return mkcall1(fn, nil, init, n.X) +} + +// Lower copy(a, b) to a memmove call or a runtime call. +// +// init { +// n := len(a) +// if n > len(b) { n = len(b) } +// if a.ptr != b.ptr { memmove(a.ptr, b.ptr, n*sizeof(elem(a))) } +// } +// n; +// +// Also works if b is a string. +func walkCopy(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node { + if n.X.Type().Elem().HasPointers() { + ir.CurFunc.SetWBPos(n.Pos()) + fn := writebarrierfn("typedslicecopy", n.X.Type().Elem(), n.Y.Type().Elem()) + n.X = cheapExpr(n.X, init) + ptrL, lenL := backingArrayPtrLen(n.X) + n.Y = cheapExpr(n.Y, init) + ptrR, lenR := backingArrayPtrLen(n.Y) + return mkcall1(fn, n.Type(), init, reflectdata.CopyElemRType(base.Pos, n), ptrL, lenL, ptrR, lenR) + } + + if runtimecall { + // rely on runtime to instrument: + // copy(n.Left, n.Right) + // n.Right can be a slice or string. 
+ + n.X = cheapExpr(n.X, init) + ptrL, lenL := backingArrayPtrLen(n.X) + n.Y = cheapExpr(n.Y, init) + ptrR, lenR := backingArrayPtrLen(n.Y) + + fn := typecheck.LookupRuntime("slicecopy", ptrL.Type().Elem(), ptrR.Type().Elem()) + + return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, ir.NewInt(base.Pos, n.X.Type().Elem().Size())) + } + + n.X = walkExpr(n.X, init) + n.Y = walkExpr(n.Y, init) + nl := typecheck.TempAt(base.Pos, ir.CurFunc, n.X.Type()) + nr := typecheck.TempAt(base.Pos, ir.CurFunc, n.Y.Type()) + var l []ir.Node + l = append(l, ir.NewAssignStmt(base.Pos, nl, n.X)) + l = append(l, ir.NewAssignStmt(base.Pos, nr, n.Y)) + + nfrm := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nr) + nto := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nl) + + nlen := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT]) + + // n = len(to) + l = append(l, ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nl))) + + // if n > len(frm) { n = len(frm) } + nif := ir.NewIfStmt(base.Pos, nil, nil, nil) + + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr)) + nif.Body.Append(ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr))) + l = append(l, nif) + + // if to.ptr != frm.ptr { memmove( ... ) } + ne := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.ONE, nto, nfrm), nil, nil) + ne.Likely = true + l = append(l, ne) + + fn := typecheck.LookupRuntime("memmove", nl.Type().Elem(), nl.Type().Elem()) + nwid := ir.Node(typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR])) + setwid := ir.NewAssignStmt(base.Pos, nwid, typecheck.Conv(nlen, types.Types[types.TUINTPTR])) + ne.Body.Append(setwid) + nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(base.Pos, nl.Type().Elem().Size())) + call := mkcall1(fn, nil, init, nto, nfrm, nwid) + ne.Body.Append(call) + + typecheck.Stmts(l) + walkStmtList(l) + init.Append(l...) + return nlen +} + +// walkDelete walks an ODELETE node. 
+func walkDelete(init *ir.Nodes, n *ir.CallExpr) ir.Node { + init.Append(ir.TakeInit(n)...) + map_ := n.Args[0] + key := n.Args[1] + map_ = walkExpr(map_, init) + key = walkExpr(key, init) + + t := map_.Type() + fast := mapfast(t) + key = mapKeyArg(fast, n, key, false) + return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.DeleteMapRType(base.Pos, n), map_, key) +} + +// walkLenCap walks an OLEN or OCAP node. +func walkLenCap(n *ir.UnaryExpr, init *ir.Nodes) ir.Node { + if isRuneCount(n) { + // Replace len([]rune(string)) with runtime.countrunes(string). + return mkcall("countrunes", n.Type(), init, typecheck.Conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING])) + } + if isByteCount(n) { + conv := n.X.(*ir.ConvExpr) + walkStmtList(conv.Init()) + init.Append(ir.TakeInit(conv)...) + _, len := backingArrayPtrLen(cheapExpr(conv.X, init)) + return len + } + + n.X = walkExpr(n.X, init) + + // replace len(*[10]int) with 10. + // delayed until now to preserve side effects. + t := n.X.Type() + + if t.IsPtr() { + t = t.Elem() + } + if t.IsArray() { + safeExpr(n.X, init) + con := ir.NewConstExpr(constant.MakeInt64(t.NumElem()), n) + con.SetTypecheck(1) + return con + } + return n +} + +// walkMakeChan walks an OMAKECHAN node. +func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node { + // When size fits into int, use makechan instead of + // makechan64, which is faster and shorter on 32 bit platforms. + size := n.Len + fnname := "makechan64" + argtype := types.Types[types.TINT64] + + // Type checking guarantees that TIDEAL size is positive and fits in an int. + // The case of size overflow when converting TUINT or TUINTPTR to TINT + // will be handled by the negative range checks in makechan during runtime. 
+ if size.Type().IsKind(types.TIDEAL) || size.Type().Size() <= types.Types[types.TUINT].Size() { + fnname = "makechan" + argtype = types.Types[types.TINT] + } + + return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.MakeChanRType(base.Pos, n), typecheck.Conv(size, argtype)) +} + +// walkMakeMap walks an OMAKEMAP node. +func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node { + t := n.Type() + hmapType := reflectdata.MapType() + hint := n.Len + + // var h *hmap + var h ir.Node + if n.Esc() == ir.EscNone { + // Allocate hmap on stack. + + // var hv hmap + // h = &hv + h = stackTempAddr(init, hmapType) + + // Allocate one bucket pointed to by hmap.buckets on stack if hint + // is not larger than BUCKETSIZE. In case hint is larger than + // BUCKETSIZE runtime.makemap will allocate the buckets on the heap. + // Maximum key and elem size is 128 bytes, larger objects + // are stored with an indirection. So max bucket size is 2048+eps. + if !ir.IsConst(hint, constant.Int) || + constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) { + + // In case hint is larger than BUCKETSIZE runtime.makemap + // will allocate the buckets on the heap, see #20184 + // + // if hint <= BUCKETSIZE { + // var bv bmap + // b = &bv + // h.buckets = b + // } + + nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, reflectdata.BUCKETSIZE)), nil, nil) + nif.Likely = true + + // var bv bmap + // b = &bv + b := stackTempAddr(&nif.Body, reflectdata.MapBucketType(t)) + + // h.buckets = b + bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap + na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), typecheck.ConvNop(b, types.Types[types.TUNSAFEPTR])) + nif.Body.Append(na) + appendWalkStmt(init, nif) + } + } + + if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) { + // Handling make(map[any]any) and + // 
make(map[any]any, hint) where hint <= BUCKETSIZE + // special allows for faster map initialization and + // improves binary size by using calls with fewer arguments. + // For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false + // and no buckets will be allocated by makemap. Therefore, + // no buckets need to be allocated in this code path. + if n.Esc() == ir.EscNone { + // Only need to initialize h.hash0 since + // hmap h has been allocated on the stack already. + // h.hash0 = rand32() + rand := mkcall("rand32", types.Types[types.TUINT32], init) + hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand)) + return typecheck.ConvNop(h, t) + } + // Call runtime.makehmap to allocate an + // hmap on the heap and initialize hmap's hash0 field. + fn := typecheck.LookupRuntime("makemap_small", t.Key(), t.Elem()) + return mkcall1(fn, n.Type(), init) + } + + if n.Esc() != ir.EscNone { + h = typecheck.NodNil() + } + // Map initialization with a variable or large hint is + // more complicated. We therefore generate a call to + // runtime.makemap to initialize hmap and allocate the + // map buckets. + + // When hint fits into int, use makemap instead of + // makemap64, which is faster and shorter on 32 bit platforms. + fnname := "makemap64" + argtype := types.Types[types.TINT64] + + // Type checking guarantees that TIDEAL hint is positive and fits in an int. + // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function. + // The case of hint overflow when converting TUINT or TUINTPTR to TINT + // will be handled by the negative range checks in makemap during runtime. 
+ if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() { + fnname = "makemap" + argtype = types.Types[types.TINT] + } + + fn := typecheck.LookupRuntime(fnname, hmapType, t.Key(), t.Elem()) + return mkcall1(fn, n.Type(), init, reflectdata.MakeMapRType(base.Pos, n), typecheck.Conv(hint, argtype), h) +} + +// walkMakeSlice walks an OMAKESLICE node. +func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node { + l := n.Len + r := n.Cap + if r == nil { + r = safeExpr(l, init) + l = r + } + t := n.Type() + if t.Elem().NotInHeap() { + base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) + } + if n.Esc() == ir.EscNone { + if why := escape.HeapAllocReason(n); why != "" { + base.Fatalf("%v has EscNone, but %v", n, why) + } + // var arr [r]T + // n = arr[:l] + i := typecheck.IndexConst(r) + if i < 0 { + base.Fatalf("walkExpr: invalid index %v", r) + } + + // cap is constrained to [0,2^31) or [0,2^63) depending on whether + // we're in 32-bit or 64-bit systems. So it's safe to do: + // + // if uint64(len) > cap { + // if len < 0 { panicmakeslicelen() } + // panicmakeslicecap() + // } + nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(l, types.Types[types.TUINT64]), ir.NewInt(base.Pos, i)), nil, nil) + niflen := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLT, l, ir.NewInt(base.Pos, 0)), nil, nil) + niflen.Body = []ir.Node{mkcall("panicmakeslicelen", nil, init)} + nif.Body.Append(niflen, mkcall("panicmakeslicecap", nil, init)) + init.Append(typecheck.Stmt(nif)) + + t = types.NewArray(t.Elem(), i) // [r]T + var_ := typecheck.TempAt(base.Pos, ir.CurFunc, t) + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil)) // zero temp + r := ir.NewSliceExpr(base.Pos, ir.OSLICE, var_, nil, l, nil) // arr[:l] + // The conv is necessary in case n.Type is named. 
+ return walkExpr(typecheck.Expr(typecheck.Conv(r, n.Type())), init) + } + + // n escapes; set up a call to makeslice. + // When len and cap can fit into int, use makeslice instead of + // makeslice64, which is faster and shorter on 32 bit platforms. + + len, cap := l, r + + fnname := "makeslice64" + argtype := types.Types[types.TINT64] + + // Type checking guarantees that TIDEAL len/cap are positive and fit in an int. + // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT + // will be handled by the negative range checks in makeslice during runtime. + if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) && + (cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) { + fnname = "makeslice" + argtype = types.Types[types.TINT] + } + fn := typecheck.LookupRuntime(fnname) + ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.MakeSliceElemRType(base.Pos, n), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype)) + ptr.MarkNonNil() + len = typecheck.Conv(len, types.Types[types.TINT]) + cap = typecheck.Conv(cap, types.Types[types.TINT]) + sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, len, cap) + return walkExpr(typecheck.Expr(sh), init) +} + +// walkMakeSliceCopy walks an OMAKESLICECOPY node. +func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node { + if n.Esc() == ir.EscNone { + base.Fatalf("OMAKESLICECOPY with EscNone: %v", n) + } + + t := n.Type() + if t.Elem().NotInHeap() { + base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem()) + } + + length := typecheck.Conv(n.Len, types.Types[types.TINT]) + copylen := ir.NewUnaryExpr(base.Pos, ir.OLEN, n.Cap) + copyptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, n.Cap) + + if !t.Elem().HasPointers() && n.Bounded() { + // When len(to)==len(from) and elements have no pointers: + // replace make+copy with runtime.mallocgc+runtime.memmove. 
+ + // We do not check for overflow of len(to)*elem.Width here + // since len(from) is an existing checked slice capacity + // with same elem.Width for the from slice. + size := ir.NewBinaryExpr(base.Pos, ir.OMUL, typecheck.Conv(length, types.Types[types.TUINTPTR]), typecheck.Conv(ir.NewInt(base.Pos, t.Elem().Size()), types.Types[types.TUINTPTR])) + + // instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer + fn := typecheck.LookupRuntime("mallocgc") + ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, typecheck.NodNil(), ir.NewBool(base.Pos, false)) + ptr.MarkNonNil() + sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, length, length) + + s := typecheck.TempAt(base.Pos, ir.CurFunc, t) + r := typecheck.Stmt(ir.NewAssignStmt(base.Pos, s, sh)) + r = walkExpr(r, init) + init.Append(r) + + // instantiate memmove(to *any, frm *any, size uintptr) + fn = typecheck.LookupRuntime("memmove", t.Elem(), t.Elem()) + ncopy := mkcall1(fn, nil, init, ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), copyptr, size) + init.Append(walkExpr(typecheck.Stmt(ncopy), init)) + + return s + } + // Replace make+copy with runtime.makeslicecopy. + // instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer + fn := typecheck.LookupRuntime("makeslicecopy") + ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.MakeSliceElemRType(base.Pos, n), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR])) + ptr.MarkNonNil() + sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, length, length) + return walkExpr(typecheck.Expr(sh), init) +} + +// walkNew walks an ONEW node. 
+func walkNew(n *ir.UnaryExpr, init *ir.Nodes) ir.Node { + t := n.Type().Elem() + if t.NotInHeap() { + base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem()) + } + if n.Esc() == ir.EscNone { + if t.Size() > ir.MaxImplicitStackVarSize { + base.Fatalf("large ONEW with EscNone: %v", n) + } + return stackTempAddr(init, t) + } + types.CalcSize(t) + n.MarkNonNil() + return n +} + +func walkMinMax(n *ir.CallExpr, init *ir.Nodes) ir.Node { + init.Append(ir.TakeInit(n)...) + walkExprList(n.Args, init) + return n +} + +// generate code for print. +func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node { + // Hoist all the argument evaluation up before the lock. + walkExprListCheap(nn.Args, init) + + // For println, add " " between elements and "\n" at the end. + if nn.Op() == ir.OPRINTLN { + s := nn.Args + t := make([]ir.Node, 0, len(s)*2) + for i, n := range s { + if i != 0 { + t = append(t, ir.NewString(base.Pos, " ")) + } + t = append(t, n) + } + t = append(t, ir.NewString(base.Pos, "\n")) + nn.Args = t + } + + // Collapse runs of constant strings. 
+ s := nn.Args + t := make([]ir.Node, 0, len(s)) + for i := 0; i < len(s); { + var strs []string + for i < len(s) && ir.IsConst(s[i], constant.String) { + strs = append(strs, ir.StringVal(s[i])) + i++ + } + if len(strs) > 0 { + t = append(t, ir.NewString(base.Pos, strings.Join(strs, ""))) + } + if i < len(s) { + t = append(t, s[i]) + i++ + } + } + nn.Args = t + + calls := []ir.Node{mkcall("printlock", nil, init)} + for i, n := range nn.Args { + if n.Op() == ir.OLITERAL { + if n.Type() == types.UntypedRune { + n = typecheck.DefaultLit(n, types.RuneType) + } + + switch n.Val().Kind() { + case constant.Int: + n = typecheck.DefaultLit(n, types.Types[types.TINT64]) + + case constant.Float: + n = typecheck.DefaultLit(n, types.Types[types.TFLOAT64]) + } + } + + if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Kind() == types.TIDEAL { + n = typecheck.DefaultLit(n, types.Types[types.TINT64]) + } + n = typecheck.DefaultLit(n, nil) + nn.Args[i] = n + if n.Type() == nil || n.Type().Kind() == types.TFORW { + continue + } + + var on *ir.Name + switch n.Type().Kind() { + case types.TINTER: + if n.Type().IsEmptyInterface() { + on = typecheck.LookupRuntime("printeface", n.Type()) + } else { + on = typecheck.LookupRuntime("printiface", n.Type()) + } + case types.TPTR: + if n.Type().Elem().NotInHeap() { + on = typecheck.LookupRuntime("printuintptr") + n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) + n.SetType(types.Types[types.TUNSAFEPTR]) + n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) + n.SetType(types.Types[types.TUINTPTR]) + break + } + fallthrough + case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR: + on = typecheck.LookupRuntime("printpointer", n.Type()) + case types.TSLICE: + on = typecheck.LookupRuntime("printslice", n.Type()) + case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR: + if types.RuntimeSymName(n.Type().Sym()) == "hex" { + on = typecheck.LookupRuntime("printhex") + } else { + on = 
typecheck.LookupRuntime("printuint") + } + case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64: + on = typecheck.LookupRuntime("printint") + case types.TFLOAT32, types.TFLOAT64: + on = typecheck.LookupRuntime("printfloat") + case types.TCOMPLEX64, types.TCOMPLEX128: + on = typecheck.LookupRuntime("printcomplex") + case types.TBOOL: + on = typecheck.LookupRuntime("printbool") + case types.TSTRING: + cs := "" + if ir.IsConst(n, constant.String) { + cs = ir.StringVal(n) + } + switch cs { + case " ": + on = typecheck.LookupRuntime("printsp") + case "\n": + on = typecheck.LookupRuntime("printnl") + default: + on = typecheck.LookupRuntime("printstring") + } + default: + badtype(ir.OPRINT, n.Type(), nil) + continue + } + + r := ir.NewCallExpr(base.Pos, ir.OCALL, on, nil) + if params := on.Type().Params(); len(params) > 0 { + t := params[0].Type + n = typecheck.Conv(n, t) + r.Args.Append(n) + } + calls = append(calls, r) + } + + calls = append(calls, mkcall("printunlock", nil, init)) + + typecheck.Stmts(calls) + walkExprList(calls, init) + + r := ir.NewBlockStmt(base.Pos, nil) + r.List = calls + return walkStmt(typecheck.Stmt(r)) +} + +// walkRecoverFP walks an ORECOVERFP node. +func walkRecoverFP(nn *ir.CallExpr, init *ir.Nodes) ir.Node { + return mkcall("gorecover", nn.Type(), init, walkExpr(nn.Args[0], init)) +} + +// walkUnsafeData walks an OUNSAFESLICEDATA or OUNSAFESTRINGDATA expression. +func walkUnsafeData(n *ir.UnaryExpr, init *ir.Nodes) ir.Node { + slice := walkExpr(n.X, init) + res := typecheck.Expr(ir.NewUnaryExpr(n.Pos(), ir.OSPTR, slice)) + res.SetType(n.Type()) + return walkExpr(res, init) +} + +func walkUnsafeSlice(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { + ptr := safeExpr(n.X, init) + len := safeExpr(n.Y, init) + sliceType := n.Type() + + lenType := types.Types[types.TINT64] + unsafePtr := typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]) + + // If checkptr enabled, call runtime.unsafeslicecheckptr to check ptr and len. 
+ // for simplicity, unsafeslicecheckptr always uses int64. + // Type checking guarantees that TIDEAL len/cap are positive and fit in an int. + // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT + // will be handled by the negative range checks in unsafeslice during runtime. + if ir.ShouldCheckPtr(ir.CurFunc, 1) { + fnname := "unsafeslicecheckptr" + fn := typecheck.LookupRuntime(fnname) + init.Append(mkcall1(fn, nil, init, reflectdata.UnsafeSliceElemRType(base.Pos, n), unsafePtr, typecheck.Conv(len, lenType))) + } else { + // Otherwise, open code unsafe.Slice to prevent runtime call overhead. + // Keep this code in sync with runtime.unsafeslice{,64} + if len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size() { + lenType = types.Types[types.TINT] + } else { + // len64 := int64(len) + // if int64(int(len64)) != len64 { + // panicunsafeslicelen() + // } + len64 := typecheck.Conv(len, lenType) + nif := ir.NewIfStmt(base.Pos, nil, nil, nil) + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, typecheck.Conv(typecheck.Conv(len64, types.Types[types.TINT]), lenType), len64) + nif.Body.Append(mkcall("panicunsafeslicelen", nil, &nif.Body)) + appendWalkStmt(init, nif) + } + + // if len < 0 { panicunsafeslicelen() } + nif := ir.NewIfStmt(base.Pos, nil, nil, nil) + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, typecheck.Conv(len, lenType), ir.NewInt(base.Pos, 0)) + nif.Body.Append(mkcall("panicunsafeslicelen", nil, &nif.Body)) + appendWalkStmt(init, nif) + + if sliceType.Elem().Size() == 0 { + // if ptr == nil && len > 0 { + // panicunsafesliceptrnil() + // } + nifPtr := ir.NewIfStmt(base.Pos, nil, nil, nil) + isNil := ir.NewBinaryExpr(base.Pos, ir.OEQ, unsafePtr, typecheck.NodNil()) + gtZero := ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(len, lenType), ir.NewInt(base.Pos, 0)) + nifPtr.Cond = + ir.NewLogicalExpr(base.Pos, ir.OANDAND, isNil, gtZero) + nifPtr.Body.Append(mkcall("panicunsafeslicenilptr", nil, 
&nifPtr.Body)) + appendWalkStmt(init, nifPtr) + + h := ir.NewSliceHeaderExpr(n.Pos(), sliceType, + typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]), + typecheck.Conv(len, types.Types[types.TINT]), + typecheck.Conv(len, types.Types[types.TINT])) + return walkExpr(typecheck.Expr(h), init) + } + + // mem, overflow := math.mulUintptr(et.size, len) + mem := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR]) + overflow := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL]) + + decl := types.NewSignature(nil, + []*types.Field{ + types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]), + types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]), + }, + []*types.Field{ + types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]), + types.NewField(base.Pos, nil, types.Types[types.TBOOL]), + }) + + fn := ir.NewFunc(n.Pos(), n.Pos(), math_MulUintptr, decl) + + call := mkcall1(fn.Nname, fn.Type().ResultsTuple(), init, ir.NewInt(base.Pos, sliceType.Elem().Size()), typecheck.Conv(typecheck.Conv(len, lenType), types.Types[types.TUINTPTR])) + appendWalkStmt(init, ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{mem, overflow}, []ir.Node{call})) + + // if overflow || mem > -uintptr(ptr) { + // if ptr == nil { + // panicunsafesliceptrnil() + // } + // panicunsafeslicelen() + // } + nif = ir.NewIfStmt(base.Pos, nil, nil, nil) + memCond := ir.NewBinaryExpr(base.Pos, ir.OGT, mem, ir.NewUnaryExpr(base.Pos, ir.ONEG, typecheck.Conv(unsafePtr, types.Types[types.TUINTPTR]))) + nif.Cond = ir.NewLogicalExpr(base.Pos, ir.OOROR, overflow, memCond) + nifPtr := ir.NewIfStmt(base.Pos, nil, nil, nil) + nifPtr.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, unsafePtr, typecheck.NodNil()) + nifPtr.Body.Append(mkcall("panicunsafeslicenilptr", nil, &nifPtr.Body)) + nif.Body.Append(nifPtr, mkcall("panicunsafeslicelen", nil, &nif.Body)) + appendWalkStmt(init, nif) + } + + h := ir.NewSliceHeaderExpr(n.Pos(), sliceType, + typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]), + 
typecheck.Conv(len, types.Types[types.TINT]), + typecheck.Conv(len, types.Types[types.TINT])) + return walkExpr(typecheck.Expr(h), init) +} + +var math_MulUintptr = &types.Sym{Pkg: types.NewPkg("runtime/internal/math", "math"), Name: "MulUintptr"} + +func walkUnsafeString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { + ptr := safeExpr(n.X, init) + len := safeExpr(n.Y, init) + + lenType := types.Types[types.TINT64] + unsafePtr := typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]) + + // If checkptr enabled, call runtime.unsafestringcheckptr to check ptr and len. + // for simplicity, unsafestringcheckptr always uses int64. + // Type checking guarantees that TIDEAL len are positive and fit in an int. + if ir.ShouldCheckPtr(ir.CurFunc, 1) { + fnname := "unsafestringcheckptr" + fn := typecheck.LookupRuntime(fnname) + init.Append(mkcall1(fn, nil, init, unsafePtr, typecheck.Conv(len, lenType))) + } else { + // Otherwise, open code unsafe.String to prevent runtime call overhead. + // Keep this code in sync with runtime.unsafestring{,64} + if len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size() { + lenType = types.Types[types.TINT] + } else { + // len64 := int64(len) + // if int64(int(len64)) != len64 { + // panicunsafestringlen() + // } + len64 := typecheck.Conv(len, lenType) + nif := ir.NewIfStmt(base.Pos, nil, nil, nil) + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, typecheck.Conv(typecheck.Conv(len64, types.Types[types.TINT]), lenType), len64) + nif.Body.Append(mkcall("panicunsafestringlen", nil, &nif.Body)) + appendWalkStmt(init, nif) + } + + // if len < 0 { panicunsafestringlen() } + nif := ir.NewIfStmt(base.Pos, nil, nil, nil) + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, typecheck.Conv(len, lenType), ir.NewInt(base.Pos, 0)) + nif.Body.Append(mkcall("panicunsafestringlen", nil, &nif.Body)) + appendWalkStmt(init, nif) + + // if uintpr(len) > -uintptr(ptr) { + // if ptr == nil { + // panicunsafestringnilptr() + // } + // 
panicunsafeslicelen() + // } + nifLen := ir.NewIfStmt(base.Pos, nil, nil, nil) + nifLen.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(len, types.Types[types.TUINTPTR]), ir.NewUnaryExpr(base.Pos, ir.ONEG, typecheck.Conv(unsafePtr, types.Types[types.TUINTPTR]))) + nifPtr := ir.NewIfStmt(base.Pos, nil, nil, nil) + nifPtr.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, unsafePtr, typecheck.NodNil()) + nifPtr.Body.Append(mkcall("panicunsafestringnilptr", nil, &nifPtr.Body)) + nifLen.Body.Append(nifPtr, mkcall("panicunsafestringlen", nil, &nifLen.Body)) + appendWalkStmt(init, nifLen) + } + h := ir.NewStringHeaderExpr(n.Pos(), + typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]), + typecheck.Conv(len, types.Types[types.TINT]), + ) + return walkExpr(typecheck.Expr(h), init) +} + +func badtype(op ir.Op, tl, tr *types.Type) { + var s string + if tl != nil { + s += fmt.Sprintf("\n\t%v", tl) + } + if tr != nil { + s += fmt.Sprintf("\n\t%v", tr) + } + + // common mistake: *struct and *interface. + if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() { + if tl.Elem().IsStruct() && tr.Elem().IsInterface() { + s += "\n\t(*struct vs *interface)" + } else if tl.Elem().IsInterface() && tr.Elem().IsStruct() { + s += "\n\t(*interface vs *struct)" + } + } + + base.Errorf("illegal types for operand: %v%s", op, s) +} + +func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node { + return typecheck.LookupRuntime(name, l, r) +} + +// isRuneCount reports whether n is of the form len([]rune(string)). +// These are optimized into a call to runtime.countrunes. +func isRuneCount(n ir.Node) bool { + return base.Flag.N == 0 && !base.Flag.Cfg.Instrumenting && n.Op() == ir.OLEN && n.(*ir.UnaryExpr).X.Op() == ir.OSTR2RUNES +} + +// isByteCount reports whether n is of the form len(string([]byte)). 
+func isByteCount(n ir.Node) bool { + return base.Flag.N == 0 && !base.Flag.Cfg.Instrumenting && n.Op() == ir.OLEN && + (n.(*ir.UnaryExpr).X.Op() == ir.OBYTES2STR || n.(*ir.UnaryExpr).X.Op() == ir.OBYTES2STRTMP) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/closure.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/closure.go new file mode 100644 index 0000000000000000000000000000000000000000..38c6c03dc496faba9741b1ad3e7b6279d94cb04b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/closure.go @@ -0,0 +1,230 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package walk + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +// directClosureCall rewrites a direct call of a function literal into +// a normal function call with closure variables passed as arguments. +// This avoids allocation of a closure object. +// +// For illustration, the following call: +// +// func(a int) { +// println(byval) +// byref++ +// }(42) +// +// becomes: +// +// func(byval int, &byref *int, a int) { +// println(byval) +// (*&byref)++ +// }(byval, &byref, 42) +func directClosureCall(n *ir.CallExpr) { + clo := n.Fun.(*ir.ClosureExpr) + clofn := clo.Func + + if ir.IsTrivialClosure(clo) { + return // leave for walkClosure to handle + } + + // We are going to insert captured variables before input args. + var params []*types.Field + var decls []*ir.Name + for _, v := range clofn.ClosureVars { + if !v.Byval() { + // If v of type T is captured by reference, + // we introduce function param &v *T + // and v remains PAUTOHEAP with &v heapaddr + // (accesses will implicitly deref &v). 
+ + addr := ir.NewNameAt(clofn.Pos(), typecheck.Lookup("&"+v.Sym().Name), types.NewPtr(v.Type())) + addr.Curfn = clofn + v.Heapaddr = addr + v = addr + } + + v.Class = ir.PPARAM + decls = append(decls, v) + + fld := types.NewField(src.NoXPos, v.Sym(), v.Type()) + fld.Nname = v + params = append(params, fld) + } + + // f is ONAME of the actual function. + f := clofn.Nname + typ := f.Type() + + // Create new function type with parameters prepended, and + // then update type and declarations. + typ = types.NewSignature(nil, append(params, typ.Params()...), typ.Results()) + f.SetType(typ) + clofn.Dcl = append(decls, clofn.Dcl...) + + // Rewrite call. + n.Fun = f + n.Args.Prepend(closureArgs(clo)...) + + // Update the call expression's type. We need to do this + // because typecheck gave it the result type of the OCLOSURE + // node, but we only rewrote the ONAME node's type. Logically, + // they're the same, but the stack offsets probably changed. + if typ.NumResults() == 1 { + n.SetType(typ.Result(0).Type) + } else { + n.SetType(typ.ResultsTuple()) + } + + // Add to Closures for enqueueFunc. It's no longer a proper + // closure, but we may have already skipped over it in the + // functions list as a non-trivial closure, so this just + // ensures it's compiled. + ir.CurFunc.Closures = append(ir.CurFunc.Closures, clofn) +} + +func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node { + clofn := clo.Func + + // If no closure vars, don't bother wrapping. + if ir.IsTrivialClosure(clo) { + if base.Debug.Closure > 0 { + base.WarnfAt(clo.Pos(), "closure converted to global") + } + return clofn.Nname + } + + // The closure is not trivial or directly called, so it's going to stay a closure. + ir.ClosureDebugRuntimeCheck(clo) + clofn.SetNeedctxt(true) + + // The closure expression may be walked more than once if it appeared in composite + // literal initialization (e.g, see issue #49029). 
+ // + // Don't add the closure function to compilation queue more than once, since when + // compiling a function twice would lead to an ICE. + if !clofn.Walked() { + clofn.SetWalked(true) + ir.CurFunc.Closures = append(ir.CurFunc.Closures, clofn) + } + + typ := typecheck.ClosureType(clo) + + clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, typ, nil) + clos.SetEsc(clo.Esc()) + clos.List = append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, clofn.Nname)}, closureArgs(clo)...) + for i, value := range clos.List { + clos.List[i] = ir.NewStructKeyExpr(base.Pos, typ.Field(i), value) + } + + addr := typecheck.NodAddr(clos) + addr.SetEsc(clo.Esc()) + + // Force type conversion from *struct to the func type. + cfn := typecheck.ConvNop(addr, clo.Type()) + + // non-escaping temp to use, if any. + if x := clo.Prealloc; x != nil { + if !types.Identical(typ, x.Type()) { + panic("closure type does not match order's assigned type") + } + addr.Prealloc = x + clo.Prealloc = nil + } + + return walkExpr(cfn, init) +} + +// closureArgs returns a slice of expressions that can be used to +// initialize the given closure's free variables. These correspond +// one-to-one with the variables in clo.Func.ClosureVars, and will be +// either an ONAME node (if the variable is captured by value) or an +// OADDR-of-ONAME node (if not). +func closureArgs(clo *ir.ClosureExpr) []ir.Node { + fn := clo.Func + + args := make([]ir.Node, len(fn.ClosureVars)) + for i, v := range fn.ClosureVars { + var outer ir.Node + outer = v.Outer + if !v.Byval() { + outer = typecheck.NodAddrAt(fn.Pos(), outer) + } + args[i] = typecheck.Expr(outer) + } + return args +} + +func walkMethodValue(n *ir.SelectorExpr, init *ir.Nodes) ir.Node { + // Create closure in the form of a composite literal. + // For x.M with receiver (x) type T, the generated code looks like: + // + // clos = &struct{F uintptr; R T}{T.M·f, x} + // + // Like walkClosure above. 
+ + if n.X.Type().IsInterface() { + // Trigger panic for method on nil interface now. + // Otherwise it happens in the wrapper and is confusing. + n.X = cheapExpr(n.X, init) + n.X = walkExpr(n.X, nil) + + tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X) + check := ir.NewUnaryExpr(base.Pos, ir.OCHECKNIL, tab) + init.Append(typecheck.Stmt(check)) + } + + typ := typecheck.MethodValueType(n) + + clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, typ, nil) + clos.SetEsc(n.Esc()) + clos.List = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, methodValueWrapper(n)), n.X} + + addr := typecheck.NodAddr(clos) + addr.SetEsc(n.Esc()) + + // Force type conversion from *struct to the func type. + cfn := typecheck.ConvNop(addr, n.Type()) + + // non-escaping temp to use, if any. + if x := n.Prealloc; x != nil { + if !types.Identical(typ, x.Type()) { + panic("partial call type does not match order's assigned type") + } + addr.Prealloc = x + n.Prealloc = nil + } + + return walkExpr(cfn, init) +} + +// methodValueWrapper returns the ONAME node representing the +// wrapper function (*-fm) needed for the given method value. If the +// wrapper function hasn't already been created yet, it's created and +// added to typecheck.Target.Decls. 
+func methodValueWrapper(dot *ir.SelectorExpr) *ir.Name { + if dot.Op() != ir.OMETHVALUE { + base.Fatalf("methodValueWrapper: unexpected %v (%v)", dot, dot.Op()) + } + + meth := dot.Sel + rcvrtype := dot.X.Type() + sym := ir.MethodSymSuffix(rcvrtype, meth, "-fm") + + if sym.Uniq() { + return sym.Def.(*ir.Name) + } + sym.SetUniq(true) + + base.FatalfAt(dot.Pos(), "missing wrapper for %v", meth) + panic("unreachable") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/compare.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/compare.go new file mode 100644 index 0000000000000000000000000000000000000000..625cfecee0c8feadaeec0421fe35ddd82c49af82 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/compare.go @@ -0,0 +1,514 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package walk + +import ( + "encoding/binary" + "fmt" + "go/constant" + "hash/fnv" + "io" + + "cmd/compile/internal/base" + "cmd/compile/internal/compare" + "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" +) + +func fakePC(n ir.Node) ir.Node { + // In order to get deterministic IDs, we include the package path, absolute filename, line number, column number + // in the calculation of the fakePC for the IR node. + hash := fnv.New32() + // We ignore the errors here because the `io.Writer` in the `hash.Hash` interface never returns an error. 
+ io.WriteString(hash, base.Ctxt.Pkgpath) + io.WriteString(hash, base.Ctxt.PosTable.Pos(n.Pos()).AbsFilename()) + binary.Write(hash, binary.LittleEndian, int64(n.Pos().Line())) + binary.Write(hash, binary.LittleEndian, int64(n.Pos().Col())) + // We also include the string representation of the node to distinguish autogenerated expression since + // those get the same `src.XPos` + io.WriteString(hash, fmt.Sprintf("%v", n)) + + return ir.NewInt(base.Pos, int64(hash.Sum32())) +} + +// The result of walkCompare MUST be assigned back to n, e.g. +// +// n.Left = walkCompare(n.Left, init) +func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { + if n.X.Type().IsInterface() && n.Y.Type().IsInterface() && n.X.Op() != ir.ONIL && n.Y.Op() != ir.ONIL { + return walkCompareInterface(n, init) + } + + if n.X.Type().IsString() && n.Y.Type().IsString() { + return walkCompareString(n, init) + } + + n.X = walkExpr(n.X, init) + n.Y = walkExpr(n.Y, init) + + // Given mixed interface/concrete comparison, + // rewrite into types-equal && data-equal. + // This is efficient, avoids allocations, and avoids runtime calls. + // + // TODO(mdempsky): It would be more general and probably overall + // simpler to just extend walkCompareInterface to optimize when one + // operand is an OCONVIFACE. + if n.X.Type().IsInterface() != n.Y.Type().IsInterface() { + // Preserve side-effects in case of short-circuiting; see #32187. + l := cheapExpr(n.X, init) + r := cheapExpr(n.Y, init) + // Swap so that l is the interface value and r is the concrete value. + if n.Y.Type().IsInterface() { + l, r = r, l + } + + // Handle both == and !=. + eq := n.Op() + andor := ir.OOROR + if eq == ir.OEQ { + andor = ir.OANDAND + } + // Check for types equal. + // For empty interface, this is: + // l.tab == type(r) + // For non-empty interface, this is: + // l.tab != nil && l.tab._type == type(r) + // + // TODO(mdempsky): For non-empty interface comparisons, just + // compare against the itab address directly? 
+ var eqtype ir.Node + tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, l) + rtyp := reflectdata.CompareRType(base.Pos, n) + if l.Type().IsEmptyInterface() { + tab.SetType(types.NewPtr(types.Types[types.TUINT8])) + tab.SetTypecheck(1) + eqtype = ir.NewBinaryExpr(base.Pos, eq, tab, rtyp) + } else { + nonnil := ir.NewBinaryExpr(base.Pos, brcom(eq), typecheck.NodNil(), tab) + match := ir.NewBinaryExpr(base.Pos, eq, itabType(tab), rtyp) + eqtype = ir.NewLogicalExpr(base.Pos, andor, nonnil, match) + } + // Check for data equal. + eqdata := ir.NewBinaryExpr(base.Pos, eq, ifaceData(n.Pos(), l, r.Type()), r) + // Put it all together. + expr := ir.NewLogicalExpr(base.Pos, andor, eqtype, eqdata) + return finishCompare(n, expr, init) + } + + // Must be comparison of array or struct. + // Otherwise back end handles it. + // While we're here, decide whether to + // inline or call an eq alg. + t := n.X.Type() + var inline bool + + maxcmpsize := int64(4) + unalignedLoad := ssagen.Arch.LinkArch.CanMergeLoads + if unalignedLoad { + // Keep this low enough to generate less code than a function call. + maxcmpsize = 2 * int64(ssagen.Arch.LinkArch.RegSize) + } + + switch t.Kind() { + default: + if base.Debug.Libfuzzer != 0 && t.IsInteger() && (n.X.Name() == nil || !n.X.Name().Libfuzzer8BitCounter()) { + n.X = cheapExpr(n.X, init) + n.Y = cheapExpr(n.Y, init) + + // If exactly one comparison operand is + // constant, invoke the constcmp functions + // instead, and arrange for the constant + // operand to be the first argument. 
+ l, r := n.X, n.Y + if r.Op() == ir.OLITERAL { + l, r = r, l + } + constcmp := l.Op() == ir.OLITERAL && r.Op() != ir.OLITERAL + + var fn string + var paramType *types.Type + switch t.Size() { + case 1: + fn = "libfuzzerTraceCmp1" + if constcmp { + fn = "libfuzzerTraceConstCmp1" + } + paramType = types.Types[types.TUINT8] + case 2: + fn = "libfuzzerTraceCmp2" + if constcmp { + fn = "libfuzzerTraceConstCmp2" + } + paramType = types.Types[types.TUINT16] + case 4: + fn = "libfuzzerTraceCmp4" + if constcmp { + fn = "libfuzzerTraceConstCmp4" + } + paramType = types.Types[types.TUINT32] + case 8: + fn = "libfuzzerTraceCmp8" + if constcmp { + fn = "libfuzzerTraceConstCmp8" + } + paramType = types.Types[types.TUINT64] + default: + base.Fatalf("unexpected integer size %d for %v", t.Size(), t) + } + init.Append(mkcall(fn, nil, init, tracecmpArg(l, paramType, init), tracecmpArg(r, paramType, init), fakePC(n))) + } + return n + case types.TARRAY: + // We can compare several elements at once with 2/4/8 byte integer compares + inline = t.NumElem() <= 1 || (types.IsSimple[t.Elem().Kind()] && (t.NumElem() <= 4 || t.Elem().Size()*t.NumElem() <= maxcmpsize)) + case types.TSTRUCT: + inline = compare.EqStructCost(t) <= 4 + } + + cmpl := n.X + for cmpl != nil && cmpl.Op() == ir.OCONVNOP { + cmpl = cmpl.(*ir.ConvExpr).X + } + cmpr := n.Y + for cmpr != nil && cmpr.Op() == ir.OCONVNOP { + cmpr = cmpr.(*ir.ConvExpr).X + } + + // Chose not to inline. Call equality function directly. + if !inline { + // eq algs take pointers; cmpl and cmpr must be addressable + if !ir.IsAddressable(cmpl) || !ir.IsAddressable(cmpr) { + base.Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr) + } + + // Should only arrive here with large memory or + // a struct/array containing a non-memory field/element. + // Small memory is handled inline, and single non-memory + // is handled by walkCompare. 
+ fn, needsLength := reflectdata.EqFor(t) + call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil) + call.Args.Append(typecheck.NodAddr(cmpl)) + call.Args.Append(typecheck.NodAddr(cmpr)) + if needsLength { + call.Args.Append(ir.NewInt(base.Pos, t.Size())) + } + res := ir.Node(call) + if n.Op() != ir.OEQ { + res = ir.NewUnaryExpr(base.Pos, ir.ONOT, res) + } + return finishCompare(n, res, init) + } + + // inline: build boolean expression comparing element by element + andor := ir.OANDAND + if n.Op() == ir.ONE { + andor = ir.OOROR + } + var expr ir.Node + comp := func(el, er ir.Node) { + a := ir.NewBinaryExpr(base.Pos, n.Op(), el, er) + if expr == nil { + expr = a + } else { + expr = ir.NewLogicalExpr(base.Pos, andor, expr, a) + } + } + and := func(cond ir.Node) { + if expr == nil { + expr = cond + } else { + expr = ir.NewLogicalExpr(base.Pos, andor, expr, cond) + } + } + cmpl = safeExpr(cmpl, init) + cmpr = safeExpr(cmpr, init) + if t.IsStruct() { + conds, _ := compare.EqStruct(t, cmpl, cmpr) + if n.Op() == ir.OEQ { + for _, cond := range conds { + and(cond) + } + } else { + for _, cond := range conds { + notCond := ir.NewUnaryExpr(base.Pos, ir.ONOT, cond) + and(notCond) + } + } + } else { + step := int64(1) + remains := t.NumElem() * t.Elem().Size() + combine64bit := unalignedLoad && types.RegSize == 8 && t.Elem().Size() <= 4 && t.Elem().IsInteger() + combine32bit := unalignedLoad && t.Elem().Size() <= 2 && t.Elem().IsInteger() + combine16bit := unalignedLoad && t.Elem().Size() == 1 && t.Elem().IsInteger() + for i := int64(0); remains > 0; { + var convType *types.Type + switch { + case remains >= 8 && combine64bit: + convType = types.Types[types.TINT64] + step = 8 / t.Elem().Size() + case remains >= 4 && combine32bit: + convType = types.Types[types.TUINT32] + step = 4 / t.Elem().Size() + case remains >= 2 && combine16bit: + convType = types.Types[types.TUINT16] + step = 2 / t.Elem().Size() + default: + step = 1 + } + if step == 1 { + comp( + ir.NewIndexExpr(base.Pos, 
cmpl, ir.NewInt(base.Pos, i)), + ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(base.Pos, i)), + ) + i++ + remains -= t.Elem().Size() + } else { + elemType := t.Elem().ToUnsigned() + cmplw := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(base.Pos, i))) + cmplw = typecheck.Conv(cmplw, elemType) // convert to unsigned + cmplw = typecheck.Conv(cmplw, convType) // widen + cmprw := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(base.Pos, i))) + cmprw = typecheck.Conv(cmprw, elemType) + cmprw = typecheck.Conv(cmprw, convType) + // For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ... + // ssa will generate a single large load. + for offset := int64(1); offset < step; offset++ { + lb := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(base.Pos, i+offset))) + lb = typecheck.Conv(lb, elemType) + lb = typecheck.Conv(lb, convType) + lb = ir.NewBinaryExpr(base.Pos, ir.OLSH, lb, ir.NewInt(base.Pos, 8*t.Elem().Size()*offset)) + cmplw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmplw, lb) + rb := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(base.Pos, i+offset))) + rb = typecheck.Conv(rb, elemType) + rb = typecheck.Conv(rb, convType) + rb = ir.NewBinaryExpr(base.Pos, ir.OLSH, rb, ir.NewInt(base.Pos, 8*t.Elem().Size()*offset)) + cmprw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmprw, rb) + } + comp(cmplw, cmprw) + i += step + remains -= step * t.Elem().Size() + } + } + } + if expr == nil { + expr = ir.NewBool(base.Pos, n.Op() == ir.OEQ) + // We still need to use cmpl and cmpr, in case they contain + // an expression which might panic. See issue 23837. 
+ a1 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, ir.BlankNode, cmpl)) + a2 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, ir.BlankNode, cmpr)) + init.Append(a1, a2) + } + return finishCompare(n, expr, init) +} + +func walkCompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { + n.Y = cheapExpr(n.Y, init) + n.X = cheapExpr(n.X, init) + eqtab, eqdata := compare.EqInterface(n.X, n.Y) + var cmp ir.Node + if n.Op() == ir.OEQ { + cmp = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqtab, eqdata) + } else { + eqtab.SetOp(ir.ONE) + cmp = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqtab, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqdata)) + } + return finishCompare(n, cmp, init) +} + +func walkCompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { + if base.Debug.Libfuzzer != 0 { + if !ir.IsConst(n.X, constant.String) || !ir.IsConst(n.Y, constant.String) { + fn := "libfuzzerHookStrCmp" + n.X = cheapExpr(n.X, init) + n.Y = cheapExpr(n.Y, init) + paramType := types.Types[types.TSTRING] + init.Append(mkcall(fn, nil, init, tracecmpArg(n.X, paramType, init), tracecmpArg(n.Y, paramType, init), fakePC(n))) + } + } + // Rewrite comparisons to short constant strings as length+byte-wise comparisons. + var cs, ncs ir.Node // const string, non-const string + switch { + case ir.IsConst(n.X, constant.String) && ir.IsConst(n.Y, constant.String): + // ignore; will be constant evaluated + case ir.IsConst(n.X, constant.String): + cs = n.X + ncs = n.Y + case ir.IsConst(n.Y, constant.String): + cs = n.Y + ncs = n.X + } + if cs != nil { + cmp := n.Op() + // Our comparison below assumes that the non-constant string + // is on the left hand side, so rewrite "" cmp x to x cmp "". + // See issue 24817. + if ir.IsConst(n.X, constant.String) { + cmp = brrev(cmp) + } + + // maxRewriteLen was chosen empirically. + // It is the value that minimizes cmd/go file size + // across most architectures. + // See the commit description for CL 26758 for details. 
+ maxRewriteLen := 6 + // Some architectures can load unaligned byte sequence as 1 word. + // So we can cover longer strings with the same amount of code. + canCombineLoads := ssagen.Arch.LinkArch.CanMergeLoads + combine64bit := false + if canCombineLoads { + // Keep this low enough to generate less code than a function call. + maxRewriteLen = 2 * ssagen.Arch.LinkArch.RegSize + combine64bit = ssagen.Arch.LinkArch.RegSize >= 8 + } + + var and ir.Op + switch cmp { + case ir.OEQ: + and = ir.OANDAND + case ir.ONE: + and = ir.OOROR + default: + // Don't do byte-wise comparisons for <, <=, etc. + // They're fairly complicated. + // Length-only checks are ok, though. + maxRewriteLen = 0 + } + if s := ir.StringVal(cs); len(s) <= maxRewriteLen { + if len(s) > 0 { + ncs = safeExpr(ncs, init) + } + r := ir.Node(ir.NewBinaryExpr(base.Pos, cmp, ir.NewUnaryExpr(base.Pos, ir.OLEN, ncs), ir.NewInt(base.Pos, int64(len(s))))) + remains := len(s) + for i := 0; remains > 0; { + if remains == 1 || !canCombineLoads { + cb := ir.NewInt(base.Pos, int64(s[i])) + ncb := ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(base.Pos, int64(i))) + r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, ncb, cb)) + remains-- + i++ + continue + } + var step int + var convType *types.Type + switch { + case remains >= 8 && combine64bit: + convType = types.Types[types.TINT64] + step = 8 + case remains >= 4: + convType = types.Types[types.TUINT32] + step = 4 + case remains >= 2: + convType = types.Types[types.TUINT16] + step = 2 + } + ncsubstr := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(base.Pos, int64(i))), convType) + csubstr := int64(s[i]) + // Calculate large constant from bytes as sequence of shifts and ors. + // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ... + // ssa will combine this into a single large load. 
+ for offset := 1; offset < step; offset++ { + b := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(base.Pos, int64(i+offset))), convType) + b = ir.NewBinaryExpr(base.Pos, ir.OLSH, b, ir.NewInt(base.Pos, int64(8*offset))) + ncsubstr = ir.NewBinaryExpr(base.Pos, ir.OOR, ncsubstr, b) + csubstr |= int64(s[i+offset]) << uint8(8*offset) + } + csubstrPart := ir.NewInt(base.Pos, csubstr) + // Compare "step" bytes as once + r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, csubstrPart, ncsubstr)) + remains -= step + i += step + } + return finishCompare(n, r, init) + } + } + + var r ir.Node + if n.Op() == ir.OEQ || n.Op() == ir.ONE { + // prepare for rewrite below + n.X = cheapExpr(n.X, init) + n.Y = cheapExpr(n.Y, init) + eqlen, eqmem := compare.EqString(n.X, n.Y) + // quick check of len before full compare for == or !=. + // memequal then tests equality up to length len. + if n.Op() == ir.OEQ { + // len(left) == len(right) && memequal(left, right, len) + r = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqlen, eqmem) + } else { + // len(left) != len(right) || !memequal(left, right, len) + eqlen.SetOp(ir.ONE) + r = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqlen, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqmem)) + } + } else { + // sys_cmpstring(s1, s2) :: 0 + r = mkcall("cmpstring", types.Types[types.TINT], init, typecheck.Conv(n.X, types.Types[types.TSTRING]), typecheck.Conv(n.Y, types.Types[types.TSTRING])) + r = ir.NewBinaryExpr(base.Pos, n.Op(), r, ir.NewInt(base.Pos, 0)) + } + + return finishCompare(n, r, init) +} + +// The result of finishCompare MUST be assigned back to n, e.g. +// +// n.Left = finishCompare(n.Left, x, r, init) +func finishCompare(n *ir.BinaryExpr, r ir.Node, init *ir.Nodes) ir.Node { + r = typecheck.Expr(r) + r = typecheck.Conv(r, n.Type()) + r = walkExpr(r, init) + return r +} + +// brcom returns !(op). +// For example, brcom(==) is !=. 
+func brcom(op ir.Op) ir.Op { + switch op { + case ir.OEQ: + return ir.ONE + case ir.ONE: + return ir.OEQ + case ir.OLT: + return ir.OGE + case ir.OGT: + return ir.OLE + case ir.OLE: + return ir.OGT + case ir.OGE: + return ir.OLT + } + base.Fatalf("brcom: no com for %v\n", op) + return op +} + +// brrev returns reverse(op). +// For example, Brrev(<) is >. +func brrev(op ir.Op) ir.Op { + switch op { + case ir.OEQ: + return ir.OEQ + case ir.ONE: + return ir.ONE + case ir.OLT: + return ir.OGT + case ir.OGT: + return ir.OLT + case ir.OLE: + return ir.OGE + case ir.OGE: + return ir.OLE + } + base.Fatalf("brrev: no rev for %v\n", op) + return op +} + +func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node { + // Ugly hack to avoid "constant -1 overflows uintptr" errors, etc. + if n.Op() == ir.OLITERAL && n.Type().IsSigned() && ir.Int64Val(n) < 0 { + n = copyExpr(n, n.Type(), init) + } + + return typecheck.Conv(n, t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/complit.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/complit.go new file mode 100644 index 0000000000000000000000000000000000000000..adc44ca49d07e5b0d93cc2c758b750e727f9344c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/complit.go @@ -0,0 +1,684 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package walk + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/ssa" + "cmd/compile/internal/staticdata" + "cmd/compile/internal/staticinit" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/obj" +) + +// walkCompLit walks a composite literal node: +// OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT (all CompLitExpr), or OPTRLIT (AddrExpr). 
+func walkCompLit(n ir.Node, init *ir.Nodes) ir.Node { + if isStaticCompositeLiteral(n) && !ssa.CanSSA(n.Type()) { + n := n.(*ir.CompLitExpr) // not OPTRLIT + // n can be directly represented in the read-only data section. + // Make direct reference to the static data. See issue 12841. + vstat := readonlystaticname(n.Type()) + fixedlit(inInitFunction, initKindStatic, n, vstat, init) + return typecheck.Expr(vstat) + } + var_ := typecheck.TempAt(base.Pos, ir.CurFunc, n.Type()) + anylit(n, var_, init) + return var_ +} + +// initContext is the context in which static data is populated. +// It is either in an init function or in any other function. +// Static data populated in an init function will be written either +// zero times (as a readonly, static data symbol) or +// one time (during init function execution). +// Either way, there is no opportunity for races or further modification, +// so the data can be written to a (possibly readonly) data symbol. +// Static data populated in any other function needs to be local to +// that function to allow multiple instances of that function +// to execute concurrently without clobbering each others' data. +type initContext uint8 + +const ( + inInitFunction initContext = iota + inNonInitFunction +) + +func (c initContext) String() string { + if c == inInitFunction { + return "inInitFunction" + } + return "inNonInitFunction" +} + +// readonlystaticname returns a name backed by a read-only static data symbol. +func readonlystaticname(t *types.Type) *ir.Name { + n := staticinit.StaticName(t) + n.MarkReadonly() + n.Linksym().Set(obj.AttrContentAddressable, true) + n.Linksym().Set(obj.AttrLocal, true) + return n +} + +func isSimpleName(nn ir.Node) bool { + if nn.Op() != ir.ONAME || ir.IsBlank(nn) { + return false + } + n := nn.(*ir.Name) + return n.OnStack() +} + +// initGenType is a bitmap indicating the types of generation that will occur for a static value. 
+type initGenType uint8 + +const ( + initDynamic initGenType = 1 << iota // contains some dynamic values, for which init code will be generated + initConst // contains some constant values, which may be written into data symbols +) + +// getdyn calculates the initGenType for n. +// If top is false, getdyn is recursing. +func getdyn(n ir.Node, top bool) initGenType { + switch n.Op() { + default: + if ir.IsConstNode(n) { + return initConst + } + return initDynamic + + case ir.OSLICELIT: + n := n.(*ir.CompLitExpr) + if !top { + return initDynamic + } + if n.Len/4 > int64(len(n.List)) { + // <25% of entries have explicit values. + // Very rough estimation, it takes 4 bytes of instructions + // to initialize 1 byte of result. So don't use a static + // initializer if the dynamic initialization code would be + // smaller than the static value. + // See issue 23780. + return initDynamic + } + + case ir.OARRAYLIT, ir.OSTRUCTLIT: + } + lit := n.(*ir.CompLitExpr) + + var mode initGenType + for _, n1 := range lit.List { + switch n1.Op() { + case ir.OKEY: + n1 = n1.(*ir.KeyExpr).Value + case ir.OSTRUCTKEY: + n1 = n1.(*ir.StructKeyExpr).Value + } + mode |= getdyn(n1, false) + if mode == initDynamic|initConst { + break + } + } + return mode +} + +// isStaticCompositeLiteral reports whether n is a compile-time constant. +func isStaticCompositeLiteral(n ir.Node) bool { + switch n.Op() { + case ir.OSLICELIT: + return false + case ir.OARRAYLIT: + n := n.(*ir.CompLitExpr) + for _, r := range n.List { + if r.Op() == ir.OKEY { + r = r.(*ir.KeyExpr).Value + } + if !isStaticCompositeLiteral(r) { + return false + } + } + return true + case ir.OSTRUCTLIT: + n := n.(*ir.CompLitExpr) + for _, r := range n.List { + r := r.(*ir.StructKeyExpr) + if !isStaticCompositeLiteral(r.Value) { + return false + } + } + return true + case ir.OLITERAL, ir.ONIL: + return true + case ir.OCONVIFACE: + // See staticassign's OCONVIFACE case for comments. 
+ n := n.(*ir.ConvExpr) + val := ir.Node(n) + for val.Op() == ir.OCONVIFACE { + val = val.(*ir.ConvExpr).X + } + if val.Type().IsInterface() { + return val.Op() == ir.ONIL + } + if types.IsDirectIface(val.Type()) && val.Op() == ir.ONIL { + return true + } + return isStaticCompositeLiteral(val) + } + return false +} + +// initKind is a kind of static initialization: static, dynamic, or local. +// Static initialization represents literals and +// literal components of composite literals. +// Dynamic initialization represents non-literals and +// non-literal components of composite literals. +// LocalCode initialization represents initialization +// that occurs purely in generated code local to the function of use. +// Initialization code is sometimes generated in passes, +// first static then dynamic. +type initKind uint8 + +const ( + initKindStatic initKind = iota + 1 + initKindDynamic + initKindLocalCode +) + +// fixedlit handles struct, array, and slice literals. +// TODO: expand documentation. 
+func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) { + isBlank := var_ == ir.BlankNode + var splitnode func(ir.Node) (a ir.Node, value ir.Node) + switch n.Op() { + case ir.OARRAYLIT, ir.OSLICELIT: + var k int64 + splitnode = func(r ir.Node) (ir.Node, ir.Node) { + if r.Op() == ir.OKEY { + kv := r.(*ir.KeyExpr) + k = typecheck.IndexConst(kv.Key) + if k < 0 { + base.Fatalf("fixedlit: invalid index %v", kv.Key) + } + r = kv.Value + } + a := ir.NewIndexExpr(base.Pos, var_, ir.NewInt(base.Pos, k)) + k++ + if isBlank { + return ir.BlankNode, r + } + return a, r + } + case ir.OSTRUCTLIT: + splitnode = func(rn ir.Node) (ir.Node, ir.Node) { + r := rn.(*ir.StructKeyExpr) + if r.Sym().IsBlank() || isBlank { + return ir.BlankNode, r.Value + } + ir.SetPos(r) + return ir.NewSelectorExpr(base.Pos, ir.ODOT, var_, r.Sym()), r.Value + } + default: + base.Fatalf("fixedlit bad op: %v", n.Op()) + } + + for _, r := range n.List { + a, value := splitnode(r) + if a == ir.BlankNode && !staticinit.AnySideEffects(value) { + // Discard. + continue + } + + switch value.Op() { + case ir.OSLICELIT: + value := value.(*ir.CompLitExpr) + if (kind == initKindStatic && ctxt == inNonInitFunction) || (kind == initKindDynamic && ctxt == inInitFunction) { + var sinit ir.Nodes + slicelit(ctxt, value, a, &sinit) + if kind == initKindStatic { + // When doing static initialization, init statements may contain dynamic + // expression, which will be initialized later, causing liveness analysis + // confuses about variables lifetime. So making sure those expressions + // are ordered correctly here. See issue #52673. + orderBlock(&sinit, map[string][]*ir.Name{}) + typecheck.Stmts(sinit) + walkStmtList(sinit) + } + init.Append(sinit...) 
+ continue + } + + case ir.OARRAYLIT, ir.OSTRUCTLIT: + value := value.(*ir.CompLitExpr) + fixedlit(ctxt, kind, value, a, init) + continue + } + + islit := ir.IsConstNode(value) + if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) { + continue + } + + // build list of assignments: var[index] = expr + ir.SetPos(a) + as := ir.NewAssignStmt(base.Pos, a, value) + as = typecheck.Stmt(as).(*ir.AssignStmt) + switch kind { + case initKindStatic: + genAsStatic(as) + case initKindDynamic, initKindLocalCode: + appendWalkStmt(init, orderStmtInPlace(as, map[string][]*ir.Name{})) + default: + base.Fatalf("fixedlit: bad kind %d", kind) + } + + } +} + +func isSmallSliceLit(n *ir.CompLitExpr) bool { + if n.Op() != ir.OSLICELIT { + return false + } + + return n.Type().Elem().Size() == 0 || n.Len <= ir.MaxSmallArraySize/n.Type().Elem().Size() +} + +func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) { + // make an array type corresponding the number of elements we have + t := types.NewArray(n.Type().Elem(), n.Len) + types.CalcSize(t) + + if ctxt == inNonInitFunction { + // put everything into static array + vstat := staticinit.StaticName(t) + + fixedlit(ctxt, initKindStatic, n, vstat, init) + fixedlit(ctxt, initKindDynamic, n, vstat, init) + + // copy static to slice + var_ = typecheck.AssignExpr(var_) + name, offset, ok := staticinit.StaticLoc(var_) + if !ok || name.Class != ir.PEXTERN { + base.Fatalf("slicelit: %v", var_) + } + staticdata.InitSlice(name, offset, vstat.Linksym(), t.NumElem()) + return + } + + // recipe for var = []t{...} + // 1. make a static array + // var vstat [...]t + // 2. assign (data statements) the constant part + // vstat = constpart{} + // 3. make an auto pointer to array and allocate heap to it + // var vauto *[...]t = new([...]t) + // 4. copy the static array to the auto array + // *vauto = vstat + // 5. for each dynamic part assign to the array + // vauto[i] = dynamic part + // 6. 
assign slice of allocated heap to var + // var = vauto[:] + // + // an optimization is done if there is no constant part + // 3. var vauto *[...]t = new([...]t) + // 5. vauto[i] = dynamic part + // 6. var = vauto[:] + + // if the literal contains constants, + // make static initialized array (1),(2) + var vstat ir.Node + + mode := getdyn(n, true) + if mode&initConst != 0 && !isSmallSliceLit(n) { + if ctxt == inInitFunction { + vstat = readonlystaticname(t) + } else { + vstat = staticinit.StaticName(t) + } + fixedlit(ctxt, initKindStatic, n, vstat, init) + } + + // make new auto *array (3 declare) + vauto := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewPtr(t)) + + // set auto to point at new temp or heap (3 assign) + var a ir.Node + if x := n.Prealloc; x != nil { + // temp allocated during order.go for dddarg + if !types.Identical(t, x.Type()) { + panic("dotdotdot base type does not match order's assigned type") + } + a = initStackTemp(init, x, vstat) + } else if n.Esc() == ir.EscNone { + a = initStackTemp(init, typecheck.TempAt(base.Pos, ir.CurFunc, t), vstat) + } else { + a = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(t)) + } + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, vauto, a)) + + if vstat != nil && n.Prealloc == nil && n.Esc() != ir.EscNone { + // If we allocated on the heap with ONEW, copy the static to the + // heap (4). We skip this for stack temporaries, because + // initStackTemp already handled the copy. + a = ir.NewStarExpr(base.Pos, vauto) + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, a, vstat)) + } + + // put dynamics into array (5) + var index int64 + for _, value := range n.List { + if value.Op() == ir.OKEY { + kv := value.(*ir.KeyExpr) + index = typecheck.IndexConst(kv.Key) + if index < 0 { + base.Fatalf("slicelit: invalid index %v", kv.Key) + } + value = kv.Value + } + a := ir.NewIndexExpr(base.Pos, vauto, ir.NewInt(base.Pos, index)) + a.SetBounded(true) + index++ + + // TODO need to check bounds? 
+ + switch value.Op() { + case ir.OSLICELIT: + break + + case ir.OARRAYLIT, ir.OSTRUCTLIT: + value := value.(*ir.CompLitExpr) + k := initKindDynamic + if vstat == nil { + // Generate both static and dynamic initializations. + // See issue #31987. + k = initKindLocalCode + } + fixedlit(ctxt, k, value, a, init) + continue + } + + if vstat != nil && ir.IsConstNode(value) { // already set by copy from static value + continue + } + + // build list of vauto[c] = expr + ir.SetPos(value) + as := ir.NewAssignStmt(base.Pos, a, value) + appendWalkStmt(init, orderStmtInPlace(typecheck.Stmt(as), map[string][]*ir.Name{})) + } + + // make slice out of heap (6) + a = ir.NewAssignStmt(base.Pos, var_, ir.NewSliceExpr(base.Pos, ir.OSLICE, vauto, nil, nil, nil)) + appendWalkStmt(init, orderStmtInPlace(typecheck.Stmt(a), map[string][]*ir.Name{})) +} + +func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) { + // make the map var + args := []ir.Node{ir.TypeNode(n.Type()), ir.NewInt(base.Pos, n.Len+int64(len(n.List)))} + a := typecheck.Expr(ir.NewCallExpr(base.Pos, ir.OMAKE, nil, args)).(*ir.MakeExpr) + a.RType = n.RType + a.SetEsc(n.Esc()) + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, m, a)) + + entries := n.List + + // The order pass already removed any dynamic (runtime-computed) entries. + // All remaining entries are static. Double-check that. + for _, r := range entries { + r := r.(*ir.KeyExpr) + if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) { + base.Fatalf("maplit: entry is not a literal: %v", r) + } + } + + if len(entries) > 25 { + // For a large number of entries, put them in an array and loop. + + // build types [count]Tindex and [count]Tvalue + tk := types.NewArray(n.Type().Key(), int64(len(entries))) + te := types.NewArray(n.Type().Elem(), int64(len(entries))) + + // TODO(#47904): mark tk and te NoAlg here once the + // compiler/linker can handle NoAlg types correctly. 
+ + types.CalcSize(tk) + types.CalcSize(te) + + // make and initialize static arrays + vstatk := readonlystaticname(tk) + vstate := readonlystaticname(te) + + datak := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil) + datae := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil) + for _, r := range entries { + r := r.(*ir.KeyExpr) + datak.List.Append(r.Key) + datae.List.Append(r.Value) + } + fixedlit(inInitFunction, initKindStatic, datak, vstatk, init) + fixedlit(inInitFunction, initKindStatic, datae, vstate, init) + + // loop adding structure elements to map + // for i = 0; i < len(vstatk); i++ { + // map[vstatk[i]] = vstate[i] + // } + i := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT]) + rhs := ir.NewIndexExpr(base.Pos, vstate, i) + rhs.SetBounded(true) + + kidx := ir.NewIndexExpr(base.Pos, vstatk, i) + kidx.SetBounded(true) + + // typechecker rewrites OINDEX to OINDEXMAP + lhs := typecheck.AssignExpr(ir.NewIndexExpr(base.Pos, m, kidx)).(*ir.IndexExpr) + base.AssertfAt(lhs.Op() == ir.OINDEXMAP, lhs.Pos(), "want OINDEXMAP, have %+v", lhs) + lhs.RType = n.RType + + zero := ir.NewAssignStmt(base.Pos, i, ir.NewInt(base.Pos, 0)) + cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(base.Pos, tk.NumElem())) + incr := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(base.Pos, 1))) + + var body ir.Node = ir.NewAssignStmt(base.Pos, lhs, rhs) + body = typecheck.Stmt(body) + body = orderStmtInPlace(body, map[string][]*ir.Name{}) + + loop := ir.NewForStmt(base.Pos, nil, cond, incr, nil, false) + loop.Body = []ir.Node{body} + loop.SetInit([]ir.Node{zero}) + + appendWalkStmt(init, loop) + return + } + // For a small number of entries, just add them directly. + + // Build list of var[c] = expr. + // Use temporaries so that mapassign1 can have addressable key, elem. + // TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys. 
+ // TODO(khr): assign these temps in order phase so we can reuse them across multiple maplits? + tmpkey := typecheck.TempAt(base.Pos, ir.CurFunc, m.Type().Key()) + tmpelem := typecheck.TempAt(base.Pos, ir.CurFunc, m.Type().Elem()) + + for _, r := range entries { + r := r.(*ir.KeyExpr) + index, elem := r.Key, r.Value + + ir.SetPos(index) + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpkey, index)) + + ir.SetPos(elem) + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpelem, elem)) + + ir.SetPos(tmpelem) + + // typechecker rewrites OINDEX to OINDEXMAP + lhs := typecheck.AssignExpr(ir.NewIndexExpr(base.Pos, m, tmpkey)).(*ir.IndexExpr) + base.AssertfAt(lhs.Op() == ir.OINDEXMAP, lhs.Pos(), "want OINDEXMAP, have %+v", lhs) + lhs.RType = n.RType + + var a ir.Node = ir.NewAssignStmt(base.Pos, lhs, tmpelem) + a = typecheck.Stmt(a) + a = orderStmtInPlace(a, map[string][]*ir.Name{}) + appendWalkStmt(init, a) + } +} + +func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) { + t := n.Type() + switch n.Op() { + default: + base.Fatalf("anylit: not lit, op=%v node=%v", n.Op(), n) + + case ir.ONAME: + n := n.(*ir.Name) + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, n)) + + case ir.OMETHEXPR: + n := n.(*ir.SelectorExpr) + anylit(n.FuncName(), var_, init) + + case ir.OPTRLIT: + n := n.(*ir.AddrExpr) + if !t.IsPtr() { + base.Fatalf("anylit: not ptr") + } + + var r ir.Node + if n.Prealloc != nil { + // n.Prealloc is stack temporary used as backing store. 
+ r = initStackTemp(init, n.Prealloc, nil) + } else { + r = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(n.X.Type())) + r.SetEsc(n.Esc()) + } + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, r)) + + var_ = ir.NewStarExpr(base.Pos, var_) + var_ = typecheck.AssignExpr(var_) + anylit(n.X, var_, init) + + case ir.OSTRUCTLIT, ir.OARRAYLIT: + n := n.(*ir.CompLitExpr) + if !t.IsStruct() && !t.IsArray() { + base.Fatalf("anylit: not struct/array") + } + + if isSimpleName(var_) && len(n.List) > 4 { + // lay out static data + vstat := readonlystaticname(t) + + ctxt := inInitFunction + if n.Op() == ir.OARRAYLIT { + ctxt = inNonInitFunction + } + fixedlit(ctxt, initKindStatic, n, vstat, init) + + // copy static to var + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, vstat)) + + // add expressions to automatic + fixedlit(inInitFunction, initKindDynamic, n, var_, init) + break + } + + var components int64 + if n.Op() == ir.OARRAYLIT { + components = t.NumElem() + } else { + components = int64(t.NumFields()) + } + // initialization of an array or struct with unspecified components (missing fields or arrays) + if isSimpleName(var_) || int64(len(n.List)) < components { + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil)) + } + + fixedlit(inInitFunction, initKindLocalCode, n, var_, init) + + case ir.OSLICELIT: + n := n.(*ir.CompLitExpr) + slicelit(inInitFunction, n, var_, init) + + case ir.OMAPLIT: + n := n.(*ir.CompLitExpr) + if !t.IsMap() { + base.Fatalf("anylit: not map") + } + maplit(n, var_, init) + } +} + +// oaslit handles special composite literal assignments. +// It returns true if n's effects have been added to init, +// in which case n should be dropped from the program by the caller. 
func oaslit(n *ir.AssignStmt, init *ir.Nodes) bool {
	if n.X == nil || n.Y == nil {
		// not a special composite literal assignment
		return false
	}
	if n.X.Type() == nil || n.Y.Type() == nil {
		// not a special composite literal assignment
		return false
	}
	if !isSimpleName(n.X) {
		// not a special composite literal assignment:
		// only a plain (non-heap-escaping) name on the LHS qualifies.
		return false
	}
	x := n.X.(*ir.Name)
	if !types.Identical(n.X.Type(), n.Y.Type()) {
		// not a special composite literal assignment
		return false
	}
	if x.Addrtaken() {
		// If x is address-taken, the RHS may (implicitly) use LHS.
		// Not safe to do a special composite literal assignment
		// (which may expand to multiple assignments).
		return false
	}

	switch n.Y.Op() {
	default:
		// not a special composite literal assignment
		return false

	case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
		// The literal initializers run as separate statements; if any
		// of them reads x, the partial updates to x would be visible.
		if ir.Any(n.Y, func(y ir.Node) bool { return ir.Uses(y, x) }) {
			// not safe to do a special composite literal assignment if RHS uses LHS.
			return false
		}
		// Expand the literal directly into x, appending statements to init.
		anylit(n.Y, n.X, init)
	}

	return true
}

// genAsStatic initializes static (linker-initialized) data for the
// assignment as, whose LHS must resolve to a fixed offset within a
// PEXTERN symbol (or be the blank node). The RHS must be a literal
// constant, a method expression, or a function name; anything else
// is a compiler bug.
func genAsStatic(as *ir.AssignStmt) {
	if as.X.Type() == nil {
		base.Fatalf("genAsStatic as.Left not typechecked")
	}

	name, offset, ok := staticinit.StaticLoc(as.X)
	if !ok || (name.Class != ir.PEXTERN && as.X != ir.BlankNode) {
		base.Fatalf("genAsStatic: lhs %v", as.X)
	}

	switch r := as.Y; r.Op() {
	case ir.OLITERAL:
		// Constant value: emit it directly into the data section.
		staticdata.InitConst(name, offset, r, int(r.Type().Size()))
		return
	case ir.OMETHEXPR:
		// Method expression: store the address of the method's func symbol.
		r := r.(*ir.SelectorExpr)
		staticdata.InitAddr(name, offset, staticdata.FuncLinksym(r.FuncName()))
		return
	case ir.ONAME:
		r := r.(*ir.Name)
		if r.Offset_ != 0 {
			base.Fatalf("genAsStatic %+v", as)
		}
		if r.Class == ir.PFUNC {
			// Function name: store the address of its func symbol.
			staticdata.InitAddr(name, offset, staticdata.FuncLinksym(r))
			return
		}
	}
	base.Fatalf("genAsStatic: rhs %v", as.Y)
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/convert.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/convert.go
new file mode 100644
index 0000000000000000000000000000000000000000..280b3b65e82dc63751c7d392d302e46a95f20128
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/convert.go
@@ -0,0 +1,536 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package walk

import (
	"encoding/binary"
	"go/constant"

	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/reflectdata"
	"cmd/compile/internal/ssagen"
	"cmd/compile/internal/typecheck"
	"cmd/compile/internal/types"
	"cmd/internal/sys"
)

// walkConv walks an OCONV or OCONVNOP (but not OCONVIFACE) node.
func walkConv(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
	n.X = walkExpr(n.X, init)
	if n.Op() == ir.OCONVNOP && n.Type() == n.X.Type() {
		// Conversion between identical types is a no-op; drop it.
		return n.X
	}
	if n.Op() == ir.OCONVNOP && ir.ShouldCheckPtr(ir.CurFunc, 1) {
		if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() { // uintptr to unsafe.Pointer
			// Under -d=checkptr, instrument uintptr->unsafe.Pointer
			// conversions that are part of pointer arithmetic.
			return walkCheckPtrArithmetic(n, init)
		}
	}
	// Some architectures lower certain numeric conversions to runtime
	// calls (e.g. float<->64-bit int on ARM/MIPS/386); rtconvfn reports
	// the parameter/result kinds such a helper uses, or Txxx if the
	// conversion needs no helper.
	param, result := rtconvfn(n.X.Type(), n.Type())
	if param == types.Txxx {
		return n
	}
	// Runtime helper name is derived from the kinds, e.g. "float64toint64".
	fn := types.BasicTypeNames[param] + "to" + types.BasicTypeNames[result]
	return typecheck.Conv(mkcall(fn, types.Types[result], init, typecheck.Conv(n.X, types.Types[param])), n.Type())
}

// walkConvInterface walks an OCONVIFACE node, lowering a conversion to
// interface type into explicit type-word/data-word construction
// (OMAKEFACE) or, for interface-to-interface conversions, into the
// equivalent of a two-result type assertion.
func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node {

	n.X = walkExpr(n.X, init)

	fromType := n.X.Type()
	toType := n.Type()
	if !fromType.IsInterface() && !ir.IsBlank(ir.CurFunc.Nname) {
		// skip unnamed functions (func _())
		if fromType.HasShape() {
			// Unified IR uses OCONVIFACE for converting all derived types
			// to interface type. Avoid assertion failure in
			// MarkTypeUsedInInterface, because we've marked used types
			// separately anyway.
		} else {
			reflectdata.MarkTypeUsedInInterface(fromType, ir.CurFunc.LSym)
		}
	}

	if !fromType.IsInterface() {
		// T2I: build the interface value directly from its type word
		// and data word.
		typeWord := reflectdata.ConvIfaceTypeWord(base.Pos, n)
		l := ir.NewBinaryExpr(base.Pos, ir.OMAKEFACE, typeWord, dataWord(n, init))
		l.SetType(toType)
		l.SetTypecheck(n.Typecheck())
		return l
	}
	if fromType.IsEmptyInterface() {
		base.Fatalf("OCONVIFACE can't operate on an empty interface")
	}

	// Evaluate the input interface.
	c := typecheck.TempAt(base.Pos, ir.CurFunc, fromType)
	init.Append(ir.NewAssignStmt(base.Pos, c, n.X))

	if toType.IsEmptyInterface() {
		// Implement interface to empty interface conversion:
		//
		// var res *uint8
		// res = (*uint8)(unsafe.Pointer(itab))
		// if res != nil {
		//	res = res.type
		// }

		// Grab its parts.
		itab := ir.NewUnaryExpr(base.Pos, ir.OITAB, c)
		itab.SetType(types.Types[types.TUINTPTR].PtrTo())
		itab.SetTypecheck(1)
		data := ir.NewUnaryExpr(n.Pos(), ir.OIDATA, c)
		data.SetType(types.Types[types.TUINT8].PtrTo()) // Type is generic pointer - we're just passing it through.
		data.SetTypecheck(1)

		// For the empty interface we only need the *type, which lives
		// inside the itab; guard against a nil itab (nil interface).
		typeWord := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewPtr(types.Types[types.TUINT8]))
		init.Append(ir.NewAssignStmt(base.Pos, typeWord, typecheck.Conv(typecheck.Conv(itab, types.Types[types.TUNSAFEPTR]), typeWord.Type())))
		nif := ir.NewIfStmt(base.Pos, typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.ONE, typeWord, typecheck.NodNil())), nil, nil)
		nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, typeWord, itabType(typeWord))}
		init.Append(nif)

		// Build the result.
		// e = iface{typeWord, data}
		e := ir.NewBinaryExpr(base.Pos, ir.OMAKEFACE, typeWord, data)
		e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE.
		e.SetTypecheck(1)
		return e
	}

	// Must be converting I2I (more specific to less specific interface).
	// Use the same code as e, _ = c.(T).
	var rhs ir.Node
	if n.TypeWord == nil || n.TypeWord.Op() == ir.OADDR && n.TypeWord.(*ir.AddrExpr).X.Op() == ir.OLINKSYMOFFSET {
		// Fixed (not loaded from a dictionary) type.
		ta := ir.NewTypeAssertExpr(base.Pos, c, toType)
		ta.SetOp(ir.ODOTTYPE2)
		// Allocate a descriptor for this conversion to pass to the runtime.
		ta.Descriptor = makeTypeAssertDescriptor(toType, true)
		rhs = ta
	} else {
		// Type word comes from a dictionary (generic code): use the
		// dynamic form of the assertion.
		ta := ir.NewDynamicTypeAssertExpr(base.Pos, ir.ODYNAMICDOTTYPE2, c, n.TypeWord)
		rhs = ta
	}
	rhs.SetType(toType)
	rhs.SetTypecheck(1)

	// res, _ = c.(T) — the comma-ok form never panics; a failed
	// conversion yields the zero interface value.
	res := typecheck.TempAt(base.Pos, ir.CurFunc, toType)
	as := ir.NewAssignListStmt(base.Pos, ir.OAS2DOTTYPE, []ir.Node{res, ir.BlankNode}, []ir.Node{rhs})
	init.Append(as)
	return res
}

// dataWord returns the data word (the second word) used to represent
// conv.X in an interface.
func dataWord(conv *ir.ConvExpr, init *ir.Nodes) ir.Node {
	pos, n := conv.Pos(), conv.X
	fromType := n.Type()

	// If it's a pointer, it is its own representation.
	if types.IsDirectIface(fromType) {
		return n
	}

	// For single-element aggregates, classify by the sole component so
	// e.g. struct{ b bool } gets the same fast path as bool.
	isInteger := fromType.IsInteger()
	isBool := fromType.IsBoolean()
	if sc := fromType.SoleComponent(); sc != nil {
		isInteger = sc.IsInteger()
		isBool = sc.IsBoolean()
	}
	// Try a bunch of cases to avoid an allocation.
	var value ir.Node
	switch {
	case fromType.Size() == 0:
		// n is zero-sized. Use zerobase.
		cheapExpr(n, init) // Evaluate n for side-effects. See issue 19246.
		value = ir.NewLinksymExpr(base.Pos, ir.Syms.Zerobase, types.Types[types.TUINTPTR])
	case isBool || fromType.Size() == 1 && isInteger:
		// n is a bool/byte. Use staticuint64s[n * 8] on little-endian
		// and staticuint64s[n * 8 + 7] on big-endian.
		n = cheapExpr(n, init)
		n = soleComponent(init, n)
		// byteindex widens n so that the multiplication doesn't overflow.
		index := ir.NewBinaryExpr(base.Pos, ir.OLSH, byteindex(n), ir.NewInt(base.Pos, 3))
		if ssagen.Arch.LinkArch.ByteOrder == binary.BigEndian {
			index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, ir.NewInt(base.Pos, 7))
		}
		// The actual type is [256]uint64, but we use [256*8]uint8 so we can address
		// individual bytes.
		staticuint64s := ir.NewLinksymExpr(base.Pos, ir.Syms.Staticuint64s, types.NewArray(types.Types[types.TUINT8], 256*8))
		xe := ir.NewIndexExpr(base.Pos, staticuint64s, index)
		xe.SetBounded(true) // index is provably in [0,256*8); skip the bounds check.
		value = xe
	case n.Op() == ir.ONAME && n.(*ir.Name).Class == ir.PEXTERN && n.(*ir.Name).Readonly():
		// n is a readonly global; use it directly.
		value = n
	case conv.Esc() == ir.EscNone && fromType.Size() <= 1024:
		// n does not escape. Use a stack temporary initialized to n.
		value = typecheck.TempAt(base.Pos, ir.CurFunc, fromType)
		init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, value, n)))
	}
	if value != nil {
		// The interface data word is &value.
		return typecheck.Expr(typecheck.NodAddr(value))
	}

	// Time to do an allocation. We'll call into the runtime for that.
	fnname, argType, needsaddr := dataWordFuncName(fromType)
	var fn *ir.Name

	var args []ir.Node
	if needsaddr {
		// Types of large or unknown size are passed by reference.
		// Orderexpr arranged for n to be a temporary for all
		// the conversions it could see. Comparison of an interface
		// with a non-interface, especially in a switch on interface value
		// with non-interface cases, is not visible to order.stmt, so we
		// have to fall back on allocating a temp here.
		if !ir.IsAddressable(n) {
			n = copyExpr(n, fromType, init)
		}
		fn = typecheck.LookupRuntime(fnname, fromType)
		args = []ir.Node{reflectdata.ConvIfaceSrcRType(base.Pos, conv), typecheck.NodAddr(n)}
	} else {
		// Use a specialized conversion routine that takes the type being
		// converted by value, not by pointer.
		fn = typecheck.LookupRuntime(fnname)
		var arg ir.Node
		switch {
		case fromType == argType:
			// already in the right type, nothing to do
			arg = n
		case fromType.Kind() == argType.Kind(),
			fromType.IsPtrShaped() && argType.IsPtrShaped():
			// can directly convert (e.g. named type to underlying type, or one pointer to another)
			// TODO: never happens because pointers are directIface?
			arg = ir.NewConvExpr(pos, ir.OCONVNOP, argType, n)
		case fromType.IsInteger() && argType.IsInteger():
			// can directly convert (e.g. int32 to uint32)
			arg = ir.NewConvExpr(pos, ir.OCONV, argType, n)
		default:
			// unsafe cast through memory: spill to a temp, then
			// reinterpret the temp's bits as argType via *(*argType)(&tmp).
			arg = copyExpr(n, fromType, init)
			var addr ir.Node = typecheck.NodAddr(arg)
			addr = ir.NewConvExpr(pos, ir.OCONVNOP, argType.PtrTo(), addr)
			arg = ir.NewStarExpr(pos, addr)
			arg.SetType(argType)
		}
		args = []ir.Node{arg}
	}
	call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
	call.Args = args
	return safeExpr(walkExpr(typecheck.Expr(call), init), init)
}

// walkBytesRunesToString walks an OBYTES2STR or ORUNES2STR node.
func walkBytesRunesToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
	a := typecheck.NodNil()
	if n.Esc() == ir.EscNone {
		// Create temporary buffer for string on stack.
		a = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8])
	}
	if n.Op() == ir.ORUNES2STR {
		// slicerunetostring(*[32]byte, []rune) string
		return mkcall("slicerunetostring", n.Type(), init, a, n.X)
	}
	// slicebytetostring(*[32]byte, ptr *byte, n int) string
	n.X = cheapExpr(n.X, init)
	ptr, len := backingArrayPtrLen(n.X)
	return mkcall("slicebytetostring", n.Type(), init, a, ptr, len)
}

// walkBytesToStringTemp walks an OBYTES2STRTMP node.
func walkBytesToStringTemp(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
	n.X = walkExpr(n.X, init)
	if !base.Flag.Cfg.Instrumenting {
		// Let the backend handle OBYTES2STRTMP directly
		// to avoid a function call to slicebytetostringtmp.
		return n
	}
	// slicebytetostringtmp(ptr *byte, n int) string
	n.X = cheapExpr(n.X, init)
	ptr, len := backingArrayPtrLen(n.X)
	return mkcall("slicebytetostringtmp", n.Type(), init, ptr, len)
}

// walkRuneToString walks an ORUNESTR node.
+func walkRuneToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node { + a := typecheck.NodNil() + if n.Esc() == ir.EscNone { + a = stackBufAddr(4, types.Types[types.TUINT8]) + } + // intstring(*[4]byte, rune) + return mkcall("intstring", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TINT64])) +} + +// walkStringToBytes walks an OSTR2BYTES node. +func walkStringToBytes(n *ir.ConvExpr, init *ir.Nodes) ir.Node { + s := n.X + if ir.IsConst(s, constant.String) { + sc := ir.StringVal(s) + + // Allocate a [n]byte of the right size. + t := types.NewArray(types.Types[types.TUINT8], int64(len(sc))) + var a ir.Node + if n.Esc() == ir.EscNone && len(sc) <= int(ir.MaxImplicitStackVarSize) { + a = stackBufAddr(t.NumElem(), t.Elem()) + } else { + types.CalcSize(t) + a = ir.NewUnaryExpr(base.Pos, ir.ONEW, nil) + a.SetType(types.NewPtr(t)) + a.SetTypecheck(1) + a.MarkNonNil() + } + p := typecheck.TempAt(base.Pos, ir.CurFunc, t.PtrTo()) // *[n]byte + init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, p, a))) + + // Copy from the static string data to the [n]byte. + if len(sc) > 0 { + sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, s) + sptr.SetBounded(true) + as := ir.NewAssignStmt(base.Pos, ir.NewStarExpr(base.Pos, p), ir.NewStarExpr(base.Pos, typecheck.ConvNop(sptr, t.PtrTo()))) + appendWalkStmt(init, as) + } + + // Slice the [n]byte to a []byte. + slice := ir.NewSliceExpr(n.Pos(), ir.OSLICEARR, p, nil, nil, nil) + slice.SetType(n.Type()) + slice.SetTypecheck(1) + return walkExpr(slice, init) + } + + a := typecheck.NodNil() + if n.Esc() == ir.EscNone { + // Create temporary buffer for slice on stack. + a = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8]) + } + // stringtoslicebyte(*32[byte], string) []byte + return mkcall("stringtoslicebyte", n.Type(), init, a, typecheck.Conv(s, types.Types[types.TSTRING])) +} + +// walkStringToBytesTemp walks an OSTR2BYTESTMP node. 
+func walkStringToBytesTemp(n *ir.ConvExpr, init *ir.Nodes) ir.Node { + // []byte(string) conversion that creates a slice + // referring to the actual string bytes. + // This conversion is handled later by the backend and + // is only for use by internal compiler optimizations + // that know that the slice won't be mutated. + // The only such case today is: + // for i, c := range []byte(string) + n.X = walkExpr(n.X, init) + return n +} + +// walkStringToRunes walks an OSTR2RUNES node. +func walkStringToRunes(n *ir.ConvExpr, init *ir.Nodes) ir.Node { + a := typecheck.NodNil() + if n.Esc() == ir.EscNone { + // Create temporary buffer for slice on stack. + a = stackBufAddr(tmpstringbufsize, types.Types[types.TINT32]) + } + // stringtoslicerune(*[32]rune, string) []rune + return mkcall("stringtoslicerune", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TSTRING])) +} + +// dataWordFuncName returns the name of the function used to convert a value of type "from" +// to the data word of an interface. +// argType is the type the argument needs to be coerced to. +// needsaddr reports whether the value should be passed (needaddr==false) or its address (needsaddr==true). 
+func dataWordFuncName(from *types.Type) (fnname string, argType *types.Type, needsaddr bool) { + if from.IsInterface() { + base.Fatalf("can only handle non-interfaces") + } + switch { + case from.Size() == 2 && uint8(from.Alignment()) == 2: + return "convT16", types.Types[types.TUINT16], false + case from.Size() == 4 && uint8(from.Alignment()) == 4 && !from.HasPointers(): + return "convT32", types.Types[types.TUINT32], false + case from.Size() == 8 && uint8(from.Alignment()) == uint8(types.Types[types.TUINT64].Alignment()) && !from.HasPointers(): + return "convT64", types.Types[types.TUINT64], false + } + if sc := from.SoleComponent(); sc != nil { + switch { + case sc.IsString(): + return "convTstring", types.Types[types.TSTRING], false + case sc.IsSlice(): + return "convTslice", types.NewSlice(types.Types[types.TUINT8]), false // the element type doesn't matter + } + } + + if from.HasPointers() { + return "convT", types.Types[types.TUNSAFEPTR], true + } + return "convTnoptr", types.Types[types.TUNSAFEPTR], true +} + +// rtconvfn returns the parameter and result types that will be used by a +// runtime function to convert from type src to type dst. The runtime function +// name can be derived from the names of the returned types. +// +// If no such function is necessary, it returns (Txxx, Txxx). 
+func rtconvfn(src, dst *types.Type) (param, result types.Kind) { + if ssagen.Arch.SoftFloat { + return types.Txxx, types.Txxx + } + + switch ssagen.Arch.LinkArch.Family { + case sys.ARM, sys.MIPS: + if src.IsFloat() { + switch dst.Kind() { + case types.TINT64, types.TUINT64: + return types.TFLOAT64, dst.Kind() + } + } + if dst.IsFloat() { + switch src.Kind() { + case types.TINT64, types.TUINT64: + return src.Kind(), dst.Kind() + } + } + + case sys.I386: + if src.IsFloat() { + switch dst.Kind() { + case types.TINT64, types.TUINT64: + return types.TFLOAT64, dst.Kind() + case types.TUINT32, types.TUINT, types.TUINTPTR: + return types.TFLOAT64, types.TUINT32 + } + } + if dst.IsFloat() { + switch src.Kind() { + case types.TINT64, types.TUINT64: + return src.Kind(), dst.Kind() + case types.TUINT32, types.TUINT, types.TUINTPTR: + return types.TUINT32, types.TFLOAT64 + } + } + } + return types.Txxx, types.Txxx +} + +func soleComponent(init *ir.Nodes, n ir.Node) ir.Node { + if n.Type().SoleComponent() == nil { + return n + } + // Keep in sync with cmd/compile/internal/types/type.go:Type.SoleComponent. + for { + switch { + case n.Type().IsStruct(): + if n.Type().Field(0).Sym.IsBlank() { + // Treat blank fields as the zero value as the Go language requires. + n = typecheck.TempAt(base.Pos, ir.CurFunc, n.Type().Field(0).Type) + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, n, nil)) + continue + } + n = typecheck.DotField(n.Pos(), n, 0) + case n.Type().IsArray(): + n = typecheck.Expr(ir.NewIndexExpr(n.Pos(), n, ir.NewInt(base.Pos, 0))) + default: + return n + } + } +} + +// byteindex converts n, which is byte-sized, to an int used to index into an array. +// We cannot use conv, because we allow converting bool to int here, +// which is forbidden in user code. +func byteindex(n ir.Node) ir.Node { + // We cannot convert from bool to int directly. + // While converting from int8 to int is possible, it would yield + // the wrong result for negative values. 
+ // Reinterpreting the value as an unsigned byte solves both cases. + if !types.Identical(n.Type(), types.Types[types.TUINT8]) { + n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) + n.SetType(types.Types[types.TUINT8]) + n.SetTypecheck(1) + } + n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n) + n.SetType(types.Types[types.TINT]) + n.SetTypecheck(1) + return n +} + +func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node { + // Calling cheapExpr(n, init) below leads to a recursive call to + // walkExpr, which leads us back here again. Use n.Checkptr to + // prevent infinite loops. + if n.CheckPtr() { + return n + } + n.SetCheckPtr(true) + defer n.SetCheckPtr(false) + + // TODO(mdempsky): Make stricter. We only need to exempt + // reflect.Value.Pointer and reflect.Value.UnsafeAddr. + switch n.X.Op() { + case ir.OCALLMETH: + base.FatalfAt(n.X.Pos(), "OCALLMETH missed by typecheck") + case ir.OCALLFUNC, ir.OCALLINTER: + return n + } + + if n.X.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(n.X) { + return n + } + + // Find original unsafe.Pointer operands involved in this + // arithmetic expression. + // + // "It is valid both to add and to subtract offsets from a + // pointer in this way. It is also valid to use &^ to round + // pointers, usually for alignment." 
+ var originals []ir.Node + var walk func(n ir.Node) + walk = func(n ir.Node) { + switch n.Op() { + case ir.OADD: + n := n.(*ir.BinaryExpr) + walk(n.X) + walk(n.Y) + case ir.OSUB, ir.OANDNOT: + n := n.(*ir.BinaryExpr) + walk(n.X) + case ir.OCONVNOP: + n := n.(*ir.ConvExpr) + if n.X.Type().IsUnsafePtr() { + n.X = cheapExpr(n.X, init) + originals = append(originals, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR])) + } + } + } + walk(n.X) + + cheap := cheapExpr(n, init) + + slice := typecheck.MakeDotArgs(base.Pos, types.NewSlice(types.Types[types.TUNSAFEPTR]), originals) + slice.SetEsc(ir.EscNone) + + init.Append(mkcall("checkptrArithmetic", nil, init, typecheck.ConvNop(cheap, types.Types[types.TUNSAFEPTR]), slice)) + // TODO(khr): Mark backing store of slice as dead. This will allow us to reuse + // the backing store for multiple calls to checkptrArithmetic. + + return cheap +} + +// walkSliceToArray walks an OSLICE2ARR expression. +func walkSliceToArray(n *ir.ConvExpr, init *ir.Nodes) ir.Node { + // Replace T(x) with *(*T)(x). + conv := typecheck.Expr(ir.NewConvExpr(base.Pos, ir.OCONV, types.NewPtr(n.Type()), n.X)).(*ir.ConvExpr) + deref := typecheck.Expr(ir.NewStarExpr(base.Pos, conv)).(*ir.StarExpr) + + // The OSLICE2ARRPTR conversion handles checking the slice length, + // so the dereference can't fail. + // + // However, this is more than just an optimization: if T is a + // zero-length array, then x (and thus (*T)(x)) can be nil, but T(x) + // should *not* panic. So suppressing the nil check here is + // necessary for correctness in that case. 
+ deref.SetBounded(true) + + return walkExpr(deref, init) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/expr.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/expr.go new file mode 100644 index 0000000000000000000000000000000000000000..268f793dc966fb614f2b02d7928af4f23a71c8e4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/expr.go @@ -0,0 +1,1096 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package walk + +import ( + "fmt" + "go/constant" + "internal/abi" + "internal/buildcfg" + "strings" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/rttype" + "cmd/compile/internal/staticdata" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/objabi" +) + +// The result of walkExpr MUST be assigned back to n, e.g. +// +// n.Left = walkExpr(n.Left, init) +func walkExpr(n ir.Node, init *ir.Nodes) ir.Node { + if n == nil { + return n + } + + if n, ok := n.(ir.InitNode); ok && init == n.PtrInit() { + // not okay to use n->ninit when walking n, + // because we might replace n with some other node + // and would lose the init list. + base.Fatalf("walkExpr init == &n->ninit") + } + + if len(n.Init()) != 0 { + walkStmtList(n.Init()) + init.Append(ir.TakeInit(n)...) + } + + lno := ir.SetPos(n) + + if base.Flag.LowerW > 1 { + ir.Dump("before walk expr", n) + } + + if n.Typecheck() != 1 { + base.Fatalf("missed typecheck: %+v", n) + } + + if n.Type().IsUntyped() { + base.Fatalf("expression has untyped type: %+v", n) + } + + n = walkExpr1(n, init) + + // Eagerly compute sizes of all expressions for the back end. 
+ if typ := n.Type(); typ != nil && typ.Kind() != types.TBLANK && !typ.IsFuncArgStruct() { + types.CheckSize(typ) + } + if n, ok := n.(*ir.Name); ok && n.Heapaddr != nil { + types.CheckSize(n.Heapaddr.Type()) + } + if ir.IsConst(n, constant.String) { + // Emit string symbol now to avoid emitting + // any concurrently during the backend. + _ = staticdata.StringSym(n.Pos(), constant.StringVal(n.Val())) + } + + if base.Flag.LowerW != 0 && n != nil { + ir.Dump("after walk expr", n) + } + + base.Pos = lno + return n +} + +func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node { + switch n.Op() { + default: + ir.Dump("walk", n) + base.Fatalf("walkExpr: switch 1 unknown op %+v", n.Op()) + panic("unreachable") + + case ir.OGETG, ir.OGETCALLERPC, ir.OGETCALLERSP: + return n + + case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.OLINKSYMOFFSET: + // TODO(mdempsky): Just return n; see discussion on CL 38655. + // Perhaps refactor to use Node.mayBeShared for these instead. + // If these return early, make sure to still call + // StringSym for constant strings. + return n + + case ir.OMETHEXPR: + // TODO(mdempsky): Do this right after type checking. 
+ n := n.(*ir.SelectorExpr) + return n.FuncName() + + case ir.OMIN, ir.OMAX: + n := n.(*ir.CallExpr) + return walkMinMax(n, init) + + case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.OSPTR, ir.OITAB, ir.OIDATA: + n := n.(*ir.UnaryExpr) + n.X = walkExpr(n.X, init) + return n + + case ir.ODOTMETH, ir.ODOTINTER: + n := n.(*ir.SelectorExpr) + n.X = walkExpr(n.X, init) + return n + + case ir.OADDR: + n := n.(*ir.AddrExpr) + n.X = walkExpr(n.X, init) + return n + + case ir.ODEREF: + n := n.(*ir.StarExpr) + n.X = walkExpr(n.X, init) + return n + + case ir.OMAKEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH, + ir.OUNSAFEADD: + n := n.(*ir.BinaryExpr) + n.X = walkExpr(n.X, init) + n.Y = walkExpr(n.Y, init) + return n + + case ir.OUNSAFESLICE: + n := n.(*ir.BinaryExpr) + return walkUnsafeSlice(n, init) + + case ir.OUNSAFESTRING: + n := n.(*ir.BinaryExpr) + return walkUnsafeString(n, init) + + case ir.OUNSAFESTRINGDATA, ir.OUNSAFESLICEDATA: + n := n.(*ir.UnaryExpr) + return walkUnsafeData(n, init) + + case ir.ODOT, ir.ODOTPTR: + n := n.(*ir.SelectorExpr) + return walkDot(n, init) + + case ir.ODOTTYPE, ir.ODOTTYPE2: + n := n.(*ir.TypeAssertExpr) + return walkDotType(n, init) + + case ir.ODYNAMICDOTTYPE, ir.ODYNAMICDOTTYPE2: + n := n.(*ir.DynamicTypeAssertExpr) + return walkDynamicDotType(n, init) + + case ir.OLEN, ir.OCAP: + n := n.(*ir.UnaryExpr) + return walkLenCap(n, init) + + case ir.OCOMPLEX: + n := n.(*ir.BinaryExpr) + n.X = walkExpr(n.X, init) + n.Y = walkExpr(n.Y, init) + return n + + case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: + n := n.(*ir.BinaryExpr) + return walkCompare(n, init) + + case ir.OANDAND, ir.OOROR: + n := n.(*ir.LogicalExpr) + return walkLogical(n, init) + + case ir.OPRINT, ir.OPRINTLN: + return walkPrint(n.(*ir.CallExpr), init) + + case ir.OPANIC: + n := n.(*ir.UnaryExpr) + return mkcall("gopanic", nil, init, n.X) + + case ir.ORECOVERFP: + return walkRecoverFP(n.(*ir.CallExpr), init) + 
+ case ir.OCFUNC: + return n + + case ir.OCALLINTER, ir.OCALLFUNC: + n := n.(*ir.CallExpr) + return walkCall(n, init) + + case ir.OAS, ir.OASOP: + return walkAssign(init, n) + + case ir.OAS2: + n := n.(*ir.AssignListStmt) + return walkAssignList(init, n) + + // a,b,... = fn() + case ir.OAS2FUNC: + n := n.(*ir.AssignListStmt) + return walkAssignFunc(init, n) + + // x, y = <-c + // order.stmt made sure x is addressable or blank. + case ir.OAS2RECV: + n := n.(*ir.AssignListStmt) + return walkAssignRecv(init, n) + + // a,b = m[i] + case ir.OAS2MAPR: + n := n.(*ir.AssignListStmt) + return walkAssignMapRead(init, n) + + case ir.ODELETE: + n := n.(*ir.CallExpr) + return walkDelete(init, n) + + case ir.OAS2DOTTYPE: + n := n.(*ir.AssignListStmt) + return walkAssignDotType(n, init) + + case ir.OCONVIFACE: + n := n.(*ir.ConvExpr) + return walkConvInterface(n, init) + + case ir.OCONV, ir.OCONVNOP: + n := n.(*ir.ConvExpr) + return walkConv(n, init) + + case ir.OSLICE2ARR: + n := n.(*ir.ConvExpr) + return walkSliceToArray(n, init) + + case ir.OSLICE2ARRPTR: + n := n.(*ir.ConvExpr) + n.X = walkExpr(n.X, init) + return n + + case ir.ODIV, ir.OMOD: + n := n.(*ir.BinaryExpr) + return walkDivMod(n, init) + + case ir.OINDEX: + n := n.(*ir.IndexExpr) + return walkIndex(n, init) + + case ir.OINDEXMAP: + n := n.(*ir.IndexExpr) + return walkIndexMap(n, init) + + case ir.ORECV: + base.Fatalf("walkExpr ORECV") // should see inside OAS only + panic("unreachable") + + case ir.OSLICEHEADER: + n := n.(*ir.SliceHeaderExpr) + return walkSliceHeader(n, init) + + case ir.OSTRINGHEADER: + n := n.(*ir.StringHeaderExpr) + return walkStringHeader(n, init) + + case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR: + n := n.(*ir.SliceExpr) + return walkSlice(n, init) + + case ir.ONEW: + n := n.(*ir.UnaryExpr) + return walkNew(n, init) + + case ir.OADDSTR: + return walkAddString(n.(*ir.AddStringExpr), init) + + case ir.OAPPEND: + // order should make sure we only see OAS(node, OAPPEND), 
which we handle above. + base.Fatalf("append outside assignment") + panic("unreachable") + + case ir.OCOPY: + return walkCopy(n.(*ir.BinaryExpr), init, base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime) + + case ir.OCLEAR: + n := n.(*ir.UnaryExpr) + return walkClear(n) + + case ir.OCLOSE: + n := n.(*ir.UnaryExpr) + return walkClose(n, init) + + case ir.OMAKECHAN: + n := n.(*ir.MakeExpr) + return walkMakeChan(n, init) + + case ir.OMAKEMAP: + n := n.(*ir.MakeExpr) + return walkMakeMap(n, init) + + case ir.OMAKESLICE: + n := n.(*ir.MakeExpr) + return walkMakeSlice(n, init) + + case ir.OMAKESLICECOPY: + n := n.(*ir.MakeExpr) + return walkMakeSliceCopy(n, init) + + case ir.ORUNESTR: + n := n.(*ir.ConvExpr) + return walkRuneToString(n, init) + + case ir.OBYTES2STR, ir.ORUNES2STR: + n := n.(*ir.ConvExpr) + return walkBytesRunesToString(n, init) + + case ir.OBYTES2STRTMP: + n := n.(*ir.ConvExpr) + return walkBytesToStringTemp(n, init) + + case ir.OSTR2BYTES: + n := n.(*ir.ConvExpr) + return walkStringToBytes(n, init) + + case ir.OSTR2BYTESTMP: + n := n.(*ir.ConvExpr) + return walkStringToBytesTemp(n, init) + + case ir.OSTR2RUNES: + n := n.(*ir.ConvExpr) + return walkStringToRunes(n, init) + + case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT: + return walkCompLit(n, init) + + case ir.OSEND: + n := n.(*ir.SendStmt) + return walkSend(n, init) + + case ir.OCLOSURE: + return walkClosure(n.(*ir.ClosureExpr), init) + + case ir.OMETHVALUE: + return walkMethodValue(n.(*ir.SelectorExpr), init) + } + + // No return! Each case must return (or panic), + // to avoid confusion about what gets returned + // in the presence of type assertions. +} + +// walk the whole tree of the body of an +// expression or simple statement. +// the types expressions are calculated. +// compile-time constants are evaluated. +// complex side effects like statements are appended to init. 
+func walkExprList(s []ir.Node, init *ir.Nodes) { + for i := range s { + s[i] = walkExpr(s[i], init) + } +} + +func walkExprListCheap(s []ir.Node, init *ir.Nodes) { + for i, n := range s { + s[i] = cheapExpr(n, init) + s[i] = walkExpr(s[i], init) + } +} + +func walkExprListSafe(s []ir.Node, init *ir.Nodes) { + for i, n := range s { + s[i] = safeExpr(n, init) + s[i] = walkExpr(s[i], init) + } +} + +// return side-effect free and cheap n, appending side effects to init. +// result may not be assignable. +func cheapExpr(n ir.Node, init *ir.Nodes) ir.Node { + switch n.Op() { + case ir.ONAME, ir.OLITERAL, ir.ONIL: + return n + } + + return copyExpr(n, n.Type(), init) +} + +// return side effect-free n, appending side effects to init. +// result is assignable if n is. +func safeExpr(n ir.Node, init *ir.Nodes) ir.Node { + if n == nil { + return nil + } + + if len(n.Init()) != 0 { + walkStmtList(n.Init()) + init.Append(ir.TakeInit(n)...) + } + + switch n.Op() { + case ir.ONAME, ir.OLITERAL, ir.ONIL, ir.OLINKSYMOFFSET: + return n + + case ir.OLEN, ir.OCAP: + n := n.(*ir.UnaryExpr) + l := safeExpr(n.X, init) + if l == n.X { + return n + } + a := ir.Copy(n).(*ir.UnaryExpr) + a.X = l + return walkExpr(typecheck.Expr(a), init) + + case ir.ODOT, ir.ODOTPTR: + n := n.(*ir.SelectorExpr) + l := safeExpr(n.X, init) + if l == n.X { + return n + } + a := ir.Copy(n).(*ir.SelectorExpr) + a.X = l + return walkExpr(typecheck.Expr(a), init) + + case ir.ODEREF: + n := n.(*ir.StarExpr) + l := safeExpr(n.X, init) + if l == n.X { + return n + } + a := ir.Copy(n).(*ir.StarExpr) + a.X = l + return walkExpr(typecheck.Expr(a), init) + + case ir.OINDEX, ir.OINDEXMAP: + n := n.(*ir.IndexExpr) + l := safeExpr(n.X, init) + r := safeExpr(n.Index, init) + if l == n.X && r == n.Index { + return n + } + a := ir.Copy(n).(*ir.IndexExpr) + a.X = l + a.Index = r + return walkExpr(typecheck.Expr(a), init) + + case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT: + n := n.(*ir.CompLitExpr) + if 
isStaticCompositeLiteral(n) { + return n + } + } + + // make a copy; must not be used as an lvalue + if ir.IsAddressable(n) { + base.Fatalf("missing lvalue case in safeExpr: %v", n) + } + return cheapExpr(n, init) +} + +func copyExpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node { + l := typecheck.TempAt(base.Pos, ir.CurFunc, t) + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, l, n)) + return l +} + +func walkAddString(n *ir.AddStringExpr, init *ir.Nodes) ir.Node { + c := len(n.List) + + if c < 2 { + base.Fatalf("walkAddString count %d too small", c) + } + + buf := typecheck.NodNil() + if n.Esc() == ir.EscNone { + sz := int64(0) + for _, n1 := range n.List { + if n1.Op() == ir.OLITERAL { + sz += int64(len(ir.StringVal(n1))) + } + } + + // Don't allocate the buffer if the result won't fit. + if sz < tmpstringbufsize { + // Create temporary buffer for result string on stack. + buf = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8]) + } + } + + // build list of string arguments + args := []ir.Node{buf} + for _, n2 := range n.List { + args = append(args, typecheck.Conv(n2, types.Types[types.TSTRING])) + } + + var fn string + if c <= 5 { + // small numbers of strings use direct runtime helpers. + // note: order.expr knows this cutoff too. + fn = fmt.Sprintf("concatstring%d", c) + } else { + // large numbers of strings are passed to the runtime as a slice. 
+ fn = "concatstrings" + + t := types.NewSlice(types.Types[types.TSTRING]) + // args[1:] to skip buf arg + slice := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, t, args[1:]) + slice.Prealloc = n.Prealloc + args = []ir.Node{buf, slice} + slice.SetEsc(ir.EscNone) + } + + cat := typecheck.LookupRuntime(fn) + r := ir.NewCallExpr(base.Pos, ir.OCALL, cat, nil) + r.Args = args + r1 := typecheck.Expr(r) + r1 = walkExpr(r1, init) + r1.SetType(n.Type()) + + return r1 +} + +type hookInfo struct { + paramType types.Kind + argsNum int + runtimeFunc string +} + +var hooks = map[string]hookInfo{ + "strings.EqualFold": {paramType: types.TSTRING, argsNum: 2, runtimeFunc: "libfuzzerHookEqualFold"}, +} + +// walkCall walks an OCALLFUNC or OCALLINTER node. +func walkCall(n *ir.CallExpr, init *ir.Nodes) ir.Node { + if n.Op() == ir.OCALLMETH { + base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck") + } + if n.Op() == ir.OCALLINTER || n.Fun.Op() == ir.OMETHEXPR { + // We expect both interface call reflect.Type.Method and concrete + // call reflect.(*rtype).Method. + usemethod(n) + } + if n.Op() == ir.OCALLINTER { + reflectdata.MarkUsedIfaceMethod(n) + } + + if n.Op() == ir.OCALLFUNC && n.Fun.Op() == ir.OCLOSURE { + directClosureCall(n) + } + + if ir.IsFuncPCIntrinsic(n) { + // For internal/abi.FuncPCABIxxx(fn), if fn is a defined function, rewrite + // it to the address of the function of the ABI fn is defined. + name := n.Fun.(*ir.Name).Sym().Name + arg := n.Args[0] + var wantABI obj.ABI + switch name { + case "FuncPCABI0": + wantABI = obj.ABI0 + case "FuncPCABIInternal": + wantABI = obj.ABIInternal + } + if n.Type() != types.Types[types.TUINTPTR] { + base.FatalfAt(n.Pos(), "FuncPC intrinsic should return uintptr, got %v", n.Type()) // as expected by typecheck.FuncPC. 
+ } + n := ir.FuncPC(n.Pos(), arg, wantABI) + return walkExpr(n, init) + } + + if name, ok := n.Fun.(*ir.Name); ok { + sym := name.Sym() + if sym.Pkg.Path == "go.runtime" && sym.Name == "deferrangefunc" { + // Call to runtime.deferrangefunc is being shared with a range-over-func + // body that might add defers to this frame, so we cannot use open-coded defers + // and we need to call deferreturn even if we don't see any other explicit defers. + ir.CurFunc.SetHasDefer(true) + ir.CurFunc.SetOpenCodedDeferDisallowed(true) + } + } + + walkCall1(n, init) + return n +} + +func walkCall1(n *ir.CallExpr, init *ir.Nodes) { + if n.Walked() { + return // already walked + } + n.SetWalked(true) + + if n.Op() == ir.OCALLMETH { + base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck") + } + + args := n.Args + params := n.Fun.Type().Params() + + n.Fun = walkExpr(n.Fun, init) + walkExprList(args, init) + + for i, arg := range args { + // Validate argument and parameter types match. + param := params[i] + if !types.Identical(arg.Type(), param.Type) { + base.FatalfAt(n.Pos(), "assigning %L to parameter %v (type %v)", arg, param.Sym, param.Type) + } + + // For any argument whose evaluation might require a function call, + // store that argument into a temporary variable, + // to prevent that calls from clobbering arguments already on the stack. 
+ if mayCall(arg) { + // assignment of arg to Temp + tmp := typecheck.TempAt(base.Pos, ir.CurFunc, param.Type) + init.Append(convas(typecheck.Stmt(ir.NewAssignStmt(base.Pos, tmp, arg)).(*ir.AssignStmt), init)) + // replace arg with temp + args[i] = tmp + } + } + + funSym := n.Fun.Sym() + if base.Debug.Libfuzzer != 0 && funSym != nil { + if hook, found := hooks[funSym.Pkg.Path+"."+funSym.Name]; found { + if len(args) != hook.argsNum { + panic(fmt.Sprintf("%s.%s expects %d arguments, but received %d", funSym.Pkg.Path, funSym.Name, hook.argsNum, len(args))) + } + var hookArgs []ir.Node + for _, arg := range args { + hookArgs = append(hookArgs, tracecmpArg(arg, types.Types[hook.paramType], init)) + } + hookArgs = append(hookArgs, fakePC(n)) + init.Append(mkcall(hook.runtimeFunc, nil, init, hookArgs...)) + } + } +} + +// walkDivMod walks an ODIV or OMOD node. +func walkDivMod(n *ir.BinaryExpr, init *ir.Nodes) ir.Node { + n.X = walkExpr(n.X, init) + n.Y = walkExpr(n.Y, init) + + // rewrite complex div into function call. + et := n.X.Type().Kind() + + if types.IsComplex[et] && n.Op() == ir.ODIV { + t := n.Type() + call := mkcall("complex128div", types.Types[types.TCOMPLEX128], init, typecheck.Conv(n.X, types.Types[types.TCOMPLEX128]), typecheck.Conv(n.Y, types.Types[types.TCOMPLEX128])) + return typecheck.Conv(call, t) + } + + // Nothing to do for float divisions. + if types.IsFloat[et] { + return n + } + + // rewrite 64-bit div and mod on 32-bit architectures. + // TODO: Remove this code once we can introduce + // runtime calls late in SSA processing. + if types.RegSize < 8 && (et == types.TINT64 || et == types.TUINT64) { + if n.Y.Op() == ir.OLITERAL { + // Leave div/mod by constant powers of 2 or small 16-bit constants. + // The SSA backend will handle those. 
+ switch et { + case types.TINT64: + c := ir.Int64Val(n.Y) + if c < 0 { + c = -c + } + if c != 0 && c&(c-1) == 0 { + return n + } + case types.TUINT64: + c := ir.Uint64Val(n.Y) + if c < 1<<16 { + return n + } + if c != 0 && c&(c-1) == 0 { + return n + } + } + } + var fn string + if et == types.TINT64 { + fn = "int64" + } else { + fn = "uint64" + } + if n.Op() == ir.ODIV { + fn += "div" + } else { + fn += "mod" + } + return mkcall(fn, n.Type(), init, typecheck.Conv(n.X, types.Types[et]), typecheck.Conv(n.Y, types.Types[et])) + } + return n +} + +// walkDot walks an ODOT or ODOTPTR node. +func walkDot(n *ir.SelectorExpr, init *ir.Nodes) ir.Node { + usefield(n) + n.X = walkExpr(n.X, init) + return n +} + +// walkDotType walks an ODOTTYPE or ODOTTYPE2 node. +func walkDotType(n *ir.TypeAssertExpr, init *ir.Nodes) ir.Node { + n.X = walkExpr(n.X, init) + // Set up interface type addresses for back end. + if !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() { + n.ITab = reflectdata.ITabAddrAt(base.Pos, n.Type(), n.X.Type()) + } + if n.X.Type().IsInterface() && n.Type().IsInterface() && !n.Type().IsEmptyInterface() { + // This kind of conversion needs a runtime call. Allocate + // a descriptor for that call. + n.Descriptor = makeTypeAssertDescriptor(n.Type(), n.Op() == ir.ODOTTYPE2) + } + return n +} + +func makeTypeAssertDescriptor(target *types.Type, canFail bool) *obj.LSym { + // When converting from an interface to a non-empty interface. Needs a runtime call. + // Allocate an internal/abi.TypeAssert descriptor for that call. 
+ lsym := types.LocalPkg.Lookup(fmt.Sprintf(".typeAssert.%d", typeAssertGen)).LinksymABI(obj.ABI0) + typeAssertGen++ + c := rttype.NewCursor(lsym, 0, rttype.TypeAssert) + c.Field("Cache").WritePtr(typecheck.LookupRuntimeVar("emptyTypeAssertCache")) + c.Field("Inter").WritePtr(reflectdata.TypeSym(target).Linksym()) + c.Field("CanFail").WriteBool(canFail) + objw.Global(lsym, int32(rttype.TypeAssert.Size()), obj.LOCAL) + lsym.Gotype = reflectdata.TypeLinksym(rttype.TypeAssert) + return lsym +} + +var typeAssertGen int + +// walkDynamicDotType walks an ODYNAMICDOTTYPE or ODYNAMICDOTTYPE2 node. +func walkDynamicDotType(n *ir.DynamicTypeAssertExpr, init *ir.Nodes) ir.Node { + n.X = walkExpr(n.X, init) + n.RType = walkExpr(n.RType, init) + n.ITab = walkExpr(n.ITab, init) + // Convert to non-dynamic if we can. + if n.RType != nil && n.RType.Op() == ir.OADDR { + addr := n.RType.(*ir.AddrExpr) + if addr.X.Op() == ir.OLINKSYMOFFSET { + r := ir.NewTypeAssertExpr(n.Pos(), n.X, n.Type()) + if n.Op() == ir.ODYNAMICDOTTYPE2 { + r.SetOp(ir.ODOTTYPE2) + } + r.SetType(n.Type()) + r.SetTypecheck(1) + return walkExpr(r, init) + } + } + return n +} + +// walkIndex walks an OINDEX node. +func walkIndex(n *ir.IndexExpr, init *ir.Nodes) ir.Node { + n.X = walkExpr(n.X, init) + + // save the original node for bounds checking elision. + // If it was a ODIV/OMOD walk might rewrite it. + r := n.Index + + n.Index = walkExpr(n.Index, init) + + // if range of type cannot exceed static array bound, + // disable bounds check. 
+ if n.Bounded() { + return n + } + t := n.X.Type() + if t != nil && t.IsPtr() { + t = t.Elem() + } + if t.IsArray() { + n.SetBounded(bounded(r, t.NumElem())) + if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) { + base.Warn("index bounds check elided") + } + } else if ir.IsConst(n.X, constant.String) { + n.SetBounded(bounded(r, int64(len(ir.StringVal(n.X))))) + if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) { + base.Warn("index bounds check elided") + } + } + return n +} + +// mapKeyArg returns an expression for key that is suitable to be passed +// as the key argument for runtime map* functions. +// n is the map indexing or delete Node (to provide Pos). +func mapKeyArg(fast int, n, key ir.Node, assigned bool) ir.Node { + if fast == mapslow { + // standard version takes key by reference. + // orderState.expr made sure key is addressable. + return typecheck.NodAddr(key) + } + if assigned { + // mapassign does distinguish pointer vs. integer key. + return key + } + // mapaccess and mapdelete don't distinguish pointer vs. integer key. + switch fast { + case mapfast32ptr: + return ir.NewConvExpr(n.Pos(), ir.OCONVNOP, types.Types[types.TUINT32], key) + case mapfast64ptr: + return ir.NewConvExpr(n.Pos(), ir.OCONVNOP, types.Types[types.TUINT64], key) + default: + // fast version takes key by value. + return key + } +} + +// walkIndexMap walks an OINDEXMAP node. 
+// It replaces m[k] with *map{access1,assign}(maptype, m, &k) +func walkIndexMap(n *ir.IndexExpr, init *ir.Nodes) ir.Node { + n.X = walkExpr(n.X, init) + n.Index = walkExpr(n.Index, init) + map_ := n.X + t := map_.Type() + fast := mapfast(t) + key := mapKeyArg(fast, n, n.Index, n.Assigned) + args := []ir.Node{reflectdata.IndexMapRType(base.Pos, n), map_, key} + + var mapFn ir.Node + switch { + case n.Assigned: + mapFn = mapfn(mapassign[fast], t, false) + case t.Elem().Size() > abi.ZeroValSize: + args = append(args, reflectdata.ZeroAddr(t.Elem().Size())) + mapFn = mapfn("mapaccess1_fat", t, true) + default: + mapFn = mapfn(mapaccess1[fast], t, false) + } + call := mkcall1(mapFn, nil, init, args...) + call.SetType(types.NewPtr(t.Elem())) + call.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers. + star := ir.NewStarExpr(base.Pos, call) + star.SetType(t.Elem()) + star.SetTypecheck(1) + return star +} + +// walkLogical walks an OANDAND or OOROR node. +func walkLogical(n *ir.LogicalExpr, init *ir.Nodes) ir.Node { + n.X = walkExpr(n.X, init) + + // cannot put side effects from n.Right on init, + // because they cannot run before n.Left is checked. + // save elsewhere and store on the eventual n.Right. + var ll ir.Nodes + + n.Y = walkExpr(n.Y, &ll) + n.Y = ir.InitExpr(ll, n.Y) + return n +} + +// walkSend walks an OSEND node. +func walkSend(n *ir.SendStmt, init *ir.Nodes) ir.Node { + n1 := n.Value + n1 = typecheck.AssignConv(n1, n.Chan.Type().Elem(), "chan send") + n1 = walkExpr(n1, init) + n1 = typecheck.NodAddr(n1) + return mkcall1(chanfn("chansend1", 2, n.Chan.Type()), nil, init, n.Chan, n1) +} + +// walkSlice walks an OSLICE, OSLICEARR, OSLICESTR, OSLICE3, or OSLICE3ARR node. +func walkSlice(n *ir.SliceExpr, init *ir.Nodes) ir.Node { + n.X = walkExpr(n.X, init) + n.Low = walkExpr(n.Low, init) + if n.Low != nil && ir.IsZero(n.Low) { + // Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k]. 
+ n.Low = nil + } + n.High = walkExpr(n.High, init) + n.Max = walkExpr(n.Max, init) + + if (n.Op() == ir.OSLICE || n.Op() == ir.OSLICESTR) && n.Low == nil && n.High == nil { + // Reduce x[:] to x. + if base.Debug.Slice > 0 { + base.Warn("slice: omit slice operation") + } + return n.X + } + return n +} + +// walkSliceHeader walks an OSLICEHEADER node. +func walkSliceHeader(n *ir.SliceHeaderExpr, init *ir.Nodes) ir.Node { + n.Ptr = walkExpr(n.Ptr, init) + n.Len = walkExpr(n.Len, init) + n.Cap = walkExpr(n.Cap, init) + return n +} + +// walkStringHeader walks an OSTRINGHEADER node. +func walkStringHeader(n *ir.StringHeaderExpr, init *ir.Nodes) ir.Node { + n.Ptr = walkExpr(n.Ptr, init) + n.Len = walkExpr(n.Len, init) + return n +} + +// return 1 if integer n must be in range [0, max), 0 otherwise. +func bounded(n ir.Node, max int64) bool { + if n.Type() == nil || !n.Type().IsInteger() { + return false + } + + sign := n.Type().IsSigned() + bits := int32(8 * n.Type().Size()) + + if ir.IsSmallIntConst(n) { + v := ir.Int64Val(n) + return 0 <= v && v < max + } + + switch n.Op() { + case ir.OAND, ir.OANDNOT: + n := n.(*ir.BinaryExpr) + v := int64(-1) + switch { + case ir.IsSmallIntConst(n.X): + v = ir.Int64Val(n.X) + case ir.IsSmallIntConst(n.Y): + v = ir.Int64Val(n.Y) + if n.Op() == ir.OANDNOT { + v = ^v + if !sign { + v &= 1< 0 && v >= 2 { + bits-- + v >>= 1 + } + } + + case ir.ORSH: + n := n.(*ir.BinaryExpr) + if !sign && ir.IsSmallIntConst(n.Y) { + v := ir.Int64Val(n.Y) + if v > int64(bits) { + return true + } + bits -= int32(v) + } + } + + if !sign && bits <= 62 && 1< 1 { + s := fmt.Sprintf("\nbefore order %v", fn.Sym()) + ir.DumpList(s, fn.Body) + } + ir.SetPos(fn) // Set reasonable position for instrumenting code. See issue 53688. + orderBlock(&fn.Body, map[string][]*ir.Name{}) +} + +// append typechecks stmt and appends it to out. 
+func (o *orderState) append(stmt ir.Node) {
+	o.out = append(o.out, typecheck.Stmt(stmt))
+}
+
+// newTemp allocates a new temporary with the given type,
+// pushes it onto the temp stack, and returns it.
+// If clear is true, newTemp emits code to zero the temporary.
+func (o *orderState) newTemp(t *types.Type, clear bool) *ir.Name {
+	var v *ir.Name
+	key := t.LinkString()
+	// Reuse a previously popped temporary of the identical type, if any
+	// (o.free is keyed by the type's link string; see popTemp).
+	if a := o.free[key]; len(a) > 0 {
+		v = a[len(a)-1]
+		if !types.Identical(t, v.Type()) {
+			base.Fatalf("expected %L to have type %v", v, t)
+		}
+		o.free[key] = a[:len(a)-1]
+	} else {
+		v = typecheck.TempAt(base.Pos, ir.CurFunc, t)
+	}
+	if clear {
+		// Emit v = <zero value> so the temporary starts zeroed.
+		o.append(ir.NewAssignStmt(base.Pos, v, nil))
+	}
+
+	o.temp = append(o.temp, v)
+	return v
+}
+
+// copyExpr behaves like newTemp but also emits
+// code to initialize the temporary to the value n.
+func (o *orderState) copyExpr(n ir.Node) *ir.Name {
+	return o.copyExpr1(n, false)
+}
+
+// copyExprClear is like copyExpr but clears the temp before assignment.
+// It is provided for use when the evaluation of tmp = n turns into
+// a function call that is passed a pointer to the temporary as the output space.
+// If the call blocks before tmp has been written,
+// the garbage collector will still treat the temporary as live,
+// so we must zero it before entering that call.
+// Today, this only happens for channel receive operations.
+// (The other candidate would be map access, but map access
+// returns a pointer to the result data instead of taking a pointer
+// to be filled in.)
+func (o *orderState) copyExprClear(n ir.Node) *ir.Name {
+	return o.copyExpr1(n, true)
+}
+
+// copyExpr1 is the shared implementation of copyExpr and copyExprClear:
+// it allocates a temporary of n's type (zeroed first when clear is true),
+// emits tmp = n, and returns the temporary.
+func (o *orderState) copyExpr1(n ir.Node, clear bool) *ir.Name {
+	t := n.Type()
+	v := o.newTemp(t, clear)
+	o.append(ir.NewAssignStmt(base.Pos, v, n))
+	return v
+}
+
+// cheapExpr returns a cheap version of n.
+// The definition of cheap is that n is a variable or constant.
+// If not, cheapExpr allocates a new tmp, emits tmp = n,
+// and then returns tmp.
+func (o *orderState) cheapExpr(n ir.Node) ir.Node {
+	if n == nil {
+		return nil
+	}
+
+	switch n.Op() {
+	case ir.ONAME, ir.OLITERAL, ir.ONIL:
+		// Already cheap: a variable or constant needs no copy.
+		return n
+	case ir.OLEN, ir.OCAP:
+		// len/cap is cheap if its operand is; recurse on the operand and
+		// only rebuild the node when the operand actually changed.
+		n := n.(*ir.UnaryExpr)
+		l := o.cheapExpr(n.X)
+		if l == n.X {
+			return n
+		}
+		a := ir.Copy(n).(*ir.UnaryExpr)
+		a.X = l
+		return typecheck.Expr(a)
+	}
+
+	return o.copyExpr(n)
+}
+
+// safeExpr returns a safe version of n.
+// The definition of safe is that n can appear multiple times
+// without violating the semantics of the original program,
+// and that assigning to the safe version has the same effect
+// as assigning to the original n.
+//
+// The intended use is to apply to x when rewriting x += y into x = x + y.
+func (o *orderState) safeExpr(n ir.Node) ir.Node {
+	switch n.Op() {
+	case ir.ONAME, ir.OLITERAL, ir.ONIL:
+		return n
+
+	case ir.OLEN, ir.OCAP:
+		n := n.(*ir.UnaryExpr)
+		l := o.safeExpr(n.X)
+		if l == n.X {
+			return n
+		}
+		a := ir.Copy(n).(*ir.UnaryExpr)
+		a.X = l
+		return typecheck.Expr(a)
+
+	case ir.ODOT:
+		// x.f on a value: the base itself must stay assignable, so
+		// recurse with safeExpr.
+		n := n.(*ir.SelectorExpr)
+		l := o.safeExpr(n.X)
+		if l == n.X {
+			return n
+		}
+		a := ir.Copy(n).(*ir.SelectorExpr)
+		a.X = l
+		return typecheck.Expr(a)
+
+	case ir.ODOTPTR:
+		// p.f through a pointer: cheapExpr on the pointer suffices —
+		// assigning through a copied pointer still writes the original object.
+		n := n.(*ir.SelectorExpr)
+		l := o.cheapExpr(n.X)
+		if l == n.X {
+			return n
+		}
+		a := ir.Copy(n).(*ir.SelectorExpr)
+		a.X = l
+		return typecheck.Expr(a)
+
+	case ir.ODEREF:
+		// *p: same reasoning as ODOTPTR — only the pointer need be cheap.
+		n := n.(*ir.StarExpr)
+		l := o.cheapExpr(n.X)
+		if l == n.X {
+			return n
+		}
+		a := ir.Copy(n).(*ir.StarExpr)
+		a.X = l
+		return typecheck.Expr(a)
+
+	case ir.OINDEX, ir.OINDEXMAP:
+		n := n.(*ir.IndexExpr)
+		var l ir.Node
+		// Indexing an array value must keep the array assignable (safeExpr);
+		// slices/maps/pointers are reference-like, so cheapExpr is enough.
+		if n.X.Type().IsArray() {
+			l = o.safeExpr(n.X)
+		} else {
+			l = o.cheapExpr(n.X)
+		}
+		r := o.cheapExpr(n.Index)
+		if l == n.X && r == n.Index {
+			return n
+		}
+		a := ir.Copy(n).(*ir.IndexExpr)
+		a.X = l
+		a.Index = r
+		return typecheck.Expr(a)
+
+	default:
+		base.Fatalf("order.safeExpr %v", n.Op())
+		return nil // not reached
+	}
+}
+
+// addrTemp ensures that n is okay to pass by address
 to runtime routines.
+// If the original argument n is not okay, addrTemp creates a tmp, emits
+// tmp = n, and then returns tmp.
+// The result of addrTemp MUST be assigned back to n, e.g.
+//
+//	n.Left = o.addrTemp(n.Left)
+func (o *orderState) addrTemp(n ir.Node) ir.Node {
+	if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
+		// Constants can live in a read-only static, so no runtime
+		// assignment is needed at all.
+		// TODO: expand this to all static composite literal nodes?
+		n = typecheck.DefaultLit(n, nil)
+		types.CalcSize(n.Type())
+		vstat := readonlystaticname(n.Type())
+		var s staticinit.Schedule
+		s.StaticAssign(vstat, 0, n, n.Type())
+		if s.Out != nil {
+			base.Fatalf("staticassign of const generated code: %+v", n)
+		}
+		vstat = typecheck.Expr(vstat).(*ir.Name)
+		return vstat
+	}
+
+	// Prevent taking the address of an SSA-able local variable (#63332).
+	//
+	// TODO(mdempsky): Note that OuterValue unwraps OCONVNOPs, but
+	// IsAddressable does not. It should be possible to skip copying for
+	// at least some of these OCONVNOPs (e.g., reinsert them after the
+	// OADDR operation), but at least walkCompare needs to be fixed to
+	// support that (see trybot failures on go.dev/cl/541715, PS1).
+	if ir.IsAddressable(n) {
+		if name, ok := ir.OuterValue(n).(*ir.Name); ok && name.Op() == ir.ONAME {
+			if name.Class == ir.PAUTO && !name.Addrtaken() && ssa.CanSSA(name.Type()) {
+				// Copy instead of addressing, so the variable stays SSA-able.
+				goto Copy
+			}
+		}
+
+		return n
+	}
+
+Copy:
+	return o.copyExpr(n)
+}
+
+// mapKeyTemp prepares n to be a key in a map runtime call and returns n.
+// The first parameter is the position of n's containing node, for use in case
+// that n's position is not unique (e.g., if n is an ONAME).
+func (o *orderState) mapKeyTemp(outerPos src.XPos, t *types.Type, n ir.Node) ir.Node {
+	pos := outerPos
+	if ir.HasUniquePos(n) {
+		pos = n.Pos()
+	}
+	// Most map calls need to take the address of the key.
+	// Exception: map*_fast* calls. See golang.org/issue/19015.
+	alg := mapfast(t)
+	if alg == mapslow {
+		return o.addrTemp(n)
+	}
+	// Fast variants take the key by value with a fixed runtime type kt;
+	// convert n to kt below.
+	var kt *types.Type
+	switch alg {
+	case mapfast32:
+		kt = types.Types[types.TUINT32]
+	case mapfast64:
+		kt = types.Types[types.TUINT64]
+	case mapfast32ptr, mapfast64ptr:
+		kt = types.Types[types.TUNSAFEPTR]
+	case mapfaststr:
+		kt = types.Types[types.TSTRING]
+	}
+	nt := n.Type()
+	switch {
+	case nt == kt:
+		return n
+	case nt.Kind() == kt.Kind(), nt.IsPtrShaped() && kt.IsPtrShaped():
+		// can directly convert (e.g. named type to underlying type, or one pointer to another)
+		return typecheck.Expr(ir.NewConvExpr(pos, ir.OCONVNOP, kt, n))
+	case nt.IsInteger() && kt.IsInteger():
+		// can directly convert (e.g. int32 to uint32)
+		if n.Op() == ir.OLITERAL && nt.IsSigned() {
+			// avoid constant overflow error
+			n = ir.NewConstExpr(constant.MakeUint64(uint64(ir.Int64Val(n))), n)
+			n.SetType(kt)
+			return n
+		}
+		return typecheck.Expr(ir.NewConvExpr(pos, ir.OCONV, kt, n))
+	default:
+		// Unsafe cast through memory.
+		// We'll need to do a load with type kt. Create a temporary of type kt to
+		// ensure sufficient alignment. nt may be under-aligned.
+		if uint8(kt.Alignment()) < uint8(nt.Alignment()) {
+			base.Fatalf("mapKeyTemp: key type is not sufficiently aligned, kt=%v nt=%v", kt, nt)
+		}
+		tmp := o.newTemp(kt, true)
+		// *(*nt)(&tmp) = n
+		var e ir.Node = typecheck.NodAddr(tmp)
+		e = ir.NewConvExpr(pos, ir.OCONVNOP, nt.PtrTo(), e)
+		e = ir.NewStarExpr(pos, e)
+		o.append(ir.NewAssignStmt(pos, e, n))
+		return tmp
+	}
+}
+
+// mapKeyReplaceStrConv replaces OBYTES2STR by OBYTES2STRTMP
+// in n to avoid string allocations for keys in map lookups.
+// Returns a bool that signals if a modification was made.
+//
+// For:
+//
+//	x = m[string(k)]
+//	x = m[T1{... Tn{..., string(k), ...}}]
+//
+// where k is []byte, T1 to Tn is a nesting of struct and array literals,
+// the allocation of backing bytes for the string can be avoided
+// by reusing the []byte backing array.
 These are special cases
+// for avoiding allocations when converting byte slices to strings.
+// It would be nice to handle these generally, but because
+// []byte keys are not allowed in maps, the use of string(k)
+// comes up in important cases in practice. See issue 3512.
+func mapKeyReplaceStrConv(n ir.Node) bool {
+	var replaced bool
+	switch n.Op() {
+	case ir.OBYTES2STR:
+		n := n.(*ir.ConvExpr)
+		n.SetOp(ir.OBYTES2STRTMP)
+		replaced = true
+	case ir.OSTRUCTLIT:
+		// Recurse into every field value of the struct literal.
+		n := n.(*ir.CompLitExpr)
+		for _, elem := range n.List {
+			elem := elem.(*ir.StructKeyExpr)
+			if mapKeyReplaceStrConv(elem.Value) {
+				replaced = true
+			}
+		}
+	case ir.OARRAYLIT:
+		// Recurse into every element, unwrapping explicit index keys.
+		n := n.(*ir.CompLitExpr)
+		for _, elem := range n.List {
+			if elem.Op() == ir.OKEY {
+				elem = elem.(*ir.KeyExpr).Value
+			}
+			if mapKeyReplaceStrConv(elem) {
+				replaced = true
+			}
+		}
+	}
+	return replaced
+}
+
+// ordermarker records a position in the temporary variable stack,
+// as returned by markTemp and consumed by popTemp.
+type ordermarker int
+
+// markTemp returns the top of the temporary variable stack.
+func (o *orderState) markTemp() ordermarker {
+	return ordermarker(len(o.temp))
+}
+
+// popTemp pops temporaries off the stack until reaching the mark,
+// which must have been returned by markTemp.
+func (o *orderState) popTemp(mark ordermarker) {
+	for _, n := range o.temp[mark:] {
+		// Return each popped temporary to the free list for its type
+		// so newTemp can reuse it.
+		key := n.Type().LinkString()
+		o.free[key] = append(o.free[key], n)
+	}
+	o.temp = o.temp[:mark]
+}
+
+// stmtList orders each of the statements in the list.
+func (o *orderState) stmtList(l ir.Nodes) {
+	s := l
+	for i := range s {
+		// Look ahead from each position for the make+copy pattern;
+		// orderMakeSliceCopy inspects s[i] and s[i+1].
+		orderMakeSliceCopy(s[i:])
+		o.stmt(s[i])
+	}
+}
+
+// orderMakeSliceCopy matches the pattern:
+//
+//	m = OMAKESLICE([]T, x); OCOPY(m, s)
+//
+// and rewrites it to:
+//
+//	m = OMAKESLICECOPY([]T, x, s); nil
+func orderMakeSliceCopy(s []ir.Node) {
+	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
+		return
+	}
+	if len(s) < 2 || s[0] == nil || s[0].Op() != ir.OAS || s[1] == nil || s[1].Op() != ir.OCOPY {
+		return
+	}
+
+	as := s[0].(*ir.AssignStmt)
+	cp := s[1].(*ir.BinaryExpr)
+	if as.Y == nil || as.Y.Op() != ir.OMAKESLICE || ir.IsBlank(as.X) ||
+		as.X.Op() != ir.ONAME || cp.X.Op() != ir.ONAME || cp.Y.Op() != ir.ONAME ||
+		as.X.Name() != cp.X.Name() || cp.X.Name() == cp.Y.Name() {
+		// The line above this one is correct with the differing equality operators:
+		// we want as.X and cp.X to be the same name,
+		// but we want the initial data to be coming from a different name.
+		return
+	}
+
+	mk := as.Y.(*ir.MakeExpr)
+	if mk.Esc() == ir.EscNone || mk.Len == nil || mk.Cap != nil {
+		return
+	}
+	mk.SetOp(ir.OMAKESLICECOPY)
+	mk.Cap = cp.Y
+	// Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
+	mk.SetBounded(mk.Len.Op() == ir.OLEN && ir.SameSafeExpr(mk.Len.(*ir.UnaryExpr).X, cp.Y))
+	as.Y = typecheck.Expr(mk)
+	s[1] = nil // remove separate copy call
+}
+
+// edge inserts coverage instrumentation for libfuzzer.
+func (o *orderState) edge() {
+	if base.Debug.Libfuzzer == 0 {
+		return
+	}
+
+	// Create a new uint8 counter to be allocated in section __sancov_cntrs
+	counter := staticinit.StaticName(types.Types[types.TUINT8])
+	counter.SetLibfuzzer8BitCounter(true)
+	// As well as setting SetLibfuzzer8BitCounter, we preemptively set the
+	// symbol type to SLIBFUZZER_8BIT_COUNTER so that the race detector
+	// instrumentation pass (which does not have access to the flags set by
+	// SetLibfuzzer8BitCounter) knows to ignore them. This information is
+	// lost by the time it reaches the compile step, so SetLibfuzzer8BitCounter
+	// is still necessary.
+	counter.Linksym().Type = objabi.SLIBFUZZER_8BIT_COUNTER
+
+	// We guarantee that the counter never becomes zero again once it has been
+	// incremented once. This implementation follows the NeverZero optimization
+	// presented by the paper:
+	// "AFL++: Combining Incremental Steps of Fuzzing Research"
+	// The NeverZero policy avoids the overflow to 0 by setting the counter to one
+	// after it reaches 255 and so, if an edge is executed at least one time, the entry is
+	// never 0.
+	// Another policy presented in the paper is the Saturated Counters policy which
+	// freezes the counter when it reaches the value of 255. However, a range
+	// of experiments showed that that decreases overall performance.
+	o.append(ir.NewIfStmt(base.Pos,
+		ir.NewBinaryExpr(base.Pos, ir.OEQ, counter, ir.NewInt(base.Pos, 0xff)),
+		[]ir.Node{ir.NewAssignStmt(base.Pos, counter, ir.NewInt(base.Pos, 1))},
+		[]ir.Node{ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, ir.NewInt(base.Pos, 1))}))
+}
+
+// orderBlock orders the block of statements in n into a new slice,
+// and then replaces the old slice in n with the new slice.
+// free is a map that can be used to obtain temporary variables by type.
+func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
+	if len(*n) != 0 {
+		// Set reasonable position for instrumenting code. See issue 53688.
+		// It would be nice if ir.Nodes had a position (the opening {, probably),
+		// but it doesn't. So we use the first statement's position instead.
+		ir.SetPos((*n)[0])
+	}
+	var order orderState
+	order.free = free
+	mark := order.markTemp()
+	order.edge()
+	order.stmtList(*n)
+	order.popTemp(mark)
+	*n = order.out
+}
+
+// exprInPlace orders the side effects in *np and
+// leaves them as the init list of the final *np.
+// The result of exprInPlace MUST be assigned back to n, e.g.
+//
+//	n.Left = o.exprInPlace(n.Left)
+func (o *orderState) exprInPlace(n ir.Node) ir.Node {
+	var order orderState
+	order.free = o.free
+	n = order.expr(n, nil)
+	n = ir.InitExpr(order.out, n)
+
+	// insert new temporaries from order
+	// at head of outer list.
+	o.temp = append(o.temp, order.temp...)
+	return n
+}
+
+// orderStmtInPlace orders the side effects of the single statement *np
+// and replaces it with the resulting statement list.
+// The result of orderStmtInPlace MUST be assigned back to n, e.g.
+//
+//	n.Left = orderStmtInPlace(n.Left)
+//
+// free is a map that can be used to obtain temporary variables by type.
+func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node {
+	var order orderState
+	order.free = free
+	mark := order.markTemp()
+	order.stmt(n)
+	order.popTemp(mark)
+	return ir.NewBlockStmt(src.NoXPos, order.out)
+}
+
+// init moves n's init list to o.out.
+func (o *orderState) init(n ir.Node) {
+	if ir.MayBeShared(n) {
+		// For concurrency safety, don't mutate potentially shared nodes.
+		// First, ensure that no work is required here.
+		if len(n.Init()) > 0 {
+			base.Fatalf("order.init shared node with ninit")
+		}
+		return
+	}
+	o.stmtList(ir.TakeInit(n))
+}
+
+// call orders the call expression n.
+// n.Op is OCALLFUNC/OCALLINTER or a builtin like OCOPY.
+func (o *orderState) call(nn ir.Node) {
+	if len(nn.Init()) > 0 {
+		// Caller should have already called o.init(nn).
+		base.Fatalf("%v with unexpected ninit", nn.Op())
+	}
+	if nn.Op() == ir.OCALLMETH {
+		base.FatalfAt(nn.Pos(), "OCALLMETH missed by typecheck")
+	}
+
+	// Builtin functions.
+	if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLINTER {
+		// Order only the operands; the builtin node itself stays in place.
+		switch n := nn.(type) {
+		default:
+			base.Fatalf("unexpected call: %+v", n)
+		case *ir.UnaryExpr:
+			n.X = o.expr(n.X, nil)
+		case *ir.ConvExpr:
+			n.X = o.expr(n.X, nil)
+		case *ir.BinaryExpr:
+			n.X = o.expr(n.X, nil)
+			n.Y = o.expr(n.Y, nil)
+		case *ir.MakeExpr:
+			n.Len = o.expr(n.Len, nil)
+			n.Cap = o.expr(n.Cap, nil)
+		case *ir.CallExpr:
+			o.exprList(n.Args)
+		}
+		return
+	}
+
+	n := nn.(*ir.CallExpr)
+	typecheck.AssertFixedCall(n)
+
+	if ir.IsFuncPCIntrinsic(n) && ir.IsIfaceOfFunc(n.Args[0]) != nil {
+		// For internal/abi.FuncPCABIxxx(fn), if fn is a defined function,
+		// do not introduce temporaries here, so it is easier to rewrite it
+		// to symbol address reference later in walk.
+		return
+	}
+
+	n.Fun = o.expr(n.Fun, nil)
+	o.exprList(n.Args)
+}
+
+// mapAssign appends n to o.out.
+// For assignments whose LHS is a map index, the RHS is first made safe
+// via safeMapRHS so it cannot panic mid-insert.
+func (o *orderState) mapAssign(n ir.Node) {
+	switch n.Op() {
+	default:
+		base.Fatalf("order.mapAssign %v", n.Op())
+
+	case ir.OAS:
+		n := n.(*ir.AssignStmt)
+		if n.X.Op() == ir.OINDEXMAP {
+			n.Y = o.safeMapRHS(n.Y)
+		}
+		o.out = append(o.out, n)
+	case ir.OASOP:
+		n := n.(*ir.AssignOpStmt)
+		if n.X.Op() == ir.OINDEXMAP {
+			n.Y = o.safeMapRHS(n.Y)
+		}
+		o.out = append(o.out, n)
+	}
+}
+
+// safeMapRHS returns a version of r whose evaluation has been hoisted
+// into temporaries, so that evaluating it cannot panic after the map
+// insert has begun.
+func (o *orderState) safeMapRHS(r ir.Node) ir.Node {
+	// Make sure we evaluate the RHS before starting the map insert.
+	// We need to make sure the RHS won't panic. See issue 22881.
+	if r.Op() == ir.OAPPEND {
+		r := r.(*ir.CallExpr)
+		// Only the appended values need hoisting; Args[0] is the map
+		// index itself, which walk handles specially.
+		s := r.Args[1:]
+		for i, n := range s {
+			s[i] = o.cheapExpr(n)
+		}
+		return r
+	}
+	return o.cheapExpr(r)
+}
+
+// stmt orders the statement n, appending to o.out.
+func (o *orderState) stmt(n ir.Node) { + if n == nil { + return + } + + lno := ir.SetPos(n) + o.init(n) + + switch n.Op() { + default: + base.Fatalf("order.stmt %v", n.Op()) + + case ir.OINLMARK: + o.out = append(o.out, n) + + case ir.OAS: + n := n.(*ir.AssignStmt) + t := o.markTemp() + + // There's a delicate interaction here between two OINDEXMAP + // optimizations. + // + // First, we want to handle m[k] = append(m[k], ...) with a single + // runtime call to mapassign. This requires the m[k] expressions to + // satisfy ir.SameSafeExpr in walkAssign. + // + // But if k is a slow map key type that's passed by reference (e.g., + // byte), then we want to avoid marking user variables as addrtaken, + // if that might prevent the compiler from keeping k in a register. + // + // TODO(mdempsky): It would be better if walk was responsible for + // inserting temporaries as needed. + mapAppend := n.X.Op() == ir.OINDEXMAP && n.Y.Op() == ir.OAPPEND && + ir.SameSafeExpr(n.X, n.Y.(*ir.CallExpr).Args[0]) + + n.X = o.expr(n.X, nil) + if mapAppend { + indexLHS := n.X.(*ir.IndexExpr) + indexLHS.X = o.cheapExpr(indexLHS.X) + indexLHS.Index = o.cheapExpr(indexLHS.Index) + + call := n.Y.(*ir.CallExpr) + arg0 := call.Args[0] + // ir.SameSafeExpr skips OCONVNOPs, so we must do the same here (#66096). + for arg0.Op() == ir.OCONVNOP { + arg0 = arg0.(*ir.ConvExpr).X + } + indexRHS := arg0.(*ir.IndexExpr) + indexRHS.X = indexLHS.X + indexRHS.Index = indexLHS.Index + + o.exprList(call.Args[1:]) + } else { + n.Y = o.expr(n.Y, n.X) + } + o.mapAssign(n) + o.popTemp(t) + + case ir.OASOP: + n := n.(*ir.AssignOpStmt) + t := o.markTemp() + n.X = o.expr(n.X, nil) + n.Y = o.expr(n.Y, nil) + + if base.Flag.Cfg.Instrumenting || n.X.Op() == ir.OINDEXMAP && (n.AsOp == ir.ODIV || n.AsOp == ir.OMOD) { + // Rewrite m[k] op= r into m[k] = m[k] op r so + // that we can ensure that if op panics + // because r is zero, the panic happens before + // the map assignment. 
+ // DeepCopy is a big hammer here, but safeExpr + // makes sure there is nothing too deep being copied. + l1 := o.safeExpr(n.X) + l2 := ir.DeepCopy(src.NoXPos, l1) + if l2.Op() == ir.OINDEXMAP { + l2 := l2.(*ir.IndexExpr) + l2.Assigned = false + } + l2 = o.copyExpr(l2) + r := o.expr(typecheck.Expr(ir.NewBinaryExpr(n.Pos(), n.AsOp, l2, n.Y)), nil) + as := typecheck.Stmt(ir.NewAssignStmt(n.Pos(), l1, r)) + o.mapAssign(as) + o.popTemp(t) + return + } + + o.mapAssign(n) + o.popTemp(t) + + case ir.OAS2: + n := n.(*ir.AssignListStmt) + t := o.markTemp() + o.exprList(n.Lhs) + o.exprList(n.Rhs) + o.out = append(o.out, n) + o.popTemp(t) + + // Special: avoid copy of func call n.Right + case ir.OAS2FUNC: + n := n.(*ir.AssignListStmt) + t := o.markTemp() + o.exprList(n.Lhs) + call := n.Rhs[0] + o.init(call) + if ic, ok := call.(*ir.InlinedCallExpr); ok { + o.stmtList(ic.Body) + + n.SetOp(ir.OAS2) + n.Rhs = ic.ReturnVars + + o.exprList(n.Rhs) + o.out = append(o.out, n) + } else { + o.call(call) + o.as2func(n) + } + o.popTemp(t) + + // Special: use temporary variables to hold result, + // so that runtime can take address of temporary. + // No temporary for blank assignment. + // + // OAS2MAPR: make sure key is addressable if needed, + // and make sure OINDEXMAP is not copied out. + case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR: + n := n.(*ir.AssignListStmt) + t := o.markTemp() + o.exprList(n.Lhs) + + switch r := n.Rhs[0]; r.Op() { + case ir.ODOTTYPE2: + r := r.(*ir.TypeAssertExpr) + r.X = o.expr(r.X, nil) + case ir.ODYNAMICDOTTYPE2: + r := r.(*ir.DynamicTypeAssertExpr) + r.X = o.expr(r.X, nil) + r.RType = o.expr(r.RType, nil) + r.ITab = o.expr(r.ITab, nil) + case ir.ORECV: + r := r.(*ir.UnaryExpr) + r.X = o.expr(r.X, nil) + case ir.OINDEXMAP: + r := r.(*ir.IndexExpr) + r.X = o.expr(r.X, nil) + r.Index = o.expr(r.Index, nil) + // See similar conversion for OINDEXMAP below. 
+ _ = mapKeyReplaceStrConv(r.Index) + r.Index = o.mapKeyTemp(r.Pos(), r.X.Type(), r.Index) + default: + base.Fatalf("order.stmt: %v", r.Op()) + } + + o.as2ok(n) + o.popTemp(t) + + // Special: does not save n onto out. + case ir.OBLOCK: + n := n.(*ir.BlockStmt) + o.stmtList(n.List) + + // Special: n->left is not an expression; save as is. + case ir.OBREAK, + ir.OCONTINUE, + ir.ODCL, + ir.OFALL, + ir.OGOTO, + ir.OLABEL, + ir.OTAILCALL: + o.out = append(o.out, n) + + // Special: handle call arguments. + case ir.OCALLFUNC, ir.OCALLINTER: + n := n.(*ir.CallExpr) + t := o.markTemp() + o.call(n) + o.out = append(o.out, n) + o.popTemp(t) + + case ir.OINLCALL: + n := n.(*ir.InlinedCallExpr) + o.stmtList(n.Body) + + // discard results; double-check for no side effects + for _, result := range n.ReturnVars { + if staticinit.AnySideEffects(result) { + base.FatalfAt(result.Pos(), "inlined call result has side effects: %v", result) + } + } + + case ir.OCHECKNIL, ir.OCLEAR, ir.OCLOSE, ir.OPANIC, ir.ORECV: + n := n.(*ir.UnaryExpr) + t := o.markTemp() + n.X = o.expr(n.X, nil) + o.out = append(o.out, n) + o.popTemp(t) + + case ir.OCOPY: + n := n.(*ir.BinaryExpr) + t := o.markTemp() + n.X = o.expr(n.X, nil) + n.Y = o.expr(n.Y, nil) + o.out = append(o.out, n) + o.popTemp(t) + + case ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP: + n := n.(*ir.CallExpr) + t := o.markTemp() + o.call(n) + o.out = append(o.out, n) + o.popTemp(t) + + // Special: order arguments to inner call but not call itself. + case ir.ODEFER, ir.OGO: + n := n.(*ir.GoDeferStmt) + t := o.markTemp() + o.init(n.Call) + o.call(n.Call) + o.out = append(o.out, n) + o.popTemp(t) + + case ir.ODELETE: + n := n.(*ir.CallExpr) + t := o.markTemp() + n.Args[0] = o.expr(n.Args[0], nil) + n.Args[1] = o.expr(n.Args[1], nil) + n.Args[1] = o.mapKeyTemp(n.Pos(), n.Args[0].Type(), n.Args[1]) + o.out = append(o.out, n) + o.popTemp(t) + + // Clean temporaries from condition evaluation at + // beginning of loop body and after for statement. 
+ case ir.OFOR: + n := n.(*ir.ForStmt) + t := o.markTemp() + n.Cond = o.exprInPlace(n.Cond) + orderBlock(&n.Body, o.free) + n.Post = orderStmtInPlace(n.Post, o.free) + o.out = append(o.out, n) + o.popTemp(t) + + // Clean temporaries from condition at + // beginning of both branches. + case ir.OIF: + n := n.(*ir.IfStmt) + t := o.markTemp() + n.Cond = o.exprInPlace(n.Cond) + o.popTemp(t) + orderBlock(&n.Body, o.free) + orderBlock(&n.Else, o.free) + o.out = append(o.out, n) + + case ir.ORANGE: + // n.Right is the expression being ranged over. + // order it, and then make a copy if we need one. + // We almost always do, to ensure that we don't + // see any value changes made during the loop. + // Usually the copy is cheap (e.g., array pointer, + // chan, slice, string are all tiny). + // The exception is ranging over an array value + // (not a slice, not a pointer to array), + // which must make a copy to avoid seeing updates made during + // the range body. Ranging over an array value is uncommon though. + + // Mark []byte(str) range expression to reuse string backing storage. + // It is safe because the storage cannot be mutated. + n := n.(*ir.RangeStmt) + if x, ok := n.X.(*ir.ConvExpr); ok { + switch x.Op() { + case ir.OSTR2BYTES: + x.SetOp(ir.OSTR2BYTESTMP) + fallthrough + case ir.OSTR2BYTESTMP: + x.MarkNonNil() // "range []byte(nil)" is fine + } + } + + t := o.markTemp() + n.X = o.expr(n.X, nil) + + orderBody := true + xt := typecheck.RangeExprType(n.X.Type()) + switch k := xt.Kind(); { + default: + base.Fatalf("order.stmt range %v", n.Type()) + + case types.IsInt[k]: + // Used only once, no need to copy. + + case k == types.TARRAY, k == types.TSLICE: + if n.Value == nil || ir.IsBlank(n.Value) { + // for i := range x will only use x once, to compute len(x). + // No need to copy it. + break + } + fallthrough + + case k == types.TCHAN, k == types.TSTRING: + // chan, string, slice, array ranges use value multiple times. + // make copy. 
+ r := n.X + + if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] { + r = ir.NewConvExpr(base.Pos, ir.OCONV, nil, r) + r.SetType(types.Types[types.TSTRING]) + r = typecheck.Expr(r) + } + + n.X = o.copyExpr(r) + + case k == types.TMAP: + if isMapClear(n) { + // Preserve the body of the map clear pattern so it can + // be detected during walk. The loop body will not be used + // when optimizing away the range loop to a runtime call. + orderBody = false + break + } + + // copy the map value in case it is a map literal. + // TODO(rsc): Make tmp = literal expressions reuse tmp. + // For maps tmp is just one word so it hardly matters. + r := n.X + n.X = o.copyExpr(r) + + // n.Prealloc is the temp for the iterator. + // MapIterType contains pointers and needs to be zeroed. + n.Prealloc = o.newTemp(reflectdata.MapIterType(), true) + } + n.Key = o.exprInPlace(n.Key) + n.Value = o.exprInPlace(n.Value) + if orderBody { + orderBlock(&n.Body, o.free) + } + o.out = append(o.out, n) + o.popTemp(t) + + case ir.ORETURN: + n := n.(*ir.ReturnStmt) + o.exprList(n.Results) + o.out = append(o.out, n) + + // Special: clean case temporaries in each block entry. + // Select must enter one of its blocks, so there is no + // need for a cleaning at the end. + // Doubly special: evaluation order for select is stricter + // than ordinary expressions. Even something like p.c + // has to be hoisted into a temporary, so that it cannot be + // reordered after the channel evaluation for a different + // case (if p were nil, then the timing of the fault would + // give this away). + case ir.OSELECT: + n := n.(*ir.SelectStmt) + t := o.markTemp() + for _, ncas := range n.Cases { + r := ncas.Comm + ir.SetPos(ncas) + + // Append any new body prologue to ninit. + // The next loop will insert ninit into nbody. 
+ if len(ncas.Init()) != 0 { + base.Fatalf("order select ninit") + } + if r == nil { + continue + } + switch r.Op() { + default: + ir.Dump("select case", r) + base.Fatalf("unknown op in select %v", r.Op()) + + case ir.OSELRECV2: + // case x, ok = <-c + r := r.(*ir.AssignListStmt) + recv := r.Rhs[0].(*ir.UnaryExpr) + recv.X = o.expr(recv.X, nil) + if !ir.IsAutoTmp(recv.X) { + recv.X = o.copyExpr(recv.X) + } + init := ir.TakeInit(r) + + colas := r.Def + do := func(i int, t *types.Type) { + n := r.Lhs[i] + if ir.IsBlank(n) { + return + } + // If this is case x := <-ch or case x, y := <-ch, the case has + // the ODCL nodes to declare x and y. We want to delay that + // declaration (and possible allocation) until inside the case body. + // Delete the ODCL nodes here and recreate them inside the body below. + if colas { + if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).X == n { + init = init[1:] + + // iimport may have added a default initialization assignment, + // due to how it handles ODCL statements. + if len(init) > 0 && init[0].Op() == ir.OAS && init[0].(*ir.AssignStmt).X == n { + init = init[1:] + } + } + dcl := typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name))) + ncas.PtrInit().Append(dcl) + } + tmp := o.newTemp(t, t.HasPointers()) + as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, n, typecheck.Conv(tmp, n.Type()))) + ncas.PtrInit().Append(as) + r.Lhs[i] = tmp + } + do(0, recv.X.Type().Elem()) + do(1, types.Types[types.TBOOL]) + if len(init) != 0 { + ir.DumpList("ninit", init) + base.Fatalf("ninit on select recv") + } + orderBlock(ncas.PtrInit(), o.free) + + case ir.OSEND: + r := r.(*ir.SendStmt) + if len(r.Init()) != 0 { + ir.DumpList("ninit", r.Init()) + base.Fatalf("ninit on select send") + } + + // case c <- x + // r->left is c, r->right is x, both are always evaluated. 
+ r.Chan = o.expr(r.Chan, nil) + + if !ir.IsAutoTmp(r.Chan) { + r.Chan = o.copyExpr(r.Chan) + } + r.Value = o.expr(r.Value, nil) + if !ir.IsAutoTmp(r.Value) { + r.Value = o.copyExpr(r.Value) + } + } + } + // Now that we have accumulated all the temporaries, clean them. + // Also insert any ninit queued during the previous loop. + // (The temporary cleaning must follow that ninit work.) + for _, cas := range n.Cases { + orderBlock(&cas.Body, o.free) + + // TODO(mdempsky): Is this actually necessary? + // walkSelect appears to walk Ninit. + cas.Body.Prepend(ir.TakeInit(cas)...) + } + + o.out = append(o.out, n) + o.popTemp(t) + + // Special: value being sent is passed as a pointer; make it addressable. + case ir.OSEND: + n := n.(*ir.SendStmt) + t := o.markTemp() + n.Chan = o.expr(n.Chan, nil) + n.Value = o.expr(n.Value, nil) + if base.Flag.Cfg.Instrumenting { + // Force copying to the stack so that (chan T)(nil) <- x + // is still instrumented as a read of x. + n.Value = o.copyExpr(n.Value) + } else { + n.Value = o.addrTemp(n.Value) + } + o.out = append(o.out, n) + o.popTemp(t) + + // TODO(rsc): Clean temporaries more aggressively. + // Note that because walkSwitch will rewrite some of the + // switch into a binary search, this is not as easy as it looks. + // (If we ran that code here we could invoke order.stmt on + // the if-else chain instead.) + // For now just clean all the temporaries at the end. + // In practice that's fine. + case ir.OSWITCH: + n := n.(*ir.SwitchStmt) + if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) { + // Add empty "default:" case for instrumentation. 
+ n.Cases = append(n.Cases, ir.NewCaseStmt(base.Pos, nil, nil)) + } + + t := o.markTemp() + n.Tag = o.expr(n.Tag, nil) + for _, ncas := range n.Cases { + o.exprListInPlace(ncas.List) + orderBlock(&ncas.Body, o.free) + } + + o.out = append(o.out, n) + o.popTemp(t) + } + + base.Pos = lno +} + +func hasDefaultCase(n *ir.SwitchStmt) bool { + for _, ncas := range n.Cases { + if len(ncas.List) == 0 { + return true + } + } + return false +} + +// exprList orders the expression list l into o. +func (o *orderState) exprList(l ir.Nodes) { + s := l + for i := range s { + s[i] = o.expr(s[i], nil) + } +} + +// exprListInPlace orders the expression list l but saves +// the side effects on the individual expression ninit lists. +func (o *orderState) exprListInPlace(l ir.Nodes) { + s := l + for i := range s { + s[i] = o.exprInPlace(s[i]) + } +} + +func (o *orderState) exprNoLHS(n ir.Node) ir.Node { + return o.expr(n, nil) +} + +// expr orders a single expression, appending side +// effects to o.out as needed. +// If this is part of an assignment lhs = *np, lhs is given. +// Otherwise lhs == nil. (When lhs != nil it may be possible +// to avoid copying the result of the expression to a temporary.) +// The result of expr MUST be assigned back to n, e.g. +// +// n.Left = o.expr(n.Left, lhs) +func (o *orderState) expr(n, lhs ir.Node) ir.Node { + if n == nil { + return n + } + lno := ir.SetPos(n) + n = o.expr1(n, lhs) + base.Pos = lno + return n +} + +func (o *orderState) expr1(n, lhs ir.Node) ir.Node { + o.init(n) + + switch n.Op() { + default: + if o.edit == nil { + o.edit = o.exprNoLHS // create closure once + } + ir.EditChildren(n, o.edit) + return n + + // Addition of strings turns into a function call. + // Allocate a temporary to hold the strings. + // Fewer than 5 strings use direct runtime helpers. 
+ case ir.OADDSTR: + n := n.(*ir.AddStringExpr) + o.exprList(n.List) + + if len(n.List) > 5 { + t := types.NewArray(types.Types[types.TSTRING], int64(len(n.List))) + n.Prealloc = o.newTemp(t, false) + } + + // Mark string(byteSlice) arguments to reuse byteSlice backing + // buffer during conversion. String concatenation does not + // memorize the strings for later use, so it is safe. + // However, we can do it only if there is at least one non-empty string literal. + // Otherwise if all other arguments are empty strings, + // concatstrings will return the reference to the temp string + // to the caller. + hasbyte := false + + haslit := false + for _, n1 := range n.List { + hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR + haslit = haslit || n1.Op() == ir.OLITERAL && len(ir.StringVal(n1)) != 0 + } + + if haslit && hasbyte { + for _, n2 := range n.List { + if n2.Op() == ir.OBYTES2STR { + n2 := n2.(*ir.ConvExpr) + n2.SetOp(ir.OBYTES2STRTMP) + } + } + } + return n + + case ir.OINDEXMAP: + n := n.(*ir.IndexExpr) + n.X = o.expr(n.X, nil) + n.Index = o.expr(n.Index, nil) + needCopy := false + + if !n.Assigned { + // Enforce that any []byte slices we are not copying + // can not be changed before the map index by forcing + // the map index to happen immediately following the + // conversions. See copyExpr a few lines below. + needCopy = mapKeyReplaceStrConv(n.Index) + + if base.Flag.Cfg.Instrumenting { + // Race detector needs the copy. + needCopy = true + } + } + + // key may need to be be addressable + n.Index = o.mapKeyTemp(n.Pos(), n.X.Type(), n.Index) + if needCopy { + return o.copyExpr(n) + } + return n + + // concrete type (not interface) argument might need an addressable + // temporary to pass to the runtime conversion routine. 
+ case ir.OCONVIFACE: + n := n.(*ir.ConvExpr) + n.X = o.expr(n.X, nil) + if n.X.Type().IsInterface() { + return n + } + if _, _, needsaddr := dataWordFuncName(n.X.Type()); needsaddr || isStaticCompositeLiteral(n.X) { + // Need a temp if we need to pass the address to the conversion function. + // We also process static composite literal node here, making a named static global + // whose address we can put directly in an interface (see OCONVIFACE case in walk). + n.X = o.addrTemp(n.X) + } + return n + + case ir.OCONVNOP: + n := n.(*ir.ConvExpr) + if n.X.Op() == ir.OCALLMETH { + base.FatalfAt(n.X.Pos(), "OCALLMETH missed by typecheck") + } + if n.Type().IsKind(types.TUNSAFEPTR) && n.X.Type().IsKind(types.TUINTPTR) && (n.X.Op() == ir.OCALLFUNC || n.X.Op() == ir.OCALLINTER) { + call := n.X.(*ir.CallExpr) + // When reordering unsafe.Pointer(f()) into a separate + // statement, the conversion and function call must stay + // together. See golang.org/issue/15329. + o.init(call) + o.call(call) + if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting { + return o.copyExpr(n) + } + } else { + n.X = o.expr(n.X, nil) + } + return n + + case ir.OANDAND, ir.OOROR: + // ... = LHS && RHS + // + // var r bool + // r = LHS + // if r { // or !r, for OROR + // r = RHS + // } + // ... = r + + n := n.(*ir.LogicalExpr) + r := o.newTemp(n.Type(), false) + + // Evaluate left-hand side. + lhs := o.expr(n.X, nil) + o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, lhs))) + + // Evaluate right-hand side, save generated code. + saveout := o.out + o.out = nil + t := o.markTemp() + o.edge() + rhs := o.expr(n.Y, nil) + o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, rhs))) + o.popTemp(t) + gen := o.out + o.out = saveout + + // If left-hand side doesn't cause a short-circuit, issue right-hand side. 
+ nif := ir.NewIfStmt(base.Pos, r, nil, nil) + if n.Op() == ir.OANDAND { + nif.Body = gen + } else { + nif.Else = gen + } + o.out = append(o.out, nif) + return r + + case ir.OCALLMETH: + base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck") + panic("unreachable") + + case ir.OCALLFUNC, + ir.OCALLINTER, + ir.OCAP, + ir.OCOMPLEX, + ir.OCOPY, + ir.OIMAG, + ir.OLEN, + ir.OMAKECHAN, + ir.OMAKEMAP, + ir.OMAKESLICE, + ir.OMAKESLICECOPY, + ir.OMAX, + ir.OMIN, + ir.ONEW, + ir.OREAL, + ir.ORECOVERFP, + ir.OSTR2BYTES, + ir.OSTR2BYTESTMP, + ir.OSTR2RUNES: + + if isRuneCount(n) { + // len([]rune(s)) is rewritten to runtime.countrunes(s) later. + conv := n.(*ir.UnaryExpr).X.(*ir.ConvExpr) + conv.X = o.expr(conv.X, nil) + } else { + o.call(n) + } + + if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting { + return o.copyExpr(n) + } + return n + + case ir.OINLCALL: + n := n.(*ir.InlinedCallExpr) + o.stmtList(n.Body) + return n.SingleResult() + + case ir.OAPPEND: + // Check for append(x, make([]T, y)...) . 
+ n := n.(*ir.CallExpr) + if isAppendOfMake(n) { + n.Args[0] = o.expr(n.Args[0], nil) // order x + mk := n.Args[1].(*ir.MakeExpr) + mk.Len = o.expr(mk.Len, nil) // order y + } else { + o.exprList(n.Args) + } + + if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.Args[0]) { + return o.copyExpr(n) + } + return n + + case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR: + n := n.(*ir.SliceExpr) + n.X = o.expr(n.X, nil) + n.Low = o.cheapExpr(o.expr(n.Low, nil)) + n.High = o.cheapExpr(o.expr(n.High, nil)) + n.Max = o.cheapExpr(o.expr(n.Max, nil)) + if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.X) { + return o.copyExpr(n) + } + return n + + case ir.OCLOSURE: + n := n.(*ir.ClosureExpr) + if n.Transient() && len(n.Func.ClosureVars) > 0 { + n.Prealloc = o.newTemp(typecheck.ClosureType(n), false) + } + return n + + case ir.OMETHVALUE: + n := n.(*ir.SelectorExpr) + n.X = o.expr(n.X, nil) + if n.Transient() { + t := typecheck.MethodValueType(n) + n.Prealloc = o.newTemp(t, false) + } + return n + + case ir.OSLICELIT: + n := n.(*ir.CompLitExpr) + o.exprList(n.List) + if n.Transient() { + t := types.NewArray(n.Type().Elem(), n.Len) + n.Prealloc = o.newTemp(t, false) + } + return n + + case ir.ODOTTYPE, ir.ODOTTYPE2: + n := n.(*ir.TypeAssertExpr) + n.X = o.expr(n.X, nil) + if !types.IsDirectIface(n.Type()) || base.Flag.Cfg.Instrumenting { + return o.copyExprClear(n) + } + return n + + case ir.ORECV: + n := n.(*ir.UnaryExpr) + n.X = o.expr(n.X, nil) + return o.copyExprClear(n) + + case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE: + n := n.(*ir.BinaryExpr) + n.X = o.expr(n.X, nil) + n.Y = o.expr(n.Y, nil) + + t := n.X.Type() + switch { + case t.IsString(): + // Mark string(byteSlice) arguments to reuse byteSlice backing + // buffer during conversion. String comparison does not + // memorize the strings for later use, so it is safe. 
+ if n.X.Op() == ir.OBYTES2STR { + n.X.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP) + } + if n.Y.Op() == ir.OBYTES2STR { + n.Y.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP) + } + + case t.IsStruct() || t.IsArray(): + // for complex comparisons, we need both args to be + // addressable so we can pass them to the runtime. + n.X = o.addrTemp(n.X) + n.Y = o.addrTemp(n.Y) + } + return n + + case ir.OMAPLIT: + // Order map by converting: + // map[int]int{ + // a(): b(), + // c(): d(), + // e(): f(), + // } + // to + // m := map[int]int{} + // m[a()] = b() + // m[c()] = d() + // m[e()] = f() + // Then order the result. + // Without this special case, order would otherwise compute all + // the keys and values before storing any of them to the map. + // See issue 26552. + n := n.(*ir.CompLitExpr) + entries := n.List + statics := entries[:0] + var dynamics []*ir.KeyExpr + for _, r := range entries { + r := r.(*ir.KeyExpr) + + if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) { + dynamics = append(dynamics, r) + continue + } + + // Recursively ordering some static entries can change them to dynamic; + // e.g., OCONVIFACE nodes. See #31777. + r = o.expr(r, nil).(*ir.KeyExpr) + if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) { + dynamics = append(dynamics, r) + continue + } + + statics = append(statics, r) + } + n.List = statics + + if len(dynamics) == 0 { + return n + } + + // Emit the creation of the map (with all its static entries). + m := o.newTemp(n.Type(), false) + as := ir.NewAssignStmt(base.Pos, m, n) + typecheck.Stmt(as) + o.stmt(as) + + // Emit eval+insert of dynamic entries, one at a time. 
+ for _, r := range dynamics { + lhs := typecheck.AssignExpr(ir.NewIndexExpr(base.Pos, m, r.Key)).(*ir.IndexExpr) + base.AssertfAt(lhs.Op() == ir.OINDEXMAP, lhs.Pos(), "want OINDEXMAP, have %+v", lhs) + lhs.RType = n.RType + + as := ir.NewAssignStmt(base.Pos, lhs, r.Value) + typecheck.Stmt(as) + o.stmt(as) + } + + // Remember that we issued these assignments so we can include that count + // in the map alloc hint. + // We're assuming here that all the keys in the map literal are distinct. + // If any are equal, this will be an overcount. Probably not worth accounting + // for that, as equal keys in map literals are rare, and at worst we waste + // a bit of space. + n.Len += int64(len(dynamics)) + + return m + } + + // No return - type-assertions above. Each case must return for itself. +} + +// as2func orders OAS2FUNC nodes. It creates temporaries to ensure left-to-right assignment. +// The caller should order the right-hand side of the assignment before calling order.as2func. +// It rewrites, +// +// a, b, a = ... +// +// as +// +// tmp1, tmp2, tmp3 = ... +// a, b, a = tmp1, tmp2, tmp3 +// +// This is necessary to ensure left to right assignment order. +func (o *orderState) as2func(n *ir.AssignListStmt) { + results := n.Rhs[0].Type() + as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil) + for i, nl := range n.Lhs { + if !ir.IsBlank(nl) { + typ := results.Field(i).Type + tmp := o.newTemp(typ, typ.HasPointers()) + n.Lhs[i] = tmp + as.Lhs = append(as.Lhs, nl) + as.Rhs = append(as.Rhs, tmp) + } + } + + o.out = append(o.out, n) + o.stmt(typecheck.Stmt(as)) +} + +// as2ok orders OAS2XXX with ok. +// Just like as2func, this also adds temporaries to ensure left-to-right assignment. 
+func (o *orderState) as2ok(n *ir.AssignListStmt) { + as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil) + + do := func(i int, typ *types.Type) { + if nl := n.Lhs[i]; !ir.IsBlank(nl) { + var tmp ir.Node = o.newTemp(typ, typ.HasPointers()) + n.Lhs[i] = tmp + as.Lhs = append(as.Lhs, nl) + if i == 1 { + // The "ok" result is an untyped boolean according to the Go + // spec. We need to explicitly convert it to the LHS type in + // case the latter is a defined boolean type (#8475). + tmp = typecheck.Conv(tmp, nl.Type()) + } + as.Rhs = append(as.Rhs, tmp) + } + } + + do(0, n.Rhs[0].Type()) + do(1, types.Types[types.TBOOL]) + + o.out = append(o.out, n) + o.stmt(typecheck.Stmt(as)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/range.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/range.go new file mode 100644 index 0000000000000000000000000000000000000000..93898b3a66f296a715ddd89e5fdf75a672a86df9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/range.go @@ -0,0 +1,576 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package walk + +import ( + "unicode/utf8" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/src" + "cmd/internal/sys" +) + +func cheapComputableIndex(width int64) bool { + switch ssagen.Arch.LinkArch.Family { + // MIPS does not have R+R addressing + // Arm64 may lack ability to generate this code in our assembler, + // but the architecture supports it. + case sys.PPC64, sys.S390X: + return width == 1 + case sys.AMD64, sys.I386, sys.ARM64, sys.ARM: + switch width { + case 1, 2, 4, 8: + return true + } + } + return false +} + +// walkRange transforms various forms of ORANGE into +// simpler forms. 
The result must be assigned back to n. +// Node n may also be modified in place, and may also be +// the returned node. +func walkRange(nrange *ir.RangeStmt) ir.Node { + base.Assert(!nrange.DistinctVars) // Should all be rewritten before escape analysis + if isMapClear(nrange) { + return mapRangeClear(nrange) + } + + nfor := ir.NewForStmt(nrange.Pos(), nil, nil, nil, nil, nrange.DistinctVars) + nfor.SetInit(nrange.Init()) + nfor.Label = nrange.Label + + // variable name conventions: + // ohv1, hv1, hv2: hidden (old) val 1, 2 + // ha, hit: hidden aggregate, iterator + // hn, hp: hidden len, pointer + // hb: hidden bool + // a, v1, v2: not hidden aggregate, val 1, 2 + + a := nrange.X + t := a.Type() + lno := ir.SetPos(a) + + v1, v2 := nrange.Key, nrange.Value + + if ir.IsBlank(v2) { + v2 = nil + } + + if ir.IsBlank(v1) && v2 == nil { + v1 = nil + } + + if v1 == nil && v2 != nil { + base.Fatalf("walkRange: v2 != nil while v1 == nil") + } + + var body []ir.Node + var init []ir.Node + switch k := t.Kind(); { + default: + base.Fatalf("walkRange") + + case types.IsInt[k]: + hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, t) + hn := typecheck.TempAt(base.Pos, ir.CurFunc, t) + + init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil)) + init = append(init, ir.NewAssignStmt(base.Pos, hn, a)) + + nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn) + nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(base.Pos, 1))) + + if v1 != nil { + body = []ir.Node{rangeAssign(nrange, hv1)} + } + + case k == types.TARRAY, k == types.TSLICE, k == types.TPTR: // TPTR is pointer-to-array + if nn := arrayRangeClear(nrange, v1, v2, a); nn != nil { + base.Pos = lno + return nn + } + + // Element type of the iteration + var elem *types.Type + switch t.Kind() { + case types.TSLICE, types.TARRAY: + elem = t.Elem() + case types.TPTR: + elem = t.Elem().Elem() + } + + // order.stmt arranged for a copy of the array/slice variable if needed. 
+ ha := a + + hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT]) + hn := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT]) + + init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil)) + init = append(init, ir.NewAssignStmt(base.Pos, hn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha))) + + nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn) + nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(base.Pos, 1))) + + // for range ha { body } + if v1 == nil { + break + } + + // for v1 := range ha { body } + if v2 == nil { + body = []ir.Node{rangeAssign(nrange, hv1)} + break + } + + // for v1, v2 := range ha { body } + if cheapComputableIndex(elem.Size()) { + // v1, v2 = hv1, ha[hv1] + tmp := ir.NewIndexExpr(base.Pos, ha, hv1) + tmp.SetBounded(true) + body = []ir.Node{rangeAssign2(nrange, hv1, tmp)} + break + } + + // Slice to iterate over + var hs ir.Node + if t.IsSlice() { + hs = ha + } else { + var arr ir.Node + if t.IsPtr() { + arr = ha + } else { + arr = typecheck.NodAddr(ha) + arr.SetType(t.PtrTo()) + arr.SetTypecheck(1) + } + hs = ir.NewSliceExpr(base.Pos, ir.OSLICEARR, arr, nil, nil, nil) + // old typechecker doesn't know OSLICEARR, so we set types explicitly + hs.SetType(types.NewSlice(elem)) + hs.SetTypecheck(1) + } + + // We use a "pointer" to keep track of where we are in the backing array + // of the slice hs. This pointer starts at hs.ptr and gets incremented + // by the element size each time through the loop. + // + // It's tricky, though, as on the last iteration this pointer gets + // incremented to point past the end of the backing array. We can't + // let the garbage collector see that final out-of-bounds pointer. + // + // To avoid this, we keep the "pointer" alternately in 2 variables, one + // pointer typed and one uintptr typed. 
Most of the time it lives in the + // regular pointer variable, but when it might be out of bounds (after it + // has been incremented, but before the loop condition has been checked) + // it lives briefly in the uintptr variable. + // + // hp contains the pointer version (of type *T, where T is the element type). + // It is guaranteed to always be in range, keeps the backing store alive, + // and is updated on stack copies. If a GC occurs when this function is + // suspended at any safepoint, this variable ensures correct operation. + // + // hu contains the equivalent uintptr version. It may point past the + // end, but doesn't keep the backing store alive and doesn't get updated + // on a stack copy. If a GC occurs while this function is on the top of + // the stack, then the last frame is scanned conservatively and hu will + // act as a reference to the backing array to ensure it is not collected. + // + // The "pointer" we're moving across the backing array lives in one + // or the other of hp and hu as the loop proceeds. + // + // hp is live during most of the body of the loop. But it isn't live + // at the very top of the loop, when we haven't checked i 0 { + switch n.Op() { + case ir.OAS, ir.OAS2, ir.OBLOCK: + n.(ir.InitNode).PtrInit().Prepend(init...) + + default: + init.Append(n) + n = ir.NewBlockStmt(n.Pos(), init) + } + } + return n + + // special case for a receive where we throw away + // the value received. 
+ case ir.ORECV: + n := n.(*ir.UnaryExpr) + return walkRecv(n) + + case ir.OBREAK, + ir.OCONTINUE, + ir.OFALL, + ir.OGOTO, + ir.OLABEL, + ir.OJUMPTABLE, + ir.OINTERFACESWITCH, + ir.ODCL, + ir.OCHECKNIL: + return n + + case ir.OBLOCK: + n := n.(*ir.BlockStmt) + walkStmtList(n.List) + return n + + case ir.OCASE: + base.Errorf("case statement out of place") + panic("unreachable") + + case ir.ODEFER: + n := n.(*ir.GoDeferStmt) + ir.CurFunc.SetHasDefer(true) + ir.CurFunc.NumDefers++ + if ir.CurFunc.NumDefers > maxOpenDefers || n.DeferAt != nil { + // Don't allow open-coded defers if there are more than + // 8 defers in the function, since we use a single + // byte to record active defers. + // Also don't allow if we need to use deferprocat. + ir.CurFunc.SetOpenCodedDeferDisallowed(true) + } + if n.Esc() != ir.EscNever { + // If n.Esc is not EscNever, then this defer occurs in a loop, + // so open-coded defers cannot be used in this function. + ir.CurFunc.SetOpenCodedDeferDisallowed(true) + } + fallthrough + case ir.OGO: + n := n.(*ir.GoDeferStmt) + return walkGoDefer(n) + + case ir.OFOR: + n := n.(*ir.ForStmt) + return walkFor(n) + + case ir.OIF: + n := n.(*ir.IfStmt) + return walkIf(n) + + case ir.ORETURN: + n := n.(*ir.ReturnStmt) + return walkReturn(n) + + case ir.OTAILCALL: + n := n.(*ir.TailCallStmt) + + var init ir.Nodes + n.Call.Fun = walkExpr(n.Call.Fun, &init) + + if len(init) > 0 { + init.Append(n) + return ir.NewBlockStmt(n.Pos(), init) + } + return n + + case ir.OINLMARK: + n := n.(*ir.InlineMarkStmt) + return n + + case ir.OSELECT: + n := n.(*ir.SelectStmt) + walkSelect(n) + return n + + case ir.OSWITCH: + n := n.(*ir.SwitchStmt) + walkSwitch(n) + return n + + case ir.ORANGE: + n := n.(*ir.RangeStmt) + return walkRange(n) + } + + // No return! Each case must return (or panic), + // to avoid confusion about what gets returned + // in the presence of type assertions. 
+} + +func walkStmtList(s []ir.Node) { + for i := range s { + s[i] = walkStmt(s[i]) + } +} + +// walkFor walks an OFOR node. +func walkFor(n *ir.ForStmt) ir.Node { + if n.Cond != nil { + init := ir.TakeInit(n.Cond) + walkStmtList(init) + n.Cond = walkExpr(n.Cond, &init) + n.Cond = ir.InitExpr(init, n.Cond) + } + + n.Post = walkStmt(n.Post) + walkStmtList(n.Body) + return n +} + +// validGoDeferCall reports whether call is a valid call to appear in +// a go or defer statement; that is, whether it's a regular function +// call without arguments or results. +func validGoDeferCall(call ir.Node) bool { + if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC && len(call.KeepAlive) == 0 { + sig := call.Fun.Type() + return sig.NumParams()+sig.NumResults() == 0 + } + return false +} + +// walkGoDefer walks an OGO or ODEFER node. +func walkGoDefer(n *ir.GoDeferStmt) ir.Node { + if !validGoDeferCall(n.Call) { + base.FatalfAt(n.Pos(), "invalid %v call: %v", n.Op(), n.Call) + } + + var init ir.Nodes + + call := n.Call.(*ir.CallExpr) + call.Fun = walkExpr(call.Fun, &init) + + if len(init) > 0 { + init.Append(n) + return ir.NewBlockStmt(n.Pos(), init) + } + return n +} + +// walkIf walks an OIF node. +func walkIf(n *ir.IfStmt) ir.Node { + n.Cond = walkExpr(n.Cond, n.PtrInit()) + walkStmtList(n.Body) + walkStmtList(n.Else) + return n +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/switch.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/switch.go new file mode 100644 index 0000000000000000000000000000000000000000..b67d0114c7b6381a226602c8e046a5cd4ee01d25 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/switch.go @@ -0,0 +1,966 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package walk + +import ( + "fmt" + "go/constant" + "go/token" + "math/bits" + "sort" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/rttype" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/src" +) + +// walkSwitch walks a switch statement. +func walkSwitch(sw *ir.SwitchStmt) { + // Guard against double walk, see #25776. + if sw.Walked() { + return // Was fatal, but eliminating every possible source of double-walking is hard + } + sw.SetWalked(true) + + if sw.Tag != nil && sw.Tag.Op() == ir.OTYPESW { + walkSwitchType(sw) + } else { + walkSwitchExpr(sw) + } +} + +// walkSwitchExpr generates an AST implementing sw. sw is an +// expression switch. +func walkSwitchExpr(sw *ir.SwitchStmt) { + lno := ir.SetPos(sw) + + cond := sw.Tag + sw.Tag = nil + + // convert switch {...} to switch true {...} + if cond == nil { + cond = ir.NewBool(base.Pos, true) + cond = typecheck.Expr(cond) + cond = typecheck.DefaultLit(cond, nil) + } + + // Given "switch string(byteslice)", + // with all cases being side-effect free, + // use a zero-cost alias of the byte slice. + // Do this before calling walkExpr on cond, + // because walkExpr will lower the string + // conversion into a runtime call. + // See issue 24937 for more discussion. + if cond.Op() == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) { + cond := cond.(*ir.ConvExpr) + cond.SetOp(ir.OBYTES2STRTMP) + } + + cond = walkExpr(cond, sw.PtrInit()) + if cond.Op() != ir.OLITERAL && cond.Op() != ir.ONIL { + cond = copyExpr(cond, cond.Type(), &sw.Compiled) + } + + base.Pos = lno + + s := exprSwitch{ + pos: lno, + exprname: cond, + } + + var defaultGoto ir.Node + var body ir.Nodes + for _, ncase := range sw.Cases { + label := typecheck.AutoLabel(".s") + jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label) + + // Process case dispatch. 
+ if len(ncase.List) == 0 { + if defaultGoto != nil { + base.Fatalf("duplicate default case not detected during typechecking") + } + defaultGoto = jmp + } + + for i, n1 := range ncase.List { + var rtype ir.Node + if i < len(ncase.RTypes) { + rtype = ncase.RTypes[i] + } + s.Add(ncase.Pos(), n1, rtype, jmp) + } + + // Process body. + body.Append(ir.NewLabelStmt(ncase.Pos(), label)) + body.Append(ncase.Body...) + if fall, pos := endsInFallthrough(ncase.Body); !fall { + br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil) + br.SetPos(pos) + body.Append(br) + } + } + sw.Cases = nil + + if defaultGoto == nil { + br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil) + br.SetPos(br.Pos().WithNotStmt()) + defaultGoto = br + } + + s.Emit(&sw.Compiled) + sw.Compiled.Append(defaultGoto) + sw.Compiled.Append(body.Take()...) + walkStmtList(sw.Compiled) +} + +// An exprSwitch walks an expression switch. +type exprSwitch struct { + pos src.XPos + exprname ir.Node // value being switched on + + done ir.Nodes + clauses []exprClause +} + +type exprClause struct { + pos src.XPos + lo, hi ir.Node + rtype ir.Node // *runtime._type for OEQ node + jmp ir.Node +} + +func (s *exprSwitch) Add(pos src.XPos, expr, rtype, jmp ir.Node) { + c := exprClause{pos: pos, lo: expr, hi: expr, rtype: rtype, jmp: jmp} + if types.IsOrdered[s.exprname.Type().Kind()] && expr.Op() == ir.OLITERAL { + s.clauses = append(s.clauses, c) + return + } + + s.flush() + s.clauses = append(s.clauses, c) + s.flush() +} + +func (s *exprSwitch) Emit(out *ir.Nodes) { + s.flush() + out.Append(s.done.Take()...) +} + +func (s *exprSwitch) flush() { + cc := s.clauses + s.clauses = nil + if len(cc) == 0 { + return + } + + // Caution: If len(cc) == 1, then cc[0] might not an OLITERAL. + // The code below is structured to implicitly handle this case + // (e.g., sort.Slice doesn't need to invoke the less function + // when there's only a single slice element). 
+ + if s.exprname.Type().IsString() && len(cc) >= 2 { + // Sort strings by length and then by value. It is + // much cheaper to compare lengths than values, and + // all we need here is consistency. We respect this + // sorting below. + sort.Slice(cc, func(i, j int) bool { + si := ir.StringVal(cc[i].lo) + sj := ir.StringVal(cc[j].lo) + if len(si) != len(sj) { + return len(si) < len(sj) + } + return si < sj + }) + + // runLen returns the string length associated with a + // particular run of exprClauses. + runLen := func(run []exprClause) int64 { return int64(len(ir.StringVal(run[0].lo))) } + + // Collapse runs of consecutive strings with the same length. + var runs [][]exprClause + start := 0 + for i := 1; i < len(cc); i++ { + if runLen(cc[start:]) != runLen(cc[i:]) { + runs = append(runs, cc[start:i]) + start = i + } + } + runs = append(runs, cc[start:]) + + // We have strings of more than one length. Generate an + // outer switch which switches on the length of the string + // and an inner switch in each case which resolves all the + // strings of the same length. The code looks something like this: + + // goto outerLabel + // len5: + // ... search among length 5 strings ... + // goto endLabel + // len8: + // ... search among length 8 strings ... + // goto endLabel + // ... other lengths ... + // outerLabel: + // switch len(s) { + // case 5: goto len5 + // case 8: goto len8 + // ... other lengths ... + // } + // endLabel: + + outerLabel := typecheck.AutoLabel(".s") + endLabel := typecheck.AutoLabel(".s") + + // Jump around all the individual switches for each length. + s.done.Append(ir.NewBranchStmt(s.pos, ir.OGOTO, outerLabel)) + + var outer exprSwitch + outer.exprname = ir.NewUnaryExpr(s.pos, ir.OLEN, s.exprname) + outer.exprname.SetType(types.Types[types.TINT]) + + for _, run := range runs { + // Target label to jump to when we match this length. + label := typecheck.AutoLabel(".s") + + // Search within this run of same-length strings. 
+ pos := run[0].pos + s.done.Append(ir.NewLabelStmt(pos, label)) + stringSearch(s.exprname, run, &s.done) + s.done.Append(ir.NewBranchStmt(pos, ir.OGOTO, endLabel)) + + // Add length case to outer switch. + cas := ir.NewInt(pos, runLen(run)) + jmp := ir.NewBranchStmt(pos, ir.OGOTO, label) + outer.Add(pos, cas, nil, jmp) + } + s.done.Append(ir.NewLabelStmt(s.pos, outerLabel)) + outer.Emit(&s.done) + s.done.Append(ir.NewLabelStmt(s.pos, endLabel)) + return + } + + sort.Slice(cc, func(i, j int) bool { + return constant.Compare(cc[i].lo.Val(), token.LSS, cc[j].lo.Val()) + }) + + // Merge consecutive integer cases. + if s.exprname.Type().IsInteger() { + consecutive := func(last, next constant.Value) bool { + delta := constant.BinaryOp(next, token.SUB, last) + return constant.Compare(delta, token.EQL, constant.MakeInt64(1)) + } + + merged := cc[:1] + for _, c := range cc[1:] { + last := &merged[len(merged)-1] + if last.jmp == c.jmp && consecutive(last.hi.Val(), c.lo.Val()) { + last.hi = c.lo + } else { + merged = append(merged, c) + } + } + cc = merged + } + + s.search(cc, &s.done) +} + +func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) { + if s.tryJumpTable(cc, out) { + return + } + binarySearch(len(cc), out, + func(i int) ir.Node { + return ir.NewBinaryExpr(base.Pos, ir.OLE, s.exprname, cc[i-1].hi) + }, + func(i int, nif *ir.IfStmt) { + c := &cc[i] + nif.Cond = c.test(s.exprname) + nif.Body = []ir.Node{c.jmp} + }, + ) +} + +// Try to implement the clauses with a jump table. Returns true if successful. 
+func (s *exprSwitch) tryJumpTable(cc []exprClause, out *ir.Nodes) bool { + const minCases = 8 // have at least minCases cases in the switch + const minDensity = 4 // use at least 1 out of every minDensity entries + + if base.Flag.N != 0 || !ssagen.Arch.LinkArch.CanJumpTable || base.Ctxt.Retpoline { + return false + } + if len(cc) < minCases { + return false // not enough cases for it to be worth it + } + if cc[0].lo.Val().Kind() != constant.Int { + return false // e.g. float + } + if s.exprname.Type().Size() > int64(types.PtrSize) { + return false // 64-bit switches on 32-bit archs + } + min := cc[0].lo.Val() + max := cc[len(cc)-1].hi.Val() + width := constant.BinaryOp(constant.BinaryOp(max, token.SUB, min), token.ADD, constant.MakeInt64(1)) + limit := constant.MakeInt64(int64(len(cc)) * minDensity) + if constant.Compare(width, token.GTR, limit) { + // We disable jump tables if we use less than a minimum fraction of the entries. + // i.e. for switch x {case 0: case 1000: case 2000:} we don't want to use a jump table. + return false + } + jt := ir.NewJumpTableStmt(base.Pos, s.exprname) + for _, c := range cc { + jmp := c.jmp.(*ir.BranchStmt) + if jmp.Op() != ir.OGOTO || jmp.Label == nil { + panic("bad switch case body") + } + for i := c.lo.Val(); constant.Compare(i, token.LEQ, c.hi.Val()); i = constant.BinaryOp(i, token.ADD, constant.MakeInt64(1)) { + jt.Cases = append(jt.Cases, i) + jt.Targets = append(jt.Targets, jmp.Label) + } + } + out.Append(jt) + return true +} + +func (c *exprClause) test(exprname ir.Node) ir.Node { + // Integer range. + if c.hi != c.lo { + low := ir.NewBinaryExpr(c.pos, ir.OGE, exprname, c.lo) + high := ir.NewBinaryExpr(c.pos, ir.OLE, exprname, c.hi) + return ir.NewLogicalExpr(c.pos, ir.OANDAND, low, high) + } + + // Optimize "switch true { ...}" and "switch false { ... }". 
+ if ir.IsConst(exprname, constant.Bool) && !c.lo.Type().IsInterface() { + if ir.BoolVal(exprname) { + return c.lo + } else { + return ir.NewUnaryExpr(c.pos, ir.ONOT, c.lo) + } + } + + n := ir.NewBinaryExpr(c.pos, ir.OEQ, exprname, c.lo) + n.RType = c.rtype + return n +} + +func allCaseExprsAreSideEffectFree(sw *ir.SwitchStmt) bool { + // In theory, we could be more aggressive, allowing any + // side-effect-free expressions in cases, but it's a bit + // tricky because some of that information is unavailable due + // to the introduction of temporaries during order. + // Restricting to constants is simple and probably powerful + // enough. + + for _, ncase := range sw.Cases { + for _, v := range ncase.List { + if v.Op() != ir.OLITERAL { + return false + } + } + } + return true +} + +// endsInFallthrough reports whether stmts ends with a "fallthrough" statement. +func endsInFallthrough(stmts []ir.Node) (bool, src.XPos) { + if len(stmts) == 0 { + return false, src.NoXPos + } + i := len(stmts) - 1 + return stmts[i].Op() == ir.OFALL, stmts[i].Pos() +} + +// walkSwitchType generates an AST that implements sw, where sw is a +// type switch. +func walkSwitchType(sw *ir.SwitchStmt) { + var s typeSwitch + s.srcName = sw.Tag.(*ir.TypeSwitchGuard).X + s.srcName = walkExpr(s.srcName, sw.PtrInit()) + s.srcName = copyExpr(s.srcName, s.srcName.Type(), &sw.Compiled) + s.okName = typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL]) + s.itabName = typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINT8].PtrTo()) + + // Get interface descriptor word. + // For empty interfaces this will be the type. + // For non-empty interfaces this will be the itab. 
+ srcItab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s.srcName) + srcData := ir.NewUnaryExpr(base.Pos, ir.OIDATA, s.srcName) + srcData.SetType(types.Types[types.TUINT8].PtrTo()) + srcData.SetTypecheck(1) + + // For empty interfaces, do: + // if e._type == nil { + // do nil case if it exists, otherwise default + // } + // h := e._type.hash + // Use a similar strategy for non-empty interfaces. + ifNil := ir.NewIfStmt(base.Pos, nil, nil, nil) + ifNil.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, srcItab, typecheck.NodNil()) + base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check. + ifNil.Cond = typecheck.Expr(ifNil.Cond) + ifNil.Cond = typecheck.DefaultLit(ifNil.Cond, nil) + // ifNil.Nbody assigned later. + sw.Compiled.Append(ifNil) + + // Load hash from type or itab. + dotHash := typeHashFieldOf(base.Pos, srcItab) + s.hashName = copyExpr(dotHash, dotHash.Type(), &sw.Compiled) + + // Make a label for each case body. + labels := make([]*types.Sym, len(sw.Cases)) + for i := range sw.Cases { + labels[i] = typecheck.AutoLabel(".s") + } + + // "jump" to execute if no case matches. + br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil) + + // Assemble a list of all the types we're looking for. + // This pass flattens the case lists, as well as handles + // some unusual cases, like default and nil cases. + type oneCase struct { + pos src.XPos + jmp ir.Node // jump to body of selected case + + // The case we're matching. Normally the type we're looking for + // is typ.Type(), but when typ is ODYNAMICTYPE the actual type + // we're looking for is not a compile-time constant (typ.Type() + // will be its shape). 
+ typ ir.Node + } + var cases []oneCase + var defaultGoto, nilGoto ir.Node + for i, ncase := range sw.Cases { + jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, labels[i]) + if len(ncase.List) == 0 { // default: + if defaultGoto != nil { + base.Fatalf("duplicate default case not detected during typechecking") + } + defaultGoto = jmp + } + for _, n1 := range ncase.List { + if ir.IsNil(n1) { // case nil: + if nilGoto != nil { + base.Fatalf("duplicate nil case not detected during typechecking") + } + nilGoto = jmp + continue + } + if n1.Op() == ir.ODYNAMICTYPE { + // Convert dynamic to static, if the dynamic is actually static. + // TODO: why isn't this OTYPE to begin with? + dt := n1.(*ir.DynamicType) + if dt.RType != nil && dt.RType.Op() == ir.OADDR { + addr := dt.RType.(*ir.AddrExpr) + if addr.X.Op() == ir.OLINKSYMOFFSET { + n1 = ir.TypeNode(n1.Type()) + } + } + if dt.ITab != nil && dt.ITab.Op() == ir.OADDR { + addr := dt.ITab.(*ir.AddrExpr) + if addr.X.Op() == ir.OLINKSYMOFFSET { + n1 = ir.TypeNode(n1.Type()) + } + } + } + cases = append(cases, oneCase{ + pos: ncase.Pos(), + typ: n1, + jmp: jmp, + }) + } + } + if defaultGoto == nil { + defaultGoto = br + } + if nilGoto == nil { + nilGoto = defaultGoto + } + ifNil.Body = []ir.Node{nilGoto} + + // Now go through the list of cases, processing groups as we find them. + var concreteCases []oneCase + var interfaceCases []oneCase + flush := func() { + // Process all the concrete types first. Because we handle shadowing + // below, it is correct to do all the concrete types before all of + // the interface types. + // The concrete cases can all be handled without a runtime call. 
+ if len(concreteCases) > 0 { + var clauses []typeClause + for _, c := range concreteCases { + as := ir.NewAssignListStmt(c.pos, ir.OAS2, + []ir.Node{ir.BlankNode, s.okName}, // _, ok = + []ir.Node{ir.NewTypeAssertExpr(c.pos, s.srcName, c.typ.Type())}) // iface.(type) + nif := ir.NewIfStmt(c.pos, s.okName, []ir.Node{c.jmp}, nil) + clauses = append(clauses, typeClause{ + hash: types.TypeHash(c.typ.Type()), + body: []ir.Node{typecheck.Stmt(as), typecheck.Stmt(nif)}, + }) + } + s.flush(clauses, &sw.Compiled) + concreteCases = concreteCases[:0] + } + + // The "any" case, if it exists, must be the last interface case, because + // it would shadow all subsequent cases. Strip it off here so the runtime + // call only needs to handle non-empty interfaces. + var anyGoto ir.Node + if len(interfaceCases) > 0 && interfaceCases[len(interfaceCases)-1].typ.Type().IsEmptyInterface() { + anyGoto = interfaceCases[len(interfaceCases)-1].jmp + interfaceCases = interfaceCases[:len(interfaceCases)-1] + } + + // Next, process all the interface types with a single call to the runtime. + if len(interfaceCases) > 0 { + + // Build an internal/abi.InterfaceSwitch descriptor to pass to the runtime. + lsym := types.LocalPkg.Lookup(fmt.Sprintf(".interfaceSwitch.%d", interfaceSwitchGen)).LinksymABI(obj.ABI0) + interfaceSwitchGen++ + c := rttype.NewCursor(lsym, 0, rttype.InterfaceSwitch) + c.Field("Cache").WritePtr(typecheck.LookupRuntimeVar("emptyInterfaceSwitchCache")) + c.Field("NCases").WriteInt(int64(len(interfaceCases))) + array, sizeDelta := c.Field("Cases").ModifyArray(len(interfaceCases)) + for i, c := range interfaceCases { + array.Elem(i).WritePtr(reflectdata.TypeSym(c.typ.Type()).Linksym()) + } + objw.Global(lsym, int32(rttype.InterfaceSwitch.Size()+sizeDelta), obj.LOCAL) + // The GC only needs to see the first pointer in the structure (all the others + // are to static locations). 
So the InterfaceSwitch type itself is fine, even + // though it might not cover the whole array we wrote above. + lsym.Gotype = reflectdata.TypeLinksym(rttype.InterfaceSwitch) + + // Call runtime to do switch + // case, itab = runtime.interfaceSwitch(&descriptor, typeof(arg)) + var typeArg ir.Node + if s.srcName.Type().IsEmptyInterface() { + typeArg = ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUINT8].PtrTo(), srcItab) + } else { + typeArg = itabType(srcItab) + } + caseVar := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT]) + isw := ir.NewInterfaceSwitchStmt(base.Pos, caseVar, s.itabName, typeArg, dotHash, lsym) + sw.Compiled.Append(isw) + + // Switch on the result of the call (or cache lookup). + var newCases []*ir.CaseClause + for i, c := range interfaceCases { + newCases = append(newCases, &ir.CaseClause{ + List: []ir.Node{ir.NewInt(base.Pos, int64(i))}, + Body: []ir.Node{c.jmp}, + }) + } + // TODO: add len(newCases) case, mark switch as bounded + sw2 := ir.NewSwitchStmt(base.Pos, caseVar, newCases) + sw.Compiled.Append(typecheck.Stmt(sw2)) + interfaceCases = interfaceCases[:0] + } + + if anyGoto != nil { + // We've already handled the nil case, so everything + // that reaches here matches the "any" case. + sw.Compiled.Append(anyGoto) + } + } +caseLoop: + for _, c := range cases { + if c.typ.Op() == ir.ODYNAMICTYPE { + flush() // process all previous cases + dt := c.typ.(*ir.DynamicType) + dot := ir.NewDynamicTypeAssertExpr(c.pos, ir.ODYNAMICDOTTYPE, s.srcName, dt.RType) + dot.ITab = dt.ITab + dot.SetType(c.typ.Type()) + dot.SetTypecheck(1) + + as := ir.NewAssignListStmt(c.pos, ir.OAS2, nil, nil) + as.Lhs = []ir.Node{ir.BlankNode, s.okName} // _, ok = + as.Rhs = []ir.Node{dot} + typecheck.Stmt(as) + + nif := ir.NewIfStmt(c.pos, s.okName, []ir.Node{c.jmp}, nil) + sw.Compiled.Append(as, nif) + continue + } + + // Check for shadowing (a case that will never fire because + // a previous case would have always fired first). 
This check + // allows us to reorder concrete and interface cases. + // (TODO: these should be vet failures, maybe?) + for _, ic := range interfaceCases { + // An interface type case will shadow all + // subsequent types that implement that interface. + if typecheck.Implements(c.typ.Type(), ic.typ.Type()) { + continue caseLoop + } + // Note that we don't need to worry about: + // 1. Two concrete types shadowing each other. That's + // disallowed by the spec. + // 2. A concrete type shadowing an interface type. + // That can never happen, as interface types can + // be satisfied by an infinite set of concrete types. + // The correctness of this step also depends on handling + // the dynamic type cases separately, as we do above. + } + + if c.typ.Type().IsInterface() { + interfaceCases = append(interfaceCases, c) + } else { + concreteCases = append(concreteCases, c) + } + } + flush() + + sw.Compiled.Append(defaultGoto) // if none of the cases matched + + // Now generate all the case bodies + for i, ncase := range sw.Cases { + sw.Compiled.Append(ir.NewLabelStmt(ncase.Pos(), labels[i])) + if caseVar := ncase.Var; caseVar != nil { + val := s.srcName + if len(ncase.List) == 1 { + // single type. We have to downcast the input value to the target type. + if ncase.List[0].Op() == ir.OTYPE { // single compile-time known type + t := ncase.List[0].Type() + if t.IsInterface() { + // This case is an interface. Build case value from input interface. + // The data word will always be the same, but the itab/type changes. + if t.IsEmptyInterface() { + var typ ir.Node + if s.srcName.Type().IsEmptyInterface() { + // E->E, nothing to do, type is already correct. + typ = srcItab + } else { + // I->E, load type out of itab + typ = itabType(srcItab) + typ.SetPos(ncase.Pos()) + } + val = ir.NewBinaryExpr(ncase.Pos(), ir.OMAKEFACE, typ, srcData) + } else { + // The itab we need was returned by a runtime.interfaceSwitch call. 
+ val = ir.NewBinaryExpr(ncase.Pos(), ir.OMAKEFACE, s.itabName, srcData) + } + } else { + // This case is a concrete type, just read its value out of the interface. + val = ifaceData(ncase.Pos(), s.srcName, t) + } + } else if ncase.List[0].Op() == ir.ODYNAMICTYPE { // single runtime known type + dt := ncase.List[0].(*ir.DynamicType) + x := ir.NewDynamicTypeAssertExpr(ncase.Pos(), ir.ODYNAMICDOTTYPE, val, dt.RType) + x.ITab = dt.ITab + val = x + } else if ir.IsNil(ncase.List[0]) { + } else { + base.Fatalf("unhandled type switch case %v", ncase.List[0]) + } + val.SetType(caseVar.Type()) + val.SetTypecheck(1) + } + l := []ir.Node{ + ir.NewDecl(ncase.Pos(), ir.ODCL, caseVar), + ir.NewAssignStmt(ncase.Pos(), caseVar, val), + } + typecheck.Stmts(l) + sw.Compiled.Append(l...) + } + sw.Compiled.Append(ncase.Body...) + sw.Compiled.Append(br) + } + + walkStmtList(sw.Compiled) + sw.Tag = nil + sw.Cases = nil +} + +var interfaceSwitchGen int + +// typeHashFieldOf returns an expression to select the type hash field +// from an interface's descriptor word (whether a *runtime._type or +// *runtime.itab pointer). +func typeHashFieldOf(pos src.XPos, itab *ir.UnaryExpr) *ir.SelectorExpr { + if itab.Op() != ir.OITAB { + base.Fatalf("expected OITAB, got %v", itab.Op()) + } + var hashField *types.Field + if itab.X.Type().IsEmptyInterface() { + // runtime._type's hash field + if rtypeHashField == nil { + rtypeHashField = runtimeField("hash", rttype.Type.OffsetOf("Hash"), types.Types[types.TUINT32]) + } + hashField = rtypeHashField + } else { + // runtime.itab's hash field + if itabHashField == nil { + itabHashField = runtimeField("hash", int64(2*types.PtrSize), types.Types[types.TUINT32]) + } + hashField = itabHashField + } + return boundedDotPtr(pos, itab, hashField) +} + +var rtypeHashField, itabHashField *types.Field + +// A typeSwitch walks a type switch. 
+type typeSwitch struct { + // Temporary variables (i.e., ONAMEs) used by type switch dispatch logic: + srcName ir.Node // value being type-switched on + hashName ir.Node // type hash of the value being type-switched on + okName ir.Node // boolean used for comma-ok type assertions + itabName ir.Node // itab value to use for first word of non-empty interface +} + +type typeClause struct { + hash uint32 + body ir.Nodes +} + +func (s *typeSwitch) flush(cc []typeClause, compiled *ir.Nodes) { + if len(cc) == 0 { + return + } + + sort.Slice(cc, func(i, j int) bool { return cc[i].hash < cc[j].hash }) + + // Combine adjacent cases with the same hash. + merged := cc[:1] + for _, c := range cc[1:] { + last := &merged[len(merged)-1] + if last.hash == c.hash { + last.body.Append(c.body.Take()...) + } else { + merged = append(merged, c) + } + } + cc = merged + + if s.tryJumpTable(cc, compiled) { + return + } + binarySearch(len(cc), compiled, + func(i int) ir.Node { + return ir.NewBinaryExpr(base.Pos, ir.OLE, s.hashName, ir.NewInt(base.Pos, int64(cc[i-1].hash))) + }, + func(i int, nif *ir.IfStmt) { + // TODO(mdempsky): Omit hash equality check if + // there's only one type. + c := cc[i] + nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, s.hashName, ir.NewInt(base.Pos, int64(c.hash))) + nif.Body.Append(c.body.Take()...) + }, + ) +} + +// Try to implement the clauses with a jump table. Returns true if successful. +func (s *typeSwitch) tryJumpTable(cc []typeClause, out *ir.Nodes) bool { + const minCases = 5 // have at least minCases cases in the switch + if base.Flag.N != 0 || !ssagen.Arch.LinkArch.CanJumpTable || base.Ctxt.Retpoline { + return false + } + if len(cc) < minCases { + return false // not enough cases for it to be worth it + } + hashes := make([]uint32, len(cc)) + // b = # of bits to use. Start with the minimum number of + // bits possible, but try a few larger sizes if needed. 
+ b0 := bits.Len(uint(len(cc) - 1)) + for b := b0; b < b0+3; b++ { + pickI: + for i := 0; i <= 32-b; i++ { // starting bit position + // Compute the hash we'd get from all the cases, + // selecting b bits starting at bit i. + hashes = hashes[:0] + for _, c := range cc { + h := c.hash >> i & (1<> i & (1< bestScore { + bestScore = score + bestIdx = idx + bestByte = b + } + } + } + + // The split must be at least 1:n-1 because we have at least 2 distinct strings; they + // have to be different somewhere. + // TODO: what if the best split is still pretty bad? + if bestScore == 0 { + base.Fatalf("unable to split string set") + } + + // Convert expr to a []int8 + slice := ir.NewConvExpr(base.Pos, ir.OSTR2BYTESTMP, types.NewSlice(types.Types[types.TINT8]), expr) + slice.SetTypecheck(1) // legacy typechecker doesn't handle this op + slice.MarkNonNil() + // Load the byte we're splitting on. + load := ir.NewIndexExpr(base.Pos, slice, ir.NewInt(base.Pos, int64(bestIdx))) + // Compare with the value we're splitting on. + cmp := ir.Node(ir.NewBinaryExpr(base.Pos, ir.OLE, load, ir.NewInt(base.Pos, int64(bestByte)))) + cmp = typecheck.DefaultLit(typecheck.Expr(cmp), nil) + nif := ir.NewIfStmt(base.Pos, cmp, nil, nil) + + var le []exprClause + var gt []exprClause + for _, c := range cc { + s := ir.StringVal(c.lo) + if int8(s[bestIdx]) <= bestByte { + le = append(le, c) + } else { + gt = append(gt, c) + } + } + stringSearch(expr, le, &nif.Body) + stringSearch(expr, gt, &nif.Else) + out.Append(nif) + + // TODO: if expr[bestIdx] has enough different possible values, use a jump table. +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/temp.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/temp.go new file mode 100644 index 0000000000000000000000000000000000000000..886b5beec3e6712e24c3b6fbe3384db1b0f2d722 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/temp.go @@ -0,0 +1,40 @@ +// Copyright 2021 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package walk + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" +) + +// initStackTemp appends statements to init to initialize the given +// temporary variable to val, and then returns the expression &tmp. +func initStackTemp(init *ir.Nodes, tmp *ir.Name, val ir.Node) *ir.AddrExpr { + if val != nil && !types.Identical(tmp.Type(), val.Type()) { + base.Fatalf("bad initial value for %L: %L", tmp, val) + } + appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmp, val)) + return typecheck.Expr(typecheck.NodAddr(tmp)).(*ir.AddrExpr) +} + +// stackTempAddr returns the expression &tmp, where tmp is a newly +// allocated temporary variable of the given type. Statements to +// zero-initialize tmp are appended to init. +func stackTempAddr(init *ir.Nodes, typ *types.Type) *ir.AddrExpr { + return initStackTemp(init, typecheck.TempAt(base.Pos, ir.CurFunc, typ), nil) +} + +// stackBufAddr returns the expression &tmp, where tmp is a newly +// allocated temporary variable of type [len]elem. This variable is +// initialized, and elem must not contain pointers. +func stackBufAddr(len int64, elem *types.Type) *ir.AddrExpr { + if elem.HasPointers() { + base.FatalfAt(base.Pos, "%v has pointers", elem) + } + tmp := typecheck.TempAt(base.Pos, ir.CurFunc, types.NewArray(elem, len)) + return typecheck.Expr(typecheck.NodAddr(tmp)).(*ir.AddrExpr) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/walk.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/walk.go new file mode 100644 index 0000000000000000000000000000000000000000..001edcc3325c223c6cb72785c03b913a1c0e0f4e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/walk/walk.go @@ -0,0 +1,393 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package walk + +import ( + "fmt" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/reflectdata" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/typecheck" + "cmd/compile/internal/types" + "cmd/internal/src" +) + +// The constant is known to runtime. +const tmpstringbufsize = 32 + +func Walk(fn *ir.Func) { + ir.CurFunc = fn + errorsBefore := base.Errors() + order(fn) + if base.Errors() > errorsBefore { + return + } + + if base.Flag.W != 0 { + s := fmt.Sprintf("\nbefore walk %v", ir.CurFunc.Sym()) + ir.DumpList(s, ir.CurFunc.Body) + } + + lno := base.Pos + + base.Pos = lno + if base.Errors() > errorsBefore { + return + } + walkStmtList(ir.CurFunc.Body) + if base.Flag.W != 0 { + s := fmt.Sprintf("after walk %v", ir.CurFunc.Sym()) + ir.DumpList(s, ir.CurFunc.Body) + } + + // Eagerly compute sizes of all variables for SSA. + for _, n := range fn.Dcl { + types.CalcSize(n.Type()) + } +} + +// walkRecv walks an ORECV node. 
+func walkRecv(n *ir.UnaryExpr) ir.Node { + if n.Typecheck() == 0 { + base.Fatalf("missing typecheck: %+v", n) + } + init := ir.TakeInit(n) + + n.X = walkExpr(n.X, &init) + call := walkExpr(mkcall1(chanfn("chanrecv1", 2, n.X.Type()), nil, &init, n.X, typecheck.NodNil()), &init) + return ir.InitExpr(init, call) +} + +func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt { + if n.Op() != ir.OAS { + base.Fatalf("convas: not OAS %v", n.Op()) + } + n.SetTypecheck(1) + + if n.X == nil || n.Y == nil { + return n + } + + lt := n.X.Type() + rt := n.Y.Type() + if lt == nil || rt == nil { + return n + } + + if ir.IsBlank(n.X) { + n.Y = typecheck.DefaultLit(n.Y, nil) + return n + } + + if !types.Identical(lt, rt) { + n.Y = typecheck.AssignConv(n.Y, lt, "assignment") + n.Y = walkExpr(n.Y, init) + } + types.CalcSize(n.Y.Type()) + + return n +} + +func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr { + if init == nil { + base.Fatalf("mkcall with nil init: %v", fn) + } + if fn.Type() == nil || fn.Type().Kind() != types.TFUNC { + base.Fatalf("mkcall %v %v", fn, fn.Type()) + } + + n := fn.Type().NumParams() + if n != len(va) { + base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va)) + } + + call := typecheck.Call(base.Pos, fn, va, false).(*ir.CallExpr) + call.SetType(t) + return walkExpr(call, init).(*ir.CallExpr) +} + +func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr { + return vmkcall(typecheck.LookupRuntime(name), t, init, args) +} + +func mkcallstmt(name string, args ...ir.Node) ir.Node { + return mkcallstmt1(typecheck.LookupRuntime(name), args...) 
+} + +func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr { + return vmkcall(fn, t, init, args) +} + +func mkcallstmt1(fn ir.Node, args ...ir.Node) ir.Node { + var init ir.Nodes + n := vmkcall(fn, nil, &init, args) + if len(init) == 0 { + return n + } + init.Append(n) + return ir.NewBlockStmt(n.Pos(), init) +} + +func chanfn(name string, n int, t *types.Type) ir.Node { + if !t.IsChan() { + base.Fatalf("chanfn %v", t) + } + switch n { + case 1: + return typecheck.LookupRuntime(name, t.Elem()) + case 2: + return typecheck.LookupRuntime(name, t.Elem(), t.Elem()) + } + base.Fatalf("chanfn %d", n) + return nil +} + +func mapfn(name string, t *types.Type, isfat bool) ir.Node { + if !t.IsMap() { + base.Fatalf("mapfn %v", t) + } + if mapfast(t) == mapslow || isfat { + return typecheck.LookupRuntime(name, t.Key(), t.Elem(), t.Key(), t.Elem()) + } + return typecheck.LookupRuntime(name, t.Key(), t.Elem(), t.Elem()) +} + +func mapfndel(name string, t *types.Type) ir.Node { + if !t.IsMap() { + base.Fatalf("mapfn %v", t) + } + if mapfast(t) == mapslow { + return typecheck.LookupRuntime(name, t.Key(), t.Elem(), t.Key()) + } + return typecheck.LookupRuntime(name, t.Key(), t.Elem()) +} + +const ( + mapslow = iota + mapfast32 + mapfast32ptr + mapfast64 + mapfast64ptr + mapfaststr + nmapfast +) + +type mapnames [nmapfast]string + +func mkmapnames(base string, ptr string) mapnames { + return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"} +} + +var mapaccess1 = mkmapnames("mapaccess1", "") +var mapaccess2 = mkmapnames("mapaccess2", "") +var mapassign = mkmapnames("mapassign", "ptr") +var mapdelete = mkmapnames("mapdelete", "") + +func mapfast(t *types.Type) int { + // Check runtime/map.go:maxElemSize before changing. 
+ if t.Elem().Size() > 128 { + return mapslow + } + switch reflectdata.AlgType(t.Key()) { + case types.AMEM32: + if !t.Key().HasPointers() { + return mapfast32 + } + if types.PtrSize == 4 { + return mapfast32ptr + } + base.Fatalf("small pointer %v", t.Key()) + case types.AMEM64: + if !t.Key().HasPointers() { + return mapfast64 + } + if types.PtrSize == 8 { + return mapfast64ptr + } + // Two-word object, at least one of which is a pointer. + // Use the slow path. + case types.ASTRING: + return mapfaststr + } + return mapslow +} + +func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) { + walkExprListSafe(n.Args, init) + + // walkExprListSafe will leave OINDEX (s[n]) alone if both s + // and n are name or literal, but those may index the slice we're + // modifying here. Fix explicitly. + ls := n.Args + for i1, n1 := range ls { + ls[i1] = cheapExpr(n1, init) + } +} + +// appendWalkStmt typechecks and walks stmt and then appends it to init. +func appendWalkStmt(init *ir.Nodes, stmt ir.Node) { + op := stmt.Op() + n := typecheck.Stmt(stmt) + if op == ir.OAS || op == ir.OAS2 { + // If the assignment has side effects, walkExpr will append them + // directly to init for us, while walkStmt will wrap it in an OBLOCK. + // We need to append them directly. + // TODO(rsc): Clean this up. + n = walkExpr(n, init) + } else { + n = walkStmt(n) + } + init.Append(n) +} + +// The max number of defers in a function using open-coded defers. We enforce this +// limit because the deferBits bitmask is currently a single byte (to minimize code size) +const maxOpenDefers = 8 + +// backingArrayPtrLen extracts the pointer and length from a slice or string. +// This constructs two nodes referring to n, so n must be a cheapExpr. 
+func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) { + var init ir.Nodes + c := cheapExpr(n, &init) + if c != n || len(init) != 0 { + base.Fatalf("backingArrayPtrLen not cheap: %v", n) + } + ptr = ir.NewUnaryExpr(base.Pos, ir.OSPTR, n) + if n.Type().IsString() { + ptr.SetType(types.Types[types.TUINT8].PtrTo()) + } else { + ptr.SetType(n.Type().Elem().PtrTo()) + } + ptr.SetTypecheck(1) + length = ir.NewUnaryExpr(base.Pos, ir.OLEN, n) + length.SetType(types.Types[types.TINT]) + length.SetTypecheck(1) + return ptr, length +} + +// mayCall reports whether evaluating expression n may require +// function calls, which could clobber function call arguments/results +// currently on the stack. +func mayCall(n ir.Node) bool { + // When instrumenting, any expression might require function calls. + if base.Flag.Cfg.Instrumenting { + return true + } + + isSoftFloat := func(typ *types.Type) bool { + return types.IsFloat[typ.Kind()] || types.IsComplex[typ.Kind()] + } + + return ir.Any(n, func(n ir.Node) bool { + // walk should have already moved any Init blocks off of + // expressions. + if len(n.Init()) != 0 { + base.FatalfAt(n.Pos(), "mayCall %+v", n) + } + + switch n.Op() { + default: + base.FatalfAt(n.Pos(), "mayCall %+v", n) + + case ir.OCALLFUNC, ir.OCALLINTER, + ir.OUNSAFEADD, ir.OUNSAFESLICE: + return true + + case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR, + ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODYNAMICDOTTYPE, ir.ODIV, ir.OMOD, + ir.OSLICE2ARR, ir.OSLICE2ARRPTR: + // These ops might panic, make sure they are done + // before we start marshaling args for a call. See issue 16760. + return true + + case ir.OANDAND, ir.OOROR: + n := n.(*ir.LogicalExpr) + // The RHS expression may have init statements that + // should only execute conditionally, and so cannot be + // pulled out to the top-level init list. We could try + // to be more precise here. 
+ return len(n.Y.Init()) != 0 + + // When using soft-float, these ops might be rewritten to function calls + // so we ensure they are evaluated first. + case ir.OADD, ir.OSUB, ir.OMUL, ir.ONEG: + return ssagen.Arch.SoftFloat && isSoftFloat(n.Type()) + case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT: + n := n.(*ir.BinaryExpr) + return ssagen.Arch.SoftFloat && isSoftFloat(n.X.Type()) + case ir.OCONV: + n := n.(*ir.ConvExpr) + return ssagen.Arch.SoftFloat && (isSoftFloat(n.Type()) || isSoftFloat(n.X.Type())) + + case ir.OMIN, ir.OMAX: + // string or float requires runtime call, see (*ssagen.state).minmax method. + return n.Type().IsString() || n.Type().IsFloat() + + case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OLINKSYMOFFSET, ir.OMETHEXPR, + ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOMPLEX, ir.OMAKEFACE, + ir.OADDR, ir.OBITNOT, ir.ONOT, ir.OPLUS, + ir.OCAP, ir.OIMAG, ir.OLEN, ir.OREAL, + ir.OCONVNOP, ir.ODOT, + ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.OSPTR, + ir.OBYTES2STRTMP, ir.OGETG, ir.OGETCALLERPC, ir.OGETCALLERSP, ir.OSLICEHEADER, ir.OSTRINGHEADER: + // ok: operations that don't require function calls. + // Expand as needed. + } + + return false + }) +} + +// itabType loads the _type field from a runtime.itab struct. +func itabType(itab ir.Node) ir.Node { + if itabTypeField == nil { + // runtime.itab's _type field + itabTypeField = runtimeField("_type", int64(types.PtrSize), types.NewPtr(types.Types[types.TUINT8])) + } + return boundedDotPtr(base.Pos, itab, itabTypeField) +} + +var itabTypeField *types.Field + +// boundedDotPtr returns a selector expression representing ptr.field +// and omits nil-pointer checks for ptr. 
+func boundedDotPtr(pos src.XPos, ptr ir.Node, field *types.Field) *ir.SelectorExpr { + sel := ir.NewSelectorExpr(pos, ir.ODOTPTR, ptr, field.Sym) + sel.Selection = field + sel.SetType(field.Type) + sel.SetTypecheck(1) + sel.SetBounded(true) // guaranteed not to fault + return sel +} + +func runtimeField(name string, offset int64, typ *types.Type) *types.Field { + f := types.NewField(src.NoXPos, ir.Pkgs.Runtime.Lookup(name), typ) + f.Offset = offset + return f +} + +// ifaceData loads the data field from an interface. +// The concrete type must be known to have type t. +// It follows the pointer if !IsDirectIface(t). +func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node { + if t.IsInterface() { + base.Fatalf("ifaceData interface: %v", t) + } + ptr := ir.NewUnaryExpr(pos, ir.OIDATA, n) + if types.IsDirectIface(t) { + ptr.SetType(t) + ptr.SetTypecheck(1) + return ptr + } + ptr.SetType(types.NewPtr(t)) + ptr.SetTypecheck(1) + ind := ir.NewStarExpr(pos, ptr) + ind.SetType(t) + ind.SetTypecheck(1) + ind.SetBounded(true) + return ind +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/wasm/ssa.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/wasm/ssa.go new file mode 100644 index 0000000000000000000000000000000000000000..85f34a77073ed7bcc75c3a44628e308604dcbd94 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/wasm/ssa.go @@ -0,0 +1,623 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package wasm + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/logopt" + "cmd/compile/internal/objw" + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/wasm" + "internal/buildcfg" +) + +/* + + Wasm implementation + ------------------- + + Wasm is a strange Go port because the machine isn't + a register-based machine, threads are different, code paths + are different, etc. We outline those differences here. + + See the design doc for some additional info on this topic. + https://docs.google.com/document/d/131vjr4DH6JFnb-blm_uRdaC0_Nv3OUwjEY5qVCxCup4/edit#heading=h.mjo1bish3xni + + PCs: + + Wasm doesn't have PCs in the normal sense that you can jump + to or call to. Instead, we simulate these PCs using our own construct. + + A PC in the Wasm implementation is the combination of a function + ID and a block ID within that function. The function ID is an index + into a function table which transfers control to the start of the + function in question, and the block ID is a sequential integer + indicating where in the function we are. + + Every function starts with a branch table which transfers control + to the place in the function indicated by the block ID. The block + ID is provided to the function as the sole Wasm argument. + + Block IDs do not encode every possible PC. They only encode places + in the function where it might be suspended. Typically these places + are call sites. + + Sometimes we encode the function ID and block ID separately. When + recorded together as a single integer, we use the value F<<16+B. + + Threads: + + Wasm doesn't (yet) have threads. We have to simulate threads by + keeping goroutine stacks in linear memory and unwinding + the Wasm stack each time we want to switch goroutines. 
+ + To support unwinding a stack, each function call returns on the Wasm + stack a boolean that tells the function whether it should return + immediately or not. When returning immediately, a return address + is left on the top of the Go stack indicating where the goroutine + should be resumed. + + Stack pointer: + + There is a single global stack pointer which records the stack pointer + used by the currently active goroutine. This is just an address in + linear memory where the Go runtime is maintaining the stack for that + goroutine. + + Functions cache the global stack pointer in a local variable for + faster access, but any changes must be spilled to the global variable + before any call and restored from the global variable after any call. + + Calling convention: + + All Go arguments and return values are passed on the Go stack, not + the wasm stack. In addition, return addresses are pushed on the + Go stack at every call point. Return addresses are not used during + normal execution, they are used only when resuming goroutines. + (So they are not really a "return address", they are a "resume address".) + + All Go functions have the Wasm type (i32)->i32. The argument + is the block ID and the return value is the exit immediately flag. + + Callsite: + - write arguments to the Go stack (starting at SP+0) + - push return address to Go stack (8 bytes) + - write local SP to global SP + - push 0 (type i32) to Wasm stack + - issue Call + - restore local SP from global SP + - pop int32 from top of Wasm stack. If nonzero, exit function immediately. 
+ - use results from Go stack (starting at SP+sizeof(args)) + - note that the callee will have popped the return address + + Prologue: + - initialize local SP from global SP + - jump to the location indicated by the block ID argument + (which appears in local variable 0) + - at block 0 + - check for Go stack overflow, call morestack if needed + - subtract frame size from SP + - note that arguments now start at SP+framesize+8 + + Normal epilogue: + - pop frame from Go stack + - pop return address from Go stack + - push 0 (type i32) on the Wasm stack + - return + Exit immediately epilogue: + - push 1 (type i32) on the Wasm stack + - return + - note that the return address and stack frame are left on the Go stack + + The main loop that executes goroutines is wasm_pc_f_loop, in + runtime/rt0_js_wasm.s. It grabs the saved return address from + the top of the Go stack (actually SP-8?), splits it up into F + and B parts, then calls F with its Wasm argument set to B. + + Note that when resuming a goroutine, only the most recent function + invocation of that goroutine appears on the Wasm stack. When that + Wasm function returns normally, the next most recent frame will + then be started up by wasm_pc_f_loop. 
+ + Global 0 is SP (stack pointer) + Global 1 is CTXT (closure pointer) + Global 2 is GP (goroutine pointer) +*/ + +func Init(arch *ssagen.ArchInfo) { + arch.LinkArch = &wasm.Linkwasm + arch.REGSP = wasm.REG_SP + arch.MAXWIDTH = 1 << 50 + + arch.ZeroRange = zeroRange + arch.Ginsnop = ginsnop + + arch.SSAMarkMoves = ssaMarkMoves + arch.SSAGenValue = ssaGenValue + arch.SSAGenBlock = ssaGenBlock +} + +func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog { + if cnt == 0 { + return p + } + if cnt%8 != 0 { + base.Fatalf("zerorange count not a multiple of widthptr %d", cnt) + } + + for i := int64(0); i < cnt; i += 8 { + p = pp.Append(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0) + p = pp.Append(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0) + p = pp.Append(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i) + } + + return p +} + +func ginsnop(pp *objw.Progs) *obj.Prog { + return pp.Prog(wasm.ANop) +} + +func ssaMarkMoves(s *ssagen.State, b *ssa.Block) { +} + +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { + switch b.Kind { + case ssa.BlockPlain: + if next != b.Succs[0].Block() { + s.Br(obj.AJMP, b.Succs[0].Block()) + } + + case ssa.BlockIf: + switch next { + case b.Succs[0].Block(): + // if false, jump to b.Succs[1] + getValue32(s, b.Controls[0]) + s.Prog(wasm.AI32Eqz) + s.Prog(wasm.AIf) + s.Br(obj.AJMP, b.Succs[1].Block()) + s.Prog(wasm.AEnd) + case b.Succs[1].Block(): + // if true, jump to b.Succs[0] + getValue32(s, b.Controls[0]) + s.Prog(wasm.AIf) + s.Br(obj.AJMP, b.Succs[0].Block()) + s.Prog(wasm.AEnd) + default: + // if true, jump to b.Succs[0], else jump to b.Succs[1] + getValue32(s, b.Controls[0]) + s.Prog(wasm.AIf) + s.Br(obj.AJMP, b.Succs[0].Block()) + s.Prog(wasm.AEnd) + s.Br(obj.AJMP, b.Succs[1].Block()) + } + + case ssa.BlockRet: + s.Prog(obj.ARET) + + case ssa.BlockExit, ssa.BlockRetJmp: + + case ssa.BlockDefer: + p := s.Prog(wasm.AGet) + p.From = obj.Addr{Type: obj.TYPE_REG, Reg: wasm.REG_RET0} + 
s.Prog(wasm.AI64Eqz) + s.Prog(wasm.AI32Eqz) + s.Prog(wasm.AIf) + s.Br(obj.AJMP, b.Succs[1].Block()) + s.Prog(wasm.AEnd) + if next != b.Succs[0].Block() { + s.Br(obj.AJMP, b.Succs[0].Block()) + } + + default: + panic("unexpected block") + } + + // Entry point for the next block. Used by the JMP in goToBlock. + s.Prog(wasm.ARESUMEPOINT) + + if s.OnWasmStackSkipped != 0 { + panic("wasm: bad stack") + } +} + +func ssaGenValue(s *ssagen.State, v *ssa.Value) { + switch v.Op { + case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall, ssa.OpWasmLoweredTailCall: + s.PrepareCall(v) + if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn == ir.Syms.Deferreturn { + // The runtime needs to inject jumps to + // deferreturn calls using the address in + // _func.deferreturn. Hence, the call to + // deferreturn must itself be a resumption + // point so it gets a target PC. + s.Prog(wasm.ARESUMEPOINT) + } + if v.Op == ssa.OpWasmLoweredClosureCall { + getValue64(s, v.Args[1]) + setReg(s, wasm.REG_CTXT) + } + if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn != nil { + sym := call.Fn + p := s.Prog(obj.ACALL) + p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: sym} + p.Pos = v.Pos + if v.Op == ssa.OpWasmLoweredTailCall { + p.As = obj.ARET + } + } else { + getValue64(s, v.Args[0]) + p := s.Prog(obj.ACALL) + p.To = obj.Addr{Type: obj.TYPE_NONE} + p.Pos = v.Pos + } + + case ssa.OpWasmLoweredMove: + getValue32(s, v.Args[0]) + getValue32(s, v.Args[1]) + i32Const(s, int32(v.AuxInt)) + s.Prog(wasm.AMemoryCopy) + + case ssa.OpWasmLoweredZero: + getValue32(s, v.Args[0]) + i32Const(s, 0) + i32Const(s, int32(v.AuxInt)) + s.Prog(wasm.AMemoryFill) + + case ssa.OpWasmLoweredNilCheck: + getValue64(s, v.Args[0]) + s.Prog(wasm.AI64Eqz) + s.Prog(wasm.AIf) + p := s.Prog(wasm.ACALLNORESUME) + p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.SigPanic} + s.Prog(wasm.AEnd) + if logopt.Enabled() { + logopt.LogOpt(v.Pos, "nilcheck", 
"genssa", v.Block.Func.Name) + } + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") + } + + case ssa.OpWasmLoweredWB: + p := s.Prog(wasm.ACall) + // AuxInt encodes how many buffer entries we need. + p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.GCWriteBarrier[v.AuxInt-1]} + setReg(s, v.Reg0()) // move result from wasm stack to register local + + case ssa.OpWasmI64Store8, ssa.OpWasmI64Store16, ssa.OpWasmI64Store32, ssa.OpWasmI64Store, ssa.OpWasmF32Store, ssa.OpWasmF64Store: + getValue32(s, v.Args[0]) + getValue64(s, v.Args[1]) + p := s.Prog(v.Op.Asm()) + p.To = obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt} + + case ssa.OpStoreReg: + getReg(s, wasm.REG_SP) + getValue64(s, v.Args[0]) + p := s.Prog(storeOp(v.Type)) + ssagen.AddrAuto(&p.To, v) + + case ssa.OpClobber, ssa.OpClobberReg: + // TODO: implement for clobberdead experiment. Nop is ok for now. + + default: + if v.Type.IsMemory() { + return + } + if v.OnWasmStack { + s.OnWasmStackSkipped++ + // If a Value is marked OnWasmStack, we don't generate the value and store it to a register now. + // Instead, we delay the generation to when the value is used and then directly generate it on the WebAssembly stack. + return + } + ssaGenValueOnStack(s, v, true) + if s.OnWasmStackSkipped != 0 { + panic("wasm: bad stack") + } + setReg(s, v.Reg()) + } +} + +func ssaGenValueOnStack(s *ssagen.State, v *ssa.Value, extend bool) { + switch v.Op { + case ssa.OpWasmLoweredGetClosurePtr: + getReg(s, wasm.REG_CTXT) + + case ssa.OpWasmLoweredGetCallerPC: + p := s.Prog(wasm.AI64Load) + // Caller PC is stored 8 bytes below first parameter. + p.From = obj.Addr{ + Type: obj.TYPE_MEM, + Name: obj.NAME_PARAM, + Offset: -8, + } + + case ssa.OpWasmLoweredGetCallerSP: + p := s.Prog(wasm.AGet) + // Caller SP is the address of the first parameter. 
+ p.From = obj.Addr{ + Type: obj.TYPE_ADDR, + Name: obj.NAME_PARAM, + Reg: wasm.REG_SP, + Offset: 0, + } + + case ssa.OpWasmLoweredAddr: + if v.Aux == nil { // address of off(SP), no symbol + getValue64(s, v.Args[0]) + i64Const(s, v.AuxInt) + s.Prog(wasm.AI64Add) + break + } + p := s.Prog(wasm.AGet) + p.From.Type = obj.TYPE_ADDR + switch v.Aux.(type) { + case *obj.LSym: + ssagen.AddAux(&p.From, v) + case *ir.Name: + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + default: + panic("wasm: bad LoweredAddr") + } + + case ssa.OpWasmLoweredConvert: + getValue64(s, v.Args[0]) + + case ssa.OpWasmSelect: + getValue64(s, v.Args[0]) + getValue64(s, v.Args[1]) + getValue32(s, v.Args[2]) + s.Prog(v.Op.Asm()) + + case ssa.OpWasmI64AddConst: + getValue64(s, v.Args[0]) + i64Const(s, v.AuxInt) + s.Prog(v.Op.Asm()) + + case ssa.OpWasmI64Const: + i64Const(s, v.AuxInt) + + case ssa.OpWasmF32Const: + f32Const(s, v.AuxFloat()) + + case ssa.OpWasmF64Const: + f64Const(s, v.AuxFloat()) + + case ssa.OpWasmI64Load8U, ssa.OpWasmI64Load8S, ssa.OpWasmI64Load16U, ssa.OpWasmI64Load16S, ssa.OpWasmI64Load32U, ssa.OpWasmI64Load32S, ssa.OpWasmI64Load, ssa.OpWasmF32Load, ssa.OpWasmF64Load: + getValue32(s, v.Args[0]) + p := s.Prog(v.Op.Asm()) + p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt} + + case ssa.OpWasmI64Eqz: + getValue64(s, v.Args[0]) + s.Prog(v.Op.Asm()) + if extend { + s.Prog(wasm.AI64ExtendI32U) + } + + case ssa.OpWasmI64Eq, ssa.OpWasmI64Ne, ssa.OpWasmI64LtS, ssa.OpWasmI64LtU, ssa.OpWasmI64GtS, ssa.OpWasmI64GtU, ssa.OpWasmI64LeS, ssa.OpWasmI64LeU, ssa.OpWasmI64GeS, ssa.OpWasmI64GeU, + ssa.OpWasmF32Eq, ssa.OpWasmF32Ne, ssa.OpWasmF32Lt, ssa.OpWasmF32Gt, ssa.OpWasmF32Le, ssa.OpWasmF32Ge, + ssa.OpWasmF64Eq, ssa.OpWasmF64Ne, ssa.OpWasmF64Lt, ssa.OpWasmF64Gt, ssa.OpWasmF64Le, ssa.OpWasmF64Ge: + getValue64(s, v.Args[0]) + getValue64(s, v.Args[1]) + s.Prog(v.Op.Asm()) + if extend { + s.Prog(wasm.AI64ExtendI32U) + } + + case ssa.OpWasmI64Add, ssa.OpWasmI64Sub, ssa.OpWasmI64Mul, 
ssa.OpWasmI64DivU, ssa.OpWasmI64RemS, ssa.OpWasmI64RemU, ssa.OpWasmI64And, ssa.OpWasmI64Or, ssa.OpWasmI64Xor, ssa.OpWasmI64Shl, ssa.OpWasmI64ShrS, ssa.OpWasmI64ShrU, ssa.OpWasmI64Rotl, + ssa.OpWasmF32Add, ssa.OpWasmF32Sub, ssa.OpWasmF32Mul, ssa.OpWasmF32Div, ssa.OpWasmF32Copysign, + ssa.OpWasmF64Add, ssa.OpWasmF64Sub, ssa.OpWasmF64Mul, ssa.OpWasmF64Div, ssa.OpWasmF64Copysign: + getValue64(s, v.Args[0]) + getValue64(s, v.Args[1]) + s.Prog(v.Op.Asm()) + + case ssa.OpWasmI32Rotl: + getValue32(s, v.Args[0]) + getValue32(s, v.Args[1]) + s.Prog(wasm.AI32Rotl) + s.Prog(wasm.AI64ExtendI32U) + + case ssa.OpWasmI64DivS: + getValue64(s, v.Args[0]) + getValue64(s, v.Args[1]) + if v.Type.Size() == 8 { + // Division of int64 needs helper function wasmDiv to handle the MinInt64 / -1 case. + p := s.Prog(wasm.ACall) + p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmDiv} + break + } + s.Prog(wasm.AI64DivS) + + case ssa.OpWasmI64TruncSatF32S, ssa.OpWasmI64TruncSatF64S: + getValue64(s, v.Args[0]) + if buildcfg.GOWASM.SatConv { + s.Prog(v.Op.Asm()) + } else { + if v.Op == ssa.OpWasmI64TruncSatF32S { + s.Prog(wasm.AF64PromoteF32) + } + p := s.Prog(wasm.ACall) + p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncS} + } + + case ssa.OpWasmI64TruncSatF32U, ssa.OpWasmI64TruncSatF64U: + getValue64(s, v.Args[0]) + if buildcfg.GOWASM.SatConv { + s.Prog(v.Op.Asm()) + } else { + if v.Op == ssa.OpWasmI64TruncSatF32U { + s.Prog(wasm.AF64PromoteF32) + } + p := s.Prog(wasm.ACall) + p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncU} + } + + case ssa.OpWasmF32DemoteF64: + getValue64(s, v.Args[0]) + s.Prog(v.Op.Asm()) + + case ssa.OpWasmF64PromoteF32: + getValue64(s, v.Args[0]) + s.Prog(v.Op.Asm()) + + case ssa.OpWasmF32ConvertI64S, ssa.OpWasmF32ConvertI64U, + ssa.OpWasmF64ConvertI64S, ssa.OpWasmF64ConvertI64U, + ssa.OpWasmI64Extend8S, ssa.OpWasmI64Extend16S, ssa.OpWasmI64Extend32S, + ssa.OpWasmF32Neg, 
ssa.OpWasmF32Sqrt, ssa.OpWasmF32Trunc, ssa.OpWasmF32Ceil, ssa.OpWasmF32Floor, ssa.OpWasmF32Nearest, ssa.OpWasmF32Abs, + ssa.OpWasmF64Neg, ssa.OpWasmF64Sqrt, ssa.OpWasmF64Trunc, ssa.OpWasmF64Ceil, ssa.OpWasmF64Floor, ssa.OpWasmF64Nearest, ssa.OpWasmF64Abs, + ssa.OpWasmI64Ctz, ssa.OpWasmI64Clz, ssa.OpWasmI64Popcnt: + getValue64(s, v.Args[0]) + s.Prog(v.Op.Asm()) + + case ssa.OpLoadReg: + p := s.Prog(loadOp(v.Type)) + ssagen.AddrAuto(&p.From, v.Args[0]) + + case ssa.OpCopy: + getValue64(s, v.Args[0]) + + default: + v.Fatalf("unexpected op: %s", v.Op) + + } +} + +func isCmp(v *ssa.Value) bool { + switch v.Op { + case ssa.OpWasmI64Eqz, ssa.OpWasmI64Eq, ssa.OpWasmI64Ne, ssa.OpWasmI64LtS, ssa.OpWasmI64LtU, ssa.OpWasmI64GtS, ssa.OpWasmI64GtU, ssa.OpWasmI64LeS, ssa.OpWasmI64LeU, ssa.OpWasmI64GeS, ssa.OpWasmI64GeU, + ssa.OpWasmF32Eq, ssa.OpWasmF32Ne, ssa.OpWasmF32Lt, ssa.OpWasmF32Gt, ssa.OpWasmF32Le, ssa.OpWasmF32Ge, + ssa.OpWasmF64Eq, ssa.OpWasmF64Ne, ssa.OpWasmF64Lt, ssa.OpWasmF64Gt, ssa.OpWasmF64Le, ssa.OpWasmF64Ge: + return true + default: + return false + } +} + +func getValue32(s *ssagen.State, v *ssa.Value) { + if v.OnWasmStack { + s.OnWasmStackSkipped-- + ssaGenValueOnStack(s, v, false) + if !isCmp(v) { + s.Prog(wasm.AI32WrapI64) + } + return + } + + reg := v.Reg() + getReg(s, reg) + if reg != wasm.REG_SP { + s.Prog(wasm.AI32WrapI64) + } +} + +func getValue64(s *ssagen.State, v *ssa.Value) { + if v.OnWasmStack { + s.OnWasmStackSkipped-- + ssaGenValueOnStack(s, v, true) + return + } + + reg := v.Reg() + getReg(s, reg) + if reg == wasm.REG_SP { + s.Prog(wasm.AI64ExtendI32U) + } +} + +func i32Const(s *ssagen.State, val int32) { + p := s.Prog(wasm.AI32Const) + p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(val)} +} + +func i64Const(s *ssagen.State, val int64) { + p := s.Prog(wasm.AI64Const) + p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: val} +} + +func f32Const(s *ssagen.State, val float64) { + p := s.Prog(wasm.AF32Const) + p.From = obj.Addr{Type: 
obj.TYPE_FCONST, Val: val} +} + +func f64Const(s *ssagen.State, val float64) { + p := s.Prog(wasm.AF64Const) + p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val} +} + +func getReg(s *ssagen.State, reg int16) { + p := s.Prog(wasm.AGet) + p.From = obj.Addr{Type: obj.TYPE_REG, Reg: reg} +} + +func setReg(s *ssagen.State, reg int16) { + p := s.Prog(wasm.ASet) + p.To = obj.Addr{Type: obj.TYPE_REG, Reg: reg} +} + +func loadOp(t *types.Type) obj.As { + if t.IsFloat() { + switch t.Size() { + case 4: + return wasm.AF32Load + case 8: + return wasm.AF64Load + default: + panic("bad load type") + } + } + + switch t.Size() { + case 1: + if t.IsSigned() { + return wasm.AI64Load8S + } + return wasm.AI64Load8U + case 2: + if t.IsSigned() { + return wasm.AI64Load16S + } + return wasm.AI64Load16U + case 4: + if t.IsSigned() { + return wasm.AI64Load32S + } + return wasm.AI64Load32U + case 8: + return wasm.AI64Load + default: + panic("bad load type") + } +} + +func storeOp(t *types.Type) obj.As { + if t.IsFloat() { + switch t.Size() { + case 4: + return wasm.AF32Store + case 8: + return wasm.AF64Store + default: + panic("bad store type") + } + } + + switch t.Size() { + case 1: + return wasm.AI64Store8 + case 2: + return wasm.AI64Store16 + case 4: + return wasm.AI64Store32 + case 8: + return wasm.AI64Store + default: + panic("bad store type") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/x86/galign.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/x86/galign.go new file mode 100644 index 0000000000000000000000000000000000000000..5565bd32c765b36debf7cde83eab4f3d2d8a71dd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/x86/galign.go @@ -0,0 +1,39 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package x86 + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/ssagen" + "cmd/internal/obj/x86" + "fmt" + "internal/buildcfg" + "os" +) + +func Init(arch *ssagen.ArchInfo) { + arch.LinkArch = &x86.Link386 + arch.REGSP = x86.REGSP + arch.SSAGenValue = ssaGenValue + arch.SSAGenBlock = ssaGenBlock + arch.MAXWIDTH = (1 << 32) - 1 + switch v := buildcfg.GO386; v { + case "sse2": + case "softfloat": + arch.SoftFloat = true + case "387": + fmt.Fprintf(os.Stderr, "unsupported setting GO386=387. Consider using GO386=softfloat instead.\n") + base.Exit(1) + default: + fmt.Fprintf(os.Stderr, "unsupported setting GO386=%s\n", v) + base.Exit(1) + + } + + arch.ZeroRange = zerorange + arch.Ginsnop = ginsnop + + arch.SSAMarkMoves = ssaMarkMoves +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/x86/ggen.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/x86/ggen.go new file mode 100644 index 0000000000000000000000000000000000000000..3ca479763e63a21fb4ca852d1ac211cd41a34c67 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/x86/ggen.go @@ -0,0 +1,50 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package x86 + +import ( + "cmd/compile/internal/ir" + "cmd/compile/internal/objw" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/x86" +) + +func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, ax *uint32) *obj.Prog { + if cnt == 0 { + return p + } + if *ax == 0 { + p = pp.Append(p, x86.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0) + *ax = 1 + } + + if cnt <= int64(4*types.RegSize) { + for i := int64(0); i < cnt; i += int64(types.RegSize) { + p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off+i) + } + } else if cnt <= int64(128*types.RegSize) { + p = pp.Append(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0) + p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(types.RegSize))) + p.To.Sym = ir.Syms.Duffzero + } else { + p = pp.Append(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0) + p = pp.Append(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0) + p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0) + p = pp.Append(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0) + } + + return p +} + +func ginsnop(pp *objw.Progs) *obj.Prog { + // See comment in ../amd64/ggen.go. + p := pp.Prog(x86.AXCHGL) + p.From.Type = obj.TYPE_REG + p.From.Reg = x86.REG_AX + p.To.Type = obj.TYPE_REG + p.To.Reg = x86.REG_AX + return p +} diff --git a/platform/dbops/binaries/go/go/src/cmd/compile/internal/x86/ssa.go b/platform/dbops/binaries/go/go/src/cmd/compile/internal/x86/ssa.go new file mode 100644 index 0000000000000000000000000000000000000000..42ec44a51151d775a2fbc62fafcc1cf248b7c8c6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/compile/internal/x86/ssa.go @@ -0,0 +1,958 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package x86 + +import ( + "fmt" + "math" + + "cmd/compile/internal/base" + "cmd/compile/internal/ir" + "cmd/compile/internal/logopt" + "cmd/compile/internal/ssa" + "cmd/compile/internal/ssagen" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/x86" +) + +// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags. +func ssaMarkMoves(s *ssagen.State, b *ssa.Block) { + flive := b.FlagsLiveAtEnd + for _, c := range b.ControlValues() { + flive = c.Type.IsFlags() || flive + } + for i := len(b.Values) - 1; i >= 0; i-- { + v := b.Values[i] + if flive && v.Op == ssa.Op386MOVLconst { + // The "mark" is any non-nil Aux value. + v.Aux = ssa.AuxMark + } + if v.Type.IsFlags() { + flive = false + } + for _, a := range v.Args { + if a.Type.IsFlags() { + flive = true + } + } + } +} + +// loadByType returns the load instruction of the given type. +func loadByType(t *types.Type) obj.As { + // Avoid partial register write + if !t.IsFloat() { + switch t.Size() { + case 1: + return x86.AMOVBLZX + case 2: + return x86.AMOVWLZX + } + } + // Otherwise, there's no difference between load and store opcodes. + return storeByType(t) +} + +// storeByType returns the store instruction of the given type. +func storeByType(t *types.Type) obj.As { + width := t.Size() + if t.IsFloat() { + switch width { + case 4: + return x86.AMOVSS + case 8: + return x86.AMOVSD + } + } else { + switch width { + case 1: + return x86.AMOVB + case 2: + return x86.AMOVW + case 4: + return x86.AMOVL + } + } + panic("bad store type") +} + +// moveByType returns the reg->reg move instruction of the given type. 
+func moveByType(t *types.Type) obj.As { + if t.IsFloat() { + switch t.Size() { + case 4: + return x86.AMOVSS + case 8: + return x86.AMOVSD + default: + panic(fmt.Sprintf("bad float register width %d:%s", t.Size(), t)) + } + } else { + switch t.Size() { + case 1: + // Avoids partial register write + return x86.AMOVL + case 2: + return x86.AMOVL + case 4: + return x86.AMOVL + default: + panic(fmt.Sprintf("bad int register width %d:%s", t.Size(), t)) + } + } +} + +// opregreg emits instructions for +// +// dest := dest(To) op src(From) +// +// and also returns the created obj.Prog so it +// may be further adjusted (offset, scale, etc). +func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog { + p := s.Prog(op) + p.From.Type = obj.TYPE_REG + p.To.Type = obj.TYPE_REG + p.To.Reg = dest + p.From.Reg = src + return p +} + +func ssaGenValue(s *ssagen.State, v *ssa.Value) { + switch v.Op { + case ssa.Op386ADDL: + r := v.Reg() + r1 := v.Args[0].Reg() + r2 := v.Args[1].Reg() + switch { + case r == r1: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r2 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case r == r2: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + default: + p := s.Prog(x86.ALEAL) + p.From.Type = obj.TYPE_MEM + p.From.Reg = r1 + p.From.Scale = 1 + p.From.Index = r2 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + } + + // 2-address opcode arithmetic + case ssa.Op386SUBL, + ssa.Op386MULL, + ssa.Op386ANDL, + ssa.Op386ORL, + ssa.Op386XORL, + ssa.Op386SHLL, + ssa.Op386SHRL, ssa.Op386SHRW, ssa.Op386SHRB, + ssa.Op386SARL, ssa.Op386SARW, ssa.Op386SARB, + ssa.Op386ROLL, ssa.Op386ROLW, ssa.Op386ROLB, + ssa.Op386ADDSS, ssa.Op386ADDSD, ssa.Op386SUBSS, ssa.Op386SUBSD, + ssa.Op386MULSS, ssa.Op386MULSD, ssa.Op386DIVSS, ssa.Op386DIVSD, + ssa.Op386PXOR, + ssa.Op386ADCL, + ssa.Op386SBBL: + opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg()) + + case ssa.Op386ADDLcarry, ssa.Op386SUBLcarry: 
+ // output 0 is carry/borrow, output 1 is the low 32 bits. + opregreg(s, v.Op.Asm(), v.Reg0(), v.Args[1].Reg()) + + case ssa.Op386ADDLconstcarry, ssa.Op386SUBLconstcarry: + // output 0 is carry/borrow, output 1 is the low 32 bits. + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg0() + + case ssa.Op386DIVL, ssa.Op386DIVW, + ssa.Op386DIVLU, ssa.Op386DIVWU, + ssa.Op386MODL, ssa.Op386MODW, + ssa.Op386MODLU, ssa.Op386MODWU: + + // Arg[0] is already in AX as it's the only register we allow + // and AX is the only output + x := v.Args[1].Reg() + + // CPU faults upon signed overflow, which occurs when most + // negative int is divided by -1. + var j *obj.Prog + if v.Op == ssa.Op386DIVL || v.Op == ssa.Op386DIVW || + v.Op == ssa.Op386MODL || v.Op == ssa.Op386MODW { + + if ssa.DivisionNeedsFixUp(v) { + var c *obj.Prog + switch v.Op { + case ssa.Op386DIVL, ssa.Op386MODL: + c = s.Prog(x86.ACMPL) + j = s.Prog(x86.AJEQ) + + case ssa.Op386DIVW, ssa.Op386MODW: + c = s.Prog(x86.ACMPW) + j = s.Prog(x86.AJEQ) + } + c.From.Type = obj.TYPE_REG + c.From.Reg = x + c.To.Type = obj.TYPE_CONST + c.To.Offset = -1 + + j.To.Type = obj.TYPE_BRANCH + } + // sign extend the dividend + switch v.Op { + case ssa.Op386DIVL, ssa.Op386MODL: + s.Prog(x86.ACDQ) + case ssa.Op386DIVW, ssa.Op386MODW: + s.Prog(x86.ACWD) + } + } + + // for unsigned ints, we sign extend by setting DX = 0 + // signed ints were sign extended above + if v.Op == ssa.Op386DIVLU || v.Op == ssa.Op386MODLU || + v.Op == ssa.Op386DIVWU || v.Op == ssa.Op386MODWU { + c := s.Prog(x86.AXORL) + c.From.Type = obj.TYPE_REG + c.From.Reg = x86.REG_DX + c.To.Type = obj.TYPE_REG + c.To.Reg = x86.REG_DX + } + + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = x + + // signed division, rest of the check for -1 case + if j != nil { + j2 := s.Prog(obj.AJMP) + j2.To.Type = obj.TYPE_BRANCH + + var n *obj.Prog + if v.Op == ssa.Op386DIVL || v.Op == 
ssa.Op386DIVW { + // n * -1 = -n + n = s.Prog(x86.ANEGL) + n.To.Type = obj.TYPE_REG + n.To.Reg = x86.REG_AX + } else { + // n % -1 == 0 + n = s.Prog(x86.AXORL) + n.From.Type = obj.TYPE_REG + n.From.Reg = x86.REG_DX + n.To.Type = obj.TYPE_REG + n.To.Reg = x86.REG_DX + } + + j.To.SetTarget(n) + j2.To.SetTarget(s.Pc()) + } + + case ssa.Op386HMULL, ssa.Op386HMULLU: + // the frontend rewrites constant division by 8/16/32 bit integers into + // HMUL by a constant + // SSA rewrites generate the 64 bit versions + + // Arg[0] is already in AX as it's the only register we allow + // and DX is the only output we care about (the high bits) + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + + // IMULB puts the high portion in AH instead of DL, + // so move it to DL for consistency + if v.Type.Size() == 1 { + m := s.Prog(x86.AMOVB) + m.From.Type = obj.TYPE_REG + m.From.Reg = x86.REG_AH + m.To.Type = obj.TYPE_REG + m.To.Reg = x86.REG_DX + } + + case ssa.Op386MULLU: + // Arg[0] is already in AX as it's the only register we allow + // results lo in AX + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + + case ssa.Op386MULLQU: + // AX * args[1], high 32 bits in DX (result[0]), low 32 bits in AX (result[1]). + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + + case ssa.Op386AVGLU: + // compute (x+y)/2 unsigned. + // Do a 32-bit add, the overflow goes into the carry. + // Shift right once and pull the carry back into the 31st bit. 
+ p := s.Prog(x86.AADDL) + p.From.Type = obj.TYPE_REG + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + p.From.Reg = v.Args[1].Reg() + p = s.Prog(x86.ARCRL) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 1 + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.Op386ADDLconst: + r := v.Reg() + a := v.Args[0].Reg() + if r == a { + if v.AuxInt == 1 { + p := s.Prog(x86.AINCL) + p.To.Type = obj.TYPE_REG + p.To.Reg = r + return + } + if v.AuxInt == -1 { + p := s.Prog(x86.ADECL) + p.To.Type = obj.TYPE_REG + p.To.Reg = r + return + } + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = r + return + } + p := s.Prog(x86.ALEAL) + p.From.Type = obj.TYPE_MEM + p.From.Reg = a + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = r + + case ssa.Op386MULLconst: + r := v.Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = r + p.AddRestSourceReg(v.Args[0].Reg()) + + case ssa.Op386SUBLconst, + ssa.Op386ADCLconst, + ssa.Op386SBBLconst, + ssa.Op386ANDLconst, + ssa.Op386ORLconst, + ssa.Op386XORLconst, + ssa.Op386SHLLconst, + ssa.Op386SHRLconst, ssa.Op386SHRWconst, ssa.Op386SHRBconst, + ssa.Op386SARLconst, ssa.Op386SARWconst, ssa.Op386SARBconst, + ssa.Op386ROLLconst, ssa.Op386ROLWconst, ssa.Op386ROLBconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.Op386SBBLcarrymask: + r := v.Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.Op386LEAL1, ssa.Op386LEAL2, ssa.Op386LEAL4, ssa.Op386LEAL8: + r := v.Args[0].Reg() + i := v.Args[1].Reg() + p := s.Prog(x86.ALEAL) + switch v.Op { + case ssa.Op386LEAL1: + p.From.Scale = 1 + if i == x86.REG_SP { + r, i = i, r + } + case ssa.Op386LEAL2: + p.From.Scale = 2 + case ssa.Op386LEAL4: + p.From.Scale = 4 + case 
ssa.Op386LEAL8: + p.From.Scale = 8 + } + p.From.Type = obj.TYPE_MEM + p.From.Reg = r + p.From.Index = i + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.Op386LEAL: + p := s.Prog(x86.ALEAL) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.Op386CMPL, ssa.Op386CMPW, ssa.Op386CMPB, + ssa.Op386TESTL, ssa.Op386TESTW, ssa.Op386TESTB: + opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg()) + case ssa.Op386UCOMISS, ssa.Op386UCOMISD: + // Go assembler has swapped operands for UCOMISx relative to CMP, + // must account for that right here. + opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg()) + case ssa.Op386CMPLconst, ssa.Op386CMPWconst, ssa.Op386CMPBconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_CONST + p.To.Offset = v.AuxInt + case ssa.Op386TESTLconst, ssa.Op386TESTWconst, ssa.Op386TESTBconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Args[0].Reg() + case ssa.Op386CMPLload, ssa.Op386CMPWload, ssa.Op386CMPBload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Args[1].Reg() + case ssa.Op386CMPLconstload, ssa.Op386CMPWconstload, ssa.Op386CMPBconstload: + sc := v.AuxValAndOff() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux2(&p.From, v, sc.Off64()) + p.To.Type = obj.TYPE_CONST + p.To.Offset = sc.Val64() + case ssa.Op386MOVLconst: + x := v.Reg() + + // If flags aren't live (indicated by v.Aux == nil), + // then we can rewrite MOV $0, AX into XOR AX, AX. 
+ if v.AuxInt == 0 && v.Aux == nil { + p := s.Prog(x86.AXORL) + p.From.Type = obj.TYPE_REG + p.From.Reg = x + p.To.Type = obj.TYPE_REG + p.To.Reg = x + break + } + + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = x + case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst: + x := v.Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_FCONST + p.From.Val = math.Float64frombits(uint64(v.AuxInt)) + p.To.Type = obj.TYPE_REG + p.To.Reg = x + case ssa.Op386MOVSSconst1, ssa.Op386MOVSDconst1: + p := s.Prog(x86.ALEAL) + p.From.Type = obj.TYPE_MEM + p.From.Name = obj.NAME_EXTERN + f := math.Float64frombits(uint64(v.AuxInt)) + if v.Op == ssa.Op386MOVSDconst1 { + p.From.Sym = base.Ctxt.Float64Sym(f) + } else { + p.From.Sym = base.Ctxt.Float32Sym(float32(f)) + } + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.Op386MOVSSload, ssa.Op386MOVSDload, ssa.Op386MOVLload, ssa.Op386MOVWload, ssa.Op386MOVBload, ssa.Op386MOVBLSXload, ssa.Op386MOVWLSXload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1, + ssa.Op386MOVSDloadidx8, ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4, ssa.Op386MOVWloadidx2: + r := v.Args[0].Reg() + i := v.Args[1].Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + switch v.Op { + case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1: + if i == x86.REG_SP { + r, i = i, r + } + p.From.Scale = 1 + case ssa.Op386MOVSDloadidx8: + p.From.Scale = 8 + case ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4: 
+ p.From.Scale = 4 + case ssa.Op386MOVWloadidx2: + p.From.Scale = 2 + } + p.From.Reg = r + p.From.Index = i + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.Op386ADDLloadidx4, ssa.Op386SUBLloadidx4, ssa.Op386MULLloadidx4, + ssa.Op386ANDLloadidx4, ssa.Op386ORLloadidx4, ssa.Op386XORLloadidx4: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[1].Reg() + p.From.Index = v.Args[2].Reg() + p.From.Scale = 4 + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.Op386ADDLload, ssa.Op386SUBLload, ssa.Op386MULLload, + ssa.Op386ANDLload, ssa.Op386ORLload, ssa.Op386XORLload, + ssa.Op386ADDSDload, ssa.Op386ADDSSload, ssa.Op386SUBSDload, ssa.Op386SUBSSload, + ssa.Op386MULSDload, ssa.Op386MULSSload, ssa.Op386DIVSSload, ssa.Op386DIVSDload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[1].Reg() + ssagen.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.Op386MOVSSstore, ssa.Op386MOVSDstore, ssa.Op386MOVLstore, ssa.Op386MOVWstore, ssa.Op386MOVBstore, + ssa.Op386ADDLmodify, ssa.Op386SUBLmodify, ssa.Op386ANDLmodify, ssa.Op386ORLmodify, ssa.Op386XORLmodify: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + case ssa.Op386ADDLconstmodify: + sc := v.AuxValAndOff() + val := sc.Val() + if val == 1 || val == -1 { + var p *obj.Prog + if val == 1 { + p = s.Prog(x86.AINCL) + } else { + p = s.Prog(x86.ADECL) + } + off := sc.Off64() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux2(&p.To, v, off) + break + } + fallthrough + case ssa.Op386ANDLconstmodify, ssa.Op386ORLconstmodify, ssa.Op386XORLconstmodify: + sc := v.AuxValAndOff() + off := sc.Off64() + val := sc.Val64() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = val + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + 
ssagen.AddAux2(&p.To, v, off) + case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1, + ssa.Op386MOVSDstoreidx8, ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4, ssa.Op386MOVWstoreidx2, + ssa.Op386ADDLmodifyidx4, ssa.Op386SUBLmodifyidx4, ssa.Op386ANDLmodifyidx4, ssa.Op386ORLmodifyidx4, ssa.Op386XORLmodifyidx4: + r := v.Args[0].Reg() + i := v.Args[1].Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[2].Reg() + p.To.Type = obj.TYPE_MEM + switch v.Op { + case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1: + if i == x86.REG_SP { + r, i = i, r + } + p.To.Scale = 1 + case ssa.Op386MOVSDstoreidx8: + p.To.Scale = 8 + case ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4, + ssa.Op386ADDLmodifyidx4, ssa.Op386SUBLmodifyidx4, ssa.Op386ANDLmodifyidx4, ssa.Op386ORLmodifyidx4, ssa.Op386XORLmodifyidx4: + p.To.Scale = 4 + case ssa.Op386MOVWstoreidx2: + p.To.Scale = 2 + } + p.To.Reg = r + p.To.Index = i + ssagen.AddAux(&p.To, v) + case ssa.Op386MOVLstoreconst, ssa.Op386MOVWstoreconst, ssa.Op386MOVBstoreconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + sc := v.AuxValAndOff() + p.From.Offset = sc.Val64() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux2(&p.To, v, sc.Off64()) + case ssa.Op386ADDLconstmodifyidx4: + sc := v.AuxValAndOff() + val := sc.Val() + if val == 1 || val == -1 { + var p *obj.Prog + if val == 1 { + p = s.Prog(x86.AINCL) + } else { + p = s.Prog(x86.ADECL) + } + off := sc.Off64() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + p.To.Scale = 4 + p.To.Index = v.Args[1].Reg() + ssagen.AddAux2(&p.To, v, off) + break + } + fallthrough + case ssa.Op386MOVLstoreconstidx1, ssa.Op386MOVLstoreconstidx4, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVWstoreconstidx2, ssa.Op386MOVBstoreconstidx1, + ssa.Op386ANDLconstmodifyidx4, ssa.Op386ORLconstmodifyidx4, 
ssa.Op386XORLconstmodifyidx4: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + sc := v.AuxValAndOff() + p.From.Offset = sc.Val64() + r := v.Args[0].Reg() + i := v.Args[1].Reg() + switch v.Op { + case ssa.Op386MOVBstoreconstidx1, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVLstoreconstidx1: + p.To.Scale = 1 + if i == x86.REG_SP { + r, i = i, r + } + case ssa.Op386MOVWstoreconstidx2: + p.To.Scale = 2 + case ssa.Op386MOVLstoreconstidx4, + ssa.Op386ADDLconstmodifyidx4, ssa.Op386ANDLconstmodifyidx4, ssa.Op386ORLconstmodifyidx4, ssa.Op386XORLconstmodifyidx4: + p.To.Scale = 4 + } + p.To.Type = obj.TYPE_MEM + p.To.Reg = r + p.To.Index = i + ssagen.AddAux2(&p.To, v, sc.Off64()) + case ssa.Op386MOVWLSX, ssa.Op386MOVBLSX, ssa.Op386MOVWLZX, ssa.Op386MOVBLZX, + ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD, + ssa.Op386CVTTSS2SL, ssa.Op386CVTTSD2SL, + ssa.Op386CVTSS2SD, ssa.Op386CVTSD2SS: + opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg()) + case ssa.Op386DUFFZERO: + p := s.Prog(obj.ADUFFZERO) + p.To.Type = obj.TYPE_ADDR + p.To.Sym = ir.Syms.Duffzero + p.To.Offset = v.AuxInt + case ssa.Op386DUFFCOPY: + p := s.Prog(obj.ADUFFCOPY) + p.To.Type = obj.TYPE_ADDR + p.To.Sym = ir.Syms.Duffcopy + p.To.Offset = v.AuxInt + + case ssa.OpCopy: // TODO: use MOVLreg for reg->reg copies instead of OpCopy? + if v.Type.IsMemory() { + return + } + x := v.Args[0].Reg() + y := v.Reg() + if x != y { + opregreg(s, moveByType(v.Type), y, x) + } + case ssa.OpLoadReg: + if v.Type.IsFlags() { + v.Fatalf("load flags not implemented: %v", v.LongString()) + return + } + p := s.Prog(loadByType(v.Type)) + ssagen.AddrAuto(&p.From, v.Args[0]) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpStoreReg: + if v.Type.IsFlags() { + v.Fatalf("store flags not implemented: %v", v.LongString()) + return + } + p := s.Prog(storeByType(v.Type)) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + ssagen.AddrAuto(&p.To, v) + case ssa.Op386LoweredGetClosurePtr: + // Closure pointer is DX. 
+ ssagen.CheckLoweredGetClosurePtr(v) + case ssa.Op386LoweredGetG: + r := v.Reg() + // See the comments in cmd/internal/obj/x86/obj6.go + // near CanUse1InsnTLS for a detailed explanation of these instructions. + if x86.CanUse1InsnTLS(base.Ctxt) { + // MOVL (TLS), r + p := s.Prog(x86.AMOVL) + p.From.Type = obj.TYPE_MEM + p.From.Reg = x86.REG_TLS + p.To.Type = obj.TYPE_REG + p.To.Reg = r + } else { + // MOVL TLS, r + // MOVL (r)(TLS*1), r + p := s.Prog(x86.AMOVL) + p.From.Type = obj.TYPE_REG + p.From.Reg = x86.REG_TLS + p.To.Type = obj.TYPE_REG + p.To.Reg = r + q := s.Prog(x86.AMOVL) + q.From.Type = obj.TYPE_MEM + q.From.Reg = r + q.From.Index = x86.REG_TLS + q.From.Scale = 1 + q.To.Type = obj.TYPE_REG + q.To.Reg = r + } + + case ssa.Op386LoweredGetCallerPC: + p := s.Prog(x86.AMOVL) + p.From.Type = obj.TYPE_MEM + p.From.Offset = -4 // PC is stored 4 bytes below first parameter. + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.Op386LoweredGetCallerSP: + // caller's SP is the address of the first arg + p := s.Prog(x86.AMOVL) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = -base.Ctxt.Arch.FixedFrameSize // 0 on 386, just to be consistent with other architectures + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.Op386LoweredWB: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + // AuxInt encodes how many buffer entries we need. 
+ p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1] + + case ssa.Op386LoweredPanicBoundsA, ssa.Op386LoweredPanicBoundsB, ssa.Op386LoweredPanicBoundsC: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt] + s.UseArgs(8) // space used in callee args area by assembly stubs + + case ssa.Op386LoweredPanicExtendA, ssa.Op386LoweredPanicExtendB, ssa.Op386LoweredPanicExtendC: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt] + s.UseArgs(12) // space used in callee args area by assembly stubs + + case ssa.Op386CALLstatic, ssa.Op386CALLclosure, ssa.Op386CALLinter: + s.Call(v) + case ssa.Op386CALLtail: + s.TailCall(v) + case ssa.Op386NEGL, + ssa.Op386BSWAPL, + ssa.Op386NOTL: + p := s.Prog(v.Op.Asm()) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.Op386BSFL, ssa.Op386BSFW, + ssa.Op386BSRL, ssa.Op386BSRW, + ssa.Op386SQRTSS, ssa.Op386SQRTSD: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.Op386SETEQ, ssa.Op386SETNE, + ssa.Op386SETL, ssa.Op386SETLE, + ssa.Op386SETG, ssa.Op386SETGE, + ssa.Op386SETGF, ssa.Op386SETGEF, + ssa.Op386SETB, ssa.Op386SETBE, + ssa.Op386SETORD, ssa.Op386SETNAN, + ssa.Op386SETA, ssa.Op386SETAE, + ssa.Op386SETO: + p := s.Prog(v.Op.Asm()) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.Op386SETNEF: + p := s.Prog(v.Op.Asm()) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + q := s.Prog(x86.ASETPS) + q.To.Type = obj.TYPE_REG + q.To.Reg = x86.REG_AX + opregreg(s, x86.AORL, v.Reg(), x86.REG_AX) + + case ssa.Op386SETEQF: + p := s.Prog(v.Op.Asm()) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + q := s.Prog(x86.ASETPC) + q.To.Type = obj.TYPE_REG + q.To.Reg = x86.REG_AX + opregreg(s, x86.AANDL, v.Reg(), x86.REG_AX) + + case ssa.Op386InvertFlags: + v.Fatalf("InvertFlags should never make it to codegen 
%v", v.LongString()) + case ssa.Op386FlagEQ, ssa.Op386FlagLT_ULT, ssa.Op386FlagLT_UGT, ssa.Op386FlagGT_ULT, ssa.Op386FlagGT_UGT: + v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString()) + case ssa.Op386REPSTOSL: + s.Prog(x86.AREP) + s.Prog(x86.ASTOSL) + case ssa.Op386REPMOVSL: + s.Prog(x86.AREP) + s.Prog(x86.AMOVSL) + case ssa.Op386LoweredNilCheck: + // Issue a load which will fault if the input is nil. + // TODO: We currently use the 2-byte instruction TESTB AX, (reg). + // Should we use the 3-byte TESTB $0, (reg) instead? It is larger + // but it doesn't have false dependency on AX. + // Or maybe allocate an output register and use MOVL (reg),reg2 ? + // That trades clobbering flags for clobbering a register. + p := s.Prog(x86.ATESTB) + p.From.Type = obj.TYPE_REG + p.From.Reg = x86.REG_AX + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + ssagen.AddAux(&p.To, v) + if logopt.Enabled() { + logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name) + } + if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers + base.WarnfAt(v.Pos, "generated nil check") + } + case ssa.Op386LoweredCtz32: + // BSFL in, out + p := s.Prog(x86.ABSFL) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + // JNZ 2(PC) + p1 := s.Prog(x86.AJNE) + p1.To.Type = obj.TYPE_BRANCH + + // MOVL $32, out + p2 := s.Prog(x86.AMOVL) + p2.From.Type = obj.TYPE_CONST + p2.From.Offset = 32 + p2.To.Type = obj.TYPE_REG + p2.To.Reg = v.Reg() + + // NOP (so the JNZ has somewhere to land) + nop := s.Prog(obj.ANOP) + p1.To.SetTarget(nop) + + case ssa.OpClobber: + p := s.Prog(x86.AMOVL) + p.From.Type = obj.TYPE_CONST + p.From.Offset = 0xdeaddead + p.To.Type = obj.TYPE_MEM + p.To.Reg = x86.REG_SP + ssagen.AddAux(&p.To, v) + case ssa.OpClobberReg: + // TODO: implement for clobberdead experiment. Nop is ok for now. 
+ default: + v.Fatalf("genValue not implemented: %s", v.LongString()) + } +} + +var blockJump = [...]struct { + asm, invasm obj.As +}{ + ssa.Block386EQ: {x86.AJEQ, x86.AJNE}, + ssa.Block386NE: {x86.AJNE, x86.AJEQ}, + ssa.Block386LT: {x86.AJLT, x86.AJGE}, + ssa.Block386GE: {x86.AJGE, x86.AJLT}, + ssa.Block386LE: {x86.AJLE, x86.AJGT}, + ssa.Block386GT: {x86.AJGT, x86.AJLE}, + ssa.Block386OS: {x86.AJOS, x86.AJOC}, + ssa.Block386OC: {x86.AJOC, x86.AJOS}, + ssa.Block386ULT: {x86.AJCS, x86.AJCC}, + ssa.Block386UGE: {x86.AJCC, x86.AJCS}, + ssa.Block386UGT: {x86.AJHI, x86.AJLS}, + ssa.Block386ULE: {x86.AJLS, x86.AJHI}, + ssa.Block386ORD: {x86.AJPC, x86.AJPS}, + ssa.Block386NAN: {x86.AJPS, x86.AJPC}, +} + +var eqfJumps = [2][2]ssagen.IndexJump{ + {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0] + {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1] +} +var nefJumps = [2][2]ssagen.IndexJump{ + {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0] + {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1] +} + +func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { + switch b.Kind { + case ssa.BlockPlain: + if b.Succs[0].Block() != next { + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) + } + case ssa.BlockDefer: + // defer returns in rax: + // 0 if we should continue executing + // 1 if we should jump to deferreturn call + p := s.Prog(x86.ATESTL) + p.From.Type = obj.TYPE_REG + p.From.Reg = x86.REG_AX + p.To.Type = obj.TYPE_REG + p.To.Reg = x86.REG_AX + p = s.Prog(x86.AJNE) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()}) + if b.Succs[0].Block() != next { + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()}) + } + case ssa.BlockExit, ssa.BlockRetJmp: + 
case ssa.BlockRet: + s.Prog(obj.ARET) + + case ssa.Block386EQF: + s.CombJump(b, next, &eqfJumps) + + case ssa.Block386NEF: + s.CombJump(b, next, &nefJumps) + + case ssa.Block386EQ, ssa.Block386NE, + ssa.Block386LT, ssa.Block386GE, + ssa.Block386LE, ssa.Block386GT, + ssa.Block386OS, ssa.Block386OC, + ssa.Block386ULT, ssa.Block386UGT, + ssa.Block386ULE, ssa.Block386UGE: + jmp := blockJump[b.Kind] + switch next { + case b.Succs[0].Block(): + s.Br(jmp.invasm, b.Succs[1].Block()) + case b.Succs[1].Block(): + s.Br(jmp.asm, b.Succs[0].Block()) + default: + if b.Likely != ssa.BranchUnlikely { + s.Br(jmp.asm, b.Succs[0].Block()) + s.Br(obj.AJMP, b.Succs[1].Block()) + } else { + s.Br(jmp.invasm, b.Succs[1].Block()) + s.Br(obj.AJMP, b.Succs[0].Block()) + } + } + default: + b.Fatalf("branch not implemented: %s", b.LongString()) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/covdata/testdata/dep.go b/platform/dbops/binaries/go/go/src/cmd/covdata/testdata/dep.go new file mode 100644 index 0000000000000000000000000000000000000000..2127ab24f6acdbbf9a00ad8a7de7122bee449c80 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/covdata/testdata/dep.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dep + +func Dep1() int { + return 42 +} + +func PDep(x int) { + if x != 1010101 { + println(x) + } else { + panic("bad") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/covdata/testdata/prog1.go b/platform/dbops/binaries/go/go/src/cmd/covdata/testdata/prog1.go new file mode 100644 index 0000000000000000000000000000000000000000..76e9e912cc3929b8ff2a64b60278f5614d384588 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/covdata/testdata/prog1.go @@ -0,0 +1,48 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "os" + "prog/dep" +) + +//go:noinline +func first() { + println("whee") +} + +//go:noinline +func second() { + println("oy") +} + +//go:noinline +func third(x int) int { + if x != 0 { + return 42 + } + println("blarg") + return 0 +} + +//go:noinline +func fourth() int { + return 99 +} + +func main() { + println(dep.Dep1()) + dep.PDep(2) + if len(os.Args) > 1 { + second() + third(1) + } else if len(os.Args) > 2 { + fourth() + } else { + first() + third(0) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/covdata/testdata/prog2.go b/platform/dbops/binaries/go/go/src/cmd/covdata/testdata/prog2.go new file mode 100644 index 0000000000000000000000000000000000000000..e51e78672bb8ea0447b4690fc825fde0e70eec64 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/covdata/testdata/prog2.go @@ -0,0 +1,29 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "os" + "prog/dep" +) + +//go:noinline +func fifth() { + println("hubba") +} + +//go:noinline +func sixth() { + println("wha?") +} + +func main() { + println(dep.Dep1()) + if len(os.Args) > 1 { + fifth() + } else { + sixth() + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/testdata/directives.go b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/directives.go new file mode 100644 index 0000000000000000000000000000000000000000..dfb7b8ec33fa86e192ff9c46fa1e5cc5d3ba6db6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/directives.go @@ -0,0 +1,40 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is processed by the cover command, then a test verifies that +// all compiler directives are preserved and positioned appropriately. 
+ +//go:a + +//go:b +package main + +//go:c1 + +//go:c2 +//doc +func c() { +} + +//go:d1 + +//doc +//go:d2 +type d int + +//go:e1 + +//doc +//go:e2 +type ( + e int + f int +) + +//go:_empty1 +//doc +//go:_empty2 +type () + +//go:f diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/testdata/html/html.go b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/html/html.go new file mode 100644 index 0000000000000000000000000000000000000000..20578259a5c826859bdcdda7bdef5e89fbc8b78c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/html/html.go @@ -0,0 +1,30 @@ +package html + +import "fmt" + +// This file is tested by html_test.go. +// The comments below are markers for extracting the annotated source +// from the HTML output. + +// This is a regression test for incorrect sorting of boundaries +// that coincide, specifically for empty select clauses. +// START f +func f() { + ch := make(chan int) + select { + case <-ch: + default: + } +} + +// END f + +// https://golang.org/issue/25767 +// START g +func g() { + if false { + fmt.Printf("Hello") + } +} + +// END g diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/testdata/html/html.golden b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/html/html.golden new file mode 100644 index 0000000000000000000000000000000000000000..84377d1e2035a93485605e123a2d742613d7c625 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/html/html.golden @@ -0,0 +1,18 @@ +// START f +func f() { + ch := make(chan int) + select { + case <-ch: + default: + } +} + +// END f +// START g +func g() { + if false { + fmt.Printf("Hello") + } +} + +// END g diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/testdata/html/html_test.go b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/html/html_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c15561fe4a92fd9e743b1b81dd08292157ae3d45 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/html/html_test.go @@ -0,0 +1,8 @@ +package html + +import "testing" + +func TestAll(t *testing.T) { + f() + g() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/testdata/main.go b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/main.go new file mode 100644 index 0000000000000000000000000000000000000000..be74b4aa6559422f6aa33ddf1152a88e5627347b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/main.go @@ -0,0 +1,116 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Test runner for coverage test. This file is not coverage-annotated; test.go is. +// It knows the coverage counter is called +// "thisNameMustBeVeryLongToCauseOverflowOfCounterIncrementStatementOntoNextLineForTest". + +package main + +import ( + "fmt" + "os" +) + +func main() { + testAll() + verify() +} + +type block struct { + count uint32 + line uint32 +} + +var counters = make(map[block]bool) + +// shorthand for the long counter variable. +var coverTest = &thisNameMustBeVeryLongToCauseOverflowOfCounterIncrementStatementOntoNextLineForTest + +// check records the location and expected value for a counter. +func check(line, count uint32) { + b := block{ + count, + line, + } + counters[b] = true +} + +// checkVal is a version of check that returns its extra argument, +// so it can be used in conditionals. +func checkVal(line, count uint32, val int) int { + b := block{ + count, + line, + } + counters[b] = true + return val +} + +var PASS = true + +// verify checks the expected counts against the actual. It runs after the test has completed. 
+func verify() { + for b := range counters { + got, index := count(b.line) + if b.count == anything && got != 0 { + got = anything + } + if got != b.count { + fmt.Fprintf(os.Stderr, "test_go:%d expected count %d got %d [counter %d]\n", b.line, b.count, got, index) + PASS = false + } + } + verifyPanic() + if !PASS { + fmt.Fprintf(os.Stderr, "FAIL\n") + os.Exit(2) + } +} + +// verifyPanic is a special check for the known counter that should be +// after the panic call in testPanic. +func verifyPanic() { + if coverTest.Count[panicIndex-1] != 1 { + // Sanity check for test before panic. + fmt.Fprintf(os.Stderr, "bad before panic") + PASS = false + } + if coverTest.Count[panicIndex] != 0 { + fmt.Fprintf(os.Stderr, "bad at panic: %d should be 0\n", coverTest.Count[panicIndex]) + PASS = false + } + if coverTest.Count[panicIndex+1] != 1 { + fmt.Fprintf(os.Stderr, "bad after panic") + PASS = false + } +} + +// count returns the count and index for the counter at the specified line. +func count(line uint32) (uint32, int) { + // Linear search is fine. Choose perfect fit over approximate. + // We can have a closing brace for a range on the same line as a condition for an "else if" + // and we don't want that brace to steal the count for the condition on the "if". + // Therefore we test for a perfect (lo==line && hi==line) match, but if we can't + // find that we take the first imperfect match. + index := -1 + indexLo := uint32(1e9) + for i := range coverTest.Count { + lo, hi := coverTest.Pos[3*i], coverTest.Pos[3*i+1] + if lo == line && line == hi { + return coverTest.Count[i], i + } + // Choose the earliest match (the counters are in unpredictable order). 
+ if lo <= line && line <= hi && indexLo > lo { + index = i + indexLo = lo + } + } + if index == -1 { + fmt.Fprintln(os.Stderr, "cover_test: no counter for line", line) + PASS = false + return 0, 0 + } + return coverTest.Count[index], index +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/testdata/p.go b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/p.go new file mode 100644 index 0000000000000000000000000000000000000000..ce3a8c061206f0210cc698ec5964404170197f1a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/p.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// A package such that there are 3 functions with zero total and covered lines. +// And one with 1 total and covered lines. Reproduces issue #20515. +package p + +//go:noinline +func A() { + +} + +//go:noinline +func B() { + +} + +//go:noinline +func C() { + +} + +//go:noinline +func D() int64 { + return 42 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/testdata/pkgcfg/a/a.go b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/pkgcfg/a/a.go new file mode 100644 index 0000000000000000000000000000000000000000..44c380b37930c4d3f05158536ec65728adf80b73 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/pkgcfg/a/a.go @@ -0,0 +1,28 @@ +package a + +type Atyp int + +func (ap *Atyp) Set(q int) { + *ap = Atyp(q) +} + +func (ap Atyp) Get() int { + inter := func(q Atyp) int { + return int(q) + } + return inter(ap) +} + +var afunc = func(x int) int { + return x + 1 +} +var Avar = afunc(42) + +func A(x int) int { + if x == 0 { + return 22 + } else if x == 1 { + return 33 + } + return 44 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/testdata/pkgcfg/a/a2.go b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/pkgcfg/a/a2.go new file mode 100644 index 
0000000000000000000000000000000000000000..e6b2fc10f7a056fe75949c7c61bcf08ff2e41f22 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/pkgcfg/a/a2.go @@ -0,0 +1,8 @@ +package a + +func A2() { + { + } + { + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/testdata/pkgcfg/a/a_test.go b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/pkgcfg/a/a_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a1608e0bdd9020da4993f7318be8853e730e159d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/pkgcfg/a/a_test.go @@ -0,0 +1,14 @@ +package a_test + +import ( + "cfg/a" + "testing" +) + +func TestA(t *testing.T) { + a.A(0) + var aat a.Atyp + at := &aat + at.Set(42) + println(at.Get()) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/testdata/pkgcfg/noFuncsNoTests/nfnt.go b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/pkgcfg/noFuncsNoTests/nfnt.go new file mode 100644 index 0000000000000000000000000000000000000000..52df23c8c9dfba0b244c26380fcef2c40063b803 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/pkgcfg/noFuncsNoTests/nfnt.go @@ -0,0 +1,8 @@ +package noFuncsNoTests + +const foo = 1 + +var G struct { + x int + y bool +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/testdata/pkgcfg/yesFuncsNoTests/yfnt.go b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/pkgcfg/yesFuncsNoTests/yfnt.go new file mode 100644 index 0000000000000000000000000000000000000000..4e536b043867ea6891ddaff70a81f1727173a213 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/pkgcfg/yesFuncsNoTests/yfnt.go @@ -0,0 +1,13 @@ +package yesFuncsNoTests + +func F1() { + println("hi") +} + +func F2(x int) int { + if x < 0 { + return 9 + } else { + return 10 + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/testdata/profile.cov b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/profile.cov new file mode 100644 index 
0000000000000000000000000000000000000000..db08602d5abb8c65c930d6e73a792eb83505a2de --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/profile.cov @@ -0,0 +1,5 @@ +mode: set +./testdata/p.go:10.10,12.2 0 0 +./testdata/p.go:15.10,17.2 0 0 +./testdata/p.go:20.10,22.2 0 0 +./testdata/p.go:25.16,27.2 1 1 diff --git a/platform/dbops/binaries/go/go/src/cmd/cover/testdata/test.go b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/test.go new file mode 100644 index 0000000000000000000000000000000000000000..0e1dbc61943112cda78141572962d9434d7f7618 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/cover/testdata/test.go @@ -0,0 +1,300 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This program is processed by the cover command, and then testAll is called. +// The test driver in main.go can then compare the coverage statistics with expectation. + +// The word LINE is replaced by the line number in this file. When the file is executed, +// the coverage processing has changed the line numbers, so we can't use runtime.Caller. 
+ +package main + +import _ "unsafe" // for go:linkname + +//go:linkname some_name some_name +var some_name int + +const anything = 1e9 // Just some unlikely value that means "we got here, don't care how often" + +func testAll() { + testSimple() + testBlockRun() + testIf() + testFor() + testRange() + testSwitch() + testTypeSwitch() + testSelect1() + testSelect2() + testPanic() + testEmptySwitches() + testFunctionLiteral() + testGoto() +} + +// The indexes of the counters in testPanic are known to main.go +const panicIndex = 3 + +// This test appears first because the index of its counters is known to main.go +func testPanic() { + defer func() { + recover() + }() + check(LINE, 1) + panic("should not get next line") + check(LINE, 0) // this is GoCover.Count[panicIndex] + // The next counter is in testSimple and it will be non-zero. + // If the panic above does not trigger a counter, the test will fail + // because GoCover.Count[panicIndex] will be the one in testSimple. +} + +func testSimple() { + check(LINE, 1) +} + +func testIf() { + if true { + check(LINE, 1) + } else { + check(LINE, 0) + } + if false { + check(LINE, 0) + } else { + check(LINE, 1) + } + for i := 0; i < 3; i++ { + if checkVal(LINE, 3, i) <= 2 { + check(LINE, 3) + } + if checkVal(LINE, 3, i) <= 1 { + check(LINE, 2) + } + if checkVal(LINE, 3, i) <= 0 { + check(LINE, 1) + } + } + for i := 0; i < 3; i++ { + if checkVal(LINE, 3, i) <= 1 { + check(LINE, 2) + } else { + check(LINE, 1) + } + } + for i := 0; i < 3; i++ { + if checkVal(LINE, 3, i) <= 0 { + check(LINE, 1) + } else if checkVal(LINE, 2, i) <= 1 { + check(LINE, 1) + } else if checkVal(LINE, 1, i) <= 2 { + check(LINE, 1) + } else if checkVal(LINE, 0, i) <= 3 { + check(LINE, 0) + } + } + if func(a, b int) bool { return a < b }(3, 4) { + check(LINE, 1) + } +} + +func testFor() { + for i := 0; i < 10; func() { i++; check(LINE, 10) }() { + check(LINE, 10) + } +} + +func testRange() { + for _, f := range []func(){ + func() { check(LINE, 1) }, + } { + 
f() + check(LINE, 1) + } +} + +func testBlockRun() { + check(LINE, 1) + { + check(LINE, 1) + } + { + check(LINE, 1) + } + check(LINE, 1) + { + check(LINE, 1) + } + { + check(LINE, 1) + } + check(LINE, 1) +} + +func testSwitch() { + for i := 0; i < 5; func() { i++; check(LINE, 5) }() { + goto label2 + label1: + goto label1 + label2: + switch i { + case 0: + check(LINE, 1) + case 1: + check(LINE, 1) + case 2: + check(LINE, 1) + default: + check(LINE, 2) + } + } +} + +func testTypeSwitch() { + var x = []any{1, 2.0, "hi"} + for _, v := range x { + switch func() { check(LINE, 3) }(); v.(type) { + case int: + check(LINE, 1) + case float64: + check(LINE, 1) + case string: + check(LINE, 1) + case complex128: + check(LINE, 0) + default: + check(LINE, 0) + } + } +} + +func testSelect1() { + c := make(chan int) + go func() { + for i := 0; i < 1000; i++ { + c <- i + } + }() + for { + select { + case <-c: + check(LINE, anything) + case <-c: + check(LINE, anything) + default: + check(LINE, 1) + return + } + } +} + +func testSelect2() { + c1 := make(chan int, 1000) + c2 := make(chan int, 1000) + for i := 0; i < 1000; i++ { + c1 <- i + c2 <- i + } + for { + select { + case <-c1: + check(LINE, 1000) + case <-c2: + check(LINE, 1000) + default: + check(LINE, 1) + return + } + } +} + +// Empty control statements created syntax errors. This function +// is here just to be sure that those are handled correctly now. 
+func testEmptySwitches() { + check(LINE, 1) + switch 3 { + } + check(LINE, 1) + switch i := (any)(3).(int); i { + } + check(LINE, 1) + c := make(chan int) + go func() { + check(LINE, 1) + c <- 1 + select {} + }() + <-c + check(LINE, 1) +} + +func testFunctionLiteral() { + a := func(f func()) error { + f() + f() + return nil + } + + b := func(f func()) bool { + f() + f() + return true + } + + check(LINE, 1) + a(func() { + check(LINE, 2) + }) + + if err := a(func() { + check(LINE, 2) + }); err != nil { + } + + switch b(func() { + check(LINE, 2) + }) { + } + + x := 2 + switch x { + case func() int { check(LINE, 1); return 1 }(): + check(LINE, 0) + panic("2=1") + case func() int { check(LINE, 1); return 2 }(): + check(LINE, 1) + case func() int { check(LINE, 0); return 3 }(): + check(LINE, 0) + panic("2=3") + } +} + +func testGoto() { + for i := 0; i < 2; i++ { + if i == 0 { + goto Label + } + check(LINE, 1) + Label: + check(LINE, 2) + } + // Now test that we don't inject empty statements + // between a label and a loop. +loop: + for { + check(LINE, 1) + break loop + } +} + +// This comment didn't appear in generated go code. +func haha() { + // Needed for cover to add counter increment here. + _ = 42 +} + +// Some someFunction. +// +//go:nosplit +func someFunction() { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/doc/testdata/merge/aa.go b/platform/dbops/binaries/go/go/src/cmd/doc/testdata/merge/aa.go new file mode 100644 index 0000000000000000000000000000000000000000..f8ab92dfd07f74751cb698fe4e87beed6239d618 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/doc/testdata/merge/aa.go @@ -0,0 +1,7 @@ +// Package comment A. +package merge + +// A doc. +func A() { + // A comment. 
+} diff --git a/platform/dbops/binaries/go/go/src/cmd/doc/testdata/merge/bb.go b/platform/dbops/binaries/go/go/src/cmd/doc/testdata/merge/bb.go new file mode 100644 index 0000000000000000000000000000000000000000..fd8cf3c446a4dd5b071a264b151799d1b4ddd185 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/doc/testdata/merge/bb.go @@ -0,0 +1,7 @@ +// Package comment B. +package merge + +// B doc. +func B() { + // B comment. +} diff --git a/platform/dbops/binaries/go/go/src/cmd/doc/testdata/nested/empty/empty.go b/platform/dbops/binaries/go/go/src/cmd/doc/testdata/nested/empty/empty.go new file mode 100644 index 0000000000000000000000000000000000000000..609cf0e0a0c0bb54d70599c4a7e45ebd6ef422d4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/doc/testdata/nested/empty/empty.go @@ -0,0 +1 @@ +package empty diff --git a/platform/dbops/binaries/go/go/src/cmd/doc/testdata/nested/ignore.go b/platform/dbops/binaries/go/go/src/cmd/doc/testdata/nested/ignore.go new file mode 100644 index 0000000000000000000000000000000000000000..5fa811d0a859c192c3a34713e970e604b2f93da3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/doc/testdata/nested/ignore.go @@ -0,0 +1,5 @@ +//go:build ignore +// +build ignore + +// Ignored package +package nested diff --git a/platform/dbops/binaries/go/go/src/cmd/doc/testdata/nested/nested/real.go b/platform/dbops/binaries/go/go/src/cmd/doc/testdata/nested/nested/real.go new file mode 100644 index 0000000000000000000000000000000000000000..1e5546081ce03a6eedaf59a0a3dd92b94f0807cf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/doc/testdata/nested/nested/real.go @@ -0,0 +1,4 @@ +package nested + +type Foo struct { +} diff --git a/platform/dbops/binaries/go/go/src/cmd/doc/testdata/pkg.go b/platform/dbops/binaries/go/go/src/cmd/doc/testdata/pkg.go new file mode 100644 index 0000000000000000000000000000000000000000..4d269ff0a2295908a7df7313b25ac7b88bdb2566 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/doc/testdata/pkg.go @@ -0,0 +1,254 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package comment. +package pkg + +import "io" + +// Constants + +// Comment about exported constant. +const ExportedConstant = 1 + +// Comment about internal constant. +const internalConstant = 2 + +// Comment about block of constants. +const ( + // Comment before ConstOne. + ConstOne = 1 + ConstTwo = 2 // Comment on line with ConstTwo. + constThree = 3 // Comment on line with constThree. +) + +// Const block where first entry is unexported. +const ( + constFour = iota + ConstFive + ConstSix +) + +// Variables + +// Comment about exported variable. +var ExportedVariable = 1 + +var ExportedVarOfUnExported unexportedType + +// Comment about internal variable. +var internalVariable = 2 + +// Comment about block of variables. +var ( + // Comment before VarOne. + VarOne = 1 + VarTwo = 2 // Comment on line with VarTwo. + varThree = 3 // Comment on line with varThree. +) + +// Var block where first entry is unexported. +var ( + varFour = 4 + VarFive = 5 + varSix = 6 +) + +// Comment about exported function. +func ExportedFunc(a int) bool { + // BUG(me): function body note + return true != false +} + +// Comment about internal function. +func internalFunc(a int) bool + +// Comment about exported type. +type ExportedType struct { + // Comment before exported field. + ExportedField int // Comment on line with exported field. + unexportedField int // Comment on line with unexported field. + ExportedEmbeddedType // Comment on line with exported embedded field. + *ExportedEmbeddedType // Comment on line with exported embedded *field. + *qualified.ExportedEmbeddedType // Comment on line with exported embedded *selector.field. + unexportedType // Comment on line with unexported embedded field. 
+ *unexportedType // Comment on line with unexported embedded *field. + io.Reader // Comment on line with embedded Reader. + error // Comment on line with embedded error. +} + +// Comment about exported method. +func (ExportedType) ExportedMethod(a int) bool { + return true != true +} + +func (ExportedType) Uncommented(a int) bool { + return true != true +} + +// Comment about unexported method. +func (ExportedType) unexportedMethod(a int) bool { + return true +} + +type ExportedStructOneField struct { + OnlyField int // the only field +} + +// Constants tied to ExportedType. (The type is a struct so this isn't valid Go, +// but it parses and that's all we need.) +const ( + ExportedTypedConstant ExportedType = iota +) + +// Comment about constructor for exported type. +func ExportedTypeConstructor() *ExportedType { + return nil +} + +const unexportedTypedConstant ExportedType = 1 // In a separate section to test -u. + +// Comment about exported interface. +type ExportedInterface interface { + // Comment before exported method. + // + // // Code block showing how to use ExportedMethod + // func DoSomething() error { + // ExportedMethod() + // return nil + // } + // + ExportedMethod() // Comment on line with exported method. + unexportedMethod() // Comment on line with unexported method. + io.Reader // Comment on line with embedded Reader. + error // Comment on line with embedded error. +} + +// Comment about unexported type. +type unexportedType int + +func (unexportedType) ExportedMethod() bool { + return true +} + +func (unexportedType) unexportedMethod() bool { + return true +} + +// Constants tied to unexportedType. +const ( + ExportedTypedConstant_unexported unexportedType = iota +) + +const unexportedTypedConstant unexportedType = 1 // In a separate section to test -u. + +// For case matching. 
+const CaseMatch = 1 +const Casematch = 2 + +func ReturnUnexported() unexportedType { return 0 } +func ReturnExported() ExportedType { return ExportedType{} } + +const MultiLineConst = ` + MultiLineString1 + MultiLineString2 + MultiLineString3 +` + +func MultiLineFunc(x interface { + MultiLineMethod1() int + MultiLineMethod2() int + MultiLineMethod3() int +}) (r struct { + MultiLineField1 int + MultiLineField2 int + MultiLineField3 int +}) { + return r +} + +var MultiLineVar = map[struct { + MultiLineField1 string + MultiLineField2 uint64 +}]struct { + MultiLineField3 error + MultiLineField2 error +}{ + {"FieldVal1", 1}: {}, + {"FieldVal2", 2}: {}, + {"FieldVal3", 3}: {}, +} + +const ( + _, _ uint64 = 2 * iota, 1 << iota + constLeft1, constRight1 + ConstLeft2, constRight2 + constLeft3, ConstRight3 + ConstLeft4, ConstRight4 +) + +const ( + ConstGroup1 unexportedType = iota + ConstGroup2 + ConstGroup3 +) + +const ConstGroup4 ExportedType = ExportedType{} + +func newLongLine(ss ...string) + +var LongLine = newLongLine( + "someArgument1", + "someArgument2", + "someArgument3", + "someArgument4", + "someArgument5", + "someArgument6", + "someArgument7", + "someArgument8", +) + +type T2 int + +type T1 = T2 + +const ( + Duplicate = iota + duplicate +) + +// Comment about exported function with formatting. +// +// Example +// +// fmt.Println(FormattedDoc()) +// +// Text after pre-formatted block. +func ExportedFormattedDoc(a int) bool { + return true +} + +type ExportedFormattedType struct { + // Comment before exported field with formatting. + // + // Example + // + // a.ExportedField = 123 + // + // Text after pre-formatted block. 
+ //ignore:directive + ExportedField int +} + +type SimpleConstraint interface { + ~int | ~float64 +} + +type TildeConstraint interface { + ~int +} + +type StructConstraint interface { + struct { F int } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/auth/auth.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/auth/auth.go new file mode 100644 index 0000000000000000000000000000000000000000..77edeb89245b7177c74986d0b9600f337f2c0813 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/auth/auth.go @@ -0,0 +1,28 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package auth provides access to user-provided authentication credentials. +package auth + +import "net/http" + +// AddCredentials fills in the user's credentials for req, if any. +// The return value reports whether any matching credentials were found. +func AddCredentials(req *http.Request) (added bool) { + host := req.Host + if host == "" { + host = req.URL.Hostname() + } + + // TODO(golang.org/issue/26232): Support arbitrary user-provided credentials. + netrcOnce.Do(readNetrc) + for _, l := range netrc { + if l.machine == host { + req.SetBasicAuth(l.login, l.password) + return true + } + } + + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/auth/netrc.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/auth/netrc.go new file mode 100644 index 0000000000000000000000000000000000000000..0107f20d7a68ca9f2015583fdaf75cc1154bbfb8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/auth/netrc.go @@ -0,0 +1,110 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package auth + +import ( + "os" + "path/filepath" + "runtime" + "strings" + "sync" +) + +type netrcLine struct { + machine string + login string + password string +} + +var ( + netrcOnce sync.Once + netrc []netrcLine + netrcErr error +) + +func parseNetrc(data string) []netrcLine { + // See https://www.gnu.org/software/inetutils/manual/html_node/The-_002enetrc-file.html + // for documentation on the .netrc format. + var nrc []netrcLine + var l netrcLine + inMacro := false + for _, line := range strings.Split(data, "\n") { + if inMacro { + if line == "" { + inMacro = false + } + continue + } + + f := strings.Fields(line) + i := 0 + for ; i < len(f)-1; i += 2 { + // Reset at each "machine" token. + // “The auto-login process searches the .netrc file for a machine token + // that matches […]. Once a match is made, the subsequent .netrc tokens + // are processed, stopping when the end of file is reached or another + // machine or a default token is encountered.” + switch f[i] { + case "machine": + l = netrcLine{machine: f[i+1]} + case "default": + break + case "login": + l.login = f[i+1] + case "password": + l.password = f[i+1] + case "macdef": + // “A macro is defined with the specified name; its contents begin with + // the next .netrc line and continue until a null line (consecutive + // new-line characters) is encountered.” + inMacro = true + } + if l.machine != "" && l.login != "" && l.password != "" { + nrc = append(nrc, l) + l = netrcLine{} + } + } + + if i < len(f) && f[i] == "default" { + // “There can be only one default token, and it must be after all machine tokens.” + break + } + } + + return nrc +} + +func netrcPath() (string, error) { + if env := os.Getenv("NETRC"); env != "" { + return env, nil + } + dir, err := os.UserHomeDir() + if err != nil { + return "", err + } + base := ".netrc" + if runtime.GOOS == "windows" { + base = "_netrc" + } + return filepath.Join(dir, base), nil +} + +func readNetrc() { + path, err := netrcPath() + if err != nil { + 
netrcErr = err + return + } + + data, err := os.ReadFile(path) + if err != nil { + if !os.IsNotExist(err) { + netrcErr = err + } + return + } + + netrc = parseNetrc(string(data)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/auth/netrc_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/auth/netrc_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e06c545390e0e9c465eef8ecb9ef7773a7516778 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/auth/netrc_test.go @@ -0,0 +1,58 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package auth + +import ( + "reflect" + "testing" +) + +var testNetrc = ` +machine incomplete +password none + +machine api.github.com + login user + password pwd + +machine incomlete.host + login justlogin + +machine test.host +login user2 +password pwd2 + +machine oneline login user3 password pwd3 + +machine ignore.host macdef ignore + login nobody + password nothing + +machine hasmacro.too macdef ignore-next-lines login user4 password pwd4 + login nobody + password nothing + +default +login anonymous +password gopher@golang.org + +machine after.default +login oops +password too-late-in-file +` + +func TestParseNetrc(t *testing.T) { + lines := parseNetrc(testNetrc) + want := []netrcLine{ + {"api.github.com", "user", "pwd"}, + {"test.host", "user2", "pwd2"}, + {"oneline", "user3", "pwd3"}, + {"hasmacro.too", "user4", "pwd4"}, + } + + if !reflect.DeepEqual(lines, want) { + t.Errorf("parseNetrc:\nhave %q\nwant %q", lines, want) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/base/base.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/base.go new file mode 100644 index 0000000000000000000000000000000000000000..2171d139096a343c708450c613d80e923d8a7da4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/base.go @@ 
-0,0 +1,223 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package base defines shared basic pieces of the go command, +// in particular logging and the Command structure. +package base + +import ( + "context" + "flag" + "fmt" + "log" + "os" + "os/exec" + "reflect" + "strings" + "sync" + + "cmd/go/internal/cfg" + "cmd/go/internal/str" +) + +// A Command is an implementation of a go command +// like go build or go fix. +type Command struct { + // Run runs the command. + // The args are the arguments after the command name. + Run func(ctx context.Context, cmd *Command, args []string) + + // UsageLine is the one-line usage message. + // The words between "go" and the first flag or argument in the line are taken to be the command name. + UsageLine string + + // Short is the short description shown in the 'go help' output. + Short string + + // Long is the long message shown in the 'go help ' output. + Long string + + // Flag is a set of flags specific to this command. + Flag flag.FlagSet + + // CustomFlags indicates that the command will do its own + // flag parsing. + CustomFlags bool + + // Commands lists the available commands and help topics. + // The order here is the order in which they are printed by 'go help'. + // Note that subcommands are in general best avoided. + Commands []*Command +} + +var Go = &Command{ + UsageLine: "go", + Long: `Go is a tool for managing Go source code.`, + // Commands initialized in package main +} + +// Lookup returns the subcommand with the given name, if any. +// Otherwise it returns nil. +// +// Lookup ignores subcommands that have len(c.Commands) == 0 and c.Run == nil. +// Such subcommands are only for use as arguments to "help". 
+func (c *Command) Lookup(name string) *Command { + for _, sub := range c.Commands { + if sub.Name() == name && (len(c.Commands) > 0 || c.Runnable()) { + return sub + } + } + return nil +} + +// hasFlag reports whether a command or any of its subcommands contain the given +// flag. +func hasFlag(c *Command, name string) bool { + if f := c.Flag.Lookup(name); f != nil { + return true + } + for _, sub := range c.Commands { + if hasFlag(sub, name) { + return true + } + } + return false +} + +// LongName returns the command's long name: all the words in the usage line between "go" and a flag or argument, +func (c *Command) LongName() string { + name := c.UsageLine + if i := strings.Index(name, " ["); i >= 0 { + name = name[:i] + } + if name == "go" { + return "" + } + return strings.TrimPrefix(name, "go ") +} + +// Name returns the command's short name: the last word in the usage line before a flag or argument. +func (c *Command) Name() string { + name := c.LongName() + if i := strings.LastIndex(name, " "); i >= 0 { + name = name[i+1:] + } + return name +} + +func (c *Command) Usage() { + fmt.Fprintf(os.Stderr, "usage: %s\n", c.UsageLine) + fmt.Fprintf(os.Stderr, "Run 'go help %s' for details.\n", c.LongName()) + SetExitStatus(2) + Exit() +} + +// Runnable reports whether the command can be run; otherwise +// it is a documentation pseudo-command such as importpath. +func (c *Command) Runnable() bool { + return c.Run != nil +} + +var atExitFuncs []func() + +func AtExit(f func()) { + atExitFuncs = append(atExitFuncs, f) +} + +func Exit() { + for _, f := range atExitFuncs { + f() + } + os.Exit(exitStatus) +} + +func Fatalf(format string, args ...any) { + Errorf(format, args...) + Exit() +} + +func Errorf(format string, args ...any) { + log.Printf(format, args...) + SetExitStatus(1) +} + +func ExitIfErrors() { + if exitStatus != 0 { + Exit() + } +} + +func Error(err error) { + // We use errors.Join to return multiple errors from various routines. 
+ // If we receive multiple errors joined with a basic errors.Join, + // handle each one separately so that they all have the leading "go: " prefix. + // A plain interface check is not good enough because there might be + // other kinds of structured errors that are logically one unit and that + // add other context: only handling the wrapped errors would lose + // that context. + if err != nil && reflect.TypeOf(err).String() == "*errors.joinError" { + for _, e := range err.(interface{ Unwrap() []error }).Unwrap() { + Error(e) + } + return + } + Errorf("go: %v", err) +} + +func Fatal(err error) { + Error(err) + Exit() +} + +var exitStatus = 0 +var exitMu sync.Mutex + +func SetExitStatus(n int) { + exitMu.Lock() + if exitStatus < n { + exitStatus = n + } + exitMu.Unlock() +} + +func GetExitStatus() int { + return exitStatus +} + +// Run runs the command, with stdout and stderr +// connected to the go command's own stdout and stderr. +// If the command fails, Run reports the error using Errorf. +func Run(cmdargs ...any) { + cmdline := str.StringList(cmdargs...) + if cfg.BuildN || cfg.BuildX { + fmt.Printf("%s\n", strings.Join(cmdline, " ")) + if cfg.BuildN { + return + } + } + + cmd := exec.Command(cmdline[0], cmdline[1:]...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + Errorf("%v", err) + } +} + +// RunStdin is like run but connects Stdin. +func RunStdin(cmdline []string) { + cmd := exec.Command(cmdline[0], cmdline[1:]...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Env = cfg.OrigEnv + StartSigHandlers() + if err := cmd.Run(); err != nil { + Errorf("%v", err) + } +} + +// Usage is the usage-reporting function, filled in by package main +// but here for reference by other packages. 
+var Usage func() diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/base/env.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/env.go new file mode 100644 index 0000000000000000000000000000000000000000..20ae06d67b4cb1b6883ec23baee945937dc92df8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/env.go @@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package base + +import ( + "cmd/go/internal/cfg" + "fmt" + "os" + "path/filepath" + "runtime" +) + +// AppendPWD returns the result of appending PWD=dir to the environment base. +// +// The resulting environment makes os.Getwd more efficient for a subprocess +// running in dir, and also improves the accuracy of paths relative to dir +// if one or more elements of dir is a symlink. +func AppendPWD(base []string, dir string) []string { + // POSIX requires PWD to be absolute. + // Internally we only use absolute paths, so dir should already be absolute. + if !filepath.IsAbs(dir) { + panic(fmt.Sprintf("AppendPWD with relative path %q", dir)) + } + return append(base, "PWD="+dir) +} + +// AppendPATH returns the result of appending PATH=$GOROOT/bin:$PATH +// (or the platform equivalent) to the environment base. 
+func AppendPATH(base []string) []string { + if cfg.GOROOTbin == "" { + return base + } + + pathVar := "PATH" + if runtime.GOOS == "plan9" { + pathVar = "path" + } + + path := os.Getenv(pathVar) + if path == "" { + return append(base, pathVar+"="+cfg.GOROOTbin) + } + return append(base, pathVar+"="+cfg.GOROOTbin+string(os.PathListSeparator)+path) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/base/flag.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/flag.go new file mode 100644 index 0000000000000000000000000000000000000000..74e1275cfd077f59f19f2bb5e0813d6252dfe28a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/flag.go @@ -0,0 +1,85 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package base + +import ( + "flag" + "fmt" + + "cmd/go/internal/cfg" + "cmd/go/internal/fsys" + "cmd/internal/quoted" +) + +// A StringsFlag is a command-line flag that interprets its argument +// as a space-separated list of possibly-quoted strings. +type StringsFlag []string + +func (v *StringsFlag) Set(s string) error { + var err error + *v, err = quoted.Split(s) + if *v == nil { + *v = []string{} + } + return err +} + +func (v *StringsFlag) String() string { + return "" +} + +// explicitStringFlag is like a regular string flag, but it also tracks whether +// the string was set explicitly to a non-empty value. +type explicitStringFlag struct { + value *string + explicit *bool +} + +func (f explicitStringFlag) String() string { + if f.value == nil { + return "" + } + return *f.value +} + +func (f explicitStringFlag) Set(v string) error { + *f.value = v + if v != "" { + *f.explicit = true + } + return nil +} + +// AddBuildFlagsNX adds the -n and -x build flags to the flag set. 
+func AddBuildFlagsNX(flags *flag.FlagSet) { + flags.BoolVar(&cfg.BuildN, "n", false, "") + flags.BoolVar(&cfg.BuildX, "x", false, "") +} + +// AddChdirFlag adds the -C flag to the flag set. +func AddChdirFlag(flags *flag.FlagSet) { + // The usage message is never printed, but it's used in chdir_test.go + // to identify that the -C flag is from AddChdirFlag. + flags.Func("C", "AddChdirFlag", ChdirFlag) +} + +// AddModFlag adds the -mod build flag to the flag set. +func AddModFlag(flags *flag.FlagSet) { + flags.Var(explicitStringFlag{value: &cfg.BuildMod, explicit: &cfg.BuildModExplicit}, "mod", "") +} + +// AddModCommonFlags adds the module-related flags common to build commands +// and 'go mod' subcommands. +func AddModCommonFlags(flags *flag.FlagSet) { + flags.BoolVar(&cfg.ModCacheRW, "modcacherw", false, "") + flags.StringVar(&cfg.ModFile, "modfile", "", "") + flags.StringVar(&fsys.OverlayFile, "overlay", "", "") +} + +func ChdirFlag(s string) error { + // main handles -C by removing it from the command line. + // If we see one during flag parsing, that's an error. + return fmt.Errorf("-C flag must be first flag on command line") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/base/goflags.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/goflags.go new file mode 100644 index 0000000000000000000000000000000000000000..eced2c5d5822682792727bb6335589ea14f88707 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/goflags.go @@ -0,0 +1,162 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package base + +import ( + "flag" + "fmt" + "runtime" + "strings" + + "cmd/go/internal/cfg" + "cmd/internal/quoted" +) + +var goflags []string // cached $GOFLAGS list; can be -x or --x form + +// GOFLAGS returns the flags from $GOFLAGS. 
+// The list can be assumed to contain one string per flag, +// with each string either beginning with -name or --name. +func GOFLAGS() []string { + InitGOFLAGS() + return goflags +} + +// InitGOFLAGS initializes the goflags list from $GOFLAGS. +// If goflags is already initialized, it does nothing. +func InitGOFLAGS() { + if goflags != nil { // already initialized + return + } + + // Ignore bad flag in go env and go bug, because + // they are what people reach for when debugging + // a problem, and maybe they're debugging GOFLAGS. + // (Both will show the GOFLAGS setting if let succeed.) + hideErrors := cfg.CmdName == "env" || cfg.CmdName == "bug" + + var err error + goflags, err = quoted.Split(cfg.Getenv("GOFLAGS")) + if err != nil { + if hideErrors { + return + } + Fatalf("go: parsing $GOFLAGS: %v", err) + } + + if len(goflags) == 0 { + // nothing to do; avoid work on later InitGOFLAGS call + goflags = []string{} + return + } + + // Each of the words returned by strings.Fields must be its own flag. + // To set flag arguments use -x=value instead of -x value. + // For boolean flags, -x is fine instead of -x=true. + for _, f := range goflags { + // Check that every flag looks like -x --x -x=value or --x=value. + if !strings.HasPrefix(f, "-") || f == "-" || f == "--" || strings.HasPrefix(f, "---") || strings.HasPrefix(f, "-=") || strings.HasPrefix(f, "--=") { + if hideErrors { + continue + } + Fatalf("go: parsing $GOFLAGS: non-flag %q", f) + } + + name := f[1:] + if name[0] == '-' { + name = name[1:] + } + if i := strings.Index(name, "="); i >= 0 { + name = name[:i] + } + if !hasFlag(Go, name) { + if hideErrors { + continue + } + Fatalf("go: parsing $GOFLAGS: unknown flag -%s", name) + } + } +} + +// boolFlag is the optional interface for flag.Value known to the flag package. +// (It is not clear why package flag does not export this interface.) 
+type boolFlag interface { + flag.Value + IsBoolFlag() bool +} + +// SetFromGOFLAGS sets the flags in the given flag set using settings in $GOFLAGS. +func SetFromGOFLAGS(flags *flag.FlagSet) { + InitGOFLAGS() + + // This loop is similar to flag.Parse except that it ignores + // unknown flags found in goflags, so that setting, say, GOFLAGS=-ldflags=-w + // does not break commands that don't have a -ldflags. + // It also adjusts the output to be clear that the reported problem is from $GOFLAGS. + where := "$GOFLAGS" + if runtime.GOOS == "windows" { + where = "%GOFLAGS%" + } + for _, goflag := range goflags { + name, value, hasValue := goflag, "", false + // Ignore invalid flags like '=' or '=value'. + // If it is not reported in InitGOFlags it means we don't want to report it. + if i := strings.Index(goflag, "="); i == 0 { + continue + } else if i > 0 { + name, value, hasValue = goflag[:i], goflag[i+1:], true + } + if strings.HasPrefix(name, "--") { + name = name[1:] + } + f := flags.Lookup(name[1:]) + if f == nil { + continue + } + + // Use flags.Set consistently (instead of f.Value.Set) so that a subsequent + // call to flags.Visit will correctly visit the flags that have been set. 
+ + if fb, ok := f.Value.(boolFlag); ok && fb.IsBoolFlag() { + if hasValue { + if err := flags.Set(f.Name, value); err != nil { + fmt.Fprintf(flags.Output(), "go: invalid boolean value %q for flag %s (from %s): %v\n", value, name, where, err) + flags.Usage() + } + } else { + if err := flags.Set(f.Name, "true"); err != nil { + fmt.Fprintf(flags.Output(), "go: invalid boolean flag %s (from %s): %v\n", name, where, err) + flags.Usage() + } + } + } else { + if !hasValue { + fmt.Fprintf(flags.Output(), "go: flag needs an argument: %s (from %s)\n", name, where) + flags.Usage() + } + if err := flags.Set(f.Name, value); err != nil { + fmt.Fprintf(flags.Output(), "go: invalid value %q for flag %s (from %s): %v\n", value, name, where, err) + flags.Usage() + } + } + } +} + +// InGOFLAGS returns whether GOFLAGS contains the given flag, such as "-mod". +func InGOFLAGS(flag string) bool { + for _, goflag := range GOFLAGS() { + name := goflag + if strings.HasPrefix(name, "--") { + name = name[1:] + } + if i := strings.Index(name, "="); i >= 0 { + name = name[:i] + } + if name == flag { + return true + } + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/base/limit.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/limit.go new file mode 100644 index 0000000000000000000000000000000000000000..b4160bde021c0d5a2ecb9d14a4cbc45599304602 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/limit.go @@ -0,0 +1,84 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package base + +import ( + "fmt" + "internal/godebug" + "runtime" + "strconv" + "sync" +) + +var NetLimitGodebug = godebug.New("#cmdgonetlimit") + +// NetLimit returns the limit on concurrent network operations +// configured by GODEBUG=cmdgonetlimit, if any. 
+// +// A limit of 0 (indicated by 0, true) means that network operations should not +// be allowed. +func NetLimit() (int, bool) { + netLimitOnce.Do(func() { + s := NetLimitGodebug.Value() + if s == "" { + return + } + + n, err := strconv.Atoi(s) + if err != nil { + Fatalf("invalid %s: %v", NetLimitGodebug.Name(), err) + } + if n < 0 { + // Treat negative values as unlimited. + return + } + netLimitSem = make(chan struct{}, n) + }) + + return cap(netLimitSem), netLimitSem != nil +} + +// AcquireNet acquires a semaphore token for a network operation. +func AcquireNet() (release func(), err error) { + hasToken := false + if n, ok := NetLimit(); ok { + if n == 0 { + return nil, fmt.Errorf("network disabled by %v=%v", NetLimitGodebug.Name(), NetLimitGodebug.Value()) + } + netLimitSem <- struct{}{} + hasToken = true + } + + checker := new(netTokenChecker) + runtime.SetFinalizer(checker, (*netTokenChecker).panicUnreleased) + + return func() { + if checker.released { + panic("internal error: net token released twice") + } + checker.released = true + if hasToken { + <-netLimitSem + } + runtime.SetFinalizer(checker, nil) + }, nil +} + +var ( + netLimitOnce sync.Once + netLimitSem chan struct{} +) + +type netTokenChecker struct { + released bool + // We want to use a finalizer to check that all acquired tokens are returned, + // so we arbitrarily pad the tokens with a string to defeat the runtime's + // “tiny allocator”. + unusedAvoidTinyAllocator string +} + +func (c *netTokenChecker) panicUnreleased() { + panic("internal error: net token acquired but not released") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/base/path.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/path.go new file mode 100644 index 0000000000000000000000000000000000000000..64f213b408628eb155a4650484957db693d74696 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/path.go @@ -0,0 +1,79 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package base + +import ( + "os" + "path/filepath" + "runtime" + "strings" + "sync" +) + +var cwd string +var cwdOnce sync.Once + +// UncachedCwd returns the current working directory. +// Most callers should use Cwd, which caches the result for future use. +// UncachedCwd is appropriate to call early in program startup before flag parsing, +// because the -C flag may change the current directory. +func UncachedCwd() string { + wd, err := os.Getwd() + if err != nil { + Fatalf("cannot determine current directory: %v", err) + } + return wd +} + +// Cwd returns the current working directory at the time of the first call. +func Cwd() string { + cwdOnce.Do(func() { + cwd = UncachedCwd() + }) + return cwd +} + +// ShortPath returns an absolute or relative name for path, whatever is shorter. +func ShortPath(path string) string { + if rel, err := filepath.Rel(Cwd(), path); err == nil && len(rel) < len(path) { + return rel + } + return path +} + +// RelPaths returns a copy of paths with absolute paths +// made relative to the current directory if they would be shorter. +func RelPaths(paths []string) []string { + var out []string + for _, p := range paths { + rel, err := filepath.Rel(Cwd(), p) + if err == nil && len(rel) < len(p) { + p = rel + } + out = append(out, p) + } + return out +} + +// IsTestFile reports whether the source file is a set of tests and should therefore +// be excluded from coverage analysis. +func IsTestFile(file string) bool { + // We don't cover tests, only the code they test. + return strings.HasSuffix(file, "_test.go") +} + +// IsNull reports whether the path is a common name for the null device. +// It returns true for /dev/null on Unix, or NUL (case-insensitive) on Windows. 
+func IsNull(path string) bool { + if path == os.DevNull { + return true + } + if runtime.GOOS == "windows" { + if strings.EqualFold(path, "NUL") { + return true + } + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/base/signal.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/signal.go new file mode 100644 index 0000000000000000000000000000000000000000..05befcf7f0c8eb84b07d9aee3b3f489fe04d6be3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/signal.go @@ -0,0 +1,31 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package base + +import ( + "os" + "os/signal" + "sync" +) + +// Interrupted is closed when the go command receives an interrupt signal. +var Interrupted = make(chan struct{}) + +// processSignals setups signal handler. +func processSignals() { + sig := make(chan os.Signal, 1) + signal.Notify(sig, signalsToIgnore...) + go func() { + <-sig + close(Interrupted) + }() +} + +var onceProcessSignals sync.Once + +// StartSigHandlers starts the signal handlers. +func StartSigHandlers() { + onceProcessSignals.Do(processSignals) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/base/signal_notunix.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/signal_notunix.go new file mode 100644 index 0000000000000000000000000000000000000000..682705f9b2cb41e3a6fc81fd74536a3aa4e42104 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/signal_notunix.go @@ -0,0 +1,17 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build plan9 || windows + +package base + +import ( + "os" +) + +var signalsToIgnore = []os.Signal{os.Interrupt} + +// SignalTrace is the signal to send to make a Go program +// crash with a stack trace (no such signal in this case). +var SignalTrace os.Signal = nil diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/base/signal_unix.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/signal_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..0905971932606911be61e82a084f863e2e65982c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/signal_unix.go @@ -0,0 +1,18 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || js || wasip1 + +package base + +import ( + "os" + "syscall" +) + +var signalsToIgnore = []os.Signal{os.Interrupt, syscall.SIGQUIT} + +// SignalTrace is the signal to send to make a Go program +// crash with a stack trace. +var SignalTrace os.Signal = syscall.SIGQUIT diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/base/tool.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/tool.go new file mode 100644 index 0000000000000000000000000000000000000000..ab623da4264866a2641d5236f31ecb83043fdfe9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/base/tool.go @@ -0,0 +1,41 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package base + +import ( + "fmt" + "go/build" + "os" + "path/filepath" + + "cmd/go/internal/cfg" + "cmd/go/internal/par" +) + +// Tool returns the path to the named tool (for example, "vet"). +// If the tool cannot be found, Tool exits the process. 
+func Tool(toolName string) string { + toolPath, err := ToolPath(toolName) + if err != nil && len(cfg.BuildToolexec) == 0 { + // Give a nice message if there is no tool with that name. + fmt.Fprintf(os.Stderr, "go: no such tool %q\n", toolName) + SetExitStatus(2) + Exit() + } + return toolPath +} + +// Tool returns the path at which we expect to find the named tool +// (for example, "vet"), and the error (if any) from statting that path. +func ToolPath(toolName string) (string, error) { + toolPath := filepath.Join(build.ToolDir, toolName) + cfg.ToolExeSuffix() + err := toolStatCache.Do(toolPath, func() error { + _, err := os.Stat(toolPath) + return err + }) + return toolPath, err +} + +var toolStatCache par.Cache[string, error] diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/bug/bug.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/bug/bug.go new file mode 100644 index 0000000000000000000000000000000000000000..ed1813605e5de4f2c55f9789cadb18862be94ec3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/bug/bug.go @@ -0,0 +1,224 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bug implements the “go bug” command. +package bug + +import ( + "bytes" + "context" + "fmt" + "io" + urlpkg "net/url" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strings" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/envcmd" + "cmd/go/internal/web" + "cmd/go/internal/work" +) + +var CmdBug = &base.Command{ + Run: runBug, + UsageLine: "go bug", + Short: "start a bug report", + Long: ` +Bug opens the default browser and starts a new bug report. +The report includes useful system information. 
+ `, +} + +func init() { + CmdBug.Flag.BoolVar(&cfg.BuildV, "v", false, "") + base.AddChdirFlag(&CmdBug.Flag) +} + +func runBug(ctx context.Context, cmd *base.Command, args []string) { + if len(args) > 0 { + base.Fatalf("go: bug takes no arguments") + } + work.BuildInit() + + var buf strings.Builder + buf.WriteString(bugHeader) + printGoVersion(&buf) + buf.WriteString("### Does this issue reproduce with the latest release?\n\n\n") + printEnvDetails(&buf) + buf.WriteString(bugFooter) + + body := buf.String() + url := "https://github.com/golang/go/issues/new?body=" + urlpkg.QueryEscape(body) + if !web.OpenBrowser(url) { + fmt.Print("Please file a new issue at golang.org/issue/new using this template:\n\n") + fmt.Print(body) + } +} + +const bugHeader = ` + +` +const bugFooter = `### What did you do? + + + + + +### What did you expect to see? + + + +### What did you see instead? + +` + +func printGoVersion(w io.Writer) { + fmt.Fprintf(w, "### What version of Go are you using (`go version`)?\n\n") + fmt.Fprintf(w, "
\n")
+	fmt.Fprintf(w, "$ go version\n")
+	fmt.Fprintf(w, "go version %s %s/%s\n", runtime.Version(), runtime.GOOS, runtime.GOARCH)
+	fmt.Fprintf(w, "
\n") + fmt.Fprintf(w, "\n") +} + +func printEnvDetails(w io.Writer) { + fmt.Fprintf(w, "### What operating system and processor architecture are you using (`go env`)?\n\n") + fmt.Fprintf(w, "
go env Output
\n")
+	fmt.Fprintf(w, "$ go env\n")
+	printGoEnv(w)
+	printGoDetails(w)
+	printOSDetails(w)
+	printCDetails(w)
+	fmt.Fprintf(w, "
\n\n") +} + +func printGoEnv(w io.Writer) { + env := envcmd.MkEnv() + env = append(env, envcmd.ExtraEnvVars()...) + env = append(env, envcmd.ExtraEnvVarsCostly()...) + envcmd.PrintEnv(w, env) +} + +func printGoDetails(w io.Writer) { + gocmd := filepath.Join(runtime.GOROOT(), "bin/go") + printCmdOut(w, "GOROOT/bin/go version: ", gocmd, "version") + printCmdOut(w, "GOROOT/bin/go tool compile -V: ", gocmd, "tool", "compile", "-V") +} + +func printOSDetails(w io.Writer) { + switch runtime.GOOS { + case "darwin", "ios": + printCmdOut(w, "uname -v: ", "uname", "-v") + printCmdOut(w, "", "sw_vers") + case "linux": + printCmdOut(w, "uname -sr: ", "uname", "-sr") + printCmdOut(w, "", "lsb_release", "-a") + printGlibcVersion(w) + case "openbsd", "netbsd", "freebsd", "dragonfly": + printCmdOut(w, "uname -v: ", "uname", "-v") + case "illumos", "solaris": + // Be sure to use the OS-supplied uname, in "/usr/bin": + printCmdOut(w, "uname -srv: ", "/usr/bin/uname", "-srv") + out, err := os.ReadFile("/etc/release") + if err == nil { + fmt.Fprintf(w, "/etc/release: %s\n", out) + } else { + if cfg.BuildV { + fmt.Printf("failed to read /etc/release: %v\n", err) + } + } + } +} + +func printCDetails(w io.Writer) { + printCmdOut(w, "lldb --version: ", "lldb", "--version") + cmd := exec.Command("gdb", "--version") + out, err := cmd.Output() + if err == nil { + // There's apparently no combination of command line flags + // to get gdb to spit out its version without the license and warranty. + // Print up to the first newline. + fmt.Fprintf(w, "gdb --version: %s\n", firstLine(out)) + } else { + if cfg.BuildV { + fmt.Printf("failed to run gdb --version: %v\n", err) + } + } +} + +// printCmdOut prints the output of running the given command. +// It ignores failures; 'go bug' is best effort. +func printCmdOut(w io.Writer, prefix, path string, args ...string) { + cmd := exec.Command(path, args...) 
+ out, err := cmd.Output() + if err != nil { + if cfg.BuildV { + fmt.Printf("%s %s: %v\n", path, strings.Join(args, " "), err) + } + return + } + fmt.Fprintf(w, "%s%s\n", prefix, bytes.TrimSpace(out)) +} + +// firstLine returns the first line of a given byte slice. +func firstLine(buf []byte) []byte { + idx := bytes.IndexByte(buf, '\n') + if idx > 0 { + buf = buf[:idx] + } + return bytes.TrimSpace(buf) +} + +// printGlibcVersion prints information about the glibc version. +// It ignores failures. +func printGlibcVersion(w io.Writer) { + tempdir := os.TempDir() + if tempdir == "" { + return + } + src := []byte(`int main() {}`) + srcfile := filepath.Join(tempdir, "go-bug.c") + outfile := filepath.Join(tempdir, "go-bug") + err := os.WriteFile(srcfile, src, 0644) + if err != nil { + return + } + defer os.Remove(srcfile) + cmd := exec.Command("gcc", "-o", outfile, srcfile) + if _, err = cmd.CombinedOutput(); err != nil { + return + } + defer os.Remove(outfile) + + cmd = exec.Command("ldd", outfile) + out, err := cmd.CombinedOutput() + if err != nil { + return + } + re := regexp.MustCompile(`libc\.so[^ ]* => ([^ ]+)`) + m := re.FindStringSubmatch(string(out)) + if m == nil { + return + } + cmd = exec.Command(m[1]) + out, err = cmd.Output() + if err != nil { + return + } + fmt.Fprintf(w, "%s: %s\n", m[1], firstLine(out)) + + // print another line (the one containing version string) in case of musl libc + if idx := bytes.IndexByte(out, '\n'); bytes.Contains(out, []byte("musl")) && idx > -1 { + fmt.Fprintf(w, "%s\n", firstLine(out[idx+1:])) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/cache/cache.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/cache/cache.go new file mode 100644 index 0000000000000000000000000000000000000000..14b2deccd4d18e9bc8365c662b3f5268ed731a31 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/cache/cache.go @@ -0,0 +1,627 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cache implements a build artifact cache. +package cache + +import ( + "bytes" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "internal/godebug" + "io" + "io/fs" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "cmd/go/internal/lockedfile" + "cmd/go/internal/mmap" +) + +// An ActionID is a cache action key, the hash of a complete description of a +// repeatable computation (command line, environment variables, +// input file contents, executable contents). +type ActionID [HashSize]byte + +// An OutputID is a cache output key, the hash of an output of a computation. +type OutputID [HashSize]byte + +// Cache is the interface as used by the cmd/go. +type Cache interface { + // Get returns the cache entry for the provided ActionID. + // On miss, the error type should be of type *entryNotFoundError. + // + // After a success call to Get, OutputFile(Entry.OutputID) must + // exist on disk for until Close is called (at the end of the process). + Get(ActionID) (Entry, error) + + // Put adds an item to the cache. + // + // The seeker is only used to seek to the beginning. After a call to Put, + // the seek position is not guaranteed to be in any particular state. + // + // As a special case, if the ReadSeeker is of type noVerifyReadSeeker, + // the verification from GODEBUG=goverifycache=1 is skipped. + // + // After a success call to Get, OutputFile(Entry.OutputID) must + // exist on disk for until Close is called (at the end of the process). + Put(ActionID, io.ReadSeeker) (_ OutputID, size int64, _ error) + + // Close is called at the end of the go process. Implementations can do + // cache cleanup work at this phase, or wait for and report any errors from + // background cleanup work started earlier. 
Cache trimming in one process
+	// must not cause the invariants of this interface to be violated in another process.
+func Open(dir string) (*DiskCache, error) { + info, err := os.Stat(dir) + if err != nil { + return nil, err + } + if !info.IsDir() { + return nil, &fs.PathError{Op: "open", Path: dir, Err: fmt.Errorf("not a directory")} + } + for i := 0; i < 256; i++ { + name := filepath.Join(dir, fmt.Sprintf("%02x", i)) + if err := os.MkdirAll(name, 0777); err != nil { + return nil, err + } + } + c := &DiskCache{ + dir: dir, + now: time.Now, + } + return c, nil +} + +// fileName returns the name of the file corresponding to the given id. +func (c *DiskCache) fileName(id [HashSize]byte, key string) string { + return filepath.Join(c.dir, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+"-"+key) +} + +// An entryNotFoundError indicates that a cache entry was not found, with an +// optional underlying reason. +type entryNotFoundError struct { + Err error +} + +func (e *entryNotFoundError) Error() string { + if e.Err == nil { + return "cache entry not found" + } + return fmt.Sprintf("cache entry not found: %v", e.Err) +} + +func (e *entryNotFoundError) Unwrap() error { + return e.Err +} + +const ( + // action entry file is "v1 \n" + hexSize = HashSize * 2 + entrySize = 2 + 1 + hexSize + 1 + hexSize + 1 + 20 + 1 + 20 + 1 +) + +// verify controls whether to run the cache in verify mode. +// In verify mode, the cache always returns errMissing from Get +// but then double-checks in Put that the data being written +// exactly matches any existing entry. This provides an easy +// way to detect program behavior that would have been different +// had the cache entry been returned from Get. +// +// verify is enabled by setting the environment variable +// GODEBUG=gocacheverify=1. +var verify = false + +var errVerifyMode = errors.New("gocacheverify=1") + +// DebugTest is set when GODEBUG=gocachetest=1 is in the environment. 
+var DebugTest = false + +func init() { initEnv() } + +var ( + goCacheVerify = godebug.New("gocacheverify") + goDebugHash = godebug.New("gocachehash") + goCacheTest = godebug.New("gocachetest") +) + +func initEnv() { + if goCacheVerify.Value() == "1" { + goCacheVerify.IncNonDefault() + verify = true + } + if goDebugHash.Value() == "1" { + goDebugHash.IncNonDefault() + debugHash = true + } + if goCacheTest.Value() == "1" { + goCacheTest.IncNonDefault() + DebugTest = true + } +} + +// Get looks up the action ID in the cache, +// returning the corresponding output ID and file size, if any. +// Note that finding an output ID does not guarantee that the +// saved file for that output ID is still available. +func (c *DiskCache) Get(id ActionID) (Entry, error) { + if verify { + return Entry{}, &entryNotFoundError{Err: errVerifyMode} + } + return c.get(id) +} + +type Entry struct { + OutputID OutputID + Size int64 + Time time.Time // when added to cache +} + +// get is Get but does not respect verify mode, so that Put can use it. 
+func (c *DiskCache) get(id ActionID) (Entry, error) { + missing := func(reason error) (Entry, error) { + return Entry{}, &entryNotFoundError{Err: reason} + } + f, err := os.Open(c.fileName(id, "a")) + if err != nil { + return missing(err) + } + defer f.Close() + entry := make([]byte, entrySize+1) // +1 to detect whether f is too long + if n, err := io.ReadFull(f, entry); n > entrySize { + return missing(errors.New("too long")) + } else if err != io.ErrUnexpectedEOF { + if err == io.EOF { + return missing(errors.New("file is empty")) + } + return missing(err) + } else if n < entrySize { + return missing(errors.New("entry file incomplete")) + } + if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' { + return missing(errors.New("invalid header")) + } + eid, entry := entry[3:3+hexSize], entry[3+hexSize:] + eout, entry := entry[1:1+hexSize], entry[1+hexSize:] + esize, entry := entry[1:1+20], entry[1+20:] + etime, entry := entry[1:1+20], entry[1+20:] + var buf [HashSize]byte + if _, err := hex.Decode(buf[:], eid); err != nil { + return missing(fmt.Errorf("decoding ID: %v", err)) + } else if buf != id { + return missing(errors.New("mismatched ID")) + } + if _, err := hex.Decode(buf[:], eout); err != nil { + return missing(fmt.Errorf("decoding output ID: %v", err)) + } + i := 0 + for i < len(esize) && esize[i] == ' ' { + i++ + } + size, err := strconv.ParseInt(string(esize[i:]), 10, 64) + if err != nil { + return missing(fmt.Errorf("parsing size: %v", err)) + } else if size < 0 { + return missing(errors.New("negative size")) + } + i = 0 + for i < len(etime) && etime[i] == ' ' { + i++ + } + tm, err := strconv.ParseInt(string(etime[i:]), 10, 64) + if err != nil { + return missing(fmt.Errorf("parsing timestamp: %v", err)) + } else if tm < 0 { + return missing(errors.New("negative timestamp")) + } + + c.used(c.fileName(id, "a")) + + 
return Entry{buf, size, time.Unix(0, tm)}, nil +} + +// GetFile looks up the action ID in the cache and returns +// the name of the corresponding data file. +func GetFile(c Cache, id ActionID) (file string, entry Entry, err error) { + entry, err = c.Get(id) + if err != nil { + return "", Entry{}, err + } + file = c.OutputFile(entry.OutputID) + info, err := os.Stat(file) + if err != nil { + return "", Entry{}, &entryNotFoundError{Err: err} + } + if info.Size() != entry.Size { + return "", Entry{}, &entryNotFoundError{Err: errors.New("file incomplete")} + } + return file, entry, nil +} + +// GetBytes looks up the action ID in the cache and returns +// the corresponding output bytes. +// GetBytes should only be used for data that can be expected to fit in memory. +func GetBytes(c Cache, id ActionID) ([]byte, Entry, error) { + entry, err := c.Get(id) + if err != nil { + return nil, entry, err + } + data, _ := os.ReadFile(c.OutputFile(entry.OutputID)) + if sha256.Sum256(data) != entry.OutputID { + return nil, entry, &entryNotFoundError{Err: errors.New("bad checksum")} + } + return data, entry, nil +} + +// GetMmap looks up the action ID in the cache and returns +// the corresponding output bytes. +// GetMmap should only be used for data that can be expected to fit in memory. +func GetMmap(c Cache, id ActionID) ([]byte, Entry, error) { + entry, err := c.Get(id) + if err != nil { + return nil, entry, err + } + md, err := mmap.Mmap(c.OutputFile(entry.OutputID)) + if err != nil { + return nil, Entry{}, err + } + if int64(len(md.Data)) != entry.Size { + return nil, Entry{}, &entryNotFoundError{Err: errors.New("file incomplete")} + } + return md.Data, entry, nil +} + +// OutputFile returns the name of the cache file storing output with the given OutputID. +func (c *DiskCache) OutputFile(out OutputID) string { + file := c.fileName(out, "d") + c.used(file) + return file +} + +// Time constants for cache expiration. 
+// +// We set the mtime on a cache file on each use, but at most one per mtimeInterval (1 hour), +// to avoid causing many unnecessary inode updates. The mtimes therefore +// roughly reflect "time of last use" but may in fact be older by at most an hour. +// +// We scan the cache for entries to delete at most once per trimInterval (1 day). +// +// When we do scan the cache, we delete entries that have not been used for +// at least trimLimit (5 days). Statistics gathered from a month of usage by +// Go developers found that essentially all reuse of cached entries happened +// within 5 days of the previous reuse. See golang.org/issue/22990. +const ( + mtimeInterval = 1 * time.Hour + trimInterval = 24 * time.Hour + trimLimit = 5 * 24 * time.Hour +) + +// used makes a best-effort attempt to update mtime on file, +// so that mtime reflects cache access time. +// +// Because the reflection only needs to be approximate, +// and to reduce the amount of disk activity caused by using +// cache entries, used only updates the mtime if the current +// mtime is more than an hour old. This heuristic eliminates +// nearly all of the mtime updates that would otherwise happen, +// while still keeping the mtimes useful for cache trimming. +func (c *DiskCache) used(file string) { + info, err := os.Stat(file) + if err == nil && c.now().Sub(info.ModTime()) < mtimeInterval { + return + } + os.Chtimes(file, c.now(), c.now()) +} + +func (c *DiskCache) Close() error { return c.Trim() } + +// Trim removes old cache entries that are likely not to be reused. +func (c *DiskCache) Trim() error { + now := c.now() + + // We maintain in dir/trim.txt the time of the last completed cache trim. + // If the cache has been trimmed recently enough, do nothing. + // This is the common case. + // If the trim file is corrupt, detected if the file can't be parsed, or the + // trim time is too far in the future, attempt the trim anyway. 
It's possible that + // the cache was full when the corruption happened. Attempting a trim on + // an empty cache is cheap, so there wouldn't be a big performance hit in that case. + if data, err := lockedfile.Read(filepath.Join(c.dir, "trim.txt")); err == nil { + if t, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64); err == nil { + lastTrim := time.Unix(t, 0) + if d := now.Sub(lastTrim); d < trimInterval && d > -mtimeInterval { + return nil + } + } + } + + // Trim each of the 256 subdirectories. + // We subtract an additional mtimeInterval + // to account for the imprecision of our "last used" mtimes. + cutoff := now.Add(-trimLimit - mtimeInterval) + for i := 0; i < 256; i++ { + subdir := filepath.Join(c.dir, fmt.Sprintf("%02x", i)) + c.trimSubdir(subdir, cutoff) + } + + // Ignore errors from here: if we don't write the complete timestamp, the + // cache will appear older than it is, and we'll trim it again next time. + var b bytes.Buffer + fmt.Fprintf(&b, "%d", now.Unix()) + if err := lockedfile.Write(filepath.Join(c.dir, "trim.txt"), &b, 0666); err != nil { + return err + } + + return nil +} + +// trimSubdir trims a single cache subdirectory. +func (c *DiskCache) trimSubdir(subdir string, cutoff time.Time) { + // Read all directory entries from subdir before removing + // any files, in case removing files invalidates the file offset + // in the directory scan. Also, ignore error from f.Readdirnames, + // because we don't care about reporting the error and we still + // want to process any entries found before the error. + f, err := os.Open(subdir) + if err != nil { + return + } + names, _ := f.Readdirnames(-1) + f.Close() + + for _, name := range names { + // Remove only cache entries (xxxx-a and xxxx-d). 
+ if !strings.HasSuffix(name, "-a") && !strings.HasSuffix(name, "-d") { + continue + } + entry := filepath.Join(subdir, name) + info, err := os.Stat(entry) + if err == nil && info.ModTime().Before(cutoff) { + os.Remove(entry) + } + } +} + +// putIndexEntry adds an entry to the cache recording that executing the action +// with the given id produces an output with the given output id (hash) and size. +func (c *DiskCache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify bool) error { + // Note: We expect that for one reason or another it may happen + // that repeating an action produces a different output hash + // (for example, if the output contains a time stamp or temp dir name). + // While not ideal, this is also not a correctness problem, so we + // don't make a big deal about it. In particular, we leave the action + // cache entries writable specifically so that they can be overwritten. + // + // Setting GODEBUG=gocacheverify=1 does make a big deal: + // in verify mode we are double-checking that the cache entries + // are entirely reproducible. As just noted, this may be unrealistic + // in some cases but the check is also useful for shaking out real bugs. + entry := fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano()) + if verify && allowVerify { + old, err := c.get(id) + if err == nil && (old.OutputID != out || old.Size != size) { + // panic to show stack trace, so we can see what code is generating this cache entry. + msg := fmt.Sprintf("go: internal cache error: cache verify failed: id=%x changed:<<<\n%s\n>>>\nold: %x %d\nnew: %x %d", id, reverseHash(id), out, size, old.OutputID, old.Size) + panic(msg) + } + } + file := c.fileName(id, "a") + + // Copy file to cache directory. + mode := os.O_WRONLY | os.O_CREATE + f, err := os.OpenFile(file, mode, 0666) + if err != nil { + return err + } + _, err = f.WriteString(entry) + if err == nil { + // Truncate the file only *after* writing it. 
+ // (This should be a no-op, but truncate just in case of previous corruption.) + // + // This differs from os.WriteFile, which truncates to 0 *before* writing + // via os.O_TRUNC. Truncating only after writing ensures that a second write + // of the same content to the same file is idempotent, and does not — even + // temporarily! — undo the effect of the first write. + err = f.Truncate(int64(len(entry))) + } + if closeErr := f.Close(); err == nil { + err = closeErr + } + if err != nil { + // TODO(bcmills): This Remove potentially races with another go command writing to file. + // Can we eliminate it? + os.Remove(file) + return err + } + os.Chtimes(file, c.now(), c.now()) // mainly for tests + + return nil +} + +// noVerifyReadSeeker is an io.ReadSeeker wrapper sentinel type +// that says that Cache.Put should skip the verify check +// (from GODEBUG=goverifycache=1). +type noVerifyReadSeeker struct { + io.ReadSeeker +} + +// Put stores the given output in the cache as the output for the action ID. +// It may read file twice. The content of file must not change between the two passes. +func (c *DiskCache) Put(id ActionID, file io.ReadSeeker) (OutputID, int64, error) { + wrapper, isNoVerify := file.(noVerifyReadSeeker) + if isNoVerify { + file = wrapper.ReadSeeker + } + return c.put(id, file, !isNoVerify) +} + +// PutNoVerify is like Put but disables the verify check +// when GODEBUG=goverifycache=1 is set. +// It is meant for data that is OK to cache but that we expect to vary slightly from run to run, +// like test output containing times and the like. +func PutNoVerify(c Cache, id ActionID, file io.ReadSeeker) (OutputID, int64, error) { + return c.Put(id, noVerifyReadSeeker{file}) +} + +func (c *DiskCache) put(id ActionID, file io.ReadSeeker, allowVerify bool) (OutputID, int64, error) { + // Compute output ID. 
+ h := sha256.New() + if _, err := file.Seek(0, 0); err != nil { + return OutputID{}, 0, err + } + size, err := io.Copy(h, file) + if err != nil { + return OutputID{}, 0, err + } + var out OutputID + h.Sum(out[:0]) + + // Copy to cached output file (if not already present). + if err := c.copyFile(file, out, size); err != nil { + return out, size, err + } + + // Add to cache index. + return out, size, c.putIndexEntry(id, out, size, allowVerify) +} + +// PutBytes stores the given bytes in the cache as the output for the action ID. +func PutBytes(c Cache, id ActionID, data []byte) error { + _, _, err := c.Put(id, bytes.NewReader(data)) + return err +} + +// copyFile copies file into the cache, expecting it to have the given +// output ID and size, if that file is not present already. +func (c *DiskCache) copyFile(file io.ReadSeeker, out OutputID, size int64) error { + name := c.fileName(out, "d") + info, err := os.Stat(name) + if err == nil && info.Size() == size { + // Check hash. + if f, err := os.Open(name); err == nil { + h := sha256.New() + io.Copy(h, f) + f.Close() + var out2 OutputID + h.Sum(out2[:0]) + if out == out2 { + return nil + } + } + // Hash did not match. Fall through and rewrite file. + } + + // Copy file to cache directory. + mode := os.O_RDWR | os.O_CREATE + if err == nil && info.Size() > size { // shouldn't happen but fix in case + mode |= os.O_TRUNC + } + f, err := os.OpenFile(name, mode, 0666) + if err != nil { + return err + } + defer f.Close() + if size == 0 { + // File now exists with correct size. + // Only one possible zero-length file, so contents are OK too. + // Early return here makes sure there's a "last byte" for code below. + return nil + } + + // From here on, if any of the I/O writing the file fails, + // we make a best-effort attempt to truncate the file f + // before returning, to avoid leaving bad bytes in the file. + + // Copy file to f, but also into h to double-check hash. 
+ if _, err := file.Seek(0, 0); err != nil { + f.Truncate(0) + return err + } + h := sha256.New() + w := io.MultiWriter(f, h) + if _, err := io.CopyN(w, file, size-1); err != nil { + f.Truncate(0) + return err + } + // Check last byte before writing it; writing it will make the size match + // what other processes expect to find and might cause them to start + // using the file. + buf := make([]byte, 1) + if _, err := file.Read(buf); err != nil { + f.Truncate(0) + return err + } + h.Write(buf) + sum := h.Sum(nil) + if !bytes.Equal(sum, out[:]) { + f.Truncate(0) + return fmt.Errorf("file content changed underfoot") + } + + // Commit cache file entry. + if _, err := f.Write(buf); err != nil { + f.Truncate(0) + return err + } + if err := f.Close(); err != nil { + // Data might not have been written, + // but file may look like it is the right size. + // To be extra careful, remove cached file. + os.Remove(name) + return err + } + os.Chtimes(name, c.now(), c.now()) // mainly for tests + + return nil +} + +// FuzzDir returns a subdirectory within the cache for storing fuzzing data. +// The subdirectory may not exist. +// +// This directory is managed by the internal/fuzz package. Files in this +// directory aren't removed by the 'go clean -cache' command or by Trim. +// They may be removed with 'go clean -fuzzcache'. +// +// TODO(#48526): make Trim remove unused files from this directory. +func (c *DiskCache) FuzzDir() string { + return filepath.Join(c.dir, "fuzz") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/cache/cache_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/cache/cache_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a12f1d2ee798ee735594475861c314da3eea97eb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/cache/cache_test.go @@ -0,0 +1,285 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "bytes" + "encoding/binary" + "fmt" + "internal/testenv" + "os" + "path/filepath" + "testing" + "time" +) + +func init() { + verify = false // even if GODEBUG is set +} + +func TestBasic(t *testing.T) { + dir, err := os.MkdirTemp("", "cachetest-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + _, err = Open(filepath.Join(dir, "notexist")) + if err == nil { + t.Fatal(`Open("tmp/notexist") succeeded, want failure`) + } + + cdir := filepath.Join(dir, "c1") + if err := os.Mkdir(cdir, 0777); err != nil { + t.Fatal(err) + } + + c1, err := Open(cdir) + if err != nil { + t.Fatalf("Open(c1) (create): %v", err) + } + if err := c1.putIndexEntry(dummyID(1), dummyID(12), 13, true); err != nil { + t.Fatalf("addIndexEntry: %v", err) + } + if err := c1.putIndexEntry(dummyID(1), dummyID(2), 3, true); err != nil { // overwrite entry + t.Fatalf("addIndexEntry: %v", err) + } + if entry, err := c1.Get(dummyID(1)); err != nil || entry.OutputID != dummyID(2) || entry.Size != 3 { + t.Fatalf("c1.Get(1) = %x, %v, %v, want %x, %v, nil", entry.OutputID, entry.Size, err, dummyID(2), 3) + } + + c2, err := Open(cdir) + if err != nil { + t.Fatalf("Open(c2) (reuse): %v", err) + } + if entry, err := c2.Get(dummyID(1)); err != nil || entry.OutputID != dummyID(2) || entry.Size != 3 { + t.Fatalf("c2.Get(1) = %x, %v, %v, want %x, %v, nil", entry.OutputID, entry.Size, err, dummyID(2), 3) + } + if err := c2.putIndexEntry(dummyID(2), dummyID(3), 4, true); err != nil { + t.Fatalf("addIndexEntry: %v", err) + } + if entry, err := c1.Get(dummyID(2)); err != nil || entry.OutputID != dummyID(3) || entry.Size != 4 { + t.Fatalf("c1.Get(2) = %x, %v, %v, want %x, %v, nil", entry.OutputID, entry.Size, err, dummyID(3), 4) + } +} + +func TestGrowth(t *testing.T) { + dir, err := os.MkdirTemp("", "cachetest-") + if err != nil { + t.Fatal(err) + } + defer 
os.RemoveAll(dir) + + c, err := Open(dir) + if err != nil { + t.Fatalf("Open: %v", err) + } + + n := 10000 + if testing.Short() { + n = 10 + } + + for i := 0; i < n; i++ { + if err := c.putIndexEntry(dummyID(i), dummyID(i*99), int64(i)*101, true); err != nil { + t.Fatalf("addIndexEntry: %v", err) + } + id := ActionID(dummyID(i)) + entry, err := c.Get(id) + if err != nil { + t.Fatalf("Get(%x): %v", id, err) + } + if entry.OutputID != dummyID(i*99) || entry.Size != int64(i)*101 { + t.Errorf("Get(%x) = %x, %d, want %x, %d", id, entry.OutputID, entry.Size, dummyID(i*99), int64(i)*101) + } + } + for i := 0; i < n; i++ { + id := ActionID(dummyID(i)) + entry, err := c.Get(id) + if err != nil { + t.Fatalf("Get2(%x): %v", id, err) + } + if entry.OutputID != dummyID(i*99) || entry.Size != int64(i)*101 { + t.Errorf("Get2(%x) = %x, %d, want %x, %d", id, entry.OutputID, entry.Size, dummyID(i*99), int64(i)*101) + } + } +} + +func TestVerifyPanic(t *testing.T) { + os.Setenv("GODEBUG", "gocacheverify=1") + initEnv() + defer func() { + os.Unsetenv("GODEBUG") + verify = false + }() + + if !verify { + t.Fatal("initEnv did not set verify") + } + + dir, err := os.MkdirTemp("", "cachetest-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + c, err := Open(dir) + if err != nil { + t.Fatalf("Open: %v", err) + } + + id := ActionID(dummyID(1)) + if err := PutBytes(c, id, []byte("abc")); err != nil { + t.Fatal(err) + } + + defer func() { + if err := recover(); err != nil { + t.Log(err) + return + } + }() + PutBytes(c, id, []byte("def")) + t.Fatal("mismatched Put did not panic in verify mode") +} + +func dummyID(x int) [HashSize]byte { + var out [HashSize]byte + binary.LittleEndian.PutUint64(out[:], uint64(x)) + return out +} + +func TestCacheTrim(t *testing.T) { + dir, err := os.MkdirTemp("", "cachetest-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(dir) + + c, err := Open(dir) + if err != nil { + t.Fatalf("Open: %v", err) + } + const start = 1000000000 + now 
:= int64(start) + c.now = func() time.Time { return time.Unix(now, 0) } + + checkTime := func(name string, mtime int64) { + t.Helper() + file := filepath.Join(c.dir, name[:2], name) + info, err := os.Stat(file) + if err != nil { + t.Fatal(err) + } + if info.ModTime().Unix() != mtime { + t.Fatalf("%s mtime = %d, want %d", name, info.ModTime().Unix(), mtime) + } + } + + id := ActionID(dummyID(1)) + PutBytes(c, id, []byte("abc")) + entry, _ := c.Get(id) + PutBytes(c, ActionID(dummyID(2)), []byte("def")) + mtime := now + checkTime(fmt.Sprintf("%x-a", id), mtime) + checkTime(fmt.Sprintf("%x-d", entry.OutputID), mtime) + + // Get should not change recent mtimes. + now = start + 10 + c.Get(id) + checkTime(fmt.Sprintf("%x-a", id), mtime) + checkTime(fmt.Sprintf("%x-d", entry.OutputID), mtime) + + // Get should change distant mtimes. + now = start + 5000 + mtime2 := now + if _, err := c.Get(id); err != nil { + t.Fatal(err) + } + c.OutputFile(entry.OutputID) + checkTime(fmt.Sprintf("%x-a", id), mtime2) + checkTime(fmt.Sprintf("%x-d", entry.OutputID), mtime2) + + // Trim should leave everything alone: it's all too new. + if err := c.Trim(); err != nil { + if testenv.SyscallIsNotSupported(err) { + t.Skipf("skipping: Trim is unsupported (%v)", err) + } + t.Fatal(err) + } + if _, err := c.Get(id); err != nil { + t.Fatal(err) + } + c.OutputFile(entry.OutputID) + data, err := os.ReadFile(filepath.Join(dir, "trim.txt")) + if err != nil { + t.Fatal(err) + } + checkTime(fmt.Sprintf("%x-a", dummyID(2)), start) + + // Trim less than a day later should not do any work at all. 
+ now = start + 80000 + if err := c.Trim(); err != nil { + t.Fatal(err) + } + if _, err := c.Get(id); err != nil { + t.Fatal(err) + } + c.OutputFile(entry.OutputID) + data2, err := os.ReadFile(filepath.Join(dir, "trim.txt")) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(data, data2) { + t.Fatalf("second trim did work: %q -> %q", data, data2) + } + + // Fast forward and do another trim just before the 5 day cutoff. + // Note that because of usedQuantum the cutoff is actually 5 days + 1 hour. + // We used c.Get(id) just now, so 5 days later it should still be kept. + // On the other hand almost a full day has gone by since we wrote dummyID(2) + // and we haven't looked at it since, so 5 days later it should be gone. + now += 5 * 86400 + checkTime(fmt.Sprintf("%x-a", dummyID(2)), start) + if err := c.Trim(); err != nil { + t.Fatal(err) + } + if _, err := c.Get(id); err != nil { + t.Fatal(err) + } + c.OutputFile(entry.OutputID) + mtime3 := now + if _, err := c.Get(dummyID(2)); err == nil { // haven't done a Get for this since original write above + t.Fatalf("Trim did not remove dummyID(2)") + } + + // The c.Get(id) refreshed id's mtime again. + // Check that another 5 days later it is still not gone, + // but check by using checkTime, which doesn't bring mtime forward. + now += 5 * 86400 + if err := c.Trim(); err != nil { + t.Fatal(err) + } + checkTime(fmt.Sprintf("%x-a", id), mtime3) + checkTime(fmt.Sprintf("%x-d", entry.OutputID), mtime3) + + // Half a day later Trim should still be a no-op, because there was a Trim recently. + // Even though the entry for id is now old enough to be trimmed, + // it gets a reprieve until the time comes for a new Trim scan. + now += 86400 / 2 + if err := c.Trim(); err != nil { + t.Fatal(err) + } + checkTime(fmt.Sprintf("%x-a", id), mtime3) + checkTime(fmt.Sprintf("%x-d", entry.OutputID), mtime3) + + // Another half a day later, Trim should actually run, and it should remove id. 
+ now += 86400/2 + 1 + if err := c.Trim(); err != nil { + t.Fatal(err) + } + if _, err := c.Get(dummyID(1)); err == nil { + t.Fatal("Trim did not remove dummyID(1)") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/cache/default.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/cache/default.go new file mode 100644 index 0000000000000000000000000000000000000000..b5650eac669b46926769c9519dec7f7476fb6e2d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/cache/default.go @@ -0,0 +1,105 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "fmt" + "os" + "path/filepath" + "sync" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "internal/goexperiment" +) + +// Default returns the default cache to use. +// It never returns nil. +func Default() Cache { + defaultOnce.Do(initDefaultCache) + return defaultCache +} + +var ( + defaultOnce sync.Once + defaultCache Cache +) + +// cacheREADME is a message stored in a README in the cache directory. +// Because the cache lives outside the normal Go trees, we leave the +// README as a courtesy to explain where it came from. +const cacheREADME = `This directory holds cached build artifacts from the Go build system. +Run "go clean -cache" if the directory is getting too large. +Run "go clean -fuzzcache" to delete the fuzz cache. +See golang.org to learn more about Go. +` + +// initDefaultCache does the work of finding the default cache +// the first time Default is called. 
+func initDefaultCache() { + dir := DefaultDir() + if dir == "off" { + if defaultDirErr != nil { + base.Fatalf("build cache is required, but could not be located: %v", defaultDirErr) + } + base.Fatalf("build cache is disabled by GOCACHE=off, but required as of Go 1.12") + } + if err := os.MkdirAll(dir, 0777); err != nil { + base.Fatalf("failed to initialize build cache at %s: %s\n", dir, err) + } + if _, err := os.Stat(filepath.Join(dir, "README")); err != nil { + // Best effort. + os.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666) + } + + diskCache, err := Open(dir) + if err != nil { + base.Fatalf("failed to initialize build cache at %s: %s\n", dir, err) + } + + if v := cfg.Getenv("GOCACHEPROG"); v != "" && goexperiment.CacheProg { + defaultCache = startCacheProg(v, diskCache) + } else { + defaultCache = diskCache + } +} + +var ( + defaultDirOnce sync.Once + defaultDir string + defaultDirErr error +) + +// DefaultDir returns the effective GOCACHE setting. +// It returns "off" if the cache is disabled. +func DefaultDir() string { + // Save the result of the first call to DefaultDir for later use in + // initDefaultCache. cmd/go/main.go explicitly sets GOCACHE so that + // subprocesses will inherit it, but that means initDefaultCache can't + // otherwise distinguish between an explicit "off" and a UserCacheDir error. + + defaultDirOnce.Do(func() { + defaultDir = cfg.Getenv("GOCACHE") + if filepath.IsAbs(defaultDir) || defaultDir == "off" { + return + } + if defaultDir != "" { + defaultDir = "off" + defaultDirErr = fmt.Errorf("GOCACHE is not an absolute path") + return + } + + // Compute default location. 
+ dir, err := os.UserCacheDir() + if err != nil { + defaultDir = "off" + defaultDirErr = fmt.Errorf("GOCACHE is not defined and %v", err) + return + } + defaultDir = filepath.Join(dir, "go-build") + }) + + return defaultDir +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/cache/hash.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/cache/hash.go new file mode 100644 index 0000000000000000000000000000000000000000..4f79c3150024492db1572271c08ffb1a3eeca082 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/cache/hash.go @@ -0,0 +1,190 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "bytes" + "crypto/sha256" + "fmt" + "hash" + "io" + "os" + "runtime" + "strings" + "sync" +) + +var debugHash = false // set when GODEBUG=gocachehash=1 + +// HashSize is the number of bytes in a hash. +const HashSize = 32 + +// A Hash provides access to the canonical hash function used to index the cache. +// The current implementation uses salted SHA256, but clients must not assume this. +type Hash struct { + h hash.Hash + name string // for debugging + buf *bytes.Buffer // for verify +} + +// hashSalt is a salt string added to the beginning of every hash +// created by NewHash. Using the Go version makes sure that different +// versions of the go command (or even different Git commits during +// work on the development branch) do not address the same cache +// entries, so that a bug in one version does not affect the execution +// of other versions. This salt will result in additional ActionID files +// in the cache, but not additional copies of the large output files, +// which are still addressed by unsalted SHA256. +// +// We strip any GOEXPERIMENTs the go tool was built with from this +// version string on the assumption that they shouldn't affect go tool +// execution. 
This allows bootstrapping to converge faster: dist builds +// go_bootstrap without any experiments, so by stripping experiments +// go_bootstrap and the final go binary will use the same salt. +var hashSalt = []byte(stripExperiment(runtime.Version())) + +// stripExperiment strips any GOEXPERIMENT configuration from the Go +// version string. +func stripExperiment(version string) string { + if i := strings.Index(version, " X:"); i >= 0 { + return version[:i] + } + return version +} + +// Subkey returns an action ID corresponding to mixing a parent +// action ID with a string description of the subkey. +func Subkey(parent ActionID, desc string) ActionID { + h := sha256.New() + h.Write([]byte("subkey:")) + h.Write(parent[:]) + h.Write([]byte(desc)) + var out ActionID + h.Sum(out[:0]) + if debugHash { + fmt.Fprintf(os.Stderr, "HASH subkey %x %q = %x\n", parent, desc, out) + } + if verify { + hashDebug.Lock() + hashDebug.m[out] = fmt.Sprintf("subkey %x %q", parent, desc) + hashDebug.Unlock() + } + return out +} + +// NewHash returns a new Hash. +// The caller is expected to Write data to it and then call Sum. +func NewHash(name string) *Hash { + h := &Hash{h: sha256.New(), name: name} + if debugHash { + fmt.Fprintf(os.Stderr, "HASH[%s]\n", h.name) + } + h.Write(hashSalt) + if verify { + h.buf = new(bytes.Buffer) + } + return h +} + +// Write writes data to the running hash. +func (h *Hash) Write(b []byte) (int, error) { + if debugHash { + fmt.Fprintf(os.Stderr, "HASH[%s]: %q\n", h.name, b) + } + if h.buf != nil { + h.buf.Write(b) + } + return h.h.Write(b) +} + +// Sum returns the hash of the data written previously. 
+func (h *Hash) Sum() [HashSize]byte { + var out [HashSize]byte + h.h.Sum(out[:0]) + if debugHash { + fmt.Fprintf(os.Stderr, "HASH[%s]: %x\n", h.name, out) + } + if h.buf != nil { + hashDebug.Lock() + if hashDebug.m == nil { + hashDebug.m = make(map[[HashSize]byte]string) + } + hashDebug.m[out] = h.buf.String() + hashDebug.Unlock() + } + return out +} + +// In GODEBUG=gocacheverify=1 mode, +// hashDebug holds the input to every computed hash ID, +// so that we can work backward from the ID involved in a +// cache entry mismatch to a description of what should be there. +var hashDebug struct { + sync.Mutex + m map[[HashSize]byte]string +} + +// reverseHash returns the input used to compute the hash id. +func reverseHash(id [HashSize]byte) string { + hashDebug.Lock() + s := hashDebug.m[id] + hashDebug.Unlock() + return s +} + +var hashFileCache struct { + sync.Mutex + m map[string][HashSize]byte +} + +// FileHash returns the hash of the named file. +// It caches repeated lookups for a given file, +// and the cache entry for a file can be initialized +// using SetFileHash. +// The hash used by FileHash is not the same as +// the hash used by NewHash. +func FileHash(file string) ([HashSize]byte, error) { + hashFileCache.Lock() + out, ok := hashFileCache.m[file] + hashFileCache.Unlock() + + if ok { + return out, nil + } + + h := sha256.New() + f, err := os.Open(file) + if err != nil { + if debugHash { + fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err) + } + return [HashSize]byte{}, err + } + _, err = io.Copy(h, f) + f.Close() + if err != nil { + if debugHash { + fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err) + } + return [HashSize]byte{}, err + } + h.Sum(out[:0]) + if debugHash { + fmt.Fprintf(os.Stderr, "HASH %s: %x\n", file, out) + } + + SetFileHash(file, out) + return out, nil +} + +// SetFileHash sets the hash returned by FileHash for file. 
+func SetFileHash(file string, sum [HashSize]byte) { + hashFileCache.Lock() + if hashFileCache.m == nil { + hashFileCache.m = make(map[string][HashSize]byte) + } + hashFileCache.m[file] = sum + hashFileCache.Unlock() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/cache/hash_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/cache/hash_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a0356771cac829a66df8efc27e1ee295acc44c02 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/cache/hash_test.go @@ -0,0 +1,51 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "fmt" + "os" + "testing" +) + +func TestHash(t *testing.T) { + oldSalt := hashSalt + hashSalt = nil + defer func() { + hashSalt = oldSalt + }() + + h := NewHash("alice") + h.Write([]byte("hello world")) + sum := fmt.Sprintf("%x", h.Sum()) + want := "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9" + if sum != want { + t.Errorf("hash(hello world) = %v, want %v", sum, want) + } +} + +func TestHashFile(t *testing.T) { + f, err := os.CreateTemp("", "cmd-go-test-") + if err != nil { + t.Fatal(err) + } + name := f.Name() + fmt.Fprintf(f, "hello world") + defer os.Remove(name) + if err := f.Close(); err != nil { + t.Fatal(err) + } + + var h ActionID // make sure hash result is assignable to ActionID + h, err = FileHash(name) + if err != nil { + t.Fatal(err) + } + sum := fmt.Sprintf("%x", h) + want := "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9" + if sum != want { + t.Errorf("hash(hello world) = %v, want %v", sum, want) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/cache/prog.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/cache/prog.go new file mode 100644 index 
0000000000000000000000000000000000000000..8d826f0b99b6f00ca02c523cd14d4359030a643e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/cache/prog.go @@ -0,0 +1,427 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cache + +import ( + "bufio" + "cmd/go/internal/base" + "cmd/internal/quoted" + "context" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + "os/exec" + "sync" + "sync/atomic" + "time" +) + +// ProgCache implements Cache via JSON messages over stdin/stdout to a child +// helper process which can then implement whatever caching policy/mechanism it +// wants. +// +// See https://github.com/golang/go/issues/59719 +type ProgCache struct { + cmd *exec.Cmd + stdout io.ReadCloser // from the child process + stdin io.WriteCloser // to the child process + bw *bufio.Writer // to stdin + jenc *json.Encoder // to bw + + // can are the commands that the child process declared that it supports. + // This is effectively the versioning mechanism. + can map[ProgCmd]bool + + // fuzzDirCache is another Cache implementation to use for the FuzzDir + // method. In practice this is the default GOCACHE disk-based + // implementation. + // + // TODO(bradfitz): maybe this isn't ideal. But we'd need to extend the Cache + // interface and the fuzzing callers to be less disk-y to do more here. + fuzzDirCache Cache + + closing atomic.Bool + ctx context.Context // valid until Close via ctxClose + ctxCancel context.CancelFunc // called on Close + readLoopDone chan struct{} // closed when readLoop returns + + mu sync.Mutex // guards following fields + nextID int64 + inFlight map[int64]chan<- *ProgResponse + outputFile map[OutputID]string // object => abs path on disk + + // writeMu serializes writing to the child process. + // It must never be held at the same time as mu. 
+ writeMu sync.Mutex +} + +// ProgCmd is a command that can be issued to a child process. +// +// If the interface needs to grow, we can add new commands or new versioned +// commands like "get2". +type ProgCmd string + +const ( + cmdGet = ProgCmd("get") + cmdPut = ProgCmd("put") + cmdClose = ProgCmd("close") +) + +// ProgRequest is the JSON-encoded message that's sent from cmd/go to +// the GOCACHEPROG child process over stdin. Each JSON object is on its +// own line. A ProgRequest of Type "put" with BodySize > 0 will be followed +// by a line containing a base64-encoded JSON string literal of the body. +type ProgRequest struct { + // ID is a unique number per process across all requests. + // It must be echoed in the ProgResponse from the child. + ID int64 + + // Command is the type of request. + // The cmd/go tool will only send commands that were declared + // as supported by the child. + Command ProgCmd + + // ActionID is non-nil for get and puts. + ActionID []byte `json:",omitempty"` // or nil if not used + + // ObjectID is set for Type "put" and "output-file". + ObjectID []byte `json:",omitempty"` // or nil if not used + + // Body is the body for "put" requests. It's sent after the JSON object + // as a base64-encoded JSON string when BodySize is non-zero. + // It's sent as a separate JSON value instead of being a struct field + // send in this JSON object so large values can be streamed in both directions. + // The base64 string body of a ProgRequest will always be written + // immediately after the JSON object and a newline. + Body io.Reader `json:"-"` + + // BodySize is the number of bytes of Body. If zero, the body isn't written. + BodySize int64 `json:",omitempty"` +} + +// ProgResponse is the JSON response from the child process to cmd/go. +// +// With the exception of the first protocol message that the child writes to its +// stdout with ID==0 and KnownCommands populated, these are only sent in +// response to a ProgRequest from cmd/go. 
+// +// ProgResponses can be sent in any order. The ID must match the request they're +// replying to. +type ProgResponse struct { + ID int64 // that corresponds to ProgRequest; they can be answered out of order + Err string `json:",omitempty"` // if non-empty, the error + + // KnownCommands is included in the first message that cache helper program + // writes to stdout on startup (with ID==0). It includes the + // ProgRequest.Command types that are supported by the program. + // + // This lets us extend the protocol gracefully over time (adding "get2", + // etc), or fail gracefully when needed. It also lets us verify the program + // wants to be a cache helper. + KnownCommands []ProgCmd `json:",omitempty"` + + // For Get requests. + + Miss bool `json:",omitempty"` // cache miss + OutputID []byte `json:",omitempty"` + Size int64 `json:",omitempty"` // in bytes + Time *time.Time `json:",omitempty"` // an Entry.Time; when the object was added to the docs + + // DiskPath is the absolute path on disk of the ObjectID corresponding + // a "get" request's ActionID (on cache hit) or a "put" request's + // provided ObjectID. + DiskPath string `json:",omitempty"` +} + +// startCacheProg starts the prog binary (with optional space-separated flags) +// and returns a Cache implementation that talks to it. +// +// It blocks a few seconds to wait for the child process to successfully start +// and advertise its capabilities. +func startCacheProg(progAndArgs string, fuzzDirCache Cache) Cache { + if fuzzDirCache == nil { + panic("missing fuzzDirCache") + } + args, err := quoted.Split(progAndArgs) + if err != nil { + base.Fatalf("GOCACHEPROG args: %v", err) + } + var prog string + if len(args) > 0 { + prog = args[0] + args = args[1:] + } + + ctx, ctxCancel := context.WithCancel(context.Background()) + + cmd := exec.CommandContext(ctx, prog, args...) 
+ out, err := cmd.StdoutPipe() + if err != nil { + base.Fatalf("StdoutPipe to GOCACHEPROG: %v", err) + } + in, err := cmd.StdinPipe() + if err != nil { + base.Fatalf("StdinPipe to GOCACHEPROG: %v", err) + } + cmd.Stderr = os.Stderr + cmd.Cancel = in.Close + + if err := cmd.Start(); err != nil { + base.Fatalf("error starting GOCACHEPROG program %q: %v", prog, err) + } + + pc := &ProgCache{ + ctx: ctx, + ctxCancel: ctxCancel, + fuzzDirCache: fuzzDirCache, + cmd: cmd, + stdout: out, + stdin: in, + bw: bufio.NewWriter(in), + inFlight: make(map[int64]chan<- *ProgResponse), + outputFile: make(map[OutputID]string), + readLoopDone: make(chan struct{}), + } + + // Register our interest in the initial protocol message from the child to + // us, saying what it can do. + capResc := make(chan *ProgResponse, 1) + pc.inFlight[0] = capResc + + pc.jenc = json.NewEncoder(pc.bw) + go pc.readLoop(pc.readLoopDone) + + // Give the child process a few seconds to report its capabilities. This + // should be instant and not require any slow work by the program. 
+ timer := time.NewTicker(5 * time.Second) + defer timer.Stop() + for { + select { + case <-timer.C: + log.Printf("# still waiting for GOCACHEPROG %v ...", prog) + case capRes := <-capResc: + can := map[ProgCmd]bool{} + for _, cmd := range capRes.KnownCommands { + can[cmd] = true + } + if len(can) == 0 { + base.Fatalf("GOCACHEPROG %v declared no supported commands", prog) + } + pc.can = can + return pc + } + } +} + +func (c *ProgCache) readLoop(readLoopDone chan<- struct{}) { + defer close(readLoopDone) + jd := json.NewDecoder(c.stdout) + for { + res := new(ProgResponse) + if err := jd.Decode(res); err != nil { + if c.closing.Load() { + return // quietly + } + if err == io.EOF { + c.mu.Lock() + inFlight := len(c.inFlight) + c.mu.Unlock() + base.Fatalf("GOCACHEPROG exited pre-Close with %v pending requests", inFlight) + } + base.Fatalf("error reading JSON from GOCACHEPROG: %v", err) + } + c.mu.Lock() + ch, ok := c.inFlight[res.ID] + delete(c.inFlight, res.ID) + c.mu.Unlock() + if ok { + ch <- res + } else { + base.Fatalf("GOCACHEPROG sent response for unknown request ID %v", res.ID) + } + } +} + +func (c *ProgCache) send(ctx context.Context, req *ProgRequest) (*ProgResponse, error) { + resc := make(chan *ProgResponse, 1) + if err := c.writeToChild(req, resc); err != nil { + return nil, err + } + select { + case res := <-resc: + if res.Err != "" { + return nil, errors.New(res.Err) + } + return res, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} + +func (c *ProgCache) writeToChild(req *ProgRequest, resc chan<- *ProgResponse) (err error) { + c.mu.Lock() + c.nextID++ + req.ID = c.nextID + c.inFlight[req.ID] = resc + c.mu.Unlock() + + defer func() { + if err != nil { + c.mu.Lock() + delete(c.inFlight, req.ID) + c.mu.Unlock() + } + }() + + c.writeMu.Lock() + defer c.writeMu.Unlock() + + if err := c.jenc.Encode(req); err != nil { + return err + } + if err := c.bw.WriteByte('\n'); err != nil { + return err + } + if req.Body != nil && req.BodySize > 0 { + if err := 
c.bw.WriteByte('"'); err != nil { + return err + } + e := base64.NewEncoder(base64.StdEncoding, c.bw) + wrote, err := io.Copy(e, req.Body) + if err != nil { + return err + } + if err := e.Close(); err != nil { + // Propagate the encoder error: returning nil here would report success + // even though the base64 body was never fully flushed to the child. + return err + } + if wrote != req.BodySize { + return fmt.Errorf("short write writing body to GOCACHEPROG for action %x, object %x: wrote %v; expected %v", + req.ActionID, req.ObjectID, wrote, req.BodySize) + } + if _, err := c.bw.WriteString("\"\n"); err != nil { + return err + } + } + if err := c.bw.Flush(); err != nil { + return err + } + return nil +} + +func (c *ProgCache) Get(a ActionID) (Entry, error) { + if !c.can[cmdGet] { + // They can't do a "get". Maybe they're a write-only cache. + // + // TODO(bradfitz,bcmills): figure out the proper error type here. Maybe + // errors.ErrUnsupported? Is entryNotFoundError even appropriate? There + // might be places where we rely on the fact that a recent Put can be + // read through a corresponding Get. Audit callers and check, and document + // error types on the Cache interface. + return Entry{}, &entryNotFoundError{} + } + res, err := c.send(c.ctx, &ProgRequest{ + Command: cmdGet, + ActionID: a[:], + }) + if err != nil { + return Entry{}, err // TODO(bradfitz): or entryNotFoundError? Audit callers. 
+ } + if res.Miss { + return Entry{}, &entryNotFoundError{} + } + e := Entry{ + Size: res.Size, + } + if res.Time != nil { + e.Time = *res.Time + } else { + e.Time = time.Now() + } + if res.DiskPath == "" { + return Entry{}, &entryNotFoundError{errors.New("GOCACHEPROG didn't populate DiskPath on get hit")} + } + if copy(e.OutputID[:], res.OutputID) != len(res.OutputID) { + return Entry{}, &entryNotFoundError{errors.New("incomplete ProgResponse OutputID")} + } + c.noteOutputFile(e.OutputID, res.DiskPath) + return e, nil +} + +func (c *ProgCache) noteOutputFile(o OutputID, diskPath string) { + c.mu.Lock() + defer c.mu.Unlock() + c.outputFile[o] = diskPath +} + +func (c *ProgCache) OutputFile(o OutputID) string { + c.mu.Lock() + defer c.mu.Unlock() + return c.outputFile[o] +} + +func (c *ProgCache) Put(a ActionID, file io.ReadSeeker) (_ OutputID, size int64, _ error) { + // Compute output ID. + h := sha256.New() + if _, err := file.Seek(0, 0); err != nil { + return OutputID{}, 0, err + } + size, err := io.Copy(h, file) + if err != nil { + return OutputID{}, 0, err + } + var out OutputID + h.Sum(out[:0]) + + if _, err := file.Seek(0, 0); err != nil { + return OutputID{}, 0, err + } + + if !c.can[cmdPut] { + // Child is a read-only cache. Do nothing. + return out, size, nil + } + + res, err := c.send(c.ctx, &ProgRequest{ + Command: cmdPut, + ActionID: a[:], + ObjectID: out[:], + Body: file, + BodySize: size, + }) + if err != nil { + return OutputID{}, 0, err + } + if res.DiskPath == "" { + return OutputID{}, 0, errors.New("GOCACHEPROG didn't return DiskPath in put response") + } + c.noteOutputFile(out, res.DiskPath) + return out, size, err +} + +func (c *ProgCache) Close() error { + c.closing.Store(true) + var err error + + // First write a "close" message to the child so it can exit nicely + // and clean up if it wants. Only after that exchange do we cancel + // the context that kills the process. 
+ if c.can[cmdClose] { + _, err = c.send(c.ctx, &ProgRequest{Command: cmdClose}) + } + c.ctxCancel() + <-c.readLoopDone + return err +} + +func (c *ProgCache) FuzzDir() string { + // TODO(bradfitz): figure out what to do here. For now just use the + // disk-based default. + return c.fuzzDirCache.FuzzDir() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/cfg/bench_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/cfg/bench_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2dd99319fc4cecf3e27e7e54e11812ed74742821 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/cfg/bench_test.go @@ -0,0 +1,21 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cfg + +import ( + "internal/testenv" + "testing" +) + +func BenchmarkLookPath(b *testing.B) { + testenv.MustHaveExecPath(b, "go") + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := LookPath("go") + if err != nil { + b.Fatal(err) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/cfg/cfg.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/cfg/cfg.go new file mode 100644 index 0000000000000000000000000000000000000000..a8daa2dfc369ea45a6a47a05ad4157bb2f7ee9bb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/cfg/cfg.go @@ -0,0 +1,618 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cfg holds configuration shared by multiple parts +// of the go command. 
+package cfg + +import ( + "bytes" + "context" + "fmt" + "go/build" + "internal/buildcfg" + "internal/cfg" + "io" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + + "cmd/go/internal/fsys" +) + +// Global build parameters (used during package load) +var ( + Goos = envOr("GOOS", build.Default.GOOS) + Goarch = envOr("GOARCH", build.Default.GOARCH) + + ExeSuffix = exeSuffix() + + // ModulesEnabled specifies whether the go command is running + // in module-aware mode (as opposed to GOPATH mode). + // It is equal to modload.Enabled, but not all packages can import modload. + ModulesEnabled bool +) + +func exeSuffix() string { + if Goos == "windows" { + return ".exe" + } + return "" +} + +// Configuration for tools installed to GOROOT/bin. +// Normally these match runtime.GOOS and runtime.GOARCH, +// but when testing a cross-compiled cmd/go they will +// indicate the GOOS and GOARCH of the installed cmd/go +// rather than the test binary. +var ( + installedGOOS string + installedGOARCH string +) + +// ToolExeSuffix returns the suffix for executables installed +// in build.ToolDir. +func ToolExeSuffix() string { + if installedGOOS == "windows" { + return ".exe" + } + return "" +} + +// These are general "build flags" used by build and other commands. 
+var ( + BuildA bool // -a flag + BuildBuildmode string // -buildmode flag + BuildBuildvcs = "auto" // -buildvcs flag: "true", "false", or "auto" + BuildContext = defaultContext() + BuildMod string // -mod flag + BuildModExplicit bool // whether -mod was set explicitly + BuildModReason string // reason -mod was set, if set by default + BuildLinkshared bool // -linkshared flag + BuildMSan bool // -msan flag + BuildASan bool // -asan flag + BuildCover bool // -cover flag + BuildCoverMode string // -covermode flag + BuildCoverPkg []string // -coverpkg flag + BuildN bool // -n flag + BuildO string // -o flag + BuildP = runtime.GOMAXPROCS(0) // -p flag + BuildPGO string // -pgo flag + BuildPkgdir string // -pkgdir flag + BuildRace bool // -race flag + BuildToolexec []string // -toolexec flag + BuildToolchainName string + BuildToolchainCompiler func() string + BuildToolchainLinker func() string + BuildTrimpath bool // -trimpath flag + BuildV bool // -v flag + BuildWork bool // -work flag + BuildX bool // -x flag + + ModCacheRW bool // -modcacherw flag + ModFile string // -modfile flag + + CmdName string // "build", "install", "list", "mod tidy", etc. + + DebugActiongraph string // -debug-actiongraph flag (undocumented, unstable) + DebugTrace string // -debug-trace flag + DebugRuntimeTrace string // -debug-runtime-trace flag (undocumented, unstable) + + // GoPathError is set when GOPATH is not set. it contains an + // explanation why GOPATH is unset. + GoPathError string +) + +func defaultContext() build.Context { + ctxt := build.Default + + ctxt.JoinPath = filepath.Join // back door to say "do not use go command" + + // Override defaults computed in go/build with defaults + // from go environment configuration file, if known. + ctxt.GOPATH = envOr("GOPATH", gopath(ctxt)) + ctxt.GOOS = Goos + ctxt.GOARCH = Goarch + + // Clear the GOEXPERIMENT-based tool tags, which we will recompute later. 
+ var save []string + for _, tag := range ctxt.ToolTags { + if !strings.HasPrefix(tag, "goexperiment.") { + save = append(save, tag) + } + } + ctxt.ToolTags = save + + // The go/build rule for whether cgo is enabled is: + // 1. If $CGO_ENABLED is set, respect it. + // 2. Otherwise, if this is a cross-compile, disable cgo. + // 3. Otherwise, use built-in default for GOOS/GOARCH. + // Recreate that logic here with the new GOOS/GOARCH setting. + if v := Getenv("CGO_ENABLED"); v == "0" || v == "1" { + ctxt.CgoEnabled = v[0] == '1' + } else if ctxt.GOOS != runtime.GOOS || ctxt.GOARCH != runtime.GOARCH { + ctxt.CgoEnabled = false + } else { + // Use built-in default cgo setting for GOOS/GOARCH. + // Note that ctxt.GOOS/GOARCH are derived from the preference list + // (1) environment, (2) go/env file, (3) runtime constants, + // while go/build.Default.GOOS/GOARCH are derived from the preference list + // (1) environment, (2) runtime constants. + // + // We know ctxt.GOOS/GOARCH == runtime.GOOS/GOARCH; + // no matter how that happened, go/build.Default will make the + // same decision (either the environment variables are set explicitly + // to match the runtime constants, or else they are unset, in which + // case go/build falls back to the runtime constants), so + // go/build.Default.GOOS/GOARCH == runtime.GOOS/GOARCH. + // So ctxt.CgoEnabled (== go/build.Default.CgoEnabled) is correct + // as is and can be left unmodified. + // + // All that said, starting in Go 1.20 we layer one more rule + // on top of the go/build decision: if CC is unset and + // the default C compiler we'd look for is not in the PATH, + // we automatically default cgo to off. + // This makes go builds work automatically on systems + // without a C compiler installed. 
+ if ctxt.CgoEnabled { + if os.Getenv("CC") == "" { + cc := DefaultCC(ctxt.GOOS, ctxt.GOARCH) + if _, err := LookPath(cc); err != nil { + ctxt.CgoEnabled = false + } + } + } + } + + ctxt.OpenFile = func(path string) (io.ReadCloser, error) { + return fsys.Open(path) + } + ctxt.ReadDir = fsys.ReadDir + ctxt.IsDir = func(path string) bool { + isDir, err := fsys.IsDir(path) + return err == nil && isDir + } + + return ctxt +} + +func init() { + SetGOROOT(Getenv("GOROOT"), false) + BuildToolchainCompiler = func() string { return "missing-compiler" } + BuildToolchainLinker = func() string { return "missing-linker" } +} + +// SetGOROOT sets GOROOT and associated variables to the given values. +// +// If isTestGo is true, build.ToolDir is set based on the TESTGO_GOHOSTOS and +// TESTGO_GOHOSTARCH environment variables instead of runtime.GOOS and +// runtime.GOARCH. +func SetGOROOT(goroot string, isTestGo bool) { + BuildContext.GOROOT = goroot + + GOROOT = goroot + if goroot == "" { + GOROOTbin = "" + GOROOTpkg = "" + GOROOTsrc = "" + } else { + GOROOTbin = filepath.Join(goroot, "bin") + GOROOTpkg = filepath.Join(goroot, "pkg") + GOROOTsrc = filepath.Join(goroot, "src") + } + GOROOT_FINAL = findGOROOT_FINAL(goroot) + + installedGOOS = runtime.GOOS + installedGOARCH = runtime.GOARCH + if isTestGo { + if testOS := os.Getenv("TESTGO_GOHOSTOS"); testOS != "" { + installedGOOS = testOS + } + if testArch := os.Getenv("TESTGO_GOHOSTARCH"); testArch != "" { + installedGOARCH = testArch + } + } + + if runtime.Compiler != "gccgo" { + if goroot == "" { + build.ToolDir = "" + } else { + // Note that we must use the installed OS and arch here: the tool + // directory does not move based on environment variables, and even if we + // are testing a cross-compiled cmd/go all of the installed packages and + // tools would have been built using the native compiler and linker (and + // would spuriously appear stale if we used a cross-compiled compiler and + // linker). 
+ // + // This matches the initialization of ToolDir in go/build, except for + // using ctxt.GOROOT and the installed GOOS and GOARCH rather than the + // GOROOT, GOOS, and GOARCH reported by the runtime package. + build.ToolDir = filepath.Join(GOROOTpkg, "tool", installedGOOS+"_"+installedGOARCH) + } + } +} + +// Experiment configuration. +var ( + // RawGOEXPERIMENT is the GOEXPERIMENT value set by the user. + RawGOEXPERIMENT = envOr("GOEXPERIMENT", buildcfg.DefaultGOEXPERIMENT) + // CleanGOEXPERIMENT is the minimal GOEXPERIMENT value needed to reproduce the + // experiments enabled by RawGOEXPERIMENT. + CleanGOEXPERIMENT = RawGOEXPERIMENT + + Experiment *buildcfg.ExperimentFlags + ExperimentErr error +) + +func init() { + Experiment, ExperimentErr = buildcfg.ParseGOEXPERIMENT(Goos, Goarch, RawGOEXPERIMENT) + if ExperimentErr != nil { + return + } + + // GOEXPERIMENT is valid, so convert it to canonical form. + CleanGOEXPERIMENT = Experiment.String() + + // Add build tags based on the experiments in effect. + exps := Experiment.Enabled() + expTags := make([]string, 0, len(exps)+len(BuildContext.ToolTags)) + for _, exp := range exps { + expTags = append(expTags, "goexperiment."+exp) + } + BuildContext.ToolTags = append(expTags, BuildContext.ToolTags...) +} + +// An EnvVar is an environment variable Name=Value. +type EnvVar struct { + Name string + Value string +} + +// OrigEnv is the original environment of the program at startup. +var OrigEnv []string + +// CmdEnv is the new environment for running go tool commands. +// User binaries (during go test or go run) are run with OrigEnv, +// not CmdEnv. +var CmdEnv []EnvVar + +var envCache struct { + once sync.Once + m map[string]string +} + +// EnvFile returns the name of the Go environment configuration file. 
+func EnvFile() (string, error) { + if file := os.Getenv("GOENV"); file != "" { + if file == "off" { + return "", fmt.Errorf("GOENV=off") + } + return file, nil + } + dir, err := os.UserConfigDir() + if err != nil { + return "", err + } + if dir == "" { + return "", fmt.Errorf("missing user-config dir") + } + return filepath.Join(dir, "go/env"), nil +} + +func initEnvCache() { + envCache.m = make(map[string]string) + if file, _ := EnvFile(); file != "" { + readEnvFile(file, "user") + } + goroot := findGOROOT(envCache.m["GOROOT"]) + if goroot != "" { + readEnvFile(filepath.Join(goroot, "go.env"), "GOROOT") + } + + // Save the goroot for func init calling SetGOROOT, + // and also overwrite anything that might have been in go.env. + // It makes no sense for GOROOT/go.env to specify + // a different GOROOT. + envCache.m["GOROOT"] = goroot +} + +func readEnvFile(file string, source string) { + if file == "" { + return + } + data, err := os.ReadFile(file) + if err != nil { + return + } + + for len(data) > 0 { + // Get next line. + line := data + i := bytes.IndexByte(data, '\n') + if i >= 0 { + line, data = line[:i], data[i+1:] + } else { + data = nil + } + + i = bytes.IndexByte(line, '=') + if i < 0 || line[0] < 'A' || 'Z' < line[0] { + // Line is missing = (or empty) or a comment or not a valid env name. Ignore. + // This should not happen in the user file, since the file should be maintained almost + // exclusively by "go env -w", but better to silently ignore than to make + // the go command unusable just because somehow the env file has + // gotten corrupted. + // In the GOROOT/go.env file, we expect comments. + continue + } + key, val := line[:i], line[i+1:] + + if source == "GOROOT" { + // In the GOROOT/go.env file, do not overwrite fields loaded from the user's go/env file. + if _, ok := envCache.m[string(key)]; ok { + continue + } + } + envCache.m[string(key)] = string(val) + } +} + +// Getenv gets the value for the configuration key. 
+// It consults the operating system environment +// and then the go/env file. +// If Getenv is called for a key that cannot be set +// in the go/env file (for example GODEBUG), it panics. +// This ensures that CanGetenv is accurate, so that +// 'go env -w' stays in sync with what Getenv can retrieve. +func Getenv(key string) string { + if !CanGetenv(key) { + switch key { + case "CGO_TEST_ALLOW", "CGO_TEST_DISALLOW", "CGO_test_ALLOW", "CGO_test_DISALLOW": + // used by internal/work/security_test.go; allow + default: + panic("internal error: invalid Getenv " + key) + } + } + val := os.Getenv(key) + if val != "" { + return val + } + envCache.once.Do(initEnvCache) + return envCache.m[key] +} + +// CanGetenv reports whether key is a valid go/env configuration key. +func CanGetenv(key string) bool { + envCache.once.Do(initEnvCache) + if _, ok := envCache.m[key]; ok { + // Assume anything in the user file or go.env file is valid. + return true + } + return strings.Contains(cfg.KnownEnv, "\t"+key+"\n") +} + +var ( + GOROOT string + + // Either empty or produced by filepath.Join(GOROOT, …). + GOROOTbin string + GOROOTpkg string + GOROOTsrc string + + GOROOT_FINAL string + + GOBIN = Getenv("GOBIN") + GOMODCACHE = envOr("GOMODCACHE", gopathDir("pkg/mod")) + + // Used in envcmd.MkEnv and build ID computations. 
+ GOARM = envOr("GOARM", fmt.Sprint(buildcfg.GOARM)) + GO386 = envOr("GO386", buildcfg.GO386) + GOAMD64 = envOr("GOAMD64", fmt.Sprintf("%s%d", "v", buildcfg.GOAMD64)) + GOMIPS = envOr("GOMIPS", buildcfg.GOMIPS) + GOMIPS64 = envOr("GOMIPS64", buildcfg.GOMIPS64) + GOPPC64 = envOr("GOPPC64", fmt.Sprintf("%s%d", "power", buildcfg.GOPPC64)) + GOWASM = envOr("GOWASM", fmt.Sprint(buildcfg.GOWASM)) + + GOPROXY = envOr("GOPROXY", "") + GOSUMDB = envOr("GOSUMDB", "") + GOPRIVATE = Getenv("GOPRIVATE") + GONOPROXY = envOr("GONOPROXY", GOPRIVATE) + GONOSUMDB = envOr("GONOSUMDB", GOPRIVATE) + GOINSECURE = Getenv("GOINSECURE") + GOVCS = Getenv("GOVCS") +) + +var SumdbDir = gopathDir("pkg/sumdb") + +// GetArchEnv returns the name and setting of the +// GOARCH-specific architecture environment variable. +// If the current architecture has no GOARCH-specific variable, +// GetArchEnv returns empty key and value. +func GetArchEnv() (key, val string) { + switch Goarch { + case "arm": + return "GOARM", GOARM + case "386": + return "GO386", GO386 + case "amd64": + return "GOAMD64", GOAMD64 + case "mips", "mipsle": + return "GOMIPS", GOMIPS + case "mips64", "mips64le": + return "GOMIPS64", GOMIPS64 + case "ppc64", "ppc64le": + return "GOPPC64", GOPPC64 + case "wasm": + return "GOWASM", GOWASM + } + return "", "" +} + +// envOr returns Getenv(key) if set, or else def. +func envOr(key, def string) string { + val := Getenv(key) + if val == "" { + val = def + } + return val +} + +// There is a copy of findGOROOT, isSameDir, and isGOROOT in +// x/tools/cmd/godoc/goroot.go. +// Try to keep them in sync for now. + +// findGOROOT returns the GOROOT value, using either an explicitly +// provided environment variable, a GOROOT that contains the current +// os.Executable value, or else the GOROOT that the binary was built +// with from runtime.GOROOT(). +// +// There is a copy of this code in x/tools/cmd/godoc/goroot.go. 
+func findGOROOT(env string) string { + if env == "" { + // Not using Getenv because findGOROOT is called + // to find the GOROOT/go.env file. initEnvCache + // has passed in the setting from the user go/env file. + env = os.Getenv("GOROOT") + } + if env != "" { + return filepath.Clean(env) + } + def := "" + if r := runtime.GOROOT(); r != "" { + def = filepath.Clean(r) + } + if runtime.Compiler == "gccgo" { + // gccgo has no real GOROOT, and it certainly doesn't + // depend on the executable's location. + return def + } + + // canonical returns a directory path that represents + // the same directory as dir, + // preferring the spelling in def if the two are the same. + canonical := func(dir string) string { + if isSameDir(def, dir) { + return def + } + return dir + } + + exe, err := os.Executable() + if err == nil { + exe, err = filepath.Abs(exe) + if err == nil { + // cmd/go may be installed in GOROOT/bin or GOROOT/bin/GOOS_GOARCH, + // depending on whether it was cross-compiled with a different + // GOHOSTOS (see https://go.dev/issue/62119). Try both. + if dir := filepath.Join(exe, "../.."); isGOROOT(dir) { + return canonical(dir) + } + if dir := filepath.Join(exe, "../../.."); isGOROOT(dir) { + return canonical(dir) + } + + // Depending on what was passed on the command line, it is possible + // that os.Executable is a symlink (like /usr/local/bin/go) referring + // to a binary installed in a real GOROOT elsewhere + // (like /usr/lib/go/bin/go). + // Try to find that GOROOT by resolving the symlinks. + exe, err = filepath.EvalSymlinks(exe) + if err == nil { + if dir := filepath.Join(exe, "../.."); isGOROOT(dir) { + return canonical(dir) + } + if dir := filepath.Join(exe, "../../.."); isGOROOT(dir) { + return canonical(dir) + } + } + } + } + return def +} + +func findGOROOT_FINAL(goroot string) string { + // $GOROOT_FINAL is only for use during make.bash + // so it is not settable using go/env, so we use os.Getenv here. 
+ def := goroot + if env := os.Getenv("GOROOT_FINAL"); env != "" { + def = filepath.Clean(env) + } + return def +} + +// isSameDir reports whether dir1 and dir2 are the same directory. +func isSameDir(dir1, dir2 string) bool { + if dir1 == dir2 { + return true + } + info1, err1 := os.Stat(dir1) + info2, err2 := os.Stat(dir2) + return err1 == nil && err2 == nil && os.SameFile(info1, info2) +} + +// isGOROOT reports whether path looks like a GOROOT. +// +// It does this by looking for the path/pkg/tool directory, +// which is necessary for useful operation of the cmd/go tool, +// and is not typically present in a GOPATH. +// +// There is a copy of this code in x/tools/cmd/godoc/goroot.go. +func isGOROOT(path string) bool { + stat, err := os.Stat(filepath.Join(path, "pkg", "tool")) + if err != nil { + return false + } + return stat.IsDir() +} + +func gopathDir(rel string) string { + list := filepath.SplitList(BuildContext.GOPATH) + if len(list) == 0 || list[0] == "" { + return "" + } + return filepath.Join(list[0], rel) +} + +func gopath(ctxt build.Context) string { + if len(ctxt.GOPATH) > 0 { + return ctxt.GOPATH + } + env := "HOME" + if runtime.GOOS == "windows" { + env = "USERPROFILE" + } else if runtime.GOOS == "plan9" { + env = "home" + } + if home := os.Getenv(env); home != "" { + def := filepath.Join(home, "go") + if filepath.Clean(def) == filepath.Clean(runtime.GOROOT()) { + GoPathError = "cannot set GOROOT as GOPATH" + } + // Return the computed default ($HOME/go); returning "" here would + // silently disable the default GOPATH even when HOME is set. + return def + } + GoPathError = fmt.Sprintf("%s is not set", env) + return "" +} + +// WithBuildXWriter returns a Context in which BuildX output is written +// to given io.Writer. +func WithBuildXWriter(ctx context.Context, xLog io.Writer) context.Context { + return context.WithValue(ctx, buildXContextKey{}, xLog) +} + +type buildXContextKey struct{} + +// BuildXWriter returns nil if BuildX is false, or +// the writer to which BuildX output should be written otherwise. 
+func BuildXWriter(ctx context.Context) (io.Writer, bool) { + if !BuildX { + return nil, false + } + if v := ctx.Value(buildXContextKey{}); v != nil { + return v.(io.Writer), true + } + return os.Stderr, true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/cfg/lookpath.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/cfg/lookpath.go new file mode 100644 index 0000000000000000000000000000000000000000..1b0fdc742620d16b348944997609773450a971e9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/cfg/lookpath.go @@ -0,0 +1,21 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cfg + +import ( + "cmd/go/internal/par" + "os/exec" +) + +var lookPathCache par.ErrCache[string, string] + +// LookPath wraps exec.LookPath and caches the result +// which can be called by multiple Goroutines at the same time. +func LookPath(file string) (path string, err error) { + return lookPathCache.Do(file, + func() (string, error) { + return exec.LookPath(file) + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/cfg/zdefaultcc.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/cfg/zdefaultcc.go new file mode 100644 index 0000000000000000000000000000000000000000..c03a1b0d2a45784d9b9a91f066fdada35ed09f1b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/cfg/zdefaultcc.go @@ -0,0 +1,23 @@ +// Code generated by go tool dist; DO NOT EDIT. 
+ +package cfg + +const DefaultPkgConfig = `pkg-config` +func DefaultCC(goos, goarch string) string { + switch goos+`/`+goarch { + } + switch goos { + case "darwin", "ios", "freebsd", "openbsd": + return "clang" + } + return "gcc" +} +func DefaultCXX(goos, goarch string) string { + switch goos+`/`+goarch { + } + switch goos { + case "darwin", "ios", "freebsd", "openbsd": + return "clang++" + } + return "g++" +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/clean/clean.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/clean/clean.go new file mode 100644 index 0000000000000000000000000000000000000000..b021b784dada5cd0ea76b9d2d80170aa46116c92 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/clean/clean.go @@ -0,0 +1,401 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package clean implements the “go clean” command. +package clean + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" + + "cmd/go/internal/base" + "cmd/go/internal/cache" + "cmd/go/internal/cfg" + "cmd/go/internal/load" + "cmd/go/internal/lockedfile" + "cmd/go/internal/modfetch" + "cmd/go/internal/modload" + "cmd/go/internal/str" + "cmd/go/internal/work" +) + +var CmdClean = &base.Command{ + UsageLine: "go clean [clean flags] [build flags] [packages]", + Short: "remove object files and cached files", + Long: ` +Clean removes object files from package source directories. +The go command builds most objects in a temporary directory, +so go clean is mainly concerned with object files left by other +tools or by manual invocations of go build. 
+ +If a package argument is given or the -i or -r flag is set, +clean removes the following files from each of the +source directories corresponding to the import paths: + + _obj/ old object directory, left from Makefiles + _test/ old test directory, left from Makefiles + _testmain.go old gotest file, left from Makefiles + test.out old test log, left from Makefiles + build.out old test log, left from Makefiles + *.[568ao] object files, left from Makefiles + + DIR(.exe) from go build + DIR.test(.exe) from go test -c + MAINFILE(.exe) from go build MAINFILE.go + *.so from SWIG + +In the list, DIR represents the final path element of the +directory, and MAINFILE is the base name of any Go source +file in the directory that is not included when building +the package. + +The -i flag causes clean to remove the corresponding installed +archive or binary (what 'go install' would create). + +The -n flag causes clean to print the remove commands it would execute, +but not run them. + +The -r flag causes clean to be applied recursively to all the +dependencies of the packages named by the import paths. + +The -x flag causes clean to print remove commands as it executes them. + +The -cache flag causes clean to remove the entire go build cache. + +The -testcache flag causes clean to expire all test results in the +go build cache. + +The -modcache flag causes clean to remove the entire module +download cache, including unpacked source code of versioned +dependencies. + +The -fuzzcache flag causes clean to remove files stored in the Go build +cache for fuzz testing. The fuzzing engine caches files that expand +code coverage, so removing them may make fuzzing less effective until +new inputs are found that provide the same coverage. These files are +distinct from those stored in testdata directory; clean does not remove +those files. + +For more about build flags, see 'go help build'. + +For more about specifying packages, see 'go help packages'. 
+ `, +} + +var ( + cleanI bool // clean -i flag + cleanR bool // clean -r flag + cleanCache bool // clean -cache flag + cleanFuzzcache bool // clean -fuzzcache flag + cleanModcache bool // clean -modcache flag + cleanTestcache bool // clean -testcache flag +) + +func init() { + // break init cycle + CmdClean.Run = runClean + + CmdClean.Flag.BoolVar(&cleanI, "i", false, "") + CmdClean.Flag.BoolVar(&cleanR, "r", false, "") + CmdClean.Flag.BoolVar(&cleanCache, "cache", false, "") + CmdClean.Flag.BoolVar(&cleanFuzzcache, "fuzzcache", false, "") + CmdClean.Flag.BoolVar(&cleanModcache, "modcache", false, "") + CmdClean.Flag.BoolVar(&cleanTestcache, "testcache", false, "") + + // -n and -x are important enough to be + // mentioned explicitly in the docs but they + // are part of the build flags. + + work.AddBuildFlags(CmdClean, work.DefaultBuildFlags) +} + +func runClean(ctx context.Context, cmd *base.Command, args []string) { + if len(args) > 0 { + cacheFlag := "" + switch { + case cleanCache: + cacheFlag = "-cache" + case cleanTestcache: + cacheFlag = "-testcache" + case cleanFuzzcache: + cacheFlag = "-fuzzcache" + case cleanModcache: + cacheFlag = "-modcache" + } + if cacheFlag != "" { + base.Fatalf("go: clean %s cannot be used with package arguments", cacheFlag) + } + } + + // golang.org/issue/29925: only load packages before cleaning if + // either the flags and arguments explicitly imply a package, + // or no other target (such as a cache) was requested to be cleaned. + cleanPkg := len(args) > 0 || cleanI || cleanR + if (!modload.Enabled() || modload.HasModRoot()) && + !cleanCache && !cleanModcache && !cleanTestcache && !cleanFuzzcache { + cleanPkg = true + } + + if cleanPkg { + for _, pkg := range load.PackagesAndErrors(ctx, load.PackageOpts{}, args) { + clean(pkg) + } + } + + sh := work.NewShell("", fmt.Print) + + if cleanCache { + dir := cache.DefaultDir() + if dir != "off" { + // Remove the cache subdirectories but not the top cache directory. 
+ // The top cache directory may have been created with special permissions + // and not something that we want to remove. Also, we'd like to preserve + // the access log for future analysis, even if the cache is cleared. + subdirs, _ := filepath.Glob(filepath.Join(str.QuoteGlob(dir), "[0-9a-f][0-9a-f]")) + printedErrors := false + if len(subdirs) > 0 { + if err := sh.RemoveAll(subdirs...); err != nil && !printedErrors { + printedErrors = true + base.Error(err) + } + } + + logFile := filepath.Join(dir, "log.txt") + if err := sh.RemoveAll(logFile); err != nil && !printedErrors { + printedErrors = true + base.Error(err) + } + } + } + + if cleanTestcache && !cleanCache { + // Instead of walking through the entire cache looking for test results, + // we write a file to the cache indicating that all test results from before + // right now are to be ignored. + dir := cache.DefaultDir() + if dir != "off" { + f, err := lockedfile.Edit(filepath.Join(dir, "testexpire.txt")) + if err == nil { + now := time.Now().UnixNano() + buf, _ := io.ReadAll(f) + prev, _ := strconv.ParseInt(strings.TrimSpace(string(buf)), 10, 64) + if now > prev { + if err = f.Truncate(0); err == nil { + if _, err = f.Seek(0, 0); err == nil { + _, err = fmt.Fprintf(f, "%d\n", now) + } + } + } + if closeErr := f.Close(); err == nil { + err = closeErr + } + } + if err != nil { + if _, statErr := os.Stat(dir); !os.IsNotExist(statErr) { + base.Error(err) + } + } + } + } + + if cleanModcache { + if cfg.GOMODCACHE == "" { + base.Fatalf("go: cannot clean -modcache without a module cache") + } + if cfg.BuildN || cfg.BuildX { + sh.ShowCmd("", "rm -rf %s", cfg.GOMODCACHE) + } + if !cfg.BuildN { + if err := modfetch.RemoveAll(cfg.GOMODCACHE); err != nil { + base.Error(err) + } + } + } + + if cleanFuzzcache { + fuzzDir := cache.Default().FuzzDir() + if err := sh.RemoveAll(fuzzDir); err != nil { + base.Error(err) + } + } +} + +var cleaned = map[*load.Package]bool{} + +// TODO: These are dregs left by Makefile-based 
builds. +// Eventually, can stop deleting these. +var cleanDir = map[string]bool{ + "_test": true, + "_obj": true, +} + +var cleanFile = map[string]bool{ + "_testmain.go": true, + "test.out": true, + "build.out": true, + "a.out": true, +} + +var cleanExt = map[string]bool{ + ".5": true, + ".6": true, + ".8": true, + ".a": true, + ".o": true, + ".so": true, +} + +func clean(p *load.Package) { + if cleaned[p] { + return + } + cleaned[p] = true + + if p.Dir == "" { + base.Errorf("%v", p.Error) + return + } + dirs, err := os.ReadDir(p.Dir) + if err != nil { + base.Errorf("go: %s: %v", p.Dir, err) + return + } + + sh := work.NewShell("", fmt.Print) + + packageFile := map[string]bool{} + if p.Name != "main" { + // Record which files are not in package main. + // The others are. + keep := func(list []string) { + for _, f := range list { + packageFile[f] = true + } + } + keep(p.GoFiles) + keep(p.CgoFiles) + keep(p.TestGoFiles) + keep(p.XTestGoFiles) + } + + _, elem := filepath.Split(p.Dir) + var allRemove []string + + // Remove dir-named executable only if this is package main. + if p.Name == "main" { + allRemove = append(allRemove, + elem, + elem+".exe", + p.DefaultExecName(), + p.DefaultExecName()+".exe", + ) + } + + // Remove package test executables. + allRemove = append(allRemove, + elem+".test", + elem+".test.exe", + p.DefaultExecName()+".test", + p.DefaultExecName()+".test.exe", + ) + + // Remove a potential executable, test executable for each .go file in the directory that + // is not part of the directory's package. 
+ for _, dir := range dirs { + name := dir.Name() + if packageFile[name] { + continue + } + + if dir.IsDir() { + continue + } + + if base, found := strings.CutSuffix(name, "_test.go"); found { + allRemove = append(allRemove, base+".test", base+".test.exe") + } + + if base, found := strings.CutSuffix(name, ".go"); found { + // TODO(adg,rsc): check that this .go file is actually + // in "package main", and therefore capable of building + // to an executable file. + allRemove = append(allRemove, base, base+".exe") + } + } + + if cfg.BuildN || cfg.BuildX { + sh.ShowCmd(p.Dir, "rm -f %s", strings.Join(allRemove, " ")) + } + + toRemove := map[string]bool{} + for _, name := range allRemove { + toRemove[name] = true + } + for _, dir := range dirs { + name := dir.Name() + if dir.IsDir() { + // TODO: Remove once Makefiles are forgotten. + if cleanDir[name] { + if err := sh.RemoveAll(filepath.Join(p.Dir, name)); err != nil { + base.Error(err) + } + } + continue + } + + if cfg.BuildN { + continue + } + + if cleanFile[name] || cleanExt[filepath.Ext(name)] || toRemove[name] { + removeFile(filepath.Join(p.Dir, name)) + } + } + + if cleanI && p.Target != "" { + if cfg.BuildN || cfg.BuildX { + sh.ShowCmd("", "rm -f %s", p.Target) + } + if !cfg.BuildN { + removeFile(p.Target) + } + } + + if cleanR { + for _, p1 := range p.Internal.Imports { + clean(p1) + } + } +} + +// removeFile tries to remove file f, if error other than file doesn't exist +// occurs, it will report the error. +func removeFile(f string) { + err := os.Remove(f) + if err == nil || os.IsNotExist(err) { + return + } + // Windows does not allow deletion of a binary file while it is executing. + if runtime.GOOS == "windows" { + // Remove lingering ~ file from last attempt. + if _, err2 := os.Stat(f + "~"); err2 == nil { + os.Remove(f + "~") + } + // Try to move it out of the way. If the move fails, + // which is likely, we'll try again the + // next time we do an install of this binary. 
+ if err2 := os.Rename(f, f+"~"); err2 == nil { + os.Remove(f + "~") + return + } + } + base.Error(err) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/cmdflag/flag.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/cmdflag/flag.go new file mode 100644 index 0000000000000000000000000000000000000000..86e33ea111eb4b432bc453be8fb83b97438c63c1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/cmdflag/flag.go @@ -0,0 +1,122 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cmdflag handles flag processing common to several go tools. +package cmdflag + +import ( + "errors" + "flag" + "fmt" + "strings" +) + +// The flag handling part of go commands such as test is large and distracting. +// We can't use the standard flag package because some of the flags from +// our command line are for us, and some are for the binary we're running, +// and some are for both. + +// ErrFlagTerminator indicates the distinguished token "--", which causes the +// flag package to treat all subsequent arguments as non-flags. +var ErrFlagTerminator = errors.New("flag terminator") + +// A FlagNotDefinedError indicates a flag-like argument that does not correspond +// to any registered flag in a FlagSet. +type FlagNotDefinedError struct { + RawArg string // the original argument, like --foo or -foo=value + Name string + HasValue bool // is this the -foo=value or --foo=value form? + Value string // only provided if HasValue is true +} + +func (e FlagNotDefinedError) Error() string { + return fmt.Sprintf("flag provided but not defined: -%s", e.Name) +} + +// A NonFlagError indicates an argument that is not a syntactically-valid flag. 
+type NonFlagError struct { + RawArg string +} + +func (e NonFlagError) Error() string { + return fmt.Sprintf("not a flag: %q", e.RawArg) +} + +// ParseOne sees if args[0] is present in the given flag set and if so, +// sets its value and returns the flag along with the remaining (unused) arguments. +// +// ParseOne always returns either a non-nil Flag or a non-nil error, +// and always consumes at least one argument (even on error). +// +// Unlike (*flag.FlagSet).Parse, ParseOne does not log its own errors. +func ParseOne(fs *flag.FlagSet, args []string) (f *flag.Flag, remainingArgs []string, err error) { + // This function is loosely derived from (*flag.FlagSet).parseOne. + + raw, args := args[0], args[1:] + arg := raw + if strings.HasPrefix(arg, "--") { + if arg == "--" { + return nil, args, ErrFlagTerminator + } + arg = arg[1:] // reduce two minuses to one + } + + switch arg { + case "-?", "-h", "-help": + return nil, args, flag.ErrHelp + } + if len(arg) < 2 || arg[0] != '-' || arg[1] == '-' || arg[1] == '=' { + return nil, args, NonFlagError{RawArg: raw} + } + + name, value, hasValue := strings.Cut(arg[1:], "=") + + f = fs.Lookup(name) + if f == nil { + return nil, args, FlagNotDefinedError{ + RawArg: raw, + Name: name, + HasValue: hasValue, + Value: value, + } + } + + // Use fs.Set instead of f.Value.Set below so that any subsequent call to + // fs.Visit will correctly visit the flags that have been set. + + failf := func(format string, a ...any) (*flag.Flag, []string, error) { + return f, args, fmt.Errorf(format, a...) + } + + if fv, ok := f.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg + if hasValue { + if err := fs.Set(name, value); err != nil { + return failf("invalid boolean value %q for -%s: %v", value, name, err) + } + } else { + if err := fs.Set(name, "true"); err != nil { + return failf("invalid boolean flag %s: %v", name, err) + } + } + } else { + // It must have a value, which might be the next argument. 
+ if !hasValue && len(args) > 0 { + // value is the next arg + hasValue = true + value, args = args[0], args[1:] + } + if !hasValue { + return failf("flag needs an argument: -%s", name) + } + if err := fs.Set(name, value); err != nil { + return failf("invalid value %q for flag -%s: %v", value, name, err) + } + } + + return f, args, nil +} + +type boolFlag interface { + IsBoolFlag() bool +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/doc/doc.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/doc/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..3b6cd94799ada37541a9eb93a1228e0d87fb26ee --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/doc/doc.go @@ -0,0 +1,134 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package doc implements the “go doc” command. +package doc + +import ( + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "context" +) + +var CmdDoc = &base.Command{ + Run: runDoc, + UsageLine: "go doc [doc flags] [package|[package.]symbol[.methodOrField]]", + CustomFlags: true, + Short: "show documentation for package or symbol", + Long: ` +Doc prints the documentation comments associated with the item identified by its +arguments (a package, const, func, type, var, method, or struct field) +followed by a one-line summary of each of the first-level items "under" +that item (package-level declarations for a package, methods for a type, +etc.). + +Doc accepts zero, one, or two arguments. + +Given no arguments, that is, when run as + + go doc + +it prints the package documentation for the package in the current directory. +If the package is a command (package main), the exported symbols of the package +are elided from the presentation unless the -cmd flag is provided. 
+ +When run with one argument, the argument is treated as a Go-syntax-like +representation of the item to be documented. What the argument selects depends +on what is installed in GOROOT and GOPATH, as well as the form of the argument, +which is schematically one of these: + + go doc + go doc [.] + go doc [.][.] + go doc [.][.] + +The first item in this list matched by the argument is the one whose documentation +is printed. (See the examples below.) However, if the argument starts with a capital +letter it is assumed to identify a symbol or method in the current directory. + +For packages, the order of scanning is determined lexically in breadth-first order. +That is, the package presented is the one that matches the search and is nearest +the root and lexically first at its level of the hierarchy. The GOROOT tree is +always scanned in its entirety before GOPATH. + +If there is no package specified or matched, the package in the current +directory is selected, so "go doc Foo" shows the documentation for symbol Foo in +the current package. + +The package path must be either a qualified path or a proper suffix of a +path. The go tool's usual package mechanism does not apply: package path +elements like . and ... are not implemented by go doc. + +When run with two arguments, the first is a package path (full path or suffix), +and the second is a symbol, or symbol with method or struct field: + + go doc [.] + +In all forms, when matching symbols, lower-case letters in the argument match +either case but upper-case letters match exactly. This means that there may be +multiple matches of a lower-case argument in a package if different symbols have +different cases. If this occurs, documentation for all matches is printed. + +Examples: + go doc + Show documentation for current package. + go doc Foo + Show documentation for Foo in the current package. + (Foo starts with a capital letter so it cannot match + a package path.) 
+ go doc encoding/json + Show documentation for the encoding/json package. + go doc json + Shorthand for encoding/json. + go doc json.Number (or go doc json.number) + Show documentation and method summary for json.Number. + go doc json.Number.Int64 (or go doc json.number.int64) + Show documentation for json.Number's Int64 method. + go doc cmd/doc + Show package docs for the doc command. + go doc -cmd cmd/doc + Show package docs and exported symbols within the doc command. + go doc template.new + Show documentation for html/template's New function. + (html/template is lexically before text/template) + go doc text/template.new # One argument + Show documentation for text/template's New function. + go doc text/template new # Two arguments + Show documentation for text/template's New function. + + At least in the current tree, these invocations all print the + documentation for json.Decoder's Decode method: + + go doc json.Decoder.Decode + go doc json.decoder.decode + go doc json.decode + cd go/src/encoding/json; go doc decode + +Flags: + -all + Show all the documentation for the package. + -c + Respect case when matching symbols. + -cmd + Treat a command (package main) like a regular package. + Otherwise package main's exported symbols are hidden + when showing the package's top-level documentation. + -short + One-line representation for each symbol. + -src + Show the full source code for the symbol. This will + display the full Go source of its declaration and + definition, such as a function definition (including + the body), type declaration or enclosing const + block. The output may therefore include unexported + details. + -u + Show documentation for unexported as well as exported + symbols, methods, and fields. 
+`, +} + +func runDoc(ctx context.Context, cmd *base.Command, args []string) { + base.Run(cfg.BuildToolexec, base.Tool("doc"), args) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/envcmd/env.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/envcmd/env.go new file mode 100644 index 0000000000000000000000000000000000000000..c7c2e83e0f7a9ec9f69c66f002cfb420644a3623 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/envcmd/env.go @@ -0,0 +1,691 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package envcmd implements the “go env” command. +package envcmd + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "go/build" + "internal/buildcfg" + "io" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "unicode" + "unicode/utf8" + + "cmd/go/internal/base" + "cmd/go/internal/cache" + "cmd/go/internal/cfg" + "cmd/go/internal/fsys" + "cmd/go/internal/load" + "cmd/go/internal/modload" + "cmd/go/internal/work" + "cmd/internal/quoted" +) + +var CmdEnv = &base.Command{ + UsageLine: "go env [-json] [-u] [-w] [var ...]", + Short: "print Go environment information", + Long: ` +Env prints Go environment information. + +By default env prints information as a shell script +(on Windows, a batch file). If one or more variable +names is given as arguments, env prints the value of +each named variable on its own line. + +The -json flag prints the environment in JSON format +instead of as a shell script. + +The -u flag requires one or more arguments and unsets +the default setting for the named environment variables, +if one has been set with 'go env -w'. + +The -w flag requires one or more arguments of the +form NAME=VALUE and changes the default settings +of the named environment variables to the given values. + +For more about environment variables, see 'go help environment'. 
+ `, +} + +func init() { + CmdEnv.Run = runEnv // break init cycle + base.AddChdirFlag(&CmdEnv.Flag) + base.AddBuildFlagsNX(&CmdEnv.Flag) +} + +var ( + envJson = CmdEnv.Flag.Bool("json", false, "") + envU = CmdEnv.Flag.Bool("u", false, "") + envW = CmdEnv.Flag.Bool("w", false, "") +) + +func MkEnv() []cfg.EnvVar { + envFile, _ := cfg.EnvFile() + env := []cfg.EnvVar{ + {Name: "GO111MODULE", Value: cfg.Getenv("GO111MODULE")}, + {Name: "GOARCH", Value: cfg.Goarch}, + {Name: "GOBIN", Value: cfg.GOBIN}, + {Name: "GOCACHE", Value: cache.DefaultDir()}, + {Name: "GOENV", Value: envFile}, + {Name: "GOEXE", Value: cfg.ExeSuffix}, + + // List the raw value of GOEXPERIMENT, not the cleaned one. + // The set of default experiments may change from one release + // to the next, so a GOEXPERIMENT setting that is redundant + // with the current toolchain might actually be relevant with + // a different version (for example, when bisecting a regression). + {Name: "GOEXPERIMENT", Value: cfg.RawGOEXPERIMENT}, + + {Name: "GOFLAGS", Value: cfg.Getenv("GOFLAGS")}, + {Name: "GOHOSTARCH", Value: runtime.GOARCH}, + {Name: "GOHOSTOS", Value: runtime.GOOS}, + {Name: "GOINSECURE", Value: cfg.GOINSECURE}, + {Name: "GOMODCACHE", Value: cfg.GOMODCACHE}, + {Name: "GONOPROXY", Value: cfg.GONOPROXY}, + {Name: "GONOSUMDB", Value: cfg.GONOSUMDB}, + {Name: "GOOS", Value: cfg.Goos}, + {Name: "GOPATH", Value: cfg.BuildContext.GOPATH}, + {Name: "GOPRIVATE", Value: cfg.GOPRIVATE}, + {Name: "GOPROXY", Value: cfg.GOPROXY}, + {Name: "GOROOT", Value: cfg.GOROOT}, + {Name: "GOSUMDB", Value: cfg.GOSUMDB}, + {Name: "GOTMPDIR", Value: cfg.Getenv("GOTMPDIR")}, + {Name: "GOTOOLCHAIN", Value: cfg.Getenv("GOTOOLCHAIN")}, + {Name: "GOTOOLDIR", Value: build.ToolDir}, + {Name: "GOVCS", Value: cfg.GOVCS}, + {Name: "GOVERSION", Value: runtime.Version()}, + } + + if work.GccgoBin != "" { + env = append(env, cfg.EnvVar{Name: "GCCGO", Value: work.GccgoBin}) + } else { + env = append(env, cfg.EnvVar{Name: "GCCGO", Value: 
work.GccgoName}) + } + + key, val := cfg.GetArchEnv() + if key != "" { + env = append(env, cfg.EnvVar{Name: key, Value: val}) + } + + cc := cfg.Getenv("CC") + if cc == "" { + cc = cfg.DefaultCC(cfg.Goos, cfg.Goarch) + } + cxx := cfg.Getenv("CXX") + if cxx == "" { + cxx = cfg.DefaultCXX(cfg.Goos, cfg.Goarch) + } + env = append(env, cfg.EnvVar{Name: "AR", Value: envOr("AR", "ar")}) + env = append(env, cfg.EnvVar{Name: "CC", Value: cc}) + env = append(env, cfg.EnvVar{Name: "CXX", Value: cxx}) + + if cfg.BuildContext.CgoEnabled { + env = append(env, cfg.EnvVar{Name: "CGO_ENABLED", Value: "1"}) + } else { + env = append(env, cfg.EnvVar{Name: "CGO_ENABLED", Value: "0"}) + } + + return env +} + +func envOr(name, def string) string { + val := cfg.Getenv(name) + if val != "" { + return val + } + return def +} + +func findEnv(env []cfg.EnvVar, name string) string { + for _, e := range env { + if e.Name == name { + return e.Value + } + } + if cfg.CanGetenv(name) { + return cfg.Getenv(name) + } + return "" +} + +// ExtraEnvVars returns environment variables that should not leak into child processes. +func ExtraEnvVars() []cfg.EnvVar { + gomod := "" + modload.Init() + if modload.HasModRoot() { + gomod = modload.ModFilePath() + } else if modload.Enabled() { + gomod = os.DevNull + } + modload.InitWorkfile() + gowork := modload.WorkFilePath() + // As a special case, if a user set off explicitly, report that in GOWORK. + if cfg.Getenv("GOWORK") == "off" { + gowork = "off" + } + return []cfg.EnvVar{ + {Name: "GOMOD", Value: gomod}, + {Name: "GOWORK", Value: gowork}, + } +} + +// ExtraEnvVarsCostly returns environment variables that should not leak into child processes +// but are costly to evaluate. 
+func ExtraEnvVarsCostly() []cfg.EnvVar { + b := work.NewBuilder("") + defer func() { + if err := b.Close(); err != nil { + base.Fatal(err) + } + }() + + cppflags, cflags, cxxflags, fflags, ldflags, err := b.CFlags(&load.Package{}) + if err != nil { + // Should not happen - b.CFlags was given an empty package. + fmt.Fprintf(os.Stderr, "go: invalid cflags: %v\n", err) + return nil + } + cmd := b.GccCmd(".", "") + + join := func(s []string) string { + q, err := quoted.Join(s) + if err != nil { + return strings.Join(s, " ") + } + return q + } + + return []cfg.EnvVar{ + // Note: Update the switch in runEnv below when adding to this list. + {Name: "CGO_CFLAGS", Value: join(cflags)}, + {Name: "CGO_CPPFLAGS", Value: join(cppflags)}, + {Name: "CGO_CXXFLAGS", Value: join(cxxflags)}, + {Name: "CGO_FFLAGS", Value: join(fflags)}, + {Name: "CGO_LDFLAGS", Value: join(ldflags)}, + {Name: "PKG_CONFIG", Value: b.PkgconfigCmd()}, + {Name: "GOGCCFLAGS", Value: join(cmd[3:])}, + } +} + +// argKey returns the KEY part of the arg KEY=VAL, or else arg itself. +func argKey(arg string) string { + i := strings.Index(arg, "=") + if i < 0 { + return arg + } + return arg[:i] +} + +func runEnv(ctx context.Context, cmd *base.Command, args []string) { + if *envJson && *envU { + base.Fatalf("go: cannot use -json with -u") + } + if *envJson && *envW { + base.Fatalf("go: cannot use -json with -w") + } + if *envU && *envW { + base.Fatalf("go: cannot use -u with -w") + } + + // Handle 'go env -w' and 'go env -u' before calling buildcfg.Check, + // so they can be used to recover from an invalid configuration. + if *envW { + runEnvW(args) + return + } + + if *envU { + runEnvU(args) + return + } + + buildcfg.Check() + if cfg.ExperimentErr != nil { + base.Fatal(cfg.ExperimentErr) + } + + for _, arg := range args { + if strings.Contains(arg, "=") { + base.Fatalf("go: invalid variable name %q (use -w to set variable)", arg) + } + } + + env := cfg.CmdEnv + env = append(env, ExtraEnvVars()...) 
+ + if err := fsys.Init(base.Cwd()); err != nil { + base.Fatal(err) + } + + // Do we need to call ExtraEnvVarsCostly, which is a bit expensive? + needCostly := false + if len(args) == 0 { + // We're listing all environment variables ("go env"), + // including the expensive ones. + needCostly = true + } else { + needCostly = false + checkCostly: + for _, arg := range args { + switch argKey(arg) { + case "CGO_CFLAGS", + "CGO_CPPFLAGS", + "CGO_CXXFLAGS", + "CGO_FFLAGS", + "CGO_LDFLAGS", + "PKG_CONFIG", + "GOGCCFLAGS": + needCostly = true + break checkCostly + } + } + } + if needCostly { + work.BuildInit() + env = append(env, ExtraEnvVarsCostly()...) + } + + if len(args) > 0 { + if *envJson { + var es []cfg.EnvVar + for _, name := range args { + e := cfg.EnvVar{Name: name, Value: findEnv(env, name)} + es = append(es, e) + } + printEnvAsJSON(es) + } else { + for _, name := range args { + fmt.Printf("%s\n", findEnv(env, name)) + } + } + return + } + + if *envJson { + printEnvAsJSON(env) + return + } + + PrintEnv(os.Stdout, env) +} + +func runEnvW(args []string) { + // Process and sanity-check command line. + if len(args) == 0 { + base.Fatalf("go: no KEY=VALUE arguments given") + } + osEnv := make(map[string]string) + for _, e := range cfg.OrigEnv { + if i := strings.Index(e, "="); i >= 0 { + osEnv[e[:i]] = e[i+1:] + } + } + add := make(map[string]string) + for _, arg := range args { + key, val, found := strings.Cut(arg, "=") + if !found { + base.Fatalf("go: arguments must be KEY=VALUE: invalid argument: %s", arg) + } + if err := checkEnvWrite(key, val); err != nil { + base.Fatal(err) + } + if _, ok := add[key]; ok { + base.Fatalf("go: multiple values for key: %s", key) + } + add[key] = val + if osVal := osEnv[key]; osVal != "" && osVal != val { + fmt.Fprintf(os.Stderr, "warning: go env -w %s=... 
does not override conflicting OS environment variable\n", key) + } + } + + if err := checkBuildConfig(add, nil); err != nil { + base.Fatal(err) + } + + gotmp, okGOTMP := add["GOTMPDIR"] + if okGOTMP { + if !filepath.IsAbs(gotmp) && gotmp != "" { + base.Fatalf("go: GOTMPDIR must be an absolute path") + } + } + + updateEnvFile(add, nil) +} + +func runEnvU(args []string) { + // Process and sanity-check command line. + if len(args) == 0 { + base.Fatalf("go: 'go env -u' requires an argument") + } + del := make(map[string]bool) + for _, arg := range args { + if err := checkEnvWrite(arg, ""); err != nil { + base.Fatal(err) + } + del[arg] = true + } + + if err := checkBuildConfig(nil, del); err != nil { + base.Fatal(err) + } + + updateEnvFile(nil, del) +} + +// checkBuildConfig checks whether the build configuration is valid +// after the specified configuration environment changes are applied. +func checkBuildConfig(add map[string]string, del map[string]bool) error { + // get returns the value for key after applying add and del and + // reports whether it changed. cur should be the current value + // (i.e., before applying changes) and def should be the default + // value (i.e., when no environment variables are provided at all). 
+ get := func(key, cur, def string) (string, bool) { + if val, ok := add[key]; ok { + return val, true + } + if del[key] { + val := getOrigEnv(key) + if val == "" { + val = def + } + return val, true + } + return cur, false + } + + goos, okGOOS := get("GOOS", cfg.Goos, build.Default.GOOS) + goarch, okGOARCH := get("GOARCH", cfg.Goarch, build.Default.GOARCH) + if okGOOS || okGOARCH { + if err := work.CheckGOOSARCHPair(goos, goarch); err != nil { + return err + } + } + + goexperiment, okGOEXPERIMENT := get("GOEXPERIMENT", cfg.RawGOEXPERIMENT, buildcfg.DefaultGOEXPERIMENT) + if okGOEXPERIMENT { + if _, err := buildcfg.ParseGOEXPERIMENT(goos, goarch, goexperiment); err != nil { + return err + } + } + + return nil +} + +// PrintEnv prints the environment variables to w. +func PrintEnv(w io.Writer, env []cfg.EnvVar) { + for _, e := range env { + if e.Name != "TERM" { + if runtime.GOOS != "plan9" && bytes.Contains([]byte(e.Value), []byte{0}) { + base.Fatalf("go: internal error: encountered null byte in environment variable %s on non-plan9 platform", e.Name) + } + switch runtime.GOOS { + default: + fmt.Fprintf(w, "%s=%s\n", e.Name, shellQuote(e.Value)) + case "plan9": + if strings.IndexByte(e.Value, '\x00') < 0 { + fmt.Fprintf(w, "%s='%s'\n", e.Name, strings.ReplaceAll(e.Value, "'", "''")) + } else { + v := strings.Split(e.Value, "\x00") + fmt.Fprintf(w, "%s=(", e.Name) + for x, s := range v { + if x > 0 { + fmt.Fprintf(w, " ") + } + fmt.Fprintf(w, "'%s'", strings.ReplaceAll(s, "'", "''")) + } + fmt.Fprintf(w, ")\n") + } + case "windows": + if hasNonGraphic(e.Value) { + base.Errorf("go: stripping unprintable or unescapable characters from %%%q%%", e.Name) + } + fmt.Fprintf(w, "set %s=%s\n", e.Name, batchEscape(e.Value)) + } + } + } +} + +func hasNonGraphic(s string) bool { + for _, c := range []byte(s) { + if c == '\r' || c == '\n' || (!unicode.IsGraphic(rune(c)) && !unicode.IsSpace(rune(c))) { + return true + } + } + return false +} + +func shellQuote(s string) string { + 
var b bytes.Buffer + b.WriteByte('\'') + for _, x := range []byte(s) { + if x == '\'' { + // Close the single quoted string, add an escaped single quote, + // and start another single quoted string. + b.WriteString(`'\''`) + } else { + b.WriteByte(x) + } + } + b.WriteByte('\'') + return b.String() +} + +func batchEscape(s string) string { + var b bytes.Buffer + for _, x := range []byte(s) { + if x == '\r' || x == '\n' || (!unicode.IsGraphic(rune(x)) && !unicode.IsSpace(rune(x))) { + b.WriteRune(unicode.ReplacementChar) + continue + } + switch x { + case '%': + b.WriteString("%%") + case '<', '>', '|', '&', '^': + // These are special characters that need to be escaped with ^. See + // https://learn.microsoft.com/en-us/windows-server/administration/windows-commands/set_1. + b.WriteByte('^') + b.WriteByte(x) + default: + b.WriteByte(x) + } + } + return b.String() +} + +func printEnvAsJSON(env []cfg.EnvVar) { + m := make(map[string]string) + for _, e := range env { + if e.Name == "TERM" { + continue + } + m[e.Name] = e.Value + } + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", "\t") + if err := enc.Encode(m); err != nil { + base.Fatalf("go: %s", err) + } +} + +func getOrigEnv(key string) string { + for _, v := range cfg.OrigEnv { + if v, found := strings.CutPrefix(v, key+"="); found { + return v + } + } + return "" +} + +func checkEnvWrite(key, val string) error { + switch key { + case "GOEXE", "GOGCCFLAGS", "GOHOSTARCH", "GOHOSTOS", "GOMOD", "GOWORK", "GOTOOLDIR", "GOVERSION": + return fmt.Errorf("%s cannot be modified", key) + case "GOENV": + return fmt.Errorf("%s can only be set using the OS environment", key) + } + + // To catch typos and the like, check that we know the variable. + // If it's already in the env file, we assume it's known. + if !cfg.CanGetenv(key) { + return fmt.Errorf("unknown go command variable %s", key) + } + + // Some variables can only have one of a few valid values. 
If set to an + // invalid value, the next cmd/go invocation might fail immediately, + // even 'go env -w' itself. + switch key { + case "GO111MODULE": + switch val { + case "", "auto", "on", "off": + default: + return fmt.Errorf("invalid %s value %q", key, val) + } + case "GOPATH": + if strings.HasPrefix(val, "~") { + return fmt.Errorf("GOPATH entry cannot start with shell metacharacter '~': %q", val) + } + if !filepath.IsAbs(val) && val != "" { + return fmt.Errorf("GOPATH entry is relative; must be absolute path: %q", val) + } + case "GOMODCACHE": + if !filepath.IsAbs(val) && val != "" { + return fmt.Errorf("GOMODCACHE entry is relative; must be absolute path: %q", val) + } + case "CC", "CXX": + if val == "" { + break + } + args, err := quoted.Split(val) + if err != nil { + return fmt.Errorf("invalid %s: %v", key, err) + } + if len(args) == 0 { + return fmt.Errorf("%s entry cannot contain only space", key) + } + if !filepath.IsAbs(args[0]) && args[0] != filepath.Base(args[0]) { + return fmt.Errorf("%s entry is relative; must be absolute path: %q", key, args[0]) + } + } + + if !utf8.ValidString(val) { + return fmt.Errorf("invalid UTF-8 in %s=... value", key) + } + if strings.Contains(val, "\x00") { + return fmt.Errorf("invalid NUL in %s=... value", key) + } + if strings.ContainsAny(val, "\v\r\n") { + return fmt.Errorf("invalid newline in %s=... 
value", key) + } + return nil +} + +func readEnvFileLines(mustExist bool) []string { + file, err := cfg.EnvFile() + if file == "" { + if mustExist { + base.Fatalf("go: cannot find go env config: %v", err) + } + return nil + } + data, err := os.ReadFile(file) + if err != nil && (!os.IsNotExist(err) || mustExist) { + base.Fatalf("go: reading go env config: %v", err) + } + lines := strings.SplitAfter(string(data), "\n") + if lines[len(lines)-1] == "" { + lines = lines[:len(lines)-1] + } else { + lines[len(lines)-1] += "\n" + } + return lines +} + +func updateEnvFile(add map[string]string, del map[string]bool) { + lines := readEnvFileLines(len(add) == 0) + + // Delete all but last copy of any duplicated variables, + // since the last copy is the one that takes effect. + prev := make(map[string]int) + for l, line := range lines { + if key := lineToKey(line); key != "" { + if p, ok := prev[key]; ok { + lines[p] = "" + } + prev[key] = l + } + } + + // Add variables (go env -w). Update existing lines in file if present, add to end otherwise. + for key, val := range add { + if p, ok := prev[key]; ok { + lines[p] = key + "=" + val + "\n" + delete(add, key) + } + } + for key, val := range add { + lines = append(lines, key+"="+val+"\n") + } + + // Delete requested variables (go env -u). + for key := range del { + if p, ok := prev[key]; ok { + lines[p] = "" + } + } + + // Sort runs of KEY=VALUE lines + // (that is, blocks of lines where blocks are separated + // by comments, blank lines, or invalid lines). + start := 0 + for i := 0; i <= len(lines); i++ { + if i == len(lines) || lineToKey(lines[i]) == "" { + sortKeyValues(lines[start:i]) + start = i + 1 + } + } + + file, err := cfg.EnvFile() + if file == "" { + base.Fatalf("go: cannot find go env config: %v", err) + } + data := []byte(strings.Join(lines, "")) + err = os.WriteFile(file, data, 0666) + if err != nil { + // Try creating directory. 
+ os.MkdirAll(filepath.Dir(file), 0777) + err = os.WriteFile(file, data, 0666) + if err != nil { + base.Fatalf("go: writing go env config: %v", err) + } + } +} + +// lineToKey returns the KEY part of the line KEY=VALUE or else an empty string. +func lineToKey(line string) string { + i := strings.Index(line, "=") + if i < 0 || strings.Contains(line[:i], "#") { + return "" + } + return line[:i] +} + +// sortKeyValues sorts a sequence of lines by key. +// It differs from sort.Strings in that keys which are GOx where x is an ASCII +// character smaller than = sort after GO=. +// (There are no such keys currently. It used to matter for GO386 which was +// removed in Go 1.16.) +func sortKeyValues(lines []string) { + sort.Slice(lines, func(i, j int) bool { + return lineToKey(lines[i]) < lineToKey(lines[j]) + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/envcmd/env_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/envcmd/env_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7419cf3fc20e73db8b565991566e4452edf9d3d4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/envcmd/env_test.go @@ -0,0 +1,93 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build unix || windows + +package envcmd + +import ( + "bytes" + "cmd/go/internal/cfg" + "fmt" + "internal/testenv" + "os" + "os/exec" + "path/filepath" + "runtime" + "testing" + "unicode" +) + +func FuzzPrintEnvEscape(f *testing.F) { + f.Add(`$(echo 'cc"'; echo 'OOPS="oops')`) + f.Add("$(echo shell expansion 1>&2)") + f.Add("''") + f.Add(`C:\"Program Files"\`) + f.Add(`\\"Quoted Host"\\share`) + f.Add("\xfb") + f.Add("0") + f.Add("") + f.Add("''''''''") + f.Add("\r") + f.Add("\n") + f.Add("E,%") + f.Fuzz(func(t *testing.T, s string) { + t.Parallel() + + for _, c := range []byte(s) { + if c == 0 { + t.Skipf("skipping %q: contains a null byte. Null bytes can't occur in the environment"+ + " outside of Plan 9, which has different code path than Windows and Unix that this test"+ + " isn't testing.", s) + } + if c > unicode.MaxASCII { + t.Skipf("skipping %#q: contains a non-ASCII character %q", s, c) + } + if !unicode.IsGraphic(rune(c)) && !unicode.IsSpace(rune(c)) { + t.Skipf("skipping %#q: contains non-graphic character %q", s, c) + } + if runtime.GOOS == "windows" && c == '\r' || c == '\n' { + t.Skipf("skipping %#q on Windows: contains unescapable character %q", s, c) + } + } + + var b bytes.Buffer + if runtime.GOOS == "windows" { + b.WriteString("@echo off\n") + } + PrintEnv(&b, []cfg.EnvVar{{Name: "var", Value: s}}) + var want string + if runtime.GOOS == "windows" { + fmt.Fprintf(&b, "echo \"%%var%%\"\n") + want += "\"" + s + "\"\r\n" + } else { + fmt.Fprintf(&b, "printf '%%s\\n' \"$var\"\n") + want += s + "\n" + } + scriptfilename := "script.sh" + if runtime.GOOS == "windows" { + scriptfilename = "script.bat" + } + var cmd *exec.Cmd + if runtime.GOOS == "windows" { + scriptfile := filepath.Join(t.TempDir(), scriptfilename) + if err := os.WriteFile(scriptfile, b.Bytes(), 0777); err != nil { + t.Fatal(err) + } + cmd = testenv.Command(t, "cmd.exe", "/C", scriptfile) + } else { + cmd = testenv.Command(t, "sh", "-c", b.String()) + } + out, err := cmd.Output() + 
t.Log(string(out)) + if err != nil { + t.Fatal(err) + } + + if string(out) != want { + t.Fatalf("output of running PrintEnv script and echoing variable: got: %q, want: %q", + string(out), want) + } + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/fix/fix.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/fix/fix.go new file mode 100644 index 0000000000000000000000000000000000000000..3705b30ef9533c0efcbf25e84b06cf7f33338d76 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/fix/fix.go @@ -0,0 +1,85 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fix implements the “go fix” command. +package fix + +import ( + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/load" + "cmd/go/internal/modload" + "cmd/go/internal/str" + "cmd/go/internal/work" + "context" + "fmt" + "go/build" + "os" +) + +var CmdFix = &base.Command{ + UsageLine: "go fix [-fix list] [packages]", + Short: "update packages to use new APIs", + Long: ` +Fix runs the Go fix command on the packages named by the import paths. + +The -fix flag sets a comma-separated list of fixes to run. +The default is all known fixes. +(Its value is passed to 'go tool fix -r'.) + +For more about fix, see 'go doc cmd/fix'. +For more about specifying packages, see 'go help packages'. + +To run fix with other options, run 'go tool fix'. + +See also: go fmt, go vet. 
+ `, +} + +var fixes = CmdFix.Flag.String("fix", "", "comma-separated list of fixes to apply") + +func init() { + work.AddBuildFlags(CmdFix, work.DefaultBuildFlags) + CmdFix.Run = runFix // fix cycle +} + +func runFix(ctx context.Context, cmd *base.Command, args []string) { + pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{}, args) + w := 0 + for _, pkg := range pkgs { + if pkg.Error != nil { + base.Errorf("%v", pkg.Error) + continue + } + pkgs[w] = pkg + w++ + } + pkgs = pkgs[:w] + + printed := false + for _, pkg := range pkgs { + if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main { + if !printed { + fmt.Fprintf(os.Stderr, "go: not fixing packages in dependency modules\n") + printed = true + } + continue + } + // Use pkg.gofiles instead of pkg.Dir so that + // the command only applies to this package, + // not to packages in subdirectories. + files := base.RelPaths(pkg.InternalAllGoFiles()) + goVersion := "" + if pkg.Module != nil { + goVersion = "go" + pkg.Module.GoVersion + } else if pkg.Standard { + goVersion = build.Default.ReleaseTags[len(build.Default.ReleaseTags)-1] + } + var fixArg []string + if *fixes != "" { + fixArg = []string{"-r=" + *fixes} + } + base.Run(str.StringList(cfg.BuildToolexec, base.Tool("fix"), "-go="+goVersion, fixArg, files)) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/fmtcmd/fmt.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/fmtcmd/fmt.go new file mode 100644 index 0000000000000000000000000000000000000000..62b22f6bcfa4079e418ab816b5448d156af91b09 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/fmtcmd/fmt.go @@ -0,0 +1,115 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fmtcmd implements the “go fmt” command. 
+package fmtcmd + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/load" + "cmd/go/internal/modload" + "cmd/internal/sys" +) + +func init() { + base.AddBuildFlagsNX(&CmdFmt.Flag) + base.AddChdirFlag(&CmdFmt.Flag) + base.AddModFlag(&CmdFmt.Flag) + base.AddModCommonFlags(&CmdFmt.Flag) +} + +var CmdFmt = &base.Command{ + Run: runFmt, + UsageLine: "go fmt [-n] [-x] [packages]", + Short: "gofmt (reformat) package sources", + Long: ` +Fmt runs the command 'gofmt -l -w' on the packages named +by the import paths. It prints the names of the files that are modified. + +For more about gofmt, see 'go doc cmd/gofmt'. +For more about specifying packages, see 'go help packages'. + +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. + +The -mod flag's value sets which module download mode +to use: readonly or vendor. See 'go help modules' for more. + +To run gofmt with specific options, run gofmt itself. + +See also: go fix, go vet. + `, +} + +func runFmt(ctx context.Context, cmd *base.Command, args []string) { + printed := false + gofmt := gofmtPath() + + gofmtArgs := []string{gofmt, "-l", "-w"} + gofmtArgLen := len(gofmt) + len(" -l -w") + + baseGofmtArgs := len(gofmtArgs) + baseGofmtArgLen := gofmtArgLen + + for _, pkg := range load.PackagesAndErrors(ctx, load.PackageOpts{}, args) { + if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main { + if !printed { + fmt.Fprintf(os.Stderr, "go: not formatting packages in dependency modules\n") + printed = true + } + continue + } + if pkg.Error != nil { + var nogo *load.NoGoError + var embed *load.EmbedError + if (errors.As(pkg.Error, &nogo) || errors.As(pkg.Error, &embed)) && len(pkg.InternalAllGoFiles()) > 0 { + // Skip this error, as we will format + // all files regardless. 
+ } else { + base.Errorf("%v", pkg.Error) + continue + } + } + // Use pkg.gofiles instead of pkg.Dir so that + // the command only applies to this package, + // not to packages in subdirectories. + files := base.RelPaths(pkg.InternalAllGoFiles()) + for _, file := range files { + gofmtArgs = append(gofmtArgs, file) + gofmtArgLen += 1 + len(file) // plus separator + if gofmtArgLen >= sys.ExecArgLengthLimit { + base.Run(gofmtArgs) + gofmtArgs = gofmtArgs[:baseGofmtArgs] + gofmtArgLen = baseGofmtArgLen + } + } + } + if len(gofmtArgs) > baseGofmtArgs { + base.Run(gofmtArgs) + } +} + +func gofmtPath() string { + gofmt := "gofmt" + cfg.ToolExeSuffix() + + gofmtPath := filepath.Join(cfg.GOBIN, gofmt) + if _, err := os.Stat(gofmtPath); err == nil { + return gofmtPath + } + + gofmtPath = filepath.Join(cfg.GOROOT, "bin", gofmt) + if _, err := os.Stat(gofmtPath); err == nil { + return gofmtPath + } + + // fallback to looking for gofmt in $PATH + return "gofmt" +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/fsys/fsys.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/fsys/fsys.go new file mode 100644 index 0000000000000000000000000000000000000000..06159dbbb7343215b01618dd81627e0de03e80c8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/fsys/fsys.go @@ -0,0 +1,784 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fsys is an abstraction for reading files that +// allows for virtual overlays on top of the files on disk. +package fsys + +import ( + "encoding/json" + "errors" + "fmt" + "internal/godebug" + "io/fs" + "log" + "os" + pathpkg "path" + "path/filepath" + "runtime" + "runtime/debug" + "sort" + "strings" + "sync" + "time" +) + +// Trace emits a trace event for the operation and file path to the trace log, +// but only when $GODEBUG contains gofsystrace=1. 
// The traces are appended to the file named by the $GODEBUG setting gofsystracelog, or else standard error.
// For debugging, if the $GODEBUG setting gofsystracestack is non-empty, then trace events for paths
// matching that glob pattern (using path.Match) will be followed by a full stack trace.
func Trace(op, path string) {
	if !doTrace {
		return
	}
	// traceMu serializes writes so that concurrent goroutines do not
	// interleave trace lines (or a line with a stack dump).
	traceMu.Lock()
	defer traceMu.Unlock()
	fmt.Fprintf(traceFile, "%d gofsystrace %s %s\n", os.Getpid(), op, path)
	if pattern := gofsystracestack.Value(); pattern != "" {
		if match, _ := pathpkg.Match(pattern, path); match {
			traceFile.Write(debug.Stack())
		}
	}
}

// Trace state, configured once in init below from $GODEBUG.
var (
	doTrace   bool       // whether gofsystrace=1 was set
	traceFile *os.File   // destination for trace output (log file or stderr)
	traceMu   sync.Mutex // guards writes to traceFile

	gofsystrace      = godebug.New("#gofsystrace")
	gofsystracelog   = godebug.New("#gofsystracelog")
	gofsystracestack = godebug.New("#gofsystracestack")
)

func init() {
	if gofsystrace.Value() != "1" {
		return
	}
	doTrace = true
	if f := gofsystracelog.Value(); f != "" {
		// Note: No buffering on writes to this file, so no need to worry about closing it at exit.
		var err error
		traceFile, err = os.OpenFile(f, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
		if err != nil {
			log.Fatal(err)
		}
	} else {
		traceFile = os.Stderr
	}
}

// OverlayFile is the path to a text file in the OverlayJSON format.
// It is the value of the -overlay flag.
var OverlayFile string

// OverlayJSON is the format overlay files are expected to be in.
// The Replace map maps from overlaid paths to replacement paths:
// the Go command will forward all reads trying to open
// each overlaid path to its replacement path, or consider the overlaid
// path not to exist if the replacement path is empty.
type OverlayJSON struct {
	Replace map[string]string
}

// node is one entry in the overlay tree. Exactly one of three states holds:
// a replaced file (actualFilePath non-empty), a directory (children non-nil),
// or a deleted entry (both zero).
type node struct {
	actualFilePath string           // empty if a directory
	children       map[string]*node // path element → file or directory
}

// isDir reports whether n represents a directory in the overlay.
func (n *node) isDir() bool {
	return n.actualFilePath == "" && n.children != nil
}

// isDeleted reports whether n represents a file deleted by the overlay.
func (n *node) isDeleted() bool {
	return n.actualFilePath == "" && n.children == nil
}

// TODO(matloob): encapsulate these in an io/fs-like interface
var overlay map[string]*node // path -> file or directory node
var cwd string               // copy of base.Cwd() to avoid dependency

// canonicalize normalizes a path for use as an overlay map key.
// Important: filepath.Join(cwd, path) doesn't always produce the correct
// absolute path when path is relative, because on Windows computing the
// correct absolute path may require a syscall. So this must only be used
// for looking up (or installing) paths in the overlay map.
func canonicalize(path string) string {
	switch {
	case path == "":
		return ""
	case filepath.IsAbs(path):
		return filepath.Clean(path)
	}

	if vol := filepath.VolumeName(cwd); vol != "" && path[0] == filepath.Separator {
		// On Windows filepath.Join(cwd, path) doesn't always work. In general
		// filepath.Abs needs to make a syscall on Windows. Elsewhere in cmd/go
		// we use filepath.Join(cwd, path), but cmd/go specifically supports
		// Windows paths that start with "\", meaning relative to the volume of
		// the working directory. See golang.org/issue/8130.
		return filepath.Join(vol, path)
	}

	// Relative path: resolve against the working directory.
	return filepath.Join(cwd, path)
}

// Init initializes the overlay, if one is being used.
// Init initializes the overlay from the file named by OverlayFile,
// if one is being used. wd becomes the working directory used for
// canonicalizing relative paths. Calling Init again is a no-op.
func Init(wd string) error {
	if overlay != nil {
		// already initialized
		return nil
	}

	cwd = wd

	if OverlayFile == "" {
		return nil
	}

	Trace("ReadFile", OverlayFile)
	b, err := os.ReadFile(OverlayFile)
	if err != nil {
		return fmt.Errorf("reading overlay file: %v", err)
	}

	var overlayJSON OverlayJSON
	if err := json.Unmarshal(b, &overlayJSON); err != nil {
		return fmt.Errorf("parsing overlay JSON: %v", err)
	}

	return initFromJSON(overlayJSON)
}

// initFromJSON populates the package-level overlay map from the parsed
// Replace table, building file nodes and their implied parent directory
// nodes, and rejecting inconsistent overlays (duplicate canonical keys,
// or a path used as both a file and a directory).
func initFromJSON(overlayJSON OverlayJSON) error {
	// Canonicalize the paths in the overlay map.
	// Use reverseCanonicalized to check for collisions:
	// no two 'from' paths should canonicalize to the same path.
	overlay = make(map[string]*node)
	reverseCanonicalized := make(map[string]string) // inverse of canonicalize operation, to check for duplicates
	// Build a table of file and directory nodes from the replacement map.

	// Remove any potential non-determinism from iterating over map by sorting it.
	replaceFrom := make([]string, 0, len(overlayJSON.Replace))
	for k := range overlayJSON.Replace {
		replaceFrom = append(replaceFrom, k)
	}
	sort.Strings(replaceFrom)

	for _, from := range replaceFrom {
		to := overlayJSON.Replace[from]
		// Canonicalize paths and check for a collision.
		if from == "" {
			return fmt.Errorf("empty string key in overlay file Replace map")
		}
		cfrom := canonicalize(from)
		if to != "" {
			// Don't canonicalize "", meaning to delete a file, because then it will turn into ".".
			to = canonicalize(to)
		}
		if otherFrom, seen := reverseCanonicalized[cfrom]; seen {
			return fmt.Errorf(
				"paths %q and %q both canonicalize to %q in overlay file Replace map", otherFrom, from, cfrom)
		}
		reverseCanonicalized[cfrom] = from
		from = cfrom

		// Create node for overlaid file.
		dir, base := filepath.Dir(from), filepath.Base(from)
		if n, ok := overlay[from]; ok {
			// All 'from' paths in the overlay are file paths. Since the from paths
			// are in a map, they are unique, so if the node already exists we added
			// it below when we create parent directory nodes. That is, that
			// both a file and a path to one of its parent directories exist as keys
			// in the Replace map.
			//
			// This only applies if the overlay directory has any files or directories
			// in it: placeholder directories that only contain deleted files don't
			// count. They are safe to be overwritten with actual files.
			for _, f := range n.children {
				if !f.isDeleted() {
					return fmt.Errorf("invalid overlay: path %v is used as both file and directory", from)
				}
			}
		}
		overlay[from] = &node{actualFilePath: to}

		// Add parent directory nodes to overlay structure.
		// Walk upward from the file toward the root, creating (or reusing)
		// a directory node at each level and linking the child into it.
		childNode := overlay[from]
		for {
			dirNode := overlay[dir]
			if dirNode == nil || dirNode.isDeleted() {
				dirNode = &node{children: make(map[string]*node)}
				overlay[dir] = dirNode
			}
			if childNode.isDeleted() {
				// Only create one parent for a deleted file:
				// the directory only conditionally exists if
				// there are any non-deleted children, so
				// we don't create their parents.
				if dirNode.isDir() {
					dirNode.children[base] = childNode
				}
				break
			}
			if !dirNode.isDir() {
				// This path already exists as a file, so it can't be a parent
				// directory. See comment at error above.
				return fmt.Errorf("invalid overlay: path %v is used as both file and directory", dir)
			}
			dirNode.children[base] = childNode
			parent := filepath.Dir(dir)
			if parent == dir {
				break // reached the top; there is no parent
			}
			dir, base = parent, filepath.Base(dir)
			childNode = dirNode
		}
	}

	return nil
}

// IsDir returns true if path is a directory on disk or in the
// overlay.
+func IsDir(path string) (bool, error) { + Trace("IsDir", path) + path = canonicalize(path) + + if _, ok := parentIsOverlayFile(path); ok { + return false, nil + } + + if n, ok := overlay[path]; ok { + return n.isDir(), nil + } + + fi, err := os.Stat(path) + if err != nil { + return false, err + } + + return fi.IsDir(), nil +} + +// parentIsOverlayFile returns whether name or any of +// its parents are files in the overlay, and the first parent found, +// including name itself, that's a file in the overlay. +func parentIsOverlayFile(name string) (string, bool) { + if overlay != nil { + // Check if name can't possibly be a directory because + // it or one of its parents is overlaid with a file. + // TODO(matloob): Maybe save this to avoid doing it every time? + prefix := name + for { + node := overlay[prefix] + if node != nil && !node.isDir() { + return prefix, true + } + parent := filepath.Dir(prefix) + if parent == prefix { + break + } + prefix = parent + } + } + + return "", false +} + +// errNotDir is used to communicate from ReadDir to IsDirWithGoFiles +// that the argument is not a directory, so that IsDirWithGoFiles doesn't +// return an error. +var errNotDir = errors.New("not a directory") + +func nonFileInOverlayError(overlayPath string) error { + return fmt.Errorf("replacement path %q is a directory, not a file", overlayPath) +} + +// readDir reads a dir on disk, returning an error that is errNotDir if the dir is not a directory. +// Unfortunately, the error returned by os.ReadDir if dir is not a directory +// can vary depending on the OS (Linux, Mac, Windows return ENOTDIR; BSD returns EINVAL). 
// readDir reads a directory on disk, converting os.ReadDir's DirEntry
// results to fs.FileInfo and normalizing the "not a directory" error
// to errNotDir regardless of OS.
func readDir(dir string) ([]fs.FileInfo, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, err
		}
		// Distinguish "exists but is a file" from other failures.
		if dirfi, staterr := os.Stat(dir); staterr == nil && !dirfi.IsDir() {
			return nil, &fs.PathError{Op: "ReadDir", Path: dir, Err: errNotDir}
		}
		return nil, err
	}

	fis := make([]fs.FileInfo, 0, len(entries))
	for _, entry := range entries {
		info, err := entry.Info()
		if err != nil {
			// Entry disappeared between ReadDir and Info; skip it.
			continue
		}
		fis = append(fis, info)
	}
	return fis, nil
}

// ReadDir provides a slice of fs.FileInfo entries corresponding
// to the overlaid files in the directory.
func ReadDir(dir string) ([]fs.FileInfo, error) {
	Trace("ReadDir", dir)
	dir = canonicalize(dir)
	if _, ok := parentIsOverlayFile(dir); ok {
		return nil, &fs.PathError{Op: "ReadDir", Path: dir, Err: errNotDir}
	}

	dirNode := overlay[dir]
	if dirNode == nil {
		// Directory is untouched by the overlay; read straight from disk.
		return readDir(dir)
	}
	if dirNode.isDeleted() {
		return nil, &fs.PathError{Op: "ReadDir", Path: dir, Err: fs.ErrNotExist}
	}
	// The on-disk directory may legitimately be missing or a non-directory
	// when the overlay creates it; only propagate other errors.
	diskfis, err := readDir(dir)
	if err != nil && !os.IsNotExist(err) && !errors.Is(err, errNotDir) {
		return nil, err
	}

	// Stat files in overlay to make composite list of fileinfos
	files := make(map[string]fs.FileInfo)
	for _, f := range diskfis {
		files[f.Name()] = f
	}
	for name, to := range dirNode.children {
		switch {
		case to.isDir():
			files[name] = fakeDir(name)
		case to.isDeleted():
			delete(files, name)
		default:
			// To keep the data model simple, if the overlay contains a symlink we
			// always stat through it (using Stat, not Lstat). That way we don't need
			// to worry about the interaction between Lstat and directories: if a
			// symlink in the overlay points to a directory, we reject it like an
			// ordinary directory.
			fi, err := os.Stat(to.actualFilePath)
			if err != nil {
				files[name] = missingFile(name)
				continue
			} else if fi.IsDir() {
				return nil, &fs.PathError{Op: "Stat", Path: filepath.Join(dir, name), Err: nonFileInOverlayError(to.actualFilePath)}
			}
			// Add a fileinfo for the overlaid file, so that it has
			// the original file's name, but the overlaid file's metadata.
			files[name] = fakeFile{name, fi}
		}
	}
	// Reuse diskfis's backing array for the merged, sorted result.
	sortedFiles := diskfis[:0]
	for _, f := range files {
		sortedFiles = append(sortedFiles, f)
	}
	sort.Slice(sortedFiles, func(i, j int) bool { return sortedFiles[i].Name() < sortedFiles[j].Name() })
	return sortedFiles, nil
}

// OverlayPath returns the path to the overlaid contents of the
// file, the empty string if the overlay deletes the file, or path
// itself if the file is not in the overlay, the file is a directory
// in the overlay, or there is no overlay.
// It returns true if the path is overlaid with a regular file
// or deleted, and false otherwise.
func OverlayPath(path string) (string, bool) {
	if p, ok := overlay[canonicalize(path)]; ok && !p.isDir() {
		return p.actualFilePath, ok
	}

	return path, false
}

// Open opens the file at or overlaid on the given path.
func Open(path string) (*os.File, error) {
	Trace("Open", path)
	return openFile(path, os.O_RDONLY, 0)
}

// OpenFile opens the file at or overlaid on the given path with the flag and perm.
func OpenFile(path string, flag int, perm os.FileMode) (*os.File, error) {
	Trace("OpenFile", path)
	return openFile(path, flag, perm)
}

// openFile is the shared implementation of Open and OpenFile,
// redirecting reads of overlaid paths to their replacement files.
func openFile(path string, flag int, perm os.FileMode) (*os.File, error) {
	cpath := canonicalize(path)
	if node, ok := overlay[cpath]; ok {
		// Opening a file in the overlay.
		if node.isDir() {
			return nil, &fs.PathError{Op: "OpenFile", Path: path, Err: errors.New("fsys.OpenFile doesn't support opening directories yet")}
		}
		// We can't open overlaid paths for write.
		// NOTE(review): this compares perm (permission bits) against the
		// O_RDONLY flag value; it looks like the intent is to reject any
		// non-read-only open — confirm whether flag, not perm, should be
		// checked here.
		if perm != os.FileMode(os.O_RDONLY) {
			return nil, &fs.PathError{Op: "OpenFile", Path: path, Err: errors.New("overlaid files can't be opened for write")}
		}
		return os.OpenFile(node.actualFilePath, flag, perm)
	}
	if parent, ok := parentIsOverlayFile(filepath.Dir(cpath)); ok {
		// The file is deleted explicitly in the Replace map,
		// or implicitly because one of its parent directories was
		// replaced by a file.
		return nil, &fs.PathError{
			Op:   "Open",
			Path: path,
			Err:  fmt.Errorf("file %s does not exist: parent directory %s is replaced by a file in overlay", path, parent),
		}
	}
	return os.OpenFile(cpath, flag, perm)
}

// IsDirWithGoFiles reports whether dir is a directory containing Go files
// either on disk or in the overlay.
func IsDirWithGoFiles(dir string) (bool, error) {
	Trace("IsDirWithGoFiles", dir)
	fis, err := ReadDir(dir)
	if os.IsNotExist(err) || errors.Is(err, errNotDir) {
		return false, nil
	}
	if err != nil {
		return false, err
	}

	var firstErr error
	for _, fi := range fis {
		if fi.IsDir() {
			continue
		}

		// TODO(matloob): this enforces that the "from" in the map
		// has a .go suffix, but the actual destination file
		// doesn't need to have a .go suffix. Is this okay with the
		// compiler?
		if !strings.HasSuffix(fi.Name(), ".go") {
			continue
		}
		if fi.Mode().IsRegular() {
			return true, nil
		}

		// fi is the result of an Lstat, so it doesn't follow symlinks.
		// But it's okay if the file is a symlink pointing to a regular
		// file, so use os.Stat to follow symlinks and check that.
		actualFilePath, _ := OverlayPath(filepath.Join(dir, fi.Name()))
		fi, err := os.Stat(actualFilePath)
		if err == nil && fi.Mode().IsRegular() {
			return true, nil
		}
		// Remember only the first stat failure; later entries may
		// still prove the directory has Go files.
		if err != nil && firstErr == nil {
			firstErr = err
		}
	}

	// No go files found in directory.
	return false, firstErr
}

// walk recursively descends path, calling walkFn. Copied, with some
// modifications from path/filepath.walk.
// walk recursively descends path, calling walkFn for path itself and
// every entry beneath it. It honors filepath.SkipDir on directories.
func walk(path string, info fs.FileInfo, walkFn filepath.WalkFunc) error {
	if err := walkFn(path, info, nil); err != nil || !info.IsDir() {
		return err
	}

	fis, err := ReadDir(path)
	if err != nil {
		// Report the read failure through walkFn, matching
		// path/filepath.walk behavior.
		return walkFn(path, info, err)
	}

	for _, fi := range fis {
		filename := filepath.Join(path, fi.Name())
		if err := walk(filename, fi, walkFn); err != nil {
			// SkipDir on a directory skips only that subtree.
			if !fi.IsDir() || err != filepath.SkipDir {
				return err
			}
		}
	}
	return nil
}

// Walk walks the file tree rooted at root, calling walkFn for each file or
// directory in the tree, including root.
func Walk(root string, walkFn filepath.WalkFunc) error {
	Trace("Walk", root)
	info, err := Lstat(root)
	if err != nil {
		err = walkFn(root, nil, err)
	} else {
		err = walk(root, info, walkFn)
	}
	// SkipDir at the top level means "done", not an error.
	if err == filepath.SkipDir {
		return nil
	}
	return err
}

// Lstat implements a version of os.Lstat that operates on the overlay filesystem.
func Lstat(path string) (fs.FileInfo, error) {
	Trace("Lstat", path)
	return overlayStat(path, os.Lstat, "lstat")
}

// Stat implements a version of os.Stat that operates on the overlay filesystem.
func Stat(path string) (fs.FileInfo, error) {
	Trace("Stat", path)
	return overlayStat(path, os.Stat, "stat")
}

// overlayStat implements lstat or Stat (depending on whether os.Lstat or os.Stat is passed in).
// opName is used only for error reporting.
func overlayStat(path string, osStat func(string) (fs.FileInfo, error), opName string) (fs.FileInfo, error) {
	cpath := canonicalize(path)

	// If any parent is overlaid with a file, path cannot exist.
	if _, ok := parentIsOverlayFile(filepath.Dir(cpath)); ok {
		return nil, &fs.PathError{Op: opName, Path: cpath, Err: fs.ErrNotExist}
	}

	node, ok := overlay[cpath]
	if !ok {
		// The file or directory is not overlaid.
		return osStat(path)
	}

	switch {
	case node.isDeleted():
		return nil, &fs.PathError{Op: opName, Path: cpath, Err: fs.ErrNotExist}
	case node.isDir():
		return fakeDir(filepath.Base(path)), nil
	default:
		// To keep the data model simple, if the overlay contains a symlink we
		// always stat through it (using Stat, not Lstat). That way we don't need to
		// worry about the interaction between Lstat and directories: if a symlink
		// in the overlay points to a directory, we reject it like an ordinary
		// directory.
		fi, err := os.Stat(node.actualFilePath)
		if err != nil {
			return nil, err
		}
		if fi.IsDir() {
			return nil, &fs.PathError{Op: opName, Path: cpath, Err: nonFileInOverlayError(node.actualFilePath)}
		}
		return fakeFile{name: filepath.Base(path), real: fi}, nil
	}
}

// fakeFile provides an fs.FileInfo implementation for an overlaid file,
// so that the file has the name of the overlaid file, but takes all
// other characteristics of the replacement file.
type fakeFile struct {
	name string      // name reported to callers (the overlaid path's base name)
	real fs.FileInfo // metadata of the replacement file on disk
}

func (f fakeFile) Name() string       { return f.name }
func (f fakeFile) Size() int64        { return f.real.Size() }
func (f fakeFile) Mode() fs.FileMode  { return f.real.Mode() }
func (f fakeFile) ModTime() time.Time { return f.real.ModTime() }
func (f fakeFile) IsDir() bool        { return f.real.IsDir() }
func (f fakeFile) Sys() any           { return f.real.Sys() }

func (f fakeFile) String() string {
	return fs.FormatFileInfo(f)
}

// missingFile provides an fs.FileInfo for an overlaid file where the
// destination file in the overlay doesn't exist. It returns zero values
// for the fileInfo methods other than Name, set to the file's name, and Mode
// set to ModeIrregular.
+type missingFile string + +func (f missingFile) Name() string { return string(f) } +func (f missingFile) Size() int64 { return 0 } +func (f missingFile) Mode() fs.FileMode { return fs.ModeIrregular } +func (f missingFile) ModTime() time.Time { return time.Unix(0, 0) } +func (f missingFile) IsDir() bool { return false } +func (f missingFile) Sys() any { return nil } + +func (f missingFile) String() string { + return fs.FormatFileInfo(f) +} + +// fakeDir provides an fs.FileInfo implementation for directories that are +// implicitly created by overlaid files. Each directory in the +// path of an overlaid file is considered to exist in the overlay filesystem. +type fakeDir string + +func (f fakeDir) Name() string { return string(f) } +func (f fakeDir) Size() int64 { return 0 } +func (f fakeDir) Mode() fs.FileMode { return fs.ModeDir | 0500 } +func (f fakeDir) ModTime() time.Time { return time.Unix(0, 0) } +func (f fakeDir) IsDir() bool { return true } +func (f fakeDir) Sys() any { return nil } + +func (f fakeDir) String() string { + return fs.FormatFileInfo(f) +} + +// Glob is like filepath.Glob but uses the overlay file system. +func Glob(pattern string) (matches []string, err error) { + Trace("Glob", pattern) + // Check pattern is well-formed. + if _, err := filepath.Match(pattern, ""); err != nil { + return nil, err + } + if !hasMeta(pattern) { + if _, err = Lstat(pattern); err != nil { + return nil, nil + } + return []string{pattern}, nil + } + + dir, file := filepath.Split(pattern) + volumeLen := 0 + if runtime.GOOS == "windows" { + volumeLen, dir = cleanGlobPathWindows(dir) + } else { + dir = cleanGlobPath(dir) + } + + if !hasMeta(dir[volumeLen:]) { + return glob(dir, file, nil) + } + + // Prevent infinite recursion. See issue 15879. 
+ if dir == pattern { + return nil, filepath.ErrBadPattern + } + + var m []string + m, err = Glob(dir) + if err != nil { + return + } + for _, d := range m { + matches, err = glob(d, file, matches) + if err != nil { + return + } + } + return +} + +// cleanGlobPath prepares path for glob matching. +func cleanGlobPath(path string) string { + switch path { + case "": + return "." + case string(filepath.Separator): + // do nothing to the path + return path + default: + return path[0 : len(path)-1] // chop off trailing separator + } +} + +func volumeNameLen(path string) int { + isSlash := func(c uint8) bool { + return c == '\\' || c == '/' + } + if len(path) < 2 { + return 0 + } + // with drive letter + c := path[0] + if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { + return 2 + } + // is it UNC? https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file + if l := len(path); l >= 5 && isSlash(path[0]) && isSlash(path[1]) && + !isSlash(path[2]) && path[2] != '.' { + // first, leading `\\` and next shouldn't be `\`. its server name. + for n := 3; n < l-1; n++ { + // second, next '\' shouldn't be repeated. + if isSlash(path[n]) { + n++ + // third, following something characters. its share name. + if !isSlash(path[n]) { + if path[n] == '.' { + break + } + for ; n < l; n++ { + if isSlash(path[n]) { + break + } + } + return n + } + break + } + } + } + return 0 +} + +// cleanGlobPathWindows is windows version of cleanGlobPath. +func cleanGlobPathWindows(path string) (prefixLen int, cleaned string) { + vollen := volumeNameLen(path) + switch { + case path == "": + return 0, "." + case vollen+1 == len(path) && os.IsPathSeparator(path[len(path)-1]): // /, \, C:\ and C:/ + // do nothing to the path + return vollen + 1, path + case vollen == len(path) && len(path) == 2: // C: + return vollen, path + "." // convert C: into C:. 
+ default: + if vollen >= len(path) { + vollen = len(path) - 1 + } + return vollen, path[0 : len(path)-1] // chop off trailing separator + } +} + +// glob searches for files matching pattern in the directory dir +// and appends them to matches. If the directory cannot be +// opened, it returns the existing matches. New matches are +// added in lexicographical order. +func glob(dir, pattern string, matches []string) (m []string, e error) { + m = matches + fi, err := Stat(dir) + if err != nil { + return // ignore I/O error + } + if !fi.IsDir() { + return // ignore I/O error + } + + list, err := ReadDir(dir) + if err != nil { + return // ignore I/O error + } + + var names []string + for _, info := range list { + names = append(names, info.Name()) + } + sort.Strings(names) + + for _, n := range names { + matched, err := filepath.Match(pattern, n) + if err != nil { + return m, err + } + if matched { + m = append(m, filepath.Join(dir, n)) + } + } + return +} + +// hasMeta reports whether path contains any of the magic characters +// recognized by filepath.Match. +func hasMeta(path string) bool { + magicChars := `*?[` + if runtime.GOOS != "windows" { + magicChars = `*?[\` + } + return strings.ContainsAny(path, magicChars) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/fsys/fsys_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/fsys/fsys_test.go new file mode 100644 index 0000000000000000000000000000000000000000..612c5213c1279e75224224b29c5328fdfee3c5c4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/fsys/fsys_test.go @@ -0,0 +1,1140 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package fsys + +import ( + "encoding/json" + "errors" + "internal/testenv" + "internal/txtar" + "io" + "io/fs" + "os" + "path/filepath" + "reflect" + "testing" +) + +// initOverlay resets the overlay state to reflect the config. 
+// config should be a text archive string. The comment is the overlay config +// json, and the files, in the archive are laid out in a temp directory +// that cwd is set to. +func initOverlay(t *testing.T, config string) { + t.Helper() + + // Create a temporary directory and chdir to it. + prevwd, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + cwd = filepath.Join(t.TempDir(), "root") + if err := os.Mkdir(cwd, 0777); err != nil { + t.Fatal(err) + } + if err := os.Chdir(cwd); err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + if err := os.Chdir(prevwd); err != nil { + t.Fatal(err) + } + }) + + a := txtar.Parse([]byte(config)) + for _, f := range a.Files { + name := filepath.Join(cwd, f.Name) + if err := os.MkdirAll(filepath.Dir(name), 0777); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(name, f.Data, 0666); err != nil { + t.Fatal(err) + } + } + + var overlayJSON OverlayJSON + if err := json.Unmarshal(a.Comment, &overlayJSON); err != nil { + t.Fatal("parsing overlay JSON:", err) + } + + if err := initFromJSON(overlayJSON); err != nil { + t.Fatal(err) + } + t.Cleanup(func() { overlay = nil }) +} + +func TestIsDir(t *testing.T) { + initOverlay(t, ` +{ + "Replace": { + "subdir2/file2.txt": "overlayfiles/subdir2_file2.txt", + "subdir4": "overlayfiles/subdir4", + "subdir3/file3b.txt": "overlayfiles/subdir3_file3b.txt", + "subdir5": "", + "subdir6": "" + } +} +-- subdir1/file1.txt -- + +-- subdir3/file3a.txt -- +33 +-- subdir4/file4.txt -- +444 +-- overlayfiles/subdir2_file2.txt -- +2 +-- overlayfiles/subdir3_file3b.txt -- +66666 +-- overlayfiles/subdir4 -- +x +-- subdir6/file6.txt -- +six +`) + + testCases := []struct { + path string + want, wantErr bool + }{ + {"", true, true}, + {".", true, false}, + {cwd, true, false}, + {cwd + string(filepath.Separator), true, false}, + // subdir1 is only on disk + {filepath.Join(cwd, "subdir1"), true, false}, + {"subdir1", true, false}, + {"subdir1" + string(filepath.Separator), true, false}, + 
{"subdir1/file1.txt", false, false}, + {"subdir1/doesntexist.txt", false, true}, + {"doesntexist", false, true}, + // subdir2 is only in overlay + {filepath.Join(cwd, "subdir2"), true, false}, + {"subdir2", true, false}, + {"subdir2" + string(filepath.Separator), true, false}, + {"subdir2/file2.txt", false, false}, + {"subdir2/doesntexist.txt", false, true}, + // subdir3 has files on disk and in overlay + {filepath.Join(cwd, "subdir3"), true, false}, + {"subdir3", true, false}, + {"subdir3" + string(filepath.Separator), true, false}, + {"subdir3/file3a.txt", false, false}, + {"subdir3/file3b.txt", false, false}, + {"subdir3/doesntexist.txt", false, true}, + // subdir4 is overlaid with a file + {filepath.Join(cwd, "subdir4"), false, false}, + {"subdir4", false, false}, + {"subdir4" + string(filepath.Separator), false, false}, + {"subdir4/file4.txt", false, false}, + {"subdir4/doesntexist.txt", false, false}, + // subdir5 doesn't exist, and is overlaid with a "delete" entry + {filepath.Join(cwd, "subdir5"), false, false}, + {"subdir5", false, false}, + {"subdir5" + string(filepath.Separator), false, false}, + {"subdir5/file5.txt", false, false}, + {"subdir5/doesntexist.txt", false, false}, + // subdir6 does exist, and is overlaid with a "delete" entry + {filepath.Join(cwd, "subdir6"), false, false}, + {"subdir6", false, false}, + {"subdir6" + string(filepath.Separator), false, false}, + {"subdir6/file6.txt", false, false}, + {"subdir6/doesntexist.txt", false, false}, + } + + for _, tc := range testCases { + got, err := IsDir(tc.path) + if err != nil { + if !tc.wantErr { + t.Errorf("IsDir(%q): got error with string %q, want no error", tc.path, err.Error()) + } + continue + } + if tc.wantErr { + t.Errorf("IsDir(%q): got no error, want error", tc.path) + } + if tc.want != got { + t.Errorf("IsDir(%q) = %v, want %v", tc.path, got, tc.want) + } + } +} + +const readDirOverlay = ` +{ + "Replace": { + "subdir2/file2.txt": "overlayfiles/subdir2_file2.txt", + "subdir4": 
"overlayfiles/subdir4", + "subdir3/file3b.txt": "overlayfiles/subdir3_file3b.txt", + "subdir5": "", + "subdir6/asubsubdir/afile.txt": "overlayfiles/subdir6_asubsubdir_afile.txt", + "subdir6/asubsubdir/zfile.txt": "overlayfiles/subdir6_asubsubdir_zfile.txt", + "subdir6/zsubsubdir/file.txt": "overlayfiles/subdir6_zsubsubdir_file.txt", + "subdir7/asubsubdir/file.txt": "overlayfiles/subdir7_asubsubdir_file.txt", + "subdir7/zsubsubdir/file.txt": "overlayfiles/subdir7_zsubsubdir_file.txt", + "subdir8/doesntexist": "this_file_doesnt_exist_anywhere", + "other/pointstodir": "overlayfiles/this_is_a_directory", + "parentoverwritten/subdir1": "overlayfiles/parentoverwritten_subdir1", + "subdir9/this_file_is_overlaid.txt": "overlayfiles/subdir9_this_file_is_overlaid.txt", + "subdir10/only_deleted_file.txt": "", + "subdir11/deleted.txt": "", + "subdir11": "overlayfiles/subdir11", + "textfile.txt/file.go": "overlayfiles/textfile_txt_file.go" + } +} +-- subdir1/file1.txt -- + +-- subdir3/file3a.txt -- +33 +-- subdir4/file4.txt -- +444 +-- subdir6/file.txt -- +-- subdir6/asubsubdir/file.txt -- +-- subdir6/anothersubsubdir/file.txt -- +-- subdir9/this_file_is_overlaid.txt -- +-- subdir10/only_deleted_file.txt -- +this will be deleted in overlay +-- subdir11/deleted.txt -- +-- parentoverwritten/subdir1/subdir2/subdir3/file.txt -- +-- textfile.txt -- +this will be overridden by textfile.txt/file.go +-- overlayfiles/subdir2_file2.txt -- +2 +-- overlayfiles/subdir3_file3b.txt -- +66666 +-- overlayfiles/subdir4 -- +x +-- overlayfiles/subdir6_asubsubdir_afile.txt -- +-- overlayfiles/subdir6_asubsubdir_zfile.txt -- +-- overlayfiles/subdir6_zsubsubdir_file.txt -- +-- overlayfiles/subdir7_asubsubdir_file.txt -- +-- overlayfiles/subdir7_zsubsubdir_file.txt -- +-- overlayfiles/parentoverwritten_subdir1 -- +x +-- overlayfiles/subdir9_this_file_is_overlaid.txt -- +99999999 +-- overlayfiles/subdir11 -- +-- overlayfiles/this_is_a_directory/file.txt -- +-- overlayfiles/textfile_txt_file.go -- +x +` 
+ +func TestReadDir(t *testing.T) { + initOverlay(t, readDirOverlay) + + type entry struct { + name string + size int64 + isDir bool + } + + testCases := []struct { + dir string + want []entry + }{ + { + ".", []entry{ + {"other", 0, true}, + {"overlayfiles", 0, true}, + {"parentoverwritten", 0, true}, + {"subdir1", 0, true}, + {"subdir10", 0, true}, + {"subdir11", 0, false}, + {"subdir2", 0, true}, + {"subdir3", 0, true}, + {"subdir4", 2, false}, + // no subdir5. + {"subdir6", 0, true}, + {"subdir7", 0, true}, + {"subdir8", 0, true}, + {"subdir9", 0, true}, + {"textfile.txt", 0, true}, + }, + }, + { + "subdir1", []entry{ + {"file1.txt", 1, false}, + }, + }, + { + "subdir2", []entry{ + {"file2.txt", 2, false}, + }, + }, + { + "subdir3", []entry{ + {"file3a.txt", 3, false}, + {"file3b.txt", 6, false}, + }, + }, + { + "subdir6", []entry{ + {"anothersubsubdir", 0, true}, + {"asubsubdir", 0, true}, + {"file.txt", 0, false}, + {"zsubsubdir", 0, true}, + }, + }, + { + "subdir6/asubsubdir", []entry{ + {"afile.txt", 0, false}, + {"file.txt", 0, false}, + {"zfile.txt", 0, false}, + }, + }, + { + "subdir8", []entry{ + {"doesntexist", 0, false}, // entry is returned even if destination file doesn't exist + }, + }, + { + // check that read dir actually redirects files that already exist + // the original this_file_is_overlaid.txt is empty + "subdir9", []entry{ + {"this_file_is_overlaid.txt", 9, false}, + }, + }, + { + "subdir10", []entry{}, + }, + { + "parentoverwritten", []entry{ + {"subdir1", 2, false}, + }, + }, + { + "textfile.txt", []entry{ + {"file.go", 2, false}, + }, + }, + } + + for _, tc := range testCases { + dir, want := tc.dir, tc.want + infos, err := ReadDir(dir) + if err != nil { + t.Errorf("ReadDir(%q): %v", dir, err) + continue + } + // Sorted diff of want and infos. 
+ for len(infos) > 0 || len(want) > 0 { + switch { + case len(want) == 0 || len(infos) > 0 && infos[0].Name() < want[0].name: + t.Errorf("ReadDir(%q): unexpected entry: %s IsDir=%v Size=%v", dir, infos[0].Name(), infos[0].IsDir(), infos[0].Size()) + infos = infos[1:] + case len(infos) == 0 || len(want) > 0 && want[0].name < infos[0].Name(): + t.Errorf("ReadDir(%q): missing entry: %s IsDir=%v Size=%v", dir, want[0].name, want[0].isDir, want[0].size) + want = want[1:] + default: + infoSize := infos[0].Size() + if want[0].isDir { + infoSize = 0 + } + if infos[0].IsDir() != want[0].isDir || want[0].isDir && infoSize != want[0].size { + t.Errorf("ReadDir(%q): %s: IsDir=%v Size=%v, want IsDir=%v Size=%v", dir, want[0].name, infos[0].IsDir(), infoSize, want[0].isDir, want[0].size) + } + infos = infos[1:] + want = want[1:] + } + } + } + + errCases := []string{ + "subdir1/file1.txt", // regular file on disk + "subdir2/file2.txt", // regular file in overlay + "subdir4", // directory overlaid with regular file + "subdir5", // directory deleted in overlay + "parentoverwritten/subdir1/subdir2/subdir3", // parentoverwritten/subdir1 overlaid with regular file + "parentoverwritten/subdir1/subdir2", // parentoverwritten/subdir1 overlaid with regular file + "subdir11", // directory with deleted child, overlaid with regular file + "other/pointstodir", + } + + for _, dir := range errCases { + _, err := ReadDir(dir) + if _, ok := err.(*fs.PathError); !ok { + t.Errorf("ReadDir(%q): err = %T (%v), want fs.PathError", dir, err, err) + } + } +} + +func TestGlob(t *testing.T) { + initOverlay(t, readDirOverlay) + + testCases := []struct { + pattern string + match []string + }{ + { + "*o*", + []string{ + "other", + "overlayfiles", + "parentoverwritten", + }, + }, + { + "subdir2/file2.txt", + []string{ + "subdir2/file2.txt", + }, + }, + { + "*/*.txt", + []string{ + "overlayfiles/subdir2_file2.txt", + "overlayfiles/subdir3_file3b.txt", + "overlayfiles/subdir6_asubsubdir_afile.txt", + 
"overlayfiles/subdir6_asubsubdir_zfile.txt", + "overlayfiles/subdir6_zsubsubdir_file.txt", + "overlayfiles/subdir7_asubsubdir_file.txt", + "overlayfiles/subdir7_zsubsubdir_file.txt", + "overlayfiles/subdir9_this_file_is_overlaid.txt", + "subdir1/file1.txt", + "subdir2/file2.txt", + "subdir3/file3a.txt", + "subdir3/file3b.txt", + "subdir6/file.txt", + "subdir9/this_file_is_overlaid.txt", + }, + }, + } + + for _, tc := range testCases { + pattern := tc.pattern + match, err := Glob(pattern) + if err != nil { + t.Errorf("Glob(%q): %v", pattern, err) + continue + } + want := tc.match + for i, name := range want { + if name != tc.pattern { + want[i] = filepath.FromSlash(name) + } + } + for len(match) > 0 || len(want) > 0 { + switch { + case len(match) == 0 || len(want) > 0 && want[0] < match[0]: + t.Errorf("Glob(%q): missing match: %s", pattern, want[0]) + want = want[1:] + case len(want) == 0 || len(match) > 0 && match[0] < want[0]: + t.Errorf("Glob(%q): extra match: %s", pattern, match[0]) + match = match[1:] + default: + want = want[1:] + match = match[1:] + } + } + } +} + +func TestOverlayPath(t *testing.T) { + initOverlay(t, ` +{ + "Replace": { + "subdir2/file2.txt": "overlayfiles/subdir2_file2.txt", + "subdir3/doesntexist": "this_file_doesnt_exist_anywhere", + "subdir4/this_file_is_overlaid.txt": "overlayfiles/subdir4_this_file_is_overlaid.txt", + "subdir5/deleted.txt": "", + "parentoverwritten/subdir1": "" + } +} +-- subdir1/file1.txt -- +file 1 +-- subdir4/this_file_is_overlaid.txt -- +these contents are replaced by the overlay +-- parentoverwritten/subdir1/subdir2/subdir3/file.txt -- +-- subdir5/deleted.txt -- +deleted +-- overlayfiles/subdir2_file2.txt -- +file 2 +-- overlayfiles/subdir4_this_file_is_overlaid.txt -- +99999999 +`) + + testCases := []struct { + path string + wantPath string + wantOK bool + }{ + {"subdir1/file1.txt", "subdir1/file1.txt", false}, + // OverlayPath returns false for directories + {"subdir2", "subdir2", false}, + {"subdir2/file2.txt", 
filepath.Join(cwd, "overlayfiles/subdir2_file2.txt"), true}, + // OverlayPath doesn't stat a file to see if it exists, so it happily returns + // the 'to' path and true even if the 'to' path doesn't exist on disk. + {"subdir3/doesntexist", filepath.Join(cwd, "this_file_doesnt_exist_anywhere"), true}, + // Like the subdir2/file2.txt case above, but subdir4 exists on disk, but subdir2 does not. + {"subdir4/this_file_is_overlaid.txt", filepath.Join(cwd, "overlayfiles/subdir4_this_file_is_overlaid.txt"), true}, + {"subdir5", "subdir5", false}, + {"subdir5/deleted.txt", "", true}, + } + + for _, tc := range testCases { + gotPath, gotOK := OverlayPath(tc.path) + if gotPath != tc.wantPath || gotOK != tc.wantOK { + t.Errorf("OverlayPath(%q): got %v, %v; want %v, %v", + tc.path, gotPath, gotOK, tc.wantPath, tc.wantOK) + } + } +} + +func TestOpen(t *testing.T) { + initOverlay(t, ` +{ + "Replace": { + "subdir2/file2.txt": "overlayfiles/subdir2_file2.txt", + "subdir3/doesntexist": "this_file_doesnt_exist_anywhere", + "subdir4/this_file_is_overlaid.txt": "overlayfiles/subdir4_this_file_is_overlaid.txt", + "subdir5/deleted.txt": "", + "parentoverwritten/subdir1": "", + "childoverlay/subdir1.txt/child.txt": "overlayfiles/child.txt", + "subdir11/deleted.txt": "", + "subdir11": "overlayfiles/subdir11", + "parentdeleted": "", + "parentdeleted/file.txt": "overlayfiles/parentdeleted_file.txt" + } +} +-- subdir11/deleted.txt -- +-- subdir1/file1.txt -- +file 1 +-- subdir4/this_file_is_overlaid.txt -- +these contents are replaced by the overlay +-- parentoverwritten/subdir1/subdir2/subdir3/file.txt -- +-- childoverlay/subdir1.txt -- +this file doesn't exist because the path +childoverlay/subdir1.txt/child.txt is in the overlay +-- subdir5/deleted.txt -- +deleted +-- parentdeleted -- +this will be deleted so that parentdeleted/file.txt can exist +-- overlayfiles/subdir2_file2.txt -- +file 2 +-- overlayfiles/subdir4_this_file_is_overlaid.txt -- +99999999 +-- overlayfiles/child.txt -- +-- 
overlayfiles/subdir11 -- +11 +-- overlayfiles/parentdeleted_file.txt -- +this can exist because the parent directory is deleted +`) + + testCases := []struct { + path string + wantContents string + isErr bool + }{ + {"subdir1/file1.txt", "file 1\n", false}, + {"subdir2/file2.txt", "file 2\n", false}, + {"subdir3/doesntexist", "", true}, + {"subdir4/this_file_is_overlaid.txt", "99999999\n", false}, + {"subdir5/deleted.txt", "", true}, + {"parentoverwritten/subdir1/subdir2/subdir3/file.txt", "", true}, + {"childoverlay/subdir1.txt", "", true}, + {"subdir11", "11\n", false}, + {"parentdeleted/file.txt", "this can exist because the parent directory is deleted\n", false}, + } + + for _, tc := range testCases { + f, err := Open(tc.path) + if tc.isErr { + if err == nil { + f.Close() + t.Errorf("Open(%q): got no error, but want error", tc.path) + } + continue + } + if err != nil { + t.Errorf("Open(%q): got error %v, want nil", tc.path, err) + continue + } + contents, err := io.ReadAll(f) + if err != nil { + t.Errorf("unexpected error reading contents of file: %v", err) + } + if string(contents) != tc.wantContents { + t.Errorf("contents of file opened with Open(%q): got %q, want %q", + tc.path, contents, tc.wantContents) + } + f.Close() + } +} + +func TestIsDirWithGoFiles(t *testing.T) { + initOverlay(t, ` +{ + "Replace": { + "goinoverlay/file.go": "dummy", + "directory/removed/by/file": "dummy", + "directory_with_go_dir/dir.go/file.txt": "dummy", + "otherdirectory/deleted.go": "", + "nonexistentdirectory/deleted.go": "", + "textfile.txt/file.go": "dummy" + } +} +-- dummy -- +a destination file for the overlay entries to point to +contents don't matter for this test +-- nogo/file.txt -- +-- goondisk/file.go -- +-- goinoverlay/file.txt -- +-- directory/removed/by/file/in/overlay/file.go -- +-- otherdirectory/deleted.go -- +-- textfile.txt -- +`) + + testCases := []struct { + dir string + want bool + wantErr bool + }{ + {"nogo", false, false}, + {"goondisk", true, false}, + 
{"goinoverlay", true, false}, + {"directory/removed/by/file/in/overlay", false, false}, + {"directory_with_go_dir", false, false}, + {"otherdirectory", false, false}, + {"nonexistentdirectory", false, false}, + {"textfile.txt", true, false}, + } + + for _, tc := range testCases { + got, gotErr := IsDirWithGoFiles(tc.dir) + if tc.wantErr { + if gotErr == nil { + t.Errorf("IsDirWithGoFiles(%q): got %v, %v; want non-nil error", tc.dir, got, gotErr) + } + continue + } + if gotErr != nil { + t.Errorf("IsDirWithGoFiles(%q): got %v, %v; want nil error", tc.dir, got, gotErr) + } + if got != tc.want { + t.Errorf("IsDirWithGoFiles(%q) = %v; want %v", tc.dir, got, tc.want) + } + } +} + +func TestWalk(t *testing.T) { + // The root of the walk must be a name with an actual basename, not just ".". + // Walk uses Lstat to obtain the name of the root, and Lstat on platforms + // other than Plan 9 reports the name "." instead of the actual base name of + // the directory. (See https://golang.org/issue/42115.) 
+ + type file struct { + path string + name string + size int64 + mode fs.FileMode + isDir bool + } + testCases := []struct { + name string + overlay string + root string + wantFiles []file + }{ + {"no overlay", ` +{} +-- dir/file.txt -- +`, + "dir", + []file{ + {"dir", "dir", 0, fs.ModeDir | 0700, true}, + {"dir/file.txt", "file.txt", 0, 0600, false}, + }, + }, + {"overlay with different file", ` +{ + "Replace": { + "dir/file.txt": "dir/other.txt" + } +} +-- dir/file.txt -- +-- dir/other.txt -- +contents of other file +`, + "dir", + []file{ + {"dir", "dir", 0, fs.ModeDir | 0500, true}, + {"dir/file.txt", "file.txt", 23, 0600, false}, + {"dir/other.txt", "other.txt", 23, 0600, false}, + }, + }, + {"overlay with new file", ` +{ + "Replace": { + "dir/file.txt": "dir/other.txt" + } +} +-- dir/other.txt -- +contents of other file +`, + "dir", + []file{ + {"dir", "dir", 0, fs.ModeDir | 0500, true}, + {"dir/file.txt", "file.txt", 23, 0600, false}, + {"dir/other.txt", "other.txt", 23, 0600, false}, + }, + }, + {"overlay with new directory", ` +{ + "Replace": { + "dir/subdir/file.txt": "dir/other.txt" + } +} +-- dir/other.txt -- +contents of other file +`, + "dir", + []file{ + {"dir", "dir", 0, fs.ModeDir | 0500, true}, + {"dir/other.txt", "other.txt", 23, 0600, false}, + {"dir/subdir", "subdir", 0, fs.ModeDir | 0500, true}, + {"dir/subdir/file.txt", "file.txt", 23, 0600, false}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + initOverlay(t, tc.overlay) + + var got []file + Walk(tc.root, func(path string, info fs.FileInfo, err error) error { + got = append(got, file{path, info.Name(), info.Size(), info.Mode(), info.IsDir()}) + return nil + }) + + if len(got) != len(tc.wantFiles) { + t.Errorf("Walk: saw %#v in walk; want %#v", got, tc.wantFiles) + } + for i := 0; i < len(got) && i < len(tc.wantFiles); i++ { + wantPath := filepath.FromSlash(tc.wantFiles[i].path) + if got[i].path != wantPath { + t.Errorf("path of file #%v in walk, got 
%q, want %q", i, got[i].path, wantPath) + } + if got[i].name != tc.wantFiles[i].name { + t.Errorf("name of file #%v in walk, got %q, want %q", i, got[i].name, tc.wantFiles[i].name) + } + if got[i].mode&(fs.ModeDir|0700) != tc.wantFiles[i].mode { + t.Errorf("mode&(fs.ModeDir|0700) for mode of file #%v in walk, got %v, want %v", i, got[i].mode&(fs.ModeDir|0700), tc.wantFiles[i].mode) + } + if got[i].isDir != tc.wantFiles[i].isDir { + t.Errorf("isDir for file #%v in walk, got %v, want %v", i, got[i].isDir, tc.wantFiles[i].isDir) + } + if tc.wantFiles[i].isDir { + continue // don't check size for directories + } + if got[i].size != tc.wantFiles[i].size { + t.Errorf("size of file #%v in walk, got %v, want %v", i, got[i].size, tc.wantFiles[i].size) + } + } + }) + } +} + +func TestWalkSkipDir(t *testing.T) { + initOverlay(t, ` +{ + "Replace": { + "dir/skip/file.go": "dummy.txt", + "dir/dontskip/file.go": "dummy.txt", + "dir/dontskip/skip/file.go": "dummy.txt" + } +} +-- dummy.txt -- +`) + + var seen []string + Walk("dir", func(path string, info fs.FileInfo, err error) error { + seen = append(seen, filepath.ToSlash(path)) + if info.Name() == "skip" { + return filepath.SkipDir + } + return nil + }) + + wantSeen := []string{"dir", "dir/dontskip", "dir/dontskip/file.go", "dir/dontskip/skip", "dir/skip"} + + if len(seen) != len(wantSeen) { + t.Errorf("paths seen in walk: got %v entries; want %v entries", len(seen), len(wantSeen)) + } + + for i := 0; i < len(seen) && i < len(wantSeen); i++ { + if seen[i] != wantSeen[i] { + t.Errorf("path #%v seen walking tree: want %q, got %q", i, seen[i], wantSeen[i]) + } + } +} + +func TestWalkSkipAll(t *testing.T) { + initOverlay(t, ` +{ + "Replace": { + "dir/subdir1/foo1": "dummy.txt", + "dir/subdir1/foo2": "dummy.txt", + "dir/subdir1/foo3": "dummy.txt", + "dir/subdir2/foo4": "dummy.txt", + "dir/zzlast": "dummy.txt" + } +} +-- dummy.txt -- +`) + + var seen []string + Walk("dir", func(path string, info fs.FileInfo, err error) error { + seen 
= append(seen, filepath.ToSlash(path)) + if info.Name() == "foo2" { + return filepath.SkipAll + } + return nil + }) + + wantSeen := []string{"dir", "dir/subdir1", "dir/subdir1/foo1", "dir/subdir1/foo2"} + + if len(seen) != len(wantSeen) { + t.Errorf("paths seen in walk: got %v entries; want %v entries", len(seen), len(wantSeen)) + } + + for i := 0; i < len(seen) && i < len(wantSeen); i++ { + if seen[i] != wantSeen[i] { + t.Errorf("path %#v seen walking tree: got %q, want %q", i, seen[i], wantSeen[i]) + } + } +} + +func TestWalkError(t *testing.T) { + initOverlay(t, "{}") + + alreadyCalled := false + err := Walk("foo", func(path string, info fs.FileInfo, err error) error { + if alreadyCalled { + t.Fatal("expected walk function to be called exactly once, but it was called more than once") + } + alreadyCalled = true + return errors.New("returned from function") + }) + if !alreadyCalled { + t.Fatal("expected walk function to be called exactly once, but it was never called") + + } + if err == nil { + t.Fatalf("Walk: got no error, want error") + } + if err.Error() != "returned from function" { + t.Fatalf("Walk: got error %v, want \"returned from function\" error", err) + } +} + +func TestWalkSymlink(t *testing.T) { + testenv.MustHaveSymlink(t) + + initOverlay(t, `{ + "Replace": {"overlay_symlink/file": "symlink/file"} +} +-- dir/file --`) + + // Create symlink + if err := os.Symlink("dir", "symlink"); err != nil { + t.Error(err) + } + + testCases := []struct { + name string + dir string + wantFiles []string + }{ + {"control", "dir", []string{"dir", filepath.Join("dir", "file")}}, + // ensure Walk doesn't walk into the directory pointed to by the symlink + // (because it's supposed to use Lstat instead of Stat). + {"symlink_to_dir", "symlink", []string{"symlink"}}, + {"overlay_to_symlink_to_dir", "overlay_symlink", []string{"overlay_symlink", filepath.Join("overlay_symlink", "file")}}, + + // However, adding filepath.Separator should cause the link to be resolved. 
+ {"symlink_with_slash", "symlink" + string(filepath.Separator), []string{"symlink" + string(filepath.Separator), filepath.Join("symlink", "file")}}, + {"overlay_to_symlink_to_dir", "overlay_symlink" + string(filepath.Separator), []string{"overlay_symlink" + string(filepath.Separator), filepath.Join("overlay_symlink", "file")}}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var got []string + + err := Walk(tc.dir, func(path string, info fs.FileInfo, err error) error { + t.Logf("walk %q", path) + got = append(got, path) + if err != nil { + t.Errorf("walkfn: got non nil err argument: %v, want nil err argument", err) + } + return nil + }) + if err != nil { + t.Errorf("Walk: got error %q, want nil", err) + } + + if !reflect.DeepEqual(got, tc.wantFiles) { + t.Errorf("files examined by walk: got %v, want %v", got, tc.wantFiles) + } + }) + } + +} + +func TestLstat(t *testing.T) { + type file struct { + name string + size int64 + mode fs.FileMode // mode & (fs.ModeDir|0x700): only check 'user' permissions + isDir bool + } + + testCases := []struct { + name string + overlay string + path string + + want file + wantErr bool + }{ + { + "regular_file", + `{} +-- file.txt -- +contents`, + "file.txt", + file{"file.txt", 9, 0600, false}, + false, + }, + { + "new_file_in_overlay", + `{"Replace": {"file.txt": "dummy.txt"}} +-- dummy.txt -- +contents`, + "file.txt", + file{"file.txt", 9, 0600, false}, + false, + }, + { + "file_replaced_in_overlay", + `{"Replace": {"file.txt": "dummy.txt"}} +-- file.txt -- +-- dummy.txt -- +contents`, + "file.txt", + file{"file.txt", 9, 0600, false}, + false, + }, + { + "file_cant_exist", + `{"Replace": {"deleted": "dummy.txt"}} +-- deleted/file.txt -- +-- dummy.txt -- +`, + "deleted/file.txt", + file{}, + true, + }, + { + "deleted", + `{"Replace": {"deleted": ""}} +-- deleted -- +`, + "deleted", + file{}, + true, + }, + { + "dir_on_disk", + `{} +-- dir/foo.txt -- +`, + "dir", + file{"dir", 0, 0700 | fs.ModeDir, 
true}, + false, + }, + { + "dir_in_overlay", + `{"Replace": {"dir/file.txt": "dummy.txt"}} +-- dummy.txt -- +`, + "dir", + file{"dir", 0, 0500 | fs.ModeDir, true}, + false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + initOverlay(t, tc.overlay) + got, err := Lstat(tc.path) + if tc.wantErr { + if err == nil { + t.Errorf("lstat(%q): got no error, want error", tc.path) + } + return + } + if err != nil { + t.Fatalf("lstat(%q): got error %v, want no error", tc.path, err) + } + if got.Name() != tc.want.name { + t.Errorf("lstat(%q).Name(): got %q, want %q", tc.path, got.Name(), tc.want.name) + } + if got.Mode()&(fs.ModeDir|0700) != tc.want.mode { + t.Errorf("lstat(%q).Mode()&(fs.ModeDir|0700): got %v, want %v", tc.path, got.Mode()&(fs.ModeDir|0700), tc.want.mode) + } + if got.IsDir() != tc.want.isDir { + t.Errorf("lstat(%q).IsDir(): got %v, want %v", tc.path, got.IsDir(), tc.want.isDir) + } + if tc.want.isDir { + return // don't check size for directories + } + if got.Size() != tc.want.size { + t.Errorf("lstat(%q).Size(): got %v, want %v", tc.path, got.Size(), tc.want.size) + } + }) + } +} + +func TestStat(t *testing.T) { + testenv.MustHaveSymlink(t) + + type file struct { + name string + size int64 + mode os.FileMode // mode & (os.ModeDir|0x700): only check 'user' permissions + isDir bool + } + + testCases := []struct { + name string + overlay string + path string + + want file + wantErr bool + }{ + { + "regular_file", + `{} +-- file.txt -- +contents`, + "file.txt", + file{"file.txt", 9, 0600, false}, + false, + }, + { + "new_file_in_overlay", + `{"Replace": {"file.txt": "dummy.txt"}} +-- dummy.txt -- +contents`, + "file.txt", + file{"file.txt", 9, 0600, false}, + false, + }, + { + "file_replaced_in_overlay", + `{"Replace": {"file.txt": "dummy.txt"}} +-- file.txt -- +-- dummy.txt -- +contents`, + "file.txt", + file{"file.txt", 9, 0600, false}, + false, + }, + { + "file_cant_exist", + `{"Replace": {"deleted": "dummy.txt"}} +-- 
deleted/file.txt -- +-- dummy.txt -- +`, + "deleted/file.txt", + file{}, + true, + }, + { + "deleted", + `{"Replace": {"deleted": ""}} +-- deleted -- +`, + "deleted", + file{}, + true, + }, + { + "dir_on_disk", + `{} +-- dir/foo.txt -- +`, + "dir", + file{"dir", 0, 0700 | os.ModeDir, true}, + false, + }, + { + "dir_in_overlay", + `{"Replace": {"dir/file.txt": "dummy.txt"}} +-- dummy.txt -- +`, + "dir", + file{"dir", 0, 0500 | os.ModeDir, true}, + false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + initOverlay(t, tc.overlay) + got, err := Stat(tc.path) + if tc.wantErr { + if err == nil { + t.Errorf("Stat(%q): got no error, want error", tc.path) + } + return + } + if err != nil { + t.Fatalf("Stat(%q): got error %v, want no error", tc.path, err) + } + if got.Name() != tc.want.name { + t.Errorf("Stat(%q).Name(): got %q, want %q", tc.path, got.Name(), tc.want.name) + } + if got.Mode()&(os.ModeDir|0700) != tc.want.mode { + t.Errorf("Stat(%q).Mode()&(os.ModeDir|0700): got %v, want %v", tc.path, got.Mode()&(os.ModeDir|0700), tc.want.mode) + } + if got.IsDir() != tc.want.isDir { + t.Errorf("Stat(%q).IsDir(): got %v, want %v", tc.path, got.IsDir(), tc.want.isDir) + } + if tc.want.isDir { + return // don't check size for directories + } + if got.Size() != tc.want.size { + t.Errorf("Stat(%q).Size(): got %v, want %v", tc.path, got.Size(), tc.want.size) + } + }) + } +} + +func TestStatSymlink(t *testing.T) { + testenv.MustHaveSymlink(t) + + initOverlay(t, `{ + "Replace": {"file.go": "symlink"} +} +-- to.go -- +0123456789 +`) + + // Create symlink + if err := os.Symlink("to.go", "symlink"); err != nil { + t.Error(err) + } + + f := "file.go" + fi, err := Stat(f) + if err != nil { + t.Errorf("Stat(%q): got error %q, want nil error", f, err) + } + + if !fi.Mode().IsRegular() { + t.Errorf("Stat(%q).Mode(): got %v, want regular mode", f, fi.Mode()) + } + + if fi.Size() != 11 { + t.Errorf("Stat(%q).Size(): got %v, want 11", f, fi.Size()) + } +} 
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/generate/generate.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/generate/generate.go new file mode 100644 index 0000000000000000000000000000000000000000..6371353e2024356305bf50e606af233cc4b4641d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/generate/generate.go @@ -0,0 +1,510 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package generate implements the “go generate” command. +package generate + +import ( + "bufio" + "bytes" + "context" + "fmt" + "go/parser" + "go/token" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "slices" + "strconv" + "strings" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/load" + "cmd/go/internal/modload" + "cmd/go/internal/str" + "cmd/go/internal/work" +) + +var CmdGenerate = &base.Command{ + Run: runGenerate, + UsageLine: "go generate [-run regexp] [-n] [-v] [-x] [build flags] [file.go... | packages]", + Short: "generate Go files by processing source", + Long: ` +Generate runs commands described by directives within existing +files. Those commands can run any process but the intent is to +create or update Go source files. + +Go generate is never run automatically by go build, go test, +and so on. It must be run explicitly. + +Go generate scans the file for directives, which are lines of +the form, + + //go:generate command argument... + +(note: no leading spaces and no space in "//go") where command +is the generator to be run, corresponding to an executable file +that can be run locally. It must either be in the shell path +(gofmt), a fully qualified path (/usr/you/bin/mytool), or a +command alias, described below. + +Note that go generate does not parse the file, so lines that look +like directives in comments or multiline strings will be treated +as directives. 
+ +The arguments to the directive are space-separated tokens or +double-quoted strings passed to the generator as individual +arguments when it is run. + +Quoted strings use Go syntax and are evaluated before execution; a +quoted string appears as a single argument to the generator. + +To convey to humans and machine tools that code is generated, +generated source should have a line that matches the following +regular expression (in Go syntax): + + ^// Code generated .* DO NOT EDIT\.$ + +This line must appear before the first non-comment, non-blank +text in the file. + +Go generate sets several variables when it runs the generator: + + $GOARCH + The execution architecture (arm, amd64, etc.) + $GOOS + The execution operating system (linux, windows, etc.) + $GOFILE + The base name of the file. + $GOLINE + The line number of the directive in the source file. + $GOPACKAGE + The name of the package of the file containing the directive. + $GOROOT + The GOROOT directory for the 'go' command that invoked the + generator, containing the Go toolchain and standard library. + $DOLLAR + A dollar sign. + $PATH + The $PATH of the parent process, with $GOROOT/bin + placed at the beginning. This causes generators + that execute 'go' commands to use the same 'go' + as the parent 'go generate' command. + +Other than variable substitution and quoted-string evaluation, no +special processing such as "globbing" is performed on the command +line. + +As a last step before running the command, any invocations of any +environment variables with alphanumeric names, such as $GOFILE or +$HOME, are expanded throughout the command line. The syntax for +variable expansion is $NAME on all operating systems. Due to the +order of evaluation, variables are expanded even inside quoted +strings. If the variable NAME is not set, $NAME expands to the +empty string. + +A directive of the form, + + //go:generate -command xxx args... 
+ +specifies, for the remainder of this source file only, that the +string xxx represents the command identified by the arguments. This +can be used to create aliases or to handle multiword generators. +For example, + + //go:generate -command foo go tool foo + +specifies that the command "foo" represents the generator +"go tool foo". + +Generate processes packages in the order given on the command line, +one at a time. If the command line lists .go files from a single directory, +they are treated as a single package. Within a package, generate processes the +source files in a package in file name order, one at a time. Within +a source file, generate runs generators in the order they appear +in the file, one at a time. The go generate tool also sets the build +tag "generate" so that files may be examined by go generate but ignored +during build. + +For packages with invalid code, generate processes only source files with a +valid package clause. + +If any generator returns an error exit status, "go generate" skips +all further processing for that package. + +The generator is run in the package's source directory. + +Go generate accepts two specific flags: + + -run="" + if non-empty, specifies a regular expression to select + directives whose full original source text (excluding + any trailing spaces and final newline) matches the + expression. + + -skip="" + if non-empty, specifies a regular expression to suppress + directives whose full original source text (excluding + any trailing spaces and final newline) matches the + expression. If a directive matches both the -run and + the -skip arguments, it is skipped. + +It also accepts the standard build flags including -v, -n, and -x. +The -v flag prints the names of packages and files as they are +processed. +The -n flag prints commands that would be executed. +The -x flag prints commands as they are executed. + +For more about build flags, see 'go help build'. 
+ +For more about specifying packages, see 'go help packages'. + `, +} + +var ( + generateRunFlag string // generate -run flag + generateRunRE *regexp.Regexp // compiled expression for -run + + generateSkipFlag string // generate -skip flag + generateSkipRE *regexp.Regexp // compiled expression for -skip +) + +func init() { + work.AddBuildFlags(CmdGenerate, work.DefaultBuildFlags) + CmdGenerate.Flag.StringVar(&generateRunFlag, "run", "", "") + CmdGenerate.Flag.StringVar(&generateSkipFlag, "skip", "", "") +} + +func runGenerate(ctx context.Context, cmd *base.Command, args []string) { + modload.InitWorkfile() + + if generateRunFlag != "" { + var err error + generateRunRE, err = regexp.Compile(generateRunFlag) + if err != nil { + log.Fatalf("generate: %s", err) + } + } + if generateSkipFlag != "" { + var err error + generateSkipRE, err = regexp.Compile(generateSkipFlag) + if err != nil { + log.Fatalf("generate: %s", err) + } + } + + cfg.BuildContext.BuildTags = append(cfg.BuildContext.BuildTags, "generate") + + // Even if the arguments are .go files, this loop suffices. + printed := false + pkgOpts := load.PackageOpts{IgnoreImports: true} + for _, pkg := range load.PackagesAndErrors(ctx, pkgOpts, args) { + if modload.Enabled() && pkg.Module != nil && !pkg.Module.Main { + if !printed { + fmt.Fprintf(os.Stderr, "go: not generating in packages in dependency modules\n") + printed = true + } + continue + } + + if pkg.Error != nil && len(pkg.InternalAllGoFiles()) == 0 { + // A directory only contains a Go package if it has at least + // one .go source file, so the fact that there are no files + // implies that the package couldn't be found. + base.Errorf("%v", pkg.Error) + } + + for _, file := range pkg.InternalGoFiles() { + if !generate(file) { + break + } + } + + for _, file := range pkg.InternalXGoFiles() { + if !generate(file) { + break + } + } + } + base.ExitIfErrors() +} + +// generate runs the generation directives for a single file. 
+func generate(absFile string) bool { + src, err := os.ReadFile(absFile) + if err != nil { + log.Fatalf("generate: %s", err) + } + + // Parse package clause + filePkg, err := parser.ParseFile(token.NewFileSet(), "", src, parser.PackageClauseOnly) + if err != nil { + // Invalid package clause - ignore file. + return true + } + + g := &Generator{ + r: bytes.NewReader(src), + path: absFile, + pkg: filePkg.Name.String(), + commands: make(map[string][]string), + } + return g.run() +} + +// A Generator represents the state of a single Go source file +// being scanned for generator commands. +type Generator struct { + r io.Reader + path string // full rooted path name. + dir string // full rooted directory of file. + file string // base name of file. + pkg string + commands map[string][]string + lineNum int // current line number. + env []string +} + +// run runs the generators in the current file. +func (g *Generator) run() (ok bool) { + // Processing below here calls g.errorf on failure, which does panic(stop). + // If we encounter an error, we abort the package. + defer func() { + e := recover() + if e != nil { + ok = false + if e != stop { + panic(e) + } + base.SetExitStatus(1) + } + }() + g.dir, g.file = filepath.Split(g.path) + g.dir = filepath.Clean(g.dir) // No final separator please. + if cfg.BuildV { + fmt.Fprintf(os.Stderr, "%s\n", base.ShortPath(g.path)) + } + + // Scan for lines that start "//go:generate". + // Can't use bufio.Scanner because it can't handle long lines, + // which are likely to appear when using generate. + input := bufio.NewReader(g.r) + var err error + // One line per loop. + for { + g.lineNum++ // 1-indexed. + var buf []byte + buf, err = input.ReadSlice('\n') + if err == bufio.ErrBufferFull { + // Line too long - consume and ignore. 
+ if isGoGenerate(buf) { + g.errorf("directive too long") + } + for err == bufio.ErrBufferFull { + _, err = input.ReadSlice('\n') + } + if err != nil { + break + } + continue + } + + if err != nil { + // Check for marker at EOF without final \n. + if err == io.EOF && isGoGenerate(buf) { + err = io.ErrUnexpectedEOF + } + break + } + + if !isGoGenerate(buf) { + continue + } + if generateRunFlag != "" && !generateRunRE.Match(bytes.TrimSpace(buf)) { + continue + } + if generateSkipFlag != "" && generateSkipRE.Match(bytes.TrimSpace(buf)) { + continue + } + + g.setEnv() + words := g.split(string(buf)) + if len(words) == 0 { + g.errorf("no arguments to directive") + } + if words[0] == "-command" { + g.setShorthand(words) + continue + } + // Run the command line. + if cfg.BuildN || cfg.BuildX { + fmt.Fprintf(os.Stderr, "%s\n", strings.Join(words, " ")) + } + if cfg.BuildN { + continue + } + g.exec(words) + } + if err != nil && err != io.EOF { + g.errorf("error reading %s: %s", base.ShortPath(g.path), err) + } + return true +} + +func isGoGenerate(buf []byte) bool { + return bytes.HasPrefix(buf, []byte("//go:generate ")) || bytes.HasPrefix(buf, []byte("//go:generate\t")) +} + +// setEnv sets the extra environment variables used when executing a +// single go:generate command. +func (g *Generator) setEnv() { + env := []string{ + "GOROOT=" + cfg.GOROOT, + "GOARCH=" + cfg.BuildContext.GOARCH, + "GOOS=" + cfg.BuildContext.GOOS, + "GOFILE=" + g.file, + "GOLINE=" + strconv.Itoa(g.lineNum), + "GOPACKAGE=" + g.pkg, + "DOLLAR=" + "$", + } + env = base.AppendPATH(env) + env = base.AppendPWD(env, g.dir) + g.env = env +} + +// split breaks the line into words, evaluating quoted +// strings and evaluating environment variables. +// The initial //go:generate element is present in line. +func (g *Generator) split(line string) []string { + // Parse line, obeying quoted strings. + var words []string + line = line[len("//go:generate ") : len(line)-1] // Drop preamble and final newline. 
+ // There may still be a carriage return. + if len(line) > 0 && line[len(line)-1] == '\r' { + line = line[:len(line)-1] + } + // One (possibly quoted) word per iteration. +Words: + for { + line = strings.TrimLeft(line, " \t") + if len(line) == 0 { + break + } + if line[0] == '"' { + for i := 1; i < len(line); i++ { + c := line[i] // Only looking for ASCII so this is OK. + switch c { + case '\\': + if i+1 == len(line) { + g.errorf("bad backslash") + } + i++ // Absorb next byte (If it's a multibyte we'll get an error in Unquote). + case '"': + word, err := strconv.Unquote(line[0 : i+1]) + if err != nil { + g.errorf("bad quoted string") + } + words = append(words, word) + line = line[i+1:] + // Check the next character is space or end of line. + if len(line) > 0 && line[0] != ' ' && line[0] != '\t' { + g.errorf("expect space after quoted argument") + } + continue Words + } + } + g.errorf("mismatched quoted string") + } + i := strings.IndexAny(line, " \t") + if i < 0 { + i = len(line) + } + words = append(words, line[0:i]) + line = line[i:] + } + // Substitute command if required. + if len(words) > 0 && g.commands[words[0]] != nil { + // Replace 0th word by command substitution. + // + // Force a copy of the command definition to + // ensure words doesn't end up as a reference + // to the g.commands content. + tmpCmdWords := append([]string(nil), (g.commands[words[0]])...) + words = append(tmpCmdWords, words[1:]...) + } + // Substitute environment variables. + for i, word := range words { + words[i] = os.Expand(word, g.expandVar) + } + return words +} + +var stop = fmt.Errorf("error in generation") + +// errorf logs an error message prefixed with the file and line number. +// It then exits the program (with exit status 1) because generation stops +// at the first error. 
+func (g *Generator) errorf(format string, args ...any) { + fmt.Fprintf(os.Stderr, "%s:%d: %s\n", base.ShortPath(g.path), g.lineNum, + fmt.Sprintf(format, args...)) + panic(stop) +} + +// expandVar expands the $XXX invocation in word. It is called +// by os.Expand. +func (g *Generator) expandVar(word string) string { + w := word + "=" + for _, e := range g.env { + if strings.HasPrefix(e, w) { + return e[len(w):] + } + } + return os.Getenv(word) +} + +// setShorthand installs a new shorthand as defined by a -command directive. +func (g *Generator) setShorthand(words []string) { + // Create command shorthand. + if len(words) == 1 { + g.errorf("no command specified for -command") + } + command := words[1] + if g.commands[command] != nil { + g.errorf("command %q multiply defined", command) + } + g.commands[command] = slices.Clip(words[2:]) +} + +// exec runs the command specified by the argument. The first word is +// the command name itself. +func (g *Generator) exec(words []string) { + path := words[0] + if path != "" && !strings.Contains(path, string(os.PathSeparator)) { + // If a generator says '//go:generate go run ' it almost certainly + // intends to use the same 'go' as 'go generate' itself. + // Prefer to resolve the binary from GOROOT/bin, and for consistency + // prefer to resolve any other commands there too. + gorootBinPath, err := cfg.LookPath(filepath.Join(cfg.GOROOTbin, path)) + if err == nil { + path = gorootBinPath + } + } + cmd := exec.Command(path, words[1:]...) + cmd.Args[0] = words[0] // Overwrite with the original in case it was rewritten above. + + // Standard in and out of generator should be the usual. + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + // Run the command in the package directory. 
+ cmd.Dir = g.dir + cmd.Env = str.StringList(cfg.OrigEnv, g.env) + err := cmd.Run() + if err != nil { + g.errorf("running %q: %s", words[0], err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/generate/generate_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/generate/generate_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d61ecf104a1ec569dc5143993552cbdd52eb788f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/generate/generate_test.go @@ -0,0 +1,259 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package generate + +import ( + "internal/testenv" + "os" + "path/filepath" + "reflect" + "runtime" + "testing" +) + +type splitTest struct { + in string + out []string +} + +// Same as above, except including source line number to set +type splitTestWithLine struct { + in string + out []string + lineNumber int +} + +const anyLineNo = 0 + +var splitTests = []splitTest{ + {"", nil}, + {"x", []string{"x"}}, + {" a b\tc ", []string{"a", "b", "c"}}, + {` " a " `, []string{" a "}}, + {"$GOARCH", []string{runtime.GOARCH}}, + {"$GOOS", []string{runtime.GOOS}}, + {"$GOFILE", []string{"proc.go"}}, + {"$GOPACKAGE", []string{"sys"}}, + {"a $XXNOTDEFINEDXX b", []string{"a", "", "b"}}, + {"/$XXNOTDEFINED/", []string{"//"}}, + {"/$DOLLAR/", []string{"/$/"}}, + {"yacc -o $GOARCH/yacc_$GOFILE", []string{"go", "tool", "yacc", "-o", runtime.GOARCH + "/yacc_proc.go"}}, +} + +func TestGenerateCommandParse(t *testing.T) { + dir := filepath.Join(testenv.GOROOT(t), "src", "sys") + g := &Generator{ + r: nil, // Unused here. + path: filepath.Join(dir, "proc.go"), + dir: dir, + file: "proc.go", + pkg: "sys", + commands: make(map[string][]string), + } + g.setEnv() + g.setShorthand([]string{"-command", "yacc", "go", "tool", "yacc"}) + for _, test := range splitTests { + // First with newlines. 
+ got := g.split("//go:generate " + test.in + "\n") + if !reflect.DeepEqual(got, test.out) { + t.Errorf("split(%q): got %q expected %q", test.in, got, test.out) + } + // Then with CRLFs, thank you Windows. + got = g.split("//go:generate " + test.in + "\r\n") + if !reflect.DeepEqual(got, test.out) { + t.Errorf("split(%q): got %q expected %q", test.in, got, test.out) + } + } +} + +// These environment variables will be undefined before the splitTestWithLine tests +var undefEnvList = []string{ + "_XYZZY_", +} + +// These environment variables will be defined before the splitTestWithLine tests +var defEnvMap = map[string]string{ + "_PLUGH_": "SomeVal", + "_X": "Y", +} + +// TestGenerateCommandShortHand - similar to TestGenerateCommandParse, +// except: +// 1. if the result starts with -command, record that shorthand +// before moving on to the next test. +// 2. If a source line number is specified, set that in the parser +// before executing the test. i.e., execute the split as if it +// processing that source line. +func TestGenerateCommandShorthand(t *testing.T) { + dir := filepath.Join(testenv.GOROOT(t), "src", "sys") + g := &Generator{ + r: nil, // Unused here. 
+ path: filepath.Join(dir, "proc.go"), + dir: dir, + file: "proc.go", + pkg: "sys", + commands: make(map[string][]string), + } + + var inLine string + var expected, got []string + + g.setEnv() + + // Set up the system environment variables + for i := range undefEnvList { + os.Unsetenv(undefEnvList[i]) + } + for k := range defEnvMap { + os.Setenv(k, defEnvMap[k]) + } + + // simple command from environment variable + inLine = "//go:generate -command CMD0 \"ab${_X}cd\"" + expected = []string{"-command", "CMD0", "abYcd"} + got = g.split(inLine + "\n") + + if !reflect.DeepEqual(got, expected) { + t.Errorf("split(%q): got %q expected %q", inLine, got, expected) + } + + // try again, with an extra level of indirection (should leave variable in command) + inLine = "//go:generate -command CMD0 \"ab${DOLLAR}{_X}cd\"" + expected = []string{"-command", "CMD0", "ab${_X}cd"} + got = g.split(inLine + "\n") + + if !reflect.DeepEqual(got, expected) { + t.Errorf("split(%q): got %q expected %q", inLine, got, expected) + } + + // Now the interesting part, record that output as a command + g.setShorthand(got) + + // see that the command still substitutes correctly from env. variable + inLine = "//go:generate CMD0" + expected = []string{"abYcd"} + got = g.split(inLine + "\n") + + if !reflect.DeepEqual(got, expected) { + t.Errorf("split(%q): got %q expected %q", inLine, got, expected) + } + + // Now change the value of $X and see if the recorded definition is + // still intact (vs. having the $_X already substituted out) + + os.Setenv("_X", "Z") + inLine = "//go:generate CMD0" + expected = []string{"abZcd"} + got = g.split(inLine + "\n") + + if !reflect.DeepEqual(got, expected) { + t.Errorf("split(%q): got %q expected %q", inLine, got, expected) + } + + // What if the variable is now undefined? Should be empty substitution. 
+ + os.Unsetenv("_X") + inLine = "//go:generate CMD0" + expected = []string{"abcd"} + got = g.split(inLine + "\n") + + if !reflect.DeepEqual(got, expected) { + t.Errorf("split(%q): got %q expected %q", inLine, got, expected) + } + + // Try another undefined variable as an extra check + os.Unsetenv("_Z") + inLine = "//go:generate -command CMD1 \"ab${_Z}cd\"" + expected = []string{"-command", "CMD1", "abcd"} + got = g.split(inLine + "\n") + + if !reflect.DeepEqual(got, expected) { + t.Errorf("split(%q): got %q expected %q", inLine, got, expected) + } + + g.setShorthand(got) + + inLine = "//go:generate CMD1" + expected = []string{"abcd"} + got = g.split(inLine + "\n") + + if !reflect.DeepEqual(got, expected) { + t.Errorf("split(%q): got %q expected %q", inLine, got, expected) + } + + const val = "someNewValue" + os.Setenv("_Z", val) + + // try again with the properly-escaped variable. + + inLine = "//go:generate -command CMD2 \"ab${DOLLAR}{_Z}cd\"" + expected = []string{"-command", "CMD2", "ab${_Z}cd"} + got = g.split(inLine + "\n") + + if !reflect.DeepEqual(got, expected) { + t.Errorf("split(%q): got %q expected %q", inLine, got, expected) + } + + g.setShorthand(got) + + inLine = "//go:generate CMD2" + expected = []string{"ab" + val + "cd"} + got = g.split(inLine + "\n") + + if !reflect.DeepEqual(got, expected) { + t.Errorf("split(%q): got %q expected %q", inLine, got, expected) + } +} + +// Command-related tests for TestGenerateCommandShortHand2 +// -- Note line numbers included to check substitutions from "build-in" variable - $GOLINE +var splitTestsLines = []splitTestWithLine{ + {"-command TEST1 $GOLINE", []string{"-command", "TEST1", "22"}, 22}, + {"-command TEST2 ${DOLLAR}GOLINE", []string{"-command", "TEST2", "$GOLINE"}, 26}, + {"TEST1", []string{"22"}, 33}, + {"TEST2", []string{"66"}, 66}, + {"TEST1 ''", []string{"22", "''"}, 99}, + {"TEST2 ''", []string{"44", "''"}, 44}, +} + +// TestGenerateCommandShortHand - similar to TestGenerateCommandParse, +// except: 
+// 1. if the result starts with -command, record that shorthand +// before moving on to the next test. +// 2. If a source line number is specified, set that in the parser +// before executing the test. i.e., execute the split as if it +// processing that source line. +func TestGenerateCommandShortHand2(t *testing.T) { + dir := filepath.Join(testenv.GOROOT(t), "src", "sys") + g := &Generator{ + r: nil, // Unused here. + path: filepath.Join(dir, "proc.go"), + dir: dir, + file: "proc.go", + pkg: "sys", + commands: make(map[string][]string), + } + g.setEnv() + for _, test := range splitTestsLines { + // if the test specified a line number, reflect that + if test.lineNumber != anyLineNo { + g.lineNum = test.lineNumber + g.setEnv() + } + // First with newlines. + got := g.split("//go:generate " + test.in + "\n") + if !reflect.DeepEqual(got, test.out) { + t.Errorf("split(%q): got %q expected %q", test.in, got, test.out) + } + // Then with CRLFs, thank you Windows. + got = g.split("//go:generate " + test.in + "\r\n") + if !reflect.DeepEqual(got, test.out) { + t.Errorf("split(%q): got %q expected %q", test.in, got, test.out) + } + if got[0] == "-command" { // record commands + g.setShorthand(got) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/gomod.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/gomod.go new file mode 100644 index 0000000000000000000000000000000000000000..4a4ae5302908b5936817f9051547e1801100d6ee --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/gomod.go @@ -0,0 +1,43 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gover + +import ( + "bytes" + "strings" +) + +var nl = []byte("\n") + +// GoModLookup takes go.mod or go.work content, +// finds the first line in the file starting with the given key, +// and returns the value associated with that key. 
+// +// Lookup should only be used with non-factored verbs +// such as "go" and "toolchain", usually to find versions +// or version-like strings. +func GoModLookup(gomod []byte, key string) string { + for len(gomod) > 0 { + var line []byte + line, gomod, _ = bytes.Cut(gomod, nl) + line = bytes.TrimSpace(line) + if v, ok := parseKey(line, key); ok { + return v + } + } + return "" +} + +func parseKey(line []byte, key string) (string, bool) { + if !strings.HasPrefix(string(line), key) { + return "", false + } + s := strings.TrimPrefix(string(line), key) + if len(s) == 0 || (s[0] != ' ' && s[0] != '\t') { + return "", false + } + s, _, _ = strings.Cut(s, "//") // strip comments + return strings.TrimSpace(s), true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/gover.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/gover.go new file mode 100644 index 0000000000000000000000000000000000000000..19c6f670c5d8eda90b1750a53d4a605115c91a57 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/gover.go @@ -0,0 +1,75 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gover implements support for Go toolchain versions like 1.21.0 and 1.21rc1. +// (For historical reasons, Go does not use semver for its toolchains.) +// This package provides the same basic analysis that golang.org/x/mod/semver does for semver. +// It also provides some helpers for extracting versions from go.mod files +// and for dealing with module.Versions that may use Go versions or semver +// depending on the module path. +package gover + +import ( + "internal/gover" +) + +// Compare returns -1, 0, or +1 depending on whether +// x < y, x == y, or x > y, interpreted as toolchain versions. +// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21". 
+// Malformed versions compare less than well-formed versions and equal to each other. +// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0". +func Compare(x, y string) int { + return gover.Compare(x, y) +} + +// Max returns the maximum of x and y interpreted as toolchain versions, +// compared using Compare. +// If x and y compare equal, Max returns x. +func Max(x, y string) string { + return gover.Max(x, y) +} + +// IsLang reports whether v denotes the overall Go language version +// and not a specific release. Starting with the Go 1.21 release, "1.x" denotes +// the overall language version; the first release is "1.x.0". +// The distinction is important because the relative ordering is +// +// 1.21 < 1.21rc1 < 1.21.0 +// +// meaning that Go 1.21rc1 and Go 1.21.0 will both handle go.mod files that +// say "go 1.21", but Go 1.21rc1 will not handle files that say "go 1.21.0". +func IsLang(x string) bool { + return gover.IsLang(x) +} + +// Lang returns the Go language version. For example, Lang("1.2.3") == "1.2". +func Lang(x string) string { + return gover.Lang(x) +} + +// IsPrerelease reports whether v denotes a Go prerelease version. +func IsPrerelease(x string) bool { + return gover.Parse(x).Kind != "" +} + +// Prev returns the Go major release immediately preceding v, +// or v itself if v is the first Go major release (1.0) or not a supported +// Go version. +// +// Examples: +// +// Prev("1.2") = "1.1" +// Prev("1.3rc4") = "1.2" +func Prev(x string) string { + v := gover.Parse(x) + if gover.CmpInt(v.Minor, "1") <= 0 { + return v.Major + } + return v.Major + "." + gover.DecInt(v.Minor) +} + +// IsValid reports whether the version x is valid. 
+func IsValid(x string) bool { + return gover.IsValid(x) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/gover_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/gover_test.go new file mode 100644 index 0000000000000000000000000000000000000000..68fd56f31dee210dd619db6a303d70257f97259f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/gover_test.go @@ -0,0 +1,142 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gover + +import ( + "reflect" + "testing" +) + +func TestCompare(t *testing.T) { test2(t, compareTests, "Compare", Compare) } + +var compareTests = []testCase2[string, string, int]{ + {"", "", 0}, + {"x", "x", 0}, + {"", "x", 0}, + {"1", "1.1", -1}, + {"1.5", "1.6", -1}, + {"1.5", "1.10", -1}, + {"1.6", "1.6.1", -1}, + {"1.19", "1.19.0", 0}, + {"1.19rc1", "1.19", -1}, + {"1.20", "1.20.0", 0}, + {"1.20rc1", "1.20", -1}, + {"1.21", "1.21.0", -1}, + {"1.21", "1.21rc1", -1}, + {"1.21rc1", "1.21.0", -1}, + {"1.6", "1.19", -1}, + {"1.19", "1.19.1", -1}, + {"1.19rc1", "1.19", -1}, + {"1.19rc1", "1.19.1", -1}, + {"1.19rc1", "1.19rc2", -1}, + {"1.19.0", "1.19.1", -1}, + {"1.19rc1", "1.19.0", -1}, + {"1.19alpha3", "1.19beta2", -1}, + {"1.19beta2", "1.19rc1", -1}, + {"1.1", "1.99999999999999998", -1}, + {"1.99999999999999998", "1.99999999999999999", -1}, +} + +func TestLang(t *testing.T) { test1(t, langTests, "Lang", Lang) } + +var langTests = []testCase1[string, string]{ + {"1.2rc3", "1.2"}, + {"1.2.3", "1.2"}, + {"1.2", "1.2"}, + {"1", "1"}, + {"1.999testmod", "1.999"}, +} + +func TestIsLang(t *testing.T) { test1(t, isLangTests, "IsLang", IsLang) } + +var isLangTests = []testCase1[string, bool]{ + {"1.2rc3", false}, + {"1.2.3", false}, + {"1.999testmod", false}, + {"1.22", true}, + {"1.21", true}, + {"1.20", false}, // == 1.20.0 + {"1.19", false}, // == 1.20.0 + {"1.3", false}, 
// == 1.3.0 + {"1.2", false}, // == 1.2.0 + {"1", false}, // == 1.0.0 +} + +func TestPrev(t *testing.T) { test1(t, prevTests, "Prev", Prev) } + +var prevTests = []testCase1[string, string]{ + {"", ""}, + {"0", "0"}, + {"1.3rc4", "1.2"}, + {"1.3.5", "1.2"}, + {"1.3", "1.2"}, + {"1", "1"}, + {"1.99999999999999999", "1.99999999999999998"}, + {"1.40000000000000000", "1.39999999999999999"}, +} + +func TestIsValid(t *testing.T) { test1(t, isValidTests, "IsValid", IsValid) } + +var isValidTests = []testCase1[string, bool]{ + {"1.2rc3", true}, + {"1.2.3", true}, + {"1.999testmod", true}, + {"1.600+auto", false}, + {"1.22", true}, + {"1.21.0", true}, + {"1.21rc2", true}, + {"1.21", true}, + {"1.20.0", true}, + {"1.20", true}, + {"1.19", true}, + {"1.3", true}, + {"1.2", true}, + {"1", true}, +} + +type testCase1[In, Out any] struct { + in In + out Out +} + +type testCase2[In1, In2, Out any] struct { + in1 In1 + in2 In2 + out Out +} + +type testCase3[In1, In2, In3, Out any] struct { + in1 In1 + in2 In2 + in3 In3 + out Out +} + +func test1[In, Out any](t *testing.T, tests []testCase1[In, Out], name string, f func(In) Out) { + t.Helper() + for _, tt := range tests { + if out := f(tt.in); !reflect.DeepEqual(out, tt.out) { + t.Errorf("%s(%v) = %v, want %v", name, tt.in, out, tt.out) + } + } +} + +func test2[In1, In2, Out any](t *testing.T, tests []testCase2[In1, In2, Out], name string, f func(In1, In2) Out) { + t.Helper() + for _, tt := range tests { + if out := f(tt.in1, tt.in2); !reflect.DeepEqual(out, tt.out) { + t.Errorf("%s(%+v, %+v) = %+v, want %+v", name, tt.in1, tt.in2, out, tt.out) + } + } +} + +func test3[In1, In2, In3, Out any](t *testing.T, tests []testCase3[In1, In2, In3, Out], name string, f func(In1, In2, In3) Out) { + t.Helper() + for _, tt := range tests { + if out := f(tt.in1, tt.in2, tt.in3); !reflect.DeepEqual(out, tt.out) { + t.Errorf("%s(%+v, %+v, %+v) = %+v, want %+v", name, tt.in1, tt.in2, tt.in3, out, tt.out) + } + } +} diff --git 
a/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/local.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/local.go new file mode 100644 index 0000000000000000000000000000000000000000..8183a5c3d47497d4f70f4af5b4a8dc2e8d2b0da1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/local.go @@ -0,0 +1,42 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gover + +import ( + "internal/goversion" + "runtime" + "strconv" +) + +// TestVersion is initialized in the go command test binary +// to be $TESTGO_VERSION, to allow tests to override the +// go command's idea of its own version as returned by Local. +var TestVersion string + +// Local returns the local Go version, the one implemented by this go command. +func Local() string { + v, _ := local() + return v +} + +// LocalToolchain returns the local toolchain name, the one implemented by this go command. +func LocalToolchain() string { + _, t := local() + return t +} + +func local() (goVers, toolVers string) { + toolVers = runtime.Version() + if TestVersion != "" { + toolVers = TestVersion + } + goVers = FromToolchain(toolVers) + if goVers == "" { + // Development branch. Use "Dev" version with just 1.N, no rc1 or .0 suffix. + goVers = "1." + strconv.Itoa(goversion.Version) + toolVers = "go" + goVers + } + return goVers, toolVers +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/mod.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/mod.go new file mode 100644 index 0000000000000000000000000000000000000000..d3cc17068def6d19cc51f4a25a2a61b1bbd543a9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/mod.go @@ -0,0 +1,127 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gover + +import ( + "sort" + "strings" + + "golang.org/x/mod/module" + "golang.org/x/mod/semver" +) + +// IsToolchain reports whether the module path corresponds to the +// virtual, non-downloadable module tracking go or toolchain directives in the go.mod file. +// +// Note that IsToolchain only matches "go" and "toolchain", not the +// real, downloadable module "golang.org/toolchain" containing toolchain files. +// +// IsToolchain("go") = true +// IsToolchain("toolchain") = true +// IsToolchain("golang.org/x/tools") = false +// IsToolchain("golang.org/toolchain") = false +func IsToolchain(path string) bool { + return path == "go" || path == "toolchain" +} + +// ModCompare returns the result of comparing the versions x and y +// for the module with the given path. +// The path is necessary because the "go" and "toolchain" modules +// use a different version syntax and semantics (gover, this package) +// than most modules (semver). +func ModCompare(path string, x, y string) int { + if path == "go" { + return Compare(x, y) + } + if path == "toolchain" { + return Compare(maybeToolchainVersion(x), maybeToolchainVersion(y)) + } + return semver.Compare(x, y) +} + +// ModSort is like module.Sort but understands the "go" and "toolchain" +// modules and their version ordering. +func ModSort(list []module.Version) { + sort.Slice(list, func(i, j int) bool { + mi := list[i] + mj := list[j] + if mi.Path != mj.Path { + return mi.Path < mj.Path + } + // To help go.sum formatting, allow version/file. + // Compare semver prefix by semver rules, + // file by string order. + vi := mi.Version + vj := mj.Version + var fi, fj string + if k := strings.Index(vi, "/"); k >= 0 { + vi, fi = vi[:k], vi[k:] + } + if k := strings.Index(vj, "/"); k >= 0 { + vj, fj = vj[:k], vj[k:] + } + if vi != vj { + return ModCompare(mi.Path, vi, vj) < 0 + } + return fi < fj + }) +} + +// ModIsValid reports whether vers is a valid version syntax for the module with the given path. 
+func ModIsValid(path, vers string) bool { + if IsToolchain(path) { + if path == "toolchain" { + return IsValid(FromToolchain(vers)) + } + return IsValid(vers) + } + return semver.IsValid(vers) +} + +// ModIsPrefix reports whether v is a valid version syntax prefix for the module with the given path. +// The caller is assumed to have checked that ModIsValid(path, vers) is true. +func ModIsPrefix(path, vers string) bool { + if IsToolchain(path) { + if path == "toolchain" { + return IsLang(FromToolchain(vers)) + } + return IsLang(vers) + } + // Semver + dots := 0 + for i := 0; i < len(vers); i++ { + switch vers[i] { + case '-', '+': + return false + case '.': + dots++ + if dots >= 2 { + return false + } + } + } + return true +} + +// ModIsPrerelease reports whether v is a prerelease version for the module with the given path. +// The caller is assumed to have checked that ModIsValid(path, vers) is true. +func ModIsPrerelease(path, vers string) bool { + if IsToolchain(path) { + return IsPrerelease(vers) + } + return semver.Prerelease(vers) != "" +} + +// ModMajorMinor returns the "major.minor" truncation of the version v, +// for use as a prefix in "@patch" queries. +func ModMajorMinor(path, vers string) string { + if IsToolchain(path) { + if path == "toolchain" { + return "go" + Lang(FromToolchain(vers)) + } + return Lang(vers) + } + return semver.MajorMinor(vers) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/mod_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/mod_test.go new file mode 100644 index 0000000000000000000000000000000000000000..c92169cb32d50caa013d695f0a6a20b5b1e38494 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/mod_test.go @@ -0,0 +1,72 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gover + +import ( + "slices" + "strings" + "testing" + + "golang.org/x/mod/module" +) + +func TestIsToolchain(t *testing.T) { test1(t, isToolchainTests, "IsToolchain", IsToolchain) } + +var isToolchainTests = []testCase1[string, bool]{ + {"go", true}, + {"toolchain", true}, + {"anything", false}, + {"golang.org/toolchain", false}, +} + +func TestModCompare(t *testing.T) { test3(t, modCompareTests, "ModCompare", ModCompare) } + +var modCompareTests = []testCase3[string, string, string, int]{ + {"go", "1.2", "1.3", -1}, + {"go", "v1.2", "v1.3", 0}, // equal because invalid + {"go", "1.2", "1.2", 0}, + {"toolchain", "go1.2", "go1.3", -1}, + {"toolchain", "go1.2", "go1.2", 0}, + {"toolchain", "1.2", "1.3", -1}, // accepted but non-standard + {"toolchain", "v1.2", "v1.3", 0}, // equal because invalid + {"rsc.io/quote", "v1.2", "v1.3", -1}, + {"rsc.io/quote", "1.2", "1.3", 0}, // equal because invalid +} + +func TestModIsValid(t *testing.T) { test2(t, modIsValidTests, "ModIsValid", ModIsValid) } + +var modIsValidTests = []testCase2[string, string, bool]{ + {"go", "1.2", true}, + {"go", "v1.2", false}, + {"toolchain", "go1.2", true}, + {"toolchain", "v1.2", false}, + {"rsc.io/quote", "v1.2", true}, + {"rsc.io/quote", "1.2", false}, +} + +func TestModSort(t *testing.T) { + test1(t, modSortTests, "ModSort", func(list []module.Version) []module.Version { + out := slices.Clone(list) + ModSort(out) + return out + }) +} + +var modSortTests = []testCase1[[]module.Version, []module.Version]{ + { + mvl(`z v1.1; a v1.2; a v1.1; go 1.3; toolchain 1.3; toolchain 1.2; go 1.2`), + mvl(`a v1.1; a v1.2; go 1.2; go 1.3; toolchain 1.2; toolchain 1.3; z v1.1`), + }, +} + +func mvl(s string) []module.Version { + var list []module.Version + for _, f := range strings.Split(s, ";") { + f = strings.TrimSpace(f) + path, vers, _ := strings.Cut(f, " ") + list = append(list, module.Version{Path: path, Version: vers}) + } + return list +} diff --git 
a/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/toolchain.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/toolchain.go new file mode 100644 index 0000000000000000000000000000000000000000..43b117edcf00237da81b4d734bef76db322f58f9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/toolchain.go @@ -0,0 +1,108 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gover + +import ( + "cmd/go/internal/base" + "context" + "errors" + "fmt" + "strings" +) + +// FromToolchain returns the Go version for the named toolchain, +// derived from the name itself (not by running the toolchain). +// A toolchain is named "goVERSION". +// A suffix after the VERSION introduced by a -, space, or tab is removed. +// Examples: +// +// FromToolchain("go1.2.3") == "1.2.3" +// FromToolchain("go1.2.3-bigcorp") == "1.2.3" +// FromToolchain("invalid") == "" +func FromToolchain(name string) string { + if strings.ContainsAny(name, "\\/") { + // The suffix must not include a path separator, since that would cause + // exec.LookPath to resolve it from a relative directory instead of from + // $PATH. + return "" + } + + var v string + if strings.HasPrefix(name, "go") { + v = name[2:] + } else { + return "" + } + // Some builds use custom suffixes; strip them. + if i := strings.IndexAny(v, " \t-"); i >= 0 { + v = v[:i] + } + if !IsValid(v) { + return "" + } + return v +} + +func maybeToolchainVersion(name string) string { + if IsValid(name) { + return name + } + return FromToolchain(name) +} + +// ToolchainMax returns the maximum of x and y interpreted as toolchain names, +// compared using Compare(FromToolchain(x), FromToolchain(y)). +// If x and y compare equal, Max returns x. 
+func ToolchainMax(x, y string) string { + if Compare(FromToolchain(x), FromToolchain(y)) < 0 { + return y + } + return x +} + +// Startup records the information that went into the startup-time version switch. +// It is initialized by switchGoToolchain. +var Startup struct { + GOTOOLCHAIN string // $GOTOOLCHAIN setting + AutoFile string // go.mod or go.work file consulted + AutoGoVersion string // go line found in file + AutoToolchain string // toolchain line found in file +} + +// A TooNewError explains that a module is too new for this version of Go. +type TooNewError struct { + What string + GoVersion string + Toolchain string // for callers if they want to use it, but not printed +} + +func (e *TooNewError) Error() string { + var explain string + if Startup.GOTOOLCHAIN != "" && Startup.GOTOOLCHAIN != "auto" { + explain = "; GOTOOLCHAIN=" + Startup.GOTOOLCHAIN + } + if Startup.AutoFile != "" && (Startup.AutoGoVersion != "" || Startup.AutoToolchain != "") { + explain += fmt.Sprintf("; %s sets ", base.ShortPath(Startup.AutoFile)) + if Startup.AutoToolchain != "" { + explain += "toolchain " + Startup.AutoToolchain + } else { + explain += "go " + Startup.AutoGoVersion + } + } + return fmt.Sprintf("%v requires go >= %v (running go %v%v)", e.What, e.GoVersion, Local(), explain) +} + +var ErrTooNew = errors.New("module too new") + +func (e *TooNewError) Is(err error) bool { + return err == ErrTooNew +} + +// A Switcher provides the ability to switch to a new toolchain in response to TooNewErrors. +// See [cmd/go/internal/toolchain.Switcher] for documentation. 
+type Switcher interface { + Error(err error) + Switch(ctx context.Context) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/toolchain_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/toolchain_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d1c22fbc37cb9cecc61852ad01031bc3d9810b6f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/toolchain_test.go @@ -0,0 +1,19 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gover + +import "testing" + +func TestFromToolchain(t *testing.T) { test1(t, fromToolchainTests, "FromToolchain", FromToolchain) } + +var fromToolchainTests = []testCase1[string, string]{ + {"go1.2.3", "1.2.3"}, + {"1.2.3", ""}, + {"go1.2.3+bigcorp", ""}, + {"go1.2.3-bigcorp", "1.2.3"}, + {"go1.2.3-bigcorp more text", "1.2.3"}, + {"gccgo-go1.23rc4", ""}, + {"gccgo-go1.23rc4-bigdwarf", ""}, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/version.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/version.go new file mode 100644 index 0000000000000000000000000000000000000000..2681013fef7f281546943500f0bfba29c0c3dc0b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/gover/version.go @@ -0,0 +1,74 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gover + +import "golang.org/x/mod/modfile" + +const ( + // narrowAllVersion is the Go version at which the + // module-module "all" pattern no longer closes over the dependencies of + // tests outside of the main module. + NarrowAllVersion = "1.16" + + // DefaultGoModVersion is the Go version to assume for go.mod files + // that do not declare a Go version. 
The go command has been + // writing go versions to modules since Go 1.12, so a go.mod + // without a version is either very old or recently hand-written. + // Since we can't tell which, we have to assume it's very old. + // The semantics of the go.mod changed at Go 1.17 to support + // graph pruning. If see a go.mod without a go line, we have to + // assume Go 1.16 so that we interpret the requirements correctly. + // Note that this default must stay at Go 1.16; it cannot be moved forward. + DefaultGoModVersion = "1.16" + + // DefaultGoWorkVersion is the Go version to assume for go.work files + // that do not declare a Go version. Workspaces were added in Go 1.18, + // so use that. + DefaultGoWorkVersion = "1.18" + + // ExplicitIndirectVersion is the Go version at which a + // module's go.mod file is expected to list explicit requirements on every + // module that provides any package transitively imported by that module. + // + // Other indirect dependencies of such a module can be safely pruned out of + // the module graph; see https://golang.org/ref/mod#graph-pruning. + ExplicitIndirectVersion = "1.17" + + // separateIndirectVersion is the Go version at which + // "// indirect" dependencies are added in a block separate from the direct + // ones. See https://golang.org/issue/45965. + SeparateIndirectVersion = "1.17" + + // tidyGoModSumVersion is the Go version at which + // 'go mod tidy' preserves go.mod checksums needed to build test dependencies + // of packages in "all", so that 'go test all' can be run without checksum + // errors. + // See https://go.dev/issue/56222. + TidyGoModSumVersion = "1.21" + + // goStrictVersion is the Go version at which the Go versions + // became "strict" in the sense that, restricted to modules at this version + // or later, every module must have a go version line ≥ all its dependencies. + // It is also the version after which "too new" a version is considered a fatal error. 
+ GoStrictVersion = "1.21" +) + +// FromGoMod returns the go version from the go.mod file. +// It returns DefaultGoModVersion if the go.mod file does not contain a go line or if mf is nil. +func FromGoMod(mf *modfile.File) string { + if mf == nil || mf.Go == nil { + return DefaultGoModVersion + } + return mf.Go.Version +} + +// FromGoWork returns the go version from the go.mod file. +// It returns DefaultGoWorkVersion if the go.mod file does not contain a go line or if wf is nil. +func FromGoWork(wf *modfile.WorkFile) string { + if wf == nil || wf.Go == nil { + return DefaultGoWorkVersion + } + return wf.Go.Version +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/help/help.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/help/help.go new file mode 100644 index 0000000000000000000000000000000000000000..501f08eb2d63cbbdbc3f901dd62fe0ea542a069b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/help/help.go @@ -0,0 +1,188 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package help implements the “go help” command. +package help + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" + "text/template" + "unicode" + "unicode/utf8" + + "cmd/go/internal/base" +) + +// Help implements the 'help' command. +func Help(w io.Writer, args []string) { + // 'go help documentation' generates doc.go. + if len(args) == 1 && args[0] == "documentation" { + fmt.Fprintln(w, "// Copyright 2011 The Go Authors. 
All rights reserved.") + fmt.Fprintln(w, "// Use of this source code is governed by a BSD-style") + fmt.Fprintln(w, "// license that can be found in the LICENSE file.") + fmt.Fprintln(w) + fmt.Fprintln(w, "// Code generated by 'go test cmd/go -v -run=^TestDocsUpToDate$ -fixdocs'; DO NOT EDIT.") + fmt.Fprintln(w, "// Edit the documentation in other files and then execute 'go generate cmd/go' to generate this one.") + fmt.Fprintln(w) + buf := new(strings.Builder) + PrintUsage(buf, base.Go) + usage := &base.Command{Long: buf.String()} + cmds := []*base.Command{usage} + for _, cmd := range base.Go.Commands { + cmds = append(cmds, cmd) + cmds = append(cmds, cmd.Commands...) + } + tmpl(&commentWriter{W: w}, documentationTemplate, cmds) + fmt.Fprintln(w, "package main") + return + } + + cmd := base.Go +Args: + for i, arg := range args { + for _, sub := range cmd.Commands { + if sub.Name() == arg { + cmd = sub + continue Args + } + } + + // helpSuccess is the help command using as many args as possible that would succeed. + helpSuccess := "go help" + if i > 0 { + helpSuccess += " " + strings.Join(args[:i], " ") + } + fmt.Fprintf(os.Stderr, "go help %s: unknown help topic. Run '%s'.\n", strings.Join(args, " "), helpSuccess) + base.SetExitStatus(2) // failed at 'go help cmd' + base.Exit() + } + + if len(cmd.Commands) > 0 { + PrintUsage(os.Stdout, cmd) + } else { + tmpl(os.Stdout, helpTemplate, cmd) + } + // not exit 2: succeeded at 'go help cmd'. + return +} + +var usageTemplate = `{{.Long | trim}} + +Usage: + + {{.UsageLine}} [arguments] + +The commands are: +{{range .Commands}}{{if or (.Runnable) .Commands}} + {{.Name | printf "%-11s"}} {{.Short}}{{end}}{{end}} + +Use "go help{{with .LongName}} {{.}}{{end}} " for more information about a command. 
+{{if eq (.UsageLine) "go"}} +Additional help topics: +{{range .Commands}}{{if and (not .Runnable) (not .Commands)}} + {{.Name | printf "%-15s"}} {{.Short}}{{end}}{{end}} + +Use "go help{{with .LongName}} {{.}}{{end}} " for more information about that topic. +{{end}} +` + +var helpTemplate = `{{if .Runnable}}usage: {{.UsageLine}} + +{{end}}{{.Long | trim}} +` + +var documentationTemplate = `{{range .}}{{if .Short}}{{.Short | capitalize}} + +{{end}}{{if .Commands}}` + usageTemplate + `{{else}}{{if .Runnable}}Usage: + + {{.UsageLine}} + +{{end}}{{.Long | trim}} + + +{{end}}{{end}}` + +// commentWriter writes a Go comment to the underlying io.Writer, +// using line comment form (//). +type commentWriter struct { + W io.Writer + wroteSlashes bool // Wrote "//" at the beginning of the current line. +} + +func (c *commentWriter) Write(p []byte) (int, error) { + var n int + for i, b := range p { + if !c.wroteSlashes { + s := "//" + if b != '\n' { + s = "// " + } + if _, err := io.WriteString(c.W, s); err != nil { + return n, err + } + c.wroteSlashes = true + } + n0, err := c.W.Write(p[i : i+1]) + n += n0 + if err != nil { + return n, err + } + if b == '\n' { + c.wroteSlashes = false + } + } + return len(p), nil +} + +// An errWriter wraps a writer, recording whether a write error occurred. +type errWriter struct { + w io.Writer + err error +} + +func (w *errWriter) Write(b []byte) (int, error) { + n, err := w.w.Write(b) + if err != nil { + w.err = err + } + return n, err +} + +// tmpl executes the given template text on data, writing the result to w. +func tmpl(w io.Writer, text string, data any) { + t := template.New("top") + t.Funcs(template.FuncMap{"trim": strings.TrimSpace, "capitalize": capitalize}) + template.Must(t.Parse(text)) + ew := &errWriter{w: w} + err := t.Execute(ew, data) + if ew.err != nil { + // I/O error writing. Ignore write on closed pipe. 
+ if strings.Contains(ew.err.Error(), "pipe") { + base.SetExitStatus(1) + base.Exit() + } + base.Fatalf("writing output: %v", ew.err) + } + if err != nil { + panic(err) + } +} + +func capitalize(s string) string { + if s == "" { + return s + } + r, n := utf8.DecodeRuneInString(s) + return string(unicode.ToTitle(r)) + s[n:] +} + +func PrintUsage(w io.Writer, cmd *base.Command) { + bw := bufio.NewWriter(w) + tmpl(bw, usageTemplate, cmd) + bw.Flush() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/help/helpdoc.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/help/helpdoc.go new file mode 100644 index 0000000000000000000000000000000000000000..c5d1e2af16e2286ed0f8be3164a605d8926a16c3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/help/helpdoc.go @@ -0,0 +1,947 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package help + +import "cmd/go/internal/base" + +var HelpC = &base.Command{ + UsageLine: "c", + Short: "calling between Go and C", + Long: ` +There are two different ways to call between Go and C/C++ code. + +The first is the cgo tool, which is part of the Go distribution. For +information on how to use it see the cgo documentation (go doc cmd/cgo). + +The second is the SWIG program, which is a general tool for +interfacing between languages. For information on SWIG see +http://swig.org/. When running go build, any file with a .swig +extension will be passed to SWIG. Any file with a .swigcxx extension +will be passed to SWIG with the -c++ option. + +When either cgo or SWIG is used, go build will pass any .c, .m, .s, .S +or .sx files to the C compiler, and any .cc, .cpp, .cxx files to the C++ +compiler. The CC or CXX environment variables may be set to determine +the C or C++ compiler, respectively, to use. 
+ `, +} + +var HelpPackages = &base.Command{ + UsageLine: "packages", + Short: "package lists and patterns", + Long: ` +Many commands apply to a set of packages: + + go [packages] + +Usually, [packages] is a list of import paths. + +An import path that is a rooted path or that begins with +a . or .. element is interpreted as a file system path and +denotes the package in that directory. + +Otherwise, the import path P denotes the package found in +the directory DIR/src/P for some DIR listed in the GOPATH +environment variable (For more details see: 'go help gopath'). + +If no import paths are given, the action applies to the +package in the current directory. + +There are four reserved names for paths that should not be used +for packages to be built with the go tool: + +- "main" denotes the top-level package in a stand-alone executable. + +- "all" expands to all packages found in all the GOPATH +trees. For example, 'go list all' lists all the packages on the local +system. When using modules, "all" expands to all packages in +the main module and their dependencies, including dependencies +needed by tests of any of those. + +- "std" is like all but expands to just the packages in the standard +Go library. + +- "cmd" expands to the Go repository's commands and their +internal libraries. + +Import paths beginning with "cmd/" only match source code in +the Go repository. + +An import path is a pattern if it includes one or more "..." wildcards, +each of which can match any string, including the empty string and +strings containing slashes. Such a pattern expands to all package +directories found in the GOPATH trees with names matching the +patterns. + +To make common patterns more convenient, there are two special cases. +First, /... at the end of the pattern can match an empty string, +so that net/... matches both net and packages in its subdirectories, like net/http. 
+Second, any slash-separated pattern element containing a wildcard never +participates in a match of the "vendor" element in the path of a vendored +package, so that ./... does not match packages in subdirectories of +./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. +Note, however, that a directory named vendor that itself contains code +is not a vendored package: cmd/vendor would be a command named vendor, +and the pattern cmd/... matches it. +See golang.org/s/go15vendor for more about vendoring. + +An import path can also name a package to be downloaded from +a remote repository. Run 'go help importpath' for details. + +Every package in a program must have a unique import path. +By convention, this is arranged by starting each path with a +unique prefix that belongs to you. For example, paths used +internally at Google all begin with 'google', and paths +denoting remote repositories begin with the path to the code, +such as 'github.com/user/repo'. + +Packages in a program need not have unique package names, +but there are two reserved package names with special meaning. +The name main indicates a command, not a library. +Commands are built into binaries and cannot be imported. +The name documentation indicates documentation for +a non-Go program in the directory. Files in package documentation +are ignored by the go command. + +As a special case, if the package list is a list of .go files from a +single directory, the command is applied to a single synthesized +package made up of exactly those files, ignoring any build constraints +in those files and ignoring any other files in the directory. + +Directory and file names that begin with "." or "_" are ignored +by the go tool, as are directories named "testdata". + `, +} + +var HelpImportPath = &base.Command{ + UsageLine: "importpath", + Short: "import path syntax", + Long: ` + +An import path (see 'go help packages') denotes a package stored in the local +file system. 
In general, an import path denotes either a standard package (such +as "unicode/utf8") or a package found in one of the work spaces (For more +details see: 'go help gopath'). + +Relative import paths + +An import path beginning with ./ or ../ is called a relative path. +The toolchain supports relative import paths as a shortcut in two ways. + +First, a relative path can be used as a shorthand on the command line. +If you are working in the directory containing the code imported as +"unicode" and want to run the tests for "unicode/utf8", you can type +"go test ./utf8" instead of needing to specify the full path. +Similarly, in the reverse situation, "go test .." will test "unicode" from +the "unicode/utf8" directory. Relative patterns are also allowed, like +"go test ./..." to test all subdirectories. See 'go help packages' for details +on the pattern syntax. + +Second, if you are compiling a Go program not in a work space, +you can use a relative path in an import statement in that program +to refer to nearby code also not in a work space. +This makes it easy to experiment with small multipackage programs +outside of the usual work spaces, but such programs cannot be +installed with "go install" (there is no work space in which to install them), +so they are rebuilt from scratch each time they are built. +To avoid ambiguity, Go programs cannot use relative import paths +within a work space. + +Remote import paths + +Certain import paths also +describe how to obtain the source code for the package using +a revision control system. 
+ +A few common code hosting sites have special syntax: + + Bitbucket (Git, Mercurial) + + import "bitbucket.org/user/project" + import "bitbucket.org/user/project/sub/directory" + + GitHub (Git) + + import "github.com/user/project" + import "github.com/user/project/sub/directory" + + Launchpad (Bazaar) + + import "launchpad.net/project" + import "launchpad.net/project/series" + import "launchpad.net/project/series/sub/directory" + + import "launchpad.net/~user/project/branch" + import "launchpad.net/~user/project/branch/sub/directory" + + IBM DevOps Services (Git) + + import "hub.jazz.net/git/user/project" + import "hub.jazz.net/git/user/project/sub/directory" + +For code hosted on other servers, import paths may either be qualified +with the version control type, or the go tool can dynamically fetch +the import path over https/http and discover where the code resides +from a tag in the HTML. + +To declare the code location, an import path of the form + + repository.vcs/path + +specifies the given repository, with or without the .vcs suffix, +using the named version control system, and then the path inside +that repository. The supported version control systems are: + + Bazaar .bzr + Fossil .fossil + Git .git + Mercurial .hg + Subversion .svn + +For example, + + import "example.org/user/foo.hg" + +denotes the root directory of the Mercurial repository at +example.org/user/foo or foo.hg, and + + import "example.org/repo.git/foo/bar" + +denotes the foo/bar directory of the Git repository at +example.org/repo or repo.git. + +When a version control system supports multiple protocols, +each is tried in turn when downloading. For example, a Git +download tries https://, then git+ssh://. + +By default, downloads are restricted to known secure protocols +(e.g. https, ssh). To override this setting for Git downloads, the +GIT_ALLOW_PROTOCOL environment variable can be set (For more details see: +'go help environment'). 
+ +If the import path is not a known code hosting site and also lacks a +version control qualifier, the go tool attempts to fetch the import +over https/http and looks for a tag in the document's HTML +. + +The meta tag has the form: + + + +The import-prefix is the import path corresponding to the repository +root. It must be a prefix or an exact match of the package being +fetched with "go get". If it's not an exact match, another http +request is made at the prefix to verify the tags match. + +The meta tag should appear as early in the file as possible. +In particular, it should appear before any raw JavaScript or CSS, +to avoid confusing the go command's restricted parser. + +The vcs is one of "bzr", "fossil", "git", "hg", "svn". + +The repo-root is the root of the version control system +containing a scheme and not containing a .vcs qualifier. + +For example, + + import "example.org/pkg/foo" + +will result in the following requests: + + https://example.org/pkg/foo?go-get=1 (preferred) + http://example.org/pkg/foo?go-get=1 (fallback, only with use of correctly set GOINSECURE) + +If that page contains the meta tag + + + +the go tool will verify that https://example.org/?go-get=1 contains the +same meta tag and then git clone https://code.org/r/p/exproj into +GOPATH/src/example.org. + +When using GOPATH, downloaded packages are written to the first directory +listed in the GOPATH environment variable. +(See 'go help gopath-get' and 'go help gopath'.) + +When using modules, downloaded packages are stored in the module cache. +See https://golang.org/ref/mod#module-cache. + +When using modules, an additional variant of the go-import meta tag is +recognized and is preferred over those listing version control systems. +That variant uses "mod" as the vcs in the content value, as in: + + + +This tag means to fetch modules with paths beginning with example.org +from the module proxy available at the URL https://code.org/moduleproxy. 
+See https://golang.org/ref/mod#goproxy-protocol for details about the +proxy protocol. + +Import path checking + +When the custom import path feature described above redirects to a +known code hosting site, each of the resulting packages has two possible +import paths, using the custom domain or the known hosting site. + +A package statement is said to have an "import comment" if it is immediately +followed (before the next newline) by a comment of one of these two forms: + + package math // import "path" + package math /* import "path" */ + +The go command will refuse to install a package with an import comment +unless it is being referred to by that import path. In this way, import comments +let package authors make sure the custom import path is used and not a +direct path to the underlying code hosting site. + +Import path checking is disabled for code found within vendor trees. +This makes it possible to copy code into alternate locations in vendor trees +without needing to update import comments. + +Import path checking is also disabled when using modules. +Import path comments are obsoleted by the go.mod file's module statement. + +See https://golang.org/s/go14customimport for details. + `, +} + +var HelpGopath = &base.Command{ + UsageLine: "gopath", + Short: "GOPATH environment variable", + Long: ` +The Go path is used to resolve import statements. +It is implemented by and documented in the go/build package. + +The GOPATH environment variable lists places to look for Go code. +On Unix, the value is a colon-separated string. +On Windows, the value is a semicolon-separated string. +On Plan 9, the value is a list. + +If the environment variable is unset, GOPATH defaults +to a subdirectory named "go" in the user's home directory +($HOME/go on Unix, %USERPROFILE%\go on Windows), +unless that directory holds a Go distribution. +Run "go env GOPATH" to see the current GOPATH. + +See https://golang.org/wiki/SettingGOPATH to set a custom GOPATH. 
+ +Each directory listed in GOPATH must have a prescribed structure: + +The src directory holds source code. The path below src +determines the import path or executable name. + +The pkg directory holds installed package objects. +As in the Go tree, each target operating system and +architecture pair has its own subdirectory of pkg +(pkg/GOOS_GOARCH). + +If DIR is a directory listed in the GOPATH, a package with +source in DIR/src/foo/bar can be imported as "foo/bar" and +has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a". + +The bin directory holds compiled commands. +Each command is named for its source directory, but only +the final element, not the entire path. That is, the +command with source in DIR/src/foo/quux is installed into +DIR/bin/quux, not DIR/bin/foo/quux. The "foo/" prefix is stripped +so that you can add DIR/bin to your PATH to get at the +installed commands. If the GOBIN environment variable is +set, commands are installed to the directory it names instead +of DIR/bin. GOBIN must be an absolute path. + +Here's an example directory layout: + + GOPATH=/home/user/go + + /home/user/go/ + src/ + foo/ + bar/ (go code in package bar) + x.go + quux/ (go code in package main) + y.go + bin/ + quux (installed command) + pkg/ + linux_amd64/ + foo/ + bar.a (installed package object) + +Go searches each directory listed in GOPATH to find source code, +but new packages are always downloaded into the first directory +in the list. + +See https://golang.org/doc/code.html for an example. + +GOPATH and Modules + +When using modules, GOPATH is no longer used for resolving imports. +However, it is still used to store downloaded source code (in GOPATH/pkg/mod) +and compiled commands (in GOPATH/bin). + +Internal Directories + +Code in or below a directory named "internal" is importable only +by code in the directory tree rooted at the parent of "internal". 
+Here's an extended version of the directory layout above: + + /home/user/go/ + src/ + crash/ + bang/ (go code in package bang) + b.go + foo/ (go code in package foo) + f.go + bar/ (go code in package bar) + x.go + internal/ + baz/ (go code in package baz) + z.go + quux/ (go code in package main) + y.go + + +The code in z.go is imported as "foo/internal/baz", but that +import statement can only appear in source files in the subtree +rooted at foo. The source files foo/f.go, foo/bar/x.go, and +foo/quux/y.go can all import "foo/internal/baz", but the source file +crash/bang/b.go cannot. + +See https://golang.org/s/go14internal for details. + +Vendor Directories + +Go 1.6 includes support for using local copies of external dependencies +to satisfy imports of those dependencies, often referred to as vendoring. + +Code below a directory named "vendor" is importable only +by code in the directory tree rooted at the parent of "vendor", +and only using an import path that omits the prefix up to and +including the vendor element. + +Here's the example from the previous section, +but with the "internal" directory renamed to "vendor" +and a new foo/vendor/crash/bang directory added: + + /home/user/go/ + src/ + crash/ + bang/ (go code in package bang) + b.go + foo/ (go code in package foo) + f.go + bar/ (go code in package bar) + x.go + vendor/ + crash/ + bang/ (go code in package bang) + b.go + baz/ (go code in package baz) + z.go + quux/ (go code in package main) + y.go + +The same visibility rules apply as for internal, but the code +in z.go is imported as "baz", not as "foo/vendor/baz". + +Code in vendor directories deeper in the source tree shadows +code in higher directories. Within the subtree rooted at foo, an import +of "crash/bang" resolves to "foo/vendor/crash/bang", not the +top-level "crash/bang". + +Code in vendor directories is not subject to import path +checking (see 'go help importpath'). 
+
+When 'go get' checks out or updates a git repository, it now also
+updates submodules.
+
+Vendor directories do not affect the placement of new repositories
+being checked out for the first time by 'go get': those are always
+placed in the main GOPATH, never in a vendor subtree.
+
+See https://golang.org/s/go15vendor for details.
+	`,
+}
+
+var HelpEnvironment = &base.Command{
+	UsageLine: "environment",
+	Short:     "environment variables",
+	Long: `
+
+The go command and the tools it invokes consult environment variables
+for configuration. If an environment variable is unset or empty, the go
+command uses a sensible default setting. To see the effective setting of
+the variable <NAME>, run 'go env <NAME>'. To change the default setting,
+run 'go env -w <NAME>=<VALUE>'. Defaults changed using 'go env -w'
+are recorded in a Go environment configuration file stored in the
+per-user configuration directory, as reported by os.UserConfigDir.
+The location of the configuration file can be changed by setting
+the environment variable GOENV, and 'go env GOENV' prints the
+effective location, but 'go env -w' cannot change the default location.
+See 'go help env' for details.
+
+General-purpose environment variables:
+
+	GO111MODULE
+		Controls whether the go command runs in module-aware mode or GOPATH mode.
+		May be "off", "on", or "auto".
+		See https://golang.org/ref/mod#mod-commands.
+	GCCGO
+		The gccgo command to run for 'go build -compiler=gccgo'.
+	GOARCH
+		The architecture, or processor, for which to compile code.
+		Examples are amd64, 386, arm, ppc64.
+	GOBIN
+		The directory where 'go install' will install a command.
+	GOCACHE
+		The directory where the go command will store cached
+		information for reuse in future builds.
+	GOMODCACHE
+		The directory where the go command will store downloaded modules.
+	GODEBUG
+		Enable various debugging facilities. See https://go.dev/doc/godebug
+		for details.
+	GOENV
+		The location of the Go environment configuration file.
+ Cannot be set using 'go env -w'. + Setting GOENV=off in the environment disables the use of the + default configuration file. + GOFLAGS + A space-separated list of -flag=value settings to apply + to go commands by default, when the given flag is known by + the current command. Each entry must be a standalone flag. + Because the entries are space-separated, flag values must + not contain spaces. Flags listed on the command line + are applied after this list and therefore override it. + GOINSECURE + Comma-separated list of glob patterns (in the syntax of Go's path.Match) + of module path prefixes that should always be fetched in an insecure + manner. Only applies to dependencies that are being fetched directly. + GOINSECURE does not disable checksum database validation. GOPRIVATE or + GONOSUMDB may be used to achieve that. + GOOS + The operating system for which to compile code. + Examples are linux, darwin, windows, netbsd. + GOPATH + Controls where various files are stored. See: 'go help gopath'. + GOPROXY + URL of Go module proxy. See https://golang.org/ref/mod#environment-variables + and https://golang.org/ref/mod#module-proxy for details. + GOPRIVATE, GONOPROXY, GONOSUMDB + Comma-separated list of glob patterns (in the syntax of Go's path.Match) + of module path prefixes that should always be fetched directly + or that should not be compared against the checksum database. + See https://golang.org/ref/mod#private-modules. + GOROOT + The root of the go tree. + GOSUMDB + The name of checksum database to use and optionally its public key and + URL. See https://golang.org/ref/mod#authenticating. + GOTOOLCHAIN + Controls which Go toolchain is used. See https://go.dev/doc/toolchain. + GOTMPDIR + The directory where the go command will write + temporary source files, packages, and binaries. + GOVCS + Lists version control commands that may be used with matching servers. + See 'go help vcs'. 
+ GOWORK + In module aware mode, use the given go.work file as a workspace file. + By default or when GOWORK is "auto", the go command searches for a + file named go.work in the current directory and then containing directories + until one is found. If a valid go.work file is found, the modules + specified will collectively be used as the main modules. If GOWORK + is "off", or a go.work file is not found in "auto" mode, workspace + mode is disabled. + +Environment variables for use with cgo: + + AR + The command to use to manipulate library archives when + building with the gccgo compiler. + The default is 'ar'. + CC + The command to use to compile C code. + CGO_ENABLED + Whether the cgo command is supported. Either 0 or 1. + CGO_CFLAGS + Flags that cgo will pass to the compiler when compiling + C code. + CGO_CFLAGS_ALLOW + A regular expression specifying additional flags to allow + to appear in #cgo CFLAGS source code directives. + Does not apply to the CGO_CFLAGS environment variable. + CGO_CFLAGS_DISALLOW + A regular expression specifying flags that must be disallowed + from appearing in #cgo CFLAGS source code directives. + Does not apply to the CGO_CFLAGS environment variable. + CGO_CPPFLAGS, CGO_CPPFLAGS_ALLOW, CGO_CPPFLAGS_DISALLOW + Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, + but for the C preprocessor. + CGO_CXXFLAGS, CGO_CXXFLAGS_ALLOW, CGO_CXXFLAGS_DISALLOW + Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, + but for the C++ compiler. + CGO_FFLAGS, CGO_FFLAGS_ALLOW, CGO_FFLAGS_DISALLOW + Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, + but for the Fortran compiler. + CGO_LDFLAGS, CGO_LDFLAGS_ALLOW, CGO_LDFLAGS_DISALLOW + Like CGO_CFLAGS, CGO_CFLAGS_ALLOW, and CGO_CFLAGS_DISALLOW, + but for the linker. + CXX + The command to use to compile C++ code. + FC + The command to use to compile Fortran code. + PKG_CONFIG + Path to pkg-config tool. 
+ +Architecture-specific environment variables: + + GOARM + For GOARCH=arm, the ARM architecture for which to compile. + Valid values are 5, 6, 7. + The value can be followed by an option specifying how to implement floating point instructions. + Valid options are ,softfloat (default for 5) and ,hardfloat (default for 6 and 7). + GO386 + For GOARCH=386, how to implement floating point instructions. + Valid values are sse2 (default), softfloat. + GOAMD64 + For GOARCH=amd64, the microarchitecture level for which to compile. + Valid values are v1 (default), v2, v3, v4. + See https://golang.org/wiki/MinimumRequirements#amd64 + GOMIPS + For GOARCH=mips{,le}, whether to use floating point instructions. + Valid values are hardfloat (default), softfloat. + GOMIPS64 + For GOARCH=mips64{,le}, whether to use floating point instructions. + Valid values are hardfloat (default), softfloat. + GOPPC64 + For GOARCH=ppc64{,le}, the target ISA (Instruction Set Architecture). + Valid values are power8 (default), power9, power10. + GOWASM + For GOARCH=wasm, comma-separated list of experimental WebAssembly features to use. + Valid values are satconv, signext. + +Environment variables for use with code coverage: + + GOCOVERDIR + Directory into which to write code coverage data files + generated by running a "go build -cover" binary. + Requires that GOEXPERIMENT=coverageredesign is enabled. + +Special-purpose environment variables: + + GCCGOTOOLDIR + If set, where to find gccgo tools, such as cgo. + The default is based on how gccgo was configured. + GOEXPERIMENT + Comma-separated list of toolchain experiments to enable or disable. + The list of available experiments may change arbitrarily over time. + See src/internal/goexperiment/flags.go for currently valid values. + Warning: This variable is provided for the development and testing + of the Go toolchain itself. Use beyond that purpose is unsupported. 
+ GOROOT_FINAL + The root of the installed Go tree, when it is + installed in a location other than where it is built. + File names in stack traces are rewritten from GOROOT to + GOROOT_FINAL. + GO_EXTLINK_ENABLED + Whether the linker should use external linking mode + when using -linkmode=auto with code that uses cgo. + Set to 0 to disable external linking mode, 1 to enable it. + GIT_ALLOW_PROTOCOL + Defined by Git. A colon-separated list of schemes that are allowed + to be used with git fetch/clone. If set, any scheme not explicitly + mentioned will be considered insecure by 'go get'. + Because the variable is defined by Git, the default value cannot + be set using 'go env -w'. + +Additional information available from 'go env' but not read from the environment: + + GOEXE + The executable file name suffix (".exe" on Windows, "" on other systems). + GOGCCFLAGS + A space-separated list of arguments supplied to the CC command. + GOHOSTARCH + The architecture (GOARCH) of the Go toolchain binaries. + GOHOSTOS + The operating system (GOOS) of the Go toolchain binaries. + GOMOD + The absolute path to the go.mod of the main module. + If module-aware mode is enabled, but there is no go.mod, GOMOD will be + os.DevNull ("/dev/null" on Unix-like systems, "NUL" on Windows). + If module-aware mode is disabled, GOMOD will be the empty string. + GOTOOLDIR + The directory where the go tools (compile, cover, doc, etc...) are installed. + GOVERSION + The version of the installed Go tree, as reported by runtime.Version. + `, +} + +var HelpFileType = &base.Command{ + UsageLine: "filetype", + Short: "file types", + Long: ` +The go command examines the contents of a restricted set of files +in each directory. It identifies which files to examine based on +the extension of the file name. These extensions are: + + .go + Go source files. + .c, .h + C source files. 
+ If the package uses cgo or SWIG, these will be compiled with the + OS-native compiler (typically gcc); otherwise they will + trigger an error. + .cc, .cpp, .cxx, .hh, .hpp, .hxx + C++ source files. Only useful with cgo or SWIG, and always + compiled with the OS-native compiler. + .m + Objective-C source files. Only useful with cgo, and always + compiled with the OS-native compiler. + .s, .S, .sx + Assembler source files. + If the package uses cgo or SWIG, these will be assembled with the + OS-native assembler (typically gcc (sic)); otherwise they + will be assembled with the Go assembler. + .swig, .swigcxx + SWIG definition files. + .syso + System object files. + +Files of each of these types except .syso may contain build +constraints, but the go command stops scanning for build constraints +at the first item in the file that is not a blank line or //-style +line comment. See the go/build package documentation for +more details. + `, +} + +var HelpBuildmode = &base.Command{ + UsageLine: "buildmode", + Short: "build modes", + Long: ` +The 'go build' and 'go install' commands take a -buildmode argument which +indicates which kind of object file is to be built. Currently supported values +are: + + -buildmode=archive + Build the listed non-main packages into .a files. Packages named + main are ignored. + + -buildmode=c-archive + Build the listed main package, plus all packages it imports, + into a C archive file. The only callable symbols will be those + functions exported using a cgo //export comment. Requires + exactly one main package to be listed. + + -buildmode=c-shared + Build the listed main package, plus all packages it imports, + into a C shared library. The only callable symbols will + be those functions exported using a cgo //export comment. + Requires exactly one main package to be listed. + + -buildmode=default + Listed main packages are built into executables and listed + non-main packages are built into .a files (the default + behavior). 
+ + -buildmode=shared + Combine all the listed non-main packages into a single shared + library that will be used when building with the -linkshared + option. Packages named main are ignored. + + -buildmode=exe + Build the listed main packages and everything they import into + executables. Packages not named main are ignored. + + -buildmode=pie + Build the listed main packages and everything they import into + position independent executables (PIE). Packages not named + main are ignored. + + -buildmode=plugin + Build the listed main packages, plus all packages that they + import, into a Go plugin. Packages not named main are ignored. + +On AIX, when linking a C program that uses a Go archive built with +-buildmode=c-archive, you must pass -Wl,-bnoobjreorder to the C compiler. +`, +} + +var HelpCache = &base.Command{ + UsageLine: "cache", + Short: "build and test caching", + Long: ` +The go command caches build outputs for reuse in future builds. +The default location for cache data is a subdirectory named go-build +in the standard user cache directory for the current operating system. +Setting the GOCACHE environment variable overrides this default, +and running 'go env GOCACHE' prints the current cache directory. + +The go command periodically deletes cached data that has not been +used recently. Running 'go clean -cache' deletes all cached data. + +The build cache correctly accounts for changes to Go source files, +compilers, compiler options, and so on: cleaning the cache explicitly +should not be necessary in typical use. However, the build cache +does not detect changes to C libraries imported with cgo. +If you have made changes to the C libraries on your system, you +will need to clean the cache explicitly or else use the -a build flag +(see 'go help build') to force rebuilding of packages that +depend on the updated C libraries. + +The go command also caches successful package test results. +See 'go help test' for details. 
Running 'go clean -testcache' removes +all cached test results (but not cached build results). + +The go command also caches values used in fuzzing with 'go test -fuzz', +specifically, values that expanded code coverage when passed to a +fuzz function. These values are not used for regular building and +testing, but they're stored in a subdirectory of the build cache. +Running 'go clean -fuzzcache' removes all cached fuzzing values. +This may make fuzzing less effective, temporarily. + +The GODEBUG environment variable can enable printing of debugging +information about the state of the cache: + +GODEBUG=gocacheverify=1 causes the go command to bypass the +use of any cache entries and instead rebuild everything and check +that the results match existing cache entries. + +GODEBUG=gocachehash=1 causes the go command to print the inputs +for all of the content hashes it uses to construct cache lookup keys. +The output is voluminous but can be useful for debugging the cache. + +GODEBUG=gocachetest=1 causes the go command to print details of its +decisions about whether to reuse a cached test result. +`, +} + +var HelpBuildConstraint = &base.Command{ + UsageLine: "buildconstraint", + Short: "build constraints", + Long: ` +A build constraint, also known as a build tag, is a condition under which a +file should be included in the package. Build constraints are given by a +line comment that begins + + //go:build + +Constraints may appear in any kind of source file (not just Go), but +they must appear near the top of the file, preceded +only by blank lines and other comments. These rules mean that in Go +files a build constraint must appear before the package clause. + +To distinguish build constraints from package documentation, +a build constraint should be followed by a blank line. + +A build constraint comment is evaluated as an expression containing +build tags combined by ||, &&, and ! operators and parentheses. +Operators have the same meaning as in Go. 
+ +For example, the following build constraint constrains a file to +build when the "linux" and "386" constraints are satisfied, or when +"darwin" is satisfied and "cgo" is not: + + //go:build (linux && 386) || (darwin && !cgo) + +It is an error for a file to have more than one //go:build line. + +During a particular build, the following build tags are satisfied: + + - the target operating system, as spelled by runtime.GOOS, set with the + GOOS environment variable. + - the target architecture, as spelled by runtime.GOARCH, set with the + GOARCH environment variable. + - any architecture features, in the form GOARCH.feature + (for example, "amd64.v2"), as detailed below. + - "unix", if GOOS is a Unix or Unix-like system. + - the compiler being used, either "gc" or "gccgo" + - "cgo", if the cgo command is supported (see CGO_ENABLED in + 'go help environment'). + - a term for each Go major release, through the current version: + "go1.1" from Go version 1.1 onward, "go1.12" from Go 1.12, and so on. + - any additional tags given by the -tags flag (see 'go help build'). + +There are no separate build tags for beta or minor releases. + +If a file's name, after stripping the extension and a possible _test suffix, +matches any of the following patterns: + *_GOOS + *_GOARCH + *_GOOS_GOARCH +(example: source_windows_amd64.go) where GOOS and GOARCH represent +any known operating system and architecture values respectively, then +the file is considered to have an implicit build constraint requiring +those terms (in addition to any explicit constraints in the file). + +Using GOOS=android matches build tags and files as for GOOS=linux +in addition to android tags and files. + +Using GOOS=illumos matches build tags and files as for GOOS=solaris +in addition to illumos tags and files. + +Using GOOS=ios matches build tags and files as for GOOS=darwin +in addition to ios tags and files. 
+ +The defined architecture feature build tags are: + + - For GOARCH=386, GO386=387 and GO386=sse2 + set the 386.387 and 386.sse2 build tags, respectively. + - For GOARCH=amd64, GOAMD64=v1, v2, and v3 + correspond to the amd64.v1, amd64.v2, and amd64.v3 feature build tags. + - For GOARCH=arm, GOARM=5, 6, and 7 + correspond to the arm.5, arm.6, and arm.7 feature build tags. + - For GOARCH=mips or mipsle, + GOMIPS=hardfloat and softfloat + correspond to the mips.hardfloat and mips.softfloat + (or mipsle.hardfloat and mipsle.softfloat) feature build tags. + - For GOARCH=mips64 or mips64le, + GOMIPS64=hardfloat and softfloat + correspond to the mips64.hardfloat and mips64.softfloat + (or mips64le.hardfloat and mips64le.softfloat) feature build tags. + - For GOARCH=ppc64 or ppc64le, + GOPPC64=power8, power9, and power10 correspond to the + ppc64.power8, ppc64.power9, and ppc64.power10 + (or ppc64le.power8, ppc64le.power9, and ppc64le.power10) + feature build tags. + - For GOARCH=wasm, GOWASM=satconv and signext + correspond to the wasm.satconv and wasm.signext feature build tags. + +For GOARCH=amd64, arm, ppc64, and ppc64le, a particular feature level +sets the feature build tags for all previous levels as well. +For example, GOAMD64=v2 sets the amd64.v1 and amd64.v2 feature flags. +This ensures that code making use of v2 features continues to compile +when, say, GOAMD64=v4 is introduced. +Code handling the absence of a particular feature level +should use a negation: + + //go:build !amd64.v2 + +To keep a file from being considered for any build: + + //go:build ignore + +(Any other unsatisfied word will work as well, but "ignore" is conventional.) 
+ +To build a file only when using cgo, and only on Linux and OS X: + + //go:build cgo && (linux || darwin) + +Such a file is usually paired with another file implementing the +default functionality for other systems, which in this case would +carry the constraint: + + //go:build !(cgo && (linux || darwin)) + +Naming a file dns_windows.go will cause it to be included only when +building the package for Windows; similarly, math_386.s will be included +only when building the package for 32-bit x86. + +Go versions 1.16 and earlier used a different syntax for build constraints, +with a "// +build" prefix. The gofmt command will add an equivalent //go:build +constraint when encountering the older syntax. +`, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/build.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/build.go new file mode 100644 index 0000000000000000000000000000000000000000..3a4a66b8699c7cc075248064670829e9a0cde529 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/build.go @@ -0,0 +1,374 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copied from Go distribution src/go/build/build.go, syslist.go. +// That package does not export the ability to process raw file data, +// although we could fake it with an appropriate build.Context +// and a lot of unwrapping. +// More importantly, that package does not implement the tags["*"] +// special case, in which both tag and !tag are considered to be true +// for essentially all tags (except "ignore"). +// +// If we added this API to go/build directly, we wouldn't need this +// file anymore, but this API is not terribly general-purpose and we +// don't really want to commit to any public form of it, nor do we +// want to move the core parts of go/build into a top-level internal package. 
+// These details change very infrequently, so the copy is fine. + +package imports + +import ( + "bytes" + "cmd/go/internal/cfg" + "errors" + "fmt" + "go/build/constraint" + "strings" + "unicode" +) + +var ( + bSlashSlash = []byte("//") + bStarSlash = []byte("*/") + bSlashStar = []byte("/*") + bPlusBuild = []byte("+build") + + goBuildComment = []byte("//go:build") + + errMultipleGoBuild = errors.New("multiple //go:build comments") +) + +func isGoBuildComment(line []byte) bool { + if !bytes.HasPrefix(line, goBuildComment) { + return false + } + line = bytes.TrimSpace(line) + rest := line[len(goBuildComment):] + return len(rest) == 0 || len(bytes.TrimSpace(rest)) < len(rest) +} + +// ShouldBuild reports whether it is okay to use this file, +// The rule is that in the file's leading run of // comments +// and blank lines, which must be followed by a blank line +// (to avoid including a Go package clause doc comment), +// lines beginning with '// +build' are taken as build directives. +// +// The file is accepted only if each such line lists something +// matching the file. For example: +// +// // +build windows linux +// +// marks the file as applicable only on Windows and Linux. +// +// If tags["*"] is true, then ShouldBuild will consider every +// build tag except "ignore" to be both true and false for +// the purpose of satisfying build tags, in order to estimate +// (conservatively) whether a file could ever possibly be used +// in any build. +func ShouldBuild(content []byte, tags map[string]bool) bool { + // Identify leading run of // comments and blank lines, + // which must be followed by a blank line. + // Also identify any //go:build comments. + content, goBuild, _, err := parseFileHeader(content) + if err != nil { + return false + } + + // If //go:build line is present, it controls. + // Otherwise fall back to +build processing. 
+ var shouldBuild bool + switch { + case goBuild != nil: + x, err := constraint.Parse(string(goBuild)) + if err != nil { + return false + } + shouldBuild = eval(x, tags, true) + + default: + shouldBuild = true + p := content + for len(p) > 0 { + line := p + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, p = line[:i], p[i+1:] + } else { + p = p[len(p):] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, bSlashSlash) || !bytes.Contains(line, bPlusBuild) { + continue + } + text := string(line) + if !constraint.IsPlusBuild(text) { + continue + } + if x, err := constraint.Parse(text); err == nil { + if !eval(x, tags, true) { + shouldBuild = false + } + } + } + } + + return shouldBuild +} + +func parseFileHeader(content []byte) (trimmed, goBuild []byte, sawBinaryOnly bool, err error) { + end := 0 + p := content + ended := false // found non-blank, non-// line, so stopped accepting // +build lines + inSlashStar := false // in /* */ comment + +Lines: + for len(p) > 0 { + line := p + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, p = line[:i], p[i+1:] + } else { + p = p[len(p):] + } + line = bytes.TrimSpace(line) + if len(line) == 0 && !ended { // Blank line + // Remember position of most recent blank line. + // When we find the first non-blank, non-// line, + // this "end" position marks the latest file position + // where a // +build line can appear. + // (It must appear _before_ a blank line before the non-blank, non-// line. + // Yes, that's confusing, which is part of why we moved to //go:build lines.) + // Note that ended==false here means that inSlashStar==false, + // since seeing a /* would have set ended==true. 
+ end = len(content) - len(p) + continue Lines + } + if !bytes.HasPrefix(line, bSlashSlash) { // Not comment line + ended = true + } + + if !inSlashStar && isGoBuildComment(line) { + if goBuild != nil { + return nil, nil, false, errMultipleGoBuild + } + goBuild = line + } + + Comments: + for len(line) > 0 { + if inSlashStar { + if i := bytes.Index(line, bStarSlash); i >= 0 { + inSlashStar = false + line = bytes.TrimSpace(line[i+len(bStarSlash):]) + continue Comments + } + continue Lines + } + if bytes.HasPrefix(line, bSlashSlash) { + continue Lines + } + if bytes.HasPrefix(line, bSlashStar) { + inSlashStar = true + line = bytes.TrimSpace(line[len(bSlashStar):]) + continue Comments + } + // Found non-comment text. + break Lines + } + } + + return content[:end], goBuild, sawBinaryOnly, nil +} + +// matchTag reports whether the tag name is valid and tags[name] is true. +// As a special case, if tags["*"] is true and name is not empty or ignore, +// then matchTag will return prefer instead of the actual answer, +// which allows the caller to pretend in that case that most tags are +// both true and false. +func matchTag(name string, tags map[string]bool, prefer bool) bool { + // Tags must be letters, digits, underscores or dots. + // Unlike in Go identifiers, all digits are fine (e.g., "386"). + for _, c := range name { + if !unicode.IsLetter(c) && !unicode.IsDigit(c) && c != '_' && c != '.' { + return false + } + } + + if tags["*"] && name != "" && name != "ignore" { + // Special case for gathering all possible imports: + // if we put * in the tags map then all tags + // except "ignore" are considered both present and not + // (so we return true no matter how 'want' is set). 
+ return prefer + } + + if tags[name] { + return true + } + + switch name { + case "linux": + return tags["android"] + case "solaris": + return tags["illumos"] + case "darwin": + return tags["ios"] + case "unix": + return unixOS[cfg.BuildContext.GOOS] + default: + return false + } +} + +// eval is like +// +// x.Eval(func(tag string) bool { return matchTag(tag, tags) }) +// +// except that it implements the special case for tags["*"] meaning +// all tags are both true and false at the same time. +func eval(x constraint.Expr, tags map[string]bool, prefer bool) bool { + switch x := x.(type) { + case *constraint.TagExpr: + return matchTag(x.Tag, tags, prefer) + case *constraint.NotExpr: + return !eval(x.X, tags, !prefer) + case *constraint.AndExpr: + return eval(x.X, tags, prefer) && eval(x.Y, tags, prefer) + case *constraint.OrExpr: + return eval(x.X, tags, prefer) || eval(x.Y, tags, prefer) + } + panic(fmt.Sprintf("unexpected constraint expression %T", x)) +} + +// Eval is like +// +// x.Eval(func(tag string) bool { return matchTag(tag, tags) }) +// +// except that it implements the special case for tags["*"] meaning +// all tags are both true and false at the same time. +func Eval(x constraint.Expr, tags map[string]bool, prefer bool) bool { + return eval(x, tags, prefer) +} + +// MatchFile returns false if the name contains a $GOOS or $GOARCH +// suffix which does not match the current system. +// The recognized name formats are: +// +// name_$(GOOS).* +// name_$(GOARCH).* +// name_$(GOOS)_$(GOARCH).* +// name_$(GOOS)_test.* +// name_$(GOARCH)_test.* +// name_$(GOOS)_$(GOARCH)_test.* +// +// Exceptions: +// +// if GOOS=android, then files with GOOS=linux are also matched. +// if GOOS=illumos, then files with GOOS=solaris are also matched. +// if GOOS=ios, then files with GOOS=darwin are also matched. +// +// If tags["*"] is true, then MatchFile will consider all possible +// GOOS and GOARCH to be available and will consequently +// always return true. 
+func MatchFile(name string, tags map[string]bool) bool { + if tags["*"] { + return true + } + if dot := strings.Index(name, "."); dot != -1 { + name = name[:dot] + } + + // Before Go 1.4, a file called "linux.go" would be equivalent to having a + // build tag "linux" in that file. For Go 1.4 and beyond, we require this + // auto-tagging to apply only to files with a non-empty prefix, so + // "foo_linux.go" is tagged but "linux.go" is not. This allows new operating + // systems, such as android, to arrive without breaking existing code with + // innocuous source code in "android.go". The easiest fix: cut everything + // in the name before the initial _. + i := strings.Index(name, "_") + if i < 0 { + return true + } + name = name[i:] // ignore everything before first _ + + l := strings.Split(name, "_") + if n := len(l); n > 0 && l[n-1] == "test" { + l = l[:n-1] + } + n := len(l) + if n >= 2 && KnownOS[l[n-2]] && KnownArch[l[n-1]] { + return matchTag(l[n-2], tags, true) && matchTag(l[n-1], tags, true) + } + if n >= 1 && KnownOS[l[n-1]] { + return matchTag(l[n-1], tags, true) + } + if n >= 1 && KnownArch[l[n-1]] { + return matchTag(l[n-1], tags, true) + } + return true +} + +var KnownOS = map[string]bool{ + "aix": true, + "android": true, + "darwin": true, + "dragonfly": true, + "freebsd": true, + "hurd": true, + "illumos": true, + "ios": true, + "js": true, + "linux": true, + "nacl": true, // legacy; don't remove + "netbsd": true, + "openbsd": true, + "plan9": true, + "solaris": true, + "wasip1": true, + "windows": true, + "zos": true, +} + +// unixOS is the set of GOOS values matched by the "unix" build tag. +// This is not used for filename matching. +// This is the same list as in go/build/syslist.go and cmd/dist/build.go. 
+var unixOS = map[string]bool{ + "aix": true, + "android": true, + "darwin": true, + "dragonfly": true, + "freebsd": true, + "hurd": true, + "illumos": true, + "ios": true, + "linux": true, + "netbsd": true, + "openbsd": true, + "solaris": true, +} + +var KnownArch = map[string]bool{ + "386": true, + "amd64": true, + "amd64p32": true, // legacy; don't remove + "arm": true, + "armbe": true, + "arm64": true, + "arm64be": true, + "ppc64": true, + "ppc64le": true, + "mips": true, + "mipsle": true, + "mips64": true, + "mips64le": true, + "mips64p32": true, + "mips64p32le": true, + "loong64": true, + "ppc": true, + "riscv": true, + "riscv64": true, + "s390": true, + "s390x": true, + "sparc": true, + "sparc64": true, + "wasm": true, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/read.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/read.go new file mode 100644 index 0000000000000000000000000000000000000000..70d5190450502d042c2a2d0ed3d17105d50e6dbc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/read.go @@ -0,0 +1,263 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copied from Go distribution src/go/build/read.go. + +package imports + +import ( + "bufio" + "bytes" + "errors" + "io" + "unicode/utf8" +) + +type importReader struct { + b *bufio.Reader + buf []byte + peek byte + err error + eof bool + nerr int +} + +var bom = []byte{0xef, 0xbb, 0xbf} + +func newImportReader(b *bufio.Reader) *importReader { + // Remove leading UTF-8 BOM. + // Per https://golang.org/ref/spec#Source_code_representation: + // a compiler may ignore a UTF-8-encoded byte order mark (U+FEFF) + // if it is the first Unicode code point in the source text. 
+ if leadingBytes, err := b.Peek(3); err == nil && bytes.Equal(leadingBytes, bom) { + b.Discard(3) + } + return &importReader{b: b} +} + +func isIdent(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c >= utf8.RuneSelf +} + +var ( + errSyntax = errors.New("syntax error") + errNUL = errors.New("unexpected NUL in input") +) + +// syntaxError records a syntax error, but only if an I/O error has not already been recorded. +func (r *importReader) syntaxError() { + if r.err == nil { + r.err = errSyntax + } +} + +// readByte reads the next byte from the input, saves it in buf, and returns it. +// If an error occurs, readByte records the error in r.err and returns 0. +func (r *importReader) readByte() byte { + c, err := r.b.ReadByte() + if err == nil { + r.buf = append(r.buf, c) + if c == 0 { + err = errNUL + } + } + if err != nil { + if err == io.EOF { + r.eof = true + } else if r.err == nil { + r.err = err + } + c = 0 + } + return c +} + +// peekByte returns the next byte from the input reader but does not advance beyond it. +// If skipSpace is set, peekByte skips leading spaces and comments. +func (r *importReader) peekByte(skipSpace bool) byte { + if r.err != nil { + if r.nerr++; r.nerr > 10000 { + panic("go/build: import reader looping") + } + return 0 + } + + // Use r.peek as first input byte. + // Don't just return r.peek here: it might have been left by peekByte(false) + // and this might be peekByte(true). + c := r.peek + if c == 0 { + c = r.readByte() + } + for r.err == nil && !r.eof { + if skipSpace { + // For the purposes of this reader, semicolons are never necessary to + // understand the input and are treated as spaces. 
+ switch c { + case ' ', '\f', '\t', '\r', '\n', ';': + c = r.readByte() + continue + + case '/': + c = r.readByte() + if c == '/' { + for c != '\n' && r.err == nil && !r.eof { + c = r.readByte() + } + } else if c == '*' { + var c1 byte + for (c != '*' || c1 != '/') && r.err == nil { + if r.eof { + r.syntaxError() + } + c, c1 = c1, r.readByte() + } + } else { + r.syntaxError() + } + c = r.readByte() + continue + } + } + break + } + r.peek = c + return r.peek +} + +// nextByte is like peekByte but advances beyond the returned byte. +func (r *importReader) nextByte(skipSpace bool) byte { + c := r.peekByte(skipSpace) + r.peek = 0 + return c +} + +// readKeyword reads the given keyword from the input. +// If the keyword is not present, readKeyword records a syntax error. +func (r *importReader) readKeyword(kw string) { + r.peekByte(true) + for i := 0; i < len(kw); i++ { + if r.nextByte(false) != kw[i] { + r.syntaxError() + return + } + } + if isIdent(r.peekByte(false)) { + r.syntaxError() + } +} + +// readIdent reads an identifier from the input. +// If an identifier is not present, readIdent records a syntax error. +func (r *importReader) readIdent() { + c := r.peekByte(true) + if !isIdent(c) { + r.syntaxError() + return + } + for isIdent(r.peekByte(false)) { + r.peek = 0 + } +} + +// readString reads a quoted string literal from the input. +// If an identifier is not present, readString records a syntax error. 
+func (r *importReader) readString(save *[]string) { + switch r.nextByte(true) { + case '`': + start := len(r.buf) - 1 + for r.err == nil { + if r.nextByte(false) == '`' { + if save != nil { + *save = append(*save, string(r.buf[start:])) + } + break + } + if r.eof { + r.syntaxError() + } + } + case '"': + start := len(r.buf) - 1 + for r.err == nil { + c := r.nextByte(false) + if c == '"' { + if save != nil { + *save = append(*save, string(r.buf[start:])) + } + break + } + if r.eof || c == '\n' { + r.syntaxError() + } + if c == '\\' { + r.nextByte(false) + } + } + default: + r.syntaxError() + } +} + +// readImport reads an import clause - optional identifier followed by quoted string - +// from the input. +func (r *importReader) readImport(imports *[]string) { + c := r.peekByte(true) + if c == '.' { + r.peek = 0 + } else if isIdent(c) { + r.readIdent() + } + r.readString(imports) +} + +// ReadComments is like io.ReadAll, except that it only reads the leading +// block of comments in the file. +func ReadComments(f io.Reader) ([]byte, error) { + r := newImportReader(bufio.NewReader(f)) + r.peekByte(true) + if r.err == nil && !r.eof { + // Didn't reach EOF, so must have found a non-space byte. Remove it. + r.buf = r.buf[:len(r.buf)-1] + } + return r.buf, r.err +} + +// ReadImports is like io.ReadAll, except that it expects a Go file as input +// and stops reading the input once the imports have completed. +func ReadImports(f io.Reader, reportSyntaxError bool, imports *[]string) ([]byte, error) { + r := newImportReader(bufio.NewReader(f)) + + r.readKeyword("package") + r.readIdent() + for r.peekByte(true) == 'i' { + r.readKeyword("import") + if r.peekByte(true) == '(' { + r.nextByte(false) + for r.peekByte(true) != ')' && r.err == nil { + r.readImport(imports) + } + r.nextByte(false) + } else { + r.readImport(imports) + } + } + + // If we stopped successfully before EOF, we read a byte that told us we were done. 
+ // Return all but that last byte, which would cause a syntax error if we let it through. + if r.err == nil && !r.eof { + return r.buf[:len(r.buf)-1], nil + } + + // If we stopped for a syntax error, consume the whole file so that + // we are sure we don't change the errors that go/parser returns. + if r.err == errSyntax && !reportSyntaxError { + r.err = nil + for r.err == nil && !r.eof { + r.readByte() + } + } + + return r.buf, r.err +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/read_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/read_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6a1a6524a116d50c8036baa0b0ff3d60e08ccf1a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/read_test.go @@ -0,0 +1,254 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Copied from Go distribution src/go/build/read.go. + +package imports + +import ( + "io" + "strings" + "testing" +) + +const quote = "`" + +type readTest struct { + // Test input contains ℙ where readImports should stop. + in string + err string +} + +var readImportsTests = []readTest{ + { + `package p`, + "", + }, + { + `package p; import "x"`, + "", + }, + { + `package p; import . 
"x"`, + "", + }, + { + `package p; import "x";ℙvar x = 1`, + "", + }, + { + `package p + + // comment + + import "x" + import _ "x" + import a "x" + + /* comment */ + + import ( + "x" /* comment */ + _ "x" + a "x" // comment + ` + quote + `x` + quote + ` + _ /*comment*/ ` + quote + `x` + quote + ` + a ` + quote + `x` + quote + ` + ) + import ( + ) + import () + import()import()import() + import();import();import() + + ℙvar x = 1 + `, + "", + }, + { + "\ufeff𝔻" + `package p; import "x";ℙvar x = 1`, + "", + }, +} + +var readCommentsTests = []readTest{ + { + `ℙpackage p`, + "", + }, + { + `ℙpackage p; import "x"`, + "", + }, + { + `ℙpackage p; import . "x"`, + "", + }, + { + "\ufeff𝔻" + `ℙpackage p; import . "x"`, + "", + }, + { + `// foo + + /* bar */ + + /* quux */ // baz + + /*/ zot */ + + // asdf + ℙHello, world`, + "", + }, + { + "\ufeff𝔻" + `// foo + + /* bar */ + + /* quux */ // baz + + /*/ zot */ + + // asdf + ℙHello, world`, + "", + }, +} + +func testRead(t *testing.T, tests []readTest, read func(io.Reader) ([]byte, error)) { + for i, tt := range tests { + var in, testOut string + j := strings.Index(tt.in, "ℙ") + if j < 0 { + in = tt.in + testOut = tt.in + } else { + in = tt.in[:j] + tt.in[j+len("ℙ"):] + testOut = tt.in[:j] + } + d := strings.Index(tt.in, "𝔻") + if d >= 0 { + in = in[:d] + in[d+len("𝔻"):] + testOut = testOut[d+len("𝔻"):] + } + r := strings.NewReader(in) + buf, err := read(r) + if err != nil { + if tt.err == "" { + t.Errorf("#%d: err=%q, expected success (%q)", i, err, string(buf)) + continue + } + if !strings.Contains(err.Error(), tt.err) { + t.Errorf("#%d: err=%q, expected %q", i, err, tt.err) + continue + } + continue + } + if err == nil && tt.err != "" { + t.Errorf("#%d: success, expected %q", i, tt.err) + continue + } + + out := string(buf) + if out != testOut { + t.Errorf("#%d: wrong output:\nhave %q\nwant %q\n", i, out, testOut) + } + } +} + +func TestReadImports(t *testing.T) { + testRead(t, readImportsTests, func(r io.Reader) ([]byte, 
error) { return ReadImports(r, true, nil) }) +} + +func TestReadComments(t *testing.T) { + testRead(t, readCommentsTests, ReadComments) +} + +var readFailuresTests = []readTest{ + { + `package`, + "syntax error", + }, + { + "package p\n\x00\nimport `math`\n", + "unexpected NUL in input", + }, + { + `package p; import`, + "syntax error", + }, + { + `package p; import "`, + "syntax error", + }, + { + "package p; import ` \n\n", + "syntax error", + }, + { + `package p; import "x`, + "syntax error", + }, + { + `package p; import _`, + "syntax error", + }, + { + `package p; import _ "`, + "syntax error", + }, + { + `package p; import _ "x`, + "syntax error", + }, + { + `package p; import .`, + "syntax error", + }, + { + `package p; import . "`, + "syntax error", + }, + { + `package p; import . "x`, + "syntax error", + }, + { + `package p; import (`, + "syntax error", + }, + { + `package p; import ("`, + "syntax error", + }, + { + `package p; import ("x`, + "syntax error", + }, + { + `package p; import ("x"`, + "syntax error", + }, +} + +func TestReadFailures(t *testing.T) { + // Errors should be reported (true arg to readImports). + testRead(t, readFailuresTests, func(r io.Reader) ([]byte, error) { return ReadImports(r, true, nil) }) +} + +func TestReadFailuresIgnored(t *testing.T) { + // Syntax errors should not be reported (false arg to readImports). + // Instead, entire file should be the output and no error. + // Convert tests not to return syntax errors. 
+ tests := make([]readTest, len(readFailuresTests)) + copy(tests, readFailuresTests) + for i := range tests { + tt := &tests[i] + if !strings.Contains(tt.err, "NUL") { + tt.err = "" + } + } + testRead(t, tests, func(r io.Reader) ([]byte, error) { return ReadImports(r, false, nil) }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/scan.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/scan.go new file mode 100644 index 0000000000000000000000000000000000000000..ee11a8708b5eaa26c904bbb41477c19aa773e136 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/scan.go @@ -0,0 +1,107 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "fmt" + "io/fs" + "path/filepath" + "sort" + "strconv" + "strings" + + "cmd/go/internal/fsys" +) + +func ScanDir(dir string, tags map[string]bool) ([]string, []string, error) { + infos, err := fsys.ReadDir(dir) + if err != nil { + return nil, nil, err + } + var files []string + for _, info := range infos { + name := info.Name() + + // If the directory entry is a symlink, stat it to obtain the info for the + // link target instead of the link itself. + if info.Mode()&fs.ModeSymlink != 0 { + info, err = fsys.Stat(filepath.Join(dir, name)) + if err != nil { + continue // Ignore broken symlinks. 
+ } + } + + if info.Mode().IsRegular() && !strings.HasPrefix(name, "_") && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") && MatchFile(name, tags) { + files = append(files, filepath.Join(dir, name)) + } + } + return scanFiles(files, tags, false) +} + +func ScanFiles(files []string, tags map[string]bool) ([]string, []string, error) { + return scanFiles(files, tags, true) +} + +func scanFiles(files []string, tags map[string]bool, explicitFiles bool) ([]string, []string, error) { + imports := make(map[string]bool) + testImports := make(map[string]bool) + numFiles := 0 +Files: + for _, name := range files { + r, err := fsys.Open(name) + if err != nil { + return nil, nil, err + } + var list []string + data, err := ReadImports(r, false, &list) + r.Close() + if err != nil { + return nil, nil, fmt.Errorf("reading %s: %v", name, err) + } + + // import "C" is implicit requirement of cgo tag. + // When listing files on the command line (explicitFiles=true) + // we do not apply build tag filtering but we still do apply + // cgo filtering, so no explicitFiles check here. + // Why? Because we always have, and it's not worth breaking + // that behavior now. 
+ for _, path := range list { + if path == `"C"` && !tags["cgo"] && !tags["*"] { + continue Files + } + } + + if !explicitFiles && !ShouldBuild(data, tags) { + continue + } + numFiles++ + m := imports + if strings.HasSuffix(name, "_test.go") { + m = testImports + } + for _, p := range list { + q, err := strconv.Unquote(p) + if err != nil { + continue + } + m[q] = true + } + } + if numFiles == 0 { + return nil, nil, ErrNoGo + } + return keys(imports), keys(testImports), nil +} + +var ErrNoGo = fmt.Errorf("no Go source files") + +func keys(m map[string]bool) []string { + var list []string + for k := range m { + list = append(list, k) + } + sort.Strings(list) + return list +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/scan_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/scan_test.go new file mode 100644 index 0000000000000000000000000000000000000000..56efa9023f19906154ec87178a0b04f30ce7b3ca --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/scan_test.go @@ -0,0 +1,93 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package imports + +import ( + "bytes" + "internal/testenv" + "os" + "path" + "path/filepath" + "strings" + "testing" +) + +func TestScan(t *testing.T) { + testenv.MustHaveGoBuild(t) + + imports, testImports, err := ScanDir(filepath.Join(testenv.GOROOT(t), "src/encoding/json"), Tags()) + if err != nil { + t.Fatal(err) + } + foundBase64 := false + for _, p := range imports { + if p == "encoding/base64" { + foundBase64 = true + } + if p == "encoding/binary" { + // A dependency but not an import + t.Errorf("json reported as importing encoding/binary but does not") + } + if p == "net/http" { + // A test import but not an import + t.Errorf("json reported as importing net/http but does not") + } + } + if !foundBase64 { + t.Errorf("json missing import encoding/base64 (%q)", imports) + } + + foundHTTP := false + for _, p := range testImports { + if p == "net/http" { + foundHTTP = true + } + if p == "unicode/utf16" { + // A package import but not a test import + t.Errorf("json reported as test-importing unicode/utf16 but does not") + } + } + if !foundHTTP { + t.Errorf("json missing test import net/http (%q)", testImports) + } +} +func TestScanDir(t *testing.T) { + testenv.MustHaveGoBuild(t) + + dirs, err := os.ReadDir("testdata") + if err != nil { + t.Fatal(err) + } + for _, dir := range dirs { + if !dir.IsDir() || strings.HasPrefix(dir.Name(), ".") { + continue + } + t.Run(dir.Name(), func(t *testing.T) { + tagsData, err := os.ReadFile(filepath.Join("testdata", dir.Name(), "tags.txt")) + if err != nil { + t.Fatalf("error reading tags: %v", err) + } + tags := make(map[string]bool) + for _, t := range strings.Fields(string(tagsData)) { + tags[t] = true + } + + wantData, err := os.ReadFile(filepath.Join("testdata", dir.Name(), "want.txt")) + if err != nil { + t.Fatalf("error reading want: %v", err) + } + want := string(bytes.TrimSpace(wantData)) + + imports, _, err := ScanDir(path.Join("testdata", dir.Name()), tags) + if err != nil { + t.Fatal(err) + } + got := 
strings.Join(imports, "\n") + if got != want { + t.Errorf("ScanDir: got imports:\n%s\n\nwant:\n%s", got, want) + } + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/tags.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/tags.go new file mode 100644 index 0000000000000000000000000000000000000000..d1467b81b0b66083a3542ca0b27f9829500eb682 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/tags.go @@ -0,0 +1,61 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package imports + +import ( + "cmd/go/internal/cfg" + "sync" +) + +var ( + tags map[string]bool + tagsOnce sync.Once +) + +// Tags returns a set of build tags that are true for the target platform. +// It includes GOOS, GOARCH, the compiler, possibly "cgo", +// release tags like "go1.13", and user-specified build tags. +func Tags() map[string]bool { + tagsOnce.Do(func() { + tags = loadTags() + }) + return tags +} + +func loadTags() map[string]bool { + tags := map[string]bool{ + cfg.BuildContext.GOOS: true, + cfg.BuildContext.GOARCH: true, + cfg.BuildContext.Compiler: true, + } + if cfg.BuildContext.CgoEnabled { + tags["cgo"] = true + } + for _, tag := range cfg.BuildContext.BuildTags { + tags[tag] = true + } + for _, tag := range cfg.BuildContext.ToolTags { + tags[tag] = true + } + for _, tag := range cfg.BuildContext.ReleaseTags { + tags[tag] = true + } + return tags +} + +var ( + anyTags map[string]bool + anyTagsOnce sync.Once +) + +// AnyTags returns a special set of build tags that satisfy nearly all +// build tag expressions. Only "ignore" and malformed build tag requirements +// are considered false. 
+func AnyTags() map[string]bool { + anyTagsOnce.Do(func() { + anyTags = map[string]bool{"*": true} + }) + return anyTags +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/.h.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/.h.go new file mode 100644 index 0000000000000000000000000000000000000000..53c529e7774a6dcf883c17b2e9556fa3d3b1a726 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/.h.go @@ -0,0 +1,3 @@ +package android + +import _ "h" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/a_android.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/a_android.go new file mode 100644 index 0000000000000000000000000000000000000000..2ed972eca57cbd68bf94e254c891b9927c2225c6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/a_android.go @@ -0,0 +1,3 @@ +package android + +import _ "a" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/b_android_arm64.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/b_android_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..ee9c312b5d7497ae2df10291317b8a8b1be36fcb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/b_android_arm64.go @@ -0,0 +1,3 @@ +package android + +import _ "b" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/c_linux.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/c_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..91624ce637ffc0d6bdebb995893e941bc2c7c354 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/c_linux.go @@ -0,0 +1,3 @@ +package android + +import _ "c" diff --git 
a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/d_linux_arm64.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/d_linux_arm64.go new file mode 100644 index 0000000000000000000000000000000000000000..34e07df2477b727d3175fcca9773cc945439b525 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/d_linux_arm64.go @@ -0,0 +1,3 @@ +package android + +import _ "d" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/e.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/e.go new file mode 100644 index 0000000000000000000000000000000000000000..f1b9c888c2cafdd83ab651a52f75b31bb1f83516 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/e.go @@ -0,0 +1,6 @@ +//go:build android +// +build android + +package android + +import _ "e" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/f.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/f.go new file mode 100644 index 0000000000000000000000000000000000000000..bb0ff7b73f67c15fffeb5ea95f0628047a794cdf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/f.go @@ -0,0 +1,6 @@ +//go:build linux +// +build linux + +package android + +import _ "f" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/g.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/g.go new file mode 100644 index 0000000000000000000000000000000000000000..ee19424890a963fa1618310d2875de3438df2a0e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/g.go @@ -0,0 +1,6 @@ +//go:build !android +// +build !android + +package android + +import _ "g" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/tags.txt 
b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/tags.txt new file mode 100644 index 0000000000000000000000000000000000000000..aaf5a6b91d7fa9bc3fd3b0be1dc163f000abbe1f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/tags.txt @@ -0,0 +1 @@ +android arm64 \ No newline at end of file diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/want.txt b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/want.txt new file mode 100644 index 0000000000000000000000000000000000000000..0fdf397db08b5cecda1b6394d4fef7395c1933ba --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/android/want.txt @@ -0,0 +1,6 @@ +a +b +c +d +e +f diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/.h.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/.h.go new file mode 100644 index 0000000000000000000000000000000000000000..53c529e7774a6dcf883c17b2e9556fa3d3b1a726 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/.h.go @@ -0,0 +1,3 @@ +package android + +import _ "h" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/a_illumos.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/a_illumos.go new file mode 100644 index 0000000000000000000000000000000000000000..2e6cb50805a491c88a0e26c83da6921e42f10651 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/a_illumos.go @@ -0,0 +1,3 @@ +package illumos + +import _ "a" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/b_illumos_amd64.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/b_illumos_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..2834d80660c9db23846a518227cbb20ab1724315 --- 
/dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/b_illumos_amd64.go @@ -0,0 +1,3 @@ +package illumos + +import _ "b" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/c_solaris.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/c_solaris.go new file mode 100644 index 0000000000000000000000000000000000000000..d7f9462f159cb35754fe68c40ed1461a08ccf064 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/c_solaris.go @@ -0,0 +1,3 @@ +package illumos + +import _ "c" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/d_solaris_amd64.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/d_solaris_amd64.go new file mode 100644 index 0000000000000000000000000000000000000000..0f52c2bb484fdfa7ed0672008dd5dbc4a5a597e7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/d_solaris_amd64.go @@ -0,0 +1,3 @@ +package illumos + +import _ "d" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/e.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/e.go new file mode 100644 index 0000000000000000000000000000000000000000..fddf2c429909b7776cd4cdddce2ed70f0cd35542 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/e.go @@ -0,0 +1,6 @@ +//go:build illumos +// +build illumos + +package illumos + +import _ "e" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/f.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/f.go new file mode 100644 index 0000000000000000000000000000000000000000..4b6d528e4c2225f29ed67e382a3561586b4f2170 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/f.go @@ -0,0 +1,6 @@ +//go:build solaris +// +build 
solaris + +package illumos + +import _ "f" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/g.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/g.go new file mode 100644 index 0000000000000000000000000000000000000000..1bf826b81510b42fafb4a11a0e9e92c7ad8d0860 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/g.go @@ -0,0 +1,6 @@ +//go:build !illumos +// +build !illumos + +package illumos + +import _ "g" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/tags.txt b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/tags.txt new file mode 100644 index 0000000000000000000000000000000000000000..b6386a32605da28470daea37889bc67ac633db2f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/tags.txt @@ -0,0 +1 @@ +illumos amd64 diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/want.txt b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/want.txt new file mode 100644 index 0000000000000000000000000000000000000000..0fdf397db08b5cecda1b6394d4fef7395c1933ba --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/illumos/want.txt @@ -0,0 +1,6 @@ +a +b +c +d +e +f diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/star/tags.txt b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/star/tags.txt new file mode 100644 index 0000000000000000000000000000000000000000..f59ec20aabf5842d237244ece8c81ab184faeac1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/star/tags.txt @@ -0,0 +1 @@ +* \ No newline at end of file diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/star/want.txt b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/star/want.txt new file 
mode 100644 index 0000000000000000000000000000000000000000..139f5f49755c40f251f28e427f1a112c8eae12f9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/star/want.txt @@ -0,0 +1,4 @@ +import1 +import2 +import3 +import4 diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/star/x.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/star/x.go new file mode 100644 index 0000000000000000000000000000000000000000..98f9191053bc3d970c7ef2c6f881e48e4f858ebc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/star/x.go @@ -0,0 +1,3 @@ +package x + +import "import1" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/star/x1.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/star/x1.go new file mode 100644 index 0000000000000000000000000000000000000000..eaaea979e9dc82285ab0f830cbc676295d0732d1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/star/x1.go @@ -0,0 +1,6 @@ +//go:build blahblh && linux && !linux && windows && darwin +// +build blahblh,linux,!linux,windows,darwin + +package x + +import "import4" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/star/x_darwin.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/star/x_darwin.go new file mode 100644 index 0000000000000000000000000000000000000000..a0c3fdd21b5f5cc2ce45bb9dc0272e7123f61b18 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/star/x_darwin.go @@ -0,0 +1,3 @@ +package xxxx + +import "import3" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/star/x_windows.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/star/x_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..63c508248fbff9ce6e7ca9defb380209943cbaf7 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/go/internal/imports/testdata/star/x_windows.go @@ -0,0 +1,3 @@ +package x + +import "import2" diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/list/context.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/list/context.go new file mode 100644 index 0000000000000000000000000000000000000000..9d6494cfba01699b2b60f95778e49e2238117253 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/list/context.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package list + +import ( + "go/build" +) + +type Context struct { + GOARCH string `json:",omitempty"` // target architecture + GOOS string `json:",omitempty"` // target operating system + GOROOT string `json:",omitempty"` // Go root + GOPATH string `json:",omitempty"` // Go path + CgoEnabled bool `json:",omitempty"` // whether cgo can be used + UseAllFiles bool `json:",omitempty"` // use files regardless of //go:build lines, file names + Compiler string `json:",omitempty"` // compiler to assume when computing target paths + BuildTags []string `json:",omitempty"` // build constraints to match in +build lines + ToolTags []string `json:",omitempty"` // toolchain-specific build constraints + ReleaseTags []string `json:",omitempty"` // releases the current release is compatible with + InstallSuffix string `json:",omitempty"` // suffix to use in the name of the install dir +} + +func newContext(c *build.Context) *Context { + return &Context{ + GOARCH: c.GOARCH, + GOOS: c.GOOS, + GOROOT: c.GOROOT, + GOPATH: c.GOPATH, + CgoEnabled: c.CgoEnabled, + UseAllFiles: c.UseAllFiles, + Compiler: c.Compiler, + BuildTags: c.BuildTags, + ToolTags: c.ToolTags, + ReleaseTags: c.ReleaseTags, + InstallSuffix: c.InstallSuffix, + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/list/list.go 
b/platform/dbops/binaries/go/go/src/cmd/go/internal/list/list.go new file mode 100644 index 0000000000000000000000000000000000000000..d9b09077c1ae1295d3865305e03c8ab4e30b594b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/list/list.go @@ -0,0 +1,999 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package list implements the “go list” command. +package list + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "text/template" + + "golang.org/x/sync/semaphore" + + "cmd/go/internal/base" + "cmd/go/internal/cache" + "cmd/go/internal/cfg" + "cmd/go/internal/load" + "cmd/go/internal/modinfo" + "cmd/go/internal/modload" + "cmd/go/internal/str" + "cmd/go/internal/work" +) + +var CmdList = &base.Command{ + // Note: -f -json -m are listed explicitly because they are the most common list flags. + // Do not send CLs removing them because they're covered by [list flags]. + UsageLine: "go list [-f format] [-json] [-m] [list flags] [build flags] [packages]", + Short: "list packages or modules", + Long: ` +List lists the named packages, one per line. +The most commonly-used flags are -f and -json, which control the form +of the output printed for each package. Other list flags, documented below, +control more specific details. + +The default output shows the package import path: + + bytes + encoding/json + github.com/gorilla/mux + golang.org/x/net/html + +The -f flag specifies an alternate format for the list, using the +syntax of package template. The default output is equivalent +to -f '{{.ImportPath}}'. 
The struct being passed to the template is: + + type Package struct { + Dir string // directory containing package sources + ImportPath string // import path of package in dir + ImportComment string // path in import comment on package statement + Name string // package name + Doc string // package documentation string + Target string // install path + Shlib string // the shared library that contains this package (only set when -linkshared) + Goroot bool // is this package in the Go root? + Standard bool // is this package part of the standard Go library? + Stale bool // would 'go install' do anything for this package? + StaleReason string // explanation for Stale==true + Root string // Go root or Go path dir containing this package + ConflictDir string // this directory shadows Dir in $GOPATH + BinaryOnly bool // binary-only package (no longer supported) + ForTest string // package is only for use in named test + Export string // file containing export data (when using -export) + BuildID string // build ID of the compiled package (when using -export) + Module *Module // info about package's containing module, if any (can be nil) + Match []string // command-line patterns matching this package + DepOnly bool // package is only a dependency, not explicitly listed + DefaultGODEBUG string // default GODEBUG setting, for main packages + + // Source files + GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles) + CgoFiles []string // .go source files that import "C" + CompiledGoFiles []string // .go files presented to compiler (when using -compiled) + IgnoredGoFiles []string // .go source files ignored due to build constraints + IgnoredOtherFiles []string // non-.go source files ignored due to build constraints + CFiles []string // .c source files + CXXFiles []string // .cc, .cxx and .cpp source files + MFiles []string // .m source files + HFiles []string // .h, .hh, .hpp and .hxx source files + FFiles []string // .f, .F, .for and .f90 
Fortran source files + SFiles []string // .s source files + SwigFiles []string // .swig files + SwigCXXFiles []string // .swigcxx files + SysoFiles []string // .syso object files to add to archive + TestGoFiles []string // _test.go files in package + XTestGoFiles []string // _test.go files outside package + + // Embedded files + EmbedPatterns []string // //go:embed patterns + EmbedFiles []string // files matched by EmbedPatterns + TestEmbedPatterns []string // //go:embed patterns in TestGoFiles + TestEmbedFiles []string // files matched by TestEmbedPatterns + XTestEmbedPatterns []string // //go:embed patterns in XTestGoFiles + XTestEmbedFiles []string // files matched by XTestEmbedPatterns + + // Cgo directives + CgoCFLAGS []string // cgo: flags for C compiler + CgoCPPFLAGS []string // cgo: flags for C preprocessor + CgoCXXFLAGS []string // cgo: flags for C++ compiler + CgoFFLAGS []string // cgo: flags for Fortran compiler + CgoLDFLAGS []string // cgo: flags for linker + CgoPkgConfig []string // cgo: pkg-config names + + // Dependency information + Imports []string // import paths used by this package + ImportMap map[string]string // map from source import to ImportPath (identity entries omitted) + Deps []string // all (recursively) imported dependencies + TestImports []string // imports from TestGoFiles + XTestImports []string // imports from XTestGoFiles + + // Error information + Incomplete bool // this package or a dependency has an error + Error *PackageError // error loading package + DepsErrors []*PackageError // errors loading dependencies + } + +Packages stored in vendor directories report an ImportPath that includes the +path to the vendor directory (for example, "d/vendor/p" instead of "p"), +so that the ImportPath uniquely identifies a given copy of a package. +The Imports, Deps, TestImports, and XTestImports lists also contain these +expanded import paths. See golang.org/s/go15vendor for more about vendoring. 
+ +The error information, if any, is + + type PackageError struct { + ImportStack []string // shortest path from package named on command line to this one + Pos string // position of error (if present, file:line:col) + Err string // the error itself + } + +The module information is a Module struct, defined in the discussion +of list -m below. + +The template function "join" calls strings.Join. + +The template function "context" returns the build context, defined as: + + type Context struct { + GOARCH string // target architecture + GOOS string // target operating system + GOROOT string // Go root + GOPATH string // Go path + CgoEnabled bool // whether cgo can be used + UseAllFiles bool // use files regardless of //go:build lines, file names + Compiler string // compiler to assume when computing target paths + BuildTags []string // build constraints to match in //go:build lines + ToolTags []string // toolchain-specific build constraints + ReleaseTags []string // releases the current release is compatible with + InstallSuffix string // suffix to use in the name of the install dir + } + +For more information about the meaning of these fields see the documentation +for the go/build package's Context type. + +The -json flag causes the package data to be printed in JSON format +instead of using the template format. The JSON flag can optionally be +provided with a set of comma-separated required field names to be output. +If so, those required fields will always appear in JSON output, but +others may be omitted to save work in computing the JSON struct. + +The -compiled flag causes list to set CompiledGoFiles to the Go source +files presented to the compiler. Typically this means that it repeats +the files listed in GoFiles and then also adds the Go code generated +by processing CgoFiles and SwigFiles. The Imports list contains the +union of all imports from both GoFiles and CompiledGoFiles. 
+ +The -deps flag causes list to iterate over not just the named packages +but also all their dependencies. It visits them in a depth-first post-order +traversal, so that a package is listed only after all its dependencies. +Packages not explicitly listed on the command line will have the DepOnly +field set to true. + +The -e flag changes the handling of erroneous packages, those that +cannot be found or are malformed. By default, the list command +prints an error to standard error for each erroneous package and +omits the packages from consideration during the usual printing. +With the -e flag, the list command never prints errors to standard +error and instead processes the erroneous packages with the usual +printing. Erroneous packages will have a non-empty ImportPath and +a non-nil Error field; other information may or may not be missing +(zeroed). + +The -export flag causes list to set the Export field to the name of a +file containing up-to-date export information for the given package, +and the BuildID field to the build ID of the compiled package. + +The -find flag causes list to identify the named packages but not +resolve their dependencies: the Imports and Deps lists will be empty. +With the -find flag, the -deps, -test and -export commands cannot be +used. + +The -test flag causes list to report not only the named packages +but also their test binaries (for packages with tests), to convey to +source code analysis tools exactly how test binaries are constructed. +The reported import path for a test binary is the import path of +the package followed by a ".test" suffix, as in "math/rand.test". +When building a test, it is sometimes necessary to rebuild certain +dependencies specially for that test (most commonly the tested +package itself). The reported import path of a package recompiled +for a particular test binary is followed by a space and the name of +the test binary in brackets, as in "math/rand [math/rand.test]" +or "regexp [sort.test]". 
The ForTest field is also set to the name +of the package being tested ("math/rand" or "sort" in the previous +examples). + +The Dir, Target, Shlib, Root, ConflictDir, and Export file paths +are all absolute paths. + +By default, the lists GoFiles, CgoFiles, and so on hold names of files in Dir +(that is, paths relative to Dir, not absolute paths). +The generated files added when using the -compiled and -test flags +are absolute paths referring to cached copies of generated Go source files. +Although they are Go source files, the paths may not end in ".go". + +The -m flag causes list to list modules instead of packages. + +When listing modules, the -f flag still specifies a format template +applied to a Go struct, but now a Module struct: + + type Module struct { + Path string // module path + Query string // version query corresponding to this version + Version string // module version + Versions []string // available module versions + Replace *Module // replaced by this module + Time *time.Time // time version was created + Update *Module // available update (with -u) + Main bool // is this the main module? + Indirect bool // module is only indirectly needed by main module + Dir string // directory holding local copy of files, if any + GoMod string // path to go.mod file describing module, if any + GoVersion string // go version used in module + Retracted []string // retraction information, if any (with -retracted or -u) + Deprecated string // deprecation message, if any (with -u) + Error *ModuleError // error loading module + Origin any // provenance of module + Reuse bool // reuse of old module info is safe + } + + type ModuleError struct { + Err string // the error itself + } + +The file GoMod refers to may be outside the module directory if the +module is in the module cache or if the -modfile flag is used. + +The default output is to print the module path and then +information about the version and replacement if any. 
+For example, 'go list -m all' might print: + + my/main/module + golang.org/x/text v0.3.0 => /tmp/text + rsc.io/pdf v0.1.1 + +The Module struct has a String method that formats this +line of output, so that the default format is equivalent +to -f '{{.String}}'. + +Note that when a module has been replaced, its Replace field +describes the replacement module, and its Dir field is set to +the replacement's source code, if present. (That is, if Replace +is non-nil, then Dir is set to Replace.Dir, with no access to +the replaced source code.) + +The -u flag adds information about available upgrades. +When the latest version of a given module is newer than +the current one, list -u sets the Module's Update field +to information about the newer module. list -u will also set +the module's Retracted field if the current version is retracted. +The Module's String method indicates an available upgrade by +formatting the newer version in brackets after the current version. +If a version is retracted, the string "(retracted)" will follow it. +For example, 'go list -m -u all' might print: + + my/main/module + golang.org/x/text v0.3.0 [v0.4.0] => /tmp/text + rsc.io/pdf v0.1.1 (retracted) [v0.1.2] + +(For tools, 'go list -m -u -json all' may be more convenient to parse.) + +The -versions flag causes list to set the Module's Versions field +to a list of all known versions of that module, ordered according +to semantic versioning, earliest to latest. The flag also changes +the default output format to display the module path followed by the +space-separated version list. + +The -retracted flag causes list to report information about retracted +module versions. When -retracted is used with -f or -json, the Retracted +field will be set to a string explaining why the version was retracted. +The string is taken from comments on the retract directive in the +module's go.mod file. When -retracted is used with -versions, retracted +versions are listed together with unretracted versions. 
The -retracted +flag may be used with or without -m. + +The arguments to list -m are interpreted as a list of modules, not packages. +The main module is the module containing the current directory. +The active modules are the main module and its dependencies. +With no arguments, list -m shows the main module. +With arguments, list -m shows the modules specified by the arguments. +Any of the active modules can be specified by its module path. +The special pattern "all" specifies all the active modules, first the main +module and then dependencies sorted by module path. +A pattern containing "..." specifies the active modules whose +module paths match the pattern. +A query of the form path@version specifies the result of that query, +which is not limited to active modules. +See 'go help modules' for more about module queries. + +The template function "module" takes a single string argument +that must be a module path or query and returns the specified +module as a Module struct. If an error occurs, the result will +be a Module struct with a non-nil Error field. + +When using -m, the -reuse=old.json flag accepts the name of file containing +the JSON output of a previous 'go list -m -json' invocation with the +same set of modifier flags (such as -u, -retracted, and -versions). +The go command may use this file to determine that a module is unchanged +since the previous invocation and avoid redownloading information about it. +Modules that are not redownloaded will be marked in the new output by +setting the Reuse field to true. Normally the module cache provides this +kind of reuse automatically; the -reuse flag can be useful on systems that +do not preserve the module cache. + +For more about build flags, see 'go help build'. + +For more about specifying packages, see 'go help packages'. + +For more about modules, see https://golang.org/ref/mod. 
+ `, +} + +func init() { + CmdList.Run = runList // break init cycle + work.AddBuildFlags(CmdList, work.DefaultBuildFlags) + if cfg.Experiment != nil && cfg.Experiment.CoverageRedesign { + work.AddCoverFlags(CmdList, nil) + } + CmdList.Flag.Var(&listJsonFields, "json", "") +} + +var ( + listCompiled = CmdList.Flag.Bool("compiled", false, "") + listDeps = CmdList.Flag.Bool("deps", false, "") + listE = CmdList.Flag.Bool("e", false, "") + listExport = CmdList.Flag.Bool("export", false, "") + listFmt = CmdList.Flag.String("f", "", "") + listFind = CmdList.Flag.Bool("find", false, "") + listJson bool + listJsonFields jsonFlag // If not empty, only output these fields. + listM = CmdList.Flag.Bool("m", false, "") + listRetracted = CmdList.Flag.Bool("retracted", false, "") + listReuse = CmdList.Flag.String("reuse", "", "") + listTest = CmdList.Flag.Bool("test", false, "") + listU = CmdList.Flag.Bool("u", false, "") + listVersions = CmdList.Flag.Bool("versions", false, "") +) + +// A StringsFlag is a command-line flag that interprets its argument +// as a space-separated list of possibly-quoted strings. 
+type jsonFlag map[string]bool + +func (v *jsonFlag) Set(s string) error { + if v, err := strconv.ParseBool(s); err == nil { + listJson = v + return nil + } + listJson = true + if *v == nil { + *v = make(map[string]bool) + } + for _, f := range strings.Split(s, ",") { + (*v)[f] = true + } + return nil +} + +func (v *jsonFlag) String() string { + var fields []string + for f := range *v { + fields = append(fields, f) + } + sort.Strings(fields) + return strings.Join(fields, ",") +} + +func (v *jsonFlag) IsBoolFlag() bool { + return true +} + +func (v *jsonFlag) needAll() bool { + return len(*v) == 0 +} + +func (v *jsonFlag) needAny(fields ...string) bool { + if v.needAll() { + return true + } + for _, f := range fields { + if (*v)[f] { + return true + } + } + return false +} + +var nl = []byte{'\n'} + +func runList(ctx context.Context, cmd *base.Command, args []string) { + modload.InitWorkfile() + + if *listFmt != "" && listJson { + base.Fatalf("go list -f cannot be used with -json") + } + if *listReuse != "" && !*listM { + base.Fatalf("go list -reuse cannot be used without -m") + } + if *listReuse != "" && modload.HasModRoot() { + base.Fatalf("go list -reuse cannot be used inside a module") + } + + work.BuildInit() + out := newTrackingWriter(os.Stdout) + defer out.w.Flush() + + if *listFmt == "" { + if *listM { + *listFmt = "{{.String}}" + if *listVersions { + *listFmt = `{{.Path}}{{range .Versions}} {{.}}{{end}}{{if .Deprecated}} (deprecated){{end}}` + } + } else { + *listFmt = "{{.ImportPath}}" + } + } + + var do func(x any) + if listJson { + do = func(x any) { + if !listJsonFields.needAll() { + v := reflect.ValueOf(x).Elem() // do is always called with a non-nil pointer. + // Clear all non-requested fields. 
+ for i := 0; i < v.NumField(); i++ { + if !listJsonFields.needAny(v.Type().Field(i).Name) { + v.Field(i).SetZero() + } + } + } + b, err := json.MarshalIndent(x, "", "\t") + if err != nil { + out.Flush() + base.Fatalf("%s", err) + } + out.Write(b) + out.Write(nl) + } + } else { + var cachedCtxt *Context + context := func() *Context { + if cachedCtxt == nil { + cachedCtxt = newContext(&cfg.BuildContext) + } + return cachedCtxt + } + fm := template.FuncMap{ + "join": strings.Join, + "context": context, + "module": func(path string) *modinfo.ModulePublic { return modload.ModuleInfo(ctx, path) }, + } + tmpl, err := template.New("main").Funcs(fm).Parse(*listFmt) + if err != nil { + base.Fatalf("%s", err) + } + do = func(x any) { + if err := tmpl.Execute(out, x); err != nil { + out.Flush() + base.Fatalf("%s", err) + } + if out.NeedNL() { + out.Write(nl) + } + } + } + + modload.Init() + if *listRetracted { + if cfg.BuildMod == "vendor" { + base.Fatalf("go list -retracted cannot be used when vendoring is enabled") + } + if !modload.Enabled() { + base.Fatalf("go list -retracted can only be used in module-aware mode") + } + } + + if *listM { + // Module mode. + if *listCompiled { + base.Fatalf("go list -compiled cannot be used with -m") + } + if *listDeps { + // TODO(rsc): Could make this mean something with -m. + base.Fatalf("go list -deps cannot be used with -m") + } + if *listExport { + base.Fatalf("go list -export cannot be used with -m") + } + if *listFind { + base.Fatalf("go list -find cannot be used with -m") + } + if *listTest { + base.Fatalf("go list -test cannot be used with -m") + } + + if modload.Init(); !modload.Enabled() { + base.Fatalf("go: list -m cannot be used with GO111MODULE=off") + } + + modload.LoadModFile(ctx) // Sets cfg.BuildMod as a side-effect. 
+ if cfg.BuildMod == "vendor" { + const actionDisabledFormat = "go: can't %s using the vendor directory\n\t(Use -mod=mod or -mod=readonly to bypass.)" + + if *listVersions { + base.Fatalf(actionDisabledFormat, "determine available versions") + } + if *listU { + base.Fatalf(actionDisabledFormat, "determine available upgrades") + } + + for _, arg := range args { + // In vendor mode, the module graph is incomplete: it contains only the + // explicit module dependencies and the modules that supply packages in + // the import graph. Reject queries that imply more information than that. + if arg == "all" { + base.Fatalf(actionDisabledFormat, "compute 'all'") + } + if strings.Contains(arg, "...") { + base.Fatalf(actionDisabledFormat, "match module patterns") + } + } + } + + var mode modload.ListMode + if *listU { + mode |= modload.ListU | modload.ListRetracted | modload.ListDeprecated + } + if *listRetracted { + mode |= modload.ListRetracted + } + if *listVersions { + mode |= modload.ListVersions + if *listRetracted { + mode |= modload.ListRetractedVersions + } + } + if *listReuse != "" && len(args) == 0 { + base.Fatalf("go: list -m -reuse only has an effect with module@version arguments") + } + mods, err := modload.ListModules(ctx, args, mode, *listReuse) + if !*listE { + for _, m := range mods { + if m.Error != nil { + base.Error(errors.New(m.Error.Err)) + } + } + if err != nil { + base.Error(err) + } + base.ExitIfErrors() + } + for _, m := range mods { + do(m) + } + return + } + + // Package mode (not -m). + if *listU { + base.Fatalf("go list -u can only be used with -m") + } + if *listVersions { + base.Fatalf("go list -versions can only be used with -m") + } + + // These pairings make no sense. 
+ if *listFind && *listDeps { + base.Fatalf("go list -deps cannot be used with -find") + } + if *listFind && *listTest { + base.Fatalf("go list -test cannot be used with -find") + } + if *listFind && *listExport { + base.Fatalf("go list -export cannot be used with -find") + } + + pkgOpts := load.PackageOpts{ + IgnoreImports: *listFind, + ModResolveTests: *listTest, + AutoVCS: true, + SuppressBuildInfo: !*listExport && !listJsonFields.needAny("Stale", "StaleReason"), + SuppressEmbedFiles: !*listExport && !listJsonFields.needAny("EmbedFiles", "TestEmbedFiles", "XTestEmbedFiles"), + } + pkgs := load.PackagesAndErrors(ctx, pkgOpts, args) + if !*listE { + w := 0 + for _, pkg := range pkgs { + if pkg.Error != nil { + base.Errorf("%v", pkg.Error) + continue + } + pkgs[w] = pkg + w++ + } + pkgs = pkgs[:w] + base.ExitIfErrors() + } + + if *listTest { + c := cache.Default() + // Add test binaries to packages to be listed. + + var wg sync.WaitGroup + sema := semaphore.NewWeighted(int64(runtime.GOMAXPROCS(0))) + type testPackageSet struct { + p, pmain, ptest, pxtest *load.Package + } + var testPackages []testPackageSet + for _, p := range pkgs { + if len(p.TestGoFiles)+len(p.XTestGoFiles) > 0 { + var pmain, ptest, pxtest *load.Package + var err error + if *listE { + sema.Acquire(ctx, 1) + wg.Add(1) + done := func() { + sema.Release(1) + wg.Done() + } + pmain, ptest, pxtest = load.TestPackagesAndErrors(ctx, done, pkgOpts, p, nil) + } else { + pmain, ptest, pxtest, err = load.TestPackagesFor(ctx, pkgOpts, p, nil) + if err != nil { + base.Fatalf("go: can't load test package: %s", err) + } + } + testPackages = append(testPackages, testPackageSet{p, pmain, ptest, pxtest}) + } + } + wg.Wait() + for _, pkgset := range testPackages { + p, pmain, ptest, pxtest := pkgset.p, pkgset.pmain, pkgset.ptest, pkgset.pxtest + if pmain != nil { + pkgs = append(pkgs, pmain) + data := *pmain.Internal.TestmainGo + sema.Acquire(ctx, 1) + wg.Add(1) + go func() { + h := cache.NewHash("testmain") + 
h.Write([]byte("testmain\n")) + h.Write(data) + out, _, err := c.Put(h.Sum(), bytes.NewReader(data)) + if err != nil { + base.Fatalf("%s", err) + } + pmain.GoFiles[0] = c.OutputFile(out) + sema.Release(1) + wg.Done() + }() + + } + if ptest != nil && ptest != p { + pkgs = append(pkgs, ptest) + } + if pxtest != nil { + pkgs = append(pkgs, pxtest) + } + } + + wg.Wait() + } + + // Remember which packages are named on the command line. + cmdline := make(map[*load.Package]bool) + for _, p := range pkgs { + cmdline[p] = true + } + + if *listDeps { + // Note: This changes the order of the listed packages + // from "as written on the command line" to + // "a depth-first post-order traversal". + // (The dependency exploration order for a given node + // is alphabetical, same as listed in .Deps.) + // Note that -deps is applied after -test, + // so that you only get descriptions of tests for the things named + // explicitly on the command line, not for all dependencies. + pkgs = loadPackageList(pkgs) + } + + // Do we need to run a build to gather information? + needStale := (listJson && listJsonFields.needAny("Stale", "StaleReason")) || strings.Contains(*listFmt, ".Stale") + if needStale || *listExport || *listCompiled { + b := work.NewBuilder("") + if *listE { + b.AllowErrors = true + } + defer func() { + if err := b.Close(); err != nil { + base.Fatal(err) + } + }() + + b.IsCmdList = true + b.NeedExport = *listExport + b.NeedCompiledGoFiles = *listCompiled + a := &work.Action{} + // TODO: Use pkgsFilter? 
+ for _, p := range pkgs { + if len(p.GoFiles)+len(p.CgoFiles) > 0 { + a.Deps = append(a.Deps, b.AutoAction(work.ModeInstall, work.ModeInstall, p)) + } + } + if cfg.Experiment.CoverageRedesign && cfg.BuildCover { + load.PrepareForCoverageBuild(pkgs) + } + b.Do(ctx, a) + } + + for _, p := range pkgs { + // Show vendor-expanded paths in listing + p.TestImports = p.Resolve(p.TestImports) + p.XTestImports = p.Resolve(p.XTestImports) + p.DepOnly = !cmdline[p] + + if *listCompiled { + p.Imports = str.StringList(p.Imports, p.Internal.CompiledImports) + } + } + + if *listTest || (cfg.BuildPGO == "auto" && len(cmdline) > 1) { + all := pkgs + if !*listDeps { + all = loadPackageList(pkgs) + } + // Update import paths to distinguish the real package p + // from p recompiled for q.test, or to distinguish between + // p compiled with different PGO profiles. + // This must happen only once the build code is done + // looking at import paths, because it will get very confused + // if it sees these. + old := make(map[string]string) + for _, p := range all { + if p.ForTest != "" || p.Internal.ForMain != "" { + new := p.Desc() + old[new] = p.ImportPath + p.ImportPath = new + } + p.DepOnly = !cmdline[p] + } + // Update import path lists to use new strings. + m := make(map[string]string) + for _, p := range all { + for _, p1 := range p.Internal.Imports { + if p1.ForTest != "" || p1.Internal.ForMain != "" { + m[old[p1.ImportPath]] = p1.ImportPath + } + } + for i, old := range p.Imports { + if new := m[old]; new != "" { + p.Imports[i] = new + } + } + clear(m) + } + } + + if listJsonFields.needAny("Deps", "DepsErrors") { + all := pkgs + // Make sure we iterate through packages in a postorder traversal, + // which load.PackageList guarantees. If *listDeps, then all is + // already in PackageList order. Otherwise, calling load.PackageList + // provides the guarantee. 
In the case of an import cycle, the last package + // visited in the cycle, importing the first encountered package in the cycle, + // is visited first. The cycle import error will be bubbled up in the traversal + // order up to the first package in the cycle, covering all the packages + // in the cycle. + if !*listDeps { + all = load.PackageList(pkgs) + } + if listJsonFields.needAny("Deps") { + for _, p := range all { + collectDeps(p) + } + } + if listJsonFields.needAny("DepsErrors") { + for _, p := range all { + collectDepsErrors(p) + } + } + } + + // TODO(golang.org/issue/40676): This mechanism could be extended to support + // -u without -m. + if *listRetracted { + // Load retractions for modules that provide packages that will be printed. + // TODO(golang.org/issue/40775): Packages from the same module refer to + // distinct ModulePublic instance. It would be nice if they could all point + // to the same instance. This would require additional global state in + // modload.loaded, so that should be refactored first. For now, we update + // all instances. 
+ modToArg := make(map[*modinfo.ModulePublic]string) + argToMods := make(map[string][]*modinfo.ModulePublic) + var args []string + addModule := func(mod *modinfo.ModulePublic) { + if mod.Version == "" { + return + } + arg := fmt.Sprintf("%s@%s", mod.Path, mod.Version) + if argToMods[arg] == nil { + args = append(args, arg) + } + argToMods[arg] = append(argToMods[arg], mod) + modToArg[mod] = arg + } + for _, p := range pkgs { + if p.Module == nil { + continue + } + addModule(p.Module) + if p.Module.Replace != nil { + addModule(p.Module.Replace) + } + } + + if len(args) > 0 { + var mode modload.ListMode + if *listRetracted { + mode |= modload.ListRetracted + } + rmods, err := modload.ListModules(ctx, args, mode, *listReuse) + if err != nil && !*listE { + base.Error(err) + } + for i, arg := range args { + rmod := rmods[i] + for _, mod := range argToMods[arg] { + mod.Retracted = rmod.Retracted + if rmod.Error != nil && mod.Error == nil { + mod.Error = rmod.Error + } + } + } + } + } + + // Record non-identity import mappings in p.ImportMap. + for _, p := range pkgs { + nRaw := len(p.Internal.RawImports) + for i, path := range p.Imports { + var srcPath string + if i < nRaw { + srcPath = p.Internal.RawImports[i] + } else { + // This path is not within the raw imports, so it must be an import + // found only within CompiledGoFiles. Those paths are found in + // CompiledImports. + srcPath = p.Internal.CompiledImports[i-nRaw] + } + + if path != srcPath { + if p.ImportMap == nil { + p.ImportMap = make(map[string]string) + } + p.ImportMap[srcPath] = path + } + } + } + + for _, p := range pkgs { + do(&p.PackagePublic) + } +} + +// loadPackageList is like load.PackageList, but prints error messages and exits +// with nonzero status if listE is not set and any package in the expanded list +// has errors. 
+func loadPackageList(roots []*load.Package) []*load.Package { + pkgs := load.PackageList(roots) + + if !*listE { + for _, pkg := range pkgs { + if pkg.Error != nil { + base.Errorf("%v", pkg.Error) + } + } + } + + return pkgs +} + +// collectDeps populates p.Deps by iterating over p.Internal.Imports. +// collectDeps must be called on all of p's Imports before being called on p. +func collectDeps(p *load.Package) { + deps := make(map[string]bool) + + for _, p := range p.Internal.Imports { + deps[p.ImportPath] = true + for _, q := range p.Deps { + deps[q] = true + } + } + + p.Deps = make([]string, 0, len(deps)) + for dep := range deps { + p.Deps = append(p.Deps, dep) + } + sort.Strings(p.Deps) +} + +// collectDeps populates p.DepsErrors by iterating over p.Internal.Imports. +// collectDepsErrors must be called on all of p's Imports before being called on p. +func collectDepsErrors(p *load.Package) { + depsErrors := make(map[*load.PackageError]bool) + + for _, p := range p.Internal.Imports { + if p.Error != nil { + depsErrors[p.Error] = true + } + for _, q := range p.DepsErrors { + depsErrors[q] = true + } + } + + p.DepsErrors = make([]*load.PackageError, 0, len(depsErrors)) + for deperr := range depsErrors { + p.DepsErrors = append(p.DepsErrors, deperr) + } + // Sort packages by the package on the top of the stack, which should be + // the package the error was produced for. Each package can have at most + // one error set on it. + sort.Slice(p.DepsErrors, func(i, j int) bool { + stki, stkj := p.DepsErrors[i].ImportStack, p.DepsErrors[j].ImportStack + // Some packages are missing import stacks. To ensure deterministic + // sort order compare two errors that are missing import stacks by + // their errors' error texts. 
+ if len(stki) == 0 { + if len(stkj) != 0 { + return true + } + + return p.DepsErrors[i].Err.Error() < p.DepsErrors[j].Err.Error() + } else if len(stkj) == 0 { + return false + } + pathi, pathj := stki[len(stki)-1], stkj[len(stkj)-1] + return pathi < pathj + }) +} + +// TrackingWriter tracks the last byte written on every write so +// we can avoid printing a newline if one was already written or +// if there is no output at all. +type TrackingWriter struct { + w *bufio.Writer + last byte +} + +func newTrackingWriter(w io.Writer) *TrackingWriter { + return &TrackingWriter{ + w: bufio.NewWriter(w), + last: '\n', + } +} + +func (t *TrackingWriter) Write(p []byte) (n int, err error) { + n, err = t.w.Write(p) + if n > 0 { + t.last = p[n-1] + } + return +} + +func (t *TrackingWriter) Flush() { + t.w.Flush() +} + +func (t *TrackingWriter) NeedNL() bool { + return t.last != '\n' +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/load/flag.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/load/flag.go new file mode 100644 index 0000000000000000000000000000000000000000..55bdab013505abc21d547e39bcf8e944a22099f4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/load/flag.go @@ -0,0 +1,96 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package load + +import ( + "cmd/go/internal/base" + "cmd/internal/quoted" + "fmt" + "strings" +) + +var ( + BuildAsmflags PerPackageFlag // -asmflags + BuildGcflags PerPackageFlag // -gcflags + BuildLdflags PerPackageFlag // -ldflags + BuildGccgoflags PerPackageFlag // -gccgoflags +) + +// A PerPackageFlag is a command-line flag implementation (a flag.Value) +// that allows specifying different effective flags for different packages. +// See 'go help build' for more details about per-package flags. 
+type PerPackageFlag struct { + raw string + present bool + values []ppfValue +} + +// A ppfValue is a single = per-package flag value. +type ppfValue struct { + match func(*Package) bool // compiled pattern + flags []string +} + +// Set is called each time the flag is encountered on the command line. +func (f *PerPackageFlag) Set(v string) error { + return f.set(v, base.Cwd()) +} + +// set is the implementation of Set, taking a cwd (current working directory) for easier testing. +func (f *PerPackageFlag) set(v, cwd string) error { + f.raw = v + f.present = true + match := func(p *Package) bool { return p.Internal.CmdlinePkg || p.Internal.CmdlineFiles } // default predicate with no pattern + // For backwards compatibility with earlier flag splitting, ignore spaces around flags. + v = strings.TrimSpace(v) + if v == "" { + // Special case: -gcflags="" means no flags for command-line arguments + // (overrides previous -gcflags="-whatever"). + f.values = append(f.values, ppfValue{match, []string{}}) + return nil + } + if !strings.HasPrefix(v, "-") { + i := strings.Index(v, "=") + if i < 0 { + return fmt.Errorf("missing = in =") + } + if i == 0 { + return fmt.Errorf("missing in =") + } + if v[0] == '\'' || v[0] == '"' { + return fmt.Errorf("parameter may not start with quote character %c", v[0]) + } + pattern := strings.TrimSpace(v[:i]) + match = MatchPackage(pattern, cwd) + v = v[i+1:] + } + flags, err := quoted.Split(v) + if err != nil { + return err + } + if flags == nil { + flags = []string{} + } + f.values = append(f.values, ppfValue{match, flags}) + return nil +} + +func (f *PerPackageFlag) String() string { return f.raw } + +// Present reports whether the flag appeared on the command line. +func (f *PerPackageFlag) Present() bool { + return f.present +} + +// For returns the flags to use for the given package. 
+func (f *PerPackageFlag) For(p *Package) []string { + flags := []string{} + for _, v := range f.values { + if v.match(p) { + flags = v.flags + } + } + return flags +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/load/flag_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/load/flag_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d3223e12d52ec999bd4e29d5e1c62274f580eb93 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/load/flag_test.go @@ -0,0 +1,135 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package load + +import ( + "fmt" + "path/filepath" + "reflect" + "testing" +) + +type ppfTestPackage struct { + path string + dir string + cmdline bool + flags []string +} + +type ppfTest struct { + args []string + pkgs []ppfTestPackage +} + +var ppfTests = []ppfTest{ + // -gcflags=-S applies only to packages on command line. + { + args: []string{"-S"}, + pkgs: []ppfTestPackage{ + {cmdline: true, flags: []string{"-S"}}, + {cmdline: false, flags: []string{}}, + }, + }, + + // -gcflags=-S -gcflags= overrides the earlier -S. 
+ { + args: []string{"-S", ""}, + pkgs: []ppfTestPackage{ + {cmdline: true, flags: []string{}}, + }, + }, + + // -gcflags=net=-S applies only to package net + { + args: []string{"net=-S"}, + pkgs: []ppfTestPackage{ + {path: "math", cmdline: true, flags: []string{}}, + {path: "net", flags: []string{"-S"}}, + }, + }, + + // -gcflags=net=-S -gcflags=net= also overrides the earlier -S + { + args: []string{"net=-S", "net="}, + pkgs: []ppfTestPackage{ + {path: "net", flags: []string{}}, + }, + }, + + // -gcflags=net/...=-S net math + // applies -S to net and net/http but not math + { + args: []string{"net/...=-S"}, + pkgs: []ppfTestPackage{ + {path: "net", flags: []string{"-S"}}, + {path: "net/http", flags: []string{"-S"}}, + {path: "math", flags: []string{}}, + }, + }, + + // -gcflags=net/...=-S -gcflags=-m net math + // applies -m to net and math and -S to other packages matching net/... + // (net matches too, but it was grabbed by the later -gcflags). + { + args: []string{"net/...=-S", "-m"}, + pkgs: []ppfTestPackage{ + {path: "net", cmdline: true, flags: []string{"-m"}}, + {path: "math", cmdline: true, flags: []string{"-m"}}, + {path: "net", cmdline: false, flags: []string{"-S"}}, + {path: "net/http", flags: []string{"-S"}}, + {path: "math", flags: []string{}}, + }, + }, + + // relative path patterns + // ppfDirTest(pattern, n, dirs...) says the first n dirs should match and the others should not. 
+ ppfDirTest(".", 1, "/my/test/dir", "/my/test", "/my/test/other", "/my/test/dir/sub"), + ppfDirTest("..", 1, "/my/test", "/my/test/dir", "/my/test/other", "/my/test/dir/sub"), + ppfDirTest("./sub", 1, "/my/test/dir/sub", "/my/test", "/my/test/dir", "/my/test/other", "/my/test/dir/sub/sub"), + ppfDirTest("../other", 1, "/my/test/other", "/my/test", "/my/test/dir", "/my/test/other/sub", "/my/test/dir/other", "/my/test/dir/sub"), + ppfDirTest("./...", 3, "/my/test/dir", "/my/test/dir/sub", "/my/test/dir/sub/sub", "/my/test/other", "/my/test/other/sub"), + ppfDirTest("../...", 4, "/my/test/dir", "/my/test/other", "/my/test/dir/sub", "/my/test/other/sub", "/my/other/test"), + ppfDirTest("../...sub...", 3, "/my/test/dir/sub", "/my/test/othersub", "/my/test/yellowsubmarine", "/my/other/test"), +} + +func ppfDirTest(pattern string, nmatch int, dirs ...string) ppfTest { + var pkgs []ppfTestPackage + for i, d := range dirs { + flags := []string{} + if i < nmatch { + flags = []string{"-S"} + } + pkgs = append(pkgs, ppfTestPackage{path: "p", dir: d, flags: flags}) + } + return ppfTest{args: []string{pattern + "=-S"}, pkgs: pkgs} +} + +func TestPerPackageFlag(t *testing.T) { + nativeDir := func(d string) string { + if filepath.Separator == '\\' { + return `C:` + filepath.FromSlash(d) + } + return d + } + + for i, tt := range ppfTests { + t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { + ppFlags := new(PerPackageFlag) + for _, arg := range tt.args { + t.Logf("set(%s)", arg) + if err := ppFlags.set(arg, nativeDir("/my/test/dir")); err != nil { + t.Fatal(err) + } + } + for _, p := range tt.pkgs { + dir := nativeDir(p.dir) + flags := ppFlags.For(&Package{PackagePublic: PackagePublic{ImportPath: p.path, Dir: dir}, Internal: PackageInternal{CmdlinePkg: p.cmdline}}) + if !reflect.DeepEqual(flags, p.flags) { + t.Errorf("For(%v, %v, %v) = %v, want %v", p.path, dir, p.cmdline, flags, p.flags) + } + } + }) + } +} diff --git 
a/platform/dbops/binaries/go/go/src/cmd/go/internal/load/godebug.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/load/godebug.go new file mode 100644 index 0000000000000000000000000000000000000000..c79245e5cd92313571ad4e3513ab3e548690fce0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/load/godebug.go @@ -0,0 +1,126 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package load + +import ( + "cmd/go/internal/modload" + "errors" + "fmt" + "go/build" + "internal/godebugs" + "sort" + "strconv" + "strings" +) + +var ErrNotGoDebug = errors.New("not //go:debug line") + +func ParseGoDebug(text string) (key, value string, err error) { + if !strings.HasPrefix(text, "//go:debug") { + return "", "", ErrNotGoDebug + } + i := strings.IndexAny(text, " \t") + if i < 0 { + if strings.TrimSpace(text) == "//go:debug" { + return "", "", fmt.Errorf("missing key=value") + } + return "", "", ErrNotGoDebug + } + k, v, ok := strings.Cut(strings.TrimSpace(text[i:]), "=") + if !ok { + return "", "", fmt.Errorf("missing key=value") + } + if strings.ContainsAny(k, " \t") { + return "", "", fmt.Errorf("key contains space") + } + if strings.ContainsAny(v, " \t") { + return "", "", fmt.Errorf("value contains space") + } + if strings.ContainsAny(k, ",") { + return "", "", fmt.Errorf("key contains comma") + } + if strings.ContainsAny(v, ",") { + return "", "", fmt.Errorf("value contains comma") + } + + for _, info := range godebugs.All { + if k == info.Name { + return k, v, nil + } + } + return "", "", fmt.Errorf("unknown //go:debug setting %q", k) +} + +// defaultGODEBUG returns the default GODEBUG setting for the main package p. +// When building a test binary, directives, testDirectives, and xtestDirectives +// list additional directives from the package under test. 
+func defaultGODEBUG(p *Package, directives, testDirectives, xtestDirectives []build.Directive) string { + if p.Name != "main" { + return "" + } + goVersion := modload.MainModules.GoVersion() + if modload.RootMode == modload.NoRoot && p.Module != nil { + // This is go install pkg@version or go run pkg@version. + // Use the Go version from the package. + // If there isn't one, then + goVersion = p.Module.GoVersion + if goVersion == "" { + goVersion = "1.20" + } + } + + m := godebugForGoVersion(goVersion) + for _, list := range [][]build.Directive{p.Internal.Build.Directives, directives, testDirectives, xtestDirectives} { + for _, d := range list { + k, v, err := ParseGoDebug(d.Text) + if err != nil { + continue + } + if m == nil { + m = make(map[string]string) + } + m[k] = v + } + } + var keys []string + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + var b strings.Builder + for _, k := range keys { + if b.Len() > 0 { + b.WriteString(",") + } + b.WriteString(k) + b.WriteString("=") + b.WriteString(m[k]) + } + return b.String() +} + +func godebugForGoVersion(v string) map[string]string { + if strings.Count(v, ".") >= 2 { + i := strings.Index(v, ".") + j := i + 1 + strings.Index(v[i+1:], ".") + v = v[:j] + } + + if !strings.HasPrefix(v, "1.") { + return nil + } + n, err := strconv.Atoi(v[len("1."):]) + if err != nil { + return nil + } + + def := make(map[string]string) + for _, info := range godebugs.All { + if n < info.Changed { + def[info.Name] = info.Old + } + } + return def +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/load/path.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/load/path.go new file mode 100644 index 0000000000000000000000000000000000000000..584cdff89163253c02dbc76ee34b72587e0f798f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/load/path.go @@ -0,0 +1,18 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package load + +import ( + "path/filepath" +) + +// expandPath returns the symlink-expanded form of path. +func expandPath(p string) string { + x, err := filepath.EvalSymlinks(p) + if err == nil { + return x + } + return p +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/load/pkg.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/load/pkg.go new file mode 100644 index 0000000000000000000000000000000000000000..1549800afb5f7e5ceee22cd4d76cb3a4ee9d7209 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/load/pkg.go @@ -0,0 +1,3568 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package load loads packages. +package load + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "go/build" + "go/scanner" + "go/token" + "internal/platform" + "io/fs" + "os" + pathpkg "path" + "path/filepath" + "runtime" + "runtime/debug" + "slices" + "sort" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/fsys" + "cmd/go/internal/gover" + "cmd/go/internal/imports" + "cmd/go/internal/modfetch" + "cmd/go/internal/modindex" + "cmd/go/internal/modinfo" + "cmd/go/internal/modload" + "cmd/go/internal/par" + "cmd/go/internal/search" + "cmd/go/internal/str" + "cmd/go/internal/trace" + "cmd/go/internal/vcs" + "cmd/internal/pkgpattern" + + "golang.org/x/mod/modfile" + "golang.org/x/mod/module" +) + +// A Package describes a single package found in a directory. +type Package struct { + PackagePublic // visible in 'go list' + Internal PackageInternal // for use inside go command only +} + +type PackagePublic struct { + // Note: These fields are part of the go command's public API. + // See list.go. 
It is okay to add fields, but not to change or + // remove existing ones. Keep in sync with ../list/list.go + Dir string `json:",omitempty"` // directory containing package sources + ImportPath string `json:",omitempty"` // import path of package in dir + ImportComment string `json:",omitempty"` // path in import comment on package statement + Name string `json:",omitempty"` // package name + Doc string `json:",omitempty"` // package documentation string + Target string `json:",omitempty"` // installed target for this package (may be executable) + Shlib string `json:",omitempty"` // the shared library that contains this package (only set when -linkshared) + Root string `json:",omitempty"` // Go root, Go path dir, or module root dir containing this package + ConflictDir string `json:",omitempty"` // Dir is hidden by this other directory + ForTest string `json:",omitempty"` // package is only for use in named test + Export string `json:",omitempty"` // file containing export data (set by go list -export) + BuildID string `json:",omitempty"` // build ID of the compiled package (set by go list -export) + Module *modinfo.ModulePublic `json:",omitempty"` // info about package's module, if any + Match []string `json:",omitempty"` // command-line patterns matching this package + Goroot bool `json:",omitempty"` // is this package found in the Go root? + Standard bool `json:",omitempty"` // is this package part of the standard Go library? + DepOnly bool `json:",omitempty"` // package is only as a dependency, not explicitly listed + BinaryOnly bool `json:",omitempty"` // package cannot be recompiled + Incomplete bool `json:",omitempty"` // was there an error loading this package or dependencies? + + DefaultGODEBUG string `json:",omitempty"` // default GODEBUG setting (only for Name=="main") + + // Stale and StaleReason remain here *only* for the list command. + // They are only initialized in preparation for list execution. 
+ // The regular build determines staleness on the fly during action execution. + Stale bool `json:",omitempty"` // would 'go install' do anything for this package? + StaleReason string `json:",omitempty"` // why is Stale true? + + // Source files + // If you add to this list you MUST add to p.AllFiles (below) too. + // Otherwise file name security lists will not apply to any new additions. + GoFiles []string `json:",omitempty"` // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles) + CgoFiles []string `json:",omitempty"` // .go source files that import "C" + CompiledGoFiles []string `json:",omitempty"` // .go output from running cgo on CgoFiles + IgnoredGoFiles []string `json:",omitempty"` // .go source files ignored due to build constraints + InvalidGoFiles []string `json:",omitempty"` // .go source files with detected problems (parse error, wrong package name, and so on) + IgnoredOtherFiles []string `json:",omitempty"` // non-.go source files ignored due to build constraints + CFiles []string `json:",omitempty"` // .c source files + CXXFiles []string `json:",omitempty"` // .cc, .cpp and .cxx source files + MFiles []string `json:",omitempty"` // .m source files + HFiles []string `json:",omitempty"` // .h, .hh, .hpp and .hxx source files + FFiles []string `json:",omitempty"` // .f, .F, .for and .f90 Fortran source files + SFiles []string `json:",omitempty"` // .s source files + SwigFiles []string `json:",omitempty"` // .swig files + SwigCXXFiles []string `json:",omitempty"` // .swigcxx files + SysoFiles []string `json:",omitempty"` // .syso system object files added to package + + // Embedded files + EmbedPatterns []string `json:",omitempty"` // //go:embed patterns + EmbedFiles []string `json:",omitempty"` // files matched by EmbedPatterns + + // Cgo directives + CgoCFLAGS []string `json:",omitempty"` // cgo: flags for C compiler + CgoCPPFLAGS []string `json:",omitempty"` // cgo: flags for C preprocessor + CgoCXXFLAGS []string `json:",omitempty"` // 
cgo: flags for C++ compiler + CgoFFLAGS []string `json:",omitempty"` // cgo: flags for Fortran compiler + CgoLDFLAGS []string `json:",omitempty"` // cgo: flags for linker + CgoPkgConfig []string `json:",omitempty"` // cgo: pkg-config names + + // Dependency information + Imports []string `json:",omitempty"` // import paths used by this package + ImportMap map[string]string `json:",omitempty"` // map from source import to ImportPath (identity entries omitted) + Deps []string `json:",omitempty"` // all (recursively) imported dependencies + + // Error information + // Incomplete is above, packed into the other bools + Error *PackageError `json:",omitempty"` // error loading this package (not dependencies) + DepsErrors []*PackageError `json:",omitempty"` // errors loading dependencies, collected by go list before output + + // Test information + // If you add to this list you MUST add to p.AllFiles (below) too. + // Otherwise file name security lists will not apply to any new additions. + TestGoFiles []string `json:",omitempty"` // _test.go files in package + TestImports []string `json:",omitempty"` // imports from TestGoFiles + TestEmbedPatterns []string `json:",omitempty"` // //go:embed patterns + TestEmbedFiles []string `json:",omitempty"` // files matched by TestEmbedPatterns + XTestGoFiles []string `json:",omitempty"` // _test.go files outside package + XTestImports []string `json:",omitempty"` // imports from XTestGoFiles + XTestEmbedPatterns []string `json:",omitempty"` // //go:embed patterns + XTestEmbedFiles []string `json:",omitempty"` // files matched by XTestEmbedPatterns +} + +// AllFiles returns the names of all the files considered for the package. +// This is used for sanity and security checks, so we include all files, +// even IgnoredGoFiles, because some subcommands consider them. +// The go/build package filtered others out (like foo_wrongGOARCH.s) +// and that's OK. 
+func (p *Package) AllFiles() []string { + files := str.StringList( + p.GoFiles, + p.CgoFiles, + // no p.CompiledGoFiles, because they are from GoFiles or generated by us + p.IgnoredGoFiles, + // no p.InvalidGoFiles, because they are from GoFiles + p.IgnoredOtherFiles, + p.CFiles, + p.CXXFiles, + p.MFiles, + p.HFiles, + p.FFiles, + p.SFiles, + p.SwigFiles, + p.SwigCXXFiles, + p.SysoFiles, + p.TestGoFiles, + p.XTestGoFiles, + ) + + // EmbedFiles may overlap with the other files. + // Dedup, but delay building the map as long as possible. + // Only files in the current directory (no slash in name) + // need to be checked against the files variable above. + var have map[string]bool + for _, file := range p.EmbedFiles { + if !strings.Contains(file, "/") { + if have == nil { + have = make(map[string]bool) + for _, file := range files { + have[file] = true + } + } + if have[file] { + continue + } + } + files = append(files, file) + } + return files +} + +// Desc returns the package "description", for use in b.showOutput. +func (p *Package) Desc() string { + if p.ForTest != "" { + return p.ImportPath + " [" + p.ForTest + ".test]" + } + if p.Internal.ForMain != "" { + return p.ImportPath + " [" + p.Internal.ForMain + "]" + } + return p.ImportPath +} + +// IsTestOnly reports whether p is a test-only package. +// +// A “test-only” package is one that: +// - is a test-only variant of an ordinary package, or +// - is a synthesized "main" package for a test binary, or +// - contains only _test.go files. +func (p *Package) IsTestOnly() bool { + return p.ForTest != "" || + p.Internal.TestmainGo != nil || + len(p.TestGoFiles)+len(p.XTestGoFiles) > 0 && len(p.GoFiles)+len(p.CgoFiles) == 0 +} + +type PackageInternal struct { + // Unexported fields are not part of the public API. 
+ Build *build.Package + Imports []*Package // this package's direct imports + CompiledImports []string // additional Imports necessary when using CompiledGoFiles (all from standard library); 1:1 with the end of PackagePublic.Imports + RawImports []string // this package's original imports as they appear in the text of the program; 1:1 with the end of PackagePublic.Imports + ForceLibrary bool // this package is a library (even if named "main") + CmdlineFiles bool // package built from files listed on command line + CmdlinePkg bool // package listed on command line + CmdlinePkgLiteral bool // package listed as literal on command line (not via wildcard) + Local bool // imported via local path (./ or ../) + LocalPrefix string // interpret ./ and ../ imports relative to this prefix + ExeName string // desired name for temporary executable + FuzzInstrument bool // package should be instrumented for fuzzing + Cover CoverSetup // coverage mode and other setup info of -cover is being applied to this package + CoverVars map[string]*CoverVar // variables created by coverage analysis + OmitDebug bool // tell linker not to write debug information + GobinSubdir bool // install target would be subdir of GOBIN + BuildInfo *debug.BuildInfo // add this info to package main + TestmainGo *[]byte // content for _testmain.go + Embed map[string][]string // //go:embed comment mapping + OrigImportPath string // original import path before adding '_test' suffix + PGOProfile string // path to PGO profile + ForMain string // the main package if this package is built specifically for it + + Asmflags []string // -asmflags for this package + Gcflags []string // -gcflags for this package + Ldflags []string // -ldflags for this package + Gccgoflags []string // -gccgoflags for this package +} + +// A NoGoError indicates that no Go files for the package were applicable to the +// build for that package. 
+// +// That may be because there were no files whatsoever, or because all files were +// excluded, or because all non-excluded files were test sources. +type NoGoError struct { + Package *Package +} + +func (e *NoGoError) Error() string { + if len(e.Package.IgnoredGoFiles) > 0 { + // Go files exist, but they were ignored due to build constraints. + return "build constraints exclude all Go files in " + e.Package.Dir + } + if len(e.Package.TestGoFiles)+len(e.Package.XTestGoFiles) > 0 { + // Test Go files exist, but we're not interested in them. + // The double-negative is unfortunate but we want e.Package.Dir + // to appear at the end of error message. + return "no non-test Go files in " + e.Package.Dir + } + return "no Go files in " + e.Package.Dir +} + +// setLoadPackageDataError presents an error found when loading package data +// as a *PackageError. It has special cases for some common errors to improve +// messages shown to users and reduce redundancy. +// +// setLoadPackageDataError returns true if it's safe to load information about +// imported packages, for example, if there was a parse error loading imports +// in one file, but other files are okay. +func (p *Package) setLoadPackageDataError(err error, path string, stk *ImportStack, importPos []token.Position) { + matchErr, isMatchErr := err.(*search.MatchError) + if isMatchErr && matchErr.Match.Pattern() == path { + if matchErr.Match.IsLiteral() { + // The error has a pattern has a pattern similar to the import path. + // It may be slightly different (./foo matching example.com/foo), + // but close enough to seem redundant. + // Unwrap the error so we don't show the pattern. + err = matchErr.Err + } + } + + // Replace (possibly wrapped) *build.NoGoError with *load.NoGoError. + // The latter is more specific about the cause. 
+ var nogoErr *build.NoGoError + if errors.As(err, &nogoErr) { + if p.Dir == "" && nogoErr.Dir != "" { + p.Dir = nogoErr.Dir + } + err = &NoGoError{Package: p} + } + + // Take only the first error from a scanner.ErrorList. PackageError only + // has room for one position, so we report the first error with a position + // instead of all of the errors without a position. + var pos string + var isScanErr bool + if scanErr, ok := err.(scanner.ErrorList); ok && len(scanErr) > 0 { + isScanErr = true // For stack push/pop below. + + scanPos := scanErr[0].Pos + scanPos.Filename = base.ShortPath(scanPos.Filename) + pos = scanPos.String() + err = errors.New(scanErr[0].Msg) + } + + // Report the error on the importing package if the problem is with the import declaration + // for example, if the package doesn't exist or if the import path is malformed. + // On the other hand, don't include a position if the problem is with the imported package, + // for example there are no Go files (NoGoError), or there's a problem in the imported + // package's source files themselves (scanner errors). + // + // TODO(matloob): Perhaps make each of those the errors in the first group + // (including modload.ImportMissingError, ImportMissingSumError, and the + // corresponding "cannot find package %q in any of" GOPATH-mode error + // produced in build.(*Context).Import; modload.AmbiguousImportError, + // and modload.PackageNotInModuleError; and the malformed module path errors + // produced in golang.org/x/mod/module.CheckMod) implement an interface + // to make it easier to check for them? That would save us from having to + // move the modload errors into this package to avoid a package import cycle, + // and from having to export an error type for the errors produced in build. 
+ if !isMatchErr && (nogoErr != nil || isScanErr) { + stk.Push(path) + defer stk.Pop() + } + + p.Error = &PackageError{ + ImportStack: stk.Copy(), + Pos: pos, + Err: err, + } + p.Incomplete = true + + if path != stk.Top() { + p.Error.setPos(importPos) + } +} + +// Resolve returns the resolved version of imports, +// which should be p.TestImports or p.XTestImports, NOT p.Imports. +// The imports in p.TestImports and p.XTestImports are not recursively +// loaded during the initial load of p, so they list the imports found in +// the source file, but most processing should be over the vendor-resolved +// import paths. We do this resolution lazily both to avoid file system work +// and because the eventual real load of the test imports (during 'go test') +// can produce better error messages if it starts with the original paths. +// The initial load of p loads all the non-test imports and rewrites +// the vendored paths, so nothing should ever call p.vendored(p.Imports). +func (p *Package) Resolve(imports []string) []string { + if len(imports) > 0 && len(p.Imports) > 0 && &imports[0] == &p.Imports[0] { + panic("internal error: p.Resolve(p.Imports) called") + } + seen := make(map[string]bool) + var all []string + for _, path := range imports { + path = ResolveImportPath(p, path) + if !seen[path] { + seen[path] = true + all = append(all, path) + } + } + sort.Strings(all) + return all +} + +// CoverVar holds the name of the generated coverage variables targeting the named file. +type CoverVar struct { + File string // local file name + Var string // name of count struct +} + +// CoverSetup holds parameters related to coverage setup for a given package (covermode, etc). 
+type CoverSetup struct { + Mode string // coverage mode for this package + Cfg string // path to config file to pass to "go tool cover" + GenMeta bool // ask cover tool to emit a static meta data if set +} + +func (p *Package) copyBuild(opts PackageOpts, pp *build.Package) { + p.Internal.Build = pp + + if pp.PkgTargetRoot != "" && cfg.BuildPkgdir != "" { + old := pp.PkgTargetRoot + pp.PkgRoot = cfg.BuildPkgdir + pp.PkgTargetRoot = cfg.BuildPkgdir + if pp.PkgObj != "" { + pp.PkgObj = filepath.Join(cfg.BuildPkgdir, strings.TrimPrefix(pp.PkgObj, old)) + } + } + + p.Dir = pp.Dir + p.ImportPath = pp.ImportPath + p.ImportComment = pp.ImportComment + p.Name = pp.Name + p.Doc = pp.Doc + p.Root = pp.Root + p.ConflictDir = pp.ConflictDir + p.BinaryOnly = pp.BinaryOnly + + // TODO? Target + p.Goroot = pp.Goroot + p.Standard = p.Goroot && p.ImportPath != "" && search.IsStandardImportPath(p.ImportPath) + p.GoFiles = pp.GoFiles + p.CgoFiles = pp.CgoFiles + p.IgnoredGoFiles = pp.IgnoredGoFiles + p.InvalidGoFiles = pp.InvalidGoFiles + p.IgnoredOtherFiles = pp.IgnoredOtherFiles + p.CFiles = pp.CFiles + p.CXXFiles = pp.CXXFiles + p.MFiles = pp.MFiles + p.HFiles = pp.HFiles + p.FFiles = pp.FFiles + p.SFiles = pp.SFiles + p.SwigFiles = pp.SwigFiles + p.SwigCXXFiles = pp.SwigCXXFiles + p.SysoFiles = pp.SysoFiles + if cfg.BuildMSan { + // There's no way for .syso files to be built both with and without + // support for memory sanitizer. Assume they are built without, + // and drop them. + p.SysoFiles = nil + } + p.CgoCFLAGS = pp.CgoCFLAGS + p.CgoCPPFLAGS = pp.CgoCPPFLAGS + p.CgoCXXFLAGS = pp.CgoCXXFLAGS + p.CgoFFLAGS = pp.CgoFFLAGS + p.CgoLDFLAGS = pp.CgoLDFLAGS + p.CgoPkgConfig = pp.CgoPkgConfig + // We modify p.Imports in place, so make copy now. 
+ p.Imports = make([]string, len(pp.Imports)) + copy(p.Imports, pp.Imports) + p.Internal.RawImports = pp.Imports + p.TestGoFiles = pp.TestGoFiles + p.TestImports = pp.TestImports + p.XTestGoFiles = pp.XTestGoFiles + p.XTestImports = pp.XTestImports + if opts.IgnoreImports { + p.Imports = nil + p.Internal.RawImports = nil + p.TestImports = nil + p.XTestImports = nil + } + p.EmbedPatterns = pp.EmbedPatterns + p.TestEmbedPatterns = pp.TestEmbedPatterns + p.XTestEmbedPatterns = pp.XTestEmbedPatterns + p.Internal.OrigImportPath = pp.ImportPath +} + +// A PackageError describes an error loading information about a package. +type PackageError struct { + ImportStack []string // shortest path from package named on command line to this one + Pos string // position of error + Err error // the error itself + IsImportCycle bool // the error is an import cycle + Hard bool // whether the error is soft or hard; soft errors are ignored in some places + alwaysPrintStack bool // whether to always print the ImportStack +} + +func (p *PackageError) Error() string { + // TODO(#43696): decide when to print the stack or the position based on + // the error type and whether the package is in the main module. + // Document the rationale. + if p.Pos != "" && (len(p.ImportStack) == 0 || !p.alwaysPrintStack) { + // Omit import stack. The full path to the file where the error + // is the most important thing. + return p.Pos + ": " + p.Err.Error() + } + + // If the error is an ImportPathError, and the last path on the stack appears + // in the error message, omit that path from the stack to avoid repetition. + // If an ImportPathError wraps another ImportPathError that matches the + // last path on the stack, we don't omit the path. An error like + // "package A imports B: error loading C caused by B" would not be clearer + // if "imports B" were omitted. 
+ if len(p.ImportStack) == 0 { + return p.Err.Error() + } + var optpos string + if p.Pos != "" { + optpos = "\n\t" + p.Pos + } + return "package " + strings.Join(p.ImportStack, "\n\timports ") + optpos + ": " + p.Err.Error() +} + +func (p *PackageError) Unwrap() error { return p.Err } + +// PackageError implements MarshalJSON so that Err is marshaled as a string +// and non-essential fields are omitted. +func (p *PackageError) MarshalJSON() ([]byte, error) { + perr := struct { + ImportStack []string + Pos string + Err string + }{p.ImportStack, p.Pos, p.Err.Error()} + return json.Marshal(perr) +} + +func (p *PackageError) setPos(posList []token.Position) { + if len(posList) == 0 { + return + } + pos := posList[0] + pos.Filename = base.ShortPath(pos.Filename) + p.Pos = pos.String() +} + +// ImportPathError is a type of error that prevents a package from being loaded +// for a given import path. When such a package is loaded, a *Package is +// returned with Err wrapping an ImportPathError: the error is attached to +// the imported package, not the importing package. +// +// The string returned by ImportPath must appear in the string returned by +// Error. Errors that wrap ImportPathError (such as PackageError) may omit +// the import path. 
+type ImportPathError interface { + error + ImportPath() string +} + +var ( + _ ImportPathError = (*importError)(nil) + _ ImportPathError = (*mainPackageError)(nil) + _ ImportPathError = (*modload.ImportMissingError)(nil) + _ ImportPathError = (*modload.ImportMissingSumError)(nil) + _ ImportPathError = (*modload.DirectImportFromImplicitDependencyError)(nil) +) + +type importError struct { + importPath string + err error // created with fmt.Errorf +} + +func ImportErrorf(path, format string, args ...any) ImportPathError { + err := &importError{importPath: path, err: fmt.Errorf(format, args...)} + if errStr := err.Error(); !strings.Contains(errStr, path) { + panic(fmt.Sprintf("path %q not in error %q", path, errStr)) + } + return err +} + +func (e *importError) Error() string { + return e.err.Error() +} + +func (e *importError) Unwrap() error { + // Don't return e.err directly, since we're only wrapping an error if %w + // was passed to ImportErrorf. + return errors.Unwrap(e.err) +} + +func (e *importError) ImportPath() string { + return e.importPath +} + +// An ImportStack is a stack of import paths, possibly with the suffix " (test)" appended. +// The import path of a test package is the import path of the corresponding +// non-test package with the suffix "_test" added. +type ImportStack []string + +func (s *ImportStack) Push(p string) { + *s = append(*s, p) +} + +func (s *ImportStack) Pop() { + *s = (*s)[0 : len(*s)-1] +} + +func (s *ImportStack) Copy() []string { + return append([]string{}, *s...) +} + +func (s *ImportStack) Top() string { + if len(*s) == 0 { + return "" + } + return (*s)[len(*s)-1] +} + +// shorterThan reports whether sp is shorter than t. +// We use this to record the shortest import sequence +// that leads to a particular package. +func (sp *ImportStack) shorterThan(t []string) bool { + s := *sp + if len(s) != len(t) { + return len(s) < len(t) + } + // If they are the same length, settle ties using string ordering. 
+ for i := range s { + if s[i] != t[i] { + return s[i] < t[i] + } + } + return false // they are equal +} + +// packageCache is a lookup cache for LoadImport, +// so that if we look up a package multiple times +// we return the same pointer each time. +var packageCache = map[string]*Package{} + +// ClearPackageCache clears the in-memory package cache and the preload caches. +// It is only for use by GOPATH-based "go get". +// TODO(jayconrod): When GOPATH-based "go get" is removed, delete this function. +func ClearPackageCache() { + clear(packageCache) + resolvedImportCache.Clear() + packageDataCache.Clear() +} + +// ClearPackageCachePartial clears packages with the given import paths from the +// in-memory package cache and the preload caches. It is only for use by +// GOPATH-based "go get". +// TODO(jayconrod): When GOPATH-based "go get" is removed, delete this function. +func ClearPackageCachePartial(args []string) { + shouldDelete := make(map[string]bool) + for _, arg := range args { + shouldDelete[arg] = true + if p := packageCache[arg]; p != nil { + delete(packageCache, arg) + } + } + resolvedImportCache.DeleteIf(func(key importSpec) bool { + return shouldDelete[key.path] + }) + packageDataCache.DeleteIf(func(key string) bool { + return shouldDelete[key] + }) +} + +// ReloadPackageNoFlags is like LoadImport but makes sure +// not to use the package cache. +// It is only for use by GOPATH-based "go get". +// TODO(rsc): When GOPATH-based "go get" is removed, delete this function. +func ReloadPackageNoFlags(arg string, stk *ImportStack) *Package { + p := packageCache[arg] + if p != nil { + delete(packageCache, arg) + resolvedImportCache.DeleteIf(func(key importSpec) bool { + return key.path == p.ImportPath + }) + packageDataCache.Delete(p.ImportPath) + } + return LoadPackage(context.TODO(), PackageOpts{}, arg, base.Cwd(), stk, nil, 0) +} + +// dirToImportPath returns the pseudo-import path we use for a package +// outside the Go path. 
It begins with _/ and then contains the full path +// to the directory. If the package lives in c:\home\gopher\my\pkg then +// the pseudo-import path is _/c_/home/gopher/my/pkg. +// Using a pseudo-import path like this makes the ./ imports no longer +// a special case, so that all the code to deal with ordinary imports works +// automatically. +func dirToImportPath(dir string) string { + return pathpkg.Join("_", strings.Map(makeImportValid, filepath.ToSlash(dir))) +} + +func makeImportValid(r rune) rune { + // Should match Go spec, compilers, and ../../go/parser/parser.go:/isValidImport. + const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD" + if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) { + return '_' + } + return r +} + +// Mode flags for loadImport and download (in get.go). +const ( + // ResolveImport means that loadImport should do import path expansion. + // That is, ResolveImport means that the import path came from + // a source file and has not been expanded yet to account for + // vendoring or possible module adjustment. + // Every import path should be loaded initially with ResolveImport, + // and then the expanded version (for example with the /vendor/ in it) + // gets recorded as the canonical import path. At that point, future loads + // of that package must not pass ResolveImport, because + // disallowVendor will reject direct use of paths containing /vendor/. + ResolveImport = 1 << iota + + // ResolveModule is for download (part of "go get") and indicates + // that the module adjustment should be done, but not vendor adjustment. + ResolveModule + + // GetTestDeps is for download (part of "go get") and indicates + // that test dependencies should be fetched too. + GetTestDeps + + // The remainder are internal modes for calls to loadImport. + + // cmdlinePkg is for a package mentioned on the command line. 
+ cmdlinePkg + + // cmdlinePkgLiteral is for a package mentioned on the command line + // without using any wildcards or meta-patterns. + cmdlinePkgLiteral +) + +// LoadImport scans the directory named by path, which must be an import path, +// but possibly a local import path (an absolute file system path or one beginning +// with ./ or ../). A local relative path is interpreted relative to srcDir. +// It returns a *Package describing the package found in that directory. +// LoadImport does not set tool flags and should only be used by +// this package, as part of a bigger load operation, and by GOPATH-based "go get". +// TODO(rsc): When GOPATH-based "go get" is removed, unexport this function. +// The returned PackageError, if any, describes why parent is not allowed +// to import the named package, with the error referring to importPos. +// The PackageError can only be non-nil when parent is not nil. +func LoadImport(ctx context.Context, opts PackageOpts, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) { + return loadImport(ctx, opts, nil, path, srcDir, parent, stk, importPos, mode) +} + +// LoadPackage does Load import, but without a parent package load contezt +func LoadPackage(ctx context.Context, opts PackageOpts, path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package { + p, err := loadImport(ctx, opts, nil, path, srcDir, nil, stk, importPos, mode) + if err != nil { + base.Fatalf("internal error: loadImport of %q with nil parent returned an error", path) + } + return p +} + +func loadImport(ctx context.Context, opts PackageOpts, pre *preload, path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) { + ctx, span := trace.StartSpan(ctx, "modload.loadImport "+path) + defer span.Done() + + if path == "" { + panic("LoadImport called with empty package path") + } + + var parentPath, parentRoot string 
+ parentIsStd := false + if parent != nil { + parentPath = parent.ImportPath + parentRoot = parent.Root + parentIsStd = parent.Standard + } + bp, loaded, err := loadPackageData(ctx, path, parentPath, srcDir, parentRoot, parentIsStd, mode) + if loaded && pre != nil && !opts.IgnoreImports { + pre.preloadImports(ctx, opts, bp.Imports, bp) + } + if bp == nil { + p := &Package{ + PackagePublic: PackagePublic{ + ImportPath: path, + Incomplete: true, + }, + } + if importErr, ok := err.(ImportPathError); !ok || importErr.ImportPath() != path { + // Only add path to the error's import stack if it's not already present + // in the error. + // + // TODO(bcmills): setLoadPackageDataError itself has a similar Push / Pop + // sequence that empirically doesn't trigger for these errors, guarded by + // a somewhat complex condition. Figure out how to generalize that + // condition and eliminate the explicit calls here. + stk.Push(path) + defer stk.Pop() + } + p.setLoadPackageDataError(err, path, stk, nil) + return p, nil + } + + setCmdline := func(p *Package) { + if mode&cmdlinePkg != 0 { + p.Internal.CmdlinePkg = true + } + if mode&cmdlinePkgLiteral != 0 { + p.Internal.CmdlinePkgLiteral = true + } + } + + importPath := bp.ImportPath + p := packageCache[importPath] + if p != nil { + stk.Push(path) + p = reusePackage(p, stk) + stk.Pop() + setCmdline(p) + } else { + p = new(Package) + p.Internal.Local = build.IsLocalImport(path) + p.ImportPath = importPath + packageCache[importPath] = p + + setCmdline(p) + + // Load package. + // loadPackageData may return bp != nil even if an error occurs, + // in order to return partial information. 
+ p.load(ctx, opts, path, stk, importPos, bp, err) + + if !cfg.ModulesEnabled && path != cleanImport(path) { + p.Error = &PackageError{ + ImportStack: stk.Copy(), + Err: ImportErrorf(path, "non-canonical import path %q: should be %q", path, pathpkg.Clean(path)), + } + p.Incomplete = true + p.Error.setPos(importPos) + } + } + + // Checked on every import because the rules depend on the code doing the importing. + if perr := disallowInternal(ctx, srcDir, parent, parentPath, p, stk); perr != nil { + perr.setPos(importPos) + return p, perr + } + if mode&ResolveImport != 0 { + if perr := disallowVendor(srcDir, path, parentPath, p, stk); perr != nil { + perr.setPos(importPos) + return p, perr + } + } + + if p.Name == "main" && parent != nil && parent.Dir != p.Dir { + perr := &PackageError{ + ImportStack: stk.Copy(), + Err: ImportErrorf(path, "import %q is a program, not an importable package", path), + } + perr.setPos(importPos) + return p, perr + } + + if p.Internal.Local && parent != nil && !parent.Internal.Local { + var err error + if path == "." { + err = ImportErrorf(path, "%s: cannot import current directory", path) + } else { + err = ImportErrorf(path, "local import %q in non-local package", path) + } + perr := &PackageError{ + ImportStack: stk.Copy(), + Err: err, + } + perr.setPos(importPos) + return p, perr + } + + return p, nil +} + +// loadPackageData loads information needed to construct a *Package. The result +// is cached, and later calls to loadPackageData for the same package will return +// the same data. +// +// loadPackageData returns a non-nil package even if err is non-nil unless +// the package path is malformed (for example, the path contains "mod/" or "@"). +// +// loadPackageData returns a boolean, loaded, which is true if this is the +// first time the package was loaded. Callers may preload imports in this case. 
+func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoot string, parentIsStd bool, mode int) (bp *build.Package, loaded bool, err error) { + ctx, span := trace.StartSpan(ctx, "load.loadPackageData "+path) + defer span.Done() + + if path == "" { + panic("loadPackageData called with empty package path") + } + + if strings.HasPrefix(path, "mod/") { + // Paths beginning with "mod/" might accidentally + // look in the module cache directory tree in $GOPATH/pkg/mod/. + // This prefix is owned by the Go core for possible use in the + // standard library (since it does not begin with a domain name), + // so it's OK to disallow entirely. + return nil, false, fmt.Errorf("disallowed import path %q", path) + } + + if strings.Contains(path, "@") { + return nil, false, errors.New("can only use path@version syntax with 'go get' and 'go install' in module-aware mode") + } + + // Determine canonical package path and directory. + // For a local import the identifier is the pseudo-import path + // we create from the full directory to the package. + // Otherwise it is the usual import path. + // For vendored imports, it is the expanded form. + // + // Note that when modules are enabled, local import paths are normally + // canonicalized by modload.LoadPackages before now. However, if there's an + // error resolving a local path, it will be returned untransformed + // so that 'go list -e' reports something useful. 
+ importKey := importSpec{ + path: path, + parentPath: parentPath, + parentDir: parentDir, + parentRoot: parentRoot, + parentIsStd: parentIsStd, + mode: mode, + } + r := resolvedImportCache.Do(importKey, func() resolvedImport { + var r resolvedImport + if cfg.ModulesEnabled { + r.dir, r.path, r.err = modload.Lookup(parentPath, parentIsStd, path) + } else if build.IsLocalImport(path) { + r.dir = filepath.Join(parentDir, path) + r.path = dirToImportPath(r.dir) + } else if mode&ResolveImport != 0 { + // We do our own path resolution, because we want to + // find out the key to use in packageCache without the + // overhead of repeated calls to buildContext.Import. + // The code is also needed in a few other places anyway. + r.path = resolveImportPath(path, parentPath, parentDir, parentRoot, parentIsStd) + } else if mode&ResolveModule != 0 { + r.path = moduleImportPath(path, parentPath, parentDir, parentRoot) + } + if r.path == "" { + r.path = path + } + return r + }) + // Invariant: r.path is set to the resolved import path. If the path cannot + // be resolved, r.path is set to path, the source import path. + // r.path is never empty. + + // Load the package from its directory. If we already found the package's + // directory when resolving its import path, use that. 
+ p, err := packageDataCache.Do(r.path, func() (*build.Package, error) { + loaded = true + var data struct { + p *build.Package + err error + } + if r.dir != "" { + var buildMode build.ImportMode + buildContext := cfg.BuildContext + if !cfg.ModulesEnabled { + buildMode = build.ImportComment + } else { + buildContext.GOPATH = "" // Clear GOPATH so packages are imported as pure module packages + } + modroot := modload.PackageModRoot(ctx, r.path) + if modroot == "" && str.HasPathPrefix(r.dir, cfg.GOROOTsrc) { + modroot = cfg.GOROOTsrc + gorootSrcCmd := filepath.Join(cfg.GOROOTsrc, "cmd") + if str.HasPathPrefix(r.dir, gorootSrcCmd) { + modroot = gorootSrcCmd + } + } + if modroot != "" { + if rp, err := modindex.GetPackage(modroot, r.dir); err == nil { + data.p, data.err = rp.Import(cfg.BuildContext, buildMode) + goto Happy + } else if !errors.Is(err, modindex.ErrNotIndexed) { + base.Fatal(err) + } + } + data.p, data.err = buildContext.ImportDir(r.dir, buildMode) + Happy: + if cfg.ModulesEnabled { + // Override data.p.Root, since ImportDir sets it to $GOPATH, if + // the module is inside $GOPATH/src. + if info := modload.PackageModuleInfo(ctx, path); info != nil { + data.p.Root = info.Dir + } + } + if r.err != nil { + if data.err != nil { + // ImportDir gave us one error, and the module loader gave us another. + // We arbitrarily choose to keep the error from ImportDir because + // that's what our tests already expect, and it seems to provide a bit + // more detail in most cases. + } else if errors.Is(r.err, imports.ErrNoGo) { + // ImportDir said there were files in the package, but the module + // loader said there weren't. Which one is right? + // Without this special-case hack, the TestScript/test_vet case fails + // on the vetfail/p1 package (added in CL 83955). + // Apparently, imports.ShouldBuild biases toward rejecting files + // with invalid build constraints, whereas ImportDir biases toward + // accepting them. 
+ // + // TODO(#41410: Figure out how this actually ought to work and fix + // this mess). + } else { + data.err = r.err + } + } + } else if r.err != nil { + data.p = new(build.Package) + data.err = r.err + } else if cfg.ModulesEnabled && path != "unsafe" { + data.p = new(build.Package) + data.err = fmt.Errorf("unknown import path %q: internal error: module loader did not resolve import", r.path) + } else { + buildMode := build.ImportComment + if mode&ResolveImport == 0 || r.path != path { + // Not vendoring, or we already found the vendored path. + buildMode |= build.IgnoreVendor + } + data.p, data.err = cfg.BuildContext.Import(r.path, parentDir, buildMode) + } + data.p.ImportPath = r.path + + // Set data.p.BinDir in cases where go/build.Context.Import + // may give us a path we don't want. + if !data.p.Goroot { + if cfg.GOBIN != "" { + data.p.BinDir = cfg.GOBIN + } else if cfg.ModulesEnabled { + data.p.BinDir = modload.BinDir() + } + } + + if !cfg.ModulesEnabled && data.err == nil && + data.p.ImportComment != "" && data.p.ImportComment != path && + !strings.Contains(path, "/vendor/") && !strings.HasPrefix(path, "vendor/") { + data.err = fmt.Errorf("code in directory %s expects import %q", data.p.Dir, data.p.ImportComment) + } + return data.p, data.err + }) + + return p, loaded, err +} + +// importSpec describes an import declaration in source code. It is used as a +// cache key for resolvedImportCache. +type importSpec struct { + path string + parentPath, parentDir, parentRoot string + parentIsStd bool + mode int +} + +// resolvedImport holds a canonical identifier for a package. It may also contain +// a path to the package's directory and an error if one occurred. resolvedImport +// is the value type in resolvedImportCache. +type resolvedImport struct { + path, dir string + err error +} + +// resolvedImportCache maps import strings to canonical package names. 
+var resolvedImportCache par.Cache[importSpec, resolvedImport] + +// packageDataCache maps canonical package names (string) to package metadata. +var packageDataCache par.ErrCache[string, *build.Package] + +// preloadWorkerCount is the number of concurrent goroutines that can load +// packages. Experimentally, there are diminishing returns with more than +// 4 workers. This was measured on the following machines. +// +// * MacBookPro with a 4-core Intel Core i7 CPU +// * Linux workstation with 6-core Intel Xeon CPU +// * Linux workstation with 24-core Intel Xeon CPU +// +// It is very likely (though not confirmed) that this workload is limited +// by memory bandwidth. We don't have a good way to determine the number of +// workers that would saturate the bus though, so runtime.GOMAXPROCS +// seems like a reasonable default. +var preloadWorkerCount = runtime.GOMAXPROCS(0) + +// preload holds state for managing concurrent preloading of package data. +// +// A preload should be created with newPreload before loading a large +// package graph. flush must be called when package loading is complete +// to ensure preload goroutines are no longer active. This is necessary +// because of global mutable state that cannot safely be read and written +// concurrently. In particular, packageDataCache may be cleared by "go get" +// in GOPATH mode, and modload.loaded (accessed via modload.Lookup) may be +// modified by modload.LoadPackages. +type preload struct { + cancel chan struct{} + sema chan struct{} +} + +// newPreload creates a new preloader. flush must be called later to avoid +// accessing global state while it is being modified. +func newPreload() *preload { + pre := &preload{ + cancel: make(chan struct{}), + sema: make(chan struct{}, preloadWorkerCount), + } + return pre +} + +// preloadMatches loads data for package paths matched by patterns. 
+// When preloadMatches returns, some packages may not be loaded yet, but +// loadPackageData and loadImport are always safe to call. +func (pre *preload) preloadMatches(ctx context.Context, opts PackageOpts, matches []*search.Match) { + for _, m := range matches { + for _, pkg := range m.Pkgs { + select { + case <-pre.cancel: + return + case pre.sema <- struct{}{}: + go func(pkg string) { + mode := 0 // don't use vendoring or module import resolution + bp, loaded, err := loadPackageData(ctx, pkg, "", base.Cwd(), "", false, mode) + <-pre.sema + if bp != nil && loaded && err == nil && !opts.IgnoreImports { + pre.preloadImports(ctx, opts, bp.Imports, bp) + } + }(pkg) + } + } + } +} + +// preloadImports queues a list of imports for preloading. +// When preloadImports returns, some packages may not be loaded yet, +// but loadPackageData and loadImport are always safe to call. +func (pre *preload) preloadImports(ctx context.Context, opts PackageOpts, imports []string, parent *build.Package) { + parentIsStd := parent.Goroot && parent.ImportPath != "" && search.IsStandardImportPath(parent.ImportPath) + for _, path := range imports { + if path == "C" || path == "unsafe" { + continue + } + select { + case <-pre.cancel: + return + case pre.sema <- struct{}{}: + go func(path string) { + bp, loaded, err := loadPackageData(ctx, path, parent.ImportPath, parent.Dir, parent.Root, parentIsStd, ResolveImport) + <-pre.sema + if bp != nil && loaded && err == nil && !opts.IgnoreImports { + pre.preloadImports(ctx, opts, bp.Imports, bp) + } + }(path) + } + } +} + +// flush stops pending preload operations. flush blocks until preload calls to +// loadPackageData have completed. The preloader will not make any new calls +// to loadPackageData. +func (pre *preload) flush() { + // flush is usually deferred. + // Don't hang program waiting for workers on panic. 
+ if v := recover(); v != nil { + panic(v) + } + + close(pre.cancel) + for i := 0; i < preloadWorkerCount; i++ { + pre.sema <- struct{}{} + } +} + +func cleanImport(path string) string { + orig := path + path = pathpkg.Clean(path) + if strings.HasPrefix(orig, "./") && path != ".." && !strings.HasPrefix(path, "../") { + path = "./" + path + } + return path +} + +var isDirCache par.Cache[string, bool] + +func isDir(path string) bool { + return isDirCache.Do(path, func() bool { + fi, err := fsys.Stat(path) + return err == nil && fi.IsDir() + }) +} + +// ResolveImportPath returns the true meaning of path when it appears in parent. +// There are two different resolutions applied. +// First, there is Go 1.5 vendoring (golang.org/s/go15vendor). +// If vendor expansion doesn't trigger, then the path is also subject to +// Go 1.11 module legacy conversion (golang.org/issue/25069). +func ResolveImportPath(parent *Package, path string) (found string) { + var parentPath, parentDir, parentRoot string + parentIsStd := false + if parent != nil { + parentPath = parent.ImportPath + parentDir = parent.Dir + parentRoot = parent.Root + parentIsStd = parent.Standard + } + return resolveImportPath(path, parentPath, parentDir, parentRoot, parentIsStd) +} + +func resolveImportPath(path, parentPath, parentDir, parentRoot string, parentIsStd bool) (found string) { + if cfg.ModulesEnabled { + if _, p, e := modload.Lookup(parentPath, parentIsStd, path); e == nil { + return p + } + return path + } + found = vendoredImportPath(path, parentPath, parentDir, parentRoot) + if found != path { + return found + } + return moduleImportPath(path, parentPath, parentDir, parentRoot) +} + +// dirAndRoot returns the source directory and workspace root +// for the package p, guaranteeing that root is a path prefix of dir. 
+func dirAndRoot(path string, dir, root string) (string, string) { + origDir, origRoot := dir, root + dir = filepath.Clean(dir) + root = filepath.Join(root, "src") + if !str.HasFilePathPrefix(dir, root) || path != "command-line-arguments" && filepath.Join(root, path) != dir { + // Look for symlinks before reporting error. + dir = expandPath(dir) + root = expandPath(root) + } + + if !str.HasFilePathPrefix(dir, root) || len(dir) <= len(root) || dir[len(root)] != filepath.Separator || path != "command-line-arguments" && !build.IsLocalImport(path) && filepath.Join(root, path) != dir { + debug.PrintStack() + base.Fatalf("unexpected directory layout:\n"+ + " import path: %s\n"+ + " root: %s\n"+ + " dir: %s\n"+ + " expand root: %s\n"+ + " expand dir: %s\n"+ + " separator: %s", + path, + filepath.Join(origRoot, "src"), + filepath.Clean(origDir), + origRoot, + origDir, + string(filepath.Separator)) + } + + return dir, root +} + +// vendoredImportPath returns the vendor-expansion of path when it appears in parent. +// If parent is x/y/z, then path might expand to x/y/z/vendor/path, x/y/vendor/path, +// x/vendor/path, vendor/path, or else stay path if none of those exist. +// vendoredImportPath returns the expanded path or, if no expansion is found, the original. +func vendoredImportPath(path, parentPath, parentDir, parentRoot string) (found string) { + if parentRoot == "" { + return path + } + + dir, root := dirAndRoot(parentPath, parentDir, parentRoot) + + vpath := "vendor/" + path + for i := len(dir); i >= len(root); i-- { + if i < len(dir) && dir[i] != filepath.Separator { + continue + } + // Note: checking for the vendor directory before checking + // for the vendor/path directory helps us hit the + // isDir cache more often. It also helps us prepare a more useful + // list of places we looked, to report when an import is not found. 
+ if !isDir(filepath.Join(dir[:i], "vendor")) { + continue + } + targ := filepath.Join(dir[:i], vpath) + if isDir(targ) && hasGoFiles(targ) { + importPath := parentPath + if importPath == "command-line-arguments" { + // If parent.ImportPath is 'command-line-arguments'. + // set to relative directory to root (also chopped root directory) + importPath = dir[len(root)+1:] + } + // We started with parent's dir c:\gopath\src\foo\bar\baz\quux\xyzzy. + // We know the import path for parent's dir. + // We chopped off some number of path elements and + // added vendor\path to produce c:\gopath\src\foo\bar\baz\vendor\path. + // Now we want to know the import path for that directory. + // Construct it by chopping the same number of path elements + // (actually the same number of bytes) from parent's import path + // and then append /vendor/path. + chopped := len(dir) - i + if chopped == len(importPath)+1 { + // We walked up from c:\gopath\src\foo\bar + // and found c:\gopath\src\vendor\path. + // We chopped \foo\bar (length 8) but the import path is "foo/bar" (length 7). + // Use "vendor/path" without any prefix. + return vpath + } + return importPath[:len(importPath)-chopped] + "/" + vpath + } + } + return path +} + +var ( + modulePrefix = []byte("\nmodule ") + goModPathCache par.Cache[string, string] +) + +// goModPath returns the module path in the go.mod in dir, if any. +func goModPath(dir string) (path string) { + return goModPathCache.Do(dir, func() string { + data, err := os.ReadFile(filepath.Join(dir, "go.mod")) + if err != nil { + return "" + } + var i int + if bytes.HasPrefix(data, modulePrefix[1:]) { + i = 0 + } else { + i = bytes.Index(data, modulePrefix) + if i < 0 { + return "" + } + i++ + } + line := data[i:] + + // Cut line at \n, drop trailing \r if present. + if j := bytes.IndexByte(line, '\n'); j >= 0 { + line = line[:j] + } + if line[len(line)-1] == '\r' { + line = line[:len(line)-1] + } + line = line[len("module "):] + + // If quoted, unquote. 
+ path = strings.TrimSpace(string(line)) + if path != "" && path[0] == '"' { + s, err := strconv.Unquote(path) + if err != nil { + return "" + } + path = s + } + return path + }) +} + +// findVersionElement returns the slice indices of the final version element /vN in path. +// If there is no such element, it returns -1, -1. +func findVersionElement(path string) (i, j int) { + j = len(path) + for i = len(path) - 1; i >= 0; i-- { + if path[i] == '/' { + if isVersionElement(path[i+1 : j]) { + return i, j + } + j = i + } + } + return -1, -1 +} + +// isVersionElement reports whether s is a well-formed path version element: +// v2, v3, v10, etc, but not v0, v05, v1. +func isVersionElement(s string) bool { + if len(s) < 2 || s[0] != 'v' || s[1] == '0' || s[1] == '1' && len(s) == 2 { + return false + } + for i := 1; i < len(s); i++ { + if s[i] < '0' || '9' < s[i] { + return false + } + } + return true +} + +// moduleImportPath translates import paths found in go modules +// back down to paths that can be resolved in ordinary builds. +// +// Define “new” code as code with a go.mod file in the same directory +// or a parent directory. If an import in new code says x/y/v2/z but +// x/y/v2/z does not exist and x/y/go.mod says “module x/y/v2”, +// then go build will read the import as x/y/z instead. +// See golang.org/issue/25069. +func moduleImportPath(path, parentPath, parentDir, parentRoot string) (found string) { + if parentRoot == "" { + return path + } + + // If there are no vN elements in path, leave it alone. + // (The code below would do the same, but only after + // some other file system accesses that we can avoid + // here by returning early.) + if i, _ := findVersionElement(path); i < 0 { + return path + } + + dir, root := dirAndRoot(parentPath, parentDir, parentRoot) + + // Consider dir and parents, up to and including root. 
+ for i := len(dir); i >= len(root); i-- { + if i < len(dir) && dir[i] != filepath.Separator { + continue + } + if goModPath(dir[:i]) != "" { + goto HaveGoMod + } + } + // This code is not in a tree with a go.mod, + // so apply no changes to the path. + return path + +HaveGoMod: + // This import is in a tree with a go.mod. + // Allow it to refer to code in GOPATH/src/x/y/z as x/y/v2/z + // if GOPATH/src/x/y/go.mod says module "x/y/v2", + + // If x/y/v2/z exists, use it unmodified. + if bp, _ := cfg.BuildContext.Import(path, "", build.IgnoreVendor); bp.Dir != "" { + return path + } + + // Otherwise look for a go.mod supplying a version element. + // Some version-like elements may appear in paths but not + // be module versions; we skip over those to look for module + // versions. For example the module m/v2 might have a + // package m/v2/api/v1/foo. + limit := len(path) + for limit > 0 { + i, j := findVersionElement(path[:limit]) + if i < 0 { + return path + } + if bp, _ := cfg.BuildContext.Import(path[:i], "", build.IgnoreVendor); bp.Dir != "" { + if mpath := goModPath(bp.Dir); mpath != "" { + // Found a valid go.mod file, so we're stopping the search. + // If the path is m/v2/p and we found m/go.mod that says + // "module m/v2", then we return "m/p". + if mpath == path[:j] { + return path[:i] + path[j:] + } + // Otherwise just return the original path. + // We didn't find anything worth rewriting, + // and the go.mod indicates that we should + // not consider parent directories. + return path + } + } + limit = i + } + return path +} + +// hasGoFiles reports whether dir contains any files with names ending in .go. +// For a vendor check we must exclude directories that contain no .go files. +// Otherwise it is not possible to vendor just a/b/c and still import the +// non-vendored a/b. See golang.org/issue/13832. 
+func hasGoFiles(dir string) bool { + files, _ := os.ReadDir(dir) + for _, f := range files { + if !f.IsDir() && strings.HasSuffix(f.Name(), ".go") { + return true + } + } + return false +} + +// reusePackage reuses package p to satisfy the import at the top +// of the import stack stk. If this use causes an import loop, +// reusePackage updates p's error information to record the loop. +func reusePackage(p *Package, stk *ImportStack) *Package { + // We use p.Internal.Imports==nil to detect a package that + // is in the midst of its own loadPackage call + // (all the recursion below happens before p.Internal.Imports gets set). + if p.Internal.Imports == nil { + if p.Error == nil { + p.Error = &PackageError{ + ImportStack: stk.Copy(), + Err: errors.New("import cycle not allowed"), + IsImportCycle: true, + } + } else if !p.Error.IsImportCycle { + // If the error is already set, but it does not indicate that + // we are in an import cycle, set IsImportCycle so that we don't + // end up stuck in a loop down the road. + p.Error.IsImportCycle = true + } + p.Incomplete = true + } + // Don't rewrite the import stack in the error if we have an import cycle. + // If we do, we'll lose the path that describes the cycle. + if p.Error != nil && !p.Error.IsImportCycle && stk.shorterThan(p.Error.ImportStack) { + p.Error.ImportStack = stk.Copy() + } + return p +} + +// disallowInternal checks that srcDir (containing package importerPath, if non-empty) +// is allowed to import p. +// If the import is allowed, disallowInternal returns the original package p. +// If not, it returns a new package containing just an appropriate error. +func disallowInternal(ctx context.Context, srcDir string, importer *Package, importerPath string, p *Package, stk *ImportStack) *PackageError { + // golang.org/s/go14internal: + // An import of a path containing the element “internal” + // is disallowed if the importing code is outside the tree + // rooted at the parent of the “internal” directory. 
+ + // There was an error loading the package; stop here. + if p.Error != nil { + return nil + } + + // The generated 'testmain' package is allowed to access testing/internal/..., + // as if it were generated into the testing directory tree + // (it's actually in a temporary directory outside any Go tree). + // This cleans up a former kludge in passing functionality to the testing package. + if str.HasPathPrefix(p.ImportPath, "testing/internal") && importerPath == "testmain" { + return nil + } + + // We can't check standard packages with gccgo. + if cfg.BuildContext.Compiler == "gccgo" && p.Standard { + return nil + } + + // The sort package depends on internal/reflectlite, but during bootstrap + // the path rewriting causes the normal internal checks to fail. + // Instead, just ignore the internal rules during bootstrap. + if p.Standard && strings.HasPrefix(importerPath, "bootstrap/") { + return nil + } + + // importerPath is empty: we started + // with a name given on the command line, not an + // import. Anything listed on the command line is fine. + if importerPath == "" { + return nil + } + + // Check for "internal" element: three cases depending on begin of string and/or end of string. + i, ok := findInternal(p.ImportPath) + if !ok { + return nil + } + + // Internal is present. + // Map import path back to directory corresponding to parent of internal. + if i > 0 { + i-- // rewind over slash in ".../internal" + } + + if p.Module == nil { + parent := p.Dir[:i+len(p.Dir)-len(p.ImportPath)] + + if str.HasFilePathPrefix(filepath.Clean(srcDir), filepath.Clean(parent)) { + return nil + } + + // Look for symlinks before reporting error. + srcDir = expandPath(srcDir) + parent = expandPath(parent) + if str.HasFilePathPrefix(filepath.Clean(srcDir), filepath.Clean(parent)) { + return nil + } + } else { + // p is in a module, so make it available based on the importer's import path instead + // of the file path (https://golang.org/issue/23970). 
+ if importer.Internal.CmdlineFiles { + // The importer is a list of command-line files. + // Pretend that the import path is the import path of the + // directory containing them. + // If the directory is outside the main modules, this will resolve to ".", + // which is not a prefix of any valid module. + importerPath, _ = modload.MainModules.DirImportPath(ctx, importer.Dir) + } + parentOfInternal := p.ImportPath[:i] + if str.HasPathPrefix(importerPath, parentOfInternal) { + return nil + } + } + + // Internal is present, and srcDir is outside parent's tree. Not allowed. + perr := &PackageError{ + alwaysPrintStack: true, + ImportStack: stk.Copy(), + Err: ImportErrorf(p.ImportPath, "use of internal package "+p.ImportPath+" not allowed"), + } + return perr +} + +// findInternal looks for the final "internal" path element in the given import path. +// If there isn't one, findInternal returns ok=false. +// Otherwise, findInternal returns ok=true and the index of the "internal". +func findInternal(path string) (index int, ok bool) { + // Three cases, depending on internal at start/end of string or not. + // The order matters: we must return the index of the final element, + // because the final one produces the most restrictive requirement + // on the importer. + switch { + case strings.HasSuffix(path, "/internal"): + return len(path) - len("internal"), true + case strings.Contains(path, "/internal/"): + return strings.LastIndex(path, "/internal/") + 1, true + case path == "internal", strings.HasPrefix(path, "internal/"): + return 0, true + } + return 0, false +} + +// disallowVendor checks that srcDir is allowed to import p as path. +// If the import is allowed, disallowVendor returns the original package p. +// If not, it returns a PackageError. +func disallowVendor(srcDir string, path string, importerPath string, p *Package, stk *ImportStack) *PackageError { + // If the importerPath is empty, we started + // with a name given on the command line, not an + // import. 
Anything listed on the command line is fine. + if importerPath == "" { + return nil + } + + if perr := disallowVendorVisibility(srcDir, p, importerPath, stk); perr != nil { + return perr + } + + // Paths like x/vendor/y must be imported as y, never as x/vendor/y. + if i, ok := FindVendor(path); ok { + perr := &PackageError{ + ImportStack: stk.Copy(), + Err: ImportErrorf(path, "%s must be imported as %s", path, path[i+len("vendor/"):]), + } + return perr + } + + return nil +} + +// disallowVendorVisibility checks that srcDir is allowed to import p. +// The rules are the same as for /internal/ except that a path ending in /vendor +// is not subject to the rules, only subdirectories of vendor. +// This allows people to have packages and commands named vendor, +// for maximal compatibility with existing source trees. +func disallowVendorVisibility(srcDir string, p *Package, importerPath string, stk *ImportStack) *PackageError { + // The stack does not include p.ImportPath. + // If there's nothing on the stack, we started + // with a name given on the command line, not an + // import. Anything listed on the command line is fine. + if importerPath == "" { + return nil + } + + // Check for "vendor" element. + i, ok := FindVendor(p.ImportPath) + if !ok { + return nil + } + + // Vendor is present. + // Map import path back to directory corresponding to parent of vendor. + if i > 0 { + i-- // rewind over slash in ".../vendor" + } + truncateTo := i + len(p.Dir) - len(p.ImportPath) + if truncateTo < 0 || len(p.Dir) < truncateTo { + return nil + } + parent := p.Dir[:truncateTo] + if str.HasFilePathPrefix(filepath.Clean(srcDir), filepath.Clean(parent)) { + return nil + } + + // Look for symlinks before reporting error. + srcDir = expandPath(srcDir) + parent = expandPath(parent) + if str.HasFilePathPrefix(filepath.Clean(srcDir), filepath.Clean(parent)) { + return nil + } + + // Vendor is present, and srcDir is outside parent's tree. Not allowed. 
+ + perr := &PackageError{ + ImportStack: stk.Copy(), + Err: errors.New("use of vendored package not allowed"), + } + return perr +} + +// FindVendor looks for the last non-terminating "vendor" path element in the given import path. +// If there isn't one, FindVendor returns ok=false. +// Otherwise, FindVendor returns ok=true and the index of the "vendor". +// +// Note that terminating "vendor" elements don't count: "x/vendor" is its own package, +// not the vendored copy of an import "" (the empty import path). +// This will allow people to have packages or commands named vendor. +// This may help reduce breakage, or it may just be confusing. We'll see. +func FindVendor(path string) (index int, ok bool) { + // Two cases, depending on internal at start of string or not. + // The order matters: we must return the index of the final element, + // because the final one is where the effective import path starts. + switch { + case strings.Contains(path, "/vendor/"): + return strings.LastIndex(path, "/vendor/") + 1, true + case strings.HasPrefix(path, "vendor/"): + return 0, true + } + return 0, false +} + +type TargetDir int + +const ( + ToTool TargetDir = iota // to GOROOT/pkg/tool (default for cmd/*) + ToBin // to bin dir inside package root (default for non-cmd/*) + StalePath // an old import path; fail to build +) + +// InstallTargetDir reports the target directory for installing the command p. 
+func InstallTargetDir(p *Package) TargetDir { + if strings.HasPrefix(p.ImportPath, "code.google.com/p/go.tools/cmd/") { + return StalePath + } + if p.Goroot && strings.HasPrefix(p.ImportPath, "cmd/") && p.Name == "main" { + switch p.ImportPath { + case "cmd/go", "cmd/gofmt": + return ToBin + } + return ToTool + } + return ToBin +} + +var cgoExclude = map[string]bool{ + "runtime/cgo": true, +} + +var cgoSyscallExclude = map[string]bool{ + "runtime/cgo": true, + "runtime/race": true, + "runtime/msan": true, + "runtime/asan": true, +} + +var foldPath = make(map[string]string) + +// exeFromImportPath returns an executable name +// for a package using the import path. +// +// The executable name is the last element of the import path. +// In module-aware mode, an additional rule is used on import paths +// consisting of two or more path elements. If the last element is +// a vN path element specifying the major version, then the +// second last element of the import path is used instead. +func (p *Package) exeFromImportPath() string { + _, elem := pathpkg.Split(p.ImportPath) + if cfg.ModulesEnabled { + // If this is example.com/mycmd/v2, it's more useful to + // install it as mycmd than as v2. See golang.org/issue/24667. + if elem != p.ImportPath && isVersionElement(elem) { + _, elem = pathpkg.Split(pathpkg.Dir(p.ImportPath)) + } + } + return elem +} + +// exeFromFiles returns an executable name for a package +// using the first element in GoFiles or CgoFiles collections without the prefix. +// +// Returns empty string in case of empty collection. 
+func (p *Package) exeFromFiles() string { + var src string + if len(p.GoFiles) > 0 { + src = p.GoFiles[0] + } else if len(p.CgoFiles) > 0 { + src = p.CgoFiles[0] + } else { + return "" + } + _, elem := filepath.Split(src) + return elem[:len(elem)-len(".go")] +} + +// DefaultExecName returns the default executable name for a package +func (p *Package) DefaultExecName() string { + if p.Internal.CmdlineFiles { + return p.exeFromFiles() + } + return p.exeFromImportPath() +} + +// load populates p using information from bp, err, which should +// be the result of calling build.Context.Import. +// stk contains the import stack, not including path itself. +func (p *Package) load(ctx context.Context, opts PackageOpts, path string, stk *ImportStack, importPos []token.Position, bp *build.Package, err error) { + p.copyBuild(opts, bp) + + // The localPrefix is the path we interpret ./ imports relative to, + // if we support them at all (not in module mode!). + // Synthesized main packages sometimes override this. + if p.Internal.Local && !cfg.ModulesEnabled { + p.Internal.LocalPrefix = dirToImportPath(p.Dir) + } + + // setError sets p.Error if it hasn't already been set. We may proceed + // after encountering some errors so that 'go list -e' has more complete + // output. If there's more than one error, we should report the first. + setError := func(err error) { + if p.Error == nil { + p.Error = &PackageError{ + ImportStack: stk.Copy(), + Err: err, + } + p.Incomplete = true + + // Add the importer's position information if the import position exists, and + // the current package being examined is the importer. + // If we have not yet accepted package p onto the import stack, + // then the cause of the error is not within p itself: the error + // must be either in an explicit command-line argument, + // or on the importer side (indicated by a non-empty importPos). 
+ if path != stk.Top() && len(importPos) > 0 { + p.Error.setPos(importPos) + } + } + } + + if err != nil { + p.Incomplete = true + p.setLoadPackageDataError(err, path, stk, importPos) + } + + useBindir := p.Name == "main" + if !p.Standard { + switch cfg.BuildBuildmode { + case "c-archive", "c-shared", "plugin": + useBindir = false + } + } + + if useBindir { + // Report an error when the old code.google.com/p/go.tools paths are used. + if InstallTargetDir(p) == StalePath { + // TODO(matloob): remove this branch, and StalePath itself. code.google.com/p/go is so + // old, even this code checking for it is stale now! + newPath := strings.Replace(p.ImportPath, "code.google.com/p/go.", "golang.org/x/", 1) + e := ImportErrorf(p.ImportPath, "the %v command has moved; use %v instead.", p.ImportPath, newPath) + setError(e) + return + } + elem := p.DefaultExecName() + cfg.ExeSuffix + full := filepath.Join(cfg.BuildContext.GOOS+"_"+cfg.BuildContext.GOARCH, elem) + if cfg.BuildContext.GOOS != runtime.GOOS || cfg.BuildContext.GOARCH != runtime.GOARCH { + // Install cross-compiled binaries to subdirectories of bin. + elem = full + } + if p.Internal.Build.BinDir == "" && cfg.ModulesEnabled { + p.Internal.Build.BinDir = modload.BinDir() + } + if p.Internal.Build.BinDir != "" { + // Install to GOBIN or bin of GOPATH entry. + p.Target = filepath.Join(p.Internal.Build.BinDir, elem) + if !p.Goroot && strings.Contains(elem, string(filepath.Separator)) && cfg.GOBIN != "" { + // Do not create $GOBIN/goos_goarch/elem. + p.Target = "" + p.Internal.GobinSubdir = true + } + } + if InstallTargetDir(p) == ToTool { + // This is for 'go tool'. + // Override all the usual logic and force it into the tool directory. + if cfg.BuildToolchainName == "gccgo" { + p.Target = filepath.Join(build.ToolDir, elem) + } else { + p.Target = filepath.Join(cfg.GOROOTpkg, "tool", full) + } + } + } else if p.Internal.Local { + // Local import turned into absolute path. + // No permanent install target. 
+ p.Target = "" + } else if p.Standard && cfg.BuildContext.Compiler == "gccgo" { + // gccgo has a preinstalled standard library that cmd/go cannot rebuild. + p.Target = "" + } else { + p.Target = p.Internal.Build.PkgObj + if cfg.BuildBuildmode == "shared" && p.Internal.Build.PkgTargetRoot != "" { + // TODO(matloob): This shouldn't be necessary, but the cmd/cgo/internal/testshared + // test fails without Target set for this condition. Figure out why and + // fix it. + p.Target = filepath.Join(p.Internal.Build.PkgTargetRoot, p.ImportPath+".a") + } + if cfg.BuildLinkshared && p.Internal.Build.PkgTargetRoot != "" { + // TODO(bcmills): The reliance on PkgTargetRoot implies that -linkshared does + // not work for any package that lacks a PkgTargetRoot — such as a non-main + // package in module mode. We should probably fix that. + targetPrefix := filepath.Join(p.Internal.Build.PkgTargetRoot, p.ImportPath) + p.Target = targetPrefix + ".a" + shlibnamefile := targetPrefix + ".shlibname" + shlib, err := os.ReadFile(shlibnamefile) + if err != nil && !os.IsNotExist(err) { + base.Fatalf("reading shlibname: %v", err) + } + if err == nil { + libname := strings.TrimSpace(string(shlib)) + if cfg.BuildContext.Compiler == "gccgo" { + p.Shlib = filepath.Join(p.Internal.Build.PkgTargetRoot, "shlibs", libname) + } else { + p.Shlib = filepath.Join(p.Internal.Build.PkgTargetRoot, libname) + } + } + } + } + + // Build augmented import list to add implicit dependencies. + // Be careful not to add imports twice, just to avoid confusion. + importPaths := p.Imports + addImport := func(path string, forCompiler bool) { + for _, p := range importPaths { + if path == p { + return + } + } + importPaths = append(importPaths, path) + if forCompiler { + p.Internal.CompiledImports = append(p.Internal.CompiledImports, path) + } + } + + if !opts.IgnoreImports { + // Cgo translation adds imports of "unsafe", "runtime/cgo" and "syscall", + // except for certain packages, to avoid circular dependencies. 
+ if p.UsesCgo() { + addImport("unsafe", true) + } + if p.UsesCgo() && (!p.Standard || !cgoExclude[p.ImportPath]) && cfg.BuildContext.Compiler != "gccgo" { + addImport("runtime/cgo", true) + } + if p.UsesCgo() && (!p.Standard || !cgoSyscallExclude[p.ImportPath]) { + addImport("syscall", true) + } + + // SWIG adds imports of some standard packages. + if p.UsesSwig() { + addImport("unsafe", true) + if cfg.BuildContext.Compiler != "gccgo" { + addImport("runtime/cgo", true) + } + addImport("syscall", true) + addImport("sync", true) + + // TODO: The .swig and .swigcxx files can use + // %go_import directives to import other packages. + } + + // The linker loads implicit dependencies. + if p.Name == "main" && !p.Internal.ForceLibrary { + ldDeps, err := LinkerDeps(p) + if err != nil { + setError(err) + return + } + for _, dep := range ldDeps { + addImport(dep, false) + } + } + } + + // Check for case-insensitive collisions of import paths. + fold := str.ToFold(p.ImportPath) + if other := foldPath[fold]; other == "" { + foldPath[fold] = p.ImportPath + } else if other != p.ImportPath { + setError(ImportErrorf(p.ImportPath, "case-insensitive import collision: %q and %q", p.ImportPath, other)) + return + } + + if !SafeArg(p.ImportPath) { + setError(ImportErrorf(p.ImportPath, "invalid import path %q", p.ImportPath)) + return + } + + // Errors after this point are caused by this package, not the importing + // package. Pushing the path here prevents us from reporting the error + // with the position of the import declaration. 
+ stk.Push(path) + defer stk.Pop() + + pkgPath := p.ImportPath + if p.Internal.CmdlineFiles { + pkgPath = "command-line-arguments" + } + if cfg.ModulesEnabled { + p.Module = modload.PackageModuleInfo(ctx, pkgPath) + } + p.DefaultGODEBUG = defaultGODEBUG(p, nil, nil, nil) + + if !opts.SuppressEmbedFiles { + p.EmbedFiles, p.Internal.Embed, err = resolveEmbed(p.Dir, p.EmbedPatterns) + if err != nil { + p.Incomplete = true + setError(err) + embedErr := err.(*EmbedError) + p.Error.setPos(p.Internal.Build.EmbedPatternPos[embedErr.Pattern]) + } + } + + // Check for case-insensitive collision of input files. + // To avoid problems on case-insensitive files, we reject any package + // where two different input files have equal names under a case-insensitive + // comparison. + inputs := p.AllFiles() + f1, f2 := str.FoldDup(inputs) + if f1 != "" { + setError(fmt.Errorf("case-insensitive file name collision: %q and %q", f1, f2)) + return + } + + // If first letter of input file is ASCII, it must be alphanumeric. + // This avoids files turning into flags when invoking commands, + // and other problems we haven't thought of yet. + // Also, _cgo_ files must be generated by us, not supplied. + // They are allowed to have //go:cgo_ldflag directives. + // The directory scan ignores files beginning with _, + // so we shouldn't see any _cgo_ files anyway, but just be safe. + for _, file := range inputs { + if !SafeArg(file) || strings.HasPrefix(file, "_cgo_") { + setError(fmt.Errorf("invalid input file name %q", file)) + return + } + } + if name := pathpkg.Base(p.ImportPath); !SafeArg(name) { + setError(fmt.Errorf("invalid input directory name %q", name)) + return + } + if strings.ContainsAny(p.Dir, "\r\n") { + setError(fmt.Errorf("invalid package directory %q", p.Dir)) + return + } + + // Build list of imported packages and full dependency list. 
+ imports := make([]*Package, 0, len(p.Imports)) + for i, path := range importPaths { + if path == "C" { + continue + } + p1, err := LoadImport(ctx, opts, path, p.Dir, p, stk, p.Internal.Build.ImportPos[path], ResolveImport) + if err != nil && p.Error == nil { + p.Error = err + p.Incomplete = true + } + + path = p1.ImportPath + importPaths[i] = path + if i < len(p.Imports) { + p.Imports[i] = path + } + + imports = append(imports, p1) + if p1.Incomplete { + p.Incomplete = true + } + } + p.Internal.Imports = imports + if p.Error == nil && p.Name == "main" && !p.Internal.ForceLibrary && !p.Incomplete && !opts.SuppressBuildInfo { + // TODO(bcmills): loading VCS metadata can be fairly slow. + // Consider starting this as a background goroutine and retrieving the result + // asynchronously when we're actually ready to build the package, or when we + // actually need to evaluate whether the package's metadata is stale. + p.setBuildInfo(ctx, opts.AutoVCS) + } + + // If cgo is not enabled, ignore cgo supporting sources + // just as we ignore go files containing import "C". + if !cfg.BuildContext.CgoEnabled { + p.CFiles = nil + p.CXXFiles = nil + p.MFiles = nil + p.SwigFiles = nil + p.SwigCXXFiles = nil + // Note that SFiles are okay (they go to the Go assembler) + // and HFiles are okay (they might be used by the SFiles). + // Also Sysofiles are okay (they might not contain object + // code; see issue #16050). + } + + // The gc toolchain only permits C source files with cgo or SWIG. + if len(p.CFiles) > 0 && !p.UsesCgo() && !p.UsesSwig() && cfg.BuildContext.Compiler == "gc" { + setError(fmt.Errorf("C source files not allowed when not using cgo or SWIG: %s", strings.Join(p.CFiles, " "))) + return + } + + // C++, Objective-C, and Fortran source files are permitted only with cgo or SWIG, + // regardless of toolchain. 
+ if len(p.CXXFiles) > 0 && !p.UsesCgo() && !p.UsesSwig() { + setError(fmt.Errorf("C++ source files not allowed when not using cgo or SWIG: %s", strings.Join(p.CXXFiles, " "))) + return + } + if len(p.MFiles) > 0 && !p.UsesCgo() && !p.UsesSwig() { + setError(fmt.Errorf("Objective-C source files not allowed when not using cgo or SWIG: %s", strings.Join(p.MFiles, " "))) + return + } + if len(p.FFiles) > 0 && !p.UsesCgo() && !p.UsesSwig() { + setError(fmt.Errorf("Fortran source files not allowed when not using cgo or SWIG: %s", strings.Join(p.FFiles, " "))) + return + } +} + +// An EmbedError indicates a problem with a go:embed directive. +type EmbedError struct { + Pattern string + Err error +} + +func (e *EmbedError) Error() string { + return fmt.Sprintf("pattern %s: %v", e.Pattern, e.Err) +} + +func (e *EmbedError) Unwrap() error { + return e.Err +} + +// ResolveEmbed resolves //go:embed patterns and returns only the file list. +// For use by go mod vendor to find embedded files it should copy into the +// vendor directory. +// TODO(#42504): Once go mod vendor uses load.PackagesAndErrors, just +// call (*Package).ResolveEmbed +func ResolveEmbed(dir string, patterns []string) ([]string, error) { + files, _, err := resolveEmbed(dir, patterns) + return files, err +} + +// resolveEmbed resolves //go:embed patterns to precise file lists. +// It sets files to the list of unique files matched (for go list), +// and it sets pmap to the more precise mapping from +// patterns to files. +func resolveEmbed(pkgdir string, patterns []string) (files []string, pmap map[string][]string, err error) { + var pattern string + defer func() { + if err != nil { + err = &EmbedError{ + Pattern: pattern, + Err: err, + } + } + }() + + // TODO(rsc): All these messages need position information for better error reports. 
+ pmap = make(map[string][]string) + have := make(map[string]int) + dirOK := make(map[string]bool) + pid := 0 // pattern ID, to allow reuse of have map + for _, pattern = range patterns { + pid++ + + glob := pattern + all := strings.HasPrefix(pattern, "all:") + if all { + glob = pattern[len("all:"):] + } + // Check pattern is valid for //go:embed. + if _, err := pathpkg.Match(glob, ""); err != nil || !validEmbedPattern(glob) { + return nil, nil, fmt.Errorf("invalid pattern syntax") + } + + // Glob to find matches. + match, err := fsys.Glob(str.QuoteGlob(str.WithFilePathSeparator(pkgdir)) + filepath.FromSlash(glob)) + if err != nil { + return nil, nil, err + } + + // Filter list of matches down to the ones that will still exist when + // the directory is packaged up as a module. (If p.Dir is in the module cache, + // only those files exist already, but if p.Dir is in the current module, + // then there may be other things lying around, like symbolic links or .git directories.) + var list []string + for _, file := range match { + // relative path to p.Dir which begins without prefix slash + rel := filepath.ToSlash(str.TrimFilePathPrefix(file, pkgdir)) + + what := "file" + info, err := fsys.Lstat(file) + if err != nil { + return nil, nil, err + } + if info.IsDir() { + what = "directory" + } + + // Check that directories along path do not begin a new module + // (do not contain a go.mod). 
+ for dir := file; len(dir) > len(pkgdir)+1 && !dirOK[dir]; dir = filepath.Dir(dir) { + if _, err := fsys.Stat(filepath.Join(dir, "go.mod")); err == nil { + return nil, nil, fmt.Errorf("cannot embed %s %s: in different module", what, rel) + } + if dir != file { + if info, err := fsys.Lstat(dir); err == nil && !info.IsDir() { + return nil, nil, fmt.Errorf("cannot embed %s %s: in non-directory %s", what, rel, dir[len(pkgdir)+1:]) + } + } + dirOK[dir] = true + if elem := filepath.Base(dir); isBadEmbedName(elem) { + if dir == file { + return nil, nil, fmt.Errorf("cannot embed %s %s: invalid name %s", what, rel, elem) + } else { + return nil, nil, fmt.Errorf("cannot embed %s %s: in invalid directory %s", what, rel, elem) + } + } + } + + switch { + default: + return nil, nil, fmt.Errorf("cannot embed irregular file %s", rel) + + case info.Mode().IsRegular(): + if have[rel] != pid { + have[rel] = pid + list = append(list, rel) + } + + case info.IsDir(): + // Gather all files in the named directory, stopping at module boundaries + // and ignoring files that wouldn't be packaged into a module. + count := 0 + err := fsys.Walk(file, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rel := filepath.ToSlash(str.TrimFilePathPrefix(path, pkgdir)) + name := info.Name() + if path != file && (isBadEmbedName(name) || ((name[0] == '.' || name[0] == '_') && !all)) { + // Ignore bad names, assuming they won't go into modules. + // Also avoid hidden files that user may not know about. + // See golang.org/issue/42328. 
+ if info.IsDir() { + return fs.SkipDir + } + return nil + } + if info.IsDir() { + if _, err := fsys.Stat(filepath.Join(path, "go.mod")); err == nil { + return filepath.SkipDir + } + return nil + } + if !info.Mode().IsRegular() { + return nil + } + count++ + if have[rel] != pid { + have[rel] = pid + list = append(list, rel) + } + return nil + }) + if err != nil { + return nil, nil, err + } + if count == 0 { + return nil, nil, fmt.Errorf("cannot embed directory %s: contains no embeddable files", rel) + } + } + } + + if len(list) == 0 { + return nil, nil, fmt.Errorf("no matching files found") + } + sort.Strings(list) + pmap[pattern] = list + } + + for file := range have { + files = append(files, file) + } + sort.Strings(files) + return files, pmap, nil +} + +func validEmbedPattern(pattern string) bool { + return pattern != "." && fs.ValidPath(pattern) +} + +// isBadEmbedName reports whether name is the base name of a file that +// can't or won't be included in modules and therefore shouldn't be treated +// as existing for embedding. +func isBadEmbedName(name string) bool { + if err := module.CheckFilePath(name); err != nil { + return true + } + switch name { + // Empty string should be impossible but make it bad. + case "": + return true + // Version control directories won't be present in module. + case ".bzr", ".hg", ".git", ".svn": + return true + } + return false +} + +// vcsStatusCache maps repository directories (string) +// to their VCS information. +var vcsStatusCache par.ErrCache[string, vcs.Status] + +func appendBuildSetting(info *debug.BuildInfo, key, value string) { + value = strings.ReplaceAll(value, "\n", " ") // make value safe + info.Settings = append(info.Settings, debug.BuildSetting{Key: key, Value: value}) +} + +// setBuildInfo gathers build information and sets it into +// p.Internal.BuildInfo, which will later be formatted as a string and embedded +// in the binary. setBuildInfo should only be called on a main package with no +// errors. 
+//
+// This information can be retrieved using debug.ReadBuildInfo.
+//
+// Note that the GoVersion field is not set here to avoid encoding it twice.
+// It is stored separately in the binary, mostly for historical reasons.
+func (p *Package) setBuildInfo(ctx context.Context, autoVCS bool) {
+	// setPkgErrorf records the first error on p; later errors are dropped.
+	setPkgErrorf := func(format string, args ...any) {
+		if p.Error == nil {
+			p.Error = &PackageError{Err: fmt.Errorf(format, args...)}
+			p.Incomplete = true
+		}
+	}
+
+	// debugModFromModinfo converts a modinfo.ModulePublic (recursing through
+	// Replace) into a debug.Module, substituting "(devel)" for a missing version.
+	var debugModFromModinfo func(*modinfo.ModulePublic) *debug.Module
+	debugModFromModinfo = func(mi *modinfo.ModulePublic) *debug.Module {
+		version := mi.Version
+		if version == "" {
+			version = "(devel)"
+		}
+		dm := &debug.Module{
+			Path:    mi.Path,
+			Version: version,
+		}
+		if mi.Replace != nil {
+			dm.Replace = debugModFromModinfo(mi.Replace)
+		} else if mi.Version != "" {
+			dm.Sum = modfetch.Sum(ctx, module.Version{Path: mi.Path, Version: mi.Version})
+		}
+		return dm
+	}
+
+	var main debug.Module
+	if p.Module != nil {
+		main = *debugModFromModinfo(p.Module)
+	}
+
+	// Breadth-first walk over the import graph, recording one debug.Module
+	// per distinct dependency module (excluding the main module itself).
+	visited := make(map[*Package]bool)
+	mdeps := make(map[module.Version]*debug.Module)
+	var q []*Package
+	q = append(q, p.Internal.Imports...)
+	for len(q) > 0 {
+		p1 := q[0]
+		q = q[1:]
+		if visited[p1] {
+			continue
+		}
+		visited[p1] = true
+		if p1.Module != nil {
+			m := module.Version{Path: p1.Module.Path, Version: p1.Module.Version}
+			if p1.Module.Path != main.Path && mdeps[m] == nil {
+				mdeps[m] = debugModFromModinfo(p1.Module)
+			}
+		}
+		q = append(q, p1.Internal.Imports...)
+	}
+	sortedMods := make([]module.Version, 0, len(mdeps))
+	for mod := range mdeps {
+		sortedMods = append(sortedMods, mod)
+	}
+	gover.ModSort(sortedMods)
+	deps := make([]*debug.Module, len(sortedMods))
+	for i, mod := range sortedMods {
+		deps[i] = mdeps[mod]
+	}
+
+	pkgPath := p.ImportPath
+	if p.Internal.CmdlineFiles {
+		pkgPath = "command-line-arguments"
+	}
+	info := &debug.BuildInfo{
+		Path: pkgPath,
+		Main: main,
+		Deps: deps,
+	}
+	appendSetting := func(key, value string) {
+		appendBuildSetting(info, key, value)
+	}
+
+	// Add command-line flags relevant to the build.
+	// This is informational, not an exhaustive list.
+	// Please keep the list sorted.
+	if cfg.BuildASan {
+		appendSetting("-asan", "true")
+	}
+	if BuildAsmflags.present {
+		appendSetting("-asmflags", BuildAsmflags.String())
+	}
+	buildmode := cfg.BuildBuildmode
+	if buildmode == "default" {
+		if p.Name == "main" {
+			buildmode = "exe"
+		} else {
+			buildmode = "archive"
+		}
+	}
+	appendSetting("-buildmode", buildmode)
+	appendSetting("-compiler", cfg.BuildContext.Compiler)
+	if gccgoflags := BuildGccgoflags.String(); gccgoflags != "" && cfg.BuildContext.Compiler == "gccgo" {
+		appendSetting("-gccgoflags", gccgoflags)
+	}
+	if gcflags := BuildGcflags.String(); gcflags != "" && cfg.BuildContext.Compiler == "gc" {
+		appendSetting("-gcflags", gcflags)
+	}
+	if ldflags := BuildLdflags.String(); ldflags != "" {
+		// https://go.dev/issue/52372: only include ldflags if -trimpath is not set,
+		// since it can include system paths through various linker flags (notably
+		// -extar, -extld, and -extldflags).
+		//
+		// TODO: since we control cmd/link, in theory we can parse ldflags to
+		// determine whether they may refer to system paths. If we do that, we can
+		// redact only those paths from the recorded -ldflags setting and still
+		// record the system-independent parts of the flags.
+		if !cfg.BuildTrimpath {
+			appendSetting("-ldflags", ldflags)
+		}
+	}
+	if cfg.BuildMSan {
+		appendSetting("-msan", "true")
+	}
+	// N.B. -pgo added later by setPGOProfilePath.
+	if cfg.BuildRace {
+		appendSetting("-race", "true")
+	}
+	if tags := cfg.BuildContext.BuildTags; len(tags) > 0 {
+		appendSetting("-tags", strings.Join(tags, ","))
+	}
+	if cfg.BuildTrimpath {
+		appendSetting("-trimpath", "true")
+	}
+	if p.DefaultGODEBUG != "" {
+		appendSetting("DefaultGODEBUG", p.DefaultGODEBUG)
+	}
+	cgo := "0"
+	if cfg.BuildContext.CgoEnabled {
+		cgo = "1"
+	}
+	appendSetting("CGO_ENABLED", cgo)
+	// https://go.dev/issue/52372: only include CGO flags if -trimpath is not set.
+	// (If -trimpath is set, it is possible that these flags include system paths.)
+	// If cgo is involved, reproducibility is already pretty well ruined anyway,
+	// given that we aren't stamping header or library versions.
+	//
+	// TODO(bcmills): perhaps we could at least parse the flags and stamp the
+	// subset of flags that are known not to be paths?
+	if cfg.BuildContext.CgoEnabled && !cfg.BuildTrimpath {
+		for _, name := range []string{"CGO_CFLAGS", "CGO_CPPFLAGS", "CGO_CXXFLAGS", "CGO_LDFLAGS"} {
+			appendSetting(name, cfg.Getenv(name))
+		}
+	}
+	appendSetting("GOARCH", cfg.BuildContext.GOARCH)
+	if cfg.RawGOEXPERIMENT != "" {
+		appendSetting("GOEXPERIMENT", cfg.RawGOEXPERIMENT)
+	}
+	appendSetting("GOOS", cfg.BuildContext.GOOS)
+	if key, val := cfg.GetArchEnv(); key != "" && val != "" {
+		appendSetting(key, val)
+	}
+
+	// Add VCS status if all conditions are true:
+	//
+	// - -buildvcs is enabled.
+	// - p is a non-test contained within a main module (there may be multiple
+	//   main modules in a workspace, but local replacements don't count).
+	// - Both the current directory and p's module's root directory are contained
+	//   in the same local repository.
+	// - We know the VCS commands needed to get the status.
+	setVCSError := func(err error) {
+		setPkgErrorf("error obtaining VCS status: %v\n\tUse -buildvcs=false to disable VCS stamping.", err)
+	}
+
+	var repoDir string
+	var vcsCmd *vcs.Cmd
+	var err error
+	const allowNesting = true
+
+	wantVCS := false
+	switch cfg.BuildBuildvcs {
+	case "true":
+		wantVCS = true // Include VCS metadata even for tests if requested explicitly; see https://go.dev/issue/52648.
+	case "auto":
+		wantVCS = autoVCS && !p.IsTestOnly()
+	case "false":
+	default:
+		panic(fmt.Sprintf("unexpected value for cfg.BuildBuildvcs: %q", cfg.BuildBuildvcs))
+	}
+
+	if wantVCS && p.Module != nil && p.Module.Version == "" && !p.Standard {
+		if p.Module.Path == "bootstrap" && cfg.GOROOT == os.Getenv("GOROOT_BOOTSTRAP") {
+			// During bootstrapping, the bootstrap toolchain is built in module
+			// "bootstrap" (instead of "std"), with GOROOT set to GOROOT_BOOTSTRAP
+			// (so the bootstrap toolchain packages don't even appear to be in GOROOT).
+			goto omitVCS
+		}
+		repoDir, vcsCmd, err = vcs.FromDir(base.Cwd(), "", allowNesting)
+		if err != nil && !errors.Is(err, os.ErrNotExist) {
+			setVCSError(err)
+			return
+		}
+		if !str.HasFilePathPrefix(p.Module.Dir, repoDir) &&
+			!str.HasFilePathPrefix(repoDir, p.Module.Dir) {
+			// The module containing the main package does not overlap with the
+			// repository containing the working directory. Don't include VCS info.
+			// If the repo contains the module or vice versa, but they are not
+			// the same directory, it's likely an error (see below).
+			goto omitVCS
+		}
+		if cfg.BuildBuildvcs == "auto" && vcsCmd != nil && vcsCmd.Cmd != "" {
+			if _, err := cfg.LookPath(vcsCmd.Cmd); err != nil {
+				// We found a repository, but the required VCS tool is not present.
+				// "-buildvcs=auto" means that we should silently drop the VCS metadata.
+				goto omitVCS
+			}
+		}
+	}
+	if repoDir != "" && vcsCmd.Status != nil {
+		// Check that the current directory, package, and module are in the same
+		// repository. vcs.FromDir allows nested Git repositories, but nesting
+		// is not allowed for other VCS tools. The current directory may be outside
+		// p.Module.Dir when a workspace is used.
+		pkgRepoDir, _, err := vcs.FromDir(p.Dir, "", allowNesting)
+		if err != nil {
+			setVCSError(err)
+			return
+		}
+		if pkgRepoDir != repoDir {
+			if cfg.BuildBuildvcs != "auto" {
+				setVCSError(fmt.Errorf("main package is in repository %q but current directory is in repository %q", pkgRepoDir, repoDir))
+				return
+			}
+			goto omitVCS
+		}
+		modRepoDir, _, err := vcs.FromDir(p.Module.Dir, "", allowNesting)
+		if err != nil {
+			setVCSError(err)
+			return
+		}
+		if modRepoDir != repoDir {
+			if cfg.BuildBuildvcs != "auto" {
+				setVCSError(fmt.Errorf("main module is in repository %q but current directory is in repository %q", modRepoDir, repoDir))
+				return
+			}
+			goto omitVCS
+		}
+
+		// Cache the (possibly slow) VCS status query per repository directory.
+		st, err := vcsStatusCache.Do(repoDir, func() (vcs.Status, error) {
+			return vcsCmd.Status(vcsCmd, repoDir)
+		})
+		if err != nil {
+			setVCSError(err)
+			return
+		}
+
+		appendSetting("vcs", vcsCmd.Cmd)
+		if st.Revision != "" {
+			appendSetting("vcs.revision", st.Revision)
+		}
+		if !st.CommitTime.IsZero() {
+			stamp := st.CommitTime.UTC().Format(time.RFC3339Nano)
+			appendSetting("vcs.time", stamp)
+		}
+		appendSetting("vcs.modified", strconv.FormatBool(st.Uncommitted))
+	}
+omitVCS:
+
+	p.Internal.BuildInfo = info
+}
+
+// SafeArg reports whether arg is a "safe" command-line argument,
+// meaning that when it appears in a command-line, it probably
+// doesn't have some special meaning other than its own name.
+// Obviously args beginning with - are not safe (they look like flags).
+// Less obviously, args beginning with @ are not safe (they look like
+// GNU binutils flagfile specifiers, sometimes called "response files").
+// To be conservative, we reject almost any arg beginning with non-alphanumeric ASCII.
+// We accept leading . _ and / as likely in file system paths.
+// There is a copy of this function in cmd/compile/internal/gc/noder.go.
+func SafeArg(name string) bool {
+	if name == "" {
+		return false
+	}
+	c := name[0]
+	return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '.' || c == '_' || c == '/' || c >= utf8.RuneSelf
+}
+
+// LinkerDeps returns the list of linker-induced dependencies for main package p.
+func LinkerDeps(p *Package) ([]string, error) {
+	// Everything links runtime.
+	deps := []string{"runtime"}
+
+	// External linking mode forces an import of runtime/cgo.
+	if what := externalLinkingReason(p); what != "" && cfg.BuildContext.Compiler != "gccgo" {
+		if !cfg.BuildContext.CgoEnabled {
+			return nil, fmt.Errorf("%s requires external (cgo) linking, but cgo is not enabled", what)
+		}
+		deps = append(deps, "runtime/cgo")
+	}
+	// On ARM with GOARM=5, it forces an import of math, for soft floating point.
+	// (The dependency is added for every GOARCH=arm build.)
+	if cfg.Goarch == "arm" {
+		deps = append(deps, "math")
+	}
+	// Using the race detector forces an import of runtime/race.
+	if cfg.BuildRace {
+		deps = append(deps, "runtime/race")
+	}
+	// Using memory sanitizer forces an import of runtime/msan.
+	if cfg.BuildMSan {
+		deps = append(deps, "runtime/msan")
+	}
+	// Using address sanitizer forces an import of runtime/asan.
+	if cfg.BuildASan {
+		deps = append(deps, "runtime/asan")
+	}
+	// Building for coverage forces an import of runtime/coverage.
+	if cfg.BuildCover && cfg.Experiment.CoverageRedesign {
+		deps = append(deps, "runtime/coverage")
+	}
+
+	return deps, nil
+}
+
+// externalLinkingReason reports the reason external linking is required
+// even for programs that do not use cgo, or the empty string if external
+// linking is not required.
+func externalLinkingReason(p *Package) (what string) {
+	// Some targets must use external linking even inside GOROOT.
+	if platform.MustLinkExternal(cfg.Goos, cfg.Goarch, false) {
+		return cfg.Goos + "/" + cfg.Goarch
+	}
+
+	// Some build modes always require external linking.
+	switch cfg.BuildBuildmode {
+	case "c-shared", "plugin":
+		return "-buildmode=" + cfg.BuildBuildmode
+	}
+
+	// Using -linkshared always requires external linking.
+	if cfg.BuildLinkshared {
+		return "-linkshared"
+	}
+
+	// Decide whether we are building a PIE,
+	// bearing in mind that some systems default to PIE.
+	isPIE := false
+	if cfg.BuildBuildmode == "pie" {
+		isPIE = true
+	} else if cfg.BuildBuildmode == "default" && platform.DefaultPIE(cfg.BuildContext.GOOS, cfg.BuildContext.GOARCH, cfg.BuildRace) {
+		isPIE = true
+	}
+	// If we are building a PIE, and we are on a system
+	// that does not support PIE with internal linking mode,
+	// then we must use external linking.
+	if isPIE && !platform.InternalLinkPIESupported(cfg.BuildContext.GOOS, cfg.BuildContext.GOARCH) {
+		if cfg.BuildBuildmode == "pie" {
+			return "-buildmode=pie"
+		}
+		return "default PIE binary"
+	}
+
+	// Using -ldflags=-linkmode=external forces external linking.
+	// If there are multiple -linkmode options, the last one wins.
+	if p != nil {
+		ldflags := BuildLdflags.For(p)
+		for i := len(ldflags) - 1; i >= 0; i-- {
+			a := ldflags[i]
+			if a == "-linkmode=external" ||
+				a == "-linkmode" && i+1 < len(ldflags) && ldflags[i+1] == "external" {
+				return a
+			} else if a == "-linkmode=internal" ||
+				a == "-linkmode" && i+1 < len(ldflags) && ldflags[i+1] == "internal" {
+				return ""
+			}
+		}
+	}
+
+	return ""
+}
+
+// mkAbs rewrites list, which must be paths relative to p.Dir,
+// into a sorted list of absolute paths. It edits list in place but for
+// convenience also returns list back to its caller.
+func (p *Package) mkAbs(list []string) []string {
+	for i, f := range list {
+		list[i] = filepath.Join(p.Dir, f)
+	}
+	sort.Strings(list)
+	return list
+}
+
+// InternalGoFiles returns the list of Go files being built for the package,
+// using absolute paths.
+func (p *Package) InternalGoFiles() []string {
+	return p.mkAbs(str.StringList(p.GoFiles, p.CgoFiles, p.TestGoFiles))
+}
+
+// InternalXGoFiles returns the list of Go files being built for the XTest package,
+// using absolute paths.
+func (p *Package) InternalXGoFiles() []string {
+	return p.mkAbs(p.XTestGoFiles)
+}
+
+// InternalAllGoFiles returns the list of all Go files possibly relevant for the package,
+// using absolute paths. "Possibly relevant" means that files are not excluded
+// due to build tags, but files with names beginning with . or _ are still excluded.
+func (p *Package) InternalAllGoFiles() []string {
+	return p.mkAbs(str.StringList(p.IgnoredGoFiles, p.GoFiles, p.CgoFiles, p.TestGoFiles, p.XTestGoFiles))
+}
+
+// UsesSwig reports whether the package needs to run SWIG.
+func (p *Package) UsesSwig() bool {
+	return len(p.SwigFiles) > 0 || len(p.SwigCXXFiles) > 0
+}
+
+// UsesCgo reports whether the package needs to run cgo.
+func (p *Package) UsesCgo() bool {
+	return len(p.CgoFiles) > 0
+}
+
+// PackageList returns the list of packages in the dag rooted at roots
+// as visited in a depth-first post-order traversal.
+func PackageList(roots []*Package) []*Package {
+	seen := map[*Package]bool{}
+	all := []*Package{}
+	var walk func(*Package)
+	walk = func(p *Package) {
+		if seen[p] {
+			return
+		}
+		seen[p] = true
+		// Visit imports first so that p appears after everything it depends on.
+		for _, p1 := range p.Internal.Imports {
+			walk(p1)
+		}
+		all = append(all, p)
+	}
+	for _, root := range roots {
+		walk(root)
+	}
+	return all
+}
+
+// TestPackageList returns the list of packages in the dag rooted at roots
+// as visited in a depth-first post-order traversal, including the test
+// imports of the roots. This ignores errors in test packages.
+func TestPackageList(ctx context.Context, opts PackageOpts, roots []*Package) []*Package {
+	seen := map[*Package]bool{}
+	all := []*Package{}
+	var walk func(*Package)
+	walk = func(p *Package) {
+		if seen[p] {
+			return
+		}
+		seen[p] = true
+		for _, p1 := range p.Internal.Imports {
+			walk(p1)
+		}
+		all = append(all, p)
+	}
+	// walkTest loads one test import of root and, if it loaded cleanly,
+	// folds it (and its dependencies) into the traversal.
+	walkTest := func(root *Package, path string) {
+		var stk ImportStack
+		p1, err := LoadImport(ctx, opts, path, root.Dir, root, &stk, root.Internal.Build.TestImportPos[path], ResolveImport)
+		if err != nil && root.Error == nil {
+			// Assign error importing the package to the importer.
+			root.Error = err
+			root.Incomplete = true
+		}
+		if p1.Error == nil {
+			walk(p1)
+		}
+	}
+	for _, root := range roots {
+		walk(root)
+		for _, path := range root.TestImports {
+			walkTest(root, path)
+		}
+		for _, path := range root.XTestImports {
+			walkTest(root, path)
+		}
+	}
+	return all
+}
+
+// LoadImportWithFlags loads the package with the given import path and
+// sets tool flags on that package. This function is useful for loading
+// implicit dependencies (like sync/atomic for coverage).
+// TODO(jayconrod): delete this function and set flags automatically
+// in LoadImport instead.
+func LoadImportWithFlags(path, srcDir string, parent *Package, stk *ImportStack, importPos []token.Position, mode int) (*Package, *PackageError) {
+	p, err := LoadImport(context.TODO(), PackageOpts{}, path, srcDir, parent, stk, importPos, mode)
+	setToolFlags(p)
+	return p, err
+}
+
+// LoadPackageWithFlags is the same as LoadImportWithFlags but without a parent.
+// It is then guaranteed not to return an error.
+func LoadPackageWithFlags(path, srcDir string, stk *ImportStack, importPos []token.Position, mode int) *Package {
+	p := LoadPackage(context.TODO(), PackageOpts{}, path, srcDir, stk, importPos, mode)
+	setToolFlags(p)
+	return p
+}
+
+// PackageOpts controls the behavior of PackagesAndErrors and other package
+// loading functions.
+type PackageOpts struct {
+	// IgnoreImports controls whether we ignore explicit and implicit imports
+	// when loading packages. Implicit imports are added when supporting Cgo
+	// or SWIG and when linking main packages.
+	IgnoreImports bool
+
+	// ModResolveTests indicates whether calls to the module loader should also
+	// resolve test dependencies of the requested packages.
+	//
+	// If ModResolveTests is true, then the module loader needs to resolve test
+	// dependencies at the same time as packages; otherwise, the test dependencies
+	// of those packages could be missing, and resolving those missing dependencies
+	// could change the selected versions of modules that provide other packages.
+	ModResolveTests bool
+
+	// MainOnly is true if the caller only wants to load main packages.
+	// For a literal argument matching a non-main package, a stub may be returned
+	// with an error. For a non-literal argument (with "..."), non-main packages
+	// are not matched, and their dependencies may not be loaded. A warning
+	// may be printed for non-literal arguments that match no main packages.
+	MainOnly bool
+
+	// AutoVCS controls whether we also load version-control metadata for main packages
+	// when -buildvcs=auto (the default).
+	AutoVCS bool
+
+	// SuppressBuildInfo is true if the caller does not need p.Stale, p.StaleReason, or p.Internal.BuildInfo
+	// to be populated on the package.
+	SuppressBuildInfo bool
+
+	// SuppressEmbedFiles is true if the caller does not need any embed files to be populated on the
+	// package.
+	SuppressEmbedFiles bool
+}
+
+// PackagesAndErrors returns the packages named by the command line arguments
+// 'patterns'. If a named package cannot be loaded, PackagesAndErrors returns
+// a *Package with the Error field describing the failure. If errors are found
+// loading imported packages, the DepsErrors field is set. The Incomplete field
+// may be set as well.
+//
+// To obtain a flat list of packages, use PackageList.
+// To report errors loading packages, use CheckPackageErrors.
+func PackagesAndErrors(ctx context.Context, opts PackageOpts, patterns []string) []*Package {
+	ctx, span := trace.StartSpan(ctx, "load.PackagesAndErrors")
+	defer span.Done()
+
+	for _, p := range patterns {
+		// Listing is only supported with all patterns referring to either:
+		// - Files that are part of the same directory.
+		// - Explicit package paths or patterns.
+		if strings.HasSuffix(p, ".go") {
+			// We need to test whether the path is an actual Go file and not a
+			// package path or pattern ending in '.go' (see golang.org/issue/34653).
+			if fi, err := fsys.Stat(p); err == nil && !fi.IsDir() {
+				pkgs := []*Package{GoFilesPackage(ctx, opts, patterns)}
+				setPGOProfilePath(pkgs)
+				return pkgs
+			}
+		}
+	}
+
+	var matches []*search.Match
+	if modload.Init(); cfg.ModulesEnabled {
+		modOpts := modload.PackageOpts{
+			ResolveMissingImports: true,
+			LoadTests:             opts.ModResolveTests,
+			SilencePackageErrors:  true,
+		}
+		matches, _ = modload.LoadPackages(ctx, modOpts, patterns...)
+	} else {
+		noModRoots := []string{}
+		matches = search.ImportPaths(patterns, noModRoots)
+	}
+
+	var (
+		pkgs    []*Package
+		stk     ImportStack
+		seenPkg = make(map[*Package]bool)
+	)
+
+	pre := newPreload()
+	defer pre.flush()
+	pre.preloadMatches(ctx, opts, matches)
+
+	for _, m := range matches {
+		for _, pkg := range m.Pkgs {
+			if pkg == "" {
+				panic(fmt.Sprintf("ImportPaths returned empty package for pattern %s", m.Pattern()))
+			}
+			mode := cmdlinePkg
+			if m.IsLiteral() {
+				// Note: do not set = m.IsLiteral unconditionally
+				// because maybe we'll see p matching both
+				// a literal and also a non-literal pattern.
+				mode |= cmdlinePkgLiteral
+			}
+			p, perr := loadImport(ctx, opts, pre, pkg, base.Cwd(), nil, &stk, nil, mode)
+			if perr != nil {
+				base.Fatalf("internal error: loadImport of %q with nil parent returned an error", pkg)
+			}
+			p.Match = append(p.Match, m.Pattern())
+			if seenPkg[p] {
+				continue
+			}
+			seenPkg[p] = true
+			pkgs = append(pkgs, p)
+		}
+
+		if len(m.Errs) > 0 {
+			// In addition to any packages that were actually resolved from the
+			// pattern, there was some error in resolving the pattern itself.
+			// Report it as a synthetic package.
+			p := new(Package)
+			p.ImportPath = m.Pattern()
+			// Pass an empty ImportStack and nil importPos: the error arose from a pattern, not an import.
+			var stk ImportStack
+			var importPos []token.Position
+			p.setLoadPackageDataError(m.Errs[0], m.Pattern(), &stk, importPos)
+			p.Incomplete = true
+			p.Match = append(p.Match, m.Pattern())
+			p.Internal.CmdlinePkg = true
+			if m.IsLiteral() {
+				p.Internal.CmdlinePkgLiteral = true
+			}
+			pkgs = append(pkgs, p)
+		}
+	}
+
+	if opts.MainOnly {
+		pkgs = mainPackagesOnly(pkgs, matches)
+	}
+
+	// Now that CmdlinePkg is set correctly,
+	// compute the effective flags for all loaded packages
+	// (not just the ones matching the patterns but also
+	// their dependencies).
+	setToolFlags(pkgs...)
+
+	setPGOProfilePath(pkgs)
+
+	return pkgs
+}
+
+// setPGOProfilePath sets the PGO profile path for pkgs.
+// In -pgo=auto mode, it finds the default PGO profile.
+func setPGOProfilePath(pkgs []*Package) {
+	// updateBuildInfo stamps the -pgo setting into p's BuildInfo,
+	// keeping the Settings list sorted by key.
+	updateBuildInfo := func(p *Package, file string) {
+		// Don't create BuildInfo for packages that didn't already have it.
+		if p.Internal.BuildInfo == nil {
+			return
+		}
+
+		if cfg.BuildTrimpath {
+			appendBuildSetting(p.Internal.BuildInfo, "-pgo", filepath.Base(file))
+		} else {
+			appendBuildSetting(p.Internal.BuildInfo, "-pgo", file)
+		}
+		// Adding -pgo breaks the sort order in BuildInfo.Settings. Restore it.
+		slices.SortFunc(p.Internal.BuildInfo.Settings, func(x, y debug.BuildSetting) int {
+			return strings.Compare(x.Key, y.Key)
+		})
+	}
+
+	switch cfg.BuildPGO {
+	case "off":
+		return
+
+	case "auto":
+		// Locate PGO profiles from the main packages, and
+		// attach the profile to the main package and its
+		// dependencies.
+		// If we're building multiple main packages, they may
+		// have different profiles. We may need to split (unshare)
+		// the dependency graph so they can attach different
+		// profiles.
+		for _, p := range pkgs {
+			if p.Name != "main" {
+				continue
+			}
+			pmain := p
+			file := filepath.Join(pmain.Dir, "default.pgo")
+			if _, err := os.Stat(file); err != nil {
+				continue // no profile
+			}
+
+			// Packages already visited. The value should replace
+			// the key, as it may be a forked copy of the original
+			// Package.
+			visited := make(map[*Package]*Package)
+			var split func(p *Package) *Package
+			split = func(p *Package) *Package {
+				if p1 := visited[p]; p1 != nil {
+					return p1
+				}
+
+				if len(pkgs) > 1 && p != pmain {
+					// Make a copy, then attach profile.
+					// No need to copy if there is only one root package (we can
+					// attach profile directly in-place).
+					// Also no need to copy the main package.
+					if p.Internal.PGOProfile != "" {
+						panic("setPGOProfilePath: already have profile")
+					}
+					p1 := new(Package)
+					*p1 = *p
+					// Unalias the Internal.Imports slice, which we're going to
+					// modify. We don't copy other slices as we don't change them.
+					p1.Internal.Imports = slices.Clone(p.Internal.Imports)
+					p1.Internal.ForMain = pmain.ImportPath
+					visited[p] = p1
+					p = p1
+				} else {
+					visited[p] = p
+				}
+				p.Internal.PGOProfile = file
+				updateBuildInfo(p, file)
+				// Recurse to dependencies.
+				for i, pp := range p.Internal.Imports {
+					p.Internal.Imports[i] = split(pp)
+				}
+				return p
+			}
+
+			// Replace the package and imports with the PGO version.
+			split(pmain)
+		}
+
+	default:
+		// Profile specified from the command line.
+		// Make it absolute path, as the compiler runs on various directories.
+		file, err := filepath.Abs(cfg.BuildPGO)
+		if err != nil {
+			base.Fatalf("fail to get absolute path of PGO file %s: %v", cfg.BuildPGO, err)
+		}
+
+		for _, p := range PackageList(pkgs) {
+			p.Internal.PGOProfile = file
+			updateBuildInfo(p, file)
+		}
+	}
+}
+
+// CheckPackageErrors prints errors encountered loading pkgs and their
+// dependencies, then exits with a non-zero status if any errors were found.
+func CheckPackageErrors(pkgs []*Package) {
+	var anyIncomplete bool
+	for _, pkg := range pkgs {
+		if pkg.Incomplete {
+			anyIncomplete = true
+		}
+	}
+	if anyIncomplete {
+		all := PackageList(pkgs)
+		for _, p := range all {
+			if p.Error != nil {
+				base.Errorf("%v", p.Error)
+			}
+		}
+	}
+	base.ExitIfErrors()
+
+	// Check for duplicate loads of the same package.
+	// That should be impossible, but if it does happen then
+	// we end up trying to build the same package twice,
+	// usually in parallel overwriting the same files,
+	// which doesn't work very well.
+	seen := map[string]bool{}
+	reported := map[string]bool{}
+	for _, pkg := range PackageList(pkgs) {
+		// -pgo=auto with multiple main packages can cause a package being
+		// built multiple times (with different profiles).
+		// We check that package import path + profile path is unique.
+		key := pkg.ImportPath
+		if pkg.Internal.PGOProfile != "" {
+			key += " pgo:" + pkg.Internal.PGOProfile
+		}
+		if seen[key] && !reported[key] {
+			reported[key] = true
+			base.Errorf("internal error: duplicate loads of %s", pkg.ImportPath)
+		}
+		seen[key] = true
+	}
+	base.ExitIfErrors()
+}
+
+// mainPackagesOnly filters out non-main packages matched only by arguments
+// containing "..." and returns the remaining main packages.
+//
+// Packages with missing, invalid, or ambiguous names may be treated as
+// possibly-main packages.
+//
+// mainPackagesOnly sets a non-main package's Error field and returns it if it
+// is named by a literal argument.
+//
+// mainPackagesOnly prints warnings for non-literal arguments that only match
+// non-main packages.
+func mainPackagesOnly(pkgs []*Package, matches []*search.Match) []*Package {
+	treatAsMain := map[string]bool{}
+	for _, m := range matches {
+		if m.IsLiteral() {
+			for _, path := range m.Pkgs {
+				treatAsMain[path] = true
+			}
+		}
+	}
+
+	var mains []*Package
+	for _, pkg := range pkgs {
+		if pkg.Name == "main" || (pkg.Name == "" && pkg.Error != nil) {
+			treatAsMain[pkg.ImportPath] = true
+			mains = append(mains, pkg)
+			continue
+		}
+
+		if len(pkg.InvalidGoFiles) > 0 { // TODO(#45999): && pkg.Name == "", but currently go/build sets pkg.Name arbitrarily if it is ambiguous.
+			// The package has (or may have) conflicting names, and we can't easily
+			// tell whether one of them is "main". So assume that it could be, and
+			// report an error for the package.
+			treatAsMain[pkg.ImportPath] = true
+		}
+		if treatAsMain[pkg.ImportPath] {
+			if pkg.Error == nil {
+				pkg.Error = &PackageError{Err: &mainPackageError{importPath: pkg.ImportPath}}
+				pkg.Incomplete = true
+			}
+			mains = append(mains, pkg)
+		}
+	}
+
+	for _, m := range matches {
+		if m.IsLiteral() || len(m.Pkgs) == 0 {
+			continue
+		}
+		foundMain := false
+		for _, path := range m.Pkgs {
+			if treatAsMain[path] {
+				foundMain = true
+				break
+			}
+		}
+		if !foundMain {
+			fmt.Fprintf(os.Stderr, "go: warning: %q matched only non-main packages\n", m.Pattern())
+		}
+	}
+
+	return mains
+}
+
+// mainPackageError is the error reported for a non-main package named
+// when only main packages were requested.
+type mainPackageError struct {
+	importPath string
+}
+
+func (e *mainPackageError) Error() string {
+	return fmt.Sprintf("package %s is not a main package", e.importPath)
+}
+
+// ImportPath returns the import path of the offending package.
+func (e *mainPackageError) ImportPath() string {
+	return e.importPath
+}
+
+// setToolFlags computes and stores the effective per-package tool flags
+// (asm, gc, ld, gccgo) for pkgs and all of their dependencies.
+func setToolFlags(pkgs ...*Package) {
+	for _, p := range PackageList(pkgs) {
+		p.Internal.Asmflags = BuildAsmflags.For(p)
+		p.Internal.Gcflags = BuildGcflags.For(p)
+		p.Internal.Ldflags = BuildLdflags.For(p)
+		p.Internal.Gccgoflags = BuildGccgoflags.For(p)
+	}
+}
+
+// GoFilesPackage 
creates a package for building a collection of Go files
+// (typically named on the command line). The target is named p.a for
+// package p or named after the first Go file for package main.
+func GoFilesPackage(ctx context.Context, opts PackageOpts, gofiles []string) *Package {
+	modload.Init()
+
+	for _, f := range gofiles {
+		if !strings.HasSuffix(f, ".go") {
+			pkg := new(Package)
+			pkg.Internal.Local = true
+			pkg.Internal.CmdlineFiles = true
+			pkg.Name = f
+			pkg.Error = &PackageError{
+				Err: fmt.Errorf("named files must be .go files: %s", pkg.Name),
+			}
+			pkg.Incomplete = true
+			return pkg
+		}
+	}
+
+	var stk ImportStack
+	ctxt := cfg.BuildContext
+	ctxt.UseAllFiles = true
+
+	// Synthesize fake "directory" that only shows the named files,
+	// to make it look like this is a standard package or
+	// command directory. So that local imports resolve
+	// consistently, the files must all be in the same directory.
+	var dirent []fs.FileInfo
+	var dir string
+	for _, file := range gofiles {
+		fi, err := fsys.Stat(file)
+		if err != nil {
+			base.Fatalf("%s", err)
+		}
+		if fi.IsDir() {
+			base.Fatalf("%s is a directory, should be a Go file", file)
+		}
+		dir1 := filepath.Dir(file)
+		if dir == "" {
+			dir = dir1
+		} else if dir != dir1 {
+			base.Fatalf("named files must all be in one directory; have %s and %s", dir, dir1)
+		}
+		dirent = append(dirent, fi)
+	}
+	// Override ReadDir so the synthesized directory lists exactly the named files.
+	ctxt.ReadDir = func(string) ([]fs.FileInfo, error) { return dirent, nil }
+
+	if cfg.ModulesEnabled {
+		modload.ImportFromFiles(ctx, gofiles)
+	}
+
+	var err error
+	if dir == "" {
+		dir = base.Cwd()
+	}
+	dir, err = filepath.Abs(dir)
+	if err != nil {
+		base.Fatalf("%s", err)
+	}
+
+	bp, err := ctxt.ImportDir(dir, 0)
+	pkg := new(Package)
+	pkg.Internal.Local = true
+	pkg.Internal.CmdlineFiles = true
+	pkg.load(ctx, opts, "command-line-arguments", &stk, nil, bp, err)
+	if !cfg.ModulesEnabled {
+		pkg.Internal.LocalPrefix = dirToImportPath(dir)
+	}
+	pkg.ImportPath = "command-line-arguments"
+	pkg.Target = ""
+	pkg.Match = gofiles
+
+	if pkg.Name == "main" {
+		exe := pkg.DefaultExecName() + cfg.ExeSuffix
+
+		if cfg.GOBIN != "" {
+			pkg.Target = filepath.Join(cfg.GOBIN, exe)
+		} else if cfg.ModulesEnabled {
+			pkg.Target = filepath.Join(modload.BinDir(), exe)
+		}
+	}
+
+	if opts.MainOnly && pkg.Name != "main" && pkg.Error == nil {
+		pkg.Error = &PackageError{Err: &mainPackageError{importPath: pkg.ImportPath}}
+		pkg.Incomplete = true
+	}
+	setToolFlags(pkg)
+
+	return pkg
+}
+
+// PackagesAndErrorsOutsideModule is like PackagesAndErrors but runs in
+// module-aware mode and ignores the go.mod file in the current directory or any
+// parent directory, if there is one. This is used in the implementation of 'go
+// install pkg@version' and other commands that support similar forms.
+//
+// modload.ForceUseModules must be true, and modload.RootMode must be NoRoot
+// before calling this function.
+//
+// PackagesAndErrorsOutsideModule imposes several constraints to avoid
+// ambiguity. All arguments must have the same version suffix (not just a suffix
+// that resolves to the same version). They must refer to packages in the same
+// module, which must not be std or cmd. That module is not considered the main
+// module, but its go.mod file (if it has one) must not contain directives that
+// would cause it to be interpreted differently if it were the main module
+// (replace, exclude).
+func PackagesAndErrorsOutsideModule(ctx context.Context, opts PackageOpts, args []string) ([]*Package, error) {
+	if !modload.ForceUseModules {
+		panic("modload.ForceUseModules must be true")
+	}
+	if modload.RootMode != modload.NoRoot {
+		panic("modload.RootMode must be NoRoot")
+	}
+
+	// Check that the arguments satisfy syntactic constraints.
+ var version string + var firstPath string + for _, arg := range args { + if i := strings.Index(arg, "@"); i >= 0 { + firstPath, version = arg[:i], arg[i+1:] + if version == "" { + return nil, fmt.Errorf("%s: version must not be empty", arg) + } + break + } + } + patterns := make([]string, len(args)) + for i, arg := range args { + p, found := strings.CutSuffix(arg, "@"+version) + if !found { + return nil, fmt.Errorf("%s: all arguments must refer to packages in the same module at the same version (@%s)", arg, version) + } + switch { + case build.IsLocalImport(p): + return nil, fmt.Errorf("%s: argument must be a package path, not a relative path", arg) + case filepath.IsAbs(p): + return nil, fmt.Errorf("%s: argument must be a package path, not an absolute path", arg) + case search.IsMetaPackage(p): + return nil, fmt.Errorf("%s: argument must be a package path, not a meta-package", arg) + case pathpkg.Clean(p) != p: + return nil, fmt.Errorf("%s: argument must be a clean package path", arg) + case !strings.Contains(p, "...") && search.IsStandardImportPath(p) && modindex.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, p): + return nil, fmt.Errorf("%s: argument must not be a package in the standard library", arg) + default: + patterns[i] = p + } + } + + // Query the module providing the first argument, load its go.mod file, and + // check that it doesn't contain directives that would cause it to be + // interpreted differently if it were the main module. + // + // If multiple modules match the first argument, accept the longest match + // (first result). It's possible this module won't provide packages named by + // later arguments, and other modules would. Let's not try to be too + // magical though. + allowed := modload.CheckAllowed + if modload.IsRevisionQuery(firstPath, version) { + // Don't check for retractions if a specific revision is requested. 
+ allowed = nil + } + noneSelected := func(path string) (version string) { return "none" } + qrs, err := modload.QueryPackages(ctx, patterns[0], version, noneSelected, allowed) + if err != nil { + return nil, fmt.Errorf("%s: %w", args[0], err) + } + rootMod := qrs[0].Mod + data, err := modfetch.GoMod(ctx, rootMod.Path, rootMod.Version) + if err != nil { + return nil, fmt.Errorf("%s: %w", args[0], err) + } + f, err := modfile.Parse("go.mod", data, nil) + if err != nil { + return nil, fmt.Errorf("%s (in %s): %w", args[0], rootMod, err) + } + directiveFmt := "%s (in %s):\n" + + "\tThe go.mod file for the module providing named packages contains one or\n" + + "\tmore %s directives. It must not contain directives that would cause\n" + + "\tit to be interpreted differently than if it were the main module." + if len(f.Replace) > 0 { + return nil, fmt.Errorf(directiveFmt, args[0], rootMod, "replace") + } + if len(f.Exclude) > 0 { + return nil, fmt.Errorf(directiveFmt, args[0], rootMod, "exclude") + } + + // Since we are in NoRoot mode, the build list initially contains only + // the dummy command-line-arguments module. Add a requirement on the + // module that provides the packages named on the command line. + if _, err := modload.EditBuildList(ctx, nil, []module.Version{rootMod}); err != nil { + return nil, fmt.Errorf("%s: %w", args[0], err) + } + + // Load packages for all arguments. + pkgs := PackagesAndErrors(ctx, opts, patterns) + + // Check that named packages are all provided by the same module. + for _, pkg := range pkgs { + var pkgErr error + if pkg.Module == nil { + // Packages in std, cmd, and their vendored dependencies + // don't have this field set. 
+ pkgErr = fmt.Errorf("package %s not provided by module %s", pkg.ImportPath, rootMod) + } else if pkg.Module.Path != rootMod.Path || pkg.Module.Version != rootMod.Version { + pkgErr = fmt.Errorf("package %s provided by module %s@%s\n\tAll packages must be provided by the same module (%s).", pkg.ImportPath, pkg.Module.Path, pkg.Module.Version, rootMod) + } + if pkgErr != nil && pkg.Error == nil { + pkg.Error = &PackageError{Err: pkgErr} + pkg.Incomplete = true + } + } + + matchers := make([]func(string) bool, len(patterns)) + for i, p := range patterns { + if strings.Contains(p, "...") { + matchers[i] = pkgpattern.MatchPattern(p) + } + } + return pkgs, nil +} + +// EnsureImport ensures that package p imports the named package. +func EnsureImport(p *Package, pkg string) { + for _, d := range p.Internal.Imports { + if d.Name == pkg { + return + } + } + + p1, err := LoadImportWithFlags(pkg, p.Dir, p, &ImportStack{}, nil, 0) + if err != nil { + base.Fatalf("load %s: %v", pkg, err) + } + if p1.Error != nil { + base.Fatalf("load %s: %v", pkg, p1.Error) + } + + p.Internal.Imports = append(p.Internal.Imports, p1) +} + +// PrepareForCoverageBuild is a helper invoked for "go install +// -cover", "go run -cover", and "go build -cover" (but not used by +// "go test -cover"). It walks through the packages being built (and +// dependencies) and marks them for coverage instrumentation when +// appropriate, and possibly adding additional deps where needed. +func PrepareForCoverageBuild(pkgs []*Package) { + var match []func(*Package) bool + + matchMainModAndCommandLine := func(p *Package) bool { + // note that p.Standard implies p.Module == nil below. + return p.Internal.CmdlineFiles || p.Internal.CmdlinePkg || (p.Module != nil && p.Module.Main) + } + + if len(cfg.BuildCoverPkg) != 0 { + // If -coverpkg has been specified, then we instrument only + // the specific packages selected by the user-specified pattern(s). 
+ match = make([]func(*Package) bool, len(cfg.BuildCoverPkg)) + for i := range cfg.BuildCoverPkg { + match[i] = MatchPackage(cfg.BuildCoverPkg[i], base.Cwd()) + } + } else { + // Without -coverpkg, instrument only packages in the main module + // (if any), as well as packages/files specifically named on the + // command line. + match = []func(*Package) bool{matchMainModAndCommandLine} + } + + // Visit the packages being built or installed, along with all of + // their dependencies, and mark them to be instrumented, taking + // into account the matchers we've set up in the sequence above. + SelectCoverPackages(PackageList(pkgs), match, "build") +} + +func SelectCoverPackages(roots []*Package, match []func(*Package) bool, op string) []*Package { + var warntag string + var includeMain bool + switch op { + case "build": + warntag = "built" + includeMain = true + case "test": + warntag = "tested" + default: + panic("internal error, bad mode passed to SelectCoverPackages") + } + + covered := []*Package{} + matched := make([]bool, len(match)) + for _, p := range roots { + haveMatch := false + for i := range match { + if match[i](p) { + matched[i] = true + haveMatch = true + } + } + if !haveMatch { + continue + } + + // There is nothing to cover in package unsafe; it comes from + // the compiler. + if p.ImportPath == "unsafe" { + continue + } + + // A package which only has test files can't be imported as a + // dependency, and at the moment we don't try to instrument it + // for coverage. There isn't any technical reason why + // *_test.go files couldn't be instrumented, but it probably + // doesn't make much sense to lump together coverage metrics + // (ex: percent stmts covered) of *_test.go files with + // non-test Go code. + if len(p.GoFiles)+len(p.CgoFiles) == 0 { + continue + } + + // Silently ignore attempts to run coverage on sync/atomic + // and/or runtime/internal/atomic when using atomic coverage + // mode. 
Atomic coverage mode uses sync/atomic, so we can't + // also do coverage on it. + if cfg.BuildCoverMode == "atomic" && p.Standard && + (p.ImportPath == "sync/atomic" || p.ImportPath == "runtime/internal/atomic") { + continue + } + + // If using the race detector, silently ignore attempts to run + // coverage on the runtime packages. It will cause the race + // detector to be invoked before it has been initialized. Note + // the use of "regonly" instead of just ignoring the package + // completely-- we do this due to the requirements of the + // package ID numbering scheme. See the comment in + // $GOROOT/src/internal/coverage/pkid.go dealing with + // hard-coding of runtime package IDs. + cmode := cfg.BuildCoverMode + if cfg.BuildRace && p.Standard && (p.ImportPath == "runtime" || strings.HasPrefix(p.ImportPath, "runtime/internal")) { + cmode = "regonly" + } + + // If -coverpkg is in effect and for some reason we don't want + // coverage data for the main package, make sure that we at + // least process it for registration hooks. + if includeMain && p.Name == "main" && !haveMatch { + haveMatch = true + cmode = "regonly" + } + + // Mark package for instrumentation. + p.Internal.Cover.Mode = cmode + covered = append(covered, p) + + // Force import of sync/atomic into package if atomic mode. + if cfg.BuildCoverMode == "atomic" { + EnsureImport(p, "sync/atomic") + } + + // Generate covervars if using legacy coverage design. + if !cfg.Experiment.CoverageRedesign { + var coverFiles []string + coverFiles = append(coverFiles, p.GoFiles...) + coverFiles = append(coverFiles, p.CgoFiles...) + p.Internal.CoverVars = DeclareCoverVars(p, coverFiles...) + } + } + + // Warn about -coverpkg arguments that are not actually used. 
+ for i := range cfg.BuildCoverPkg { + if !matched[i] { + fmt.Fprintf(os.Stderr, "warning: no packages being %s depend on matches for pattern %s\n", warntag, cfg.BuildCoverPkg[i]) + } + } + + return covered +} + +// DeclareCoverVars attaches the required cover variables names +// to the files, to be used when annotating the files. This +// function only called when using legacy coverage test/build +// (e.g. GOEXPERIMENT=coverageredesign is off). +func DeclareCoverVars(p *Package, files ...string) map[string]*CoverVar { + coverVars := make(map[string]*CoverVar) + coverIndex := 0 + // We create the cover counters as new top-level variables in the package. + // We need to avoid collisions with user variables (GoCover_0 is unlikely but still) + // and more importantly with dot imports of other covered packages, + // so we append 12 hex digits from the SHA-256 of the import path. + // The point is only to avoid accidents, not to defeat users determined to + // break things. + sum := sha256.Sum256([]byte(p.ImportPath)) + h := fmt.Sprintf("%x", sum[:6]) + for _, file := range files { + if base.IsTestFile(file) { + continue + } + // For a package that is "local" (imported via ./ import or command line, outside GOPATH), + // we record the full path to the file name. + // Otherwise we record the import path, then a forward slash, then the file name. + // This makes profiles within GOPATH file system-independent. + // These names appear in the cmd/cover HTML interface. 
+ var longFile string + if p.Internal.Local { + longFile = filepath.Join(p.Dir, file) + } else { + longFile = pathpkg.Join(p.ImportPath, file) + } + coverVars[file] = &CoverVar{ + File: longFile, + Var: fmt.Sprintf("GoCover_%d_%x", coverIndex, h), + } + coverIndex++ + } + return coverVars +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/load/pkg_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/load/pkg_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3bcddee30bcaef91d7ddeef0d1068986f575099a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/load/pkg_test.go @@ -0,0 +1,82 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package load + +import ( + "cmd/go/internal/cfg" + "testing" +) + +func TestPkgDefaultExecName(t *testing.T) { + oldModulesEnabled := cfg.ModulesEnabled + defer func() { cfg.ModulesEnabled = oldModulesEnabled }() + for _, tt := range []struct { + in string + files []string + wantMod string + wantGopath string + }{ + {"example.com/mycmd", []string{}, "mycmd", "mycmd"}, + {"example.com/mycmd/v0", []string{}, "v0", "v0"}, + {"example.com/mycmd/v1", []string{}, "v1", "v1"}, + {"example.com/mycmd/v2", []string{}, "mycmd", "v2"}, // Semantic import versioning, use second last element in module mode. + {"example.com/mycmd/v3", []string{}, "mycmd", "v3"}, // Semantic import versioning, use second last element in module mode. + {"mycmd", []string{}, "mycmd", "mycmd"}, + {"mycmd/v0", []string{}, "v0", "v0"}, + {"mycmd/v1", []string{}, "v1", "v1"}, + {"mycmd/v2", []string{}, "mycmd", "v2"}, // Semantic import versioning, use second last element in module mode. 
+ {"v0", []string{}, "v0", "v0"}, + {"v1", []string{}, "v1", "v1"}, + {"v2", []string{}, "v2", "v2"}, + {"command-line-arguments", []string{"output.go", "foo.go"}, "output", "output"}, + } { + { + cfg.ModulesEnabled = true + pkg := new(Package) + pkg.ImportPath = tt.in + pkg.GoFiles = tt.files + pkg.Internal.CmdlineFiles = len(tt.files) > 0 + gotMod := pkg.DefaultExecName() + if gotMod != tt.wantMod { + t.Errorf("pkg.DefaultExecName with ImportPath = %q in module mode = %v; want %v", tt.in, gotMod, tt.wantMod) + } + } + { + cfg.ModulesEnabled = false + pkg := new(Package) + pkg.ImportPath = tt.in + pkg.GoFiles = tt.files + pkg.Internal.CmdlineFiles = len(tt.files) > 0 + gotGopath := pkg.DefaultExecName() + if gotGopath != tt.wantGopath { + t.Errorf("pkg.DefaultExecName with ImportPath = %q in gopath mode = %v; want %v", tt.in, gotGopath, tt.wantGopath) + } + } + } +} + +func TestIsVersionElement(t *testing.T) { + t.Parallel() + for _, tt := range []struct { + in string + want bool + }{ + {"v0", false}, + {"v05", false}, + {"v1", false}, + {"v2", true}, + {"v3", true}, + {"v9", true}, + {"v10", true}, + {"v11", true}, + {"v", false}, + {"vx", false}, + } { + got := isVersionElement(tt.in) + if got != tt.want { + t.Errorf("isVersionElement(%q) = %v; want %v", tt.in, got, tt.want) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/load/search.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/load/search.go new file mode 100644 index 0000000000000000000000000000000000000000..565996a21fa1147db603107bf7f5cfedb3ee7f2c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/load/search.go @@ -0,0 +1,57 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package load + +import ( + "path/filepath" + "strings" + + "cmd/go/internal/search" + "cmd/internal/pkgpattern" +) + +// MatchPackage(pattern, cwd)(p) reports whether package p matches pattern in the working directory cwd. +func MatchPackage(pattern, cwd string) func(*Package) bool { + switch { + case search.IsRelativePath(pattern): + // Split pattern into leading pattern-free directory path + // (including all . and .. elements) and the final pattern. + var dir string + i := strings.Index(pattern, "...") + if i < 0 { + dir, pattern = pattern, "" + } else { + j := strings.LastIndex(pattern[:i], "/") + dir, pattern = pattern[:j], pattern[j+1:] + } + dir = filepath.Join(cwd, dir) + if pattern == "" { + return func(p *Package) bool { return p.Dir == dir } + } + matchPath := pkgpattern.MatchPattern(pattern) + return func(p *Package) bool { + // Compute relative path to dir and see if it matches the pattern. + rel, err := filepath.Rel(dir, p.Dir) + if err != nil { + // Cannot make relative - e.g. different drive letters on Windows. + return false + } + rel = filepath.ToSlash(rel) + if rel == ".." || strings.HasPrefix(rel, "../") { + return false + } + return matchPath(rel) + } + case pattern == "all": + return func(p *Package) bool { return true } + case pattern == "std": + return func(p *Package) bool { return p.Standard } + case pattern == "cmd": + return func(p *Package) bool { return p.Standard && strings.HasPrefix(p.ImportPath, "cmd/") } + default: + matchPath := pkgpattern.MatchPattern(pattern) + return func(p *Package) bool { return matchPath(p.ImportPath) } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/load/test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/load/test.go new file mode 100644 index 0000000000000000000000000000000000000000..d09ababfdda4cf6135bcab3d44c7402390cc1a16 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/load/test.go @@ -0,0 +1,995 @@ +// Copyright 2018 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package load + +import ( + "bytes" + "context" + "errors" + "fmt" + "go/ast" + "go/build" + "go/doc" + "go/parser" + "go/token" + "internal/lazytemplate" + "path/filepath" + "slices" + "sort" + "strings" + "unicode" + "unicode/utf8" + + "cmd/go/internal/cfg" + "cmd/go/internal/fsys" + "cmd/go/internal/str" + "cmd/go/internal/trace" +) + +var TestMainDeps = []string{ + // Dependencies for testmain. + "os", + "reflect", + "testing", + "testing/internal/testdeps", +} + +type TestCover struct { + Mode string + Local bool + Pkgs []*Package + Paths []string + Vars []coverInfo +} + +// TestPackagesFor is like TestPackagesAndErrors but it returns +// an error if the test packages or their dependencies have errors. +// Only test packages without errors are returned. +func TestPackagesFor(ctx context.Context, opts PackageOpts, p *Package, cover *TestCover) (pmain, ptest, pxtest *Package, err error) { + pmain, ptest, pxtest = TestPackagesAndErrors(ctx, nil, opts, p, cover) + for _, p1 := range []*Package{ptest, pxtest, pmain} { + if p1 == nil { + // pxtest may be nil + continue + } + if p1.Error != nil { + err = p1.Error + break + } + if p1.Incomplete { + ps := PackageList([]*Package{p1}) + for _, p := range ps { + if p.Error != nil { + err = p.Error + break + } + } + break + } + } + if pmain.Error != nil || pmain.Incomplete { + pmain = nil + } + if ptest.Error != nil || ptest.Incomplete { + ptest = nil + } + if pxtest != nil && (pxtest.Error != nil || pxtest.Incomplete) { + pxtest = nil + } + return pmain, ptest, pxtest, err +} + +// TestPackagesAndErrors returns three packages: +// - pmain, the package main corresponding to the test binary (running tests in ptest and pxtest). +// - ptest, the package p compiled with added "package p" test files. +// - pxtest, the result of compiling any "package p_test" (external) test files. 
+// +// If the package has no "package p_test" test files, pxtest will be nil. +// If the non-test compilation of package p can be reused +// (for example, if there are no "package p" test files and +// package p need not be instrumented for coverage or any other reason), +// then the returned ptest == p. +// +// If done is non-nil, TestPackagesAndErrors will finish filling out the returned +// package structs in a goroutine and call done once finished. The members of the +// returned packages should not be accessed until done is called. +// +// The caller is expected to have checked that len(p.TestGoFiles)+len(p.XTestGoFiles) > 0, +// or else there's no point in any of this. +func TestPackagesAndErrors(ctx context.Context, done func(), opts PackageOpts, p *Package, cover *TestCover) (pmain, ptest, pxtest *Package) { + ctx, span := trace.StartSpan(ctx, "load.TestPackagesAndErrors") + defer span.Done() + + pre := newPreload() + defer pre.flush() + allImports := append([]string{}, p.TestImports...) + allImports = append(allImports, p.XTestImports...) 
+ pre.preloadImports(ctx, opts, allImports, p.Internal.Build) + + var ptestErr, pxtestErr *PackageError + var imports, ximports []*Package + var stk ImportStack + var testEmbed, xtestEmbed map[string][]string + var incomplete bool + stk.Push(p.ImportPath + " (test)") + rawTestImports := str.StringList(p.TestImports) + for i, path := range p.TestImports { + p1, err := loadImport(ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.TestImportPos[path], ResolveImport) + if err != nil && ptestErr == nil { + ptestErr = err + incomplete = true + } + if p1.Incomplete { + incomplete = true + } + p.TestImports[i] = p1.ImportPath + imports = append(imports, p1) + } + var err error + p.TestEmbedFiles, testEmbed, err = resolveEmbed(p.Dir, p.TestEmbedPatterns) + if err != nil { + ptestErr = &PackageError{ + ImportStack: stk.Copy(), + Err: err, + } + incomplete = true + embedErr := err.(*EmbedError) + ptestErr.setPos(p.Internal.Build.TestEmbedPatternPos[embedErr.Pattern]) + } + stk.Pop() + + stk.Push(p.ImportPath + "_test") + pxtestNeedsPtest := false + var pxtestIncomplete bool + rawXTestImports := str.StringList(p.XTestImports) + for i, path := range p.XTestImports { + p1, err := loadImport(ctx, opts, pre, path, p.Dir, p, &stk, p.Internal.Build.XTestImportPos[path], ResolveImport) + if err != nil && pxtestErr == nil { + pxtestErr = err + } + if p1.Incomplete { + pxtestIncomplete = true + } + if p1.ImportPath == p.ImportPath { + pxtestNeedsPtest = true + } else { + ximports = append(ximports, p1) + } + p.XTestImports[i] = p1.ImportPath + } + p.XTestEmbedFiles, xtestEmbed, err = resolveEmbed(p.Dir, p.XTestEmbedPatterns) + if err != nil && pxtestErr == nil { + pxtestErr = &PackageError{ + ImportStack: stk.Copy(), + Err: err, + } + embedErr := err.(*EmbedError) + pxtestErr.setPos(p.Internal.Build.XTestEmbedPatternPos[embedErr.Pattern]) + } + pxtestIncomplete = pxtestIncomplete || pxtestErr != nil + stk.Pop() + + // Test package. 
+ if len(p.TestGoFiles) > 0 || p.Name == "main" || cover != nil && cover.Local { + ptest = new(Package) + *ptest = *p + ptest.Error = ptestErr + ptest.Incomplete = incomplete + ptest.ForTest = p.ImportPath + ptest.GoFiles = nil + ptest.GoFiles = append(ptest.GoFiles, p.GoFiles...) + ptest.GoFiles = append(ptest.GoFiles, p.TestGoFiles...) + ptest.Target = "" + // Note: The preparation of the vet config requires that common + // indexes in ptest.Imports and ptest.Internal.RawImports + // all line up (but RawImports can be shorter than the others). + // That is, for 0 ≤ i < len(RawImports), + // RawImports[i] is the import string in the program text, and + // Imports[i] is the expanded import string (vendoring applied or relative path expanded away). + // Any implicitly added imports appear in Imports and Internal.Imports + // but not RawImports (because they were not in the source code). + // We insert TestImports, imports, and rawTestImports at the start of + // these lists to preserve the alignment. + // Note that p.Internal.Imports may not be aligned with p.Imports/p.Internal.RawImports, + // but we insert at the beginning there too just for consistency. + ptest.Imports = str.StringList(p.TestImports, p.Imports) + ptest.Internal.Imports = append(imports, p.Internal.Imports...) + ptest.Internal.RawImports = str.StringList(rawTestImports, p.Internal.RawImports) + ptest.Internal.ForceLibrary = true + ptest.Internal.BuildInfo = nil + ptest.Internal.Build = new(build.Package) + *ptest.Internal.Build = *p.Internal.Build + m := map[string][]token.Position{} + for k, v := range p.Internal.Build.ImportPos { + m[k] = append(m[k], v...) + } + for k, v := range p.Internal.Build.TestImportPos { + m[k] = append(m[k], v...) 
+ } + ptest.Internal.Build.ImportPos = m + if testEmbed == nil && len(p.Internal.Embed) > 0 { + testEmbed = map[string][]string{} + } + for k, v := range p.Internal.Embed { + testEmbed[k] = v + } + ptest.Internal.Embed = testEmbed + ptest.EmbedFiles = str.StringList(p.EmbedFiles, p.TestEmbedFiles) + ptest.Internal.OrigImportPath = p.Internal.OrigImportPath + ptest.Internal.PGOProfile = p.Internal.PGOProfile + ptest.Internal.Build.Directives = append(slices.Clip(p.Internal.Build.Directives), p.Internal.Build.TestDirectives...) + } else { + ptest = p + } + + // External test package. + if len(p.XTestGoFiles) > 0 { + pxtest = &Package{ + PackagePublic: PackagePublic{ + Name: p.Name + "_test", + ImportPath: p.ImportPath + "_test", + Root: p.Root, + Dir: p.Dir, + Goroot: p.Goroot, + GoFiles: p.XTestGoFiles, + Imports: p.XTestImports, + ForTest: p.ImportPath, + Module: p.Module, + Error: pxtestErr, + Incomplete: pxtestIncomplete, + EmbedFiles: p.XTestEmbedFiles, + }, + Internal: PackageInternal{ + LocalPrefix: p.Internal.LocalPrefix, + Build: &build.Package{ + ImportPos: p.Internal.Build.XTestImportPos, + Directives: p.Internal.Build.XTestDirectives, + }, + Imports: ximports, + RawImports: rawXTestImports, + + Asmflags: p.Internal.Asmflags, + Gcflags: p.Internal.Gcflags, + Ldflags: p.Internal.Ldflags, + Gccgoflags: p.Internal.Gccgoflags, + Embed: xtestEmbed, + OrigImportPath: p.Internal.OrigImportPath, + PGOProfile: p.Internal.PGOProfile, + }, + } + if pxtestNeedsPtest { + pxtest.Internal.Imports = append(pxtest.Internal.Imports, ptest) + } + } + + // Arrange for testing.Testing to report true. + ldflags := append(p.Internal.Ldflags, "-X", "testing.testBinary=1") + gccgoflags := append(p.Internal.Gccgoflags, "-Wl,--defsym,testing.gccgoTestBinary=1") + + // Build main package. 
+ pmain = &Package{ + PackagePublic: PackagePublic{ + Name: "main", + Dir: p.Dir, + GoFiles: []string{"_testmain.go"}, + ImportPath: p.ImportPath + ".test", + Root: p.Root, + Imports: str.StringList(TestMainDeps), + Module: p.Module, + }, + Internal: PackageInternal{ + Build: &build.Package{Name: "main"}, + BuildInfo: p.Internal.BuildInfo, + Asmflags: p.Internal.Asmflags, + Gcflags: p.Internal.Gcflags, + Ldflags: ldflags, + Gccgoflags: gccgoflags, + OrigImportPath: p.Internal.OrigImportPath, + PGOProfile: p.Internal.PGOProfile, + }, + } + + pb := p.Internal.Build + pmain.DefaultGODEBUG = defaultGODEBUG(pmain, pb.Directives, pb.TestDirectives, pb.XTestDirectives) + + // The generated main also imports testing, regexp, and os. + // Also the linker introduces implicit dependencies reported by LinkerDeps. + stk.Push("testmain") + deps := TestMainDeps // cap==len, so safe for append + ldDeps, err := LinkerDeps(p) + if err != nil && pmain.Error == nil { + pmain.Error = &PackageError{Err: err} + } + for _, d := range ldDeps { + deps = append(deps, d) + } + for _, dep := range deps { + if dep == ptest.ImportPath { + pmain.Internal.Imports = append(pmain.Internal.Imports, ptest) + } else { + p1, err := loadImport(ctx, opts, pre, dep, "", nil, &stk, nil, 0) + if err != nil && pmain.Error == nil { + pmain.Error = err + pmain.Incomplete = true + } + pmain.Internal.Imports = append(pmain.Internal.Imports, p1) + } + } + stk.Pop() + + parallelizablePart := func() { + if cover != nil && cover.Pkgs != nil && !cfg.Experiment.CoverageRedesign { + // Add imports, but avoid duplicates. + seen := map[*Package]bool{p: true, ptest: true} + for _, p1 := range pmain.Internal.Imports { + seen[p1] = true + } + for _, p1 := range cover.Pkgs { + if seen[p1] { + // Don't add duplicate imports. 
+ continue + } + seen[p1] = true + pmain.Internal.Imports = append(pmain.Internal.Imports, p1) + } + } + + allTestImports := make([]*Package, 0, len(pmain.Internal.Imports)+len(imports)+len(ximports)) + allTestImports = append(allTestImports, pmain.Internal.Imports...) + allTestImports = append(allTestImports, imports...) + allTestImports = append(allTestImports, ximports...) + setToolFlags(allTestImports...) + + // Do initial scan for metadata needed for writing _testmain.go + // Use that metadata to update the list of imports for package main. + // The list of imports is used by recompileForTest and by the loop + // afterward that gathers t.Cover information. + t, err := loadTestFuncs(p) + if err != nil && pmain.Error == nil { + pmain.setLoadPackageDataError(err, p.ImportPath, &stk, nil) + } + t.Cover = cover + if len(ptest.GoFiles)+len(ptest.CgoFiles) > 0 { + pmain.Internal.Imports = append(pmain.Internal.Imports, ptest) + pmain.Imports = append(pmain.Imports, ptest.ImportPath) + t.ImportTest = true + } + if pxtest != nil { + pmain.Internal.Imports = append(pmain.Internal.Imports, pxtest) + pmain.Imports = append(pmain.Imports, pxtest.ImportPath) + t.ImportXtest = true + } + + // Sort and dedup pmain.Imports. + // Only matters for go list -test output. + sort.Strings(pmain.Imports) + w := 0 + for _, path := range pmain.Imports { + if w == 0 || path != pmain.Imports[w-1] { + pmain.Imports[w] = path + w++ + } + } + pmain.Imports = pmain.Imports[:w] + pmain.Internal.RawImports = str.StringList(pmain.Imports) + + // Replace pmain's transitive dependencies with test copies, as necessary. 
+ cycleErr := recompileForTest(pmain, p, ptest, pxtest) + if cycleErr != nil { + ptest.Error = cycleErr + ptest.Incomplete = true + } + + if cover != nil { + if cfg.Experiment.CoverageRedesign { + // Here ptest needs to inherit the proper coverage mode (since + // it contains p's Go files), whereas pmain contains only + // test harness code (don't want to instrument it, and + // we don't want coverage hooks in the pkg init). + ptest.Internal.Cover.Mode = p.Internal.Cover.Mode + pmain.Internal.Cover.Mode = "testmain" + } + // Should we apply coverage analysis locally, only for this + // package and only for this test? Yes, if -cover is on but + // -coverpkg has not specified a list of packages for global + // coverage. + if cover.Local { + ptest.Internal.Cover.Mode = cover.Mode + + if !cfg.Experiment.CoverageRedesign { + var coverFiles []string + coverFiles = append(coverFiles, ptest.GoFiles...) + coverFiles = append(coverFiles, ptest.CgoFiles...) + ptest.Internal.CoverVars = DeclareCoverVars(ptest, coverFiles...) + } + } + + if !cfg.Experiment.CoverageRedesign { + for _, cp := range pmain.Internal.Imports { + if len(cp.Internal.CoverVars) > 0 { + t.Cover.Vars = append(t.Cover.Vars, coverInfo{cp, cp.Internal.CoverVars}) + } + } + } + } + + data, err := formatTestmain(t) + if err != nil && pmain.Error == nil { + pmain.Error = &PackageError{Err: err} + pmain.Incomplete = true + } + // Set TestmainGo even if it is empty: the presence of a TestmainGo + // indicates that this package is, in fact, a test main. + pmain.Internal.TestmainGo = &data + } + + if done != nil { + go func() { + parallelizablePart() + done() + }() + } else { + parallelizablePart() + } + + return pmain, ptest, pxtest +} + +// recompileForTest copies and replaces certain packages in pmain's dependency +// graph. This is necessary for two reasons. First, if ptest is different than +// preal, packages that import the package under test should get ptest instead +// of preal. 
This is particularly important if pxtest depends on functionality +// exposed in test sources in ptest. Second, if there is a main package +// (other than pmain) anywhere, we need to set p.Internal.ForceLibrary and +// clear p.Internal.BuildInfo in the test copy to prevent link conflicts. +// This may happen if both -coverpkg and the command line patterns include +// multiple main packages. +func recompileForTest(pmain, preal, ptest, pxtest *Package) *PackageError { + // The "test copy" of preal is ptest. + // For each package that depends on preal, make a "test copy" + // that depends on ptest. And so on, up the dependency tree. + testCopy := map[*Package]*Package{preal: ptest} + for _, p := range PackageList([]*Package{pmain}) { + if p == preal { + continue + } + // Copy on write. + didSplit := p == pmain || p == pxtest || p == ptest + split := func() { + if didSplit { + return + } + didSplit = true + if testCopy[p] != nil { + panic("recompileForTest loop") + } + p1 := new(Package) + testCopy[p] = p1 + *p1 = *p + p1.ForTest = preal.ImportPath + p1.Internal.Imports = make([]*Package, len(p.Internal.Imports)) + copy(p1.Internal.Imports, p.Internal.Imports) + p1.Imports = make([]string, len(p.Imports)) + copy(p1.Imports, p.Imports) + p = p1 + p.Target = "" + p.Internal.BuildInfo = nil + p.Internal.ForceLibrary = true + p.Internal.PGOProfile = preal.Internal.PGOProfile + } + + // Update p.Internal.Imports to use test copies. + for i, imp := range p.Internal.Imports { + if p1 := testCopy[imp]; p1 != nil && p1 != imp { + split() + + // If the test dependencies cause a cycle with pmain, this is + // where it is introduced. + // (There are no cycles in the graph until this assignment occurs.) + p.Internal.Imports[i] = p1 + } + } + + // Force main packages the test imports to be built as libraries. 
+ // Normal imports of main packages are forbidden by the package loader, + // but this can still happen if -coverpkg patterns include main packages: + // covered packages are imported by pmain. Linking multiple packages + // compiled with '-p main' causes duplicate symbol errors. + // See golang.org/issue/30907, golang.org/issue/34114. + if p.Name == "main" && p != pmain && p != ptest { + split() + } + // Split and attach PGO information to test dependencies if preal + // is built with PGO. + if preal.Internal.PGOProfile != "" && p.Internal.PGOProfile == "" { + split() + } + } + + // Do search to find cycle. + // importerOf maps each import path to its importer nearest to p. + importerOf := map[*Package]*Package{} + for _, p := range ptest.Internal.Imports { + importerOf[p] = nil + } + + // q is a breadth-first queue of packages to search for target. + // Every package added to q has a corresponding entry in pathTo. + // + // We search breadth-first for two reasons: + // + // 1. We want to report the shortest cycle. + // + // 2. If p contains multiple cycles, the first cycle we encounter might not + // contain target. To ensure termination, we have to break all cycles + // other than the first. + q := slices.Clip(ptest.Internal.Imports) + for len(q) > 0 { + p := q[0] + q = q[1:] + if p == ptest { + // The stack is supposed to be in the order x imports y imports z. + // We collect in the reverse order: z is imported by y is imported + // by x, and then we reverse it. + var stk []string + for p != nil { + stk = append(stk, p.ImportPath) + p = importerOf[p] + } + // complete the cycle: we set importer[p] = nil to break the cycle + // in importerOf, it's an implicit importerOf[p] == pTest. Add it + // back here since we reached nil in the loop above to demonstrate + // the cycle as (for example) package p imports package q imports package r + // imports package p. 
+ stk = append(stk, ptest.ImportPath) + slices.Reverse(stk) + + return &PackageError{ + ImportStack: stk, + Err: errors.New("import cycle not allowed in test"), + IsImportCycle: true, + } + } + for _, dep := range p.Internal.Imports { + if _, ok := importerOf[dep]; !ok { + importerOf[dep] = p + q = append(q, dep) + } + } + } + + return nil +} + +// isTestFunc tells whether fn has the type of a testing function. arg +// specifies the parameter type we look for: B, M or T. +func isTestFunc(fn *ast.FuncDecl, arg string) bool { + if fn.Type.Results != nil && len(fn.Type.Results.List) > 0 || + fn.Type.Params.List == nil || + len(fn.Type.Params.List) != 1 || + len(fn.Type.Params.List[0].Names) > 1 { + return false + } + ptr, ok := fn.Type.Params.List[0].Type.(*ast.StarExpr) + if !ok { + return false + } + // We can't easily check that the type is *testing.M + // because we don't know how testing has been imported, + // but at least check that it's *M or *something.M. + // Same applies for B and T. + if name, ok := ptr.X.(*ast.Ident); ok && name.Name == arg { + return true + } + if sel, ok := ptr.X.(*ast.SelectorExpr); ok && sel.Sel.Name == arg { + return true + } + return false +} + +// isTest tells whether name looks like a test (or benchmark, according to prefix). +// It is a Test (say) if there is a character after Test that is not a lower-case letter. +// We don't want TesticularCancer. +func isTest(name, prefix string) bool { + if !strings.HasPrefix(name, prefix) { + return false + } + if len(name) == len(prefix) { // "Test" is ok + return true + } + rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) + return !unicode.IsLower(rune) +} + +type coverInfo struct { + Package *Package + Vars map[string]*CoverVar +} + +// loadTestFuncs returns the testFuncs describing the tests that will be run. +// The returned testFuncs is always non-nil, even if an error occurred while +// processing test files. 
+func loadTestFuncs(ptest *Package) (*testFuncs, error) { + t := &testFuncs{ + Package: ptest, + } + var err error + for _, file := range ptest.TestGoFiles { + if lerr := t.load(filepath.Join(ptest.Dir, file), "_test", &t.ImportTest, &t.NeedTest); lerr != nil && err == nil { + err = lerr + } + } + for _, file := range ptest.XTestGoFiles { + if lerr := t.load(filepath.Join(ptest.Dir, file), "_xtest", &t.ImportXtest, &t.NeedXtest); lerr != nil && err == nil { + err = lerr + } + } + return t, err +} + +// formatTestmain returns the content of the _testmain.go file for t. +func formatTestmain(t *testFuncs) ([]byte, error) { + var buf bytes.Buffer + tmpl := testmainTmpl + if cfg.Experiment.CoverageRedesign { + tmpl = testmainTmplNewCoverage + } + if err := tmpl.Execute(&buf, t); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +type testFuncs struct { + Tests []testFunc + Benchmarks []testFunc + FuzzTargets []testFunc + Examples []testFunc + TestMain *testFunc + Package *Package + ImportTest bool + NeedTest bool + ImportXtest bool + NeedXtest bool + Cover *TestCover +} + +// ImportPath returns the import path of the package being tested, if it is within GOPATH. +// This is printed by the testing package when running benchmarks. +func (t *testFuncs) ImportPath() string { + pkg := t.Package.ImportPath + if strings.HasPrefix(pkg, "_/") { + return "" + } + if pkg == "command-line-arguments" { + return "" + } + return pkg +} + +// Covered returns a string describing which packages are being tested for coverage. +// If the covered package is the same as the tested package, it returns the empty string. +// Otherwise it is a comma-separated human-readable list of packages beginning with +// " in", ready for use in the coverage message. +func (t *testFuncs) Covered() string { + if t.Cover == nil || t.Cover.Paths == nil { + return "" + } + return " in " + strings.Join(t.Cover.Paths, ", ") +} + +// Tested returns the name of the package being tested. 
+func (t *testFuncs) Tested() string { + return t.Package.Name +} + +type testFunc struct { + Package string // imported package name (_test or _xtest) + Name string // function name + Output string // output, for examples + Unordered bool // output is allowed to be unordered. +} + +var testFileSet = token.NewFileSet() + +func (t *testFuncs) load(filename, pkg string, doImport, seen *bool) error { + // Pass in the overlaid source if we have an overlay for this file. + src, err := fsys.Open(filename) + if err != nil { + return err + } + defer src.Close() + f, err := parser.ParseFile(testFileSet, filename, src, parser.ParseComments|parser.SkipObjectResolution) + if err != nil { + return err + } + for _, d := range f.Decls { + n, ok := d.(*ast.FuncDecl) + if !ok { + continue + } + if n.Recv != nil { + continue + } + name := n.Name.String() + switch { + case name == "TestMain": + if isTestFunc(n, "T") { + t.Tests = append(t.Tests, testFunc{pkg, name, "", false}) + *doImport, *seen = true, true + continue + } + err := checkTestFunc(n, "M") + if err != nil { + return err + } + if t.TestMain != nil { + return errors.New("multiple definitions of TestMain") + } + t.TestMain = &testFunc{pkg, name, "", false} + *doImport, *seen = true, true + case isTest(name, "Test"): + err := checkTestFunc(n, "T") + if err != nil { + return err + } + t.Tests = append(t.Tests, testFunc{pkg, name, "", false}) + *doImport, *seen = true, true + case isTest(name, "Benchmark"): + err := checkTestFunc(n, "B") + if err != nil { + return err + } + t.Benchmarks = append(t.Benchmarks, testFunc{pkg, name, "", false}) + *doImport, *seen = true, true + case isTest(name, "Fuzz"): + err := checkTestFunc(n, "F") + if err != nil { + return err + } + t.FuzzTargets = append(t.FuzzTargets, testFunc{pkg, name, "", false}) + *doImport, *seen = true, true + } + } + ex := doc.Examples(f) + sort.Slice(ex, func(i, j int) bool { return ex[i].Order < ex[j].Order }) + for _, e := range ex { + *doImport = true // import 
test file whether executed or not + if e.Output == "" && !e.EmptyOutput { + // Don't run examples with no output. + continue + } + t.Examples = append(t.Examples, testFunc{pkg, "Example" + e.Name, e.Output, e.Unordered}) + *seen = true + } + return nil +} + +func checkTestFunc(fn *ast.FuncDecl, arg string) error { + var why string + if !isTestFunc(fn, arg) { + why = fmt.Sprintf("must be: func %s(%s *testing.%s)", fn.Name.String(), strings.ToLower(arg), arg) + } + if fn.Type.TypeParams.NumFields() > 0 { + why = "test functions cannot have type parameters" + } + if why != "" { + pos := testFileSet.Position(fn.Pos()) + return fmt.Errorf("%s: wrong signature for %s, %s", pos, fn.Name.String(), why) + } + return nil +} + +var testmainTmpl = lazytemplate.New("main", ` +// Code generated by 'go test'. DO NOT EDIT. + +package main + +import ( + "os" +{{if .TestMain}} + "reflect" +{{end}} + "testing" + "testing/internal/testdeps" + +{{if .ImportTest}} + {{if .NeedTest}}_test{{else}}_{{end}} {{.Package.ImportPath | printf "%q"}} +{{end}} +{{if .ImportXtest}} + {{if .NeedXtest}}_xtest{{else}}_{{end}} {{.Package.ImportPath | printf "%s_test" | printf "%q"}} +{{end}} +{{if .Cover}} +{{range $i, $p := .Cover.Vars}} + _cover{{$i}} {{$p.Package.ImportPath | printf "%q"}} +{{end}} +{{end}} +) + +var tests = []testing.InternalTest{ +{{range .Tests}} + {"{{.Name}}", {{.Package}}.{{.Name}}}, +{{end}} +} + +var benchmarks = []testing.InternalBenchmark{ +{{range .Benchmarks}} + {"{{.Name}}", {{.Package}}.{{.Name}}}, +{{end}} +} + +var fuzzTargets = []testing.InternalFuzzTarget{ +{{range .FuzzTargets}} + {"{{.Name}}", {{.Package}}.{{.Name}}}, +{{end}} +} + +var examples = []testing.InternalExample{ +{{range .Examples}} + {"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}, {{.Unordered}}}, +{{end}} +} + +func init() { + testdeps.ImportPath = {{.ImportPath | printf "%q"}} +} + +{{if .Cover}} + +// Only updated by init functions, so no need for atomicity. 
+var ( + coverCounters = make(map[string][]uint32) + coverBlocks = make(map[string][]testing.CoverBlock) +) + +func init() { + {{range $i, $p := .Cover.Vars}} + {{range $file, $cover := $p.Vars}} + coverRegisterFile({{printf "%q" $cover.File}}, _cover{{$i}}.{{$cover.Var}}.Count[:], _cover{{$i}}.{{$cover.Var}}.Pos[:], _cover{{$i}}.{{$cover.Var}}.NumStmt[:]) + {{end}} + {{end}} +} + +func coverRegisterFile(fileName string, counter []uint32, pos []uint32, numStmts []uint16) { + if 3*len(counter) != len(pos) || len(counter) != len(numStmts) { + panic("coverage: mismatched sizes") + } + if coverCounters[fileName] != nil { + // Already registered. + return + } + coverCounters[fileName] = counter + block := make([]testing.CoverBlock, len(counter)) + for i := range counter { + block[i] = testing.CoverBlock{ + Line0: pos[3*i+0], + Col0: uint16(pos[3*i+2]), + Line1: pos[3*i+1], + Col1: uint16(pos[3*i+2]>>16), + Stmts: numStmts[i], + } + } + coverBlocks[fileName] = block +} +{{end}} + +func main() { +{{if .Cover}} + testing.RegisterCover(testing.Cover{ + Mode: {{printf "%q" .Cover.Mode}}, + Counters: coverCounters, + Blocks: coverBlocks, + CoveredPackages: {{printf "%q" .Covered}}, + }) +{{end}} + m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, fuzzTargets, examples) +{{with .TestMain}} + {{.Package}}.{{.Name}}(m) + os.Exit(int(reflect.ValueOf(m).Elem().FieldByName("exitCode").Int())) +{{else}} + os.Exit(m.Run()) +{{end}} +} + +`) + +var testmainTmplNewCoverage = lazytemplate.New("main", ` +// Code generated by 'go test'. DO NOT EDIT. 
+ +package main + +import ( + "os" +{{if .Cover}} + _ "unsafe" +{{end}} +{{if .TestMain}} + "reflect" +{{end}} + "testing" + "testing/internal/testdeps" + +{{if .ImportTest}} + {{if .NeedTest}}_test{{else}}_{{end}} {{.Package.ImportPath | printf "%q"}} +{{end}} +{{if .ImportXtest}} + {{if .NeedXtest}}_xtest{{else}}_{{end}} {{.Package.ImportPath | printf "%s_test" | printf "%q"}} +{{end}} +) + +var tests = []testing.InternalTest{ +{{range .Tests}} + {"{{.Name}}", {{.Package}}.{{.Name}}}, +{{end}} +} + +var benchmarks = []testing.InternalBenchmark{ +{{range .Benchmarks}} + {"{{.Name}}", {{.Package}}.{{.Name}}}, +{{end}} +} + +var fuzzTargets = []testing.InternalFuzzTarget{ +{{range .FuzzTargets}} + {"{{.Name}}", {{.Package}}.{{.Name}}}, +{{end}} +} + +var examples = []testing.InternalExample{ +{{range .Examples}} + {"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}, {{.Unordered}}}, +{{end}} +} + +func init() { + testdeps.ImportPath = {{.ImportPath | printf "%q"}} +} + +{{if .Cover}} + +//go:linkname runtime_coverage_processCoverTestDir runtime/coverage.processCoverTestDir +func runtime_coverage_processCoverTestDir(dir string, cfile string, cmode string, cpkgs string) error + +//go:linkname testing_registerCover2 testing.registerCover2 +func testing_registerCover2(mode string, tearDown func(coverprofile string, gocoverdir string) (string, error), snapcov func() float64) + +//go:linkname runtime_coverage_markProfileEmitted runtime/coverage.markProfileEmitted +func runtime_coverage_markProfileEmitted(val bool) + +//go:linkname runtime_coverage_snapshot runtime/coverage.snapshot +func runtime_coverage_snapshot() float64 + +func coverTearDown(coverprofile string, gocoverdir string) (string, error) { + var err error + if gocoverdir == "" { + gocoverdir, err = os.MkdirTemp("", "gocoverdir") + if err != nil { + return "error setting GOCOVERDIR: bad os.MkdirTemp return", err + } + defer os.RemoveAll(gocoverdir) + } + runtime_coverage_markProfileEmitted(true) + 
cmode := {{printf "%q" .Cover.Mode}} + if err := runtime_coverage_processCoverTestDir(gocoverdir, coverprofile, cmode, {{printf "%q" .Covered}}); err != nil { + return "error generating coverage report", err + } + return "", nil +} +{{end}} + +func main() { +{{if .Cover}} + testing_registerCover2({{printf "%q" .Cover.Mode}}, coverTearDown, runtime_coverage_snapshot) +{{end}} + m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, fuzzTargets, examples) +{{with .TestMain}} + {{.Package}}.{{.Name}}(m) + os.Exit(int(reflect.ValueOf(m).Elem().FieldByName("exitCode").Int())) +{{else}} + os.Exit(m.Run()) +{{end}} +} + +`) diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go new file mode 100644 index 0000000000000000000000000000000000000000..d37331892d1efb5b909469f032a0b35025cba4ef --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/internal/filelock/filelock.go @@ -0,0 +1,83 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filelock provides a platform-independent API for advisory file +// locking. Calls to functions in this package on platforms that do not support +// advisory locks will return errors for which IsNotSupported returns true. +package filelock + +import ( + "errors" + "io/fs" +) + +// A File provides the minimal set of methods required to lock an open file. +// File implementations must be usable as map keys. +// The usual implementation is *os.File. +type File interface { + // Name returns the name of the file. + Name() string + + // Fd returns a valid file descriptor. + // (If the File is an *os.File, it must not be closed.) + Fd() uintptr + + // Stat returns the FileInfo structure describing file. 
+ Stat() (fs.FileInfo, error) +} + +// Lock places an advisory write lock on the file, blocking until it can be +// locked. +// +// If Lock returns nil, no other process will be able to place a read or write +// lock on the file until this process exits, closes f, or calls Unlock on it. +// +// If f's descriptor is already read- or write-locked, the behavior of Lock is +// unspecified. +// +// Closing the file may or may not release the lock promptly. Callers should +// ensure that Unlock is always called when Lock succeeds. +func Lock(f File) error { + return lock(f, writeLock) +} + +// RLock places an advisory read lock on the file, blocking until it can be locked. +// +// If RLock returns nil, no other process will be able to place a write lock on +// the file until this process exits, closes f, or calls Unlock on it. +// +// If f is already read- or write-locked, the behavior of RLock is unspecified. +// +// Closing the file may or may not release the lock promptly. Callers should +// ensure that Unlock is always called if RLock succeeds. +func RLock(f File) error { + return lock(f, readLock) +} + +// Unlock removes an advisory lock placed on f by this process. +// +// The caller must not attempt to unlock a file that is not locked. +func Unlock(f File) error { + return unlock(f) +} + +// String returns the name of the function corresponding to lt +// (Lock, RLock, or Unlock). +func (lt lockType) String() string { + switch lt { + case readLock: + return "RLock" + case writeLock: + return "Lock" + default: + return "Unlock" + } +} + +// IsNotSupported returns a boolean indicating whether the error is known to +// report that a function is not supported (possibly for a specific input). +// It is satisfied by errors.ErrUnsupported as well as some syscall errors. 
+func IsNotSupported(err error) bool { + return errors.Is(err, errors.ErrUnsupported) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go new file mode 100644 index 0000000000000000000000000000000000000000..8a62839734e02fddac5531b767d3fc5f05433840 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/internal/filelock/filelock_fcntl.go @@ -0,0 +1,210 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || (solaris && !illumos) + +// This code implements the filelock API using POSIX 'fcntl' locks, which attach +// to an (inode, process) pair rather than a file descriptor. To avoid unlocking +// files prematurely when the same file is opened through different descriptors, +// we allow only one read-lock at a time. +// +// Most platforms provide some alternative API, such as an 'flock' system call +// or an F_OFD_SETLK command for 'fcntl', that allows for better concurrency and +// does not require per-inode bookkeeping in the application. + +package filelock + +import ( + "errors" + "io" + "io/fs" + "math/rand" + "sync" + "syscall" + "time" +) + +type lockType int16 + +const ( + readLock lockType = syscall.F_RDLCK + writeLock lockType = syscall.F_WRLCK +) + +type inode = uint64 // type of syscall.Stat_t.Ino + +type inodeLock struct { + owner File + queue []<-chan File +} + +var ( + mu sync.Mutex + inodes = map[File]inode{} + locks = map[inode]inodeLock{} +) + +func lock(f File, lt lockType) (err error) { + // POSIX locks apply per inode and process, and the lock for an inode is + // released when *any* descriptor for that inode is closed. 
So we need to + // synchronize access to each inode internally, and must serialize lock and + // unlock calls that refer to the same inode through different descriptors. + fi, err := f.Stat() + if err != nil { + return err + } + ino := fi.Sys().(*syscall.Stat_t).Ino + + mu.Lock() + if i, dup := inodes[f]; dup && i != ino { + mu.Unlock() + return &fs.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: errors.New("inode for file changed since last Lock or RLock"), + } + } + inodes[f] = ino + + var wait chan File + l := locks[ino] + if l.owner == f { + // This file already owns the lock, but the call may change its lock type. + } else if l.owner == nil { + // No owner: it's ours now. + l.owner = f + } else { + // Already owned: add a channel to wait on. + wait = make(chan File) + l.queue = append(l.queue, wait) + } + locks[ino] = l + mu.Unlock() + + if wait != nil { + wait <- f + } + + // Spurious EDEADLK errors arise on platforms that compute deadlock graphs at + // the process, rather than thread, level. Consider processes P and Q, with + // threads P.1, P.2, and Q.3. The following trace is NOT a deadlock, but will be + // reported as a deadlock on systems that consider only process granularity: + // + // P.1 locks file A. + // Q.3 locks file B. + // Q.3 blocks on file A. + // P.2 blocks on file B. (This is erroneously reported as a deadlock.) + // P.1 unlocks file A. + // Q.3 unblocks and locks file A. + // Q.3 unlocks files A and B. + // P.2 unblocks and locks file B. + // P.2 unlocks file B. + // + // These spurious errors were observed in practice on AIX and Solaris in + // cmd/go: see https://golang.org/issue/32817. + // + // We work around this bug by treating EDEADLK as always spurious. If there + // really is a lock-ordering bug between the interacting processes, it will + // become a livelock instead, but that's not appreciably worse than if we had + // a proper flock implementation (which generally does not even attempt to + // diagnose deadlocks). 
+ // + // In the above example, that changes the trace to: + // + // P.1 locks file A. + // Q.3 locks file B. + // Q.3 blocks on file A. + // P.2 spuriously fails to lock file B and goes to sleep. + // P.1 unlocks file A. + // Q.3 unblocks and locks file A. + // Q.3 unlocks files A and B. + // P.2 wakes up and locks file B. + // P.2 unlocks file B. + // + // We know that the retry loop will not introduce a *spurious* livelock + // because, according to the POSIX specification, EDEADLK is only to be + // returned when “the lock is blocked by a lock from another process”. + // If that process is blocked on some lock that we are holding, then the + // resulting livelock is due to a real deadlock (and would manifest as such + // when using, for example, the flock implementation of this package). + // If the other process is *not* blocked on some other lock that we are + // holding, then it will eventually release the requested lock. + + nextSleep := 1 * time.Millisecond + const maxSleep = 500 * time.Millisecond + for { + err = setlkw(f.Fd(), lt) + if err != syscall.EDEADLK { + break + } + time.Sleep(nextSleep) + + nextSleep += nextSleep + if nextSleep > maxSleep { + nextSleep = maxSleep + } + // Apply 10% jitter to avoid synchronizing collisions when we finally unblock. + nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep)) + } + + if err != nil { + unlock(f) + return &fs.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: err, + } + } + + return nil +} + +func unlock(f File) error { + var owner File + + mu.Lock() + ino, ok := inodes[f] + if ok { + owner = locks[ino].owner + } + mu.Unlock() + + if owner != f { + panic("unlock called on a file that is not locked") + } + + err := setlkw(f.Fd(), syscall.F_UNLCK) + + mu.Lock() + l := locks[ino] + if len(l.queue) == 0 { + // No waiters: remove the map entry. + delete(locks, ino) + } else { + // The first waiter is sending us their file now. + // Receive it and update the queue. 
+ l.owner = <-l.queue[0] + l.queue = l.queue[1:] + locks[ino] = l + } + delete(inodes, f) + mu.Unlock() + + return err +} + +// setlkw calls FcntlFlock with F_SETLKW for the entire file indicated by fd. +func setlkw(fd uintptr, lt lockType) error { + for { + err := syscall.FcntlFlock(fd, syscall.F_SETLKW, &syscall.Flock_t{ + Type: int16(lt), + Whence: io.SeekStart, + Start: 0, + Len: 0, // All bytes. + }) + if err != syscall.EINTR { + return err + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/internal/filelock/filelock_other.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/internal/filelock/filelock_other.go new file mode 100644 index 0000000000000000000000000000000000000000..b16709ed51b1e263cd579fe60e2f8715d7dbf032 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/internal/filelock/filelock_other.go @@ -0,0 +1,35 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !unix && !windows + +package filelock + +import ( + "errors" + "io/fs" +) + +type lockType int8 + +const ( + readLock = iota + 1 + writeLock +) + +func lock(f File, lt lockType) error { + return &fs.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: errors.ErrUnsupported, + } +} + +func unlock(f File) error { + return &fs.PathError{ + Op: "Unlock", + Path: f.Name(), + Err: errors.ErrUnsupported, + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/internal/filelock/filelock_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/internal/filelock/filelock_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d32bf06160572c23d10e96336804d191e1b5ccb1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/internal/filelock/filelock_test.go @@ -0,0 +1,210 @@ +// Copyright 2018 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !js && !plan9 && !wasip1 + +package filelock_test + +import ( + "fmt" + "internal/testenv" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + "cmd/go/internal/lockedfile/internal/filelock" +) + +func lock(t *testing.T, f *os.File) { + t.Helper() + err := filelock.Lock(f) + t.Logf("Lock(fd %d) = %v", f.Fd(), err) + if err != nil { + t.Fail() + } +} + +func rLock(t *testing.T, f *os.File) { + t.Helper() + err := filelock.RLock(f) + t.Logf("RLock(fd %d) = %v", f.Fd(), err) + if err != nil { + t.Fail() + } +} + +func unlock(t *testing.T, f *os.File) { + t.Helper() + err := filelock.Unlock(f) + t.Logf("Unlock(fd %d) = %v", f.Fd(), err) + if err != nil { + t.Fail() + } +} + +func mustTempFile(t *testing.T) (f *os.File, remove func()) { + t.Helper() + + base := filepath.Base(t.Name()) + f, err := os.CreateTemp("", base) + if err != nil { + t.Fatalf(`os.CreateTemp("", %q) = %v`, base, err) + } + t.Logf("fd %d = %s", f.Fd(), f.Name()) + + return f, func() { + f.Close() + os.Remove(f.Name()) + } +} + +func mustOpen(t *testing.T, name string) *os.File { + t.Helper() + + f, err := os.OpenFile(name, os.O_RDWR, 0) + if err != nil { + t.Fatalf("os.Open(%q) = %v", name, err) + } + + t.Logf("fd %d = os.Open(%q)", f.Fd(), name) + return f +} + +const ( + quiescent = 10 * time.Millisecond + probablyStillBlocked = 10 * time.Second +) + +func mustBlock(t *testing.T, op string, f *os.File) (wait func(*testing.T)) { + t.Helper() + + desc := fmt.Sprintf("%s(fd %d)", op, f.Fd()) + + done := make(chan struct{}) + go func() { + t.Helper() + switch op { + case "Lock": + lock(t, f) + case "RLock": + rLock(t, f) + default: + panic("invalid op: " + op) + } + close(done) + }() + + select { + case <-done: + t.Fatalf("%s unexpectedly did not block", desc) + return nil + + case <-time.After(quiescent): + t.Logf("%s is blocked (as expected)", desc) + 
return func(t *testing.T) { + t.Helper() + select { + case <-time.After(probablyStillBlocked): + t.Fatalf("%s is unexpectedly still blocked", desc) + case <-done: + } + } + } +} + +func TestLockExcludesLock(t *testing.T) { + t.Parallel() + + f, remove := mustTempFile(t) + defer remove() + + other := mustOpen(t, f.Name()) + defer other.Close() + + lock(t, f) + lockOther := mustBlock(t, "Lock", other) + unlock(t, f) + lockOther(t) + unlock(t, other) +} + +func TestLockExcludesRLock(t *testing.T) { + t.Parallel() + + f, remove := mustTempFile(t) + defer remove() + + other := mustOpen(t, f.Name()) + defer other.Close() + + lock(t, f) + rLockOther := mustBlock(t, "RLock", other) + unlock(t, f) + rLockOther(t) + unlock(t, other) +} + +func TestRLockExcludesOnlyLock(t *testing.T) { + t.Parallel() + + f, remove := mustTempFile(t) + defer remove() + rLock(t, f) + + f2 := mustOpen(t, f.Name()) + defer f2.Close() + + doUnlockTF := false + switch runtime.GOOS { + case "aix", "solaris": + // When using POSIX locks (as on Solaris), we can't safely read-lock the + // same inode through two different descriptors at the same time: when the + // first descriptor is closed, the second descriptor would still be open but + // silently unlocked. So a second RLock must block instead of proceeding. + lockF2 := mustBlock(t, "RLock", f2) + unlock(t, f) + lockF2(t) + default: + rLock(t, f2) + doUnlockTF = true + } + + other := mustOpen(t, f.Name()) + defer other.Close() + lockOther := mustBlock(t, "Lock", other) + + unlock(t, f2) + if doUnlockTF { + unlock(t, f) + } + lockOther(t) + unlock(t, other) +} + +func TestLockNotDroppedByExecCommand(t *testing.T) { + testenv.MustHaveExec(t) + + f, remove := mustTempFile(t) + defer remove() + + lock(t, f) + + other := mustOpen(t, f.Name()) + defer other.Close() + + // Some kinds of file locks are dropped when a duplicated or forked file + // descriptor is unlocked. Double-check that the approach used by os/exec does + // not accidentally drop locks. 
+ cmd := testenv.Command(t, os.Args[0], "-test.run=^$") + if err := cmd.Run(); err != nil { + t.Fatalf("exec failed: %v", err) + } + + lockOther := mustBlock(t, "Lock", other) + unlock(t, f) + lockOther(t) + unlock(t, other) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/internal/filelock/filelock_unix.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/internal/filelock/filelock_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..6f73b1bfeea9bb89f338e1770f2c9d9754368217 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/internal/filelock/filelock_unix.go @@ -0,0 +1,40 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || illumos || linux || netbsd || openbsd + +package filelock + +import ( + "io/fs" + "syscall" +) + +type lockType int16 + +const ( + readLock lockType = syscall.LOCK_SH + writeLock lockType = syscall.LOCK_EX +) + +func lock(f File, lt lockType) (err error) { + for { + err = syscall.Flock(int(f.Fd()), int(lt)) + if err != syscall.EINTR { + break + } + } + if err != nil { + return &fs.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: err, + } + } + return nil +} + +func unlock(f File) error { + return lock(f, syscall.LOCK_UN) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/internal/filelock/filelock_windows.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/internal/filelock/filelock_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..647ee9921d2592ab8f52ae0c5699fa63fb967bd4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/internal/filelock/filelock_windows.go @@ -0,0 +1,57 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package filelock + +import ( + "internal/syscall/windows" + "io/fs" + "syscall" +) + +type lockType uint32 + +const ( + readLock lockType = 0 + writeLock lockType = windows.LOCKFILE_EXCLUSIVE_LOCK +) + +const ( + reserved = 0 + allBytes = ^uint32(0) +) + +func lock(f File, lt lockType) error { + // Per https://golang.org/issue/19098, “Programs currently expect the Fd + // method to return a handle that uses ordinary synchronous I/O.” + // However, LockFileEx still requires an OVERLAPPED structure, + // which contains the file offset of the beginning of the lock range. + // We want to lock the entire file, so we leave the offset as zero. + ol := new(syscall.Overlapped) + + err := windows.LockFileEx(syscall.Handle(f.Fd()), uint32(lt), reserved, allBytes, allBytes, ol) + if err != nil { + return &fs.PathError{ + Op: lt.String(), + Path: f.Name(), + Err: err, + } + } + return nil +} + +func unlock(f File) error { + ol := new(syscall.Overlapped) + err := windows.UnlockFileEx(syscall.Handle(f.Fd()), reserved, allBytes, allBytes, ol) + if err != nil { + return &fs.PathError{ + Op: "Unlock", + Path: f.Name(), + Err: err, + } + } + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/lockedfile.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/lockedfile.go new file mode 100644 index 0000000000000000000000000000000000000000..82e1a89675e7042aab6429990491e0e6f4ab6219 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/lockedfile.go @@ -0,0 +1,187 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lockedfile creates and manipulates files whose contents should only +// change atomically. 
+package lockedfile + +import ( + "fmt" + "io" + "io/fs" + "os" + "runtime" +) + +// A File is a locked *os.File. +// +// Closing the file releases the lock. +// +// If the program exits while a file is locked, the operating system releases +// the lock but may not do so promptly: callers must ensure that all locked +// files are closed before exiting. +type File struct { + osFile + closed bool +} + +// osFile embeds a *os.File while keeping the pointer itself unexported. +// (When we close a File, it must be the same file descriptor that we opened!) +type osFile struct { + *os.File +} + +// OpenFile is like os.OpenFile, but returns a locked file. +// If flag includes os.O_WRONLY or os.O_RDWR, the file is write-locked; +// otherwise, it is read-locked. +func OpenFile(name string, flag int, perm fs.FileMode) (*File, error) { + var ( + f = new(File) + err error + ) + f.osFile.File, err = openFile(name, flag, perm) + if err != nil { + return nil, err + } + + // Although the operating system will drop locks for open files when the go + // command exits, we want to hold locks for as little time as possible, and we + // especially don't want to leave a file locked after we're done with it. Our + // Close method is what releases the locks, so use a finalizer to report + // missing Close calls on a best-effort basis. + runtime.SetFinalizer(f, func(f *File) { + panic(fmt.Sprintf("lockedfile.File %s became unreachable without a call to Close", f.Name())) + }) + + return f, nil +} + +// Open is like os.Open, but returns a read-locked file. +func Open(name string) (*File, error) { + return OpenFile(name, os.O_RDONLY, 0) +} + +// Create is like os.Create, but returns a write-locked file. +func Create(name string) (*File, error) { + return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) +} + +// Edit creates the named file with mode 0666 (before umask), +// but does not truncate existing contents. +// +// If Edit succeeds, methods on the returned File can be used for I/O. 
+// The associated file descriptor has mode O_RDWR and the file is write-locked. +func Edit(name string) (*File, error) { + return OpenFile(name, os.O_RDWR|os.O_CREATE, 0666) +} + +// Close unlocks and closes the underlying file. +// +// Close may be called multiple times; all calls after the first will return a +// non-nil error. +func (f *File) Close() error { + if f.closed { + return &fs.PathError{ + Op: "close", + Path: f.Name(), + Err: fs.ErrClosed, + } + } + f.closed = true + + err := closeFile(f.osFile.File) + runtime.SetFinalizer(f, nil) + return err +} + +// Read opens the named file with a read-lock and returns its contents. +func Read(name string) ([]byte, error) { + f, err := Open(name) + if err != nil { + return nil, err + } + defer f.Close() + + return io.ReadAll(f) +} + +// Write opens the named file (creating it with the given permissions if needed), +// then write-locks it and overwrites it with the given content. +func Write(name string, content io.Reader, perm fs.FileMode) (err error) { + f, err := OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + + _, err = io.Copy(f, content) + if closeErr := f.Close(); err == nil { + err = closeErr + } + return err +} + +// Transform invokes t with the result of reading the named file, with its lock +// still held. +// +// If t returns a nil error, Transform then writes the returned contents back to +// the file, making a best effort to preserve existing contents on error. +// +// t must not modify the slice passed to it. 
+func Transform(name string, t func([]byte) ([]byte, error)) (err error) { + f, err := Edit(name) + if err != nil { + return err + } + defer f.Close() + + old, err := io.ReadAll(f) + if err != nil { + return err + } + + new, err := t(old) + if err != nil { + return err + } + + if len(new) > len(old) { + // The overall file size is increasing, so write the tail first: if we're + // about to run out of space on the disk, we would rather detect that + // failure before we have overwritten the original contents. + if _, err := f.WriteAt(new[len(old):], int64(len(old))); err != nil { + // Make a best effort to remove the incomplete tail. + f.Truncate(int64(len(old))) + return err + } + } + + // We're about to overwrite the old contents. In case of failure, make a best + // effort to roll back before we close the file. + defer func() { + if err != nil { + if _, err := f.WriteAt(old, 0); err == nil { + f.Truncate(int64(len(old))) + } + } + }() + + if len(new) >= len(old) { + if _, err := f.WriteAt(new[:len(old)], 0); err != nil { + return err + } + } else { + if _, err := f.WriteAt(new, 0); err != nil { + return err + } + // The overall file size is decreasing, so shrink the file to its final size + // after writing. We do this after writing (instead of before) so that if + // the write fails, enough filesystem space will likely still be reserved + // to contain the previous contents. + if err := f.Truncate(int64(len(new))); err != nil { + return err + } + } + + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/lockedfile_filelock.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/lockedfile_filelock.go new file mode 100644 index 0000000000000000000000000000000000000000..1a677a7fe4a60d3a98866a846bbd6e39b057cbe9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/lockedfile_filelock.go @@ -0,0 +1,65 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !plan9 + +package lockedfile + +import ( + "io/fs" + "os" + + "cmd/go/internal/lockedfile/internal/filelock" +) + +func openFile(name string, flag int, perm fs.FileMode) (*os.File, error) { + // On BSD systems, we could add the O_SHLOCK or O_EXLOCK flag to the OpenFile + // call instead of locking separately, but we have to support separate locking + // calls for Linux and Windows anyway, so it's simpler to use that approach + // consistently. + + f, err := os.OpenFile(name, flag&^os.O_TRUNC, perm) + if err != nil { + return nil, err + } + + switch flag & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR) { + case os.O_WRONLY, os.O_RDWR: + err = filelock.Lock(f) + default: + err = filelock.RLock(f) + } + if err != nil { + f.Close() + return nil, err + } + + if flag&os.O_TRUNC == os.O_TRUNC { + if err := f.Truncate(0); err != nil { + // The documentation for os.O_TRUNC says “if possible, truncate file when + // opened”, but doesn't define “possible” (golang.org/issue/28699). + // We'll treat regular files (and symlinks to regular files) as “possible” + // and ignore errors for the rest. + if fi, statErr := f.Stat(); statErr != nil || fi.Mode().IsRegular() { + filelock.Unlock(f) + f.Close() + return nil, err + } + } + } + + return f, nil +} + +func closeFile(f *os.File) error { + // Since locking syscalls operate on file descriptors, we must unlock the file + // while the descriptor is still valid — that is, before the file is closed — + // and avoid unlocking files that are already closed. 
+ err := filelock.Unlock(f) + + if closeErr := f.Close(); err == nil { + err = closeErr + } + return err +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/lockedfile_plan9.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/lockedfile_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..a2ce794b967521af86975f9fe86f280ac0bc0dd0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/lockedfile_plan9.go @@ -0,0 +1,94 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build plan9 + +package lockedfile + +import ( + "io/fs" + "math/rand" + "os" + "strings" + "time" +) + +// Opening an exclusive-use file returns an error. +// The expected error strings are: +// +// - "open/create -- file is locked" (cwfs, kfs) +// - "exclusive lock" (fossil) +// - "exclusive use file already open" (ramfs) +var lockedErrStrings = [...]string{ + "file is locked", + "exclusive lock", + "exclusive use file already open", +} + +// Even though plan9 doesn't support the Lock/RLock/Unlock functions to +// manipulate already-open files, IsLocked is still meaningful: os.OpenFile +// itself may return errors that indicate that a file with the ModeExclusive bit +// set is already open. +func isLocked(err error) bool { + s := err.Error() + + for _, frag := range lockedErrStrings { + if strings.Contains(s, frag) { + return true + } + } + + return false +} + +func openFile(name string, flag int, perm fs.FileMode) (*os.File, error) { + // Plan 9 uses a mode bit instead of explicit lock/unlock syscalls. + // + // Per http://man.cat-v.org/plan_9/5/stat: “Exclusive use files may be open + // for I/O by only one fid at a time across all clients of the server. 
If a + // second open is attempted, it draws an error.” + // + // So we can try to open a locked file, but if it fails we're on our own to + // figure out when it becomes available. We'll use exponential backoff with + // some jitter and an arbitrary limit of 500ms. + + // If the file was unpacked or created by some other program, it might not + // have the ModeExclusive bit set. Set it before we call OpenFile, so that we + // can be confident that a successful OpenFile implies exclusive use. + if fi, err := os.Stat(name); err == nil { + if fi.Mode()&fs.ModeExclusive == 0 { + if err := os.Chmod(name, fi.Mode()|fs.ModeExclusive); err != nil { + return nil, err + } + } + } else if !os.IsNotExist(err) { + return nil, err + } + + nextSleep := 1 * time.Millisecond + const maxSleep = 500 * time.Millisecond + for { + f, err := os.OpenFile(name, flag, perm|fs.ModeExclusive) + if err == nil { + return f, nil + } + + if !isLocked(err) { + return nil, err + } + + time.Sleep(nextSleep) + + nextSleep += nextSleep + if nextSleep > maxSleep { + nextSleep = maxSleep + } + // Apply 10% jitter to avoid synchronizing collisions. + nextSleep += time.Duration((0.1*rand.Float64() - 0.05) * float64(nextSleep)) + } +} + +func closeFile(f *os.File) error { + return f.Close() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/lockedfile_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/lockedfile_test.go new file mode 100644 index 0000000000000000000000000000000000000000..a9fa40b8df22a7f641734db4bc1b031f953c12c3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/lockedfile_test.go @@ -0,0 +1,286 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// js and wasip1 do not support inter-process file locking. 
+// +//go:build !js && !wasip1 + +package lockedfile_test + +import ( + "fmt" + "internal/testenv" + "os" + "path/filepath" + "testing" + "time" + + "cmd/go/internal/lockedfile" +) + +func mustTempDir(t *testing.T) (dir string, remove func()) { + t.Helper() + + dir, err := os.MkdirTemp("", filepath.Base(t.Name())) + if err != nil { + t.Fatal(err) + } + return dir, func() { os.RemoveAll(dir) } +} + +const ( + quiescent = 10 * time.Millisecond + probablyStillBlocked = 10 * time.Second +) + +func mustBlock(t *testing.T, desc string, f func()) (wait func(*testing.T)) { + t.Helper() + + done := make(chan struct{}) + go func() { + f() + close(done) + }() + + timer := time.NewTimer(quiescent) + defer timer.Stop() + select { + case <-done: + t.Fatalf("%s unexpectedly did not block", desc) + case <-timer.C: + } + + return func(t *testing.T) { + logTimer := time.NewTimer(quiescent) + defer logTimer.Stop() + + select { + case <-logTimer.C: + // We expect the operation to have unblocked by now, + // but maybe it's just slow. Write to the test log + // in case the test times out, but don't fail it. + t.Helper() + t.Logf("%s is unexpectedly still blocked after %v", desc, quiescent) + + // Wait for the operation to actually complete, no matter how long it + // takes. If the test has deadlocked, this will cause the test to time out + // and dump goroutines. 
+ <-done + + case <-done: + } + } +} + +func TestMutexExcludes(t *testing.T) { + t.Parallel() + + dir, remove := mustTempDir(t) + defer remove() + + path := filepath.Join(dir, "lock") + + mu := lockedfile.MutexAt(path) + t.Logf("mu := MutexAt(_)") + + unlock, err := mu.Lock() + if err != nil { + t.Fatalf("mu.Lock: %v", err) + } + t.Logf("unlock, _ := mu.Lock()") + + mu2 := lockedfile.MutexAt(mu.Path) + t.Logf("mu2 := MutexAt(mu.Path)") + + wait := mustBlock(t, "mu2.Lock()", func() { + unlock2, err := mu2.Lock() + if err != nil { + t.Errorf("mu2.Lock: %v", err) + return + } + t.Logf("unlock2, _ := mu2.Lock()") + t.Logf("unlock2()") + unlock2() + }) + + t.Logf("unlock()") + unlock() + wait(t) +} + +func TestReadWaitsForLock(t *testing.T) { + t.Parallel() + + dir, remove := mustTempDir(t) + defer remove() + + path := filepath.Join(dir, "timestamp.txt") + + f, err := lockedfile.Create(path) + if err != nil { + t.Fatalf("Create: %v", err) + } + defer f.Close() + + const ( + part1 = "part 1\n" + part2 = "part 2\n" + ) + _, err = f.WriteString(part1) + if err != nil { + t.Fatalf("WriteString: %v", err) + } + t.Logf("WriteString(%q) = ", part1) + + wait := mustBlock(t, "Read", func() { + b, err := lockedfile.Read(path) + if err != nil { + t.Errorf("Read: %v", err) + return + } + + const want = part1 + part2 + got := string(b) + if got == want { + t.Logf("Read(_) = %q", got) + } else { + t.Errorf("Read(_) = %q, _; want %q", got, want) + } + }) + + _, err = f.WriteString(part2) + if err != nil { + t.Errorf("WriteString: %v", err) + } else { + t.Logf("WriteString(%q) = ", part2) + } + f.Close() + + wait(t) +} + +func TestCanLockExistingFile(t *testing.T) { + t.Parallel() + + dir, remove := mustTempDir(t) + defer remove() + path := filepath.Join(dir, "existing.txt") + + if err := os.WriteFile(path, []byte("ok"), 0777); err != nil { + t.Fatalf("os.WriteFile: %v", err) + } + + f, err := lockedfile.Edit(path) + if err != nil { + t.Fatalf("first Edit: %v", err) + } + + wait := 
mustBlock(t, "Edit", func() { + other, err := lockedfile.Edit(path) + if err != nil { + t.Errorf("second Edit: %v", err) + } + other.Close() + }) + + f.Close() + wait(t) +} + +// TestSpuriousEDEADLK verifies that the spurious EDEADLK reported in +// https://golang.org/issue/32817 no longer occurs. +func TestSpuriousEDEADLK(t *testing.T) { + // P.1 locks file A. + // Q.3 locks file B. + // Q.3 blocks on file A. + // P.2 blocks on file B. (Spurious EDEADLK occurs here.) + // P.1 unlocks file A. + // Q.3 unblocks and locks file A. + // Q.3 unlocks files A and B. + // P.2 unblocks and locks file B. + // P.2 unlocks file B. + + testenv.MustHaveExec(t) + + dirVar := t.Name() + "DIR" + + if dir := os.Getenv(dirVar); dir != "" { + // Q.3 locks file B. + b, err := lockedfile.Edit(filepath.Join(dir, "B")) + if err != nil { + t.Fatal(err) + } + defer b.Close() + + if err := os.WriteFile(filepath.Join(dir, "locked"), []byte("ok"), 0666); err != nil { + t.Fatal(err) + } + + // Q.3 blocks on file A. + a, err := lockedfile.Edit(filepath.Join(dir, "A")) + // Q.3 unblocks and locks file A. + if err != nil { + t.Fatal(err) + } + defer a.Close() + + // Q.3 unlocks files A and B. + return + } + + dir, remove := mustTempDir(t) + defer remove() + + // P.1 locks file A. + a, err := lockedfile.Edit(filepath.Join(dir, "A")) + if err != nil { + t.Fatal(err) + } + + cmd := testenv.Command(t, os.Args[0], "-test.run=^"+t.Name()+"$") + cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", dirVar, dir)) + + qDone := make(chan struct{}) + waitQ := mustBlock(t, "Edit A and B in subprocess", func() { + out, err := cmd.CombinedOutput() + if err != nil { + t.Errorf("%v:\n%s", err, out) + } + close(qDone) + }) + + // Wait until process Q has either failed or locked file B. + // Otherwise, P.2 might not block on file B as intended. 
+locked: + for { + if _, err := os.Stat(filepath.Join(dir, "locked")); !os.IsNotExist(err) { + break locked + } + timer := time.NewTimer(1 * time.Millisecond) + select { + case <-qDone: + timer.Stop() + break locked + case <-timer.C: + } + } + + waitP2 := mustBlock(t, "Edit B", func() { + // P.2 blocks on file B. (Spurious EDEADLK occurs here.) + b, err := lockedfile.Edit(filepath.Join(dir, "B")) + // P.2 unblocks and locks file B. + if err != nil { + t.Error(err) + return + } + // P.2 unlocks file B. + b.Close() + }) + + // P.1 unlocks file A. + a.Close() + + waitQ(t) + waitP2(t) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/mutex.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/mutex.go new file mode 100644 index 0000000000000000000000000000000000000000..180a36c62016ba045f1829680e91182ac6d716a7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/mutex.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lockedfile + +import ( + "fmt" + "os" + "sync" +) + +// A Mutex provides mutual exclusion within and across processes by locking a +// well-known file. Such a file generally guards some other part of the +// filesystem: for example, a Mutex file in a directory might guard access to +// the entire tree rooted in that directory. +// +// Mutex does not implement sync.Locker: unlike a sync.Mutex, a lockedfile.Mutex +// can fail to lock (e.g. if there is a permission error in the filesystem). +// +// Like a sync.Mutex, a Mutex may be included as a field of a larger struct but +// must not be copied after first use. The Path field must be set before first +// use and must not be change thereafter. +type Mutex struct { + Path string // The path to the well-known lock file. Must be non-empty. + mu sync.Mutex // A redundant mutex. 
The race detector doesn't know about file locking, so in tests we may need to lock something that it understands. +} + +// MutexAt returns a new Mutex with Path set to the given non-empty path. +func MutexAt(path string) *Mutex { + if path == "" { + panic("lockedfile.MutexAt: path must be non-empty") + } + return &Mutex{Path: path} +} + +func (mu *Mutex) String() string { + return fmt.Sprintf("lockedfile.Mutex(%s)", mu.Path) +} + +// Lock attempts to lock the Mutex. +// +// If successful, Lock returns a non-nil unlock function: it is provided as a +// return-value instead of a separate method to remind the caller to check the +// accompanying error. (See https://golang.org/issue/20803.) +func (mu *Mutex) Lock() (unlock func(), err error) { + if mu.Path == "" { + panic("lockedfile.Mutex: missing Path during Lock") + } + + // We could use either O_RDWR or O_WRONLY here. If we choose O_RDWR and the + // file at mu.Path is write-only, the call to OpenFile will fail with a + // permission error. That's actually what we want: if we add an RLock method + // in the future, it should call OpenFile with O_RDONLY and will require the + // files must be readable, so we should not let the caller make any + // assumptions about Mutex working with write-only files. + f, err := OpenFile(mu.Path, os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + return nil, err + } + mu.mu.Lock() + + return func() { + mu.mu.Unlock() + f.Close() + }, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/transform_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/transform_test.go new file mode 100644 index 0000000000000000000000000000000000000000..f8b28024042594eed75abe6f0ba6b2a5b5eda006 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/lockedfile/transform_test.go @@ -0,0 +1,105 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// js and wasip1 do not support inter-process file locking. +// +//go:build !js && !wasip1 + +package lockedfile_test + +import ( + "bytes" + "encoding/binary" + "math/rand" + "path/filepath" + "testing" + "time" + + "cmd/go/internal/lockedfile" +) + +func isPowerOf2(x int) bool { + return x > 0 && x&(x-1) == 0 +} + +func roundDownToPowerOf2(x int) int { + if x <= 0 { + panic("nonpositive x") + } + bit := 1 + for x != bit { + x = x &^ bit + bit <<= 1 + } + return x +} + +func TestTransform(t *testing.T) { + dir, remove := mustTempDir(t) + defer remove() + path := filepath.Join(dir, "blob.bin") + + const maxChunkWords = 8 << 10 + buf := make([]byte, 2*maxChunkWords*8) + for i := uint64(0); i < 2*maxChunkWords; i++ { + binary.LittleEndian.PutUint64(buf[i*8:], i) + } + if err := lockedfile.Write(path, bytes.NewReader(buf[:8]), 0666); err != nil { + t.Fatal(err) + } + + var attempts int64 = 128 + if !testing.Short() { + attempts *= 16 + } + const parallel = 32 + + var sem = make(chan bool, parallel) + + for n := attempts; n > 0; n-- { + sem <- true + go func() { + defer func() { <-sem }() + + time.Sleep(time.Duration(rand.Intn(100)) * time.Microsecond) + chunkWords := roundDownToPowerOf2(rand.Intn(maxChunkWords) + 1) + offset := rand.Intn(chunkWords) + + err := lockedfile.Transform(path, func(data []byte) (chunk []byte, err error) { + chunk = buf[offset*8 : (offset+chunkWords)*8] + + if len(data)&^7 != len(data) { + t.Errorf("read %d bytes, but each write is an integer multiple of 8 bytes", len(data)) + return chunk, nil + } + + words := len(data) / 8 + if !isPowerOf2(words) { + t.Errorf("read %d 8-byte words, but each write is a power-of-2 number of words", words) + return chunk, nil + } + + u := binary.LittleEndian.Uint64(data) + for i := 1; i < words; i++ { + next := binary.LittleEndian.Uint64(data[i*8:]) + if next != u+1 { + t.Errorf("wrote sequential 
integers, but read integer out of sequence at offset %d", i) + return chunk, nil + } + u = next + } + + return chunk, nil + }) + + if err != nil { + t.Errorf("unexpected error from Transform: %v", err) + } + }() + } + + for n := parallel; n > 0; n-- { + sem <- true + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/mmap/mmap.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/mmap/mmap.go new file mode 100644 index 0000000000000000000000000000000000000000..fcbd3e08c1c5174f68bac1c6d5bd0fe0286e22cc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/mmap/mmap.go @@ -0,0 +1,31 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This package is a lightly modified version of the mmap code +// in github.com/google/codesearch/index. + +// The mmap package provides an abstraction for memory mapping files +// on different platforms. +package mmap + +import ( + "os" +) + +// Data is mmap'ed read-only data from a file. +// The backing file is never closed, so Data +// remains valid for the lifetime of the process. +type Data struct { + f *os.File + Data []byte +} + +// Mmap maps the given file into memory. +func Mmap(file string) (Data, error) { + f, err := os.Open(file) + if err != nil { + return Data{}, err + } + return mmapFile(f) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/mmap/mmap_other.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/mmap/mmap_other.go new file mode 100644 index 0000000000000000000000000000000000000000..4d2844fc3731355df9feb8a6f1ca7d36fdf1d375 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/mmap/mmap_other.go @@ -0,0 +1,21 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build (js && wasm) || wasip1 || plan9 + +package mmap + +import ( + "io" + "os" +) + +// mmapFile on other systems doesn't mmap the file. It just reads everything. +func mmapFile(f *os.File) (Data, error) { + b, err := io.ReadAll(f) + if err != nil { + return Data{}, err + } + return Data{f, b}, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/mmap/mmap_unix.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/mmap/mmap_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..5dce87236870bc5967d990cfed6fa14791d9aab9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/mmap/mmap_unix.go @@ -0,0 +1,36 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package mmap + +import ( + "fmt" + "io/fs" + "os" + "syscall" +) + +func mmapFile(f *os.File) (Data, error) { + st, err := f.Stat() + if err != nil { + return Data{}, err + } + size := st.Size() + pagesize := int64(os.Getpagesize()) + if int64(int(size+(pagesize-1))) != size+(pagesize-1) { + return Data{}, fmt.Errorf("%s: too large for mmap", f.Name()) + } + n := int(size) + if n == 0 { + return Data{f, nil}, nil + } + mmapLength := int(((size + pagesize - 1) / pagesize) * pagesize) // round up to page size + data, err := syscall.Mmap(int(f.Fd()), 0, mmapLength, syscall.PROT_READ, syscall.MAP_SHARED) + if err != nil { + return Data{}, &fs.PathError{Op: "mmap", Path: f.Name(), Err: err} + } + return Data{f, data[:n]}, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/mmap/mmap_windows.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/mmap/mmap_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..d00bef71e5cfc5fbbbd5208b39baa56951e456a6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/mmap/mmap_windows.go @@ -0,0 +1,41 @@ +// Copyright 2011 The Go 
Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mmap + +import ( + "fmt" + "os" + "syscall" + "unsafe" + + "internal/syscall/windows" +) + +func mmapFile(f *os.File) (Data, error) { + st, err := f.Stat() + if err != nil { + return Data{}, err + } + size := st.Size() + if size == 0 { + return Data{f, nil}, nil + } + h, err := syscall.CreateFileMapping(syscall.Handle(f.Fd()), nil, syscall.PAGE_READONLY, 0, 0, nil) + if err != nil { + return Data{}, fmt.Errorf("CreateFileMapping %s: %w", f.Name(), err) + } + + addr, err := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, 0) + if err != nil { + return Data{}, fmt.Errorf("MapViewOfFile %s: %w", f.Name(), err) + } + var info windows.MemoryBasicInformation + err = windows.VirtualQuery(addr, &info, unsafe.Sizeof(info)) + if err != nil { + return Data{}, fmt.Errorf("VirtualQuery %s: %w", f.Name(), err) + } + data := unsafe.Slice((*byte)(unsafe.Pointer(addr)), int(info.RegionSize)) + return Data{f, data}, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/download.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/download.go new file mode 100644 index 0000000000000000000000000000000000000000..373accef0679610cd064445706fa46ee8065f1c9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/download.go @@ -0,0 +1,389 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package modcmd + +import ( + "context" + "encoding/json" + "errors" + "os" + "runtime" + "sync" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/gover" + "cmd/go/internal/modfetch" + "cmd/go/internal/modfetch/codehost" + "cmd/go/internal/modload" + "cmd/go/internal/toolchain" + + "golang.org/x/mod/module" +) + +var cmdDownload = &base.Command{ + UsageLine: "go mod download [-x] [-json] [-reuse=old.json] [modules]", + Short: "download modules to local cache", + Long: ` +Download downloads the named modules, which can be module patterns selecting +dependencies of the main module or module queries of the form path@version. + +With no arguments, download applies to the modules needed to build and test +the packages in the main module: the modules explicitly required by the main +module if it is at 'go 1.17' or higher, or all transitively-required modules +if at 'go 1.16' or lower. + +The go command will automatically download modules as needed during ordinary +execution. The "go mod download" command is useful mainly for pre-filling +the local cache or to compute the answers for a Go module proxy. + +By default, download writes nothing to standard output. It may print progress +messages and errors to standard error. 
+ +The -json flag causes download to print a sequence of JSON objects +to standard output, describing each downloaded module (or failure), +corresponding to this Go struct: + + type Module struct { + Path string // module path + Query string // version query corresponding to this version + Version string // module version + Error string // error loading module + Info string // absolute path to cached .info file + GoMod string // absolute path to cached .mod file + Zip string // absolute path to cached .zip file + Dir string // absolute path to cached source root directory + Sum string // checksum for path, version (as in go.sum) + GoModSum string // checksum for go.mod (as in go.sum) + Origin any // provenance of module + Reuse bool // reuse of old module info is safe + } + +The -reuse flag accepts the name of file containing the JSON output of a +previous 'go mod download -json' invocation. The go command may use this +file to determine that a module is unchanged since the previous invocation +and avoid redownloading it. Modules that are not redownloaded will be marked +in the new output by setting the Reuse field to true. Normally the module +cache provides this kind of reuse automatically; the -reuse flag can be +useful on systems that do not preserve the module cache. + +The -x flag causes download to print the commands download executes. + +See https://golang.org/ref/mod#go-mod-download for more about 'go mod download'. + +See https://golang.org/ref/mod#version-queries for more about version queries. + `, +} + +var ( + downloadJSON = cmdDownload.Flag.Bool("json", false, "") + downloadReuse = cmdDownload.Flag.String("reuse", "", "") +) + +func init() { + cmdDownload.Run = runDownload // break init cycle + + // TODO(jayconrod): https://golang.org/issue/35849 Apply -x to other 'go mod' commands. 
+ cmdDownload.Flag.BoolVar(&cfg.BuildX, "x", false, "") + base.AddChdirFlag(&cmdDownload.Flag) + base.AddModCommonFlags(&cmdDownload.Flag) +} + +// A ModuleJSON describes the result of go mod download. +type ModuleJSON struct { + Path string `json:",omitempty"` + Version string `json:",omitempty"` + Query string `json:",omitempty"` + Error string `json:",omitempty"` + Info string `json:",omitempty"` + GoMod string `json:",omitempty"` + Zip string `json:",omitempty"` + Dir string `json:",omitempty"` + Sum string `json:",omitempty"` + GoModSum string `json:",omitempty"` + + Origin *codehost.Origin `json:",omitempty"` + Reuse bool `json:",omitempty"` +} + +func runDownload(ctx context.Context, cmd *base.Command, args []string) { + modload.InitWorkfile() + + // Check whether modules are enabled and whether we're in a module. + modload.ForceUseModules = true + modload.ExplicitWriteGoMod = true + haveExplicitArgs := len(args) > 0 + + if modload.HasModRoot() || modload.WorkFilePath() != "" { + modload.LoadModFile(ctx) // to fill MainModules + + if haveExplicitArgs { + for _, mainModule := range modload.MainModules.Versions() { + targetAtUpgrade := mainModule.Path + "@upgrade" + targetAtPatch := mainModule.Path + "@patch" + for _, arg := range args { + switch arg { + case mainModule.Path, targetAtUpgrade, targetAtPatch: + os.Stderr.WriteString("go: skipping download of " + arg + " that resolves to the main module\n") + } + } + } + } else if modload.WorkFilePath() != "" { + // TODO(#44435): Think about what the correct query is to download the + // right set of modules. Also see code review comment at + // https://go-review.googlesource.com/c/go/+/359794/comments/ce946a80_6cf53992. 
+ args = []string{"all"} + } else { + mainModule := modload.MainModules.Versions()[0] + modFile := modload.MainModules.ModFile(mainModule) + if modFile.Go == nil || gover.Compare(modFile.Go.Version, gover.ExplicitIndirectVersion) < 0 { + if len(modFile.Require) > 0 { + args = []string{"all"} + } + } else { + // As of Go 1.17, the go.mod file explicitly requires every module + // that provides any package imported by the main module. + // 'go mod download' is typically run before testing packages in the + // main module, so by default we shouldn't download the others + // (which are presumed irrelevant to the packages in the main module). + // See https://golang.org/issue/44435. + // + // However, we also need to load the full module graph, to ensure that + // we have downloaded enough of the module graph to run 'go list all', + // 'go mod graph', and similar commands. + _, err := modload.LoadModGraph(ctx, "") + if err != nil { + // TODO(#64008): call base.Fatalf instead of toolchain.SwitchOrFatal + // here, since we can only reach this point with an outdated toolchain + // if the go.mod file is inconsistent. 
+ toolchain.SwitchOrFatal(ctx, err) + } + + for _, m := range modFile.Require { + args = append(args, m.Mod.Path) + } + } + } + } + + if len(args) == 0 { + if modload.HasModRoot() { + os.Stderr.WriteString("go: no module dependencies to download\n") + } else { + base.Errorf("go: no modules specified (see 'go help mod download')") + } + base.Exit() + } + + if *downloadReuse != "" && modload.HasModRoot() { + base.Fatalf("go mod download -reuse cannot be used inside a module") + } + + var mods []*ModuleJSON + type token struct{} + sem := make(chan token, runtime.GOMAXPROCS(0)) + infos, infosErr := modload.ListModules(ctx, args, 0, *downloadReuse) + + // There is a bit of a chicken-and-egg problem here: ideally we need to know + // which Go version to switch to to download the requested modules, but if we + // haven't downloaded the module's go.mod file yet the GoVersion field of its + // info struct is not yet populated. + // + // We also need to be careful to only print the info for each module once + // if the -json flag is set. + // + // In theory we could go through each module in the list, attempt to download + // its go.mod file, and record the maximum version (either from the file or + // from the resulting TooNewError), all before we try the actual full download + // of each module. + // + // For now, we go ahead and try all the downloads and collect the errors, and + // if any download failed due to a TooNewError, we switch toolchains and try + // again. Any downloads that already succeeded will still be in cache. + // That won't give optimal concurrency (we'll do two batches of concurrent + // downloads instead of all in one batch), and it might add a little overhead + // to look up the downloads from the first batch in the module cache when + // we see them again in the second batch. 
On the other hand, it's way simpler + // to implement, and not really any more expensive if the user is requesting + // no explicit arguments (their go.mod file should already list an appropriate + // toolchain version) or only one module (as is used by the Go Module Proxy). + + if infosErr != nil { + var sw toolchain.Switcher + sw.Error(infosErr) + if sw.NeedSwitch() { + sw.Switch(ctx) + } + // Otherwise, wait to report infosErr after we have downloaded + // when we can. + } + + if !haveExplicitArgs && modload.WorkFilePath() == "" { + // 'go mod download' is sometimes run without arguments to pre-populate the + // module cache. In modules that aren't at go 1.17 or higher, it may fetch + // modules that aren't needed to build packages in the main module. This is + // usually not intended, so don't save sums for downloaded modules + // (golang.org/issue/45332). We do still fix inconsistencies in go.mod + // though. + // + // TODO(#64008): In the future, report an error if go.mod or go.sum need to + // be updated after loading the build list. This may require setting + // the mode to "mod" or "readonly" depending on haveExplicitArgs. + if err := modload.WriteGoMod(ctx, modload.WriteOpts{}); err != nil { + base.Fatal(err) + } + } + + var downloadErrs sync.Map + for _, info := range infos { + if info.Replace != nil { + info = info.Replace + } + if info.Version == "" && info.Error == nil { + // main module or module replaced with file path. + // Nothing to download. + continue + } + m := &ModuleJSON{ + Path: info.Path, + Version: info.Version, + Query: info.Query, + Reuse: info.Reuse, + Origin: info.Origin, + } + mods = append(mods, m) + if info.Error != nil { + m.Error = info.Error.Err + continue + } + if m.Reuse { + continue + } + sem <- token{} + go func() { + err := DownloadModule(ctx, m) + if err != nil { + downloadErrs.Store(m, err) + m.Error = err.Error() + } + <-sem + }() + } + + // Fill semaphore channel to wait for goroutines to finish. 
+ for n := cap(sem); n > 0; n-- { + sem <- token{} + } + + // If there were explicit arguments + // (like 'go mod download golang.org/x/tools@latest'), + // check whether we need to upgrade the toolchain in order to download them. + // + // (If invoked without arguments, we expect the module graph to already + // be tidy and the go.mod file to declare a 'go' version that satisfies + // transitive requirements. If that invariant holds, then we should have + // already upgraded when we loaded the module graph, and should not need + // an additional check here. See https://go.dev/issue/45551.) + // + // We also allow upgrades if in a workspace because in workspace mode + // with no arguments we download the module pattern "all", + // which may include dependencies that are normally pruned out + // of the individual modules in the workspace. + if haveExplicitArgs || modload.WorkFilePath() != "" { + var sw toolchain.Switcher + // Add errors to the Switcher in deterministic order so that they will be + // logged deterministically. + for _, m := range mods { + if erri, ok := downloadErrs.Load(m); ok { + sw.Error(erri.(error)) + } + } + // Only call sw.Switch if it will actually switch. + // Otherwise, we may want to write the errors as JSON + // (instead of using base.Error as sw.Switch would), + // and we may also have other errors to report from the + // initial infos returned by ListModules. + if sw.NeedSwitch() { + sw.Switch(ctx) + } + } + + if *downloadJSON { + for _, m := range mods { + b, err := json.MarshalIndent(m, "", "\t") + if err != nil { + base.Fatal(err) + } + os.Stdout.Write(append(b, '\n')) + if m.Error != "" { + base.SetExitStatus(1) + } + } + } else { + for _, m := range mods { + if m.Error != "" { + base.Error(errors.New(m.Error)) + } + } + base.ExitIfErrors() + } + + // If there were explicit arguments, update go.mod and especially go.sum. 
+ // 'go mod download mod@version' is a useful way to add a sum without using + // 'go get mod@version', which may have other side effects. We print this in + // some error message hints. + // + // If we're in workspace mode, update go.work.sum with checksums for all of + // the modules we downloaded that aren't already recorded. Since a requirement + // in one module may upgrade a dependency of another, we can't be sure that + // the import graph matches the import graph of any given module in isolation, + // so we may end up needing to load packages from modules that wouldn't + // otherwise be relevant. + // + // TODO(#44435): If we adjust the set of modules downloaded in workspace mode, + // we may also need to adjust the logic for saving checksums here. + // + // Don't save sums for 'go mod download' without arguments unless we're in + // workspace mode; see comment above. + if haveExplicitArgs || modload.WorkFilePath() != "" { + if err := modload.WriteGoMod(ctx, modload.WriteOpts{}); err != nil { + base.Error(err) + } + } + + // If there was an error matching some of the requested packages, emit it now + // (after we've written the checksums for the modules that were downloaded + // successfully). + if infosErr != nil { + base.Error(infosErr) + } +} + +// DownloadModule runs 'go mod download' for m.Path@m.Version, +// leaving the results (including any error) in m itself. 
+func DownloadModule(ctx context.Context, m *ModuleJSON) error { + var err error + _, file, err := modfetch.InfoFile(ctx, m.Path, m.Version) + if err != nil { + return err + } + m.Info = file + m.GoMod, err = modfetch.GoModFile(ctx, m.Path, m.Version) + if err != nil { + return err + } + m.GoModSum, err = modfetch.GoModSum(ctx, m.Path, m.Version) + if err != nil { + return err + } + mod := module.Version{Path: m.Path, Version: m.Version} + m.Zip, err = modfetch.DownloadZip(ctx, mod) + if err != nil { + return err + } + m.Sum = modfetch.Sum(ctx, mod) + m.Dir, err = modfetch.Download(ctx, mod) + return err +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/edit.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/edit.go new file mode 100644 index 0000000000000000000000000000000000000000..db131b088143bf1cae48723a0b4d75fd27da8adc --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/edit.go @@ -0,0 +1,546 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// go mod edit + +package modcmd + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "strings" + + "cmd/go/internal/base" + "cmd/go/internal/gover" + "cmd/go/internal/lockedfile" + "cmd/go/internal/modfetch" + "cmd/go/internal/modload" + + "golang.org/x/mod/modfile" + "golang.org/x/mod/module" +) + +var cmdEdit = &base.Command{ + UsageLine: "go mod edit [editing flags] [-fmt|-print|-json] [go.mod]", + Short: "edit go.mod from tools or scripts", + Long: ` +Edit provides a command-line interface for editing go.mod, +for use primarily by tools or scripts. It reads only go.mod; +it does not look up information about the modules involved. +By default, edit reads and writes the go.mod file of the main module, +but a different target file can be specified after the editing flags. 
+ +The editing flags specify a sequence of editing operations. + +The -fmt flag reformats the go.mod file without making other changes. +This reformatting is also implied by any other modifications that use or +rewrite the go.mod file. The only time this flag is needed is if no other +flags are specified, as in 'go mod edit -fmt'. + +The -module flag changes the module's path (the go.mod file's module line). + +The -require=path@version and -droprequire=path flags +add and drop a requirement on the given module path and version. +Note that -require overrides any existing requirements on path. +These flags are mainly for tools that understand the module graph. +Users should prefer 'go get path@version' or 'go get path@none', +which make other go.mod adjustments as needed to satisfy +constraints imposed by other modules. + +The -exclude=path@version and -dropexclude=path@version flags +add and drop an exclusion for the given module path and version. +Note that -exclude=path@version is a no-op if that exclusion already exists. + +The -replace=old[@v]=new[@v] flag adds a replacement of the given +module path and version pair. If the @v in old@v is omitted, a +replacement without a version on the left side is added, which applies +to all versions of the old module path. If the @v in new@v is omitted, +the new path should be a local module root directory, not a module +path. Note that -replace overrides any redundant replacements for old[@v], +so omitting @v will drop existing replacements for specific versions. + +The -dropreplace=old[@v] flag drops a replacement of the given +module path and version pair. If the @v is omitted, a replacement without +a version on the left side is dropped. + +The -retract=version and -dropretract=version flags add and drop a +retraction on the given version. The version may be a single version +like "v1.2.3" or a closed interval like "[v1.1.0,v1.1.9]". Note that +-retract=version is a no-op if that retraction already exists. 
+ +The -require, -droprequire, -exclude, -dropexclude, -replace, +-dropreplace, -retract, and -dropretract editing flags may be repeated, +and the changes are applied in the order given. + +The -go=version flag sets the expected Go language version. + +The -toolchain=name flag sets the Go toolchain to use. + +The -print flag prints the final go.mod in its text format instead of +writing it back to go.mod. + +The -json flag prints the final go.mod file in JSON format instead of +writing it back to go.mod. The JSON output corresponds to these Go types: + + type Module struct { + Path string + Version string + } + + type GoMod struct { + Module ModPath + Go string + Toolchain string + Require []Require + Exclude []Module + Replace []Replace + Retract []Retract + } + + type ModPath struct { + Path string + Deprecated string + } + + type Require struct { + Path string + Version string + Indirect bool + } + + type Replace struct { + Old Module + New Module + } + + type Retract struct { + Low string + High string + Rationale string + } + +Retract entries representing a single version (not an interval) will have +the "Low" and "High" fields set to the same value. + +Note that this only describes the go.mod file itself, not other modules +referred to indirectly. For the full set of modules available to a build, +use 'go list -m -json all'. + +Edit also provides the -C, -n, and -x build flags. + +See https://golang.org/ref/mod#go-mod-edit for more about 'go mod edit'. 
+ `, +} + +var ( + editFmt = cmdEdit.Flag.Bool("fmt", false, "") + editGo = cmdEdit.Flag.String("go", "", "") + editToolchain = cmdEdit.Flag.String("toolchain", "", "") + editJSON = cmdEdit.Flag.Bool("json", false, "") + editPrint = cmdEdit.Flag.Bool("print", false, "") + editModule = cmdEdit.Flag.String("module", "", "") + edits []func(*modfile.File) // edits specified in flags +) + +type flagFunc func(string) + +func (f flagFunc) String() string { return "" } +func (f flagFunc) Set(s string) error { f(s); return nil } + +func init() { + cmdEdit.Run = runEdit // break init cycle + + cmdEdit.Flag.Var(flagFunc(flagRequire), "require", "") + cmdEdit.Flag.Var(flagFunc(flagDropRequire), "droprequire", "") + cmdEdit.Flag.Var(flagFunc(flagExclude), "exclude", "") + cmdEdit.Flag.Var(flagFunc(flagDropReplace), "dropreplace", "") + cmdEdit.Flag.Var(flagFunc(flagReplace), "replace", "") + cmdEdit.Flag.Var(flagFunc(flagDropExclude), "dropexclude", "") + cmdEdit.Flag.Var(flagFunc(flagRetract), "retract", "") + cmdEdit.Flag.Var(flagFunc(flagDropRetract), "dropretract", "") + + base.AddBuildFlagsNX(&cmdEdit.Flag) + base.AddChdirFlag(&cmdEdit.Flag) + base.AddModCommonFlags(&cmdEdit.Flag) +} + +func runEdit(ctx context.Context, cmd *base.Command, args []string) { + anyFlags := *editModule != "" || + *editGo != "" || + *editToolchain != "" || + *editJSON || + *editPrint || + *editFmt || + len(edits) > 0 + + if !anyFlags { + base.Fatalf("go: no flags specified (see 'go help mod edit').") + } + + if *editJSON && *editPrint { + base.Fatalf("go: cannot use both -json and -print") + } + + if len(args) > 1 { + base.Fatalf("go: too many arguments") + } + var gomod string + if len(args) == 1 { + gomod = args[0] + } else { + gomod = modload.ModFilePath() + } + + if *editModule != "" { + if err := module.CheckImportPath(*editModule); err != nil { + base.Fatalf("go: invalid -module: %v", err) + } + } + + if *editGo != "" && *editGo != "none" { + if !modfile.GoVersionRE.MatchString(*editGo) { 
+ base.Fatalf(`go mod: invalid -go option; expecting something like "-go %s"`, gover.Local()) + } + } + if *editToolchain != "" && *editToolchain != "none" { + if !modfile.ToolchainRE.MatchString(*editToolchain) { + base.Fatalf(`go mod: invalid -toolchain option; expecting something like "-toolchain go%s"`, gover.Local()) + } + } + + data, err := lockedfile.Read(gomod) + if err != nil { + base.Fatal(err) + } + + modFile, err := modfile.Parse(gomod, data, nil) + if err != nil { + base.Fatalf("go: errors parsing %s:\n%s", base.ShortPath(gomod), err) + } + + if *editModule != "" { + modFile.AddModuleStmt(*editModule) + } + + if *editGo == "none" { + modFile.DropGoStmt() + } else if *editGo != "" { + if err := modFile.AddGoStmt(*editGo); err != nil { + base.Fatalf("go: internal error: %v", err) + } + } + if *editToolchain == "none" { + modFile.DropToolchainStmt() + } else if *editToolchain != "" { + if err := modFile.AddToolchainStmt(*editToolchain); err != nil { + base.Fatalf("go: internal error: %v", err) + } + } + + if len(edits) > 0 { + for _, edit := range edits { + edit(modFile) + } + } + modFile.SortBlocks() + modFile.Cleanup() // clean file after edits + + if *editJSON { + editPrintJSON(modFile) + return + } + + out, err := modFile.Format() + if err != nil { + base.Fatal(err) + } + + if *editPrint { + os.Stdout.Write(out) + return + } + + // Make a best-effort attempt to acquire the side lock, only to exclude + // previous versions of the 'go' command from making simultaneous edits. + if unlock, err := modfetch.SideLock(ctx); err == nil { + defer unlock() + } + + err = lockedfile.Transform(gomod, func(lockedData []byte) ([]byte, error) { + if !bytes.Equal(lockedData, data) { + return nil, errors.New("go.mod changed during editing; not overwriting") + } + return out, nil + }) + if err != nil { + base.Fatal(err) + } +} + +// parsePathVersion parses -flag=arg expecting arg to be path@version. 
+func parsePathVersion(flag, arg string) (path, version string) { + before, after, found := strings.Cut(arg, "@") + if !found { + base.Fatalf("go: -%s=%s: need path@version", flag, arg) + } + path, version = strings.TrimSpace(before), strings.TrimSpace(after) + if err := module.CheckImportPath(path); err != nil { + base.Fatalf("go: -%s=%s: invalid path: %v", flag, arg, err) + } + + if !allowedVersionArg(version) { + base.Fatalf("go: -%s=%s: invalid version %q", flag, arg, version) + } + + return path, version +} + +// parsePath parses -flag=arg expecting arg to be path (not path@version). +func parsePath(flag, arg string) (path string) { + if strings.Contains(arg, "@") { + base.Fatalf("go: -%s=%s: need just path, not path@version", flag, arg) + } + path = arg + if err := module.CheckImportPath(path); err != nil { + base.Fatalf("go: -%s=%s: invalid path: %v", flag, arg, err) + } + return path +} + +// parsePathVersionOptional parses path[@version], using adj to +// describe any errors. +func parsePathVersionOptional(adj, arg string, allowDirPath bool) (path, version string, err error) { + if allowDirPath && modfile.IsDirectoryPath(arg) { + return arg, "", nil + } + before, after, found := strings.Cut(arg, "@") + if !found { + path = arg + } else { + path, version = strings.TrimSpace(before), strings.TrimSpace(after) + } + if err := module.CheckImportPath(path); err != nil { + return path, version, fmt.Errorf("invalid %s path: %v", adj, err) + } + if path != arg && !allowedVersionArg(version) { + return path, version, fmt.Errorf("invalid %s version: %q", adj, version) + } + return path, version, nil +} + +// parseVersionInterval parses a single version like "v1.2.3" or a closed +// interval like "[v1.2.3,v1.4.5]". Note that a single version has the same +// representation as an interval with equal upper and lower bounds: both +// Low and High are set. 
+func parseVersionInterval(arg string) (modfile.VersionInterval, error) { + if !strings.HasPrefix(arg, "[") { + if !allowedVersionArg(arg) { + return modfile.VersionInterval{}, fmt.Errorf("invalid version: %q", arg) + } + return modfile.VersionInterval{Low: arg, High: arg}, nil + } + if !strings.HasSuffix(arg, "]") { + return modfile.VersionInterval{}, fmt.Errorf("invalid version interval: %q", arg) + } + s := arg[1 : len(arg)-1] + before, after, found := strings.Cut(s, ",") + if !found { + return modfile.VersionInterval{}, fmt.Errorf("invalid version interval: %q", arg) + } + low := strings.TrimSpace(before) + high := strings.TrimSpace(after) + if !allowedVersionArg(low) || !allowedVersionArg(high) { + return modfile.VersionInterval{}, fmt.Errorf("invalid version interval: %q", arg) + } + return modfile.VersionInterval{Low: low, High: high}, nil +} + +// allowedVersionArg returns whether a token may be used as a version in go.mod. +// We don't call modfile.CheckPathVersion, because that insists on versions +// being in semver form, but here we want to allow versions like "master" or +// "1234abcdef", which the go command will resolve the next time it runs (or +// during -fix). Even so, we need to make sure the version is a valid token. +func allowedVersionArg(arg string) bool { + return !modfile.MustQuote(arg) +} + +// flagRequire implements the -require flag. +func flagRequire(arg string) { + path, version := parsePathVersion("require", arg) + edits = append(edits, func(f *modfile.File) { + if err := f.AddRequire(path, version); err != nil { + base.Fatalf("go: -require=%s: %v", arg, err) + } + }) +} + +// flagDropRequire implements the -droprequire flag. +func flagDropRequire(arg string) { + path := parsePath("droprequire", arg) + edits = append(edits, func(f *modfile.File) { + if err := f.DropRequire(path); err != nil { + base.Fatalf("go: -droprequire=%s: %v", arg, err) + } + }) +} + +// flagExclude implements the -exclude flag. 
+func flagExclude(arg string) { + path, version := parsePathVersion("exclude", arg) + edits = append(edits, func(f *modfile.File) { + if err := f.AddExclude(path, version); err != nil { + base.Fatalf("go: -exclude=%s: %v", arg, err) + } + }) +} + +// flagDropExclude implements the -dropexclude flag. +func flagDropExclude(arg string) { + path, version := parsePathVersion("dropexclude", arg) + edits = append(edits, func(f *modfile.File) { + if err := f.DropExclude(path, version); err != nil { + base.Fatalf("go: -dropexclude=%s: %v", arg, err) + } + }) +} + +// flagReplace implements the -replace flag. +func flagReplace(arg string) { + before, after, found := strings.Cut(arg, "=") + if !found { + base.Fatalf("go: -replace=%s: need old[@v]=new[@w] (missing =)", arg) + } + old, new := strings.TrimSpace(before), strings.TrimSpace(after) + if strings.HasPrefix(new, ">") { + base.Fatalf("go: -replace=%s: separator between old and new is =, not =>", arg) + } + oldPath, oldVersion, err := parsePathVersionOptional("old", old, false) + if err != nil { + base.Fatalf("go: -replace=%s: %v", arg, err) + } + newPath, newVersion, err := parsePathVersionOptional("new", new, true) + if err != nil { + base.Fatalf("go: -replace=%s: %v", arg, err) + } + if newPath == new && !modfile.IsDirectoryPath(new) { + base.Fatalf("go: -replace=%s: unversioned new path must be local directory", arg) + } + + edits = append(edits, func(f *modfile.File) { + if err := f.AddReplace(oldPath, oldVersion, newPath, newVersion); err != nil { + base.Fatalf("go: -replace=%s: %v", arg, err) + } + }) +} + +// flagDropReplace implements the -dropreplace flag. 
+func flagDropReplace(arg string) { + path, version, err := parsePathVersionOptional("old", arg, true) + if err != nil { + base.Fatalf("go: -dropreplace=%s: %v", arg, err) + } + edits = append(edits, func(f *modfile.File) { + if err := f.DropReplace(path, version); err != nil { + base.Fatalf("go: -dropreplace=%s: %v", arg, err) + } + }) +} + +// flagRetract implements the -retract flag. +func flagRetract(arg string) { + vi, err := parseVersionInterval(arg) + if err != nil { + base.Fatalf("go: -retract=%s: %v", arg, err) + } + edits = append(edits, func(f *modfile.File) { + if err := f.AddRetract(vi, ""); err != nil { + base.Fatalf("go: -retract=%s: %v", arg, err) + } + }) +} + +// flagDropRetract implements the -dropretract flag. +func flagDropRetract(arg string) { + vi, err := parseVersionInterval(arg) + if err != nil { + base.Fatalf("go: -dropretract=%s: %v", arg, err) + } + edits = append(edits, func(f *modfile.File) { + if err := f.DropRetract(vi); err != nil { + base.Fatalf("go: -dropretract=%s: %v", arg, err) + } + }) +} + +// fileJSON is the -json output data structure. +type fileJSON struct { + Module editModuleJSON + Go string `json:",omitempty"` + Toolchain string `json:",omitempty"` + Require []requireJSON + Exclude []module.Version + Replace []replaceJSON + Retract []retractJSON +} + +type editModuleJSON struct { + Path string + Deprecated string `json:",omitempty"` +} + +type requireJSON struct { + Path string + Version string `json:",omitempty"` + Indirect bool `json:",omitempty"` +} + +type replaceJSON struct { + Old module.Version + New module.Version +} + +type retractJSON struct { + Low string `json:",omitempty"` + High string `json:",omitempty"` + Rationale string `json:",omitempty"` +} + +// editPrintJSON prints the -json output. 
+func editPrintJSON(modFile *modfile.File) { + var f fileJSON + if modFile.Module != nil { + f.Module = editModuleJSON{ + Path: modFile.Module.Mod.Path, + Deprecated: modFile.Module.Deprecated, + } + } + if modFile.Go != nil { + f.Go = modFile.Go.Version + } + if modFile.Toolchain != nil { + f.Toolchain = modFile.Toolchain.Name + } + for _, r := range modFile.Require { + f.Require = append(f.Require, requireJSON{Path: r.Mod.Path, Version: r.Mod.Version, Indirect: r.Indirect}) + } + for _, x := range modFile.Exclude { + f.Exclude = append(f.Exclude, x.Mod) + } + for _, r := range modFile.Replace { + f.Replace = append(f.Replace, replaceJSON{r.Old, r.New}) + } + for _, r := range modFile.Retract { + f.Retract = append(f.Retract, retractJSON{r.Low, r.High, r.Rationale}) + } + data, err := json.MarshalIndent(&f, "", "\t") + if err != nil { + base.Fatalf("go: internal error: %v", err) + } + data = append(data, '\n') + os.Stdout.Write(data) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/graph.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/graph.go new file mode 100644 index 0000000000000000000000000000000000000000..172c1dda5ce8fbbe30105ce49944e4b06ce4a78f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/graph.go @@ -0,0 +1,96 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// go mod graph + +package modcmd + +import ( + "bufio" + "context" + "os" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/gover" + "cmd/go/internal/modload" + "cmd/go/internal/toolchain" + + "golang.org/x/mod/module" +) + +var cmdGraph = &base.Command{ + UsageLine: "go mod graph [-go=version] [-x]", + Short: "print module requirement graph", + Long: ` +Graph prints the module requirement graph (with replacements applied) +in text form. 
Each line in the output has two space-separated fields: a module +and one of its requirements. Each module is identified as a string of the form +path@version, except for the main module, which has no @version suffix. + +The -go flag causes graph to report the module graph as loaded by the +given Go version, instead of the version indicated by the 'go' directive +in the go.mod file. + +The -x flag causes graph to print the commands graph executes. + +See https://golang.org/ref/mod#go-mod-graph for more about 'go mod graph'. + `, + Run: runGraph, +} + +var ( + graphGo goVersionFlag +) + +func init() { + cmdGraph.Flag.Var(&graphGo, "go", "") + cmdGraph.Flag.BoolVar(&cfg.BuildX, "x", false, "") + base.AddChdirFlag(&cmdGraph.Flag) + base.AddModCommonFlags(&cmdGraph.Flag) +} + +func runGraph(ctx context.Context, cmd *base.Command, args []string) { + modload.InitWorkfile() + + if len(args) > 0 { + base.Fatalf("go: 'go mod graph' accepts no arguments") + } + modload.ForceUseModules = true + modload.RootMode = modload.NeedRoot + + goVersion := graphGo.String() + if goVersion != "" && gover.Compare(gover.Local(), goVersion) < 0 { + toolchain.SwitchOrFatal(ctx, &gover.TooNewError{ + What: "-go flag", + GoVersion: goVersion, + }) + } + + mg, err := modload.LoadModGraph(ctx, goVersion) + if err != nil { + base.Fatal(err) + } + + w := bufio.NewWriter(os.Stdout) + defer w.Flush() + + format := func(m module.Version) { + w.WriteString(m.Path) + if m.Version != "" { + w.WriteString("@") + w.WriteString(m.Version) + } + } + + mg.WalkBreadthFirst(func(m module.Version) { + reqs, _ := mg.RequiredBy(m) + for _, r := range reqs { + format(m) + w.WriteByte(' ') + format(r) + w.WriteByte('\n') + } + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/init.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/init.go new file mode 100644 index 0000000000000000000000000000000000000000..facdaa9911c36514539e413e6778751191c5eb33 --- /dev/null +++ 
b/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/init.go @@ -0,0 +1,49 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// go mod init + +package modcmd + +import ( + "cmd/go/internal/base" + "cmd/go/internal/modload" + "context" +) + +var cmdInit = &base.Command{ + UsageLine: "go mod init [module-path]", + Short: "initialize new module in current directory", + Long: ` +Init initializes and writes a new go.mod file in the current directory, in +effect creating a new module rooted at the current directory. The go.mod file +must not already exist. + +Init accepts one optional argument, the module path for the new module. If the +module path argument is omitted, init will attempt to infer the module path +using import comments in .go files, vendoring tool configuration files (like +Gopkg.lock), and the current directory (if in GOPATH). + +See https://golang.org/ref/mod#go-mod-init for more about 'go mod init'. +`, + Run: runInit, +} + +func init() { + base.AddChdirFlag(&cmdInit.Flag) + base.AddModCommonFlags(&cmdInit.Flag) +} + +func runInit(ctx context.Context, cmd *base.Command, args []string) { + if len(args) > 1 { + base.Fatalf("go: 'go mod init' accepts at most one argument") + } + var modPath string + if len(args) == 1 { + modPath = args[0] + } + + modload.ForceUseModules = true + modload.CreateModFile(ctx, modPath) // does all the hard work +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/mod.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/mod.go new file mode 100644 index 0000000000000000000000000000000000000000..125ba336a0edda95e7570253c5ec40b962a91c56 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/mod.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package modcmd implements the “go mod” command. +package modcmd + +import ( + "cmd/go/internal/base" +) + +var CmdMod = &base.Command{ + UsageLine: "go mod", + Short: "module maintenance", + Long: `Go mod provides access to operations on modules. + +Note that support for modules is built into all the go commands, +not just 'go mod'. For example, day-to-day adding, removing, upgrading, +and downgrading of dependencies should be done using 'go get'. +See 'go help modules' for an overview of module functionality. + `, + + Commands: []*base.Command{ + cmdDownload, + cmdEdit, + cmdGraph, + cmdInit, + cmdTidy, + cmdVendor, + cmdVerify, + cmdWhy, + }, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/tidy.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/tidy.go new file mode 100644 index 0000000000000000000000000000000000000000..36be9260574b0b35d8d3d3ffab2a4de04af33f9a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/tidy.go @@ -0,0 +1,139 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// go mod tidy + +package modcmd + +import ( + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/gover" + "cmd/go/internal/imports" + "cmd/go/internal/modload" + "cmd/go/internal/toolchain" + "context" + "fmt" + + "golang.org/x/mod/modfile" +) + +var cmdTidy = &base.Command{ + UsageLine: "go mod tidy [-e] [-v] [-x] [-go=version] [-compat=version]", + Short: "add missing and remove unused modules", + Long: ` +Tidy makes sure go.mod matches the source code in the module. +It adds any missing modules necessary to build the current module's +packages and dependencies, and it removes unused modules that +don't provide any relevant packages. 
It also adds any missing entries +to go.sum and removes any unnecessary ones. + +The -v flag causes tidy to print information about removed modules +to standard error. + +The -e flag causes tidy to attempt to proceed despite errors +encountered while loading packages. + +The -go flag causes tidy to update the 'go' directive in the go.mod +file to the given version, which may change which module dependencies +are retained as explicit requirements in the go.mod file. +(Go versions 1.17 and higher retain more requirements in order to +support lazy module loading.) + +The -compat flag preserves any additional checksums needed for the +'go' command from the indicated major Go release to successfully load +the module graph, and causes tidy to error out if that version of the +'go' command would load any imported package from a different module +version. By default, tidy acts as if the -compat flag were set to the +version prior to the one indicated by the 'go' directive in the go.mod +file. + +The -x flag causes tidy to print the commands download executes. + +See https://golang.org/ref/mod#go-mod-tidy for more about 'go mod tidy'. + `, + Run: runTidy, +} + +var ( + tidyE bool // if true, report errors but proceed anyway. + tidyGo goVersionFlag // go version to write to the tidied go.mod file (toggles lazy loading) + tidyCompat goVersionFlag // go version for which the tidied go.mod and go.sum files should be “compatible” +) + +func init() { + cmdTidy.Flag.BoolVar(&cfg.BuildV, "v", false, "") + cmdTidy.Flag.BoolVar(&cfg.BuildX, "x", false, "") + cmdTidy.Flag.BoolVar(&tidyE, "e", false, "") + cmdTidy.Flag.Var(&tidyGo, "go", "") + cmdTidy.Flag.Var(&tidyCompat, "compat", "") + base.AddChdirFlag(&cmdTidy.Flag) + base.AddModCommonFlags(&cmdTidy.Flag) +} + +// A goVersionFlag is a flag.Value representing a supported Go version. +// +// (Note that the -go argument to 'go mod edit' is *not* a goVersionFlag. +// It intentionally allows newer-than-supported versions as arguments.) 
+type goVersionFlag struct { + v string +} + +func (f *goVersionFlag) String() string { return f.v } +func (f *goVersionFlag) Get() any { return f.v } + +func (f *goVersionFlag) Set(s string) error { + if s != "" { + latest := gover.Local() + if !modfile.GoVersionRE.MatchString(s) { + return fmt.Errorf("expecting a Go version like %q", latest) + } + if gover.Compare(s, latest) > 0 { + return fmt.Errorf("maximum supported Go version is %s", latest) + } + } + + f.v = s + return nil +} + +func runTidy(ctx context.Context, cmd *base.Command, args []string) { + if len(args) > 0 { + base.Fatalf("go: 'go mod tidy' accepts no arguments") + } + + // Tidy aims to make 'go test' reproducible for any package in 'all', so we + // need to include test dependencies. For modules that specify go 1.15 or + // earlier this is a no-op (because 'all' saturates transitive test + // dependencies). + // + // However, with lazy loading (go 1.16+) 'all' includes only the packages that + // are transitively imported by the main module, not the test dependencies of + // those packages. In order to make 'go test' reproducible for the packages + // that are in 'all' but outside of the main module, we must explicitly + // request that their test dependencies be included. 
+ modload.ForceUseModules = true + modload.RootMode = modload.NeedRoot + + goVersion := tidyGo.String() + if goVersion != "" && gover.Compare(gover.Local(), goVersion) < 0 { + toolchain.SwitchOrFatal(ctx, &gover.TooNewError{ + What: "-go flag", + GoVersion: goVersion, + }) + } + + modload.LoadPackages(ctx, modload.PackageOpts{ + TidyGoVersion: tidyGo.String(), + Tags: imports.AnyTags(), + Tidy: true, + TidyCompatibleVersion: tidyCompat.String(), + VendorModulesInGOROOTSrc: true, + ResolveMissingImports: true, + LoadTests: true, + AllowErrors: tidyE, + SilenceMissingStdImports: true, + Switcher: new(toolchain.Switcher), + }, "all") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/vendor.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/vendor.go new file mode 100644 index 0000000000000000000000000000000000000000..3db85bda53070712a868efb02d5d0969e74023a3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/vendor.go @@ -0,0 +1,481 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modcmd + +import ( + "bytes" + "context" + "errors" + "fmt" + "go/build" + "io" + "io/fs" + "os" + "path" + "path/filepath" + "sort" + "strings" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/fsys" + "cmd/go/internal/gover" + "cmd/go/internal/imports" + "cmd/go/internal/load" + "cmd/go/internal/modload" + "cmd/go/internal/str" + + "golang.org/x/mod/module" +) + +var cmdVendor = &base.Command{ + UsageLine: "go mod vendor [-e] [-v] [-o outdir]", + Short: "make vendored copy of dependencies", + Long: ` +Vendor resets the main module's vendor directory to include all packages +needed to build and test all the main module's packages. +It does not include test code for vendored packages. + +The -v flag causes vendor to print the names of vendored +modules and packages to standard error. 
+ +The -e flag causes vendor to attempt to proceed despite errors +encountered while loading packages. + +The -o flag causes vendor to create the vendor directory at the given +path instead of "vendor". The go command can only use a vendor directory +named "vendor" within the module root directory, so this flag is +primarily useful for other tools. + +See https://golang.org/ref/mod#go-mod-vendor for more about 'go mod vendor'. + `, + Run: runVendor, +} + +var vendorE bool // if true, report errors but proceed anyway +var vendorO string // if set, overrides the default output directory + +func init() { + cmdVendor.Flag.BoolVar(&cfg.BuildV, "v", false, "") + cmdVendor.Flag.BoolVar(&vendorE, "e", false, "") + cmdVendor.Flag.StringVar(&vendorO, "o", "", "") + base.AddChdirFlag(&cmdVendor.Flag) + base.AddModCommonFlags(&cmdVendor.Flag) +} + +func runVendor(ctx context.Context, cmd *base.Command, args []string) { + modload.InitWorkfile() + if modload.WorkFilePath() != "" { + base.Fatalf("go: 'go mod vendor' cannot be run in workspace mode. 
Run 'go work vendor' to vendor the workspace or set 'GOWORK=off' to exit workspace mode.") + } + RunVendor(ctx, vendorE, vendorO, args) +} + +func RunVendor(ctx context.Context, vendorE bool, vendorO string, args []string) { + if len(args) != 0 { + base.Fatalf("go: 'go mod vendor' accepts no arguments") + } + modload.ForceUseModules = true + modload.RootMode = modload.NeedRoot + + loadOpts := modload.PackageOpts{ + Tags: imports.AnyTags(), + VendorModulesInGOROOTSrc: true, + ResolveMissingImports: true, + UseVendorAll: true, + AllowErrors: vendorE, + SilenceMissingStdImports: true, + } + _, pkgs := modload.LoadPackages(ctx, loadOpts, "all") + + var vdir string + switch { + case filepath.IsAbs(vendorO): + vdir = vendorO + case vendorO != "": + vdir = filepath.Join(base.Cwd(), vendorO) + default: + vdir = filepath.Join(modload.VendorDir()) + } + if err := os.RemoveAll(vdir); err != nil { + base.Fatal(err) + } + + modpkgs := make(map[module.Version][]string) + for _, pkg := range pkgs { + m := modload.PackageModule(pkg) + if m.Path == "" || modload.MainModules.Contains(m.Path) { + continue + } + modpkgs[m] = append(modpkgs[m], pkg) + } + + includeAllReplacements := false + includeGoVersions := false + isExplicit := map[module.Version]bool{} + gv := modload.MainModules.GoVersion() + if gover.Compare(gv, "1.14") >= 0 && (modload.FindGoWork(base.Cwd()) != "" || modload.ModFile().Go != nil) { + // If the Go version is at least 1.14, annotate all explicit 'require' and + // 'replace' targets found in the go.mod file so that we can perform a + // stronger consistency check when -mod=vendor is set. + for _, m := range modload.MainModules.Versions() { + if modFile := modload.MainModules.ModFile(m); modFile != nil { + for _, r := range modFile.Require { + isExplicit[r.Mod] = true + } + } + + } + includeAllReplacements = true + } + if gover.Compare(gv, "1.17") >= 0 { + // If the Go version is at least 1.17, annotate all modules with their + // 'go' version directives. 
+ includeGoVersions = true + } + + var vendorMods []module.Version + for m := range isExplicit { + vendorMods = append(vendorMods, m) + } + for m := range modpkgs { + if !isExplicit[m] { + vendorMods = append(vendorMods, m) + } + } + gover.ModSort(vendorMods) + + var ( + buf bytes.Buffer + w io.Writer = &buf + ) + if cfg.BuildV { + w = io.MultiWriter(&buf, os.Stderr) + } + + if modload.MainModules.WorkFile() != nil { + fmt.Fprintf(w, "## workspace\n") + } + + replacementWritten := make(map[module.Version]bool) + for _, m := range vendorMods { + replacement := modload.Replacement(m) + line := moduleLine(m, replacement) + replacementWritten[m] = true + io.WriteString(w, line) + + goVersion := "" + if includeGoVersions { + goVersion = modload.ModuleInfo(ctx, m.Path).GoVersion + } + switch { + case isExplicit[m] && goVersion != "": + fmt.Fprintf(w, "## explicit; go %s\n", goVersion) + case isExplicit[m]: + io.WriteString(w, "## explicit\n") + case goVersion != "": + fmt.Fprintf(w, "## go %s\n", goVersion) + } + + pkgs := modpkgs[m] + sort.Strings(pkgs) + for _, pkg := range pkgs { + fmt.Fprintf(w, "%s\n", pkg) + vendorPkg(vdir, pkg) + } + } + + if includeAllReplacements { + // Record unused and wildcard replacements at the end of the modules.txt file: + // without access to the complete build list, the consumer of the vendor + // directory can't otherwise determine that those replacements had no effect. + for _, m := range modload.MainModules.Versions() { + if workFile := modload.MainModules.WorkFile(); workFile != nil { + for _, r := range workFile.Replace { + if replacementWritten[r.Old] { + // We already recorded this replacement. 
+ continue + } + replacementWritten[r.Old] = true + + line := moduleLine(r.Old, r.New) + buf.WriteString(line) + if cfg.BuildV { + os.Stderr.WriteString(line) + } + } + } + if modFile := modload.MainModules.ModFile(m); modFile != nil { + for _, r := range modFile.Replace { + if replacementWritten[r.Old] { + // We already recorded this replacement. + continue + } + replacementWritten[r.Old] = true + rNew := modload.Replacement(r.Old) + if rNew == (module.Version{}) { + // There is no replacement. Don't try to write it. + continue + } + + line := moduleLine(r.Old, rNew) + buf.WriteString(line) + if cfg.BuildV { + os.Stderr.WriteString(line) + } + } + } + } + } + + if buf.Len() == 0 { + fmt.Fprintf(os.Stderr, "go: no dependencies to vendor\n") + return + } + + if err := os.MkdirAll(vdir, 0777); err != nil { + base.Fatal(err) + } + + if err := os.WriteFile(filepath.Join(vdir, "modules.txt"), buf.Bytes(), 0666); err != nil { + base.Fatal(err) + } +} + +func moduleLine(m, r module.Version) string { + b := new(strings.Builder) + b.WriteString("# ") + b.WriteString(m.Path) + if m.Version != "" { + b.WriteString(" ") + b.WriteString(m.Version) + } + if r.Path != "" { + if str.HasFilePathPrefix(filepath.Clean(r.Path), "vendor") { + base.Fatalf("go: replacement path %s inside vendor directory", r.Path) + } + b.WriteString(" => ") + b.WriteString(r.Path) + if r.Version != "" { + b.WriteString(" ") + b.WriteString(r.Version) + } + } + b.WriteString("\n") + return b.String() +} + +func vendorPkg(vdir, pkg string) { + src, realPath, _ := modload.Lookup("", false, pkg) + if src == "" { + base.Errorf("internal error: no pkg for %s\n", pkg) + return + } + if realPath != pkg { + // TODO(#26904): Revisit whether this behavior still makes sense. + // This should actually be impossible today, because the import map is the + // identity function for packages outside of the standard library. 
+ // + // Part of the purpose of the vendor directory is to allow the packages in + // the module to continue to build in GOPATH mode, and GOPATH-mode users + // won't know about replacement aliasing. How important is it to maintain + // compatibility? + fmt.Fprintf(os.Stderr, "warning: %s imported as both %s and %s; making two copies.\n", realPath, realPath, pkg) + } + + copiedFiles := make(map[string]bool) + dst := filepath.Join(vdir, pkg) + copyDir(dst, src, matchPotentialSourceFile, copiedFiles) + if m := modload.PackageModule(realPath); m.Path != "" { + copyMetadata(m.Path, realPath, dst, src, copiedFiles) + } + + ctx := build.Default + ctx.UseAllFiles = true + bp, err := ctx.ImportDir(src, build.IgnoreVendor) + // Because UseAllFiles is set on the build.Context, it's possible ta get + // a MultiplePackageError on an otherwise valid package: the package could + // have different names for GOOS=windows and GOOS=mac for example. On the + // other hand if there's a NoGoError, the package might have source files + // specifying "//go:build ignore" those packages should be skipped because + // embeds from ignored files can't be used. + // TODO(#42504): Find a better way to avoid errors from ImportDir. We'll + // need to figure this out when we switch to PackagesAndErrors as per the + // TODO above. + var multiplePackageError *build.MultiplePackageError + var noGoError *build.NoGoError + if err != nil { + if errors.As(err, &noGoError) { + return // No source files in this package are built. Skip embeds in ignored files. + } else if !errors.As(err, &multiplePackageError) { // multiplePackageErrors are OK, but others are not. 
+ base.Fatalf("internal error: failed to find embedded files of %s: %v\n", pkg, err) + } + } + var embedPatterns []string + if gover.Compare(modload.MainModules.GoVersion(), "1.22") >= 0 { + embedPatterns = bp.EmbedPatterns + } else { + // Maintain the behavior of https://github.com/golang/go/issues/63473 + // so that we continue to agree with older versions of the go command + // about the contents of vendor directories in existing modules + embedPatterns = str.StringList(bp.EmbedPatterns, bp.TestEmbedPatterns, bp.XTestEmbedPatterns) + } + embeds, err := load.ResolveEmbed(bp.Dir, embedPatterns) + if err != nil { + base.Fatal(err) + } + for _, embed := range embeds { + embedDst := filepath.Join(dst, embed) + if copiedFiles[embedDst] { + continue + } + + // Copy the file as is done by copyDir below. + r, err := os.Open(filepath.Join(src, embed)) + if err != nil { + base.Fatal(err) + } + if err := os.MkdirAll(filepath.Dir(embedDst), 0777); err != nil { + base.Fatal(err) + } + w, err := os.Create(embedDst) + if err != nil { + base.Fatal(err) + } + if _, err := io.Copy(w, r); err != nil { + base.Fatal(err) + } + r.Close() + if err := w.Close(); err != nil { + base.Fatal(err) + } + } +} + +type metakey struct { + modPath string + dst string +} + +var copiedMetadata = make(map[metakey]bool) + +// copyMetadata copies metadata files from parents of src to parents of dst, +// stopping after processing the src parent for modPath. +func copyMetadata(modPath, pkg, dst, src string, copiedFiles map[string]bool) { + for parent := 0; ; parent++ { + if copiedMetadata[metakey{modPath, dst}] { + break + } + copiedMetadata[metakey{modPath, dst}] = true + if parent > 0 { + copyDir(dst, src, matchMetadata, copiedFiles) + } + if modPath == pkg { + break + } + pkg = path.Dir(pkg) + dst = filepath.Dir(dst) + src = filepath.Dir(src) + } +} + +// metaPrefixes is the list of metadata file prefixes. +// Vendoring copies metadata files from parents of copied directories. 
+// Note that this list could be arbitrarily extended, and it is longer +// in other tools (such as godep or dep). By using this limited set of +// prefixes and also insisting on capitalized file names, we are trying +// to nudge people toward more agreement on the naming +// and also trying to avoid false positives. +var metaPrefixes = []string{ + "AUTHORS", + "CONTRIBUTORS", + "COPYLEFT", + "COPYING", + "COPYRIGHT", + "LEGAL", + "LICENSE", + "NOTICE", + "PATENTS", +} + +// matchMetadata reports whether info is a metadata file. +func matchMetadata(dir string, info fs.DirEntry) bool { + name := info.Name() + for _, p := range metaPrefixes { + if strings.HasPrefix(name, p) { + return true + } + } + return false +} + +// matchPotentialSourceFile reports whether info may be relevant to a build operation. +func matchPotentialSourceFile(dir string, info fs.DirEntry) bool { + if strings.HasSuffix(info.Name(), "_test.go") { + return false + } + if info.Name() == "go.mod" || info.Name() == "go.sum" { + if gv := modload.MainModules.GoVersion(); gover.Compare(gv, "1.17") >= 0 { + // As of Go 1.17, we strip go.mod and go.sum files from dependency modules. + // Otherwise, 'go' commands invoked within the vendor subtree may misidentify + // an arbitrary directory within the vendor tree as a module root. + // (See https://golang.org/issue/42970.) + return false + } + } + if strings.HasSuffix(info.Name(), ".go") { + f, err := fsys.Open(filepath.Join(dir, info.Name())) + if err != nil { + base.Fatal(err) + } + defer f.Close() + + content, err := imports.ReadImports(f, false, nil) + if err == nil && !imports.ShouldBuild(content, imports.AnyTags()) { + // The file is explicitly tagged "ignore", so it can't affect the build. + // Leave it out. + return false + } + return true + } + + // We don't know anything about this file, so optimistically assume that it is + // needed. + return true +} + +// copyDir copies all regular files satisfying match(info) from src to dst. 
+func copyDir(dst, src string, match func(dir string, info fs.DirEntry) bool, copiedFiles map[string]bool) { + files, err := os.ReadDir(src) + if err != nil { + base.Fatal(err) + } + if err := os.MkdirAll(dst, 0777); err != nil { + base.Fatal(err) + } + for _, file := range files { + if file.IsDir() || !file.Type().IsRegular() || !match(src, file) { + continue + } + copiedFiles[file.Name()] = true + r, err := os.Open(filepath.Join(src, file.Name())) + if err != nil { + base.Fatal(err) + } + dstPath := filepath.Join(dst, file.Name()) + copiedFiles[dstPath] = true + w, err := os.Create(dstPath) + if err != nil { + base.Fatal(err) + } + if _, err := io.Copy(w, r); err != nil { + base.Fatal(err) + } + r.Close() + if err := w.Close(); err != nil { + base.Fatal(err) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/verify.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/verify.go new file mode 100644 index 0000000000000000000000000000000000000000..d07f730c5d0dcf90747454817e80b5e879d831af --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/verify.go @@ -0,0 +1,143 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modcmd + +import ( + "bytes" + "context" + "errors" + "fmt" + "io/fs" + "os" + "runtime" + + "cmd/go/internal/base" + "cmd/go/internal/gover" + "cmd/go/internal/modfetch" + "cmd/go/internal/modload" + + "golang.org/x/mod/module" + "golang.org/x/mod/sumdb/dirhash" +) + +var cmdVerify = &base.Command{ + UsageLine: "go mod verify", + Short: "verify dependencies have expected content", + Long: ` +Verify checks that the dependencies of the current module, +which are stored in a local downloaded source cache, have not been +modified since being downloaded. If all the modules are unmodified, +verify prints "all modules verified." 
Otherwise it reports which +modules have been changed and causes 'go mod' to exit with a +non-zero status. + +See https://golang.org/ref/mod#go-mod-verify for more about 'go mod verify'. + `, + Run: runVerify, +} + +func init() { + base.AddChdirFlag(&cmdVerify.Flag) + base.AddModCommonFlags(&cmdVerify.Flag) +} + +func runVerify(ctx context.Context, cmd *base.Command, args []string) { + modload.InitWorkfile() + + if len(args) != 0 { + // NOTE(rsc): Could take a module pattern. + base.Fatalf("go: verify takes no arguments") + } + modload.ForceUseModules = true + modload.RootMode = modload.NeedRoot + + // Only verify up to GOMAXPROCS zips at once. + type token struct{} + sem := make(chan token, runtime.GOMAXPROCS(0)) + + mg, err := modload.LoadModGraph(ctx, "") + if err != nil { + base.Fatal(err) + } + mods := mg.BuildList() + // Use a slice of result channels, so that the output is deterministic. + errsChans := make([]<-chan []error, len(mods)) + + for i, mod := range mods { + sem <- token{} + errsc := make(chan []error, 1) + errsChans[i] = errsc + mod := mod // use a copy to avoid data races + go func() { + errsc <- verifyMod(ctx, mod) + <-sem + }() + } + + ok := true + for _, errsc := range errsChans { + errs := <-errsc + for _, err := range errs { + base.Errorf("%s", err) + ok = false + } + } + if ok { + fmt.Printf("all modules verified\n") + } +} + +func verifyMod(ctx context.Context, mod module.Version) []error { + if gover.IsToolchain(mod.Path) { + // "go" and "toolchain" have no disk footprint; nothing to verify. + return nil + } + if modload.MainModules.Contains(mod.Path) { + return nil + } + var errs []error + zip, zipErr := modfetch.CachePath(ctx, mod, "zip") + if zipErr == nil { + _, zipErr = os.Stat(zip) + } + dir, dirErr := modfetch.DownloadDir(ctx, mod) + data, err := os.ReadFile(zip + "hash") + if err != nil { + if zipErr != nil && errors.Is(zipErr, fs.ErrNotExist) && + dirErr != nil && errors.Is(dirErr, fs.ErrNotExist) { + // Nothing downloaded yet. 
Nothing to verify. + return nil + } + errs = append(errs, fmt.Errorf("%s %s: missing ziphash: %v", mod.Path, mod.Version, err)) + return errs + } + h := string(bytes.TrimSpace(data)) + + if zipErr != nil && errors.Is(zipErr, fs.ErrNotExist) { + // ok + } else { + hZ, err := dirhash.HashZip(zip, dirhash.DefaultHash) + if err != nil { + errs = append(errs, fmt.Errorf("%s %s: %v", mod.Path, mod.Version, err)) + return errs + } else if hZ != h { + errs = append(errs, fmt.Errorf("%s %s: zip has been modified (%v)", mod.Path, mod.Version, zip)) + } + } + if dirErr != nil && errors.Is(dirErr, fs.ErrNotExist) { + // ok + } else { + hD, err := dirhash.HashDir(dir, mod.Path+"@"+mod.Version, dirhash.DefaultHash) + if err != nil { + + errs = append(errs, fmt.Errorf("%s %s: %v", mod.Path, mod.Version, err)) + return errs + } + if hD != h { + errs = append(errs, fmt.Errorf("%s %s: dir has been modified (%v)", mod.Path, mod.Version, dir)) + } + } + return errs +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/why.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/why.go new file mode 100644 index 0000000000000000000000000000000000000000..198672d8064113267fe9846c0a0f69befe8e3bc9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modcmd/why.go @@ -0,0 +1,143 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modcmd + +import ( + "context" + "fmt" + "strings" + + "cmd/go/internal/base" + "cmd/go/internal/imports" + "cmd/go/internal/modload" +) + +var cmdWhy = &base.Command{ + UsageLine: "go mod why [-m] [-vendor] packages...", + Short: "explain why packages or modules are needed", + Long: ` +Why shows a shortest path in the import graph from the main module to +each of the listed packages. 
If the -m flag is given, why treats the +arguments as a list of modules and finds a path to any package in each +of the modules. + +By default, why queries the graph of packages matched by "go list all", +which includes tests for reachable packages. The -vendor flag causes why +to exclude tests of dependencies. + +The output is a sequence of stanzas, one for each package or module +name on the command line, separated by blank lines. Each stanza begins +with a comment line "# package" or "# module" giving the target +package or module. Subsequent lines give a path through the import +graph, one package per line. If the package or module is not +referenced from the main module, the stanza will display a single +parenthesized note indicating that fact. + +For example: + + $ go mod why golang.org/x/text/language golang.org/x/text/encoding + # golang.org/x/text/language + rsc.io/quote + rsc.io/sampler + golang.org/x/text/language + + # golang.org/x/text/encoding + (main module does not need package golang.org/x/text/encoding) + $ + +See https://golang.org/ref/mod#go-mod-why for more about 'go mod why'. 
+ `, +} + +var ( + whyM = cmdWhy.Flag.Bool("m", false, "") + whyVendor = cmdWhy.Flag.Bool("vendor", false, "") +) + +func init() { + cmdWhy.Run = runWhy // break init cycle + base.AddChdirFlag(&cmdWhy.Flag) + base.AddModCommonFlags(&cmdWhy.Flag) +} + +func runWhy(ctx context.Context, cmd *base.Command, args []string) { + modload.InitWorkfile() + modload.ForceUseModules = true + modload.RootMode = modload.NeedRoot + modload.ExplicitWriteGoMod = true // don't write go.mod in ListModules + + loadOpts := modload.PackageOpts{ + Tags: imports.AnyTags(), + VendorModulesInGOROOTSrc: true, + LoadTests: !*whyVendor, + SilencePackageErrors: true, + UseVendorAll: *whyVendor, + } + + if *whyM { + for _, arg := range args { + if strings.Contains(arg, "@") { + base.Fatalf("go: %s: 'go mod why' requires a module path, not a version query", arg) + } + } + + mods, err := modload.ListModules(ctx, args, 0, "") + if err != nil { + base.Fatal(err) + } + + byModule := make(map[string][]string) + _, pkgs := modload.LoadPackages(ctx, loadOpts, "all") + for _, path := range pkgs { + m := modload.PackageModule(path) + if m.Path != "" { + byModule[m.Path] = append(byModule[m.Path], path) + } + } + sep := "" + for _, m := range mods { + best := "" + bestDepth := 1000000000 + for _, path := range byModule[m.Path] { + d := modload.WhyDepth(path) + if d > 0 && d < bestDepth { + best = path + bestDepth = d + } + } + why := modload.Why(best) + if why == "" { + vendoring := "" + if *whyVendor { + vendoring = " to vendor" + } + why = "(main module does not need" + vendoring + " module " + m.Path + ")\n" + } + fmt.Printf("%s# %s\n%s", sep, m.Path, why) + sep = "\n" + } + } else { + // Resolve to packages. + matches, _ := modload.LoadPackages(ctx, loadOpts, args...) 
+ + modload.LoadPackages(ctx, loadOpts, "all") // rebuild graph, from main module (not from named packages) + + sep := "" + for _, m := range matches { + for _, path := range m.Pkgs { + why := modload.Why(path) + if why == "" { + vendoring := "" + if *whyVendor { + vendoring = " to vendor" + } + why = "(main module does not need" + vendoring + " package " + path + ")\n" + } + fmt.Printf("%s# %s\n%s", sep, path, why) + sep = "\n" + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/bootstrap.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/bootstrap.go new file mode 100644 index 0000000000000000000000000000000000000000..e23669fb00c76b7bb5d6a1f20523051354f2f3f5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/bootstrap.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build cmd_go_bootstrap + +package modfetch + +import "golang.org/x/mod/module" + +func useSumDB(mod module.Version) bool { + return false +} + +func lookupSumDB(mod module.Version) (string, []string, error) { + panic("bootstrap") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/cache.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/cache.go new file mode 100644 index 0000000000000000000000000000000000000000..5a727c6dfa6d77faafa5a9bfe19b83a0e9c3a50f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/cache.go @@ -0,0 +1,815 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package modfetch + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "math/rand" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/gover" + "cmd/go/internal/lockedfile" + "cmd/go/internal/modfetch/codehost" + "cmd/go/internal/par" + "cmd/go/internal/robustio" + + "golang.org/x/mod/module" + "golang.org/x/mod/semver" +) + +func cacheDir(ctx context.Context, path string) (string, error) { + if err := checkCacheDir(ctx); err != nil { + return "", err + } + enc, err := module.EscapePath(path) + if err != nil { + return "", err + } + return filepath.Join(cfg.GOMODCACHE, "cache/download", enc, "/@v"), nil +} + +func CachePath(ctx context.Context, m module.Version, suffix string) (string, error) { + if gover.IsToolchain(m.Path) { + return "", ErrToolchain + } + dir, err := cacheDir(ctx, m.Path) + if err != nil { + return "", err + } + if !gover.ModIsValid(m.Path, m.Version) { + return "", fmt.Errorf("non-semver module version %q", m.Version) + } + if module.CanonicalVersion(m.Version) != m.Version { + return "", fmt.Errorf("non-canonical module version %q", m.Version) + } + encVer, err := module.EscapeVersion(m.Version) + if err != nil { + return "", err + } + return filepath.Join(dir, encVer+"."+suffix), nil +} + +// DownloadDir returns the directory to which m should have been downloaded. +// An error will be returned if the module path or version cannot be escaped. +// An error satisfying errors.Is(err, fs.ErrNotExist) will be returned +// along with the directory if the directory does not exist or if the directory +// is not completely populated. 
+func DownloadDir(ctx context.Context, m module.Version) (string, error) { + if gover.IsToolchain(m.Path) { + return "", ErrToolchain + } + if err := checkCacheDir(ctx); err != nil { + return "", err + } + enc, err := module.EscapePath(m.Path) + if err != nil { + return "", err + } + if !gover.ModIsValid(m.Path, m.Version) { + return "", fmt.Errorf("non-semver module version %q", m.Version) + } + if module.CanonicalVersion(m.Version) != m.Version { + return "", fmt.Errorf("non-canonical module version %q", m.Version) + } + encVer, err := module.EscapeVersion(m.Version) + if err != nil { + return "", err + } + + // Check whether the directory itself exists. + dir := filepath.Join(cfg.GOMODCACHE, enc+"@"+encVer) + if fi, err := os.Stat(dir); os.IsNotExist(err) { + return dir, err + } else if err != nil { + return dir, &DownloadDirPartialError{dir, err} + } else if !fi.IsDir() { + return dir, &DownloadDirPartialError{dir, errors.New("not a directory")} + } + + // Check if a .partial file exists. This is created at the beginning of + // a download and removed after the zip is extracted. + partialPath, err := CachePath(ctx, m, "partial") + if err != nil { + return dir, err + } + if _, err := os.Stat(partialPath); err == nil { + return dir, &DownloadDirPartialError{dir, errors.New("not completely extracted")} + } else if !os.IsNotExist(err) { + return dir, err + } + + // Check if a .ziphash file exists. It should be created before the + // zip is extracted, but if it was deleted (by another program?), we need + // to re-calculate it. Note that checkMod will repopulate the ziphash + // file if it doesn't exist, but if the module is excluded by checks + // through GONOSUMDB or GOPRIVATE, that check and repopulation won't happen. 
+ ziphashPath, err := CachePath(ctx, m, "ziphash") + if err != nil { + return dir, err + } + if _, err := os.Stat(ziphashPath); os.IsNotExist(err) { + return dir, &DownloadDirPartialError{dir, errors.New("ziphash file is missing")} + } else if err != nil { + return dir, err + } + return dir, nil +} + +// DownloadDirPartialError is returned by DownloadDir if a module directory +// exists but was not completely populated. +// +// DownloadDirPartialError is equivalent to fs.ErrNotExist. +type DownloadDirPartialError struct { + Dir string + Err error +} + +func (e *DownloadDirPartialError) Error() string { return fmt.Sprintf("%s: %v", e.Dir, e.Err) } +func (e *DownloadDirPartialError) Is(err error) bool { return err == fs.ErrNotExist } + +// lockVersion locks a file within the module cache that guards the downloading +// and extraction of the zipfile for the given module version. +func lockVersion(ctx context.Context, mod module.Version) (unlock func(), err error) { + path, err := CachePath(ctx, mod, "lock") + if err != nil { + return nil, err + } + if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { + return nil, err + } + return lockedfile.MutexAt(path).Lock() +} + +// SideLock locks a file within the module cache that previously guarded +// edits to files outside the cache, such as go.sum and go.mod files in the +// user's working directory. +// If err is nil, the caller MUST eventually call the unlock function. +func SideLock(ctx context.Context) (unlock func(), err error) { + if err := checkCacheDir(ctx); err != nil { + return nil, err + } + + path := filepath.Join(cfg.GOMODCACHE, "cache", "lock") + if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { + return nil, fmt.Errorf("failed to create cache directory: %w", err) + } + + return lockedfile.MutexAt(path).Lock() +} + +// A cachingRepo is a cache around an underlying Repo, +// avoiding redundant calls to ModulePath, Versions, Stat, Latest, and GoMod (but not CheckReuse or Zip). 
+// It is also safe for simultaneous use by multiple goroutines +// (so that it can be returned from Lookup multiple times). +// It serializes calls to the underlying Repo. +type cachingRepo struct { + path string + versionsCache par.ErrCache[string, *Versions] + statCache par.ErrCache[string, *RevInfo] + latestCache par.ErrCache[struct{}, *RevInfo] + gomodCache par.ErrCache[string, []byte] + + once sync.Once + initRepo func(context.Context) (Repo, error) + r Repo +} + +func newCachingRepo(ctx context.Context, path string, initRepo func(context.Context) (Repo, error)) *cachingRepo { + return &cachingRepo{ + path: path, + initRepo: initRepo, + } +} + +func (r *cachingRepo) repo(ctx context.Context) Repo { + r.once.Do(func() { + var err error + r.r, err = r.initRepo(ctx) + if err != nil { + r.r = errRepo{r.path, err} + } + }) + return r.r +} + +func (r *cachingRepo) CheckReuse(ctx context.Context, old *codehost.Origin) error { + return r.repo(ctx).CheckReuse(ctx, old) +} + +func (r *cachingRepo) ModulePath() string { + return r.path +} + +func (r *cachingRepo) Versions(ctx context.Context, prefix string) (*Versions, error) { + v, err := r.versionsCache.Do(prefix, func() (*Versions, error) { + return r.repo(ctx).Versions(ctx, prefix) + }) + + if err != nil { + return nil, err + } + return &Versions{ + Origin: v.Origin, + List: append([]string(nil), v.List...), + }, nil +} + +type cachedInfo struct { + info *RevInfo + err error +} + +func (r *cachingRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) { + if gover.IsToolchain(r.path) { + // Skip disk cache; the underlying golang.org/toolchain repo is cached instead. 
+ return r.repo(ctx).Stat(ctx, rev) + } + info, err := r.statCache.Do(rev, func() (*RevInfo, error) { + file, info, err := readDiskStat(ctx, r.path, rev) + if err == nil { + return info, err + } + + info, err = r.repo(ctx).Stat(ctx, rev) + if err == nil { + // If we resolved, say, 1234abcde to v0.0.0-20180604122334-1234abcdef78, + // then save the information under the proper version, for future use. + if info.Version != rev { + file, _ = CachePath(ctx, module.Version{Path: r.path, Version: info.Version}, "info") + r.statCache.Do(info.Version, func() (*RevInfo, error) { + return info, nil + }) + } + + if err := writeDiskStat(ctx, file, info); err != nil { + fmt.Fprintf(os.Stderr, "go: writing stat cache: %v\n", err) + } + } + return info, err + }) + if info != nil { + copy := *info + info = © + } + return info, err +} + +func (r *cachingRepo) Latest(ctx context.Context) (*RevInfo, error) { + if gover.IsToolchain(r.path) { + // Skip disk cache; the underlying golang.org/toolchain repo is cached instead. + return r.repo(ctx).Latest(ctx) + } + info, err := r.latestCache.Do(struct{}{}, func() (*RevInfo, error) { + info, err := r.repo(ctx).Latest(ctx) + + // Save info for likely future Stat call. + if err == nil { + r.statCache.Do(info.Version, func() (*RevInfo, error) { + return info, nil + }) + if file, _, err := readDiskStat(ctx, r.path, info.Version); err != nil { + writeDiskStat(ctx, file, info) + } + } + + return info, err + }) + if info != nil { + copy := *info + info = © + } + return info, err +} + +func (r *cachingRepo) GoMod(ctx context.Context, version string) ([]byte, error) { + if gover.IsToolchain(r.path) { + // Skip disk cache; the underlying golang.org/toolchain repo is cached instead. + return r.repo(ctx).GoMod(ctx, version) + } + text, err := r.gomodCache.Do(version, func() ([]byte, error) { + file, text, err := readDiskGoMod(ctx, r.path, version) + if err == nil { + // Note: readDiskGoMod already called checkGoMod. 
+		return text, nil
+	}
+
+	text, err = r.repo(ctx).GoMod(ctx, version)
+	if err == nil {
+		if err := checkGoMod(r.path, version, text); err != nil {
+			return text, err
+		}
+		if err := writeDiskGoMod(ctx, file, text); err != nil {
+			fmt.Fprintf(os.Stderr, "go: writing go.mod cache: %v\n", err)
+		}
+	}
+	return text, err
+	})
+	if err != nil {
+		return nil, err
+	}
+	return append([]byte(nil), text...), nil
+}
+
+func (r *cachingRepo) Zip(ctx context.Context, dst io.Writer, version string) error {
+	if gover.IsToolchain(r.path) {
+		return ErrToolchain
+	}
+	return r.repo(ctx).Zip(ctx, dst, version)
+}
+
+// InfoFile is like Lookup(ctx, path).Stat(version) but also returns the name of the file
+// containing the cached information.
+func InfoFile(ctx context.Context, path, version string) (*RevInfo, string, error) {
+	if !gover.ModIsValid(path, version) {
+		return nil, "", fmt.Errorf("invalid version %q", version)
+	}
+
+	if file, info, err := readDiskStat(ctx, path, version); err == nil {
+		return info, file, nil
+	}
+
+	var info *RevInfo
+	var err2info map[error]*RevInfo
+	err := TryProxies(func(proxy string) error {
+		i, err := Lookup(ctx, proxy, path).Stat(ctx, version)
+		if err == nil {
+			info = i
+		} else {
+			if err2info == nil {
+				err2info = make(map[error]*RevInfo)
+			}
+			err2info[err] = i // remember the partial RevInfo returned alongside this error, not the (still-nil) outer info
+		}
+		return err
+	})
+	if err != nil {
+		return err2info[err], "", err
+	}
+
+	// Stat should have populated the disk cache for us.
+	file, err := CachePath(ctx, module.Version{Path: path, Version: version}, "info")
+	if err != nil {
+		return nil, "", err
+	}
+	return info, file, nil
+}
+
+// GoMod is like Lookup(ctx, path).GoMod(rev) but avoids the
+// repository path resolution in Lookup if the result is
+// already cached on local disk.
+func GoMod(ctx context.Context, path, rev string) ([]byte, error) {
+	// Convert commit hash to pseudo-version
+	// to increase cache hit rate.
+ if !gover.ModIsValid(path, rev) { + if _, info, err := readDiskStat(ctx, path, rev); err == nil { + rev = info.Version + } else { + if errors.Is(err, statCacheErr) { + return nil, err + } + err := TryProxies(func(proxy string) error { + info, err := Lookup(ctx, proxy, path).Stat(ctx, rev) + if err == nil { + rev = info.Version + } + return err + }) + if err != nil { + return nil, err + } + } + } + + _, data, err := readDiskGoMod(ctx, path, rev) + if err == nil { + return data, nil + } + + err = TryProxies(func(proxy string) (err error) { + data, err = Lookup(ctx, proxy, path).GoMod(ctx, rev) + return err + }) + return data, err +} + +// GoModFile is like GoMod but returns the name of the file containing +// the cached information. +func GoModFile(ctx context.Context, path, version string) (string, error) { + if !gover.ModIsValid(path, version) { + return "", fmt.Errorf("invalid version %q", version) + } + if _, err := GoMod(ctx, path, version); err != nil { + return "", err + } + // GoMod should have populated the disk cache for us. + file, err := CachePath(ctx, module.Version{Path: path, Version: version}, "mod") + if err != nil { + return "", err + } + return file, nil +} + +// GoModSum returns the go.sum entry for the module version's go.mod file. +// (That is, it returns the entry listed in go.sum as "path version/go.mod".) +func GoModSum(ctx context.Context, path, version string) (string, error) { + if !gover.ModIsValid(path, version) { + return "", fmt.Errorf("invalid version %q", version) + } + data, err := GoMod(ctx, path, version) + if err != nil { + return "", err + } + sum, err := goModSum(data) + if err != nil { + return "", err + } + return sum, nil +} + +var errNotCached = fmt.Errorf("not in cache") + +// readDiskStat reads a cached stat result from disk, +// returning the name of the cache file and the result. +// If the read fails, the caller can use +// writeDiskStat(file, info) to write a new cache entry. 
+func readDiskStat(ctx context.Context, path, rev string) (file string, info *RevInfo, err error) { + if gover.IsToolchain(path) { + return "", nil, errNotCached + } + file, data, err := readDiskCache(ctx, path, rev, "info") + if err != nil { + // If the cache already contains a pseudo-version with the given hash, we + // would previously return that pseudo-version without checking upstream. + // However, that produced an unfortunate side-effect: if the author added a + // tag to the repository, 'go get' would not pick up the effect of that new + // tag on the existing commits, and 'go' commands that referred to those + // commits would use the previous name instead of the new one. + // + // That's especially problematic if the original pseudo-version starts with + // v0.0.0-, as was the case for all pseudo-versions during vgo development, + // since a v0.0.0- pseudo-version has lower precedence than pretty much any + // tagged version. + // + // In practice, we're only looking up by hash during initial conversion of a + // legacy config and during an explicit 'go get', and a little extra latency + // for those operations seems worth the benefit of picking up more accurate + // versions. + // + // Fall back to this resolution scheme only if the GOPROXY setting prohibits + // us from resolving upstream tags. + if cfg.GOPROXY == "off" { + if file, info, err := readDiskStatByHash(ctx, path, rev); err == nil { + return file, info, nil + } + } + return file, nil, err + } + info = new(RevInfo) + if err := json.Unmarshal(data, info); err != nil { + return file, nil, errNotCached + } + // The disk might have stale .info files that have Name and Short fields set. + // We want to canonicalize to .info files with those fields omitted. + // Remarshal and update the cache file if needed. 
+	data2, err := json.Marshal(info)
+	if err == nil && !bytes.Equal(data2, data) {
+		writeDiskCache(ctx, file, data2) // write the canonicalized bytes; writing the stale data back would be a no-op
+	}
+	return file, info, nil
+}
+
+// readDiskStatByHash is a fallback for readDiskStat for the case
+// where rev is a commit hash instead of a proper semantic version.
+// In that case, we look for a cached pseudo-version that matches
+// the commit hash. If we find one, we use it.
+// This matters most for converting legacy package management
+// configs, when we are often looking up commits by full hash.
+// Without this check we'd be doing network I/O to the remote repo
+// just to find out about a commit we already know about
+// (and have cached under its pseudo-version).
+func readDiskStatByHash(ctx context.Context, path, rev string) (file string, info *RevInfo, err error) {
+	if gover.IsToolchain(path) {
+		return "", nil, errNotCached
+	}
+	if cfg.GOMODCACHE == "" {
+		// Do not download to current directory.
+		return "", nil, errNotCached
+	}
+
+	if !codehost.AllHex(rev) || len(rev) < 12 {
+		return "", nil, errNotCached
+	}
+	rev = rev[:12]
+	cdir, err := cacheDir(ctx, path)
+	if err != nil {
+		return "", nil, errNotCached
+	}
+	dir, err := os.Open(cdir)
+	if err != nil {
+		return "", nil, errNotCached
+	}
+	names, err := dir.Readdirnames(-1)
+	dir.Close()
+	if err != nil {
+		return "", nil, errNotCached
+	}
+
+	// A given commit hash may map to more than one pseudo-version,
+	// depending on which tags are present on the repository.
+	// Take the highest such version.
+	var maxVersion string
+	suffix := "-" + rev + ".info"
+	err = errNotCached
+	for _, name := range names {
+		if strings.HasSuffix(name, suffix) {
+			v := strings.TrimSuffix(name, ".info")
+			if module.IsPseudoVersion(v) && semver.Compare(v, maxVersion) > 0 {
+				maxVersion = v
+				file, info, err = readDiskStat(ctx, path, strings.TrimSuffix(name, ".info"))
+			}
+		}
+	}
+	return file, info, err
+}
+
+// oldVgoPrefix is the prefix in the old auto-generated cached go.mod files.
+// We stopped trying to auto-generate the go.mod files. Now we use a trivial +// go.mod with only a module line, and we've dropped the version prefix +// entirely. If we see a version prefix, that means we're looking at an old copy +// and should ignore it. +var oldVgoPrefix = []byte("//vgo 0.0.") + +// readDiskGoMod reads a cached go.mod file from disk, +// returning the name of the cache file and the result. +// If the read fails, the caller can use +// writeDiskGoMod(file, data) to write a new cache entry. +func readDiskGoMod(ctx context.Context, path, rev string) (file string, data []byte, err error) { + if gover.IsToolchain(path) { + return "", nil, errNotCached + } + file, data, err = readDiskCache(ctx, path, rev, "mod") + + // If the file has an old auto-conversion prefix, pretend it's not there. + if bytes.HasPrefix(data, oldVgoPrefix) { + err = errNotCached + data = nil + } + + if err == nil { + if err := checkGoMod(path, rev, data); err != nil { + return "", nil, err + } + } + + return file, data, err +} + +// readDiskCache is the generic "read from a cache file" implementation. +// It takes the revision and an identifying suffix for the kind of data being cached. +// It returns the name of the cache file and the content of the file. +// If the read fails, the caller can use +// writeDiskCache(file, data) to write a new cache entry. +func readDiskCache(ctx context.Context, path, rev, suffix string) (file string, data []byte, err error) { + if gover.IsToolchain(path) { + return "", nil, errNotCached + } + file, err = CachePath(ctx, module.Version{Path: path, Version: rev}, suffix) + if err != nil { + return "", nil, errNotCached + } + data, err = robustio.ReadFile(file) + if err != nil { + return file, nil, errNotCached + } + return file, data, nil +} + +// writeDiskStat writes a stat result cache entry. +// The file name must have been returned by a previous call to readDiskStat. 
+func writeDiskStat(ctx context.Context, file string, info *RevInfo) error { + if file == "" { + return nil + } + + if info.Origin != nil { + // Clean the origin information, which might have too many + // validation criteria, for example if we are saving the result of + // m@master as m@pseudo-version. + clean := *info + info = &clean + o := *info.Origin + info.Origin = &o + + // Tags never matter if you are starting with a semver version, + // as we would be when finding this cache entry. + o.TagSum = "" + o.TagPrefix = "" + // Ref doesn't matter if you have a pseudoversion. + if module.IsPseudoVersion(info.Version) { + o.Ref = "" + } + } + + js, err := json.Marshal(info) + if err != nil { + return err + } + return writeDiskCache(ctx, file, js) +} + +// writeDiskGoMod writes a go.mod cache entry. +// The file name must have been returned by a previous call to readDiskGoMod. +func writeDiskGoMod(ctx context.Context, file string, text []byte) error { + return writeDiskCache(ctx, file, text) +} + +// writeDiskCache is the generic "write to a cache file" implementation. +// The file must have been returned by a previous call to readDiskCache. +func writeDiskCache(ctx context.Context, file string, data []byte) error { + if file == "" { + return nil + } + // Make sure directory for file exists. + if err := os.MkdirAll(filepath.Dir(file), 0777); err != nil { + return err + } + + // Write the file to a temporary location, and then rename it to its final + // path to reduce the likelihood of a corrupt file existing at that final path. + f, err := tempFile(ctx, filepath.Dir(file), filepath.Base(file), 0666) + if err != nil { + return err + } + defer func() { + // Only call os.Remove on f.Name() if we failed to rename it: otherwise, + // some other process may have created a new file with the same name after + // the rename completed. 
+ if err != nil { + f.Close() + os.Remove(f.Name()) + } + }() + + if _, err := f.Write(data); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + if err := robustio.Rename(f.Name(), file); err != nil { + return err + } + + if strings.HasSuffix(file, ".mod") { + rewriteVersionList(ctx, filepath.Dir(file)) + } + return nil +} + +// tempFile creates a new temporary file with given permission bits. +func tempFile(ctx context.Context, dir, prefix string, perm fs.FileMode) (f *os.File, err error) { + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+strconv.Itoa(rand.Intn(1000000000))+".tmp") + f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm) + if os.IsExist(err) { + if ctx.Err() != nil { + return nil, ctx.Err() + } + continue + } + break + } + return +} + +// rewriteVersionList rewrites the version list in dir +// after a new *.mod file has been written. +func rewriteVersionList(ctx context.Context, dir string) (err error) { + if filepath.Base(dir) != "@v" { + base.Fatalf("go: internal error: misuse of rewriteVersionList") + } + + listFile := filepath.Join(dir, "list") + + // Lock listfile when writing to it to try to avoid corruption to the file. + // Under rare circumstances, for instance, if the system loses power in the + // middle of a write it is possible for corrupt data to be written. This is + // not a problem for the go command itself, but may be an issue if the + // cache is being served by a GOPROXY HTTP server. This will be corrected + // the next time a new version of the module is fetched and the file is rewritten. + // TODO(matloob): golang.org/issue/43313 covers adding a go mod verify + // command that removes module versions that fail checksums. It should also + // remove list files that are detected to be corrupt. 
+	f, err := lockedfile.Edit(listFile)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if cerr := f.Close(); cerr != nil && err == nil {
+			err = cerr
+		}
+	}()
+	infos, err := os.ReadDir(dir)
+	if err != nil {
+		return err
+	}
+	var list []string
+	for _, info := range infos {
+		// We look for *.mod files on the theory that if we can't supply
+		// the .mod file then there's no point in listing that version,
+		// since it's unusable. (We can have *.info without *.mod.)
+		// We don't require *.zip files on the theory that for code only
+		// involved in module graph construction, many *.zip files
+		// will never be requested.
+		name := info.Name()
+		if v, found := strings.CutSuffix(name, ".mod"); found {
+			if v != "" && module.CanonicalVersion(v) == v {
+				list = append(list, v)
+			}
+		}
+	}
+	semver.Sort(list)
+
+	var buf bytes.Buffer
+	for _, v := range list {
+		buf.WriteString(v)
+		buf.WriteString("\n")
+	}
+	if fi, err := f.Stat(); err == nil && fi.Size() == int64(buf.Len()) {
+		old := make([]byte, buf.Len()+1)
+		if n, err := f.ReadAt(old, 0); err == io.EOF && n == buf.Len() && bytes.Equal(buf.Bytes(), old[:n]) {
+			return nil // No edit needed. (old[:n]: bytes.Equal on the full L+1-byte buffer could never match.)
+		}
+	}
+	// Remove existing contents, so that when we truncate to the actual size it will zero-fill,
+	// and we will be able to detect (some) incomplete writes as files containing trailing NUL bytes.
+	if err := f.Truncate(0); err != nil {
+		return err
+	}
+	// Reserve the final size and zero-fill.
+	if err := f.Truncate(int64(buf.Len())); err != nil {
+		return err
+	}
+	// Write the actual contents. If this fails partway through,
+	// the remainder of the file should remain as zeroes.
+	if _, err := f.Write(buf.Bytes()); err != nil {
+		f.Truncate(0)
+		return err
+	}
+
+	return nil
+}
+
+var (
+	statCacheOnce sync.Once
+	statCacheErr  error
+)
+
+// checkCacheDir checks if the directory specified by GOMODCACHE exists. An
+// error is returned if it does not.
+func checkCacheDir(ctx context.Context) error { + if cfg.GOMODCACHE == "" { + // modload.Init exits if GOPATH[0] is empty, and cfg.GOMODCACHE + // is set to GOPATH[0]/pkg/mod if GOMODCACHE is empty, so this should never happen. + return fmt.Errorf("module cache not found: neither GOMODCACHE nor GOPATH is set") + } + if !filepath.IsAbs(cfg.GOMODCACHE) { + return fmt.Errorf("GOMODCACHE entry is relative; must be absolute path: %q.\n", cfg.GOMODCACHE) + } + + // os.Stat is slow on Windows, so we only call it once to prevent unnecessary + // I/O every time this function is called. + statCacheOnce.Do(func() { + fi, err := os.Stat(cfg.GOMODCACHE) + if err != nil { + if !os.IsNotExist(err) { + statCacheErr = fmt.Errorf("could not create module cache: %w", err) + return + } + if err := os.MkdirAll(cfg.GOMODCACHE, 0777); err != nil { + statCacheErr = fmt.Errorf("could not create module cache: %w", err) + return + } + return + } + if !fi.IsDir() { + statCacheErr = fmt.Errorf("could not create module cache: %q is not a directory", cfg.GOMODCACHE) + return + } + }) + return statCacheErr +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/cache_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/cache_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6aada6671614f90c859a4294f77c50dab5956f17 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/cache_test.go @@ -0,0 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package modfetch + +import ( + "context" + "os" + "path/filepath" + "testing" +) + +func TestWriteDiskCache(t *testing.T) { + ctx := context.Background() + + tmpdir, err := os.MkdirTemp("", "go-writeCache-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpdir) + + err = writeDiskCache(ctx, filepath.Join(tmpdir, "file"), []byte("data")) + if err != nil { + t.Fatal(err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/codehost/codehost.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/codehost/codehost.go new file mode 100644 index 0000000000000000000000000000000000000000..69a3c57e26697552c7783ff0570f83a48c889060 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/codehost/codehost.go @@ -0,0 +1,376 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package codehost defines the interface implemented by a code hosting source, +// along with support code for use by implementations. +package codehost + +import ( + "bytes" + "context" + "crypto/sha256" + "fmt" + "io" + "io/fs" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "time" + + "cmd/go/internal/cfg" + "cmd/go/internal/lockedfile" + "cmd/go/internal/str" + + "golang.org/x/mod/module" + "golang.org/x/mod/semver" +) + +// Downloaded size limits. +const ( + MaxGoMod = 16 << 20 // maximum size of go.mod file + MaxLICENSE = 16 << 20 // maximum size of LICENSE file + MaxZipFile = 500 << 20 // maximum size of downloaded zip file +) + +// A Repo represents a code hosting source. +// Typical implementations include local version control repositories, +// remote version control servers, and code hosting sites. +// +// A Repo must be safe for simultaneous use by multiple goroutines, +// and callers must not modify returned values, which may be cached and shared. 
+type Repo interface { + // CheckReuse checks whether the old origin information + // remains up to date. If so, whatever cached object it was + // taken from can be reused. + // The subdir gives subdirectory name where the module root is expected to be found, + // "" for the root or "sub/dir" for a subdirectory (no trailing slash). + CheckReuse(ctx context.Context, old *Origin, subdir string) error + + // List lists all tags with the given prefix. + Tags(ctx context.Context, prefix string) (*Tags, error) + + // Stat returns information about the revision rev. + // A revision can be any identifier known to the underlying service: + // commit hash, branch, tag, and so on. + Stat(ctx context.Context, rev string) (*RevInfo, error) + + // Latest returns the latest revision on the default branch, + // whatever that means in the underlying implementation. + Latest(ctx context.Context) (*RevInfo, error) + + // ReadFile reads the given file in the file tree corresponding to revision rev. + // It should refuse to read more than maxSize bytes. + // + // If the requested file does not exist it should return an error for which + // os.IsNotExist(err) returns true. + ReadFile(ctx context.Context, rev, file string, maxSize int64) (data []byte, err error) + + // ReadZip downloads a zip file for the subdir subdirectory + // of the given revision to a new file in a given temporary directory. + // It should refuse to read more than maxSize bytes. + // It returns a ReadCloser for a streamed copy of the zip file. + // All files in the zip file are expected to be + // nested in a single top-level directory, whose name is not specified. + ReadZip(ctx context.Context, rev, subdir string, maxSize int64) (zip io.ReadCloser, err error) + + // RecentTag returns the most recent tag on rev or one of its predecessors + // with the given prefix. allowed may be used to filter out unwanted versions. 
+ RecentTag(ctx context.Context, rev, prefix string, allowed func(tag string) bool) (tag string, err error) + + // DescendsFrom reports whether rev or any of its ancestors has the given tag. + // + // DescendsFrom must return true for any tag returned by RecentTag for the + // same revision. + DescendsFrom(ctx context.Context, rev, tag string) (bool, error) +} + +// An Origin describes the provenance of a given repo method result. +// It can be passed to CheckReuse (usually in a different go command invocation) +// to see whether the result remains up-to-date. +type Origin struct { + VCS string `json:",omitempty"` // "git" etc + URL string `json:",omitempty"` // URL of repository + Subdir string `json:",omitempty"` // subdirectory in repo + + Hash string `json:",omitempty"` // commit hash or ID + + // If TagSum is non-empty, then the resolution of this module version + // depends on the set of tags present in the repo, specifically the tags + // of the form TagPrefix + a valid semver version. + // If the matching repo tags and their commit hashes still hash to TagSum, + // the Origin is still valid (at least as far as the tags are concerned). + // The exact checksum is up to the Repo implementation; see (*gitRepo).Tags. + TagPrefix string `json:",omitempty"` + TagSum string `json:",omitempty"` + + // If Ref is non-empty, then the resolution of this module version + // depends on Ref resolving to the revision identified by Hash. + // If Ref still resolves to Hash, the Origin is still valid (at least as far as Ref is concerned). + // For Git, the Ref is a full ref like "refs/heads/main" or "refs/tags/v1.2.3", + // and the Hash is the Git object hash the ref maps to. + // Other VCS might choose differently, but the idea is that Ref is the name + // with a mutable meaning while Hash is a name with an immutable meaning. 
+	Ref string `json:",omitempty"`
+
+	// If RepoSum is non-empty, then the resolution of this module version
+	// failed due to the repo being available but the version not being present.
+	// This depends on the entire state of the repo, which RepoSum summarizes.
+	// For Git, this is a hash of all the refs and their hashes.
+	RepoSum string `json:",omitempty"`
+}
+
+// A Tags describes the available tags in a code repository.
+type Tags struct {
+	Origin *Origin
+	List   []Tag
+}
+
+// A Tag describes a single tag in a code repository.
+type Tag struct {
+	Name string
+	Hash string // content hash identifying tag's content, if available
+}
+
+// isOriginTag reports whether tag should be preserved
+// in the Tags method's Origin calculation.
+// We can safely ignore tags that look like pseudo-versions,
+// because ../coderepo.go's (*codeRepo).Versions ignores them too.
+// We can also ignore non-semver tags, but we have to include semver
+// tags with extra suffixes, because the pseudo-version base finder uses them.
+func isOriginTag(tag string) bool {
+	// modfetch.(*codeRepo).Versions uses Canonical == tag,
+	// but pseudo-version calculation has a weaker condition that
+	// the canonical is a prefix of the tag.
+	// Include those too, so that if any new one appears, we'll invalidate the cache entry.
+	// This will lead to spurious invalidation of version list results,
+	// but tags of this form being created should be fairly rare
+	// (and invalidate pseudo-version results anyway).
+	c := semver.Canonical(tag)
+	return c != "" && strings.HasPrefix(tag, c) && !module.IsPseudoVersion(tag)
+}
+
+// A RevInfo describes a single revision in a source code repository.
+type RevInfo struct { + Origin *Origin + Name string // complete ID in underlying repository + Short string // shortened ID, for use in pseudo-version + Version string // version used in lookup + Time time.Time // commit time + Tags []string // known tags for commit +} + +// UnknownRevisionError is an error equivalent to fs.ErrNotExist, but for a +// revision rather than a file. +type UnknownRevisionError struct { + Rev string +} + +func (e *UnknownRevisionError) Error() string { + return "unknown revision " + e.Rev +} +func (UnknownRevisionError) Is(err error) bool { + return err == fs.ErrNotExist +} + +// ErrNoCommits is an error equivalent to fs.ErrNotExist indicating that a given +// repository or module contains no commits. +var ErrNoCommits error = noCommitsError{} + +type noCommitsError struct{} + +func (noCommitsError) Error() string { + return "no commits" +} +func (noCommitsError) Is(err error) bool { + return err == fs.ErrNotExist +} + +// AllHex reports whether the revision rev is entirely lower-case hexadecimal digits. +func AllHex(rev string) bool { + for i := 0; i < len(rev); i++ { + c := rev[i] + if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' { + continue + } + return false + } + return true +} + +// ShortenSHA1 shortens a SHA1 hash (40 hex digits) to the canonical length +// used in pseudo-versions (12 hex digits). +func ShortenSHA1(rev string) string { + if AllHex(rev) && len(rev) == 40 { + return rev[:12] + } + return rev +} + +// WorkDir returns the name of the cached work directory to use for the +// given repository type and name. +func WorkDir(ctx context.Context, typ, name string) (dir, lockfile string, err error) { + if cfg.GOMODCACHE == "" { + return "", "", fmt.Errorf("neither GOPATH nor GOMODCACHE are set") + } + + // We name the work directory for the SHA256 hash of the type and name. 
+ // We intentionally avoid the actual name both because of possible + // conflicts with valid file system paths and because we want to ensure + // that one checkout is never nested inside another. That nesting has + // led to security problems in the past. + if strings.Contains(typ, ":") { + return "", "", fmt.Errorf("codehost.WorkDir: type cannot contain colon") + } + key := typ + ":" + name + dir = filepath.Join(cfg.GOMODCACHE, "cache/vcs", fmt.Sprintf("%x", sha256.Sum256([]byte(key)))) + + xLog, buildX := cfg.BuildXWriter(ctx) + if buildX { + fmt.Fprintf(xLog, "mkdir -p %s # %s %s\n", filepath.Dir(dir), typ, name) + } + if err := os.MkdirAll(filepath.Dir(dir), 0777); err != nil { + return "", "", err + } + + lockfile = dir + ".lock" + if buildX { + fmt.Fprintf(xLog, "# lock %s\n", lockfile) + } + + unlock, err := lockedfile.MutexAt(lockfile).Lock() + if err != nil { + return "", "", fmt.Errorf("codehost.WorkDir: can't find or create lock file: %v", err) + } + defer unlock() + + data, err := os.ReadFile(dir + ".info") + info, err2 := os.Stat(dir) + if err == nil && err2 == nil && info.IsDir() { + // Info file and directory both already exist: reuse. + have := strings.TrimSuffix(string(data), "\n") + if have != key { + return "", "", fmt.Errorf("%s exists with wrong content (have %q want %q)", dir+".info", have, key) + } + if buildX { + fmt.Fprintf(xLog, "# %s for %s %s\n", dir, typ, name) + } + return dir, lockfile, nil + } + + // Info file or directory missing. Start from scratch. 
+ if xLog != nil { + fmt.Fprintf(xLog, "mkdir -p %s # %s %s\n", dir, typ, name) + } + os.RemoveAll(dir) + if err := os.MkdirAll(dir, 0777); err != nil { + return "", "", err + } + if err := os.WriteFile(dir+".info", []byte(key), 0666); err != nil { + os.RemoveAll(dir) + return "", "", err + } + return dir, lockfile, nil +} + +type RunError struct { + Cmd string + Err error + Stderr []byte + HelpText string +} + +func (e *RunError) Error() string { + text := e.Cmd + ": " + e.Err.Error() + stderr := bytes.TrimRight(e.Stderr, "\n") + if len(stderr) > 0 { + text += ":\n\t" + strings.ReplaceAll(string(stderr), "\n", "\n\t") + } + if len(e.HelpText) > 0 { + text += "\n" + e.HelpText + } + return text +} + +var dirLock sync.Map + +// Run runs the command line in the given directory +// (an empty dir means the current directory). +// It returns the standard output and, for a non-zero exit, +// a *RunError indicating the command, exit status, and standard error. +// Standard error is unavailable for commands that exit successfully. +func Run(ctx context.Context, dir string, cmdline ...any) ([]byte, error) { + return RunWithStdin(ctx, dir, nil, cmdline...) +} + +// bashQuoter escapes characters that have special meaning in double-quoted strings in the bash shell. +// See https://www.gnu.org/software/bash/manual/html_node/Double-Quotes.html. +var bashQuoter = strings.NewReplacer(`"`, `\"`, `$`, `\$`, "`", "\\`", `\`, `\\`) + +func RunWithStdin(ctx context.Context, dir string, stdin io.Reader, cmdline ...any) ([]byte, error) { + if dir != "" { + muIface, ok := dirLock.Load(dir) + if !ok { + muIface, _ = dirLock.LoadOrStore(dir, new(sync.Mutex)) + } + mu := muIface.(*sync.Mutex) + mu.Lock() + defer mu.Unlock() + } + + cmd := str.StringList(cmdline...) 
+ if os.Getenv("TESTGOVCS") == "panic" { + panic(fmt.Sprintf("use of vcs: %v", cmd)) + } + if xLog, ok := cfg.BuildXWriter(ctx); ok { + text := new(strings.Builder) + if dir != "" { + text.WriteString("cd ") + text.WriteString(dir) + text.WriteString("; ") + } + for i, arg := range cmd { + if i > 0 { + text.WriteByte(' ') + } + switch { + case strings.ContainsAny(arg, "'"): + // Quote args that could be mistaken for quoted args. + text.WriteByte('"') + text.WriteString(bashQuoter.Replace(arg)) + text.WriteByte('"') + case strings.ContainsAny(arg, "$`\\*?[\"\t\n\v\f\r \u0085\u00a0"): + // Quote args that contain special characters, glob patterns, or spaces. + text.WriteByte('\'') + text.WriteString(arg) + text.WriteByte('\'') + default: + text.WriteString(arg) + } + } + fmt.Fprintf(xLog, "%s\n", text) + start := time.Now() + defer func() { + fmt.Fprintf(xLog, "%.3fs # %s\n", time.Since(start).Seconds(), text) + }() + } + // TODO: Impose limits on command output size. + // TODO: Set environment to get English error messages. + var stderr bytes.Buffer + var stdout bytes.Buffer + c := exec.CommandContext(ctx, cmd[0], cmd[1:]...) + c.Cancel = func() error { return c.Process.Signal(os.Interrupt) } + c.Dir = dir + c.Stdin = stdin + c.Stderr = &stderr + c.Stdout = &stdout + // For Git commands, manually supply GIT_DIR so Git works with safe.bareRepository=explicit set. Noop for other commands. 
+ c.Env = append(c.Environ(), "GIT_DIR="+dir) + err := c.Run() + if err != nil { + err = &RunError{Cmd: strings.Join(cmd, " ") + " in " + dir, Stderr: stderr.Bytes(), Err: err} + } + return stdout.Bytes(), err +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/codehost/git.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/codehost/git.go new file mode 100644 index 0000000000000000000000000000000000000000..bab4c5ebbedba4a25c8215752b3606b757edf117 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/codehost/git.go @@ -0,0 +1,925 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package codehost + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + "io" + "io/fs" + "net/url" + "os" + "os/exec" + "path/filepath" + "runtime" + "slices" + "sort" + "strconv" + "strings" + "sync" + "time" + + "cmd/go/internal/base" + "cmd/go/internal/lockedfile" + "cmd/go/internal/par" + "cmd/go/internal/web" + + "golang.org/x/mod/semver" +) + +// LocalGitRepo is like Repo but accepts both Git remote references +// and paths to repositories on the local file system. +func LocalGitRepo(ctx context.Context, remote string) (Repo, error) { + return newGitRepoCached(ctx, remote, true) +} + +// A notExistError wraps another error to retain its original text +// but makes it opaquely equivalent to fs.ErrNotExist. 
type notExistError struct {
	err error
}

func (e notExistError) Error() string   { return e.err.Error() }
func (notExistError) Is(err error) bool { return err == fs.ErrNotExist }

// gitWorkDirType is the WorkDir type string under which bare git clones of
// remote repositories are cached in the module cache.
const gitWorkDirType = "git3"

// gitRepoCache deduplicates gitRepo construction so that each
// (remote, localOK) pair is initialized at most once per process.
var gitRepoCache par.ErrCache[gitCacheKey, Repo]

type gitCacheKey struct {
	remote  string
	localOK bool
}

// newGitRepoCached returns the cached Repo for remote, constructing it via
// newGitRepo on first use.
func newGitRepoCached(ctx context.Context, remote string, localOK bool) (Repo, error) {
	return gitRepoCache.Do(gitCacheKey{remote, localOK}, func() (Repo, error) {
		return newGitRepo(ctx, remote, localOK)
	})
}

// newGitRepo creates a gitRepo for remote. A remote containing "://" is
// treated as a URL and backed by a cached bare clone under GOMODCACHE;
// otherwise remote must be an existing local directory (and localOK must be
// true). host:path rcp-style syntax is rejected in the local case.
func newGitRepo(ctx context.Context, remote string, localOK bool) (Repo, error) {
	r := &gitRepo{remote: remote}
	if strings.Contains(remote, "://") {
		// This is a remote path.
		var err error
		r.dir, r.mu.Path, err = WorkDir(ctx, gitWorkDirType, r.remote)
		if err != nil {
			return nil, err
		}

		unlock, err := r.mu.Lock()
		if err != nil {
			return nil, err
		}
		defer unlock()

		// An "objects" subdirectory is how we detect an already-initialized
		// bare repository in the cache directory.
		if _, err := os.Stat(filepath.Join(r.dir, "objects")); err != nil {
			if _, err := Run(ctx, r.dir, "git", "init", "--bare"); err != nil {
				os.RemoveAll(r.dir)
				return nil, err
			}
			// We could just say git fetch https://whatever later,
			// but this lets us say git fetch origin instead, which
			// is a little nicer. More importantly, using a named remote
			// avoids a problem with Git LFS. See golang.org/issue/25605.
			if _, err := Run(ctx, r.dir, "git", "remote", "add", "origin", "--", r.remote); err != nil {
				os.RemoveAll(r.dir)
				return nil, err
			}
			if runtime.GOOS == "windows" {
				// Git for Windows by default does not support paths longer than
				// MAX_PATH (260 characters) because that may interfere with navigation
				// in some Windows programs. However, cmd/go should be able to handle
				// long paths just fine, and we expect people to use 'go clean' to
				// manipulate the module cache, so it should be harmless to set here,
				// and in some cases may be necessary in order to download modules with
				// long branch names.
				//
				// See https://github.com/git-for-windows/git/wiki/Git-cannot-create-a-file-or-directory-with-a-long-path.
				if _, err := Run(ctx, r.dir, "git", "config", "core.longpaths", "true"); err != nil {
					os.RemoveAll(r.dir)
					return nil, err
				}
			}
		}
		r.remoteURL = r.remote
		r.remote = "origin"
	} else {
		// Local path.
		// Disallow colon (not in ://) because sometimes
		// that's rcp-style host:path syntax and sometimes it's not (c:\work).
		// The go command has always insisted on URL syntax for ssh.
		if strings.Contains(remote, ":") {
			return nil, fmt.Errorf("git remote cannot use host:path syntax")
		}
		if !localOK {
			return nil, fmt.Errorf("git remote must not be local directory")
		}
		r.local = true
		info, err := os.Stat(remote)
		if err != nil {
			return nil, err
		}
		if !info.IsDir() {
			return nil, fmt.Errorf("%s exists but is not a directory", remote)
		}
		r.dir = remote
		r.mu.Path = r.dir + ".lock"
	}
	return r, nil
}

// A gitRepo is a Repo implementation backed by the git command-line tool,
// operating on either a cached bare clone of a remote URL or a local
// repository directory.
type gitRepo struct {
	ctx context.Context

	remote, remoteURL string
	local             bool
	dir               string

	mu lockedfile.Mutex // protects fetchLevel and git repo state

	fetchLevel int

	statCache par.ErrCache[string, *RevInfo]

	refsOnce sync.Once
	// refs maps branch and tag refs (e.g., "HEAD", "refs/heads/master")
	// to commits (e.g., "37ffd2e798afde829a34e8955b716ab730b2a6d6")
	refs    map[string]string
	refsErr error

	localTagsOnce sync.Once
	localTags     sync.Map // map[string]bool
}

const (
	// How much have we fetched into the git repo (in this process)?
	fetchNone = iota // nothing yet
	fetchSome        // shallow fetches of individual hashes
	fetchAll         // "fetch -t origin": get all remote branches and tags
)

// loadLocalTags loads tag references from the local git cache
// into the map r.localTags.
func (r *gitRepo) loadLocalTags(ctx context.Context) {
	// The git protocol sends all known refs and ls-remote filters them on the client side,
	// so we might as well record both heads and tags in one shot.
	// Most of the time we only care about tags but sometimes we care about heads too.
	out, err := Run(ctx, r.dir, "git", "tag", "-l")
	if err != nil {
		// Best effort: a failure just means the local-tag fast path is empty.
		return
	}

	for _, line := range strings.Split(string(out), "\n") {
		if line != "" {
			r.localTags.Store(line, true)
		}
	}
}

// CheckReuse reports whether the cached origin metadata old is still an
// accurate description of this repository (and subdir); a nil error means
// data derived from old may be reused without refetching.
func (r *gitRepo) CheckReuse(ctx context.Context, old *Origin, subdir string) error {
	if old == nil {
		return fmt.Errorf("missing origin")
	}
	if old.VCS != "git" || old.URL != r.remoteURL {
		return fmt.Errorf("origin moved from %v %q to %v %q", old.VCS, old.URL, "git", r.remoteURL)
	}
	if old.Subdir != subdir {
		return fmt.Errorf("origin moved from %v %q %q to %v %q %q", old.VCS, old.URL, old.Subdir, "git", r.remoteURL, subdir)
	}

	// Note: Can have Hash with no Ref and no TagSum and no RepoSum,
	// meaning the Hash simply has to remain in the repo.
	// In that case we assume it does in the absence of any real way to check.
	// But if neither Hash nor TagSum is present, we have nothing to check,
	// which we take to mean we didn't record enough information to be sure.
	if old.Hash == "" && old.TagSum == "" && old.RepoSum == "" {
		return fmt.Errorf("non-specific origin")
	}

	// Populate r.refs/r.refsErr; the return values are re-read from the
	// struct fields below.
	r.loadRefs(ctx)
	if r.refsErr != nil {
		return r.refsErr
	}

	if old.Ref != "" {
		hash, ok := r.refs[old.Ref]
		if !ok {
			return fmt.Errorf("ref %q deleted", old.Ref)
		}
		if hash != old.Hash {
			return fmt.Errorf("ref %q moved from %s to %s", old.Ref, old.Hash, hash)
		}
	}
	if old.TagSum != "" {
		tags, err := r.Tags(ctx, old.TagPrefix)
		if err != nil {
			return err
		}
		if tags.Origin.TagSum != old.TagSum {
			return fmt.Errorf("tags changed")
		}
	}
	if old.RepoSum != "" {
		if r.repoSum(r.refs) != old.RepoSum {
			return fmt.Errorf("refs changed")
		}
	}
	return nil
}

// loadRefs loads heads and tags references from the remote into the map r.refs.
// The result is cached in memory.
func (r *gitRepo) loadRefs(ctx context.Context) (map[string]string, error) {
	r.refsOnce.Do(func() {
		// The git protocol sends all known refs and ls-remote filters them on the client side,
		// so we might as well record both heads and tags in one shot.
		// Most of the time we only care about tags but sometimes we care about heads too.
		release, err := base.AcquireNet()
		if err != nil {
			r.refsErr = err
			return
		}
		out, gitErr := Run(ctx, r.dir, "git", "ls-remote", "-q", r.remote)
		release()

		if gitErr != nil {
			if rerr, ok := gitErr.(*RunError); ok {
				if bytes.Contains(rerr.Stderr, []byte("fatal: could not read Username")) {
					rerr.HelpText = "Confirm the import path was entered correctly.\nIf this is a private repository, see https://golang.org/doc/faq#git_https for additional information."
				}
			}

			// If the remote URL doesn't exist at all, ideally we should treat the whole
			// repository as nonexistent by wrapping the error in a notExistError.
			// For HTTP and HTTPS, that's easy to detect: we'll try to fetch the URL
			// ourselves and see what code it serves.
			if u, err := url.Parse(r.remoteURL); err == nil && (u.Scheme == "http" || u.Scheme == "https") {
				if _, err := web.GetBytes(u); errors.Is(err, fs.ErrNotExist) {
					gitErr = notExistError{gitErr}
				}
			}

			r.refsErr = gitErr
			return
		}

		refs := make(map[string]string)
		for _, line := range strings.Split(string(out), "\n") {
			f := strings.Fields(line)
			if len(f) != 2 {
				continue
			}
			if f[1] == "HEAD" || strings.HasPrefix(f[1], "refs/heads/") || strings.HasPrefix(f[1], "refs/tags/") {
				refs[f[1]] = f[0]
			}
		}
		for ref, hash := range refs {
			if k, found := strings.CutSuffix(ref, "^{}"); found { // record unwrapped annotated tag as value of tag
				refs[k] = hash
				delete(refs, ref)
			}
		}
		r.refs = refs
	})
	return r.refs, r.refsErr
}

// Tags returns the remote tags beginning with prefix, sorted by name,
// along with an Origin whose TagSum summarizes them for reuse checking.
func (r *gitRepo) Tags(ctx context.Context, prefix string) (*Tags, error) {
	refs, err := r.loadRefs(ctx)
	if err != nil {
		return nil, err
	}

	tags := &Tags{
		Origin: &Origin{
			VCS:       "git",
			URL:       r.remoteURL,
			TagPrefix: prefix,
		},
		List: []Tag{},
	}
	for ref, hash := range refs {
		if !strings.HasPrefix(ref, "refs/tags/") {
			continue
		}
		tag := ref[len("refs/tags/"):]
		if !strings.HasPrefix(tag, prefix) {
			continue
		}
		tags.List = append(tags.List, Tag{tag, hash})
	}
	sort.Slice(tags.List, func(i, j int) bool {
		return tags.List[i].Name < tags.List[j].Name
	})

	// Only tags that look like version tags (after stripping any directory
	// part of the prefix) contribute to the checksum.
	dir := prefix[:strings.LastIndex(prefix, "/")+1]
	h := sha256.New()
	for _, tag := range tags.List {
		if isOriginTag(strings.TrimPrefix(tag.Name, dir)) {
			fmt.Fprintf(h, "%q %s\n", tag.Name, tag.Hash)
		}
	}
	tags.Origin.TagSum = "t1:" + base64.StdEncoding.EncodeToString(h.Sum(nil))
	return tags, nil
}

// repoSum returns a checksum of the entire repo state,
// which can be checked (as Origin.RepoSum) to cache
// the absence of a specific module version.
// The caller must supply refs, the result of a successful r.loadRefs.
func (r *gitRepo) repoSum(refs map[string]string) string {
	var list []string
	for ref := range refs {
		list = append(list, ref)
	}
	// Sort for a deterministic checksum regardless of map iteration order.
	sort.Strings(list)
	h := sha256.New()
	for _, ref := range list {
		fmt.Fprintf(h, "%q %s\n", ref, refs[ref])
	}
	return "r1:" + base64.StdEncoding.EncodeToString(h.Sum(nil))
}

// unknownRevisionInfo returns a RevInfo containing an Origin containing a RepoSum of refs,
// for use when returning an UnknownRevisionError.
func (r *gitRepo) unknownRevisionInfo(refs map[string]string) *RevInfo {
	return &RevInfo{
		Origin: &Origin{
			VCS:     "git",
			URL:     r.remoteURL,
			RepoSum: r.repoSum(refs),
		},
	}
}

// Latest returns revision information for the commit that the remote HEAD
// ref currently points to, recording Ref "HEAD" in the result's Origin.
func (r *gitRepo) Latest(ctx context.Context) (*RevInfo, error) {
	refs, err := r.loadRefs(ctx)
	if err != nil {
		return nil, err
	}
	if refs["HEAD"] == "" {
		return nil, ErrNoCommits
	}
	statInfo, err := r.Stat(ctx, refs["HEAD"])
	if err != nil {
		return nil, err
	}

	// Stat may return cached info, so make a copy to modify here.
	info := new(RevInfo)
	*info = *statInfo
	info.Origin = new(Origin)
	if statInfo.Origin != nil {
		*info.Origin = *statInfo.Origin
	}
	info.Origin.Ref = "HEAD"
	info.Origin.Hash = refs["HEAD"]

	return info, nil
}

// findRef finds some ref name for the given hash,
// for use when the server requires giving a ref instead of a hash.
// There may be multiple ref names for a given hash,
// in which case this returns some name - it doesn't matter which.
func (r *gitRepo) findRef(ctx context.Context, hash string) (ref string, ok bool) {
	refs, err := r.loadRefs(ctx)
	if err != nil {
		return "", false
	}
	for ref, h := range refs {
		if h == hash {
			return ref, true
		}
	}
	return "", false
}

// minHashDigits is the minimum number of digits to require
// before accepting a hex digit sequence as potentially identifying
// a specific commit in a git repo. (Of course, users can always
// specify more digits, and many will paste in all 40 digits,
// but many of git's commands default to printing short hashes
// as 7 digits.)
const minHashDigits = 7

// stat stats the given rev in the local repository,
// or else it fetches more info from the remote repository and tries again.
func (r *gitRepo) stat(ctx context.Context, rev string) (info *RevInfo, err error) {
	if r.local {
		return r.statLocal(ctx, rev, rev)
	}

	// Fast path: maybe rev is a hash we already have locally.
	didStatLocal := false
	if len(rev) >= minHashDigits && len(rev) <= 40 && AllHex(rev) {
		if info, err := r.statLocal(ctx, rev, rev); err == nil {
			return info, nil
		}
		didStatLocal = true
	}

	// Maybe rev is a tag we already have locally.
	// (Note that we're excluding branches, which can be stale.)
	r.localTagsOnce.Do(func() { r.loadLocalTags(ctx) })
	if _, ok := r.localTags.Load(rev); ok {
		return r.statLocal(ctx, rev, "refs/tags/"+rev)
	}

	// Maybe rev is the name of a tag or branch on the remote server.
	// Or maybe it's the prefix of a hash of a named ref.
	// Try to resolve to both a ref (git name) and full (40-hex-digit) commit hash.
	refs, err := r.loadRefs(ctx)
	if err != nil {
		return nil, err
	}
	// loadRefs may return an error if git fails, for example segfaults, or
	// could not load a private repo, but defer checking to the else block
	// below, in case we already have the rev in question in the local cache.
	var ref, hash string
	if refs["refs/tags/"+rev] != "" {
		ref = "refs/tags/" + rev
		hash = refs[ref]
		// Keep rev as is: tags are assumed not to change meaning.
	} else if refs["refs/heads/"+rev] != "" {
		ref = "refs/heads/" + rev
		hash = refs[ref]
		rev = hash // Replace rev, because meaning of refs/heads/foo can change.
	} else if rev == "HEAD" && refs["HEAD"] != "" {
		ref = "HEAD"
		hash = refs[ref]
		rev = hash // Replace rev, because meaning of HEAD can change.
	} else if len(rev) >= minHashDigits && len(rev) <= 40 && AllHex(rev) {
		// At the least, we have a hash prefix we can look up after the fetch below.
		// Maybe we can map it to a full hash using the known refs.
		prefix := rev
		// Check whether rev is prefix of known ref hash.
		for k, h := range refs {
			if strings.HasPrefix(h, prefix) {
				if hash != "" && hash != h {
					// Hash is an ambiguous hash prefix.
					// More information will not change that.
					return nil, fmt.Errorf("ambiguous revision %s", rev)
				}
				if ref == "" || ref > k { // Break ties deterministically when multiple refs point at same hash.
					ref = k
				}
				rev = h
				hash = h
			}
		}
		if hash == "" && len(rev) == 40 { // Didn't find a ref, but rev is a full hash.
			hash = rev
		}
	} else {
		return r.unknownRevisionInfo(refs), &UnknownRevisionError{Rev: rev}
	}

	defer func() {
		if info != nil {
			info.Origin.Hash = info.Name
			// There's a ref = hash below; don't write that hash down as Origin.Ref.
			if ref != info.Origin.Hash {
				info.Origin.Ref = ref
			}
		}
	}()

	// Protect r.fetchLevel and the "fetch more and more" sequence.
	unlock, err := r.mu.Lock()
	if err != nil {
		return nil, err
	}
	defer unlock()

	// Perhaps r.localTags did not have the ref when we loaded local tags,
	// but we've since done fetches that pulled down the hash we need
	// (or already have the hash we need, just without its tag).
	// Either way, try a local stat before falling back to network I/O.
	if !didStatLocal {
		if info, err := r.statLocal(ctx, rev, hash); err == nil {
			tag, fromTag := strings.CutPrefix(ref, "refs/tags/")
			if fromTag && !slices.Contains(info.Tags, tag) {
				// The local repo includes the commit hash we want, but it is missing
				// the corresponding tag. Add that tag and try again.
				_, err := Run(ctx, r.dir, "git", "tag", tag, hash)
				if err != nil {
					return nil, err
				}
				r.localTags.Store(tag, true)
				return r.statLocal(ctx, rev, ref)
			}
			return info, err
		}
	}

	// If we know a specific commit we need and its ref, fetch it.
	// We do NOT fetch arbitrary hashes (when we don't know the ref)
	// because we want to avoid ever importing a commit that isn't
	// reachable from refs/tags/* or refs/heads/* or HEAD.
	// Both Gerrit and GitHub expose every CL/PR as a named ref,
	// and we don't want those commits masquerading as being real
	// pseudo-versions in the main repo.
	if r.fetchLevel <= fetchSome && ref != "" && hash != "" && !r.local {
		r.fetchLevel = fetchSome
		var refspec string
		if ref == "HEAD" {
			// Fetch the hash but give it a local name (refs/dummy),
			// because that triggers the fetch behavior of creating any
			// other known remote tags for the hash. We never use
			// refs/dummy (it's not refs/tags/dummy) and it will be
			// overwritten in the next command, and that's fine.
			ref = hash
			refspec = hash + ":refs/dummy"
		} else {
			// If we do know the ref name, save the mapping locally
			// so that (if it is a tag) it can show up in localTags
			// on a future call. Also, some servers refuse to allow
			// full hashes in ref specs, so prefer a ref name if known.
			refspec = ref + ":" + ref
		}

		release, err := base.AcquireNet()
		if err != nil {
			return nil, err
		}
		// We explicitly set protocol.version=2 for this command to work around
		// an apparent Git bug introduced in Git 2.21 (commit 61c771),
		// which causes the handler for protocol version 1 to sometimes miss
		// tags that point to the requested commit (see https://go.dev/issue/56881).
		_, err = Run(ctx, r.dir, "git", "-c", "protocol.version=2", "fetch", "-f", "--depth=1", r.remote, refspec)
		release()

		if err == nil {
			return r.statLocal(ctx, rev, ref)
		}
		// Don't try to be smart about parsing the error.
		// It's too complex and varies too much by git version.
		// No matter what went wrong, fall back to a complete fetch.
	}

	// Last resort.
	// Fetch all heads and tags and hope the hash we want is in the history.
	if err := r.fetchRefsLocked(ctx); err != nil {
		return nil, err
	}

	return r.statLocal(ctx, rev, rev)
}

// fetchRefsLocked fetches all heads and tags from the origin, along with the
// ancestors of those commits.
//
// We only fetch heads and tags, not arbitrary other commits: we don't want to
// pull in off-branch commits (such as rejected GitHub pull requests) that the
// server may be willing to provide. (See the comments within the stat method
// for more detail.)
//
// fetchRefsLocked requires that r.mu remain locked for the duration of the call.
func (r *gitRepo) fetchRefsLocked(ctx context.Context) error {
	if r.fetchLevel < fetchAll {
		// NOTE: To work around a bug affecting Git clients up to at least 2.23.0
		// (2019-08-16), we must first expand the set of local refs, and only then
		// unshallow the repository as a separate fetch operation. (See
		// golang.org/issue/34266 and
		// https://github.com/git/git/blob/4c86140027f4a0d2caaa3ab4bd8bfc5ce3c11c8a/transport.c#L1303-L1309.)

		release, err := base.AcquireNet()
		if err != nil {
			return err
		}
		defer release()

		if _, err := Run(ctx, r.dir, "git", "fetch", "-f", r.remote, "refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"); err != nil {
			return err
		}

		if _, err := os.Stat(filepath.Join(r.dir, "shallow")); err == nil {
			if _, err := Run(ctx, r.dir, "git", "fetch", "--unshallow", "-f", r.remote); err != nil {
				return err
			}
		}

		r.fetchLevel = fetchAll
	}
	return nil
}

// statLocal returns a new RevInfo describing rev in the local git repository.
// It uses version as info.Version.
func (r *gitRepo) statLocal(ctx context.Context, version, rev string) (*RevInfo, error) {
	out, err := Run(ctx, r.dir, "git", "-c", "log.showsignature=false", "log", "--no-decorate", "-n1", "--format=format:%H %ct %D", rev, "--")
	if err != nil {
		// Return info with Origin.RepoSum if possible to allow caching of negative lookup.
		var info *RevInfo
		if refs, err := r.loadRefs(ctx); err == nil {
			info = r.unknownRevisionInfo(refs)
		}
		return info, &UnknownRevisionError{Rev: rev}
	}
	f := strings.Fields(string(out))
	if len(f) < 2 {
		return nil, fmt.Errorf("unexpected response from git log: %q", out)
	}
	hash := f[0]
	if strings.HasPrefix(hash, version) {
		version = hash // extend to full hash
	}
	t, err := strconv.ParseInt(f[1], 10, 64)
	if err != nil {
		return nil, fmt.Errorf("invalid time from git log: %q", out)
	}

	info := &RevInfo{
		Origin: &Origin{
			VCS:  "git",
			URL:  r.remoteURL,
			Hash: hash,
		},
		Name:    hash,
		Short:   ShortenSHA1(hash),
		Time:    time.Unix(t, 0).UTC(),
		Version: hash,
	}
	if !strings.HasPrefix(hash, rev) {
		info.Origin.Ref = rev
	}

	// Add tags. Output looks like:
	// ede458df7cd0fdca520df19a33158086a8a68e81 1523994202 HEAD -> master, tag: v1.2.4-annotated, tag: v1.2.3, origin/master, origin/HEAD
	for i := 2; i < len(f); i++ {
		if f[i] == "tag:" {
			i++
			if i < len(f) {
				info.Tags = append(info.Tags, strings.TrimSuffix(f[i], ","))
			}
		}
	}
	sort.Strings(info.Tags)

	// Used hash as info.Version above.
	// Use caller's suggested version if it appears in the tag list
	// (filters out branch names, HEAD).
	for _, tag := range info.Tags {
		if version == tag {
			info.Version = version
		}
	}

	return info, nil
}

// Stat describes the revision rev ("latest" means the remote HEAD).
// Results are memoized per rev in r.statCache.
func (r *gitRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) {
	if rev == "latest" {
		return r.Latest(ctx)
	}
	return r.statCache.Do(rev, func() (*RevInfo, error) {
		return r.stat(ctx, rev)
	})
}

// ReadFile returns the contents of file at revision rev. Any git cat-file
// failure is reported as fs.ErrNotExist.
// NOTE(review): maxSize is currently unused here.
func (r *gitRepo) ReadFile(ctx context.Context, rev, file string, maxSize int64) ([]byte, error) {
	// TODO: Could use git cat-file --batch.
	info, err := r.Stat(ctx, rev) // download rev into local git repo
	if err != nil {
		return nil, err
	}
	out, err := Run(ctx, r.dir, "git", "cat-file", "blob", info.Name+":"+file)
	if err != nil {
		return nil, fs.ErrNotExist
	}
	return out, nil
}

// RecentTag returns the tag reachable from rev that begins with prefix,
// satisfies allowed, and has the highest semantic version in the part of the
// tag after prefix, fetching additional history if needed to decide.
func (r *gitRepo) RecentTag(ctx context.Context, rev, prefix string, allowed func(tag string) bool) (tag string, err error) {
	info, err := r.Stat(ctx, rev)
	if err != nil {
		return "", err
	}
	rev = info.Name // expand hash prefixes

	// describe sets tag and err using 'git for-each-ref' and reports whether the
	// result is definitive.
	describe := func() (definitive bool) {
		var out []byte
		out, err = Run(ctx, r.dir, "git", "for-each-ref", "--format", "%(refname)", "refs/tags", "--merged", rev)
		if err != nil {
			return true
		}

		// prefixed tags aren't valid semver tags so compare without prefix, but only tags with correct prefix
		var highest string
		for _, line := range strings.Split(string(out), "\n") {
			line = strings.TrimSpace(line)
			// Git does support lstrip in the for-each-ref format, but it was added in
			// v2.13.0. Stripping here instead gives support for git v2.7.0.
			if !strings.HasPrefix(line, "refs/tags/") {
				continue
			}
			line = line[len("refs/tags/"):]

			if !strings.HasPrefix(line, prefix) {
				continue
			}
			if !allowed(line) {
				continue
			}

			semtag := line[len(prefix):]
			if semver.Compare(semtag, highest) > 0 {
				highest = semtag
			}
		}

		if highest != "" {
			tag = prefix + highest
		}

		return tag != "" && !AllHex(tag)
	}

	if describe() {
		return tag, err
	}

	// Git didn't find a version tag preceding the requested rev.
	// See whether any plausible tag exists.
	tags, err := r.Tags(ctx, prefix+"v")
	if err != nil {
		return "", err
	}
	if len(tags.List) == 0 {
		return "", nil
	}

	// There are plausible tags, but we don't know if rev is a descendent of any of them.
	// Fetch the history to find out.

	unlock, err := r.mu.Lock()
	if err != nil {
		return "", err
	}
	defer unlock()

	if err := r.fetchRefsLocked(ctx); err != nil {
		return "", err
	}

	// If we've reached this point, we have all of the commits that are reachable
	// from all heads and tags.
	//
	// The only refs we should be missing are those that are no longer reachable
	// (or never were reachable) from any branch or tag, including the master
	// branch, and we don't want to resolve them anyway (they're probably
	// unreachable for a reason).
	//
	// Try one last time in case some other goroutine fetched rev while we were
	// waiting on the lock.
	describe()
	return tag, err
}

// DescendsFrom reports whether rev has the given tag as an ancestor,
// fetching complete history if a shallow clone cannot answer definitively.
func (r *gitRepo) DescendsFrom(ctx context.Context, rev, tag string) (bool, error) {
	// The "--is-ancestor" flag was added to "git merge-base" in version 1.8.0, so
	// this won't work with Git 1.7.1. According to golang.org/issue/28550, cmd/go
	// already doesn't work with Git 1.7.1, so at least it's not a regression.
	//
	// git merge-base --is-ancestor exits with status 0 if rev is an ancestor, or
	// 1 if not.
	_, err := Run(ctx, r.dir, "git", "merge-base", "--is-ancestor", "--", tag, rev)

	// Git reports "is an ancestor" with exit code 0 and "not an ancestor" with
	// exit code 1.
	// Unfortunately, if we've already fetched rev with a shallow history, git
	// merge-base has been observed to report a false-negative, so don't stop yet
	// even if the exit code is 1!
	if err == nil {
		return true, nil
	}

	// See whether the tag and rev even exist.
	tags, err := r.Tags(ctx, tag)
	if err != nil {
		return false, err
	}
	if len(tags.List) == 0 {
		return false, nil
	}

	// NOTE: r.stat is very careful not to fetch commits that we shouldn't know
	// about, like rejected GitHub pull requests, so don't try to short-circuit
	// that here.
	if _, err = r.stat(ctx, rev); err != nil {
		return false, err
	}

	// Now fetch history so that git can search for a path.
	unlock, err := r.mu.Lock()
	if err != nil {
		return false, err
	}
	defer unlock()

	if r.fetchLevel < fetchAll {
		// Fetch the complete history for all refs and heads. It would be more
		// efficient to only fetch the history from rev to tag, but that's much more
		// complicated, and any kind of shallow fetch is fairly likely to trigger
		// bugs in JGit servers and/or the go command anyway.
		if err := r.fetchRefsLocked(ctx); err != nil {
			return false, err
		}
	}

	_, err = Run(ctx, r.dir, "git", "merge-base", "--is-ancestor", "--", tag, rev)
	if err == nil {
		return true, nil
	}
	if ee, ok := err.(*RunError).Err.(*exec.ExitError); ok && ee.ExitCode() == 1 {
		return false, nil
	}
	return false, err
}

// ReadZip downloads rev into the local repo if needed and returns a zip
// archive of subdir (or the whole tree when subdir is empty), with every
// entry placed under a "prefix/" directory.
func (r *gitRepo) ReadZip(ctx context.Context, rev, subdir string, maxSize int64) (zip io.ReadCloser, err error) {
	// TODO: Use maxSize or drop it.
	args := []string{}
	if subdir != "" {
		args = append(args, "--", subdir)
	}
	info, err := r.Stat(ctx, rev) // download rev into local git repo
	if err != nil {
		return nil, err
	}

	unlock, err := r.mu.Lock()
	if err != nil {
		return nil, err
	}
	defer unlock()

	if err := ensureGitAttributes(r.dir); err != nil {
		return nil, err
	}

	// Incredibly, git produces different archives depending on whether
	// it is running on a Windows system or not, in an attempt to normalize
	// text file line endings. Setting -c core.autocrlf=input means only
	// translate files on the way into the repo, not on the way out (archive).
	// The -c core.eol=lf should be unnecessary but set it anyway.
	archive, err := Run(ctx, r.dir, "git", "-c", "core.autocrlf=input", "-c", "core.eol=lf", "archive", "--format=zip", "--prefix=prefix/", info.Name, args)
	if err != nil {
		if bytes.Contains(err.(*RunError).Stderr, []byte("did not match any files")) {
			return nil, fs.ErrNotExist
		}
		return nil, err
	}

	return io.NopCloser(bytes.NewReader(archive)), nil
}

// ensureGitAttributes makes sure export-subst and export-ignore features are
// disabled for this repo. This is intended to be run prior to running git
// archive so that zip files are generated that produce consistent ziphashes
// for a given revision, independent of variables such as git version and the
// size of the repo.
+// +// See: https://github.com/golang/go/issues/27153 +func ensureGitAttributes(repoDir string) (err error) { + const attr = "\n* -export-subst -export-ignore\n" + + d := repoDir + "/info" + p := d + "/attributes" + + if err := os.MkdirAll(d, 0755); err != nil { + return err + } + + f, err := os.OpenFile(p, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666) + if err != nil { + return err + } + defer func() { + closeErr := f.Close() + if closeErr != nil { + err = closeErr + } + }() + + b, err := io.ReadAll(f) + if err != nil { + return err + } + if !bytes.HasSuffix(b, []byte(attr)) { + _, err := f.WriteString(attr) + return err + } + + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/codehost/git_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/codehost/git_test.go new file mode 100644 index 0000000000000000000000000000000000000000..dba9935b5820b51a18224367456aa485fc76ff38 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/codehost/git_test.go @@ -0,0 +1,795 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package codehost + +import ( + "archive/zip" + "bytes" + "cmd/go/internal/cfg" + "cmd/go/internal/vcweb/vcstest" + "context" + "flag" + "internal/testenv" + "io" + "io/fs" + "log" + "os" + "path" + "path/filepath" + "reflect" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +func TestMain(m *testing.M) { + flag.Parse() + if err := testMain(m); err != nil { + log.Fatal(err) + } +} + +var gitrepo1, hgrepo1, vgotest1 string + +var altRepos = func() []string { + return []string{ + "localGitRepo", + hgrepo1, + } +} + +// TODO: Convert gitrepo1 to svn, bzr, fossil and add tests. +// For now, at least the hgrepo1 tests check the general vcs.go logic. 

// localGitRepo is like gitrepo1 but allows archive access
// (although that doesn't really matter after CL 120041),
// and has a file:// URL instead of http:// or https://
// (which might still matter).
var localGitRepo string

// localGitURL initializes the repo in localGitRepo and returns its URL.
func localGitURL(t testing.TB) string {
	testenv.MustHaveExecPath(t, "git")
	if runtime.GOOS == "android" && strings.HasSuffix(testenv.Builder(), "-corellium") {
		testenv.SkipFlaky(t, 59940)
	}

	localGitURLOnce.Do(func() {
		// Clone gitrepo1 into a local directory.
		// If we use a file:// URL to access the local directory,
		// then git starts up all the usual protocol machinery,
		// which will let us test remote git archive invocations.
		_, localGitURLErr = Run(context.Background(), "", "git", "clone", "--mirror", gitrepo1, localGitRepo)
		if localGitURLErr != nil {
			return
		}
		_, localGitURLErr = Run(context.Background(), localGitRepo, "git", "config", "daemon.uploadarch", "true")
	})

	if localGitURLErr != nil {
		t.Fatal(localGitURLErr)
	}
	// Convert absolute path to file URL. LocalGitRepo will not accept
	// Windows absolute paths because they look like a host:path remote.
	// TODO(golang.org/issue/32456): use url.FromFilePath when implemented.
	if strings.HasPrefix(localGitRepo, "/") {
		return "file://" + localGitRepo
	} else {
		return "file:///" + filepath.ToSlash(localGitRepo)
	}
}

var (
	localGitURLOnce sync.Once
	localGitURLErr  error
)

// testMain starts the vcweb test server, points the repo URL variables at
// it, and redirects the module cache to a fresh temp directory; cleanup
// errors from the deferred teardown are returned if the run itself succeeded.
func testMain(m *testing.M) (err error) {
	cfg.BuildX = testing.Verbose()

	srv, err := vcstest.NewServer()
	if err != nil {
		return err
	}
	defer func() {
		if closeErr := srv.Close(); err == nil {
			err = closeErr
		}
	}()

	gitrepo1 = srv.HTTP.URL + "/git/gitrepo1"
	hgrepo1 = srv.HTTP.URL + "/hg/hgrepo1"
	vgotest1 = srv.HTTP.URL + "/git/vgotest1"

	dir, err := os.MkdirTemp("", "gitrepo-test-")
	if err != nil {
		return err
	}
	defer func() {
		if rmErr := os.RemoveAll(dir); err == nil {
			err = rmErr
		}
	}()

	localGitRepo = filepath.Join(dir, "gitrepo2")

	// Redirect the module cache to a fresh directory to avoid crosstalk, and make
	// it read/write so that the test can still clean it up easily when done.
	cfg.GOMODCACHE = filepath.Join(dir, "modcache")
	cfg.ModCacheRW = true

	// NOTE(review): m.Run's status code is discarded here; the exit status is
	// governed by TestMain's log.Fatal on a non-nil error — confirm intended.
	m.Run()
	return nil
}

// testContext returns a context whose build -x output is routed to the
// test's log via a testWriter.
func testContext(t testing.TB) context.Context {
	w := newTestWriter(t)
	return cfg.WithBuildXWriter(context.Background(), w)
}

// A testWriter is an io.Writer that writes to a test's log.
//
// The writer batches written data until the last byte of a write is a newline
// character, then flushes the batched data as a single call to Logf.
// Any remaining unflushed data is logged during Cleanup.
+type testWriter struct { + t testing.TB + + mu sync.Mutex + buf bytes.Buffer +} + +func newTestWriter(t testing.TB) *testWriter { + w := &testWriter{t: t} + + t.Cleanup(func() { + w.mu.Lock() + defer w.mu.Unlock() + if b := w.buf.Bytes(); len(b) > 0 { + w.t.Logf("%s", b) + w.buf.Reset() + } + }) + + return w +} + +func (w *testWriter) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + n, err := w.buf.Write(p) + if b := w.buf.Bytes(); len(b) > 0 && b[len(b)-1] == '\n' { + w.t.Logf("%s", b) + w.buf.Reset() + } + return n, err +} + +func testRepo(ctx context.Context, t *testing.T, remote string) (Repo, error) { + if remote == "localGitRepo" { + return LocalGitRepo(ctx, localGitURL(t)) + } + vcsName := "git" + for _, k := range []string{"hg"} { + if strings.Contains(remote, "/"+k+"/") { + vcsName = k + } + } + if testing.Short() && vcsName == "hg" { + t.Skipf("skipping hg test in short mode: hg is slow") + } + testenv.MustHaveExecPath(t, vcsName) + if runtime.GOOS == "android" && strings.HasSuffix(testenv.Builder(), "-corellium") { + testenv.SkipFlaky(t, 59940) + } + return NewRepo(ctx, vcsName, remote) +} + +func TestTags(t *testing.T) { + t.Parallel() + + type tagsTest struct { + repo string + prefix string + tags []Tag + } + + runTest := func(tt tagsTest) func(*testing.T) { + return func(t *testing.T) { + t.Parallel() + ctx := testContext(t) + + r, err := testRepo(ctx, t, tt.repo) + if err != nil { + t.Fatal(err) + } + tags, err := r.Tags(ctx, tt.prefix) + if err != nil { + t.Fatal(err) + } + if tags == nil || !reflect.DeepEqual(tags.List, tt.tags) { + t.Errorf("Tags(%q): incorrect tags\nhave %v\nwant %v", tt.prefix, tags, tt.tags) + } + } + } + + for _, tt := range []tagsTest{ + {gitrepo1, "xxx", []Tag{}}, + {gitrepo1, "", []Tag{ + {"v1.2.3", "ede458df7cd0fdca520df19a33158086a8a68e81"}, + {"v1.2.4-annotated", "ede458df7cd0fdca520df19a33158086a8a68e81"}, + {"v2.0.1", "76a00fb249b7f93091bc2c89a789dab1fc1bc26f"}, + {"v2.0.2", 
"9d02800338b8a55be062c838d1f02e0c5780b9eb"}, + {"v2.3", "76a00fb249b7f93091bc2c89a789dab1fc1bc26f"}, + }}, + {gitrepo1, "v", []Tag{ + {"v1.2.3", "ede458df7cd0fdca520df19a33158086a8a68e81"}, + {"v1.2.4-annotated", "ede458df7cd0fdca520df19a33158086a8a68e81"}, + {"v2.0.1", "76a00fb249b7f93091bc2c89a789dab1fc1bc26f"}, + {"v2.0.2", "9d02800338b8a55be062c838d1f02e0c5780b9eb"}, + {"v2.3", "76a00fb249b7f93091bc2c89a789dab1fc1bc26f"}, + }}, + {gitrepo1, "v1", []Tag{ + {"v1.2.3", "ede458df7cd0fdca520df19a33158086a8a68e81"}, + {"v1.2.4-annotated", "ede458df7cd0fdca520df19a33158086a8a68e81"}, + }}, + {gitrepo1, "2", []Tag{}}, + } { + t.Run(path.Base(tt.repo)+"/"+tt.prefix, runTest(tt)) + if tt.repo == gitrepo1 { + // Clear hashes. + clearTags := []Tag{} + for _, tag := range tt.tags { + clearTags = append(clearTags, Tag{tag.Name, ""}) + } + tags := tt.tags + for _, tt.repo = range altRepos() { + if strings.Contains(tt.repo, "Git") { + tt.tags = tags + } else { + tt.tags = clearTags + } + t.Run(path.Base(tt.repo)+"/"+tt.prefix, runTest(tt)) + } + } + } +} + +func TestLatest(t *testing.T) { + t.Parallel() + + type latestTest struct { + repo string + info *RevInfo + } + runTest := func(tt latestTest) func(*testing.T) { + return func(t *testing.T) { + t.Parallel() + ctx := testContext(t) + + r, err := testRepo(ctx, t, tt.repo) + if err != nil { + t.Fatal(err) + } + info, err := r.Latest(ctx) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(info, tt.info) { + t.Errorf("Latest: incorrect info\nhave %+v (origin %+v)\nwant %+v (origin %+v)", info, info.Origin, tt.info, tt.info.Origin) + } + } + } + + for _, tt := range []latestTest{ + { + gitrepo1, + &RevInfo{ + Origin: &Origin{ + VCS: "git", + URL: gitrepo1, + Ref: "HEAD", + Hash: "ede458df7cd0fdca520df19a33158086a8a68e81", + }, + Name: "ede458df7cd0fdca520df19a33158086a8a68e81", + Short: "ede458df7cd0", + Version: "ede458df7cd0fdca520df19a33158086a8a68e81", + Time: time.Date(2018, 4, 17, 19, 43, 22, 0, time.UTC), + Tags: 
[]string{"v1.2.3", "v1.2.4-annotated"}, + }, + }, + { + hgrepo1, + &RevInfo{ + Origin: &Origin{ + VCS: "hg", + URL: hgrepo1, + Hash: "18518c07eb8ed5c80221e997e518cccaa8c0c287", + }, + Name: "18518c07eb8ed5c80221e997e518cccaa8c0c287", + Short: "18518c07eb8e", + Version: "18518c07eb8ed5c80221e997e518cccaa8c0c287", + Time: time.Date(2018, 6, 27, 16, 16, 30, 0, time.UTC), + }, + }, + } { + t.Run(path.Base(tt.repo), runTest(tt)) + if tt.repo == gitrepo1 { + tt.repo = "localGitRepo" + info := *tt.info + tt.info = &info + o := *info.Origin + info.Origin = &o + o.URL = localGitURL(t) + t.Run(path.Base(tt.repo), runTest(tt)) + } + } +} + +func TestReadFile(t *testing.T) { + t.Parallel() + + type readFileTest struct { + repo string + rev string + file string + err string + data string + } + runTest := func(tt readFileTest) func(*testing.T) { + return func(t *testing.T) { + t.Parallel() + ctx := testContext(t) + + r, err := testRepo(ctx, t, tt.repo) + if err != nil { + t.Fatal(err) + } + data, err := r.ReadFile(ctx, tt.rev, tt.file, 100) + if err != nil { + if tt.err == "" { + t.Fatalf("ReadFile: unexpected error %v", err) + } + if !strings.Contains(err.Error(), tt.err) { + t.Fatalf("ReadFile: wrong error %q, want %q", err, tt.err) + } + if len(data) != 0 { + t.Errorf("ReadFile: non-empty data %q with error %v", data, err) + } + return + } + if tt.err != "" { + t.Fatalf("ReadFile: no error, wanted %v", tt.err) + } + if string(data) != tt.data { + t.Errorf("ReadFile: incorrect data\nhave %q\nwant %q", data, tt.data) + } + } + } + + for _, tt := range []readFileTest{ + { + repo: gitrepo1, + rev: "latest", + file: "README", + data: "", + }, + { + repo: gitrepo1, + rev: "v2", + file: "another.txt", + data: "another\n", + }, + { + repo: gitrepo1, + rev: "v2.3.4", + file: "another.txt", + err: fs.ErrNotExist.Error(), + }, + } { + t.Run(path.Base(tt.repo)+"/"+tt.rev+"/"+tt.file, runTest(tt)) + if tt.repo == gitrepo1 { + for _, tt.repo = range altRepos() { + 
t.Run(path.Base(tt.repo)+"/"+tt.rev+"/"+tt.file, runTest(tt)) + } + } + } +} + +type zipFile struct { + name string + size int64 +} + +func TestReadZip(t *testing.T) { + t.Parallel() + + type readZipTest struct { + repo string + rev string + subdir string + err string + files map[string]uint64 + } + runTest := func(tt readZipTest) func(*testing.T) { + return func(t *testing.T) { + t.Parallel() + ctx := testContext(t) + + r, err := testRepo(ctx, t, tt.repo) + if err != nil { + t.Fatal(err) + } + rc, err := r.ReadZip(ctx, tt.rev, tt.subdir, 100000) + if err != nil { + if tt.err == "" { + t.Fatalf("ReadZip: unexpected error %v", err) + } + if !strings.Contains(err.Error(), tt.err) { + t.Fatalf("ReadZip: wrong error %q, want %q", err, tt.err) + } + if rc != nil { + t.Errorf("ReadZip: non-nil io.ReadCloser with error %v", err) + } + return + } + defer rc.Close() + if tt.err != "" { + t.Fatalf("ReadZip: no error, wanted %v", tt.err) + } + zipdata, err := io.ReadAll(rc) + if err != nil { + t.Fatal(err) + } + z, err := zip.NewReader(bytes.NewReader(zipdata), int64(len(zipdata))) + if err != nil { + t.Fatalf("ReadZip: cannot read zip file: %v", err) + } + have := make(map[string]bool) + for _, f := range z.File { + size, ok := tt.files[f.Name] + if !ok { + t.Errorf("ReadZip: unexpected file %s", f.Name) + continue + } + have[f.Name] = true + if size != ^uint64(0) && f.UncompressedSize64 != size { + t.Errorf("ReadZip: file %s has unexpected size %d != %d", f.Name, f.UncompressedSize64, size) + } + } + for name := range tt.files { + if !have[name] { + t.Errorf("ReadZip: missing file %s", name) + } + } + } + } + + for _, tt := range []readZipTest{ + { + repo: gitrepo1, + rev: "v2.3.4", + subdir: "", + files: map[string]uint64{ + "prefix/": 0, + "prefix/README": 0, + "prefix/v2": 3, + }, + }, + { + repo: hgrepo1, + rev: "v2.3.4", + subdir: "", + files: map[string]uint64{ + "prefix/.hg_archival.txt": ^uint64(0), + "prefix/README": 0, + "prefix/v2": 3, + }, + }, + + { + repo: 
gitrepo1, + rev: "v2", + subdir: "", + files: map[string]uint64{ + "prefix/": 0, + "prefix/README": 0, + "prefix/v2": 3, + "prefix/another.txt": 8, + "prefix/foo.txt": 13, + }, + }, + { + repo: hgrepo1, + rev: "v2", + subdir: "", + files: map[string]uint64{ + "prefix/.hg_archival.txt": ^uint64(0), + "prefix/README": 0, + "prefix/v2": 3, + "prefix/another.txt": 8, + "prefix/foo.txt": 13, + }, + }, + + { + repo: gitrepo1, + rev: "v3", + subdir: "", + files: map[string]uint64{ + "prefix/": 0, + "prefix/v3/": 0, + "prefix/v3/sub/": 0, + "prefix/v3/sub/dir/": 0, + "prefix/v3/sub/dir/file.txt": 16, + "prefix/README": 0, + }, + }, + { + repo: hgrepo1, + rev: "v3", + subdir: "", + files: map[string]uint64{ + "prefix/.hg_archival.txt": ^uint64(0), + "prefix/.hgtags": 405, + "prefix/v3/sub/dir/file.txt": 16, + "prefix/README": 0, + }, + }, + + { + repo: gitrepo1, + rev: "v3", + subdir: "v3/sub/dir", + files: map[string]uint64{ + "prefix/": 0, + "prefix/v3/": 0, + "prefix/v3/sub/": 0, + "prefix/v3/sub/dir/": 0, + "prefix/v3/sub/dir/file.txt": 16, + }, + }, + { + repo: hgrepo1, + rev: "v3", + subdir: "v3/sub/dir", + files: map[string]uint64{ + "prefix/v3/sub/dir/file.txt": 16, + }, + }, + + { + repo: gitrepo1, + rev: "v3", + subdir: "v3/sub", + files: map[string]uint64{ + "prefix/": 0, + "prefix/v3/": 0, + "prefix/v3/sub/": 0, + "prefix/v3/sub/dir/": 0, + "prefix/v3/sub/dir/file.txt": 16, + }, + }, + { + repo: hgrepo1, + rev: "v3", + subdir: "v3/sub", + files: map[string]uint64{ + "prefix/v3/sub/dir/file.txt": 16, + }, + }, + + { + repo: gitrepo1, + rev: "aaaaaaaaab", + subdir: "", + err: "unknown revision", + }, + { + repo: hgrepo1, + rev: "aaaaaaaaab", + subdir: "", + err: "unknown revision", + }, + + { + repo: vgotest1, + rev: "submod/v1.0.4", + subdir: "submod", + files: map[string]uint64{ + "prefix/": 0, + "prefix/submod/": 0, + "prefix/submod/go.mod": 53, + "prefix/submod/pkg/": 0, + "prefix/submod/pkg/p.go": 31, + }, + }, + } { + 
t.Run(path.Base(tt.repo)+"/"+tt.rev+"/"+tt.subdir, runTest(tt)) + if tt.repo == gitrepo1 { + tt.repo = "localGitRepo" + t.Run(path.Base(tt.repo)+"/"+tt.rev+"/"+tt.subdir, runTest(tt)) + } + } +} + +var hgmap = map[string]string{ + "HEAD": "41964ddce1180313bdc01d0a39a2813344d6261d", // not tip due to bad hgrepo1 conversion + "9d02800338b8a55be062c838d1f02e0c5780b9eb": "8f49ee7a6ddcdec6f0112d9dca48d4a2e4c3c09e", + "76a00fb249b7f93091bc2c89a789dab1fc1bc26f": "88fde824ec8b41a76baa16b7e84212cee9f3edd0", + "ede458df7cd0fdca520df19a33158086a8a68e81": "41964ddce1180313bdc01d0a39a2813344d6261d", + "97f6aa59c81c623494825b43d39e445566e429a4": "c0cbbfb24c7c3c50c35c7b88e7db777da4ff625d", +} + +func TestStat(t *testing.T) { + t.Parallel() + + type statTest struct { + repo string + rev string + err string + info *RevInfo + } + runTest := func(tt statTest) func(*testing.T) { + return func(t *testing.T) { + t.Parallel() + ctx := testContext(t) + + r, err := testRepo(ctx, t, tt.repo) + if err != nil { + t.Fatal(err) + } + info, err := r.Stat(ctx, tt.rev) + if err != nil { + if tt.err == "" { + t.Fatalf("Stat: unexpected error %v", err) + } + if !strings.Contains(err.Error(), tt.err) { + t.Fatalf("Stat: wrong error %q, want %q", err, tt.err) + } + if info != nil && info.Origin == nil { + t.Errorf("Stat: non-nil info with nil Origin with error %q", err) + } + return + } + info.Origin = nil // TestLatest and ../../../testdata/script/reuse_git.txt test Origin well enough + if !reflect.DeepEqual(info, tt.info) { + t.Errorf("Stat: incorrect info\nhave %+v\nwant %+v", *info, *tt.info) + } + } + } + + for _, tt := range []statTest{ + { + repo: gitrepo1, + rev: "HEAD", + info: &RevInfo{ + Name: "ede458df7cd0fdca520df19a33158086a8a68e81", + Short: "ede458df7cd0", + Version: "ede458df7cd0fdca520df19a33158086a8a68e81", + Time: time.Date(2018, 4, 17, 19, 43, 22, 0, time.UTC), + Tags: []string{"v1.2.3", "v1.2.4-annotated"}, + }, + }, + { + repo: gitrepo1, + rev: "v2", // branch + info: &RevInfo{ 
+ Name: "9d02800338b8a55be062c838d1f02e0c5780b9eb", + Short: "9d02800338b8", + Version: "9d02800338b8a55be062c838d1f02e0c5780b9eb", + Time: time.Date(2018, 4, 17, 20, 00, 32, 0, time.UTC), + Tags: []string{"v2.0.2"}, + }, + }, + { + repo: gitrepo1, + rev: "v2.3.4", // badly-named branch (semver should be a tag) + info: &RevInfo{ + Name: "76a00fb249b7f93091bc2c89a789dab1fc1bc26f", + Short: "76a00fb249b7", + Version: "76a00fb249b7f93091bc2c89a789dab1fc1bc26f", + Time: time.Date(2018, 4, 17, 19, 45, 48, 0, time.UTC), + Tags: []string{"v2.0.1", "v2.3"}, + }, + }, + { + repo: gitrepo1, + rev: "v2.3", // badly-named tag (we only respect full semver v2.3.0) + info: &RevInfo{ + Name: "76a00fb249b7f93091bc2c89a789dab1fc1bc26f", + Short: "76a00fb249b7", + Version: "v2.3", + Time: time.Date(2018, 4, 17, 19, 45, 48, 0, time.UTC), + Tags: []string{"v2.0.1", "v2.3"}, + }, + }, + { + repo: gitrepo1, + rev: "v1.2.3", // tag + info: &RevInfo{ + Name: "ede458df7cd0fdca520df19a33158086a8a68e81", + Short: "ede458df7cd0", + Version: "v1.2.3", + Time: time.Date(2018, 4, 17, 19, 43, 22, 0, time.UTC), + Tags: []string{"v1.2.3", "v1.2.4-annotated"}, + }, + }, + { + repo: gitrepo1, + rev: "ede458df", // hash prefix in refs + info: &RevInfo{ + Name: "ede458df7cd0fdca520df19a33158086a8a68e81", + Short: "ede458df7cd0", + Version: "ede458df7cd0fdca520df19a33158086a8a68e81", + Time: time.Date(2018, 4, 17, 19, 43, 22, 0, time.UTC), + Tags: []string{"v1.2.3", "v1.2.4-annotated"}, + }, + }, + { + repo: gitrepo1, + rev: "97f6aa59", // hash prefix not in refs + info: &RevInfo{ + Name: "97f6aa59c81c623494825b43d39e445566e429a4", + Short: "97f6aa59c81c", + Version: "97f6aa59c81c623494825b43d39e445566e429a4", + Time: time.Date(2018, 4, 17, 20, 0, 19, 0, time.UTC), + }, + }, + { + repo: gitrepo1, + rev: "v1.2.4-annotated", // annotated tag uses unwrapped commit hash + info: &RevInfo{ + Name: "ede458df7cd0fdca520df19a33158086a8a68e81", + Short: "ede458df7cd0", + Version: "v1.2.4-annotated", + Time: 
time.Date(2018, 4, 17, 19, 43, 22, 0, time.UTC), + Tags: []string{"v1.2.3", "v1.2.4-annotated"}, + }, + }, + { + repo: gitrepo1, + rev: "aaaaaaaaab", + err: "unknown revision", + }, + } { + t.Run(path.Base(tt.repo)+"/"+tt.rev, runTest(tt)) + if tt.repo == gitrepo1 { + for _, tt.repo = range altRepos() { + old := tt + var m map[string]string + if tt.repo == hgrepo1 { + m = hgmap + } + if tt.info != nil { + info := *tt.info + tt.info = &info + tt.info.Name = remap(tt.info.Name, m) + tt.info.Version = remap(tt.info.Version, m) + tt.info.Short = remap(tt.info.Short, m) + } + tt.rev = remap(tt.rev, m) + t.Run(path.Base(tt.repo)+"/"+tt.rev, runTest(tt)) + tt = old + } + } + } +} + +func remap(name string, m map[string]string) string { + if m[name] != "" { + return m[name] + } + if AllHex(name) { + for k, v := range m { + if strings.HasPrefix(k, name) { + return v[:len(name)] + } + } + } + return name +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/codehost/shell.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/codehost/shell.go new file mode 100644 index 0000000000000000000000000000000000000000..eaa01950b95ef395e57ac6f8d85036c44b6ff911 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/codehost/shell.go @@ -0,0 +1,141 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +// Interactive debugging shell for codehost.Repo implementations. 
+ +package main + +import ( + "archive/zip" + "bufio" + "bytes" + "flag" + "fmt" + "io" + "log" + "os" + "strings" + "time" + + "cmd/go/internal/cfg" + "cmd/go/internal/modfetch/codehost" +) + +func usage() { + fmt.Fprintf(os.Stderr, "usage: go run shell.go vcs remote\n") + os.Exit(2) +} + +func main() { + cfg.GOMODCACHE = "/tmp/vcswork" + log.SetFlags(0) + log.SetPrefix("shell: ") + flag.Usage = usage + flag.Parse() + if flag.NArg() != 2 { + usage() + } + + repo, err := codehost.NewRepo(flag.Arg(0), flag.Arg(1)) + if err != nil { + log.Fatal(err) + } + + b := bufio.NewReader(os.Stdin) + for { + fmt.Fprintf(os.Stderr, ">>> ") + line, err := b.ReadString('\n') + if err != nil { + log.Fatal(err) + } + f := strings.Fields(line) + if len(f) == 0 { + continue + } + switch f[0] { + default: + fmt.Fprintf(os.Stderr, "?unknown command\n") + continue + case "tags": + prefix := "" + if len(f) == 2 { + prefix = f[1] + } + if len(f) > 2 { + fmt.Fprintf(os.Stderr, "?usage: tags [prefix]\n") + continue + } + tags, err := repo.Tags(prefix) + if err != nil { + fmt.Fprintf(os.Stderr, "?%s\n", err) + continue + } + for _, tag := range tags { + fmt.Printf("%s\n", tag) + } + + case "stat": + if len(f) != 2 { + fmt.Fprintf(os.Stderr, "?usage: stat rev\n") + continue + } + info, err := repo.Stat(f[1]) + if err != nil { + fmt.Fprintf(os.Stderr, "?%s\n", err) + continue + } + fmt.Printf("name=%s short=%s version=%s time=%s\n", info.Name, info.Short, info.Version, info.Time.UTC().Format(time.RFC3339)) + + case "read": + if len(f) != 3 { + fmt.Fprintf(os.Stderr, "?usage: read rev file\n") + continue + } + data, err := repo.ReadFile(f[1], f[2], 10<<20) + if err != nil { + fmt.Fprintf(os.Stderr, "?%s\n", err) + continue + } + os.Stdout.Write(data) + + case "zip": + if len(f) != 4 { + fmt.Fprintf(os.Stderr, "?usage: zip rev subdir output\n") + continue + } + subdir := f[2] + if subdir == "-" { + subdir = "" + } + rc, err := repo.ReadZip(f[1], subdir, 10<<20) + if err != nil { + 
fmt.Fprintf(os.Stderr, "?%s\n", err) + continue + } + data, err := io.ReadAll(rc) + rc.Close() + if err != nil { + fmt.Fprintf(os.Stderr, "?%s\n", err) + continue + } + + if f[3] != "-" { + if err := os.WriteFile(f[3], data, 0666); err != nil { + fmt.Fprintf(os.Stderr, "?%s\n", err) + continue + } + } + z, err := zip.NewReader(bytes.NewReader(data), int64(len(data))) + if err != nil { + fmt.Fprintf(os.Stderr, "?%s\n", err) + continue + } + for _, f := range z.File { + fmt.Printf("%s %d\n", f.Name, f.UncompressedSize64) + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/codehost/svn.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/codehost/svn.go new file mode 100644 index 0000000000000000000000000000000000000000..9c1c10097bb437486f394b5a0304d487732edf78 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/codehost/svn.go @@ -0,0 +1,168 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package codehost + +import ( + "archive/zip" + "context" + "encoding/xml" + "fmt" + "io" + "os" + "path" + "path/filepath" + "strconv" + "time" + + "cmd/go/internal/base" +) + +func svnParseStat(rev, out string) (*RevInfo, error) { + var log struct { + Logentry struct { + Revision int64 `xml:"revision,attr"` + Date string `xml:"date"` + } `xml:"logentry"` + } + if err := xml.Unmarshal([]byte(out), &log); err != nil { + return nil, vcsErrorf("unexpected response from svn log --xml: %v\n%s", err, out) + } + + t, err := time.Parse(time.RFC3339, log.Logentry.Date) + if err != nil { + return nil, vcsErrorf("unexpected response from svn log --xml: %v\n%s", err, out) + } + + info := &RevInfo{ + Name: strconv.FormatInt(log.Logentry.Revision, 10), + Short: fmt.Sprintf("%012d", log.Logentry.Revision), + Time: t.UTC(), + Version: rev, + } + return info, nil +} + +func svnReadZip(ctx context.Context, dst io.Writer, workDir, rev, subdir, remote string) (err error) { + // The subversion CLI doesn't provide a command to write the repository + // directly to an archive, so we need to export it to the local filesystem + // instead. Unfortunately, the local filesystem might apply arbitrary + // normalization to the filenames, so we need to obtain those directly. + // + // 'svn export' prints the filenames as they are written, but from reading the + // svn source code (as of revision 1868933), those filenames are encoded using + // the system locale rather than preserved byte-for-byte from the origin. For + // our purposes, that won't do, but we don't want to go mucking around with + // the user's locale settings either — that could impact error messages, and + // we don't know what locales the user has available or what LC_* variables + // their platform supports. + // + // Instead, we'll do a two-pass export: first we'll run 'svn list' to get the + // canonical filenames, then we'll 'svn export' and look for those filenames + // in the local filesystem. 
(If there is an encoding problem at that point, we + // would probably reject the resulting module anyway.) + + remotePath := remote + if subdir != "" { + remotePath += "/" + subdir + } + + release, err := base.AcquireNet() + if err != nil { + return err + } + out, err := Run(ctx, workDir, []string{ + "svn", "list", + "--non-interactive", + "--xml", + "--incremental", + "--recursive", + "--revision", rev, + "--", remotePath, + }) + release() + if err != nil { + return err + } + + type listEntry struct { + Kind string `xml:"kind,attr"` + Name string `xml:"name"` + Size int64 `xml:"size"` + } + var list struct { + Entries []listEntry `xml:"entry"` + } + if err := xml.Unmarshal(out, &list); err != nil { + return vcsErrorf("unexpected response from svn list --xml: %v\n%s", err, out) + } + + exportDir := filepath.Join(workDir, "export") + // Remove any existing contents from a previous (failed) run. + if err := os.RemoveAll(exportDir); err != nil { + return err + } + defer os.RemoveAll(exportDir) // best-effort + + release, err = base.AcquireNet() + if err != nil { + return err + } + _, err = Run(ctx, workDir, []string{ + "svn", "export", + "--non-interactive", + "--quiet", + + // Suppress any platform- or host-dependent transformations. + "--native-eol", "LF", + "--ignore-externals", + "--ignore-keywords", + + "--revision", rev, + "--", remotePath, + exportDir, + }) + release() + if err != nil { + return err + } + + // Scrape the exported files out of the filesystem and encode them in the zipfile. + + // “All files in the zip file are expected to be + // nested in a single top-level directory, whose name is not specified.” + // We'll (arbitrarily) choose the base of the remote path. 
+ basePath := path.Join(path.Base(remote), subdir) + + zw := zip.NewWriter(dst) + for _, e := range list.Entries { + if e.Kind != "file" { + continue + } + + zf, err := zw.Create(path.Join(basePath, e.Name)) + if err != nil { + return err + } + + f, err := os.Open(filepath.Join(exportDir, e.Name)) + if err != nil { + if os.IsNotExist(err) { + return vcsErrorf("file reported by 'svn list', but not written by 'svn export': %s", e.Name) + } + return fmt.Errorf("error opening file created by 'svn export': %v", err) + } + + n, err := io.Copy(zf, f) + f.Close() + if err != nil { + return err + } + if n != e.Size { + return vcsErrorf("file size differs between 'svn list' and 'svn export': file %s listed as %v bytes, but exported as %v bytes", e.Name, e.Size, n) + } + } + + return zw.Close() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/codehost/vcs.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/codehost/vcs.go new file mode 100644 index 0000000000000000000000000000000000000000..5bd100556b59c762c27766cb595c25be8fc3ebbd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/codehost/vcs.go @@ -0,0 +1,644 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package codehost + +import ( + "context" + "errors" + "fmt" + "internal/lazyregexp" + "io" + "io/fs" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" + + "cmd/go/internal/base" + "cmd/go/internal/lockedfile" + "cmd/go/internal/par" + "cmd/go/internal/str" +) + +// A VCSError indicates an error using a version control system. +// The implication of a VCSError is that we know definitively where +// to get the code, but we can't access it due to the error. +// The caller should report this error instead of continuing to probe +// other possible module paths. 
+// +// TODO(golang.org/issue/31730): See if we can invert this. (Return a +// distinguished error for “repo not found” and treat everything else +// as terminal.) +type VCSError struct { + Err error +} + +func (e *VCSError) Error() string { return e.Err.Error() } + +func (e *VCSError) Unwrap() error { return e.Err } + +func vcsErrorf(format string, a ...any) error { + return &VCSError{Err: fmt.Errorf(format, a...)} +} + +type vcsCacheKey struct { + vcs string + remote string +} + +func NewRepo(ctx context.Context, vcs, remote string) (Repo, error) { + return vcsRepoCache.Do(vcsCacheKey{vcs, remote}, func() (Repo, error) { + repo, err := newVCSRepo(ctx, vcs, remote) + if err != nil { + return nil, &VCSError{err} + } + return repo, nil + }) +} + +var vcsRepoCache par.ErrCache[vcsCacheKey, Repo] + +type vcsRepo struct { + mu lockedfile.Mutex // protects all commands, so we don't have to decide which are safe on a per-VCS basis + + remote string + cmd *vcsCmd + dir string + + tagsOnce sync.Once + tags map[string]bool + + branchesOnce sync.Once + branches map[string]bool + + fetchOnce sync.Once + fetchErr error +} + +func newVCSRepo(ctx context.Context, vcs, remote string) (Repo, error) { + if vcs == "git" { + return newGitRepo(ctx, remote, false) + } + cmd := vcsCmds[vcs] + if cmd == nil { + return nil, fmt.Errorf("unknown vcs: %s %s", vcs, remote) + } + if !strings.Contains(remote, "://") { + return nil, fmt.Errorf("invalid vcs remote: %s %s", vcs, remote) + } + + r := &vcsRepo{remote: remote, cmd: cmd} + var err error + r.dir, r.mu.Path, err = WorkDir(ctx, vcsWorkDirType+vcs, r.remote) + if err != nil { + return nil, err + } + + if cmd.init == nil { + return r, nil + } + + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() + + if _, err := os.Stat(filepath.Join(r.dir, "."+vcs)); err != nil { + release, err := base.AcquireNet() + if err != nil { + return nil, err + } + _, err = Run(ctx, r.dir, cmd.init(r.remote)) + release() + + if 
err != nil { + os.RemoveAll(r.dir) + return nil, err + } + } + return r, nil +} + +const vcsWorkDirType = "vcs1." + +type vcsCmd struct { + vcs string // vcs name "hg" + init func(remote string) []string // cmd to init repo to track remote + tags func(remote string) []string // cmd to list local tags + tagRE *lazyregexp.Regexp // regexp to extract tag names from output of tags cmd + branches func(remote string) []string // cmd to list local branches + branchRE *lazyregexp.Regexp // regexp to extract branch names from output of tags cmd + badLocalRevRE *lazyregexp.Regexp // regexp of names that must not be served out of local cache without doing fetch first + statLocal func(rev, remote string) []string // cmd to stat local rev + parseStat func(rev, out string) (*RevInfo, error) // cmd to parse output of statLocal + fetch []string // cmd to fetch everything from remote + latest string // name of latest commit on remote (tip, HEAD, etc) + readFile func(rev, file, remote string) []string // cmd to read rev's file + readZip func(rev, subdir, remote, target string) []string // cmd to read rev's subdir as zip file + doReadZip func(ctx context.Context, dst io.Writer, workDir, rev, subdir, remote string) error // arbitrary function to read rev's subdir as zip file +} + +var re = lazyregexp.New + +var vcsCmds = map[string]*vcsCmd{ + "hg": { + vcs: "hg", + init: func(remote string) []string { + return []string{"hg", "clone", "-U", "--", remote, "."} + }, + tags: func(remote string) []string { + return []string{"hg", "tags", "-q"} + }, + tagRE: re(`(?m)^[^\n]+$`), + branches: func(remote string) []string { + return []string{"hg", "branches", "-c", "-q"} + }, + branchRE: re(`(?m)^[^\n]+$`), + badLocalRevRE: re(`(?m)^(tip)$`), + statLocal: func(rev, remote string) []string { + return []string{"hg", "log", "-l1", "-r", rev, "--template", "{node} {date|hgdate} {tags}"} + }, + parseStat: hgParseStat, + fetch: []string{"hg", "pull", "-f"}, + latest: "tip", + readFile: func(rev, 
file, remote string) []string { + return []string{"hg", "cat", "-r", rev, file} + }, + readZip: func(rev, subdir, remote, target string) []string { + pattern := []string{} + if subdir != "" { + pattern = []string{"-I", subdir + "/**"} + } + return str.StringList("hg", "archive", "-t", "zip", "--no-decode", "-r", rev, "--prefix=prefix/", pattern, "--", target) + }, + }, + + "svn": { + vcs: "svn", + init: nil, // no local checkout + tags: func(remote string) []string { + return []string{"svn", "list", "--", strings.TrimSuffix(remote, "/trunk") + "/tags"} + }, + tagRE: re(`(?m)^(.*?)/?$`), + statLocal: func(rev, remote string) []string { + suffix := "@" + rev + if rev == "latest" { + suffix = "" + } + return []string{"svn", "log", "-l1", "--xml", "--", remote + suffix} + }, + parseStat: svnParseStat, + latest: "latest", + readFile: func(rev, file, remote string) []string { + return []string{"svn", "cat", "--", remote + "/" + file + "@" + rev} + }, + doReadZip: svnReadZip, + }, + + "bzr": { + vcs: "bzr", + init: func(remote string) []string { + return []string{"bzr", "branch", "--use-existing-dir", "--", remote, "."} + }, + fetch: []string{ + "bzr", "pull", "--overwrite-tags", + }, + tags: func(remote string) []string { + return []string{"bzr", "tags"} + }, + tagRE: re(`(?m)^\S+`), + badLocalRevRE: re(`^revno:-`), + statLocal: func(rev, remote string) []string { + return []string{"bzr", "log", "-l1", "--long", "--show-ids", "-r", rev} + }, + parseStat: bzrParseStat, + latest: "revno:-1", + readFile: func(rev, file, remote string) []string { + return []string{"bzr", "cat", "-r", rev, file} + }, + readZip: func(rev, subdir, remote, target string) []string { + extra := []string{} + if subdir != "" { + extra = []string{"./" + subdir} + } + return str.StringList("bzr", "export", "--format=zip", "-r", rev, "--root=prefix/", "--", target, extra) + }, + }, + + "fossil": { + vcs: "fossil", + init: func(remote string) []string { + return []string{"fossil", "clone", "--", remote, 
".fossil"} + }, + fetch: []string{"fossil", "pull", "-R", ".fossil"}, + tags: func(remote string) []string { + return []string{"fossil", "tag", "-R", ".fossil", "list"} + }, + tagRE: re(`XXXTODO`), + statLocal: func(rev, remote string) []string { + return []string{"fossil", "info", "-R", ".fossil", rev} + }, + parseStat: fossilParseStat, + latest: "trunk", + readFile: func(rev, file, remote string) []string { + return []string{"fossil", "cat", "-R", ".fossil", "-r", rev, file} + }, + readZip: func(rev, subdir, remote, target string) []string { + extra := []string{} + if subdir != "" && !strings.ContainsAny(subdir, "*?[],") { + extra = []string{"--include", subdir} + } + // Note that vcsRepo.ReadZip below rewrites this command + // to run in a different directory, to work around a fossil bug. + return str.StringList("fossil", "zip", "-R", ".fossil", "--name", "prefix", extra, "--", rev, target) + }, + }, +} + +func (r *vcsRepo) loadTags(ctx context.Context) { + out, err := Run(ctx, r.dir, r.cmd.tags(r.remote)) + if err != nil { + return + } + + // Run tag-listing command and extract tags. 
+ r.tags = make(map[string]bool) + for _, tag := range r.cmd.tagRE.FindAllString(string(out), -1) { + if r.cmd.badLocalRevRE != nil && r.cmd.badLocalRevRE.MatchString(tag) { + continue + } + r.tags[tag] = true + } +} + +func (r *vcsRepo) loadBranches(ctx context.Context) { + if r.cmd.branches == nil { + return + } + + out, err := Run(ctx, r.dir, r.cmd.branches(r.remote)) + if err != nil { + return + } + + r.branches = make(map[string]bool) + for _, branch := range r.cmd.branchRE.FindAllString(string(out), -1) { + if r.cmd.badLocalRevRE != nil && r.cmd.badLocalRevRE.MatchString(branch) { + continue + } + r.branches[branch] = true + } +} + +func (r *vcsRepo) CheckReuse(ctx context.Context, old *Origin, subdir string) error { + return fmt.Errorf("vcs %s: CheckReuse: %w", r.cmd.vcs, errors.ErrUnsupported) +} + +func (r *vcsRepo) Tags(ctx context.Context, prefix string) (*Tags, error) { + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() + + r.tagsOnce.Do(func() { r.loadTags(ctx) }) + tags := &Tags{ + // None of the other VCS provide a reasonable way to compute TagSum + // without downloading the whole repo, so we only include VCS and URL + // in the Origin. 
+ Origin: &Origin{ + VCS: r.cmd.vcs, + URL: r.remote, + }, + List: []Tag{}, + } + for tag := range r.tags { + if strings.HasPrefix(tag, prefix) { + tags.List = append(tags.List, Tag{tag, ""}) + } + } + sort.Slice(tags.List, func(i, j int) bool { + return tags.List[i].Name < tags.List[j].Name + }) + return tags, nil +} + +func (r *vcsRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) { + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() + + if rev == "latest" { + rev = r.cmd.latest + } + r.branchesOnce.Do(func() { r.loadBranches(ctx) }) + revOK := (r.cmd.badLocalRevRE == nil || !r.cmd.badLocalRevRE.MatchString(rev)) && !r.branches[rev] + if revOK { + if info, err := r.statLocal(ctx, rev); err == nil { + return info, nil + } + } + + r.fetchOnce.Do(func() { r.fetch(ctx) }) + if r.fetchErr != nil { + return nil, r.fetchErr + } + info, err := r.statLocal(ctx, rev) + if err != nil { + return nil, err + } + if !revOK { + info.Version = info.Name + } + return info, nil +} + +func (r *vcsRepo) fetch(ctx context.Context) { + if len(r.cmd.fetch) > 0 { + release, err := base.AcquireNet() + if err != nil { + r.fetchErr = err + return + } + _, r.fetchErr = Run(ctx, r.dir, r.cmd.fetch) + release() + } +} + +func (r *vcsRepo) statLocal(ctx context.Context, rev string) (*RevInfo, error) { + out, err := Run(ctx, r.dir, r.cmd.statLocal(rev, r.remote)) + if err != nil { + return nil, &UnknownRevisionError{Rev: rev} + } + info, err := r.cmd.parseStat(rev, string(out)) + if err != nil { + return nil, err + } + if info.Origin == nil { + info.Origin = new(Origin) + } + info.Origin.VCS = r.cmd.vcs + info.Origin.URL = r.remote + return info, nil +} + +func (r *vcsRepo) Latest(ctx context.Context) (*RevInfo, error) { + return r.Stat(ctx, "latest") +} + +func (r *vcsRepo) ReadFile(ctx context.Context, rev, file string, maxSize int64) ([]byte, error) { + if rev == "latest" { + rev = r.cmd.latest + } + _, err := r.Stat(ctx, rev) // download rev into 
local repo + if err != nil { + return nil, err + } + + // r.Stat acquires r.mu, so lock after that. + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() + + out, err := Run(ctx, r.dir, r.cmd.readFile(rev, file, r.remote)) + if err != nil { + return nil, fs.ErrNotExist + } + return out, nil +} + +func (r *vcsRepo) RecentTag(ctx context.Context, rev, prefix string, allowed func(string) bool) (tag string, err error) { + // We don't technically need to lock here since we're returning an error + // uncondititonally, but doing so anyway will help to avoid baking in + // lock-inversion bugs. + unlock, err := r.mu.Lock() + if err != nil { + return "", err + } + defer unlock() + + return "", vcsErrorf("vcs %s: RecentTag: %w", r.cmd.vcs, errors.ErrUnsupported) +} + +func (r *vcsRepo) DescendsFrom(ctx context.Context, rev, tag string) (bool, error) { + unlock, err := r.mu.Lock() + if err != nil { + return false, err + } + defer unlock() + + return false, vcsErrorf("vcs %s: DescendsFrom: %w", r.cmd.vcs, errors.ErrUnsupported) +} + +func (r *vcsRepo) ReadZip(ctx context.Context, rev, subdir string, maxSize int64) (zip io.ReadCloser, err error) { + if r.cmd.readZip == nil && r.cmd.doReadZip == nil { + return nil, vcsErrorf("vcs %s: ReadZip: %w", r.cmd.vcs, errors.ErrUnsupported) + } + + unlock, err := r.mu.Lock() + if err != nil { + return nil, err + } + defer unlock() + + if rev == "latest" { + rev = r.cmd.latest + } + f, err := os.CreateTemp("", "go-readzip-*.zip") + if err != nil { + return nil, err + } + if r.cmd.doReadZip != nil { + lw := &limitedWriter{ + W: f, + N: maxSize, + ErrLimitReached: errors.New("ReadZip: encoded file exceeds allowed size"), + } + err = r.cmd.doReadZip(ctx, lw, r.dir, rev, subdir, r.remote) + if err == nil { + _, err = f.Seek(0, io.SeekStart) + } + } else if r.cmd.vcs == "fossil" { + // If you run + // fossil zip -R .fossil --name prefix trunk /tmp/x.zip + // fossil fails with "unable to create directory /tmp" 
[sic]. + // Change the command to run in /tmp instead, + // replacing the -R argument with an absolute path. + args := r.cmd.readZip(rev, subdir, r.remote, filepath.Base(f.Name())) + for i := range args { + if args[i] == ".fossil" { + args[i] = filepath.Join(r.dir, ".fossil") + } + } + _, err = Run(ctx, filepath.Dir(f.Name()), args) + } else { + _, err = Run(ctx, r.dir, r.cmd.readZip(rev, subdir, r.remote, f.Name())) + } + if err != nil { + f.Close() + os.Remove(f.Name()) + return nil, err + } + return &deleteCloser{f}, nil +} + +// deleteCloser is a file that gets deleted on Close. +type deleteCloser struct { + *os.File +} + +func (d *deleteCloser) Close() error { + defer os.Remove(d.File.Name()) + return d.File.Close() +} + +func hgParseStat(rev, out string) (*RevInfo, error) { + f := strings.Fields(out) + if len(f) < 3 { + return nil, vcsErrorf("unexpected response from hg log: %q", out) + } + hash := f[0] + version := rev + if strings.HasPrefix(hash, version) { + version = hash // extend to full hash + } + t, err := strconv.ParseInt(f[1], 10, 64) + if err != nil { + return nil, vcsErrorf("invalid time from hg log: %q", out) + } + + var tags []string + for _, tag := range f[3:] { + if tag != "tip" { + tags = append(tags, tag) + } + } + sort.Strings(tags) + + info := &RevInfo{ + Origin: &Origin{ + Hash: hash, + }, + Name: hash, + Short: ShortenSHA1(hash), + Time: time.Unix(t, 0).UTC(), + Version: version, + Tags: tags, + } + return info, nil +} + +func bzrParseStat(rev, out string) (*RevInfo, error) { + var revno int64 + var tm time.Time + for _, line := range strings.Split(out, "\n") { + if line == "" || line[0] == ' ' || line[0] == '\t' { + // End of header, start of commit message. + break + } + if line[0] == '-' { + continue + } + before, after, found := strings.Cut(line, ":") + if !found { + // End of header, start of commit message. 
+ break + } + key, val := before, strings.TrimSpace(after) + switch key { + case "revno": + if j := strings.Index(val, " "); j >= 0 { + val = val[:j] + } + i, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return nil, vcsErrorf("unexpected revno from bzr log: %q", line) + } + revno = i + case "timestamp": + j := strings.Index(val, " ") + if j < 0 { + return nil, vcsErrorf("unexpected timestamp from bzr log: %q", line) + } + t, err := time.Parse("2006-01-02 15:04:05 -0700", val[j+1:]) + if err != nil { + return nil, vcsErrorf("unexpected timestamp from bzr log: %q", line) + } + tm = t.UTC() + } + } + if revno == 0 || tm.IsZero() { + return nil, vcsErrorf("unexpected response from bzr log: %q", out) + } + + info := &RevInfo{ + Name: strconv.FormatInt(revno, 10), + Short: fmt.Sprintf("%012d", revno), + Time: tm, + Version: rev, + } + return info, nil +} + +func fossilParseStat(rev, out string) (*RevInfo, error) { + for _, line := range strings.Split(out, "\n") { + if strings.HasPrefix(line, "uuid:") || strings.HasPrefix(line, "hash:") { + f := strings.Fields(line) + if len(f) != 5 || len(f[1]) != 40 || f[4] != "UTC" { + return nil, vcsErrorf("unexpected response from fossil info: %q", line) + } + t, err := time.Parse(time.DateTime, f[2]+" "+f[3]) + if err != nil { + return nil, vcsErrorf("unexpected response from fossil info: %q", line) + } + hash := f[1] + version := rev + if strings.HasPrefix(hash, version) { + version = hash // extend to full hash + } + info := &RevInfo{ + Origin: &Origin{ + Hash: hash, + }, + Name: hash, + Short: ShortenSHA1(hash), + Time: t, + Version: version, + } + return info, nil + } + } + return nil, vcsErrorf("unexpected response from fossil info: %q", out) +} + +type limitedWriter struct { + W io.Writer + N int64 + ErrLimitReached error +} + +func (l *limitedWriter) Write(p []byte) (n int, err error) { + if l.N > 0 { + max := len(p) + if l.N < int64(max) { + max = int(l.N) + } + n, err = l.W.Write(p[:max]) + l.N -= int64(n) + if 
err != nil || n >= len(p) { + return n, err + } + } + + return n, l.ErrLimitReached +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/coderepo.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/coderepo.go new file mode 100644 index 0000000000000000000000000000000000000000..75c34e9fcb538f652f2915c328f3d9132ffa1d28 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/coderepo.go @@ -0,0 +1,1215 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modfetch + +import ( + "archive/zip" + "bytes" + "context" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path" + "path/filepath" + "sort" + "strings" + "time" + + "cmd/go/internal/gover" + "cmd/go/internal/modfetch/codehost" + + "golang.org/x/mod/modfile" + "golang.org/x/mod/module" + "golang.org/x/mod/semver" + modzip "golang.org/x/mod/zip" +) + +// A codeRepo implements modfetch.Repo using an underlying codehost.Repo. +type codeRepo struct { + modPath string + + // code is the repository containing this module. + code codehost.Repo + // codeRoot is the import path at the root of code. + codeRoot string + // codeDir is the directory (relative to root) at which we expect to find the module. + // If pathMajor is non-empty and codeRoot is not the full modPath, + // then we look in both codeDir and codeDir/pathMajor[1:]. + codeDir string + + // pathMajor is the suffix of modPath that indicates its major version, + // or the empty string if modPath is at major version 0 or 1. + // + // pathMajor is typically of the form "/vN", but possibly ".vN", or + // ".vN-unstable" for modules resolved using gopkg.in. + pathMajor string + // pathPrefix is the prefix of modPath that excludes pathMajor. + // It is used only for logging. 
+ pathPrefix string + + // pseudoMajor is the major version prefix to require when generating + // pseudo-versions for this module, derived from the module path. pseudoMajor + // is empty if the module path does not include a version suffix (that is, + // accepts either v0 or v1). + pseudoMajor string +} + +// newCodeRepo returns a Repo that reads the source code for the module with the +// given path, from the repo stored in code, with the root of the repo +// containing the path given by codeRoot. +func newCodeRepo(code codehost.Repo, codeRoot, path string) (Repo, error) { + if !hasPathPrefix(path, codeRoot) { + return nil, fmt.Errorf("mismatched repo: found %s for %s", codeRoot, path) + } + pathPrefix, pathMajor, ok := module.SplitPathVersion(path) + if !ok { + return nil, fmt.Errorf("invalid module path %q", path) + } + if codeRoot == path { + pathPrefix = path + } + pseudoMajor := module.PathMajorPrefix(pathMajor) + + // Compute codeDir = bar, the subdirectory within the repo + // corresponding to the module root. + // + // At this point we might have: + // path = github.com/rsc/foo/bar/v2 + // codeRoot = github.com/rsc/foo + // pathPrefix = github.com/rsc/foo/bar + // pathMajor = /v2 + // pseudoMajor = v2 + // + // which gives + // codeDir = bar + // + // We know that pathPrefix is a prefix of path, and codeRoot is a prefix of + // path, but codeRoot may or may not be a prefix of pathPrefix, because + // codeRoot may be the entire path (in which case codeDir should be empty). + // That occurs in two situations. + // + // One is when a go-import meta tag resolves the complete module path, + // including the pathMajor suffix: + // path = nanomsg.org/go/mangos/v2 + // codeRoot = nanomsg.org/go/mangos/v2 + // pathPrefix = nanomsg.org/go/mangos + // pathMajor = /v2 + // pseudoMajor = v2 + // + // The other is similar: for gopkg.in only, the major version is encoded + // with a dot rather than a slash, and thus can't be in a subdirectory. 
+ // path = gopkg.in/yaml.v2 + // codeRoot = gopkg.in/yaml.v2 + // pathPrefix = gopkg.in/yaml + // pathMajor = .v2 + // pseudoMajor = v2 + // + codeDir := "" + if codeRoot != path { + if !hasPathPrefix(pathPrefix, codeRoot) { + return nil, fmt.Errorf("repository rooted at %s cannot contain module %s", codeRoot, path) + } + codeDir = strings.Trim(pathPrefix[len(codeRoot):], "/") + } + + r := &codeRepo{ + modPath: path, + code: code, + codeRoot: codeRoot, + codeDir: codeDir, + pathPrefix: pathPrefix, + pathMajor: pathMajor, + pseudoMajor: pseudoMajor, + } + + return r, nil +} + +func (r *codeRepo) ModulePath() string { + return r.modPath +} + +func (r *codeRepo) CheckReuse(ctx context.Context, old *codehost.Origin) error { + return r.code.CheckReuse(ctx, old, r.codeDir) +} + +func (r *codeRepo) Versions(ctx context.Context, prefix string) (*Versions, error) { + // Special case: gopkg.in/macaroon-bakery.v2-unstable + // does not use the v2 tags (those are for macaroon-bakery.v2). + // It has no possible tags at all. + if strings.HasPrefix(r.modPath, "gopkg.in/") && strings.HasSuffix(r.modPath, "-unstable") { + return &Versions{}, nil + } + + p := prefix + if r.codeDir != "" { + p = r.codeDir + "/" + p + } + tags, err := r.code.Tags(ctx, p) + if err != nil { + return nil, &module.ModuleError{ + Path: r.modPath, + Err: err, + } + } + if tags.Origin != nil { + tags.Origin.Subdir = r.codeDir + } + + var list, incompatible []string + for _, tag := range tags.List { + if !strings.HasPrefix(tag.Name, p) { + continue + } + v := tag.Name + if r.codeDir != "" { + v = v[len(r.codeDir)+1:] + } + // Note: ./codehost/codehost.go's isOriginTag knows about these conditions too. + // If these are relaxed, isOriginTag will need to be relaxed as well. + if v == "" || v != semver.Canonical(v) { + // Ignore non-canonical tags: Stat rewrites those to canonical + // pseudo-versions. 
Note that we compare against semver.Canonical here + // instead of module.CanonicalVersion: revToRev strips "+incompatible" + // suffixes before looking up tags, so a tag like "v2.0.0+incompatible" + // would not resolve at all. (The Go version string "v2.0.0+incompatible" + // refers to the "v2.0.0" version tag, which we handle below.) + continue + } + if module.IsPseudoVersion(v) { + // Ignore tags that look like pseudo-versions: Stat rewrites those + // unambiguously to the underlying commit, and tagToVersion drops them. + continue + } + + if err := module.CheckPathMajor(v, r.pathMajor); err != nil { + if r.codeDir == "" && r.pathMajor == "" && semver.Major(v) > "v1" { + incompatible = append(incompatible, v) + } + continue + } + + list = append(list, v) + } + semver.Sort(list) + semver.Sort(incompatible) + + return r.appendIncompatibleVersions(ctx, tags.Origin, list, incompatible) +} + +// appendIncompatibleVersions appends "+incompatible" versions to list if +// appropriate, returning the final list. +// +// The incompatible list contains candidate versions without the '+incompatible' +// prefix. +// +// Both list and incompatible must be sorted in semantic order. +func (r *codeRepo) appendIncompatibleVersions(ctx context.Context, origin *codehost.Origin, list, incompatible []string) (*Versions, error) { + versions := &Versions{ + Origin: origin, + List: list, + } + if len(incompatible) == 0 || r.pathMajor != "" { + // No +incompatible versions are possible, so no need to check them. 
+ return versions, nil + } + + versionHasGoMod := func(v string) (bool, error) { + _, err := r.code.ReadFile(ctx, v, "go.mod", codehost.MaxGoMod) + if err == nil { + return true, nil + } + if !os.IsNotExist(err) { + return false, &module.ModuleError{ + Path: r.modPath, + Err: err, + } + } + return false, nil + } + + if len(list) > 0 { + ok, err := versionHasGoMod(list[len(list)-1]) + if err != nil { + return nil, err + } + if ok { + // The latest compatible version has a go.mod file, so assume that all + // subsequent versions do as well, and do not include any +incompatible + // versions. Even if we are wrong, the author clearly intends module + // consumers to be on the v0/v1 line instead of a higher +incompatible + // version. (See https://golang.org/issue/34189.) + // + // We know of at least two examples where this behavior is desired + // (github.com/russross/blackfriday@v2.0.0 and + // github.com/libp2p/go-libp2p@v6.0.23), and (as of 2019-10-29) have no + // concrete examples for which it is undesired. + return versions, nil + } + } + + var ( + lastMajor string + lastMajorHasGoMod bool + ) + for i, v := range incompatible { + major := semver.Major(v) + + if major != lastMajor { + rem := incompatible[i:] + j := sort.Search(len(rem), func(j int) bool { + return semver.Major(rem[j]) != major + }) + latestAtMajor := rem[j-1] + + var err error + lastMajor = major + lastMajorHasGoMod, err = versionHasGoMod(latestAtMajor) + if err != nil { + return nil, err + } + } + + if lastMajorHasGoMod { + // The latest release of this major version has a go.mod file, so it is + // not allowed as +incompatible. It would be confusing to include some + // minor versions of this major version as +incompatible but require + // semantic import versioning for others, so drop all +incompatible + // versions for this major version. 
+ // + // If we're wrong about a minor version in the middle, users will still be + // able to 'go get' specific tags for that version explicitly — they just + // won't appear in 'go list' or as the results for queries with inequality + // bounds. + continue + } + versions.List = append(versions.List, v+"+incompatible") + } + + return versions, nil +} + +func (r *codeRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) { + if rev == "latest" { + return r.Latest(ctx) + } + codeRev := r.revToRev(rev) + info, err := r.code.Stat(ctx, codeRev) + if err != nil { + // Note: info may be non-nil to supply Origin for caching error. + var revInfo *RevInfo + if info != nil { + revInfo = &RevInfo{ + Origin: info.Origin, + Version: rev, + } + } + return revInfo, &module.ModuleError{ + Path: r.modPath, + Err: &module.InvalidVersionError{ + Version: rev, + Err: err, + }, + } + } + return r.convert(ctx, info, rev) +} + +func (r *codeRepo) Latest(ctx context.Context) (*RevInfo, error) { + info, err := r.code.Latest(ctx) + if err != nil { + if info != nil { + return &RevInfo{Origin: info.Origin}, err + } + return nil, err + } + return r.convert(ctx, info, "") +} + +// convert converts a version as reported by the code host to a version as +// interpreted by the module system. +// +// If statVers is a valid module version, it is used for the Version field. +// Otherwise, the Version is derived from the passed-in info and recent tags. 
func (r *codeRepo) convert(ctx context.Context, info *codehost.RevInfo, statVers string) (revInfo *RevInfo, err error) {
	// The deferred func runs on every return path and attaches Origin
	// metadata (copied from info.Origin) to whatever RevInfo is returned,
	// creating one if the function is returning a nil RevInfo alongside an
	// error. For pseudo-versions it also records the tag prefix and tag sum
	// so that the result can later be checked for reuse.
	defer func() {
		if info.Origin == nil {
			return
		}
		if revInfo == nil {
			revInfo = new(RevInfo)
		} else if revInfo.Origin != nil {
			panic("internal error: RevInfo Origin unexpectedly already populated")
		}

		origin := *info.Origin
		revInfo.Origin = &origin
		origin.Subdir = r.codeDir

		v := revInfo.Version
		if module.IsPseudoVersion(v) && (v != statVers || !strings.HasPrefix(v, "v0.0.0-")) {
			// Add tags that are relevant to pseudo-version calculation to origin.
			prefix := r.codeDir
			if prefix != "" {
				prefix += "/"
			}
			if r.pathMajor != "" { // "/v2" or "/.v2"
				prefix += r.pathMajor[1:] + "." // += "v2."
			}
			tags, tagsErr := r.code.Tags(ctx, prefix)
			if tagsErr != nil {
				// Without the tag list the Origin is not reusable; drop it
				// rather than record incomplete metadata.
				revInfo.Origin = nil
				if err == nil {
					err = tagsErr
				}
			} else {
				origin.TagPrefix = tags.Origin.TagPrefix
				origin.TagSum = tags.Origin.TagSum
			}
		}
	}()

	// If this is a plain tag (no dir/ prefix)
	// and the module path is unversioned,
	// and if the underlying file tree has no go.mod,
	// then allow using the tag with a +incompatible suffix.
	//
	// (If the version is +incompatible, then the go.mod file must not exist:
	// +incompatible is not an ongoing opt-out from semantic import versioning.)
	//
	// incompatibleOk memoizes go.mod existence checks keyed by major version
	// ("" means the repo root), so repeated calls to canUseIncompatible do at
	// most one ReadFile per key.
	incompatibleOk := map[string]bool{}
	canUseIncompatible := func(v string) bool {
		if r.codeDir != "" || r.pathMajor != "" {
			// A non-empty codeDir indicates a module within a subdirectory,
			// which necessarily has a go.mod file indicating the module boundary.
			// A non-empty pathMajor indicates a module path with a major-version
			// suffix, which must match.
			return false
		}

		ok, seen := incompatibleOk[""]
		if !seen {
			_, errGoMod := r.code.ReadFile(ctx, info.Name, "go.mod", codehost.MaxGoMod)
			ok = (errGoMod != nil)
			incompatibleOk[""] = ok
		}
		if !ok {
			// A go.mod file exists at the repo root.
			return false
		}

		// Per https://go.dev/issue/51324, previous versions of the 'go' command
		// didn't always check for go.mod files in subdirectories, so if the user
		// requests a +incompatible version explicitly, we should continue to allow
		// it. Otherwise, if vN/go.mod exists, expect that release tags for that
		// major version are intended for the vN module.
		if v != "" && !strings.HasSuffix(statVers, "+incompatible") {
			major := semver.Major(v)
			ok, seen = incompatibleOk[major]
			if !seen {
				_, errGoModSub := r.code.ReadFile(ctx, info.Name, path.Join(major, "go.mod"), codehost.MaxGoMod)
				ok = (errGoModSub != nil)
				incompatibleOk[major] = ok
			}
			if !ok {
				return false
			}
		}

		return true
	}

	// checkCanonical verifies that the canonical version v is compatible with the
	// module path represented by r, adding a "+incompatible" suffix if needed.
	//
	// If statVers is also canonical, checkCanonical also verifies that v is
	// either statVers or statVers with the added "+incompatible" suffix.
	checkCanonical := func(v string) (*RevInfo, error) {
		// If r.codeDir is non-empty, then the go.mod file must exist: the module
		// author — not the module consumer — gets to decide how to carve up the repo
		// into modules.
		//
		// Conversely, if the go.mod file exists, the module author — not the module
		// consumer — gets to determine the module's path.
		//
		// r.findDir verifies both of these conditions. Execute it now so that
		// r.Stat will correctly return a notExistError if the go.mod location or
		// declared module path doesn't match.
		_, _, _, err := r.findDir(ctx, v)
		if err != nil {
			// TODO: It would be nice to return an error like "not a module".
			// Right now we return "missing go.mod", which is a little confusing.
			return nil, &module.ModuleError{
				Path: r.modPath,
				Err: &module.InvalidVersionError{
					Version: v,
					Err:     notExistError{err: err},
				},
			}
		}

		// invalidf wraps a formatted message in the ModuleError/InvalidVersionError
		// structure callers of Stat expect.
		invalidf := func(format string, args ...any) error {
			return &module.ModuleError{
				Path: r.modPath,
				Err: &module.InvalidVersionError{
					Version: v,
					Err:     fmt.Errorf(format, args...),
				},
			}
		}

		// Add the +incompatible suffix if needed or requested explicitly, and
		// verify that its presence or absence is appropriate for this version
		// (which depends on whether it has an explicit go.mod file).

		if v == strings.TrimSuffix(statVers, "+incompatible") {
			v = statVers
		}
		base := strings.TrimSuffix(v, "+incompatible")
		var errIncompatible error
		if !module.MatchPathMajor(base, r.pathMajor) {
			if canUseIncompatible(base) {
				v = base + "+incompatible"
			} else {
				if r.pathMajor != "" {
					errIncompatible = invalidf("module path includes a major version suffix, so major version must match")
				} else {
					errIncompatible = invalidf("module contains a go.mod file, so module path must match major version (%q)", path.Join(r.pathPrefix, semver.Major(v)))
				}
			}
		} else if strings.HasSuffix(v, "+incompatible") {
			errIncompatible = invalidf("+incompatible suffix not allowed: major version %s is compatible", semver.Major(v))
		}

		if statVers != "" && statVers == module.CanonicalVersion(statVers) {
			// Since the caller-requested version is canonical, it would be very
			// confusing to resolve it to anything but itself, possibly with a
			// "+incompatible" suffix. Error out explicitly.
			if statBase := strings.TrimSuffix(statVers, "+incompatible"); statBase != base {
				return nil, &module.ModuleError{
					Path: r.modPath,
					Err: &module.InvalidVersionError{
						Version: statVers,
						Err:     fmt.Errorf("resolves to version %v (%s is not a tag)", v, statBase),
					},
				}
			}
		}

		if errIncompatible != nil {
			return nil, errIncompatible
		}

		return &RevInfo{
			Name:    info.Name,
			Short:   info.Short,
			Time:    info.Time,
			Version: v,
		}, nil
	}

	// Determine version.

	if module.IsPseudoVersion(statVers) {
		// Validate the go.mod location and major version before
		// we check for an ancestor tagged with the pseudo-version base.
		//
		// We can rule out an invalid subdirectory or major version with only
		// shallow commit information, but checking the pseudo-version base may
		// require downloading a (potentially more expensive) full history.
		revInfo, err = checkCanonical(statVers)
		if err != nil {
			return revInfo, err
		}
		if err := r.validatePseudoVersion(ctx, info, statVers); err != nil {
			return nil, err
		}
		return revInfo, nil
	}

	// statVers is not a pseudo-version, so we need to either resolve it to a
	// canonical version or verify that it is already a canonical tag
	// (not a branch).

	// Derive or verify a version from a code repo tag.
	// Tag must have a prefix matching codeDir.
	tagPrefix := ""
	if r.codeDir != "" {
		tagPrefix = r.codeDir + "/"
	}

	// If retraction information cannot be loaded, proceed as if nothing is
	// retracted rather than failing the whole conversion.
	isRetracted, err := r.retractedVersions(ctx)
	if err != nil {
		isRetracted = func(string) bool { return false }
	}

	// tagToVersion returns the version obtained by trimming tagPrefix from tag.
	// If the tag is invalid, retracted, or a pseudo-version, tagToVersion returns
	// an empty version.
	tagToVersion := func(tag string) (v string, tagIsCanonical bool) {
		if !strings.HasPrefix(tag, tagPrefix) {
			return "", false
		}
		trimmed := tag[len(tagPrefix):]
		// Tags that look like pseudo-versions would be confusing. Ignore them.
		if module.IsPseudoVersion(tag) {
			return "", false
		}

		v = semver.Canonical(trimmed) // Not module.Canonical: we don't want to pick up an explicit "+incompatible" suffix from the tag.
		if v == "" || !strings.HasPrefix(trimmed, v) {
			return "", false // Invalid or incomplete version (just vX or vX.Y).
		}
		if v == trimmed {
			tagIsCanonical = true
		}
		return v, tagIsCanonical
	}

	// If the VCS gave us a valid version, use that.
	if v, tagIsCanonical := tagToVersion(info.Version); tagIsCanonical {
		// err is necessarily nil in this branch; returned for symmetry.
		if info, err := checkCanonical(v); err == nil {
			return info, err
		}
	}

	// Look through the tags on the revision for either a usable canonical version
	// or an appropriate base for a pseudo-version.
	var (
		highestCanonical string
		pseudoBase       string
	)
	for _, pathTag := range info.Tags {
		v, tagIsCanonical := tagToVersion(pathTag)
		if statVers != "" && semver.Compare(v, statVers) == 0 {
			// The tag is equivalent to the version requested by the user.
			if tagIsCanonical {
				// This tag is the canonical form of the requested version,
				// not some other form with extra build metadata.
				// Use this tag so that the resolved version will match exactly.
				// (If it isn't actually allowed, we'll error out in checkCanonical.)
				return checkCanonical(v)
			} else {
				// The user explicitly requested something equivalent to this tag. We
				// can't use the version from the tag directly: since the tag is not
				// canonical, it could be ambiguous. For example, tags v0.0.1+a and
				// v0.0.1+b might both exist and refer to different revisions.
				//
				// The tag is otherwise valid for the module, so we can at least use it as
				// the base of an unambiguous pseudo-version.
				//
				// If multiple tags match, tagToVersion will canonicalize them to the same
				// base version.
				pseudoBase = v
			}
		}
		// Save the highest non-retracted canonical tag for the revision.
		// If we don't find a better match, we'll use it as the canonical version.
		if tagIsCanonical && semver.Compare(highestCanonical, v) < 0 && !isRetracted(v) {
			if module.MatchPathMajor(v, r.pathMajor) || canUseIncompatible(v) {
				highestCanonical = v
			}
		}
	}

	// If we found a valid canonical tag for the revision, return it.
	// Even if we found a good pseudo-version base, a canonical version is better.
	if highestCanonical != "" {
		return checkCanonical(highestCanonical)
	}

	// Find the highest tagged version in the revision's history, subject to
	// major version and +incompatible constraints. Use that version as the
	// pseudo-version base so that the pseudo-version sorts higher. Ignore
	// retracted versions.
	tagAllowed := func(tag string) bool {
		v, _ := tagToVersion(tag)
		if v == "" {
			return false
		}
		if !module.MatchPathMajor(v, r.pathMajor) && !canUseIncompatible(v) {
			return false
		}
		return !isRetracted(v)
	}
	if pseudoBase == "" {
		// RecentTag may be unsupported by the underlying code host; treat that
		// the same as "no recent tag" rather than an error.
		tag, err := r.code.RecentTag(ctx, info.Name, tagPrefix, tagAllowed)
		if err != nil && !errors.Is(err, errors.ErrUnsupported) {
			return nil, err
		}
		if tag != "" {
			pseudoBase, _ = tagToVersion(tag)
		}
	}

	return checkCanonical(module.PseudoVersion(r.pseudoMajor, pseudoBase, info.Time, info.Short))
}

// validatePseudoVersion checks that version has a major version compatible with
// r.modPath and encodes a base version and commit metadata that agrees with
// info.
//
// Note that verifying a nontrivial base version in particular may be somewhat
// expensive: in order to do so, r.code.DescendsFrom will need to fetch at least
// enough of the commit history to find a path between version and its base.
// Fortunately, many pseudo-versions — such as those for untagged repositories —
// have trivial bases!
func (r *codeRepo) validatePseudoVersion(ctx context.Context, info *codehost.RevInfo, version string) (err error) {
	// The deferred func normalizes every error return: anything that is not
	// already a *module.ModuleError is wrapped (via *module.InvalidVersionError
	// with Pseudo set, unless it already is one) so callers always see the
	// standard module error structure.
	defer func() {
		if err != nil {
			if _, ok := err.(*module.ModuleError); !ok {
				if _, ok := err.(*module.InvalidVersionError); !ok {
					err = &module.InvalidVersionError{Version: version, Pseudo: true, Err: err}
				}
				err = &module.ModuleError{Path: r.modPath, Err: err}
			}
		}
	}()

	// The short revision encoded in the pseudo-version must match the code
	// host's canonical short form exactly — neither truncated nor extended.
	rev, err := module.PseudoVersionRev(version)
	if err != nil {
		return err
	}
	if rev != info.Short {
		switch {
		case strings.HasPrefix(rev, info.Short):
			return fmt.Errorf("revision is longer than canonical (expected %s)", info.Short)
		case strings.HasPrefix(info.Short, rev):
			return fmt.Errorf("revision is shorter than canonical (expected %s)", info.Short)
		default:
			return fmt.Errorf("does not match short name of revision (expected %s)", info.Short)
		}
	}

	// The timestamp encoded in the pseudo-version must match the commit time
	// (compared at second granularity, since that is all the version encodes).
	t, err := module.PseudoVersionTime(version)
	if err != nil {
		return err
	}
	if !t.Equal(info.Time.Truncate(time.Second)) {
		return fmt.Errorf("does not match version-control timestamp (expected %s)", info.Time.UTC().Format(module.PseudoVersionTimestampFormat))
	}

	tagPrefix := ""
	if r.codeDir != "" {
		tagPrefix = r.codeDir + "/"
	}

	// A pseudo-version should have a precedence just above its parent revisions,
	// and no higher. Otherwise, it would be possible for library authors to "pin"
	// dependency versions (and bypass the usual minimum version selection) by
	// naming an extremely high pseudo-version rather than an accurate one.
	//
	// Moreover, if we allow a pseudo-version to use any arbitrary pre-release
	// tag, we end up with infinitely many possible names for each commit. Each
	// name consumes resources in the module cache and proxies, so we want to
	// restrict them to a finite set under control of the module author.
	//
	// We address both of these issues by requiring the tag upon which the
	// pseudo-version is based to refer to some ancestor of the revision. We
	// prefer the highest such tag when constructing a new pseudo-version, but do
	// not enforce that property when resolving existing pseudo-versions: we don't
	// know when the parent tags were added, and the highest-tagged parent may not
	// have existed when the pseudo-version was first resolved.
	base, err := module.PseudoVersionBase(strings.TrimSuffix(version, "+incompatible"))
	if err != nil {
		return err
	}
	if base == "" {
		if r.pseudoMajor == "" && semver.Major(version) == "v1" {
			return fmt.Errorf("major version without preceding tag must be v0, not v1")
		}
		return nil
	} else {
		for _, tag := range info.Tags {
			versionOnly := strings.TrimPrefix(tag, tagPrefix)
			if versionOnly == base {
				// The base version is canonical, so if the version from the tag is
				// literally equal (not just equivalent), then the tag is canonical too.
				//
				// We allow pseudo-versions to be derived from non-canonical tags on the
				// same commit, so that tags like "v1.1.0+some-metadata" resolve as
				// close as possible to the canonical version ("v1.1.0") while still
				// enforcing a total ordering ("v1.1.1-0.[…]" with a unique suffix).
				//
				// However, canonical tags already have a total ordering, so there is no
				// reason not to use the canonical tag directly, and we know that the
				// canonical tag must already exist because the pseudo-version is
				// derived from it. In that case, referring to the revision by a
				// pseudo-version derived from its own canonical tag is just confusing.
				return fmt.Errorf("tag (%s) found on revision %s is already canonical, so should not be replaced with a pseudo-version derived from that tag", tag, rev)
			}
		}
	}

	tags, err := r.code.Tags(ctx, tagPrefix+base)
	if err != nil {
		return err
	}

	var lastTag string // Prefer to log some real tag rather than a canonically-equivalent base.
	ancestorFound := false
	for _, tag := range tags.List {
		versionOnly := strings.TrimPrefix(tag.Name, tagPrefix)
		if semver.Compare(versionOnly, base) == 0 {
			lastTag = tag.Name
			// DescendsFrom may leave err non-nil even when ancestorFound stays
			// false; that error is surfaced below only if no ancestor is found.
			ancestorFound, err = r.code.DescendsFrom(ctx, info.Name, tag.Name)
			if ancestorFound {
				break
			}
		}
	}

	if lastTag == "" {
		return fmt.Errorf("preceding tag (%s) not found", base)
	}

	if !ancestorFound {
		if err != nil {
			return err
		}
		// rev and err here shadow the outer variables; the re-parse supplies the
		// revision for the error message and falls back to a rev-less message if
		// it fails.
		rev, err := module.PseudoVersionRev(version)
		if err != nil {
			return fmt.Errorf("not a descendent of preceding tag (%s)", lastTag)
		}
		return fmt.Errorf("revision %s is not a descendent of preceding tag (%s)", rev, lastTag)
	}
	return nil
}

// revToRev translates a module version into the revision string passed to the
// underlying code host: a pseudo-version is reduced to its encoded revision, a
// "+incompatible" build suffix is stripped, and (for modules in a
// subdirectory) the codeDir prefix is prepended so tag lookups match.
// Anything that is not a valid semantic version is passed through unchanged.
func (r *codeRepo) revToRev(rev string) string {
	if semver.IsValid(rev) {
		if module.IsPseudoVersion(rev) {
			// Note: the local r here shadows the receiver.
			r, _ := module.PseudoVersionRev(rev)
			return r
		}
		if semver.Build(rev) == "+incompatible" {
			rev = rev[:len(rev)-len("+incompatible")]
		}
		if r.codeDir == "" {
			return rev
		}
		return r.codeDir + "/" + rev
	}
	return rev
}

// versionToRev is like revToRev but requires its argument to be a valid
// semantic version, returning a module.ModuleError wrapping a syntax error
// otherwise.
func (r *codeRepo) versionToRev(version string) (rev string, err error) {
	if !semver.IsValid(version) {
		return "", &module.ModuleError{
			Path: r.modPath,
			Err: &module.InvalidVersionError{
				Version: version,
				Err:     errors.New("syntax error"),
			},
		}
	}
	return r.revToRev(version), nil
}

// findDir locates the directory within the repo containing the module.
//
// If r.pathMajor is non-empty, this can be either r.codeDir or — if a go.mod
// file exists — r.codeDir/r.pathMajor[1:].
+func (r *codeRepo) findDir(ctx context.Context, version string) (rev, dir string, gomod []byte, err error) { + rev, err = r.versionToRev(version) + if err != nil { + return "", "", nil, err + } + + // Load info about go.mod but delay consideration + // (except I/O error) until we rule out v2/go.mod. + file1 := path.Join(r.codeDir, "go.mod") + gomod1, err1 := r.code.ReadFile(ctx, rev, file1, codehost.MaxGoMod) + if err1 != nil && !os.IsNotExist(err1) { + return "", "", nil, fmt.Errorf("reading %s/%s at revision %s: %v", r.codeRoot, file1, rev, err1) + } + mpath1 := modfile.ModulePath(gomod1) + found1 := err1 == nil && (isMajor(mpath1, r.pathMajor) || r.canReplaceMismatchedVersionDueToBug(mpath1)) + + var file2 string + if r.pathMajor != "" && r.codeRoot != r.modPath && !strings.HasPrefix(r.pathMajor, ".") { + // Suppose pathMajor is "/v2". + // Either go.mod should claim v2 and v2/go.mod should not exist, + // or v2/go.mod should exist and claim v2. Not both. + // Note that we don't check the full path, just the major suffix, + // because of replacement modules. This might be a fork of + // the real module, found at a different path, usable only in + // a replace directive. 
+ dir2 := path.Join(r.codeDir, r.pathMajor[1:]) + file2 = path.Join(dir2, "go.mod") + gomod2, err2 := r.code.ReadFile(ctx, rev, file2, codehost.MaxGoMod) + if err2 != nil && !os.IsNotExist(err2) { + return "", "", nil, fmt.Errorf("reading %s/%s at revision %s: %v", r.codeRoot, file2, rev, err2) + } + mpath2 := modfile.ModulePath(gomod2) + found2 := err2 == nil && isMajor(mpath2, r.pathMajor) + + if found1 && found2 { + return "", "", nil, fmt.Errorf("%s/%s and ...%s/go.mod both have ...%s module paths at revision %s", r.pathPrefix, file1, r.pathMajor, r.pathMajor, rev) + } + if found2 { + return rev, dir2, gomod2, nil + } + if err2 == nil { + if mpath2 == "" { + return "", "", nil, fmt.Errorf("%s/%s is missing module path at revision %s", r.codeRoot, file2, rev) + } + return "", "", nil, fmt.Errorf("%s/%s has non-...%s module path %q at revision %s", r.codeRoot, file2, r.pathMajor, mpath2, rev) + } + } + + // Not v2/go.mod, so it's either go.mod or nothing. Which is it? + if found1 { + // Explicit go.mod with matching major version ok. + return rev, r.codeDir, gomod1, nil + } + if err1 == nil { + // Explicit go.mod with non-matching major version disallowed. 
+ suffix := "" + if file2 != "" { + suffix = fmt.Sprintf(" (and ...%s/go.mod does not exist)", r.pathMajor) + } + if mpath1 == "" { + return "", "", nil, fmt.Errorf("%s is missing module path%s at revision %s", file1, suffix, rev) + } + if r.pathMajor != "" { // ".v1", ".v2" for gopkg.in + return "", "", nil, fmt.Errorf("%s has non-...%s module path %q%s at revision %s", file1, r.pathMajor, mpath1, suffix, rev) + } + if _, _, ok := module.SplitPathVersion(mpath1); !ok { + return "", "", nil, fmt.Errorf("%s has malformed module path %q%s at revision %s", file1, mpath1, suffix, rev) + } + return "", "", nil, fmt.Errorf("%s has post-%s module path %q%s at revision %s", file1, semver.Major(version), mpath1, suffix, rev) + } + + if r.codeDir == "" && (r.pathMajor == "" || strings.HasPrefix(r.pathMajor, ".")) { + // Implicit go.mod at root of repo OK for v0/v1 and for gopkg.in. + return rev, "", nil, nil + } + + // Implicit go.mod below root of repo or at v2+ disallowed. + // Be clear about possibility of using either location for v2+. + if file2 != "" { + return "", "", nil, fmt.Errorf("missing %s/go.mod and ...%s/go.mod at revision %s", r.pathPrefix, r.pathMajor, rev) + } + return "", "", nil, fmt.Errorf("missing %s/go.mod at revision %s", r.pathPrefix, rev) +} + +// isMajor reports whether the versions allowed for mpath are compatible with +// the major version(s) implied by pathMajor, or false if mpath has an invalid +// version suffix. +func isMajor(mpath, pathMajor string) bool { + if mpath == "" { + // If we don't have a path, we don't know what version(s) it is compatible with. + return false + } + _, mpathMajor, ok := module.SplitPathVersion(mpath) + if !ok { + // An invalid module path is not compatible with any version. + return false + } + if pathMajor == "" { + // All of the valid versions for a gopkg.in module that requires major + // version v0 or v1 are compatible with the "v0 or v1" implied by an empty + // pathMajor. 
+ switch module.PathMajorPrefix(mpathMajor) { + case "", "v0", "v1": + return true + default: + return false + } + } + if mpathMajor == "" { + // Even if pathMajor is ".v0" or ".v1", we can't be sure that a module + // without a suffix is tagged appropriately. Besides, we don't expect clones + // of non-gopkg.in modules to have gopkg.in paths, so a non-empty, + // non-gopkg.in mpath is probably the wrong module for any such pathMajor + // anyway. + return false + } + // If both pathMajor and mpathMajor are non-empty, then we only care that they + // have the same major-version validation rules. A clone fetched via a /v2 + // path might replace a module with path gopkg.in/foo.v2-unstable, and that's + // ok. + return pathMajor[1:] == mpathMajor[1:] +} + +// canReplaceMismatchedVersionDueToBug reports whether versions of r +// could replace versions of mpath with otherwise-mismatched major versions +// due to a historical bug in the Go command (golang.org/issue/34254). +func (r *codeRepo) canReplaceMismatchedVersionDueToBug(mpath string) bool { + // The bug caused us to erroneously accept unversioned paths as replacements + // for versioned gopkg.in paths. + unversioned := r.pathMajor == "" + replacingGopkgIn := strings.HasPrefix(mpath, "gopkg.in/") + return unversioned && replacingGopkgIn +} + +func (r *codeRepo) GoMod(ctx context.Context, version string) (data []byte, err error) { + if version != module.CanonicalVersion(version) { + return nil, fmt.Errorf("version %s is not canonical", version) + } + + if module.IsPseudoVersion(version) { + // findDir ignores the metadata encoded in a pseudo-version, + // only using the revision at the end. + // Invoke Stat to verify the metadata explicitly so we don't return + // a bogus file for an invalid version. 
+ _, err := r.Stat(ctx, version) + if err != nil { + return nil, err + } + } + + rev, dir, gomod, err := r.findDir(ctx, version) + if err != nil { + return nil, err + } + if gomod != nil { + return gomod, nil + } + data, err = r.code.ReadFile(ctx, rev, path.Join(dir, "go.mod"), codehost.MaxGoMod) + if err != nil { + if os.IsNotExist(err) { + return LegacyGoMod(r.modPath), nil + } + return nil, err + } + return data, nil +} + +// LegacyGoMod generates a fake go.mod file for a module that doesn't have one. +// The go.mod file contains a module directive and nothing else: no go version, +// no requirements. +// +// We used to try to build a go.mod reflecting pre-existing +// package management metadata files, but the conversion +// was inherently imperfect (because those files don't have +// exactly the same semantics as go.mod) and, when done +// for dependencies in the middle of a build, impossible to +// correct. So we stopped. +func LegacyGoMod(modPath string) []byte { + return fmt.Appendf(nil, "module %s\n", modfile.AutoQuote(modPath)) +} + +func (r *codeRepo) modPrefix(rev string) string { + return r.modPath + "@" + rev +} + +func (r *codeRepo) retractedVersions(ctx context.Context) (func(string) bool, error) { + vs, err := r.Versions(ctx, "") + if err != nil { + return nil, err + } + versions := vs.List + + for i, v := range versions { + if strings.HasSuffix(v, "+incompatible") { + // We're looking for the latest release tag that may list retractions in a + // go.mod file. +incompatible versions necessarily do not, and they start + // at major version 2 — which is higher than any version that could + // validly contain a go.mod file. 
+ versions = versions[:i] + break + } + } + if len(versions) == 0 { + return func(string) bool { return false }, nil + } + + var highest string + for i := len(versions) - 1; i >= 0; i-- { + v := versions[i] + if semver.Prerelease(v) == "" { + highest = v + break + } + } + if highest == "" { + highest = versions[len(versions)-1] + } + + data, err := r.GoMod(ctx, highest) + if err != nil { + return nil, err + } + f, err := modfile.ParseLax("go.mod", data, nil) + if err != nil { + return nil, err + } + retractions := make([]modfile.VersionInterval, 0, len(f.Retract)) + for _, r := range f.Retract { + retractions = append(retractions, r.VersionInterval) + } + + return func(v string) bool { + for _, r := range retractions { + if semver.Compare(r.Low, v) <= 0 && semver.Compare(v, r.High) <= 0 { + return true + } + } + return false + }, nil +} + +func (r *codeRepo) Zip(ctx context.Context, dst io.Writer, version string) error { + if version != module.CanonicalVersion(version) { + return fmt.Errorf("version %s is not canonical", version) + } + + if module.IsPseudoVersion(version) { + // findDir ignores the metadata encoded in a pseudo-version, + // only using the revision at the end. + // Invoke Stat to verify the metadata explicitly so we don't return + // a bogus file for an invalid version. 
+ _, err := r.Stat(ctx, version) + if err != nil { + return err + } + } + + rev, subdir, _, err := r.findDir(ctx, version) + if err != nil { + return err + } + + if gomod, err := r.code.ReadFile(ctx, rev, filepath.Join(subdir, "go.mod"), codehost.MaxGoMod); err == nil { + goVers := gover.GoModLookup(gomod, "go") + if gover.Compare(goVers, gover.Local()) > 0 { + return &gover.TooNewError{What: r.ModulePath() + "@" + version, GoVersion: goVers} + } + } else if !errors.Is(err, fs.ErrNotExist) { + return err + } + + dl, err := r.code.ReadZip(ctx, rev, subdir, codehost.MaxZipFile) + if err != nil { + return err + } + defer dl.Close() + subdir = strings.Trim(subdir, "/") + + // Spool to local file. + f, err := os.CreateTemp("", "go-codehost-") + if err != nil { + dl.Close() + return err + } + defer os.Remove(f.Name()) + defer f.Close() + maxSize := int64(codehost.MaxZipFile) + lr := &io.LimitedReader{R: dl, N: maxSize + 1} + if _, err := io.Copy(f, lr); err != nil { + dl.Close() + return err + } + dl.Close() + if lr.N <= 0 { + return fmt.Errorf("downloaded zip file too large") + } + size := (maxSize + 1) - lr.N + if _, err := f.Seek(0, 0); err != nil { + return err + } + + // Translate from zip file we have to zip file we want. 
+ zr, err := zip.NewReader(f, size) + if err != nil { + return err + } + + var files []modzip.File + if subdir != "" { + subdir += "/" + } + haveLICENSE := false + topPrefix := "" + for _, zf := range zr.File { + if topPrefix == "" { + i := strings.Index(zf.Name, "/") + if i < 0 { + return fmt.Errorf("missing top-level directory prefix") + } + topPrefix = zf.Name[:i+1] + } + var name string + var found bool + if name, found = strings.CutPrefix(zf.Name, topPrefix); !found { + return fmt.Errorf("zip file contains more than one top-level directory") + } + + if name, found = strings.CutPrefix(name, subdir); !found { + continue + } + + if name == "" || strings.HasSuffix(name, "/") { + continue + } + files = append(files, zipFile{name: name, f: zf}) + if name == "LICENSE" { + haveLICENSE = true + } + } + + if !haveLICENSE && subdir != "" { + data, err := r.code.ReadFile(ctx, rev, "LICENSE", codehost.MaxLICENSE) + if err == nil { + files = append(files, dataFile{name: "LICENSE", data: data}) + } + } + + return modzip.Create(dst, module.Version{Path: r.modPath, Version: version}, files) +} + +type zipFile struct { + name string + f *zip.File +} + +func (f zipFile) Path() string { return f.name } +func (f zipFile) Lstat() (fs.FileInfo, error) { return f.f.FileInfo(), nil } +func (f zipFile) Open() (io.ReadCloser, error) { return f.f.Open() } + +type dataFile struct { + name string + data []byte +} + +func (f dataFile) Path() string { return f.name } +func (f dataFile) Lstat() (fs.FileInfo, error) { return dataFileInfo{f}, nil } +func (f dataFile) Open() (io.ReadCloser, error) { + return io.NopCloser(bytes.NewReader(f.data)), nil +} + +type dataFileInfo struct { + f dataFile +} + +func (fi dataFileInfo) Name() string { return path.Base(fi.f.name) } +func (fi dataFileInfo) Size() int64 { return int64(len(fi.f.data)) } +func (fi dataFileInfo) Mode() fs.FileMode { return 0644 } +func (fi dataFileInfo) ModTime() time.Time { return time.Time{} } +func (fi dataFileInfo) IsDir() 
bool { return false } +func (fi dataFileInfo) Sys() any { return nil } + +func (fi dataFileInfo) String() string { + return fs.FormatFileInfo(fi) +} + +// hasPathPrefix reports whether the path s begins with the +// elements in prefix. +func hasPathPrefix(s, prefix string) bool { + switch { + default: + return false + case len(s) == len(prefix): + return s == prefix + case len(s) > len(prefix): + if prefix != "" && prefix[len(prefix)-1] == '/' { + return strings.HasPrefix(s, prefix) + } + return s[len(prefix)] == '/' && s[:len(prefix)] == prefix + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/coderepo_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/coderepo_test.go new file mode 100644 index 0000000000000000000000000000000000000000..aad78722c09c6a61068a8dc55da5ba251f61e1f3 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/coderepo_test.go @@ -0,0 +1,965 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modfetch + +import ( + "archive/zip" + "context" + "crypto/sha256" + "encoding/hex" + "flag" + "hash" + "internal/testenv" + "io" + "log" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + "time" + + "cmd/go/internal/cfg" + "cmd/go/internal/modfetch/codehost" + "cmd/go/internal/vcweb/vcstest" + + "golang.org/x/mod/sumdb/dirhash" +) + +func TestMain(m *testing.M) { + flag.Parse() + if err := testMain(m); err != nil { + log.Fatal(err) + } +} + +func testMain(m *testing.M) (err error) { + + cfg.GOPROXY = "direct" + + // The sum database is populated using a released version of the go command, + // but this test may include fixes for additional modules that previously + // could not be fetched. Since this test isn't executing any of the resolved + // code, bypass the sum database. 
+ cfg.GOSUMDB = "off" + + dir, err := os.MkdirTemp("", "gitrepo-test-") + if err != nil { + return err + } + defer func() { + if rmErr := os.RemoveAll(dir); err == nil { + err = rmErr + } + }() + + cfg.GOMODCACHE = filepath.Join(dir, "modcache") + if err := os.Mkdir(cfg.GOMODCACHE, 0755); err != nil { + return err + } + + srv, err := vcstest.NewServer() + if err != nil { + return err + } + defer func() { + if closeErr := srv.Close(); err == nil { + err = closeErr + } + }() + + m.Run() + return nil +} + +const ( + vgotest1git = "github.com/rsc/vgotest1" + vgotest1hg = "vcs-test.golang.org/hg/vgotest1.hg" +) + +var altVgotests = map[string]string{ + "hg": vgotest1hg, +} + +type codeRepoTest struct { + vcs string + path string + mpath string + rev string + err string + version string + name string + short string + time time.Time + gomod string + gomodErr string + zip []string + zipErr string + zipSum string + zipFileHash string +} + +var codeRepoTests = []codeRepoTest{ + { + vcs: "git", + path: "github.com/rsc/vgotest1", + rev: "v0.0.0", + version: "v0.0.0", + name: "80d85c5d4d17598a0e9055e7c175a32b415d6128", + short: "80d85c5d4d17", + time: time.Date(2018, 2, 19, 23, 10, 6, 0, time.UTC), + zip: []string{ + "LICENSE", + "README.md", + "pkg/p.go", + }, + zipSum: "h1:zVEjciLdlk/TPWCOyZo7k24T+tOKRQC+u8MKq/xS80I=", + zipFileHash: "738a00ddbfe8c329dce6b48e1f23c8e22a92db50f3cfb2653caa0d62676bc09c", + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1", + rev: "v0.0.0-20180219231006-80d85c5d4d17", + version: "v0.0.0-20180219231006-80d85c5d4d17", + name: "80d85c5d4d17598a0e9055e7c175a32b415d6128", + short: "80d85c5d4d17", + time: time.Date(2018, 2, 19, 23, 10, 6, 0, time.UTC), + zip: []string{ + "LICENSE", + "README.md", + "pkg/p.go", + }, + zipSum: "h1:nOznk2xKsLGkTnXe0q9t1Ewt9jxK+oadtafSUqHM3Ec=", + zipFileHash: "bacb08f391e29d2eaaef8281b5c129ee6d890e608ee65877e0003c0181a766c8", + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1", + rev: 
"v0.0.1-0.20180219231006-80d85c5d4d17", + err: `github.com/rsc/vgotest1@v0.0.1-0.20180219231006-80d85c5d4d17: invalid pseudo-version: tag (v0.0.0) found on revision 80d85c5d4d17 is already canonical, so should not be replaced with a pseudo-version derived from that tag`, + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1", + rev: "v1.0.0", + version: "v1.0.0", + name: "80d85c5d4d17598a0e9055e7c175a32b415d6128", + short: "80d85c5d4d17", + time: time.Date(2018, 2, 19, 23, 10, 6, 0, time.UTC), + zip: []string{ + "LICENSE", + "README.md", + "pkg/p.go", + }, + zipSum: "h1:e040hOoWGeuJLawDjK9DW6med+cz9FxMFYDMOVG8ctQ=", + zipFileHash: "74caab65cfbea427c341fa815f3bb0378681d8f0e3cf62a7f207014263ec7be3", + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1/v2", + rev: "v2.0.0", + version: "v2.0.0", + name: "45f53230a74ad275c7127e117ac46914c8126160", + short: "45f53230a74a", + time: time.Date(2018, 7, 19, 1, 21, 27, 0, time.UTC), + err: "missing github.com/rsc/vgotest1/go.mod and .../v2/go.mod at revision v2.0.0", + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1", + rev: "80d85c5", + version: "v1.0.0", + name: "80d85c5d4d17598a0e9055e7c175a32b415d6128", + short: "80d85c5d4d17", + time: time.Date(2018, 2, 19, 23, 10, 6, 0, time.UTC), + zip: []string{ + "LICENSE", + "README.md", + "pkg/p.go", + }, + zipSum: "h1:e040hOoWGeuJLawDjK9DW6med+cz9FxMFYDMOVG8ctQ=", + zipFileHash: "74caab65cfbea427c341fa815f3bb0378681d8f0e3cf62a7f207014263ec7be3", + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1", + rev: "mytag", + version: "v1.0.0", + name: "80d85c5d4d17598a0e9055e7c175a32b415d6128", + short: "80d85c5d4d17", + time: time.Date(2018, 2, 19, 23, 10, 6, 0, time.UTC), + zip: []string{ + "LICENSE", + "README.md", + "pkg/p.go", + }, + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1/v2", + rev: "45f53230a", + version: "v2.0.0", + name: "45f53230a74ad275c7127e117ac46914c8126160", + short: "45f53230a74a", + time: time.Date(2018, 7, 19, 1, 21, 27, 0, time.UTC), + err: 
"missing github.com/rsc/vgotest1/go.mod and .../v2/go.mod at revision v2.0.0", + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1/v54321", + rev: "80d85c5", + version: "v54321.0.0-20180219231006-80d85c5d4d17", + name: "80d85c5d4d17598a0e9055e7c175a32b415d6128", + short: "80d85c5d4d17", + time: time.Date(2018, 2, 19, 23, 10, 6, 0, time.UTC), + err: "missing github.com/rsc/vgotest1/go.mod and .../v54321/go.mod at revision 80d85c5d4d17", + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1/submod", + rev: "v1.0.0", + err: "unknown revision submod/v1.0.0", + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1/submod", + rev: "v1.0.3", + err: "unknown revision submod/v1.0.3", + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1/submod", + rev: "v1.0.4", + version: "v1.0.4", + name: "8afe2b2efed96e0880ecd2a69b98a53b8c2738b6", + short: "8afe2b2efed9", + time: time.Date(2018, 2, 19, 23, 12, 7, 0, time.UTC), + gomod: "module \"github.com/vgotest1/submod\" // submod/go.mod\n", + zip: []string{ + "go.mod", + "pkg/p.go", + "LICENSE", + }, + zipSum: "h1:iMsJ/9uQsk6MnZNnJK311f11QiSlmN92Q2aSjCywuJY=", + zipFileHash: "95801bfa69c5197ae809af512946d22f22850068527cd78100ae3f176bc8043b", + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1", + rev: "v1.1.0", + version: "v1.1.0", + name: "b769f2de407a4db81af9c5de0a06016d60d2ea09", + short: "b769f2de407a", + time: time.Date(2018, 2, 19, 23, 13, 36, 0, time.UTC), + gomod: "module \"github.com/rsc/vgotest1\" // root go.mod\nrequire \"github.com/rsc/vgotest1/submod\" v1.0.5\n", + zip: []string{ + "LICENSE", + "README.md", + "go.mod", + "pkg/p.go", + }, + zipSum: "h1:M69k7q+8bQ+QUpHov45Z/NoR8rj3DsQJUnXLWvf01+Q=", + zipFileHash: "58af45fb248d320ea471f568e006379e2b8d71d6d1663f9b19b2e00fd9ac9265", + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1/v2", + rev: "v2.0.1", + version: "v2.0.1", + name: "ea65f87c8f52c15ea68f3bdd9925ef17e20d91e9", + short: "ea65f87c8f52", + time: time.Date(2018, 2, 19, 23, 14, 23, 0, time.UTC), + 
gomod: "module \"github.com/rsc/vgotest1/v2\" // root go.mod\n", + zipSum: "h1:QmgYy/zt+uoWhDpcsgrSVzYFvKtBEjl5zT/FRz9GTzA=", + zipFileHash: "1aedf1546d322a0121879ddfd6d0e8bfbd916d2cafbeb538ddb440e04b04b9ef", + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1/v2", + rev: "v2.0.3", + version: "v2.0.3", + name: "f18795870fb14388a21ef3ebc1d75911c8694f31", + short: "f18795870fb1", + time: time.Date(2018, 2, 19, 23, 16, 4, 0, time.UTC), + err: "github.com/rsc/vgotest1/v2/go.mod has non-.../v2 module path \"github.com/rsc/vgotest\" at revision v2.0.3", + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1/v2", + rev: "v2.0.4", + version: "v2.0.4", + name: "1f863feb76bc7029b78b21c5375644838962f88d", + short: "1f863feb76bc", + time: time.Date(2018, 2, 20, 0, 3, 38, 0, time.UTC), + err: "github.com/rsc/vgotest1/go.mod and .../v2/go.mod both have .../v2 module paths at revision v2.0.4", + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1/v2", + rev: "v2.0.5", + version: "v2.0.5", + name: "2f615117ce481c8efef46e0cc0b4b4dccfac8fea", + short: "2f615117ce48", + time: time.Date(2018, 2, 20, 0, 3, 59, 0, time.UTC), + gomod: "module \"github.com/rsc/vgotest1/v2\" // v2/go.mod\n", + zipSum: "h1:RIEb9q1SUSEQOzMn0zfl/LQxGFWlhWEAdeEguf1MLGU=", + zipFileHash: "7d92c2c328c5e9b0694101353705d5843746ec1d93a1e986d0da54c8a14dfe6d", + }, + { + // redirect to github + vcs: "git", + path: "rsc.io/quote", + rev: "v1.0.0", + version: "v1.0.0", + name: "f488df80bcdbd3e5bafdc24ad7d1e79e83edd7e6", + short: "f488df80bcdb", + time: time.Date(2018, 2, 14, 0, 45, 20, 0, time.UTC), + gomod: "module \"rsc.io/quote\"\n", + zipSum: "h1:haUSojyo3j2M9g7CEUFG8Na09dtn7QKxvPGaPVQdGwM=", + zipFileHash: "5c08ba2c09a364f93704aaa780e7504346102c6ef4fe1333a11f09904a732078", + }, + { + // redirect to static hosting proxy + vcs: "mod", + path: "swtch.com/testmod", + rev: "v1.0.0", + version: "v1.0.0", + // NO name or short - we intentionally ignore those in the proxy protocol + time: time.Date(1972, 7, 18, 12, 
34, 56, 0, time.UTC), + gomod: "module \"swtch.com/testmod\"\n", + }, + { + // redirect to googlesource + vcs: "git", + path: "golang.org/x/text", + rev: "4e4a3210bb", + version: "v0.3.1-0.20180208041248-4e4a3210bb54", + name: "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1", + short: "4e4a3210bb54", + time: time.Date(2018, 2, 8, 4, 12, 48, 0, time.UTC), + zipSum: "h1:Yxu6pHX9X2RECiuw/Q5/4uvajuaowck8zOFKXgbfNBk=", + zipFileHash: "ac2c165a5c10aa5a7545dea60a08e019270b982fa6c8bdcb5943931de64922fe", + }, + { + vcs: "git", + path: "github.com/pkg/errors", + rev: "v0.8.0", + version: "v0.8.0", + name: "645ef00459ed84a119197bfb8d8205042c6df63d", + short: "645ef00459ed", + time: time.Date(2016, 9, 29, 1, 48, 1, 0, time.UTC), + zipSum: "h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=", + zipFileHash: "e4fa69ba057356614edbc1da881a7d3ebb688505be49f65965686bcb859e2fae", + }, + { + // package in subdirectory - custom domain + // In general we can't reject these definitively in Lookup, + // but gopkg.in is special. + vcs: "git", + path: "gopkg.in/yaml.v2/abc", + err: "invalid module path \"gopkg.in/yaml.v2/abc\"", + }, + { + // package in subdirectory - github + // Because it's a package, Stat should fail entirely. 
+ vcs: "git", + path: "github.com/rsc/quote/buggy", + rev: "c4d4236f", + err: "missing github.com/rsc/quote/buggy/go.mod at revision c4d4236f9242", + }, + { + vcs: "git", + path: "gopkg.in/yaml.v2", + rev: "d670f940", + version: "v2.0.0", + name: "d670f9405373e636a5a2765eea47fac0c9bc91a4", + short: "d670f9405373", + time: time.Date(2018, 1, 9, 11, 43, 31, 0, time.UTC), + gomod: "module gopkg.in/yaml.v2\n", + zipSum: "h1:uUkhRGrsEyx/laRdeS6YIQKIys8pg+lRSRdVMTYjivs=", + zipFileHash: "7b0a141b1b0b49772ab4eecfd11dfd6609a94a5e868cab04a3abb1861ffaa877", + }, + { + vcs: "git", + path: "gopkg.in/check.v1", + rev: "20d25e280405", + version: "v1.0.0-20161208181325-20d25e280405", + name: "20d25e2804050c1cd24a7eea1e7a6447dd0e74ec", + short: "20d25e280405", + time: time.Date(2016, 12, 8, 18, 13, 25, 0, time.UTC), + gomod: "module gopkg.in/check.v1\n", + zipSum: "h1:829vOVxxusYHC+IqBtkX5mbKtsY9fheQiQn0MZRVLfQ=", + zipFileHash: "9e7cb3f4f1e66d722306442b0dbe1f6f43d74d1736d54c510537bdfb1d6f432f", + }, + { + vcs: "git", + path: "vcs-test.golang.org/go/mod/gitrepo1", + rev: "master", + version: "v1.2.4-annotated", + name: "ede458df7cd0fdca520df19a33158086a8a68e81", + short: "ede458df7cd0", + time: time.Date(2018, 4, 17, 19, 43, 22, 0, time.UTC), + gomod: "module vcs-test.golang.org/go/mod/gitrepo1\n", + zipSum: "h1:YJYZRsM9BHFTlVr8YADjT0cJH8uFIDtoc5NLiVqZEx8=", + zipFileHash: "c15e49d58b7a4c37966cbe5bc01a0330cd5f2927e990e1839bda1d407766d9c5", + }, + { + vcs: "git", + path: "gopkg.in/natefinch/lumberjack.v2", + // This repo has a v2.1 tag. + // We only allow semver references to tags that are fully qualified, as in v2.1.0. + // Because we can't record v2.1.0 (the actual tag is v2.1), we record a pseudo-version + // instead, same as if the tag were any other non-version-looking string. + // We use a v2 pseudo-version here because of the .v2 in the path, not because + // of the v2 in the rev. 
+ rev: "v2.1", // non-canonical semantic version turns into pseudo-version + version: "v2.0.0-20170531160350-a96e63847dc3", + name: "a96e63847dc3c67d17befa69c303767e2f84e54f", + short: "a96e63847dc3", + time: time.Date(2017, 5, 31, 16, 3, 50, 0, time.UTC), + gomod: "module gopkg.in/natefinch/lumberjack.v2\n", + }, + { + vcs: "git", + path: "vcs-test.golang.org/go/v2module/v2", + rev: "v2.0.0", + version: "v2.0.0", + name: "203b91c896acd173aa719e4cdcb7d463c4b090fa", + short: "203b91c896ac", + time: time.Date(2019, 4, 3, 15, 52, 15, 0, time.UTC), + gomod: "module vcs-test.golang.org/go/v2module/v2\n\ngo 1.12\n", + zipSum: "h1:JItBZ+gwA5WvtZEGEbuDL4lUttGtLrs53lmdurq3bOg=", + zipFileHash: "9ea9ae1673cffcc44b7fdd3cc89953d68c102449b46c982dbf085e4f2e394da5", + }, + { + // Git branch with a semver name, +incompatible version, and no go.mod file. + vcs: "git", + path: "vcs-test.golang.org/go/mod/gitrepo1", + rev: "v2.3.4+incompatible", + err: `resolves to version v2.0.1+incompatible (v2.3.4 is not a tag)`, + }, + { + // Git branch with a semver name, matching go.mod file, and compatible version. + vcs: "git", + path: "vcs-test.golang.org/git/semver-branch.git", + rev: "v1.0.0", + err: `resolves to version v0.1.1-0.20220202191944-09c4d8f6938c (v1.0.0 is not a tag)`, + }, + { + // Git branch with a semver name, matching go.mod file, and disallowed +incompatible version. + // The version/tag mismatch takes precedence over the +incompatible mismatched. + vcs: "git", + path: "vcs-test.golang.org/git/semver-branch.git", + rev: "v2.0.0+incompatible", + err: `resolves to version v0.1.0 (v2.0.0 is not a tag)`, + }, + { + // Git branch with a semver name, matching go.mod file, and mismatched version. + // The version/tag mismatch takes precedence over the +incompatible mismatched. 
+ vcs: "git", + path: "vcs-test.golang.org/git/semver-branch.git", + rev: "v2.0.0", + err: `resolves to version v0.1.0 (v2.0.0 is not a tag)`, + }, + { + // v3.0.0-devel is the same as tag v4.0.0-beta.1, but v4.0.0-beta.1 would + // not be allowed because it is incompatible and a go.mod file exists. + // The error message should refer to a valid pseudo-version, not the + // unusable semver tag. + vcs: "git", + path: "vcs-test.golang.org/git/semver-branch.git", + rev: "v3.0.0-devel", + err: `resolves to version v0.1.1-0.20220203155313-d59622f6e4d7 (v3.0.0-devel is not a tag)`, + }, + + // If v2/go.mod exists, then we should prefer to match the "v2" + // pseudo-versions to the nested module, and resolve the module in the parent + // directory to only compatible versions. + // + // However (https://go.dev/issue/51324), previous versions of the 'go' command + // didn't always do so, so if the user explicitly requests a +incompatible + // version (as would be present in an existing go.mod file), we should + // continue to allow it. 
+ { + vcs: "git", + path: "vcs-test.golang.org/git/v2sub.git", + rev: "80beb17a1603", + version: "v0.0.0-20220222205507-80beb17a1603", + name: "80beb17a16036f17a5aedd1bb5bd6d407b3c6dc5", + short: "80beb17a1603", + time: time.Date(2022, 2, 22, 20, 55, 7, 0, time.UTC), + }, + { + vcs: "git", + path: "vcs-test.golang.org/git/v2sub.git", + rev: "v2.0.0", + err: `module contains a go.mod file, so module path must match major version ("vcs-test.golang.org/git/v2sub.git/v2")`, + }, + { + vcs: "git", + path: "vcs-test.golang.org/git/v2sub.git", + rev: "v2.0.1-0.20220222205507-80beb17a1603", + err: `module contains a go.mod file, so module path must match major version ("vcs-test.golang.org/git/v2sub.git/v2")`, + }, + { + vcs: "git", + path: "vcs-test.golang.org/git/v2sub.git", + rev: "v2.0.0+incompatible", + version: "v2.0.0+incompatible", + name: "5fcd3eaeeb391d399f562fd45a50dac9fc34ae8b", + short: "5fcd3eaeeb39", + time: time.Date(2022, 2, 22, 20, 53, 33, 0, time.UTC), + }, + { + vcs: "git", + path: "vcs-test.golang.org/git/v2sub.git", + rev: "v2.0.1-0.20220222205507-80beb17a1603+incompatible", + version: "v2.0.1-0.20220222205507-80beb17a1603+incompatible", + name: "80beb17a16036f17a5aedd1bb5bd6d407b3c6dc5", + short: "80beb17a1603", + time: time.Date(2022, 2, 22, 20, 55, 7, 0, time.UTC), + }, + + // A version tag with explicit build metadata is valid but not canonical. + // It should resolve to a pseudo-version based on the same tag. 
+ { + vcs: "git", + path: "vcs-test.golang.org/git/odd-tags.git", + rev: "v0.1.0+build-metadata", + version: "v0.1.1-0.20220223184835-9d863d525bbf", + name: "9d863d525bbfcc8eda09364738c4032393711a56", + short: "9d863d525bbf", + time: time.Date(2022, 2, 23, 18, 48, 35, 0, time.UTC), + }, + { + vcs: "git", + path: "vcs-test.golang.org/git/odd-tags.git", + rev: "9d863d525bbf", + version: "v0.1.1-0.20220223184835-9d863d525bbf", + name: "9d863d525bbfcc8eda09364738c4032393711a56", + short: "9d863d525bbf", + time: time.Date(2022, 2, 23, 18, 48, 35, 0, time.UTC), + }, + { + vcs: "git", + path: "vcs-test.golang.org/git/odd-tags.git", + rev: "latest", + version: "v0.1.1-0.20220223184835-9d863d525bbf", + name: "9d863d525bbfcc8eda09364738c4032393711a56", + short: "9d863d525bbf", + time: time.Date(2022, 2, 23, 18, 48, 35, 0, time.UTC), + }, + + // A version tag with an erroneous "+incompatible" suffix should resolve using + // only the prefix before the "+incompatible" suffix, not the "+incompatible" + // tag itself. (Otherwise, we would potentially have two different commits + // both named "v2.0.0+incompatible".) However, the tag is still valid semver + // and can still be used as the base for an unambiguous pseudo-version. + { + vcs: "git", + path: "vcs-test.golang.org/git/odd-tags.git", + rev: "v2.0.0+incompatible", + err: `unknown revision v2.0.0`, + }, + { + vcs: "git", + path: "vcs-test.golang.org/git/odd-tags.git", + rev: "12d19af20458", + version: "v2.0.1-0.20220223184802-12d19af20458+incompatible", + name: "12d19af204585b0db3d2a876ceddf5b9323f5a4a", + short: "12d19af20458", + time: time.Date(2022, 2, 23, 18, 48, 2, 0, time.UTC), + }, + + // Similarly, a pseudo-version must resolve to the named commit, even if a tag + // matching that pseudo-version is present on a *different* commit. 
+ { + vcs: "git", + path: "vcs-test.golang.org/git/odd-tags.git", + rev: "v3.0.0-20220223184802-12d19af20458", + version: "v3.0.0-20220223184802-12d19af20458+incompatible", + name: "12d19af204585b0db3d2a876ceddf5b9323f5a4a", + short: "12d19af20458", + time: time.Date(2022, 2, 23, 18, 48, 2, 0, time.UTC), + }, +} + +func TestCodeRepo(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + tmpdir := t.TempDir() + + for _, tt := range codeRepoTests { + f := func(tt codeRepoTest) func(t *testing.T) { + return func(t *testing.T) { + if strings.Contains(tt.path, "gopkg.in") { + testenv.SkipFlaky(t, 54503) + } + + t.Parallel() + if tt.vcs != "mod" { + testenv.MustHaveExecPath(t, tt.vcs) + } + ctx := context.Background() + + repo := Lookup(ctx, "direct", tt.path) + + if tt.mpath == "" { + tt.mpath = tt.path + } + if mpath := repo.ModulePath(); mpath != tt.mpath { + t.Errorf("repo.ModulePath() = %q, want %q", mpath, tt.mpath) + } + + info, err := repo.Stat(ctx, tt.rev) + if err != nil { + if tt.err != "" { + if !strings.Contains(err.Error(), tt.err) { + t.Fatalf("repoStat(%q): %v, wanted %q", tt.rev, err, tt.err) + } + return + } + t.Fatalf("repo.Stat(%q): %v", tt.rev, err) + } + if tt.err != "" { + t.Errorf("repo.Stat(%q): success, wanted error", tt.rev) + } + if info.Version != tt.version { + t.Errorf("info.Version = %q, want %q", info.Version, tt.version) + } + if info.Name != tt.name { + t.Errorf("info.Name = %q, want %q", info.Name, tt.name) + } + if info.Short != tt.short { + t.Errorf("info.Short = %q, want %q", info.Short, tt.short) + } + if !info.Time.Equal(tt.time) { + t.Errorf("info.Time = %v, want %v", info.Time, tt.time) + } + + if tt.gomod != "" || tt.gomodErr != "" { + data, err := repo.GoMod(ctx, tt.version) + if err != nil && tt.gomodErr == "" { + t.Errorf("repo.GoMod(%q): %v", tt.version, err) + } else if err != nil && tt.gomodErr != "" { + if err.Error() != tt.gomodErr { + t.Errorf("repo.GoMod(%q): %v, want %q", tt.version, err, tt.gomodErr) + } + } else if 
tt.gomodErr != "" { + t.Errorf("repo.GoMod(%q) = %q, want error %q", tt.version, data, tt.gomodErr) + } else if string(data) != tt.gomod { + t.Errorf("repo.GoMod(%q) = %q, want %q", tt.version, data, tt.gomod) + } + } + + needHash := !testing.Short() && (tt.zipFileHash != "" || tt.zipSum != "") + if tt.zip != nil || tt.zipErr != "" || needHash { + f, err := os.CreateTemp(tmpdir, tt.version+".zip.") + if err != nil { + t.Fatalf("os.CreateTemp: %v", err) + } + zipfile := f.Name() + defer func() { + f.Close() + os.Remove(zipfile) + }() + + var w io.Writer + var h hash.Hash + if needHash { + h = sha256.New() + w = io.MultiWriter(f, h) + } else { + w = f + } + err = repo.Zip(ctx, w, tt.version) + f.Close() + if err != nil { + if tt.zipErr != "" { + if err.Error() == tt.zipErr { + return + } + t.Fatalf("repo.Zip(%q): %v, want error %q", tt.version, err, tt.zipErr) + } + t.Fatalf("repo.Zip(%q): %v", tt.version, err) + } + if tt.zipErr != "" { + t.Errorf("repo.Zip(%q): success, want error %q", tt.version, tt.zipErr) + } + + if tt.zip != nil { + prefix := tt.path + "@" + tt.version + "/" + z, err := zip.OpenReader(zipfile) + if err != nil { + t.Fatalf("open zip %s: %v", zipfile, err) + } + var names []string + for _, file := range z.File { + if !strings.HasPrefix(file.Name, prefix) { + t.Errorf("zip entry %v does not start with prefix %v", file.Name, prefix) + continue + } + names = append(names, file.Name[len(prefix):]) + } + z.Close() + if !reflect.DeepEqual(names, tt.zip) { + t.Fatalf("zip = %v\nwant %v\n", names, tt.zip) + } + } + + if needHash { + sum, err := dirhash.HashZip(zipfile, dirhash.Hash1) + if err != nil { + t.Errorf("repo.Zip(%q): %v", tt.version, err) + } else if sum != tt.zipSum { + t.Errorf("repo.Zip(%q): got file with sum %q, want %q", tt.version, sum, tt.zipSum) + } else if zipFileHash := hex.EncodeToString(h.Sum(nil)); zipFileHash != tt.zipFileHash { + t.Errorf("repo.Zip(%q): got file with hash %q, want %q (but content has correct sum)", tt.version, 
zipFileHash, tt.zipFileHash) + } + } + } + } + } + t.Run(strings.ReplaceAll(tt.path, "/", "_")+"/"+tt.rev, f(tt)) + if strings.HasPrefix(tt.path, vgotest1git) { + for vcs, alt := range altVgotests { + altTest := tt + altTest.vcs = vcs + altTest.path = alt + strings.TrimPrefix(altTest.path, vgotest1git) + if strings.HasPrefix(altTest.mpath, vgotest1git) { + altTest.mpath = alt + strings.TrimPrefix(altTest.mpath, vgotest1git) + } + var m map[string]string + if alt == vgotest1hg { + m = hgmap + } + altTest.version = remap(altTest.version, m) + altTest.name = remap(altTest.name, m) + altTest.short = remap(altTest.short, m) + altTest.rev = remap(altTest.rev, m) + altTest.err = remap(altTest.err, m) + altTest.gomodErr = remap(altTest.gomodErr, m) + altTest.zipErr = remap(altTest.zipErr, m) + altTest.zipSum = "" + altTest.zipFileHash = "" + t.Run(strings.ReplaceAll(altTest.path, "/", "_")+"/"+altTest.rev, f(altTest)) + } + } + } +} + +var hgmap = map[string]string{ + "github.com/rsc/vgotest1": "vcs-test.golang.org/hg/vgotest1.hg", + "f18795870fb14388a21ef3ebc1d75911c8694f31": "a9ad6d1d14eb544f459f446210c7eb3b009807c6", + "ea65f87c8f52c15ea68f3bdd9925ef17e20d91e9": "f1fc0f22021b638d073d31c752847e7bf385def7", + "b769f2de407a4db81af9c5de0a06016d60d2ea09": "92c7eb888b4fac17f1c6bd2e1060a1b881a3b832", + "8afe2b2efed96e0880ecd2a69b98a53b8c2738b6": "4e58084d459ae7e79c8c2264d0e8e9a92eb5cd44", + "2f615117ce481c8efef46e0cc0b4b4dccfac8fea": "879ea98f7743c8eff54f59a918f3a24123d1cf46", + "80d85c5d4d17598a0e9055e7c175a32b415d6128": "e125018e286a4b09061079a81e7b537070b7ff71", + "1f863feb76bc7029b78b21c5375644838962f88d": "bf63880162304a9337477f3858f5b7e255c75459", + "45f53230a74ad275c7127e117ac46914c8126160": "814fce58e83abd5bf2a13892e0b0e1198abefcd4", +} + +func remap(name string, m map[string]string) string { + if m[name] != "" { + return m[name] + } + if codehost.AllHex(name) { + for k, v := range m { + if strings.HasPrefix(k, name) { + return v[:len(name)] + } + } + } + for k, v := 
range m { + name = strings.ReplaceAll(name, k, v) + if codehost.AllHex(k) { + name = strings.ReplaceAll(name, k[:12], v[:12]) + } + } + return name +} + +var codeRepoVersionsTests = []struct { + vcs string + path string + prefix string + versions []string +}{ + { + vcs: "git", + path: "github.com/rsc/vgotest1", + versions: []string{"v0.0.0", "v0.0.1", "v1.0.0", "v1.0.1", "v1.0.2", "v1.0.3", "v1.1.0"}, + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1", + prefix: "v1.0", + versions: []string{"v1.0.0", "v1.0.1", "v1.0.2", "v1.0.3"}, + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1/v2", + versions: []string{"v2.0.0", "v2.0.1", "v2.0.2", "v2.0.3", "v2.0.4", "v2.0.5", "v2.0.6"}, + }, + { + vcs: "mod", + path: "swtch.com/testmod", + versions: []string{"v1.0.0", "v1.1.1"}, + }, + { + vcs: "git", + path: "vcs-test.golang.org/git/odd-tags.git", + versions: nil, + }, +} + +func TestCodeRepoVersions(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + for _, tt := range codeRepoVersionsTests { + tt := tt + t.Run(strings.ReplaceAll(tt.path, "/", "_"), func(t *testing.T) { + if strings.Contains(tt.path, "gopkg.in") { + testenv.SkipFlaky(t, 54503) + } + + t.Parallel() + if tt.vcs != "mod" { + testenv.MustHaveExecPath(t, tt.vcs) + } + ctx := context.Background() + + repo := Lookup(ctx, "direct", tt.path) + list, err := repo.Versions(ctx, tt.prefix) + if err != nil { + t.Fatalf("Versions(%q): %v", tt.prefix, err) + } + if !reflect.DeepEqual(list.List, tt.versions) { + t.Fatalf("Versions(%q):\nhave %v\nwant %v", tt.prefix, list, tt.versions) + } + }) + } +} + +var latestTests = []struct { + vcs string + path string + version string + err string +}{ + { + vcs: "git", + path: "github.com/rsc/empty", + err: "no commits", + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1", + err: `github.com/rsc/vgotest1@v0.0.0-20180219223237-a08abb797a67: invalid version: go.mod has post-v0 module path "github.com/vgotest1/v2" at revision a08abb797a67`, + }, + { + vcs: "git", + 
path: "github.com/rsc/vgotest1/v2", + err: `github.com/rsc/vgotest1/v2@v2.0.0-20180219223237-a08abb797a67: invalid version: github.com/rsc/vgotest1/go.mod and .../v2/go.mod both have .../v2 module paths at revision a08abb797a67`, + }, + { + vcs: "git", + path: "github.com/rsc/vgotest1/subdir", + err: "github.com/rsc/vgotest1/subdir@v0.0.0-20180219223237-a08abb797a67: invalid version: missing github.com/rsc/vgotest1/subdir/go.mod at revision a08abb797a67", + }, + { + vcs: "git", + path: "vcs-test.golang.org/git/commit-after-tag.git", + version: "v1.0.1-0.20190715211727-b325d8217783", + }, + { + vcs: "git", + path: "vcs-test.golang.org/git/no-tags.git", + version: "v0.0.0-20190715212047-e706ba1d9f6d", + }, + { + vcs: "mod", + path: "swtch.com/testmod", + version: "v1.1.1", + }, +} + +func TestLatest(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + for _, tt := range latestTests { + name := strings.ReplaceAll(tt.path, "/", "_") + t.Run(name, func(t *testing.T) { + tt := tt + t.Parallel() + if tt.vcs != "mod" { + testenv.MustHaveExecPath(t, tt.vcs) + } + ctx := context.Background() + + repo := Lookup(ctx, "direct", tt.path) + info, err := repo.Latest(ctx) + if err != nil { + if tt.err != "" { + if err.Error() == tt.err { + return + } + t.Fatalf("Latest(): %v, want %q", err, tt.err) + } + t.Fatalf("Latest(): %v", err) + } + if tt.err != "" { + t.Fatalf("Latest() = %v, want error %q", info.Version, tt.err) + } + if info.Version != tt.version { + t.Fatalf("Latest() = %v, want %v", info.Version, tt.version) + } + }) + } +} + +// fixedTagsRepo is a fake codehost.Repo that returns a fixed list of tags +type fixedTagsRepo struct { + tags []string + codehost.Repo +} + +func (ch *fixedTagsRepo) Tags(ctx context.Context, prefix string) (*codehost.Tags, error) { + tags := &codehost.Tags{} + for _, t := range ch.tags { + tags.List = append(tags.List, codehost.Tag{Name: t}) + } + return tags, nil +} + +func TestNonCanonicalSemver(t *testing.T) { + t.Parallel() + ctx := 
context.Background() + + root := "golang.org/x/issue24476" + ch := &fixedTagsRepo{ + tags: []string{ + "", "huh?", "1.0.1", + // what about "version 1 dot dogcow"? + "v1.🐕.🐄", + "v1", "v0.1", + // and one normal one that should pass through + "v1.0.1", + }, + } + + cr, err := newCodeRepo(ch, root, root) + if err != nil { + t.Fatal(err) + } + + v, err := cr.Versions(ctx, "") + if err != nil { + t.Fatal(err) + } + if len(v.List) != 1 || v.List[0] != "v1.0.1" { + t.Fatal("unexpected versions returned:", v) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/fetch.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/fetch.go new file mode 100644 index 0000000000000000000000000000000000000000..eeab6da62a2daddf5cc3de7df47b512b20157c5e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/fetch.go @@ -0,0 +1,1002 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modfetch + +import ( + "archive/zip" + "bytes" + "context" + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "sort" + "strings" + "sync" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/fsys" + "cmd/go/internal/gover" + "cmd/go/internal/lockedfile" + "cmd/go/internal/par" + "cmd/go/internal/robustio" + "cmd/go/internal/str" + "cmd/go/internal/trace" + + "golang.org/x/mod/module" + "golang.org/x/mod/sumdb/dirhash" + modzip "golang.org/x/mod/zip" +) + +var downloadCache par.ErrCache[module.Version, string] // version → directory + +var ErrToolchain = errors.New("internal error: invalid operation on toolchain module") + +// Download downloads the specific module version to the +// local download cache and returns the name of the directory +// corresponding to the root of the module's file tree. 
+func Download(ctx context.Context, mod module.Version) (dir string, err error) { + if gover.IsToolchain(mod.Path) { + return "", ErrToolchain + } + if err := checkCacheDir(ctx); err != nil { + base.Fatal(err) + } + + // The par.Cache here avoids duplicate work. + return downloadCache.Do(mod, func() (string, error) { + dir, err := download(ctx, mod) + if err != nil { + return "", err + } + checkMod(ctx, mod) + + // If go.mod exists (not an old legacy module), check version is not too new. + if data, err := os.ReadFile(filepath.Join(dir, "go.mod")); err == nil { + goVersion := gover.GoModLookup(data, "go") + if gover.Compare(goVersion, gover.Local()) > 0 { + return "", &gover.TooNewError{What: mod.String(), GoVersion: goVersion} + } + } else if !errors.Is(err, fs.ErrNotExist) { + return "", err + } + + return dir, nil + }) +} + +func download(ctx context.Context, mod module.Version) (dir string, err error) { + ctx, span := trace.StartSpan(ctx, "modfetch.download "+mod.String()) + defer span.Done() + + dir, err = DownloadDir(ctx, mod) + if err == nil { + // The directory has already been completely extracted (no .partial file exists). + return dir, nil + } else if dir == "" || !errors.Is(err, fs.ErrNotExist) { + return "", err + } + + // To avoid cluttering the cache with extraneous files, + // DownloadZip uses the same lockfile as Download. + // Invoke DownloadZip before locking the file. + zipfile, err := DownloadZip(ctx, mod) + if err != nil { + return "", err + } + + unlock, err := lockVersion(ctx, mod) + if err != nil { + return "", err + } + defer unlock() + + ctx, span = trace.StartSpan(ctx, "unzip "+zipfile) + defer span.Done() + + // Check whether the directory was populated while we were waiting on the lock. 
+ _, dirErr := DownloadDir(ctx, mod) + if dirErr == nil { + return dir, nil + } + _, dirExists := dirErr.(*DownloadDirPartialError) + + // Clean up any remaining temporary directories created by old versions + // (before 1.16), as well as partially extracted directories (indicated by + // DownloadDirPartialError, usually because of a .partial file). This is only + // safe to do because the lock file ensures that their writers are no longer + // active. + parentDir := filepath.Dir(dir) + tmpPrefix := filepath.Base(dir) + ".tmp-" + if old, err := filepath.Glob(filepath.Join(str.QuoteGlob(parentDir), str.QuoteGlob(tmpPrefix)+"*")); err == nil { + for _, path := range old { + RemoveAll(path) // best effort + } + } + if dirExists { + if err := RemoveAll(dir); err != nil { + return "", err + } + } + + partialPath, err := CachePath(ctx, mod, "partial") + if err != nil { + return "", err + } + + // Extract the module zip directory at its final location. + // + // To prevent other processes from reading the directory if we crash, + // create a .partial file before extracting the directory, and delete + // the .partial file afterward (all while holding the lock). + // + // Before Go 1.16, we extracted to a temporary directory with a random name + // then renamed it into place with os.Rename. On Windows, this failed with + // ERROR_ACCESS_DENIED when another process (usually an anti-virus scanner) + // opened files in the temporary directory. + // + // Go 1.14.2 and higher respect .partial files. Older versions may use + // partially extracted directories. 'go mod verify' can detect this, + // and 'go clean -modcache' can fix it. 
+ if err := os.MkdirAll(parentDir, 0777); err != nil { + return "", err + } + if err := os.WriteFile(partialPath, nil, 0666); err != nil { + return "", err + } + if err := modzip.Unzip(dir, mod, zipfile); err != nil { + fmt.Fprintf(os.Stderr, "-> %s\n", err) + if rmErr := RemoveAll(dir); rmErr == nil { + os.Remove(partialPath) + } + return "", err + } + if err := os.Remove(partialPath); err != nil { + return "", err + } + + if !cfg.ModCacheRW { + makeDirsReadOnly(dir) + } + return dir, nil +} + +var downloadZipCache par.ErrCache[module.Version, string] + +// DownloadZip downloads the specific module version to the +// local zip cache and returns the name of the zip file. +func DownloadZip(ctx context.Context, mod module.Version) (zipfile string, err error) { + // The par.Cache here avoids duplicate work. + return downloadZipCache.Do(mod, func() (string, error) { + zipfile, err := CachePath(ctx, mod, "zip") + if err != nil { + return "", err + } + ziphashfile := zipfile + "hash" + + // Return without locking if the zip and ziphash files exist. + if _, err := os.Stat(zipfile); err == nil { + if _, err := os.Stat(ziphashfile); err == nil { + return zipfile, nil + } + } + + // The zip or ziphash file does not exist. Acquire the lock and create them. 
+ if cfg.CmdName != "mod download" { + vers := mod.Version + if mod.Path == "golang.org/toolchain" { + // Shorten v0.0.1-go1.13.1.darwin-amd64 to go1.13.1.darwin-amd64 + _, vers, _ = strings.Cut(vers, "-") + if i := strings.LastIndex(vers, "."); i >= 0 { + goos, goarch, _ := strings.Cut(vers[i+1:], "-") + vers = vers[:i] + " (" + goos + "/" + goarch + ")" + } + fmt.Fprintf(os.Stderr, "go: downloading %s\n", vers) + } else { + fmt.Fprintf(os.Stderr, "go: downloading %s %s\n", mod.Path, vers) + } + } + unlock, err := lockVersion(ctx, mod) + if err != nil { + return "", err + } + defer unlock() + + if err := downloadZip(ctx, mod, zipfile); err != nil { + return "", err + } + return zipfile, nil + }) +} + +func downloadZip(ctx context.Context, mod module.Version, zipfile string) (err error) { + ctx, span := trace.StartSpan(ctx, "modfetch.downloadZip "+zipfile) + defer span.Done() + + // Double-check that the zipfile was not created while we were waiting for + // the lock in DownloadZip. + ziphashfile := zipfile + "hash" + var zipExists, ziphashExists bool + if _, err := os.Stat(zipfile); err == nil { + zipExists = true + } + if _, err := os.Stat(ziphashfile); err == nil { + ziphashExists = true + } + if zipExists && ziphashExists { + return nil + } + + // Create parent directories. + if err := os.MkdirAll(filepath.Dir(zipfile), 0777); err != nil { + return err + } + + // Clean up any remaining tempfiles from previous runs. + // This is only safe to do because the lock file ensures that their + // writers are no longer active. + tmpPattern := filepath.Base(zipfile) + "*.tmp" + if old, err := filepath.Glob(filepath.Join(str.QuoteGlob(filepath.Dir(zipfile)), tmpPattern)); err == nil { + for _, path := range old { + os.Remove(path) // best effort + } + } + + // If the zip file exists, the ziphash file must have been deleted + // or lost after a file system crash. Re-hash the zip without downloading. 
+ if zipExists { + return hashZip(mod, zipfile, ziphashfile) + } + + // From here to the os.Rename call below is functionally almost equivalent to + // renameio.WriteToFile, with one key difference: we want to validate the + // contents of the file (by hashing it) before we commit it. Because the file + // is zip-compressed, we need an actual file — or at least an io.ReaderAt — to + // validate it: we can't just tee the stream as we write it. + f, err := tempFile(ctx, filepath.Dir(zipfile), filepath.Base(zipfile), 0666) + if err != nil { + return err + } + defer func() { + if err != nil { + f.Close() + os.Remove(f.Name()) + } + }() + + var unrecoverableErr error + err = TryProxies(func(proxy string) error { + if unrecoverableErr != nil { + return unrecoverableErr + } + repo := Lookup(ctx, proxy, mod.Path) + err := repo.Zip(ctx, f, mod.Version) + if err != nil { + // Zip may have partially written to f before failing. + // (Perhaps the server crashed while sending the file?) + // Since we allow fallback on error in some cases, we need to fix up the + // file to be empty again for the next attempt. + if _, err := f.Seek(0, io.SeekStart); err != nil { + unrecoverableErr = err + return err + } + if err := f.Truncate(0); err != nil { + unrecoverableErr = err + return err + } + } + return err + }) + if err != nil { + return err + } + + // Double-check that the paths within the zip file are well-formed. + // + // TODO(bcmills): There is a similar check within the Unzip function. Can we eliminate one? 
+ fi, err := f.Stat() + if err != nil { + return err + } + z, err := zip.NewReader(f, fi.Size()) + if err != nil { + return err + } + prefix := mod.Path + "@" + mod.Version + "/" + for _, f := range z.File { + if !strings.HasPrefix(f.Name, prefix) { + return fmt.Errorf("zip for %s has unexpected file %s", prefix[:len(prefix)-1], f.Name) + } + } + + if err := f.Close(); err != nil { + return err + } + + // Hash the zip file and check the sum before renaming to the final location. + if err := hashZip(mod, f.Name(), ziphashfile); err != nil { + return err + } + if err := os.Rename(f.Name(), zipfile); err != nil { + return err + } + + // TODO(bcmills): Should we make the .zip and .ziphash files read-only to discourage tampering? + + return nil +} + +// hashZip reads the zip file opened in f, then writes the hash to ziphashfile, +// overwriting that file if it exists. +// +// If the hash does not match go.sum (or the sumdb if enabled), hashZip returns +// an error and does not write ziphashfile. +func hashZip(mod module.Version, zipfile, ziphashfile string) (err error) { + hash, err := dirhash.HashZip(zipfile, dirhash.DefaultHash) + if err != nil { + return err + } + if err := checkModSum(mod, hash); err != nil { + return err + } + hf, err := lockedfile.Create(ziphashfile) + if err != nil { + return err + } + defer func() { + if closeErr := hf.Close(); err == nil && closeErr != nil { + err = closeErr + } + }() + if err := hf.Truncate(int64(len(hash))); err != nil { + return err + } + if _, err := hf.WriteAt([]byte(hash), 0); err != nil { + return err + } + return nil +} + +// makeDirsReadOnly makes a best-effort attempt to remove write permissions for dir +// and its transitive contents. 
+func makeDirsReadOnly(dir string) { + type pathMode struct { + path string + mode fs.FileMode + } + var dirs []pathMode // in lexical order + filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { + if err == nil && d.IsDir() { + info, err := d.Info() + if err == nil && info.Mode()&0222 != 0 { + dirs = append(dirs, pathMode{path, info.Mode()}) + } + } + return nil + }) + + // Run over list backward to chmod children before parents. + for i := len(dirs) - 1; i >= 0; i-- { + os.Chmod(dirs[i].path, dirs[i].mode&^0222) + } +} + +// RemoveAll removes a directory written by Download or Unzip, first applying +// any permission changes needed to do so. +func RemoveAll(dir string) error { + // Module cache has 0555 directories; make them writable in order to remove content. + filepath.WalkDir(dir, func(path string, info fs.DirEntry, err error) error { + if err != nil { + return nil // ignore errors walking in file system + } + if info.IsDir() { + os.Chmod(path, 0777) + } + return nil + }) + return robustio.RemoveAll(dir) +} + +var GoSumFile string // path to go.sum; set by package modload +var WorkspaceGoSumFiles []string // path to module go.sums in workspace; set by package modload + +type modSum struct { + mod module.Version + sum string +} + +var goSum struct { + mu sync.Mutex + m map[module.Version][]string // content of go.sum file + w map[string]map[module.Version][]string // sum file in workspace -> content of that sum file + status map[modSum]modSumStatus // state of sums in m + overwrite bool // if true, overwrite go.sum without incorporating its contents + enabled bool // whether to use go.sum at all +} + +type modSumStatus struct { + used, dirty bool +} + +// Reset resets globals in the modfetch package, so previous loads don't affect +// contents of go.sum files. 
+func Reset() { + GoSumFile = "" + WorkspaceGoSumFiles = nil + + // Uses of lookupCache and downloadCache both can call checkModSum, + // which in turn sets the used bit on goSum.status for modules. + // Reset them so used can be computed properly. + lookupCache = par.Cache[lookupCacheKey, Repo]{} + downloadCache = par.ErrCache[module.Version, string]{} + + // Clear all fields on goSum. It will be initialized later + goSum.mu.Lock() + goSum.m = nil + goSum.w = nil + goSum.status = nil + goSum.overwrite = false + goSum.enabled = false + goSum.mu.Unlock() +} + +// initGoSum initializes the go.sum data. +// The boolean it returns reports whether the +// use of go.sum is now enabled. +// The goSum lock must be held. +func initGoSum() (bool, error) { + if GoSumFile == "" { + return false, nil + } + if goSum.m != nil { + return true, nil + } + + goSum.m = make(map[module.Version][]string) + goSum.status = make(map[modSum]modSumStatus) + goSum.w = make(map[string]map[module.Version][]string) + + for _, f := range WorkspaceGoSumFiles { + goSum.w[f] = make(map[module.Version][]string) + _, err := readGoSumFile(goSum.w[f], f) + if err != nil { + return false, err + } + } + + enabled, err := readGoSumFile(goSum.m, GoSumFile) + goSum.enabled = enabled + return enabled, err +} + +func readGoSumFile(dst map[module.Version][]string, file string) (bool, error) { + var ( + data []byte + err error + ) + if actualSumFile, ok := fsys.OverlayPath(file); ok { + // Don't lock go.sum if it's part of the overlay. + // On Plan 9, locking requires chmod, and we don't want to modify any file + // in the overlay. See #44700. + data, err = os.ReadFile(actualSumFile) + } else { + data, err = lockedfile.Read(file) + } + if err != nil && !os.IsNotExist(err) { + return false, err + } + readGoSum(dst, file, data) + + return true, nil +} + +// emptyGoModHash is the hash of a 1-file tree containing a 0-length go.mod. +// A bug caused us to write these into go.sum files for non-modules. 
+// We detect and remove them. +const emptyGoModHash = "h1:G7mAYYxgmS0lVkHyy2hEOLQCFB0DlQFTMLWggykrydY=" + +// readGoSum parses data, which is the content of file, +// and adds it to goSum.m. The goSum lock must be held. +func readGoSum(dst map[module.Version][]string, file string, data []byte) { + lineno := 0 + for len(data) > 0 { + var line []byte + lineno++ + i := bytes.IndexByte(data, '\n') + if i < 0 { + line, data = data, nil + } else { + line, data = data[:i], data[i+1:] + } + f := strings.Fields(string(line)) + if len(f) == 0 { + // blank line; skip it + continue + } + if len(f) != 3 { + if cfg.CmdName == "mod tidy" { + // ignore malformed line so that go mod tidy can fix go.sum + continue + } else { + base.Fatalf("malformed go.sum:\n%s:%d: wrong number of fields %v\n", file, lineno, len(f)) + } + } + if f[2] == emptyGoModHash { + // Old bug; drop it. + continue + } + mod := module.Version{Path: f[0], Version: f[1]} + dst[mod] = append(dst[mod], f[2]) + } +} + +// HaveSum returns true if the go.sum file contains an entry for mod. +// The entry's hash must be generated with a known hash algorithm. +// mod.Version may have a "/go.mod" suffix to distinguish sums for +// .mod and .zip files. +func HaveSum(mod module.Version) bool { + goSum.mu.Lock() + defer goSum.mu.Unlock() + inited, err := initGoSum() + if err != nil || !inited { + return false + } + for _, goSums := range goSum.w { + for _, h := range goSums[mod] { + if !strings.HasPrefix(h, "h1:") { + continue + } + if !goSum.status[modSum{mod, h}].dirty { + return true + } + } + } + for _, h := range goSum.m[mod] { + if !strings.HasPrefix(h, "h1:") { + continue + } + if !goSum.status[modSum{mod, h}].dirty { + return true + } + } + return false +} + +// checkMod checks the given module's checksum and Go version. +func checkMod(ctx context.Context, mod module.Version) { + // Do the file I/O before acquiring the go.sum lock. 
+ ziphash, err := CachePath(ctx, mod, "ziphash") + if err != nil { + base.Fatalf("verifying %v", module.VersionError(mod, err)) + } + data, err := lockedfile.Read(ziphash) + if err != nil { + base.Fatalf("verifying %v", module.VersionError(mod, err)) + } + data = bytes.TrimSpace(data) + if !isValidSum(data) { + // Recreate ziphash file from zip file and use that to check the mod sum. + zip, err := CachePath(ctx, mod, "zip") + if err != nil { + base.Fatalf("verifying %v", module.VersionError(mod, err)) + } + err = hashZip(mod, zip, ziphash) + if err != nil { + base.Fatalf("verifying %v", module.VersionError(mod, err)) + } + return + } + h := string(data) + if !strings.HasPrefix(h, "h1:") { + base.Fatalf("verifying %v", module.VersionError(mod, fmt.Errorf("unexpected ziphash: %q", h))) + } + + if err := checkModSum(mod, h); err != nil { + base.Fatalf("%s", err) + } +} + +// goModSum returns the checksum for the go.mod contents. +func goModSum(data []byte) (string, error) { + return dirhash.Hash1([]string{"go.mod"}, func(string) (io.ReadCloser, error) { + return io.NopCloser(bytes.NewReader(data)), nil + }) +} + +// checkGoMod checks the given module's go.mod checksum; +// data is the go.mod content. +func checkGoMod(path, version string, data []byte) error { + h, err := goModSum(data) + if err != nil { + return &module.ModuleError{Path: path, Version: version, Err: fmt.Errorf("verifying go.mod: %v", err)} + } + + return checkModSum(module.Version{Path: path, Version: version + "/go.mod"}, h) +} + +// checkModSum checks that the recorded checksum for mod is h. +// +// mod.Version may have the additional suffix "/go.mod" to request the checksum +// for the module's go.mod file only. +func checkModSum(mod module.Version, h string) error { + // We lock goSum when manipulating it, + // but we arrange to release the lock when calling checkSumDB, + // so that parallel calls to checkModHash can execute parallel calls + // to checkSumDB. 
+ + // Check whether mod+h is listed in go.sum already. If so, we're done. + goSum.mu.Lock() + inited, err := initGoSum() + if err != nil { + goSum.mu.Unlock() + return err + } + done := inited && haveModSumLocked(mod, h) + if inited { + st := goSum.status[modSum{mod, h}] + st.used = true + goSum.status[modSum{mod, h}] = st + } + goSum.mu.Unlock() + + if done { + return nil + } + + // Not listed, so we want to add them. + // Consult checksum database if appropriate. + if useSumDB(mod) { + // Calls base.Fatalf if mismatch detected. + if err := checkSumDB(mod, h); err != nil { + return err + } + } + + // Add mod+h to go.sum, if it hasn't appeared already. + if inited { + goSum.mu.Lock() + addModSumLocked(mod, h) + st := goSum.status[modSum{mod, h}] + st.dirty = true + goSum.status[modSum{mod, h}] = st + goSum.mu.Unlock() + } + return nil +} + +// haveModSumLocked reports whether the pair mod,h is already listed in go.sum. +// If it finds a conflicting pair instead, it calls base.Fatalf. +// goSum.mu must be locked. +func haveModSumLocked(mod module.Version, h string) bool { + sumFileName := "go.sum" + if strings.HasSuffix(GoSumFile, "go.work.sum") { + sumFileName = "go.work.sum" + } + for _, vh := range goSum.m[mod] { + if h == vh { + return true + } + if strings.HasPrefix(vh, "h1:") { + base.Fatalf("verifying %s@%s: checksum mismatch\n\tdownloaded: %v\n\t%s: %v"+goSumMismatch, mod.Path, mod.Version, h, sumFileName, vh) + } + } + // Also check workspace sums. + foundMatch := false + // Check sums from all files in case there are conflicts between + // the files. + for goSumFile, goSums := range goSum.w { + for _, vh := range goSums[mod] { + if h == vh { + foundMatch = true + } else if strings.HasPrefix(vh, "h1:") { + base.Fatalf("verifying %s@%s: checksum mismatch\n\tdownloaded: %v\n\t%s: %v"+goSumMismatch, mod.Path, mod.Version, h, goSumFile, vh) + } + } + } + return foundMatch +} + +// addModSumLocked adds the pair mod,h to go.sum. +// goSum.mu must be locked. 
+func addModSumLocked(mod module.Version, h string) { + if haveModSumLocked(mod, h) { + return + } + if len(goSum.m[mod]) > 0 { + fmt.Fprintf(os.Stderr, "warning: verifying %s@%s: unknown hashes in go.sum: %v; adding %v"+hashVersionMismatch, mod.Path, mod.Version, strings.Join(goSum.m[mod], ", "), h) + } + goSum.m[mod] = append(goSum.m[mod], h) +} + +// checkSumDB checks the mod, h pair against the Go checksum database. +// It calls base.Fatalf if the hash is to be rejected. +func checkSumDB(mod module.Version, h string) error { + modWithoutSuffix := mod + noun := "module" + if before, found := strings.CutSuffix(mod.Version, "/go.mod"); found { + noun = "go.mod" + modWithoutSuffix.Version = before + } + + db, lines, err := lookupSumDB(mod) + if err != nil { + return module.VersionError(modWithoutSuffix, fmt.Errorf("verifying %s: %v", noun, err)) + } + + have := mod.Path + " " + mod.Version + " " + h + prefix := mod.Path + " " + mod.Version + " h1:" + for _, line := range lines { + if line == have { + return nil + } + if strings.HasPrefix(line, prefix) { + return module.VersionError(modWithoutSuffix, fmt.Errorf("verifying %s: checksum mismatch\n\tdownloaded: %v\n\t%s: %v"+sumdbMismatch, noun, h, db, line[len(prefix)-len("h1:"):])) + } + } + return nil +} + +// Sum returns the checksum for the downloaded copy of the given module, +// if present in the download cache. +func Sum(ctx context.Context, mod module.Version) string { + if cfg.GOMODCACHE == "" { + // Do not use current directory. + return "" + } + + ziphash, err := CachePath(ctx, mod, "ziphash") + if err != nil { + return "" + } + data, err := lockedfile.Read(ziphash) + if err != nil { + return "" + } + data = bytes.TrimSpace(data) + if !isValidSum(data) { + return "" + } + return string(data) +} + +// isValidSum returns true if data is the valid contents of a zip hash file. 
+// Certain critical files are written to disk by first truncating +// then writing the actual bytes, so that if the write fails +// the corrupt file should contain at least one of the null +// bytes written by the truncate operation. +func isValidSum(data []byte) bool { + if bytes.IndexByte(data, '\000') >= 0 { + return false + } + + if len(data) != len("h1:")+base64.StdEncoding.EncodedLen(sha256.Size) { + return false + } + + return true +} + +var ErrGoSumDirty = errors.New("updates to go.sum needed, disabled by -mod=readonly") + +// WriteGoSum writes the go.sum file if it needs to be updated. +// +// keep is used to check whether a newly added sum should be saved in go.sum. +// It should have entries for both module content sums and go.mod sums +// (version ends with "/go.mod"). Existing sums will be preserved unless they +// have been marked for deletion with TrimGoSum. +func WriteGoSum(ctx context.Context, keep map[module.Version]bool, readonly bool) error { + goSum.mu.Lock() + defer goSum.mu.Unlock() + + // If we haven't read the go.sum file yet, don't bother writing it. + if !goSum.enabled { + return nil + } + + // Check whether we need to add sums for which keep[m] is true or remove + // unused sums marked with TrimGoSum. If there are no changes to make, + // just return without opening go.sum. + dirty := false +Outer: + for m, hs := range goSum.m { + for _, h := range hs { + st := goSum.status[modSum{m, h}] + if st.dirty && (!st.used || keep[m]) { + dirty = true + break Outer + } + } + } + if !dirty { + return nil + } + if readonly { + return ErrGoSumDirty + } + if _, ok := fsys.OverlayPath(GoSumFile); ok { + base.Fatalf("go: updates to go.sum needed, but go.sum is part of the overlay specified with -overlay") + } + + // Make a best-effort attempt to acquire the side lock, only to exclude + // previous versions of the 'go' command from making simultaneous edits. 
+ if unlock, err := SideLock(ctx); err == nil { + defer unlock() + } + + err := lockedfile.Transform(GoSumFile, func(data []byte) ([]byte, error) { + if !goSum.overwrite { + // Incorporate any sums added by other processes in the meantime. + // Add only the sums that we actually checked: the user may have edited or + // truncated the file to remove erroneous hashes, and we shouldn't restore + // them without good reason. + goSum.m = make(map[module.Version][]string, len(goSum.m)) + readGoSum(goSum.m, GoSumFile, data) + for ms, st := range goSum.status { + if st.used && !sumInWorkspaceModulesLocked(ms.mod) { + addModSumLocked(ms.mod, ms.sum) + } + } + } + + var mods []module.Version + for m := range goSum.m { + mods = append(mods, m) + } + module.Sort(mods) + + var buf bytes.Buffer + for _, m := range mods { + list := goSum.m[m] + sort.Strings(list) + str.Uniq(&list) + for _, h := range list { + st := goSum.status[modSum{m, h}] + if (!st.dirty || (st.used && keep[m])) && !sumInWorkspaceModulesLocked(m) { + fmt.Fprintf(&buf, "%s %s %s\n", m.Path, m.Version, h) + } + } + } + return buf.Bytes(), nil + }) + + if err != nil { + return fmt.Errorf("updating go.sum: %w", err) + } + + goSum.status = make(map[modSum]modSumStatus) + goSum.overwrite = false + return nil +} + +func sumInWorkspaceModulesLocked(m module.Version) bool { + for _, goSums := range goSum.w { + if _, ok := goSums[m]; ok { + return true + } + } + return false +} + +// TrimGoSum trims go.sum to contain only the modules needed for reproducible +// builds. +// +// keep is used to check whether a sum should be retained in go.mod. It should +// have entries for both module content sums and go.mod sums (version ends +// with "/go.mod"). 
+func TrimGoSum(keep map[module.Version]bool) { + goSum.mu.Lock() + defer goSum.mu.Unlock() + inited, err := initGoSum() + if err != nil { + base.Fatalf("%s", err) + } + if !inited { + return + } + + for m, hs := range goSum.m { + if !keep[m] { + for _, h := range hs { + goSum.status[modSum{m, h}] = modSumStatus{used: false, dirty: true} + } + goSum.overwrite = true + } + } +} + +const goSumMismatch = ` + +SECURITY ERROR +This download does NOT match an earlier download recorded in go.sum. +The bits may have been replaced on the origin server, or an attacker may +have intercepted the download attempt. + +For more information, see 'go help module-auth'. +` + +const sumdbMismatch = ` + +SECURITY ERROR +This download does NOT match the one reported by the checksum server. +The bits may have been replaced on the origin server, or an attacker may +have intercepted the download attempt. + +For more information, see 'go help module-auth'. +` + +const hashVersionMismatch = ` + +SECURITY WARNING +This download is listed in go.sum, but using an unknown hash algorithm. +The download cannot be verified. + +For more information, see 'go help module-auth'. + +` + +var HelpModuleAuth = &base.Command{ + UsageLine: "module-auth", + Short: "module authentication using go.sum", + Long: ` +When the go command downloads a module zip file or go.mod file into the +module cache, it computes a cryptographic hash and compares it with a known +value to verify the file hasn't changed since it was first downloaded. Known +hashes are stored in a file in the module root directory named go.sum. Hashes +may also be downloaded from the checksum database depending on the values of +GOSUMDB, GOPRIVATE, and GONOSUMDB. + +For details, see https://golang.org/ref/mod#authenticating. 
+`, +} + +var HelpPrivate = &base.Command{ + UsageLine: "private", + Short: "configuration for downloading non-public code", + Long: ` +The go command defaults to downloading modules from the public Go module +mirror at proxy.golang.org. It also defaults to validating downloaded modules, +regardless of source, against the public Go checksum database at sum.golang.org. +These defaults work well for publicly available source code. + +The GOPRIVATE environment variable controls which modules the go command +considers to be private (not available publicly) and should therefore not use +the proxy or checksum database. The variable is a comma-separated list of +glob patterns (in the syntax of Go's path.Match) of module path prefixes. +For example, + + GOPRIVATE=*.corp.example.com,rsc.io/private + +causes the go command to treat as private any module with a path prefix +matching either pattern, including git.corp.example.com/xyzzy, rsc.io/private, +and rsc.io/private/quux. + +For fine-grained control over module download and validation, the GONOPROXY +and GONOSUMDB environment variables accept the same kind of glob list +and override GOPRIVATE for the specific decision of whether to use the proxy +and checksum database, respectively. + +For example, if a company ran a module proxy serving private modules, +users would configure go using: + + GOPRIVATE=*.corp.example.com + GOPROXY=proxy.example.com + GONOPROXY=none + +The GOPRIVATE variable is also used to define the "public" and "private" +patterns for the GOVCS variable; see 'go help vcs'. For that usage, +GOPRIVATE applies even in GOPATH mode. In that case, it matches import paths +instead of module paths. + +The 'go env -w' command (see 'go help env') can be used to set these variables +for future go command invocations. + +For more details, see https://golang.org/ref/mod#private-modules. 
+`, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/key.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/key.go new file mode 100644 index 0000000000000000000000000000000000000000..06f9989b9d4373c6ae8e477d8eb864ffece1677d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/key.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modfetch + +var knownGOSUMDB = map[string]string{ + "sum.golang.org": "sum.golang.org+033de0ae+Ac4zctda0e5eza+HJyk9SxEdh+s3Ux18htTTAD8OuAn8", +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/proxy.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/proxy.go new file mode 100644 index 0000000000000000000000000000000000000000..e0efb097ecdda9ee27f71c673258ab9f022f1a71 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/proxy.go @@ -0,0 +1,453 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modfetch + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "net/url" + "path" + pathpkg "path" + "path/filepath" + "strings" + "sync" + "time" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/modfetch/codehost" + "cmd/go/internal/web" + + "golang.org/x/mod/module" + "golang.org/x/mod/semver" +) + +var HelpGoproxy = &base.Command{ + UsageLine: "goproxy", + Short: "module proxy protocol", + Long: ` +A Go module proxy is any web server that can respond to GET requests for +URLs of a specified form. The requests have no query parameters, so even +a site serving from a fixed file system (including a file:/// URL) +can be a module proxy. 
+ +For details on the GOPROXY protocol, see +https://golang.org/ref/mod#goproxy-protocol. +`, +} + +var proxyOnce struct { + sync.Once + list []proxySpec + err error +} + +type proxySpec struct { + // url is the proxy URL or one of "off", "direct", "noproxy". + url string + + // fallBackOnError is true if a request should be attempted on the next proxy + // in the list after any error from this proxy. If fallBackOnError is false, + // the request will only be attempted on the next proxy if the error is + // equivalent to os.ErrNotFound, which is true for 404 and 410 responses. + fallBackOnError bool +} + +func proxyList() ([]proxySpec, error) { + proxyOnce.Do(func() { + if cfg.GONOPROXY != "" && cfg.GOPROXY != "direct" { + proxyOnce.list = append(proxyOnce.list, proxySpec{url: "noproxy"}) + } + + goproxy := cfg.GOPROXY + for goproxy != "" { + var url string + fallBackOnError := false + if i := strings.IndexAny(goproxy, ",|"); i >= 0 { + url = goproxy[:i] + fallBackOnError = goproxy[i] == '|' + goproxy = goproxy[i+1:] + } else { + url = goproxy + goproxy = "" + } + + url = strings.TrimSpace(url) + if url == "" { + continue + } + if url == "off" { + // "off" always fails hard, so can stop walking list. + proxyOnce.list = append(proxyOnce.list, proxySpec{url: "off"}) + break + } + if url == "direct" { + proxyOnce.list = append(proxyOnce.list, proxySpec{url: "direct"}) + // For now, "direct" is the end of the line. We may decide to add some + // sort of fallback behavior for them in the future, so ignore + // subsequent entries for forward-compatibility. + break + } + + // Single-word tokens are reserved for built-in behaviors, and anything + // containing the string ":/" or matching an absolute file path must be a + // complete URL. For all other paths, implicitly add "https://". + if strings.ContainsAny(url, ".:/") && !strings.Contains(url, ":/") && !filepath.IsAbs(url) && !path.IsAbs(url) { + url = "https://" + url + } + + // Check that newProxyRepo accepts the URL. 
+ // It won't do anything with the path. + if _, err := newProxyRepo(url, "golang.org/x/text"); err != nil { + proxyOnce.err = err + return + } + + proxyOnce.list = append(proxyOnce.list, proxySpec{ + url: url, + fallBackOnError: fallBackOnError, + }) + } + + if len(proxyOnce.list) == 0 || + len(proxyOnce.list) == 1 && proxyOnce.list[0].url == "noproxy" { + // There were no proxies, other than the implicit "noproxy" added when + // GONOPROXY is set. This can happen if GOPROXY is a non-empty string + // like "," or " ". + proxyOnce.err = fmt.Errorf("GOPROXY list is not the empty string, but contains no entries") + } + }) + + return proxyOnce.list, proxyOnce.err +} + +// TryProxies iterates f over each configured proxy (including "noproxy" and +// "direct" if applicable) until f returns no error or until f returns an +// error that is not equivalent to fs.ErrNotExist on a proxy configured +// not to fall back on errors. +// +// TryProxies then returns that final error. +// +// If GOPROXY is set to "off", TryProxies invokes f once with the argument +// "off". +func TryProxies(f func(proxy string) error) error { + proxies, err := proxyList() + if err != nil { + return err + } + if len(proxies) == 0 { + panic("GOPROXY list is empty") + } + + // We try to report the most helpful error to the user. "direct" and "noproxy" + // errors are best, followed by proxy errors other than ErrNotExist, followed + // by ErrNotExist. + // + // Note that errProxyOff, errNoproxy, and errUseProxy are equivalent to + // ErrNotExist. errUseProxy should only be returned if "noproxy" is the only + // proxy. errNoproxy should never be returned, since there should always be a + // more useful error from "noproxy" first. 
+ const ( + notExistRank = iota + proxyRank + directRank + ) + var bestErr error + bestErrRank := notExistRank + for _, proxy := range proxies { + err := f(proxy.url) + if err == nil { + return nil + } + isNotExistErr := errors.Is(err, fs.ErrNotExist) + + if proxy.url == "direct" || (proxy.url == "noproxy" && err != errUseProxy) { + bestErr = err + bestErrRank = directRank + } else if bestErrRank <= proxyRank && !isNotExistErr { + bestErr = err + bestErrRank = proxyRank + } else if bestErrRank == notExistRank { + bestErr = err + } + + if !proxy.fallBackOnError && !isNotExistErr { + break + } + } + return bestErr +} + +type proxyRepo struct { + url *url.URL // The combined module proxy URL joined with the module path. + path string // The module path (unescaped). + redactedBase string // The base module proxy URL in [url.URL.Redacted] form. + + listLatestOnce sync.Once + listLatest *RevInfo + listLatestErr error +} + +func newProxyRepo(baseURL, path string) (Repo, error) { + // Parse the base proxy URL. + base, err := url.Parse(baseURL) + if err != nil { + return nil, err + } + redactedBase := base.Redacted() + switch base.Scheme { + case "http", "https": + // ok + case "file": + if *base != (url.URL{Scheme: base.Scheme, Path: base.Path, RawPath: base.RawPath}) { + return nil, fmt.Errorf("invalid file:// proxy URL with non-path elements: %s", redactedBase) + } + case "": + return nil, fmt.Errorf("invalid proxy URL missing scheme: %s", redactedBase) + default: + return nil, fmt.Errorf("invalid proxy URL scheme (must be https, http, file): %s", redactedBase) + } + + // Append the module path to the URL. 
+ url := base + enc, err := module.EscapePath(path) + if err != nil { + return nil, err + } + url.Path = strings.TrimSuffix(base.Path, "/") + "/" + enc + url.RawPath = strings.TrimSuffix(base.RawPath, "/") + "/" + pathEscape(enc) + + return &proxyRepo{url, path, redactedBase, sync.Once{}, nil, nil}, nil +} + +func (p *proxyRepo) ModulePath() string { + return p.path +} + +var errProxyReuse = fmt.Errorf("proxy does not support CheckReuse") + +func (p *proxyRepo) CheckReuse(ctx context.Context, old *codehost.Origin) error { + return errProxyReuse +} + +// versionError returns err wrapped in a ModuleError for p.path. +func (p *proxyRepo) versionError(version string, err error) error { + if version != "" && version != module.CanonicalVersion(version) { + return &module.ModuleError{ + Path: p.path, + Err: &module.InvalidVersionError{ + Version: version, + Pseudo: module.IsPseudoVersion(version), + Err: err, + }, + } + } + + return &module.ModuleError{ + Path: p.path, + Version: version, + Err: err, + } +} + +func (p *proxyRepo) getBytes(ctx context.Context, path string) ([]byte, error) { + body, redactedURL, err := p.getBody(ctx, path) + if err != nil { + return nil, err + } + defer body.Close() + + b, err := io.ReadAll(body) + if err != nil { + // net/http doesn't add context to Body read errors, so add it here. + // (See https://go.dev/issue/52727.) 
+ return b, &url.Error{Op: "read", URL: redactedURL, Err: err} + } + return b, nil +} + +func (p *proxyRepo) getBody(ctx context.Context, path string) (r io.ReadCloser, redactedURL string, err error) { + fullPath := pathpkg.Join(p.url.Path, path) + + target := *p.url + target.Path = fullPath + target.RawPath = pathpkg.Join(target.RawPath, pathEscape(path)) + + resp, err := web.Get(web.DefaultSecurity, &target) + if err != nil { + return nil, "", err + } + if err := resp.Err(); err != nil { + resp.Body.Close() + return nil, "", err + } + return resp.Body, resp.URL, nil +} + +func (p *proxyRepo) Versions(ctx context.Context, prefix string) (*Versions, error) { + data, err := p.getBytes(ctx, "@v/list") + if err != nil { + p.listLatestOnce.Do(func() { + p.listLatest, p.listLatestErr = nil, p.versionError("", err) + }) + return nil, p.versionError("", err) + } + var list []string + allLine := strings.Split(string(data), "\n") + for _, line := range allLine { + f := strings.Fields(line) + if len(f) >= 1 && semver.IsValid(f[0]) && strings.HasPrefix(f[0], prefix) && !module.IsPseudoVersion(f[0]) { + list = append(list, f[0]) + } + } + p.listLatestOnce.Do(func() { + p.listLatest, p.listLatestErr = p.latestFromList(ctx, allLine) + }) + semver.Sort(list) + return &Versions{List: list}, nil +} + +func (p *proxyRepo) latest(ctx context.Context) (*RevInfo, error) { + p.listLatestOnce.Do(func() { + data, err := p.getBytes(ctx, "@v/list") + if err != nil { + p.listLatestErr = p.versionError("", err) + return + } + list := strings.Split(string(data), "\n") + p.listLatest, p.listLatestErr = p.latestFromList(ctx, list) + }) + return p.listLatest, p.listLatestErr +} + +func (p *proxyRepo) latestFromList(ctx context.Context, allLine []string) (*RevInfo, error) { + var ( + bestTime time.Time + bestVersion string + ) + for _, line := range allLine { + f := strings.Fields(line) + if len(f) >= 1 && semver.IsValid(f[0]) { + // If the proxy includes timestamps, prefer the timestamp it 
reports. + // Otherwise, derive the timestamp from the pseudo-version. + var ( + ft time.Time + ) + if len(f) >= 2 { + ft, _ = time.Parse(time.RFC3339, f[1]) + } else if module.IsPseudoVersion(f[0]) { + ft, _ = module.PseudoVersionTime(f[0]) + } else { + // Repo.Latest promises that this method is only called where there are + // no tagged versions. Ignore any tagged versions that were added in the + // meantime. + continue + } + if bestTime.Before(ft) { + bestTime = ft + bestVersion = f[0] + } + } + } + if bestVersion == "" { + return nil, p.versionError("", codehost.ErrNoCommits) + } + + // Call Stat to get all the other fields, including Origin information. + return p.Stat(ctx, bestVersion) +} + +func (p *proxyRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) { + encRev, err := module.EscapeVersion(rev) + if err != nil { + return nil, p.versionError(rev, err) + } + data, err := p.getBytes(ctx, "@v/"+encRev+".info") + if err != nil { + return nil, p.versionError(rev, err) + } + info := new(RevInfo) + if err := json.Unmarshal(data, info); err != nil { + return nil, p.versionError(rev, fmt.Errorf("invalid response from proxy %q: %w", p.redactedBase, err)) + } + if info.Version != rev && rev == module.CanonicalVersion(rev) && module.Check(p.path, rev) == nil { + // If we request a correct, appropriate version for the module path, the + // proxy must return either exactly that version or an error — not some + // arbitrary other version. 
+ return nil, p.versionError(rev, fmt.Errorf("proxy returned info for version %s instead of requested version", info.Version)) + } + return info, nil +} + +func (p *proxyRepo) Latest(ctx context.Context) (*RevInfo, error) { + data, err := p.getBytes(ctx, "@latest") + if err != nil { + if !errors.Is(err, fs.ErrNotExist) { + return nil, p.versionError("", err) + } + return p.latest(ctx) + } + info := new(RevInfo) + if err := json.Unmarshal(data, info); err != nil { + return nil, p.versionError("", fmt.Errorf("invalid response from proxy %q: %w", p.redactedBase, err)) + } + return info, nil +} + +func (p *proxyRepo) GoMod(ctx context.Context, version string) ([]byte, error) { + if version != module.CanonicalVersion(version) { + return nil, p.versionError(version, fmt.Errorf("internal error: version passed to GoMod is not canonical")) + } + + encVer, err := module.EscapeVersion(version) + if err != nil { + return nil, p.versionError(version, err) + } + data, err := p.getBytes(ctx, "@v/"+encVer+".mod") + if err != nil { + return nil, p.versionError(version, err) + } + return data, nil +} + +func (p *proxyRepo) Zip(ctx context.Context, dst io.Writer, version string) error { + if version != module.CanonicalVersion(version) { + return p.versionError(version, fmt.Errorf("internal error: version passed to Zip is not canonical")) + } + + encVer, err := module.EscapeVersion(version) + if err != nil { + return p.versionError(version, err) + } + path := "@v/" + encVer + ".zip" + body, redactedURL, err := p.getBody(ctx, path) + if err != nil { + return p.versionError(version, err) + } + defer body.Close() + + lr := &io.LimitedReader{R: body, N: codehost.MaxZipFile + 1} + if _, err := io.Copy(dst, lr); err != nil { + // net/http doesn't add context to Body read errors, so add it here. + // (See https://go.dev/issue/52727.) 
+ err = &url.Error{Op: "read", URL: redactedURL, Err: err} + return p.versionError(version, err) + } + if lr.N <= 0 { + return p.versionError(version, fmt.Errorf("downloaded zip file too large")) + } + return nil +} + +// pathEscape escapes s so it can be used in a path. +// That is, it escapes things like ? and # (which really shouldn't appear anyway). +// It does not escape / to %2F: our REST API is designed so that / can be left as is. +func pathEscape(s string) string { + return strings.ReplaceAll(url.PathEscape(s), "%2F", "/") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/repo.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/repo.go new file mode 100644 index 0000000000000000000000000000000000000000..25fb02de35b12fc91cc389ff92d132715c07cd2f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/repo.go @@ -0,0 +1,411 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modfetch + +import ( + "context" + "fmt" + "io" + "io/fs" + "os" + "strconv" + "time" + + "cmd/go/internal/cfg" + "cmd/go/internal/modfetch/codehost" + "cmd/go/internal/par" + "cmd/go/internal/vcs" + web "cmd/go/internal/web" + + "golang.org/x/mod/module" +) + +const traceRepo = false // trace all repo actions, for debugging + +// A Repo represents a repository storing all versions of a single module. +// It must be safe for simultaneous use by multiple goroutines. +type Repo interface { + // ModulePath returns the module path. + ModulePath() string + + // CheckReuse checks whether the validation criteria in the origin + // are still satisfied on the server corresponding to this module. + // If so, the caller can reuse any cached Versions or RevInfo containing + // this origin rather than redownloading those from the server. 
+ CheckReuse(ctx context.Context, old *codehost.Origin) error + + // Versions lists all known versions with the given prefix. + // Pseudo-versions are not included. + // + // Versions should be returned sorted in semver order + // (implementations can use semver.Sort). + // + // Versions returns a non-nil error only if there was a problem + // fetching the list of versions: it may return an empty list + // along with a nil error if the list of matching versions + // is known to be empty. + // + // If the underlying repository does not exist, + // Versions returns an error matching errors.Is(_, os.NotExist). + Versions(ctx context.Context, prefix string) (*Versions, error) + + // Stat returns information about the revision rev. + // A revision can be any identifier known to the underlying service: + // commit hash, branch, tag, and so on. + Stat(ctx context.Context, rev string) (*RevInfo, error) + + // Latest returns the latest revision on the default branch, + // whatever that means in the underlying source code repository. + // It is only used when there are no tagged versions. + Latest(ctx context.Context) (*RevInfo, error) + + // GoMod returns the go.mod file for the given version. + GoMod(ctx context.Context, version string) (data []byte, err error) + + // Zip writes a zip file for the given version to dst. + Zip(ctx context.Context, dst io.Writer, version string) error +} + +// A Versions describes the available versions in a module repository. +type Versions struct { + Origin *codehost.Origin `json:",omitempty"` // origin information for reuse + + List []string // semver versions +} + +// A RevInfo describes a single revision in a module repository. +type RevInfo struct { + Version string // suggested version string for this revision + Time time.Time // commit time + + // These fields are used for Stat of arbitrary rev, + // but they are not recorded when talking about module versions. 
+ Name string `json:"-"` // complete ID in underlying repository + Short string `json:"-"` // shortened ID, for use in pseudo-version + + Origin *codehost.Origin `json:",omitempty"` // provenance for reuse +} + +// Re: module paths, import paths, repository roots, and lookups +// +// A module is a collection of Go packages stored in a file tree +// with a go.mod file at the root of the tree. +// The go.mod defines the module path, which is the import path +// corresponding to the root of the file tree. +// The import path of a directory within that file tree is the module path +// joined with the name of the subdirectory relative to the root. +// +// For example, the module with path rsc.io/qr corresponds to the +// file tree in the repository https://github.com/rsc/qr. +// That file tree has a go.mod that says "module rsc.io/qr". +// The package in the root directory has import path "rsc.io/qr". +// The package in the gf256 subdirectory has import path "rsc.io/qr/gf256". +// In this example, "rsc.io/qr" is both a module path and an import path. +// But "rsc.io/qr/gf256" is only an import path, not a module path: +// it names an importable package, but not a module. +// +// As a special case to incorporate code written before modules were +// introduced, if a path p resolves using the pre-module "go get" lookup +// to the root of a source code repository without a go.mod file, +// that repository is treated as if it had a go.mod in its root directory +// declaring module path p. (The go.mod is further considered to +// contain requirements corresponding to any legacy version +// tracking format such as Gopkg.lock, vendor/vendor.conf, and so on.) +// +// The presentation so far ignores the fact that a source code repository +// has many different versions of a file tree, and those versions may +// differ in whether a particular go.mod exists and what it contains. 
+// In fact there is a well-defined mapping only from a module path, version +// pair - often written path@version - to a particular file tree. +// For example rsc.io/qr@v0.1.0 depends on the "implicit go.mod at root of +// repository" rule, while rsc.io/qr@v0.2.0 has an explicit go.mod. +// Because the "go get" import paths rsc.io/qr and github.com/rsc/qr +// both redirect to the Git repository https://github.com/rsc/qr, +// github.com/rsc/qr@v0.1.0 is the same file tree as rsc.io/qr@v0.1.0 +// but a different module (a different name). In contrast, since v0.2.0 +// of that repository has an explicit go.mod that declares path rsc.io/qr, +// github.com/rsc/qr@v0.2.0 is an invalid module path, version pair. +// Before modules, import comments would have had the same effect. +// +// The set of import paths associated with a given module path is +// clearly not fixed: at the least, new directories with new import paths +// can always be added. But another potential operation is to split a +// subtree out of a module into its own module. If done carefully, +// this operation can be done while preserving compatibility for clients. +// For example, suppose that we want to split rsc.io/qr/gf256 into its +// own module, so that there would be two modules rsc.io/qr and rsc.io/qr/gf256. +// Then we can simultaneously issue rsc.io/qr v0.3.0 (dropping the gf256 subdirectory) +// and rsc.io/qr/gf256 v0.1.0, including in their respective go.mod +// cyclic requirements pointing at each other: rsc.io/qr v0.3.0 requires +// rsc.io/qr/gf256 v0.1.0 and vice versa. Then a build can be +// using an older rsc.io/qr module that includes the gf256 package, but if +// it adds a requirement on either the newer rsc.io/qr or the newer +// rsc.io/qr/gf256 module, it will automatically add the requirement +// on the complementary half, ensuring both that rsc.io/qr/gf256 is +// available for importing by the build and also that it is only defined +// by a single module. 
The gf256 package could move back into the +// original by another simultaneous release of rsc.io/qr v0.4.0 including +// the gf256 subdirectory and an rsc.io/qr/gf256 v0.2.0 with no code +// in its root directory, along with a new requirement cycle. +// The ability to shift module boundaries in this way is expected to be +// important in large-scale program refactorings, similar to the ones +// described in https://talks.golang.org/2016/refactor.article. +// +// The possibility of shifting module boundaries reemphasizes +// that you must know both the module path and its version +// to determine the set of packages provided directly by that module. +// +// On top of all this, it is possible for a single code repository +// to contain multiple modules, either in branches or subdirectories, +// as a limited kind of monorepo. For example rsc.io/qr/v2, +// the v2.x.x continuation of rsc.io/qr, is expected to be found +// in v2-tagged commits in https://github.com/rsc/qr, either +// in the root or in a v2 subdirectory, disambiguated by go.mod. +// Again the precise file tree corresponding to a module +// depends on which version we are considering. +// +// It is also possible for the underlying repository to change over time, +// without changing the module path. If I copy the github repo over +// to https://bitbucket.org/rsc/qr and update https://rsc.io/qr?go-get=1, +// then clients of all versions should start fetching from bitbucket +// instead of github. That is, in contrast to the exact file tree, +// the location of the source code repository associated with a module path +// does not depend on the module version. (This is by design, as the whole +// point of these redirects is to allow package authors to establish a stable +// name that can be updated as code moves from one service to another.) +// +// All of this is important background for the lookup APIs defined in this +// file. 
+// +// The Lookup function takes a module path and returns a Repo representing +// that module path. Lookup can do only a little with the path alone. +// It can check that the path is well-formed (see semver.CheckPath) +// and it can check that the path can be resolved to a target repository. +// To avoid version control access except when absolutely necessary, +// Lookup does not attempt to connect to the repository itself. + +var lookupCache par.Cache[lookupCacheKey, Repo] + +type lookupCacheKey struct { + proxy, path string +} + +// Lookup returns the module with the given module path, +// fetched through the given proxy. +// +// The distinguished proxy "direct" indicates that the path should be fetched +// from its origin, and "noproxy" indicates that the patch should be fetched +// directly only if GONOPROXY matches the given path. +// +// For the distinguished proxy "off", Lookup always returns a Repo that returns +// a non-nil error for every method call. +// +// A successful return does not guarantee that the module +// has any defined versions. +func Lookup(ctx context.Context, proxy, path string) Repo { + if traceRepo { + defer logCall("Lookup(%q, %q)", proxy, path)() + } + + return lookupCache.Do(lookupCacheKey{proxy, path}, func() Repo { + return newCachingRepo(ctx, path, func(ctx context.Context) (Repo, error) { + r, err := lookup(ctx, proxy, path) + if err == nil && traceRepo { + r = newLoggingRepo(r) + } + return r, err + }) + }) +} + +// lookup returns the module with the given module path. 
+func lookup(ctx context.Context, proxy, path string) (r Repo, err error) { + if cfg.BuildMod == "vendor" { + return nil, errLookupDisabled + } + + switch path { + case "go", "toolchain": + return &toolchainRepo{path, Lookup(ctx, proxy, "golang.org/toolchain")}, nil + } + + if module.MatchPrefixPatterns(cfg.GONOPROXY, path) { + switch proxy { + case "noproxy", "direct": + return lookupDirect(ctx, path) + default: + return nil, errNoproxy + } + } + + switch proxy { + case "off": + return errRepo{path, errProxyOff}, nil + case "direct": + return lookupDirect(ctx, path) + case "noproxy": + return nil, errUseProxy + default: + return newProxyRepo(proxy, path) + } +} + +type lookupDisabledError struct{} + +func (lookupDisabledError) Error() string { + if cfg.BuildModReason == "" { + return fmt.Sprintf("module lookup disabled by -mod=%s", cfg.BuildMod) + } + return fmt.Sprintf("module lookup disabled by -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason) +} + +var errLookupDisabled error = lookupDisabledError{} + +var ( + errProxyOff = notExistErrorf("module lookup disabled by GOPROXY=off") + errNoproxy error = notExistErrorf("disabled by GOPRIVATE/GONOPROXY") + errUseProxy error = notExistErrorf("path does not match GOPRIVATE/GONOPROXY") +) + +func lookupDirect(ctx context.Context, path string) (Repo, error) { + security := web.SecureOnly + + if module.MatchPrefixPatterns(cfg.GOINSECURE, path) { + security = web.Insecure + } + rr, err := vcs.RepoRootForImportPath(path, vcs.PreferMod, security) + if err != nil { + // We don't know where to find code for a module with this path. + return nil, notExistError{err: err} + } + + if rr.VCS.Name == "mod" { + // Fetch module from proxy with base URL rr.Repo. 
+ return newProxyRepo(rr.Repo, path) + } + + code, err := lookupCodeRepo(ctx, rr) + if err != nil { + return nil, err + } + return newCodeRepo(code, rr.Root, path) +} + +func lookupCodeRepo(ctx context.Context, rr *vcs.RepoRoot) (codehost.Repo, error) { + code, err := codehost.NewRepo(ctx, rr.VCS.Cmd, rr.Repo) + if err != nil { + if _, ok := err.(*codehost.VCSError); ok { + return nil, err + } + return nil, fmt.Errorf("lookup %s: %v", rr.Root, err) + } + return code, nil +} + +// A loggingRepo is a wrapper around an underlying Repo +// that prints a log message at the start and end of each call. +// It can be inserted when debugging. +type loggingRepo struct { + r Repo +} + +func newLoggingRepo(r Repo) *loggingRepo { + return &loggingRepo{r} +} + +// logCall prints a log message using format and args and then +// also returns a function that will print the same message again, +// along with the elapsed time. +// Typical usage is: +// +// defer logCall("hello %s", arg)() +// +// Note the final (). 
+func logCall(format string, args ...any) func() { + start := time.Now() + fmt.Fprintf(os.Stderr, "+++ %s\n", fmt.Sprintf(format, args...)) + return func() { + fmt.Fprintf(os.Stderr, "%.3fs %s\n", time.Since(start).Seconds(), fmt.Sprintf(format, args...)) + } +} + +func (l *loggingRepo) ModulePath() string { + return l.r.ModulePath() +} + +func (l *loggingRepo) CheckReuse(ctx context.Context, old *codehost.Origin) (err error) { + defer func() { + logCall("CheckReuse[%s]: %v", l.r.ModulePath(), err) + }() + return l.r.CheckReuse(ctx, old) +} + +func (l *loggingRepo) Versions(ctx context.Context, prefix string) (*Versions, error) { + defer logCall("Repo[%s]: Versions(%q)", l.r.ModulePath(), prefix)() + return l.r.Versions(ctx, prefix) +} + +func (l *loggingRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) { + defer logCall("Repo[%s]: Stat(%q)", l.r.ModulePath(), rev)() + return l.r.Stat(ctx, rev) +} + +func (l *loggingRepo) Latest(ctx context.Context) (*RevInfo, error) { + defer logCall("Repo[%s]: Latest()", l.r.ModulePath())() + return l.r.Latest(ctx) +} + +func (l *loggingRepo) GoMod(ctx context.Context, version string) ([]byte, error) { + defer logCall("Repo[%s]: GoMod(%q)", l.r.ModulePath(), version)() + return l.r.GoMod(ctx, version) +} + +func (l *loggingRepo) Zip(ctx context.Context, dst io.Writer, version string) error { + dstName := "_" + if dst, ok := dst.(interface{ Name() string }); ok { + dstName = strconv.Quote(dst.Name()) + } + defer logCall("Repo[%s]: Zip(%s, %q)", l.r.ModulePath(), dstName, version)() + return l.r.Zip(ctx, dst, version) +} + +// errRepo is a Repo that returns the same error for all operations. +// +// It is useful in conjunction with caching, since cache hits will not attempt +// the prohibited operations. 
+type errRepo struct { + modulePath string + err error +} + +func (r errRepo) ModulePath() string { return r.modulePath } + +func (r errRepo) CheckReuse(ctx context.Context, old *codehost.Origin) error { return r.err } +func (r errRepo) Versions(ctx context.Context, prefix string) (*Versions, error) { return nil, r.err } +func (r errRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) { return nil, r.err } +func (r errRepo) Latest(ctx context.Context) (*RevInfo, error) { return nil, r.err } +func (r errRepo) GoMod(ctx context.Context, version string) ([]byte, error) { return nil, r.err } +func (r errRepo) Zip(ctx context.Context, dst io.Writer, version string) error { return r.err } + +// A notExistError is like fs.ErrNotExist, but with a custom message +type notExistError struct { + err error +} + +func notExistErrorf(format string, args ...any) error { + return notExistError{fmt.Errorf(format, args...)} +} + +func (e notExistError) Error() string { + return e.err.Error() +} + +func (notExistError) Is(target error) bool { + return target == fs.ErrNotExist +} + +func (e notExistError) Unwrap() error { + return e.err +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/sumdb.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/sumdb.go new file mode 100644 index 0000000000000000000000000000000000000000..ea7d561d7b9e0bd76a9808405bacbc2525fa3061 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/sumdb.go @@ -0,0 +1,315 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Go checksum database lookup + +//go:build !cmd_go_bootstrap + +package modfetch + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/fs" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/lockedfile" + "cmd/go/internal/web" + + "golang.org/x/mod/module" + "golang.org/x/mod/sumdb" + "golang.org/x/mod/sumdb/note" +) + +// useSumDB reports whether to use the Go checksum database for the given module. +func useSumDB(mod module.Version) bool { + if mod.Path == "golang.org/toolchain" { + must := true + // Downloaded toolchains cannot be listed in go.sum, + // so we require checksum database lookups even if + // GOSUMDB=off or GONOSUMDB matches the pattern. + // If GOSUMDB=off, then the eventual lookup will fail + // with a good error message. + + // Exception #1: using GOPROXY=file:// to test a distpack. + if strings.HasPrefix(cfg.GOPROXY, "file://") && !strings.ContainsAny(cfg.GOPROXY, ",|") { + must = false + } + // Exception #2: the Go proxy+checksum database cannot check itself + // while doing the initial download. + if strings.Contains(os.Getenv("GIT_HTTP_USER_AGENT"), "proxy.golang.org") { + must = false + } + + // Another potential exception would be GOPROXY=direct, + // but that would make toolchain downloads only as secure + // as HTTPS, and in particular they'd be susceptible to MITM + // attacks on systems with less-than-trustworthy root certificates. + // The checksum database provides a stronger guarantee, + // so we don't make that exception. + + // Otherwise, require the checksum database. + if must { + return true + } + } + return cfg.GOSUMDB != "off" && !module.MatchPrefixPatterns(cfg.GONOSUMDB, mod.Path) +} + +// lookupSumDB returns the Go checksum database's go.sum lines for the given module, +// along with the name of the database. 
+func lookupSumDB(mod module.Version) (dbname string, lines []string, err error) { + dbOnce.Do(func() { + dbName, db, dbErr = dbDial() + }) + if dbErr != nil { + return "", nil, dbErr + } + lines, err = db.Lookup(mod.Path, mod.Version) + return dbName, lines, err +} + +var ( + dbOnce sync.Once + dbName string + db *sumdb.Client + dbErr error +) + +func dbDial() (dbName string, db *sumdb.Client, err error) { + // $GOSUMDB can be "key" or "key url", + // and the key can be a full verifier key + // or a host on our list of known keys. + + // Special case: sum.golang.google.cn + // is an alias, reachable inside mainland China, + // for sum.golang.org. If there are more + // of these we should add a map like knownGOSUMDB. + gosumdb := cfg.GOSUMDB + if gosumdb == "sum.golang.google.cn" { + gosumdb = "sum.golang.org https://sum.golang.google.cn" + } + + if gosumdb == "off" { + return "", nil, fmt.Errorf("checksum database disabled by GOSUMDB=off") + } + + key := strings.Fields(gosumdb) + if len(key) >= 1 { + if k := knownGOSUMDB[key[0]]; k != "" { + key[0] = k + } + } + if len(key) == 0 { + return "", nil, fmt.Errorf("missing GOSUMDB") + } + if len(key) > 2 { + return "", nil, fmt.Errorf("invalid GOSUMDB: too many fields") + } + vkey, err := note.NewVerifier(key[0]) + if err != nil { + return "", nil, fmt.Errorf("invalid GOSUMDB: %v", err) + } + name := vkey.Name() + + // No funny business in the database name. + direct, err := url.Parse("https://" + name) + if err != nil || strings.HasSuffix(name, "/") || *direct != (url.URL{Scheme: "https", Host: direct.Host, Path: direct.Path, RawPath: direct.RawPath}) || direct.RawPath != "" || direct.Host == "" { + return "", nil, fmt.Errorf("invalid sumdb name (must be host[/path]): %s %+v", name, *direct) + } + + // Determine how to get to database. + var base *url.URL + if len(key) >= 2 { + // Use explicit alternate URL listed in $GOSUMDB, + // bypassing both the default URL derivation and any proxies. 
+ u, err := url.Parse(key[1]) + if err != nil { + return "", nil, fmt.Errorf("invalid GOSUMDB URL: %v", err) + } + base = u + } + + return name, sumdb.NewClient(&dbClient{key: key[0], name: name, direct: direct, base: base}), nil +} + +type dbClient struct { + key string + name string + direct *url.URL + + once sync.Once + base *url.URL + baseErr error +} + +func (c *dbClient) ReadRemote(path string) ([]byte, error) { + c.once.Do(c.initBase) + if c.baseErr != nil { + return nil, c.baseErr + } + + var data []byte + start := time.Now() + targ := web.Join(c.base, path) + data, err := web.GetBytes(targ) + if false { + fmt.Fprintf(os.Stderr, "%.3fs %s\n", time.Since(start).Seconds(), targ.Redacted()) + } + return data, err +} + +// initBase determines the base URL for connecting to the database. +// Determining the URL requires sending network traffic to proxies, +// so this work is delayed until we need to download something from +// the database. If everything we need is in the local cache and +// c.ReadRemote is never called, we will never do this work. +func (c *dbClient) initBase() { + if c.base != nil { + return + } + + // Try proxies in turn until we find out how to connect to this database. + // + // Before accessing any checksum database URL using a proxy, the proxy + // client should first fetch /sumdb//supported. + // + // If that request returns a successful (HTTP 200) response, then the proxy + // supports proxying checksum database requests. In that case, the client + // should use the proxied access method only, never falling back to a direct + // connection to the database. + // + // If the /sumdb//supported check fails with a “not found” (HTTP + // 404) or “gone” (HTTP 410) response, or if the proxy is configured to fall + // back on errors, the client will try the next proxy. If there are no + // proxies left or if the proxy is "direct" or "off", the client should + // connect directly to that database. 
+ // + // Any other response is treated as the database being unavailable. + // + // See https://golang.org/design/25530-sumdb#proxying-a-checksum-database. + err := TryProxies(func(proxy string) error { + switch proxy { + case "noproxy": + return errUseProxy + case "direct", "off": + return errProxyOff + default: + proxyURL, err := url.Parse(proxy) + if err != nil { + return err + } + if _, err := web.GetBytes(web.Join(proxyURL, "sumdb/"+c.name+"/supported")); err != nil { + return err + } + // Success! This proxy will help us. + c.base = web.Join(proxyURL, "sumdb/"+c.name) + return nil + } + }) + if errors.Is(err, fs.ErrNotExist) { + // No proxies, or all proxies failed (with 404, 410, or were allowed + // to fall back), or we reached an explicit "direct" or "off". + c.base = c.direct + } else if err != nil { + c.baseErr = err + } +} + +// ReadConfig reads the key from c.key +// and otherwise reads the config (a latest tree head) from GOPATH/pkg/sumdb/. +func (c *dbClient) ReadConfig(file string) (data []byte, err error) { + if file == "key" { + return []byte(c.key), nil + } + + if cfg.SumdbDir == "" { + return nil, fmt.Errorf("could not locate sumdb file: missing $GOPATH: %s", + cfg.GoPathError) + } + targ := filepath.Join(cfg.SumdbDir, file) + data, err = lockedfile.Read(targ) + if errors.Is(err, fs.ErrNotExist) { + // Treat non-existent as empty, to bootstrap the "latest" file + // the first time we connect to a given database. + return []byte{}, nil + } + return data, err +} + +// WriteConfig rewrites the latest tree head. +func (*dbClient) WriteConfig(file string, old, new []byte) error { + if file == "key" { + // Should not happen. 
+ return fmt.Errorf("cannot write key") + } + if cfg.SumdbDir == "" { + return fmt.Errorf("could not locate sumdb file: missing $GOPATH: %s", + cfg.GoPathError) + } + targ := filepath.Join(cfg.SumdbDir, file) + os.MkdirAll(filepath.Dir(targ), 0777) + f, err := lockedfile.Edit(targ) + if err != nil { + return err + } + defer f.Close() + data, err := io.ReadAll(f) + if err != nil { + return err + } + if len(data) > 0 && !bytes.Equal(data, old) { + return sumdb.ErrWriteConflict + } + if _, err := f.Seek(0, 0); err != nil { + return err + } + if err := f.Truncate(0); err != nil { + return err + } + if _, err := f.Write(new); err != nil { + return err + } + return f.Close() +} + +// ReadCache reads cached lookups or tiles from +// GOPATH/pkg/mod/cache/download/sumdb, +// which will be deleted by "go clean -modcache". +func (*dbClient) ReadCache(file string) ([]byte, error) { + targ := filepath.Join(cfg.GOMODCACHE, "cache/download/sumdb", file) + data, err := lockedfile.Read(targ) + // lockedfile.Write does not atomically create the file with contents. + // There is a moment between file creation and locking the file for writing, + // during which the empty file can be locked for reading. + // Treat observing an empty file as file not found. + if err == nil && len(data) == 0 { + err = &fs.PathError{Op: "read", Path: targ, Err: fs.ErrNotExist} + } + return data, err +} + +// WriteCache updates cached lookups or tiles. 
+func (*dbClient) WriteCache(file string, data []byte) { + targ := filepath.Join(cfg.GOMODCACHE, "cache/download/sumdb", file) + os.MkdirAll(filepath.Dir(targ), 0777) + lockedfile.Write(targ, bytes.NewReader(data), 0666) +} + +func (*dbClient) Log(msg string) { + // nothing for now +} + +func (*dbClient) SecurityError(msg string) { + base.Fatalf("%s", msg) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/toolchain.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/toolchain.go new file mode 100644 index 0000000000000000000000000000000000000000..0d7cfcfe7d10734caa8c4616cf7492b13b21a855 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/toolchain.go @@ -0,0 +1,181 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modfetch + +import ( + "context" + "fmt" + "io" + "sort" + "strings" + + "cmd/go/internal/gover" + "cmd/go/internal/modfetch/codehost" +) + +// A toolchainRepo is a synthesized repository reporting Go toolchain versions. +// It has path "go" or "toolchain". The "go" repo reports versions like "1.2". +// The "toolchain" repo reports versions like "go1.2". +// +// Note that the repo ONLY reports versions. It does not actually support +// downloading of the actual toolchains. Instead, that is done using +// the regular repo code with "golang.org/toolchain". +// The naming conflict is unfortunate: "golang.org/toolchain" +// should perhaps have been "go.dev/dl", but it's too late. +// +// For clarity, this file refers to golang.org/toolchain as the "DL" repo, +// the one you can actually download. 
+type toolchainRepo struct { + path string // either "go" or "toolchain" + repo Repo // underlying DL repo +} + +func (r *toolchainRepo) ModulePath() string { + return r.path +} + +func (r *toolchainRepo) Versions(ctx context.Context, prefix string) (*Versions, error) { + // Read DL repo list and convert to "go" or "toolchain" version list. + versions, err := r.repo.Versions(ctx, "") + if err != nil { + return nil, err + } + versions.Origin = nil + var list []string + have := make(map[string]bool) + goPrefix := "" + if r.path == "toolchain" { + goPrefix = "go" + } + for _, v := range versions.List { + v, ok := dlToGo(v) + if !ok { + continue + } + if !have[v] { + have[v] = true + list = append(list, goPrefix+v) + } + } + + // Always include our own version. + // This means that the development branch of Go 1.21 (say) will allow 'go get go@1.21' + // even though there are no Go 1.21 releases yet. + // Once there is a release, 1.21 will be treated as a query matching the latest available release. + // Before then, 1.21 will be treated as a query that resolves to this entry we are adding (1.21). + if v := gover.Local(); !have[v] { + list = append(list, goPrefix+v) + } + + if r.path == "go" { + sort.Slice(list, func(i, j int) bool { + return gover.Compare(list[i], list[j]) < 0 + }) + } else { + sort.Slice(list, func(i, j int) bool { + return gover.Compare(gover.FromToolchain(list[i]), gover.FromToolchain(list[j])) < 0 + }) + } + versions.List = list + return versions, nil +} + +func (r *toolchainRepo) Stat(ctx context.Context, rev string) (*RevInfo, error) { + // Convert rev to DL version and stat that to make sure it exists. + // In theory the go@ versions should be like 1.21.0 + // and the toolchain@ versions should be like go1.21.0 + // but people will type the wrong one, and so we accept + // both and silently correct it to the standard form. 
+ prefix := "" + v := rev + v = strings.TrimPrefix(v, "go") + if r.path == "toolchain" { + prefix = "go" + } + + if !gover.IsValid(v) { + return nil, fmt.Errorf("invalid %s version %s", r.path, rev) + } + + // If we're asking about "go" (not "toolchain"), pretend to have + // all earlier Go versions available without network access: + // we will provide those ourselves, at least in GOTOOLCHAIN=auto mode. + if r.path == "go" && gover.Compare(v, gover.Local()) <= 0 { + return &RevInfo{Version: prefix + v}, nil + } + + // Similarly, if we're asking about *exactly* the current toolchain, + // we don't need to access the network to know that it exists. + if r.path == "toolchain" && v == gover.Local() { + return &RevInfo{Version: prefix + v}, nil + } + + if gover.IsLang(v) { + // We can only use a language (development) version if the current toolchain + // implements that version, and the two checks above have ruled that out. + return nil, fmt.Errorf("go language version %s is not a toolchain version", rev) + } + + // Check that the underlying toolchain exists. + // We always ask about linux-amd64 because that one + // has always existed and is likely to always exist in the future. + // This avoids different behavior validating go versions on different + // architectures. The eventual download uses the right GOOS-GOARCH. + info, err := r.repo.Stat(ctx, goToDL(v, "linux", "amd64")) + if err != nil { + return nil, err + } + + // Return the info using the canonicalized rev + // (toolchain 1.2 => toolchain go1.2). 
+ return &RevInfo{Version: prefix + v, Time: info.Time}, nil +} + +func (r *toolchainRepo) Latest(ctx context.Context) (*RevInfo, error) { + versions, err := r.Versions(ctx, "") + if err != nil { + return nil, err + } + var max string + for _, v := range versions.List { + if max == "" || gover.ModCompare(r.path, v, max) > 0 { + max = v + } + } + return r.Stat(ctx, max) +} + +func (r *toolchainRepo) GoMod(ctx context.Context, version string) (data []byte, err error) { + return []byte("module " + r.path + "\n"), nil +} + +func (r *toolchainRepo) Zip(ctx context.Context, dst io.Writer, version string) error { + return fmt.Errorf("invalid use of toolchainRepo: Zip") +} + +func (r *toolchainRepo) CheckReuse(ctx context.Context, old *codehost.Origin) error { + return fmt.Errorf("invalid use of toolchainRepo: CheckReuse") +} + +// goToDL converts a Go version like "1.2" to a DL module version like "v0.0.1-go1.2.linux-amd64". +func goToDL(v, goos, goarch string) string { + return "v0.0.1-go" + v + ".linux-amd64" +} + +// dlToGo converts a DL module version like "v0.0.1-go1.2.linux-amd64" to a Go version like "1.2". 
+func dlToGo(v string) (string, bool) { + // v0.0.1-go1.19.7.windows-amd64 + // cut v0.0.1- + _, v, ok := strings.Cut(v, "-") + if !ok { + return "", false + } + // cut .windows-amd64 + i := strings.LastIndex(v, ".") + if i < 0 || !strings.Contains(v[i+1:], "-") { + return "", false + } + return strings.TrimPrefix(v[:i], "go"), true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/zip_sum_test/testdata/zip_sums.csv b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/zip_sum_test/testdata/zip_sums.csv new file mode 100644 index 0000000000000000000000000000000000000000..0906975f5537e632815a18f1a217a9ebdf1ed591 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/zip_sum_test/testdata/zip_sums.csv @@ -0,0 +1,2119 @@ +9fans.net/go,v0.0.2,h1:RYM6lWITV8oADrwLfdzxmt8ucfW6UtP9v1jg4qAbqts=,2c42aad9ed60e24046fbf5720f438884942897197cb790ce58cccdacedd9532d +aahframe.work,v0.12.3,h1:hc3chv+f49yLYVT/aSEhgpoqd8bS0rDKEew1un8AkSo=,0c7e3fab03920a79ace8e0a9ddf4517225f595ce39f2124ec3d9353508da5dbd +aahframework.org/essentials.v0,v0.8.0,h1:R/lcfOuhvZptG4IWX/CzAtpiVJFUjbCxLao6DfmeWBA=,d640fe6b83a31ffe09d12eea37de000be7ec8d7330c0a1d7413d6e31a675628d +bazil.org/fuse,v0.0.0-20180421153158-65cc252bf669,h1:FNCRpXiquG1aoyqcIWVFmpTSKVcx2bQD38uZZeGtdlw=,fce7ed008451861ba30974e95468d716f5ff4fde14e9605dbc2db5fac935c71d +bitbucket.org/abex/go-optional,v0.0.0-20150902044611-5304370459de,h1:iGmurCCO42qFsQ46DzROSsZJFf8/7AKFH/VpRGd2PBw=,02e1b23db09fb6945ba4ca57c0af8125b608fceae125fe625f6536b9e466e7e0 +bitbucket.org/abex/pathfinder,v0.0.0-20170507112819-bdce8b2efbc9,h1:M1jjfmrcOcmWy2/aABpm3k9h/M6NccmjgLtE5gVl+y8=,8469c0a656a895863d4714a658ee4a9634e78547142fa7239331e15d0143c679 +bitbucket.org/abex/sliceconv,v0.0.0-20151017061159-594a23261816,h1:7XPf5/Oar0LfWbnUY29doBDzSr6ToseiJRqkZtb0YOo=,e2433a32246bd5e2fb5d52bf6dc36188a75c4b59b98f76eb7607035b8525dd37 
+bitbucket.org/abex/yumeko,v0.0.0-20190825151534-d98ca20ac08c,h1:ES4kIm83Q1RYr9uhhpQhqh/tqjt8H+Xz4xuSAv5Crcw=,1d352a11b3ed5850a425fde048cafa65b2c079c4e9647c52a339b28276065ba9 +bitbucket.org/liamstask/goose,v0.0.0-20150115234039-8488cc47d90c,h1:bkb2NMGo3/Du52wvYj9Whth5KZfMV6d3O0Vbr3nz/UE=,3d64cac7774bf87a9d050222b87387c112bcb6ef0ea0e2b3324a95330573a0c5 +bitbucket.org/ww/goautoneg,v0.0.0-20120707110453-75cd24fc2f2c,h1:t+Ra932MCC0eeyD/vigXqMbZTzgZjd4JOfBJWC6VSMI=,8ad2afdee1dc46b2c78e986bc2cce89cd0b8815b278a01879ef08d56585c247f +bou.ke/monkey,v1.0.1,h1:zEMLInw9xvNakzUUPjfS4Ds6jYPqCFx3m7bRmG5NH2U=,20cb7da509322267189d32a125d7e0f782264508bc8e17306c80424514e797ce +cloud.google.com/go,v0.47.0,h1:1JUtpcY9E7+eTospEwWS2QXP3DEn7poB3E2j0jN74mM=,7739fd24e36a536488115ef0ec9d739e608ee68448f8a469e84855c008b00ecd +cloud.google.com/go/bigquery,v1.0.1,h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU=,738d1f726ce24f618ee7563f6c9419e6307f8814548f45ad8a227cffbb1448c0 +cloud.google.com/go/datastore,v1.0.0,h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM=,41e93ec9526ae580da90300d7e421a6d39d79cb6118d62ad1d3c06422d8a71bf +cloud.google.com/go/pubsub,v1.0.1,h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8=,8bca46a7c5f0dcd576d23fa9a5f107955316d6f0d8f306ee1d6faa7de99c3d29 +cloud.google.com/go/spanner,v1.0.0,h1:jLKThep5kbWLeBhLgtEfm/OPT08n1z7itVTR82WUBQg=,90579f16545e352c662ae9f62dd02dddf834fe10b33d1dbcfbf0a8aadfcd21f8 +cloud.google.com/go/storage,v1.0.0,h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4=,baec4756c573ede58f19eb7ae4acaebd7ac3f0c56413ecbbd216ad46a589a5da +code.cloudfoundry.org/clock,v0.0.0-20180518195852-02e53af36e6c,h1:5eeuG0BHx1+DHeT3AP+ISKZ2ht1UjGhm581ljqYpVeQ=,61785787db7dadaf695506636dcb98c26bbdd0c847f589aa1fb4bbe9ef0e4455 +code.cloudfoundry.org/gofileutils,v0.0.0-20170111115228-4d0c80011a0f,h1:UrKzEwTgeiff9vxdrfdqxibzpWjxLnuXDI5m6z3GJAk=,ec71ca818158525773e53568f71db38f63423822a426e1a18f7d34318e97eb3e 
+code.cloudfoundry.org/lager,v2.0.0+incompatible,h1:WZwDKDB2PLd/oL+USK4b4aEjUymIej9My2nUQ9oWEwQ=,ce1da175885c2587ca091532a937108ed646e3bd6bd902640891f75ae70adb8b +code.gitea.io/gitea,v1.9.5,h1:Q3PROlfPth1NlLGaeYcr6YVqyfAy7txnFpDKe1BXo7Q=,c7b63394004fb8f355d859f11a007ff17126eac092f90507a80392335351a6df +code.gitea.io/sdk,v0.0.0-20191030144301-2a5a0e75e5cf,h1:uXUz7lXbs33QAYIu1rF0o8tNsa3DlDDSMYek/3CldIo=,6472a2b30b8108cae9b6a4914ce986d61e9ed37baade5ad35cb337270602b70a +code.gitea.io/sdk/gitea,v0.0.0-20191030144301-2a5a0e75e5cf,h1:aAwV+RyellgKMACMu21Vyv/XgSHipLvbJsXDoXP1Yv0=,62570a621e1bf13724fb1f45d7ea95c48de02abb00468cf1da4b35820203d3b4 +contrib.go.opencensus.io/exporter/aws,v0.0.0-20181029163544-2befc13012d0,h1:YsbWYxDZkC7x2OxlsDEYvvEXZ3cBI3qBgUK5BqkZvRw=,3e351a39c3caf9ce263155f2d6e5a4e0cd84177661e1bf40f0d8fd06854831e9 +contrib.go.opencensus.io/exporter/ocagent,v0.6.0,h1:Z1n6UAyr0QwM284yUuh5Zd8JlvxUGAhFZcgMJkMPrGM=,e526ae16b06c682c3661738938f912a2e301a5e2d0ba875c7a0ec40fde825491 +contrib.go.opencensus.io/exporter/stackdriver,v0.12.8,h1:iXI5hr7pUwMx0IwMphpKz5Q3If/G5JiWFVZ5MPPxP9E=,db745d331f8a0455abbbcfeb4bb33dbc5cbb73a119b4e86f833cd497cfc72559 +contrib.go.opencensus.io/integrations/ocsql,v0.1.4,h1:kfg5Yyy1nYUrqzyfW5XX+dzMASky8IJXhtHe0KTYNS4=,0a4be97a579c5212bd83d21a177b279bc5b0c04350a63c56e8f8e611ffcba09c +contrib.go.opencensus.io/resource,v0.1.1,h1:4r2CANuYhKGmYWP02+5E94rLRcS/YeD+KlxSrOsMxk0=,07ad3d36f96cb86ecba376353d02730855e117db3ffac5c2ab2c7cdf4eca25dc +cuelang.org/go,v0.0.11,h1:t7s006dOWh6tgnwPifvO3l704eg8oPuIH7AR1hfTFYk=,69bdc6b3f1000308d399f166dd0d46576019f68cbf37765bd30821584b1296de +dmitri.shuralyov.com/app/changes,v0.0.0-20180602232624-0a106ad413e3,h1:hJiie5Bf3QucGRa4ymsAUOxyhYwGEz1xrsVk0P8erlw=,a4d9079d5550094191f608c628ff2eb6999e0d0b6aea894ba59d063107777dfa +dmitri.shuralyov.com/gpu/mtl,v0.0.0-20190408044501-666a987793e9,h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=,ca5330901fcda83d09553ac362576d196c531157bc9c502e76b237cca262b400 
+dmitri.shuralyov.com/html/belt,v0.0.0-20180602232347-f7d459c86be0,h1:SPOUaucgtVls75mg+X7CXigS71EnsfVUK/2CgVrwqgw=,bd6b059cceaea8ab23e65b8118fab5d22f82149417fcc5fcf930ef9a52d582f1 +dmitri.shuralyov.com/service/change,v0.0.0-20181023043359-a85b471d5412,h1:GvWw74lx5noHocd+f6HBMXK6DuggBB1dhVkuGZbv7qM=,8a1ba9c7ba7eea08389c15315a23485d19fc7166d30b5b47a35fab949c4bf886 +dmitri.shuralyov.com/state,v0.0.0-20190403024436-2cf192113e66,h1:/74W9PTF+vJhgRsWpPWlZT77+phX7vXPcelX7JXFu5s=,eda200c06f669f06c56e1d53a1879b88dd7ee99eea1f56d329028fa773cfc2dd +docker.io/go-docker,v1.0.0,h1:VdXS/aNYQxyA9wdLD5z8Q8Ro688/hG8HzKxYVEVbE6s=,b162036b1af6e1e5434e2e5a35faa7191014529259fbf2f4f1b3e7de6b816516 +fyne.io/fyne,v1.1.2,h1:a9YLFXxqN7lKNqTrk+ocw3/3ROrn6aFiofix8ATVOBc=,dc2d7fd4a4ee9852328fc79c52459a53d94d26f6ac3282ebcacc0c6cd6688d23 +git.apache.org/thrift.git,v0.13.0,h1:/3bz5WZ+sqYArk7MBBBbDufMxKKOA56/6JO6psDpUDY=,10412b7bc503ef2a7cc3bf58fe69e5a2d2594354ae3cc5ab2baa2b3ecc8c4f1d +git.fd.io/govpp.git,v0.1.0,h1:fV5H9ghURFfmNAjk7Scb/aG3OGwevLayHfSdS8GsYjE=,0a023d4b5b36131a4fde2c3d19047bbd4f5c3a7cc07c1ccf40bfb75a501f51b3 +git.torproject.org/pluggable-transports/goptlib.git,v1.1.0,h1:LMQAA8pAho+QtYrrVNimJQiINNEwcwuuD99vezD/PAo=,f6769c4813dedf933071289bfd9381aa5eb3a012b3a32d1da02aa9bebd3a3b5b +gitee.com/nggs/util,v0.0.0-20190830024003-3e49d2efc84b,h1:6KQpPEs326uPrICQy9x/PxmR8U0v/XsFzpt0k1nFKcY=,a062c99c2b560a36168fe51eab8f17f4fadf5d534238881628e83d8d61e51c2a +github.com/1and1/oneandone-cloudserver-sdk-go,v1.0.1,h1:RMTyvS5bjvSWiUcfqfr/E2pxHEMrALvU+E12n6biymg=,7f068808fc0857d7de8c8f829cc380dce1c6611a3fc819daf4421e9bcb75a07c +github.com/99designs/gqlgen,v0.10.1,h1:1BgB6XKGTHq7uH4G1/PYyKe2Kz7/vw3AlvMZlD3TEEY=,04b9e7d8a3df6543cd870325b1140ce9ac3f4bbfd8c90ebecec4f908dd420d08 +github.com/AndreasBriese/bbloom,v0.0.0-20190306092124-e2d15f34fcf9,h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4=,6d7c1af06f8597fde1e86166f26416057392f1b0bdb84f2af555aa461282dd18 
+github.com/AsynkronIT/goconsole,v0.0.0-20160504192649-bfa12eebf716,h1:Pk/Kzi5O0T4QxfqvbaUsh8UklbJ9BklZ/ClZBptX5WU=,5a2507b89bb4436881718d785a0ef383652aa99782508b7444cf20255082dab9 +github.com/Azure/azure-amqp-common-go,v1.1.4,h1:DmPXxmLZwi/71CgRTZIKR6yiKEW3eC42S4gSBhfG7y0=,4b800793ff4fefa86a427c445e3a4671b8d1dcd87a44075f6309cace6b0e01e2 +github.com/Azure/azure-amqp-common-go/v2,v2.1.0,h1:+QbFgmWCnPzdaRMfsI0Yb6GrRdBj5jVL8N3EXuEUcBQ=,9a91c6ac9656faea0ddfb0bb497c109451faaba09b85ce3237309f5982b095a3 +github.com/Azure/azure-pipeline-go,v0.2.2,h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=,3e4f90f6ec86d4875e8758f01947adece11c1b4317b448fe0197188765c83efc +github.com/Azure/azure-sdk-for-go,v36.0.0+incompatible,h1:XIaBmA4pgKqQ7jInQPaNJQ4pOHrdJjw9gYXhbyiChaU=,71db17c798b784b96a45efdbabd18ad86d03e5f490701081a2f7bf19efa67c13 +github.com/Azure/azure-service-bus-go,v0.9.1,h1:G1qBLQvHCFDv9pcpgwgFkspzvnGknJRR0PYJ9ytY/JA=,81e42ed51354d71b53daf93b5b9f0f2c20fb7d2923f45ab88eea22419bfbc63a +github.com/Azure/azure-storage-blob-go,v0.8.0,h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o=,3b02b720c25bbb6cdaf77f45a29a21e374e087081dedfeac2700aed6147b4b35 +github.com/Azure/go-ansiterm,v0.0.0-20170929234023-d6e3b3328b78,h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=,b7a14d18891abef8b8a2e622f62a3cebeac32f9f1223dc9d62a6f8769861aaf2 +github.com/Azure/go-autorest,v13.3.0+incompatible,h1:8Ix0VdeOllBx9jEcZ2Wb1uqWUpE1awmJiaHztwaJCPk=,44fdf420bd96bb97df7910806efb25f2fae701078c39f5592f5c4131ffce41e6 +github.com/Azure/go-autorest/autorest,v0.9.2,h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4=,26df5fc6c03e8c66021dd272b04242f6c2ce2a5975f87799dfcf1b9597800dba +github.com/Azure/go-autorest/autorest/adal,v0.8.0,h1:CxTzQrySOxDnKpLjFJeZAS5Qrv/qFPkgLjx5bOAi//I=,af59c00ec7e19cda9b960babaee7bfe27cf3d5f7415ac3afdb4cddc73d4b5743 +github.com/Azure/go-autorest/autorest/azure/auth,v0.4.0,h1:18ld/uw9Rr7VkNie7a7RMAcFIWrJdlUL59TWGfcu530=,2fe394de946f42c2ea8ad07f1b282eac6bb56e372f5c2a35e49dfef0cf015ccb 
+github.com/Azure/go-autorest/autorest/azure/cli,v0.3.0,h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY=,729d09b69a1912faa7c2395389bbf67ec22a420d42c15414823d43a380a2f09a +github.com/Azure/go-autorest/autorest/date,v0.2.0,h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=,9ec7b48c865a185b72d3822ac2dff7e0163315a23911c87a479a3db616af9853 +github.com/Azure/go-autorest/autorest/mocks,v0.3.0,h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc=,d5daf74cf531c37b27b39d3bf65b6930aee4b226b5fb4ea91a87be93aaf37f10 +github.com/Azure/go-autorest/autorest/to,v0.3.0,h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8=,955ee6bde8af1314d22b51f265799147f42f7c705714b1cc1c51144441d5fa9c +github.com/Azure/go-autorest/autorest/validation,v0.2.0,h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4=,10f40b0d943d4d1a0a1cbcb9fdb058b8a3a59a55ae26583566dfaa82883f86ea +github.com/Azure/go-autorest/logger,v0.1.0,h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=,5e0804944db0707502c9d29defb54961c281a19311c9eb321a246ba054ac5256 +github.com/Azure/go-autorest/tracing,v0.5.0,h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=,4951b0f4a88a44b7ed4e4834654e4e01922ade35d97899b8596998184abbc652 +github.com/Azure/go-ntlmssp,v0.0.0-20180810175552-4a21cbd618b4,h1:pSm8mp0T2OH2CPmPDPtwHPr3VAQaOwVF/JbllOPP4xA=,64cd585589154ce18d7557ccfd8d26e2c2f5c4ecf13b17bdbfb913e17863d280 +github.com/BurntSushi/locker,v0.0.0-20171006230638-a6e239ea1c69,h1:+tu3HOoMXB7RXEINRVIpxJCT+KdYiI7LAEAUrOw3dIU=,836038343df9e9126b59d54201951191898bd875ec32d93c2018d759f358fcfb +github.com/BurntSushi/toml,v0.3.1,h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=,815c6e594745f2d8842ff9a4b0569c6695e6cdfd5e07e5b3d98d06b72ca41e3c +github.com/BurntSushi/xgb,v0.0.0-20160522181843-27f122750802,h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=,f52962c7fbeca81ea8a777d1f8b1f1d25803dc437fbb490f253344232884328e 
+github.com/BurntSushi/xgbutil,v0.0.0-20190907113008-ad855c713046,h1:O/r2Sj+8QcMF7V5IcmiE2sMFV2q3J47BEirxbXJAdzA=,492ce6b11d7faaec4e15d1279d81e28d2e0e9844ad117f9de9411286a5b0e305 +github.com/ChrisTrenkamp/goxpath,v0.0.0-20170922090931-c385f95c6022,h1:y8Gs8CzNfDF5AZvjr+5UyGQvQEBL7pwo+v+wX6q9JI8=,8d79cd78a309a1b0f22790d354b9c4c929c64d03c7e572627ba430908fbb9d78 +github.com/CodisLabs/codis,v0.0.0-20181104082235-de1ad026e329,h1:KyRmPlfd2xewxb54vIBPNILFyCh2R3zNDwLZURDxT0E=,f61ae85688d10dddf0d62c30aaaa2701373fc11851dae4435de0212513c578c1 +github.com/DATA-DOG/go-sqlmock,v1.3.3,h1:CWUqKXe0s8A2z6qCgkP4Kru7wC11YoAnoupUKFDnH08=,5dc430c2836af3bfc85f590366a6e284a251978e9397d0d54fa97db913263461 +github.com/DataDog/datadog-go,v3.2.0+incompatible,h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4=,ede4a024d3c106b2f57ca04d7bfc7610e0c83f4d8a3bace2cf87b42fd5cf66cd +github.com/DataDog/zstd,v1.4.0,h1:vhoV+DUHnRZdKW1i5UMjAk2G4JY8wN4ayRfYDNdEhwo=,601f6fe1f4138d676946f4b27f7a714bbedea8c1785d10c1b74a03c68ad13070 +github.com/FiloSottile/b2,v0.0.0-20170207175032-b197f7a2c317,h1:1GuMjC4tjfwnWBdoTS7YqtQ3JIsEft6NRcdmXdzvYYc=,6ff3cfed3f510fc69b47f263936642950afc7892f557ed716dd8c5584f187411 +github.com/GeertJohan/go-sourcepath,v0.0.0-20150925135350-83e8b8723a9b,h1:D4H5C4VvURnduTQydyEhA6OWnNcZTLUlNX4YBw5yelY=,8bdcf0b6cc58f5ec1cef031b4052e6d699683bf1daf4a1a20f92f67d5be06b82 +github.com/GeertJohan/go.incremental,v1.0.0,h1:7AH+pY1XUgQE4Y1HcXYaMqAI0m9yrFqo/jt0CW30vsg=,ce46b3b717f8d2927046bcfb99c6f490b1b547a681e6b23240ac2c2292a891e8 +github.com/GeertJohan/go.rice,v1.0.0,h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voiMLQ=,2fc48b9422bf356c18ed3fe32ec52f6a8b87ac168f83d2eed249afaebcc3eeb8 +github.com/GoogleCloudPlatform/cloudsql-proxy,v0.0.0-20191017031552-46c5533ff5ba,h1:ZNYxMf89tMi+NydPAq7yGAxMfMNaMHgG+7WL1CEabjc=,25a1fe9f189e6a4d6e108f22abaec7dd36edc819ab5af1a3e448450b73026271 
+github.com/GoogleCloudPlatform/docker-credential-gcr,v1.5.0,h1:wykTgKwhVr2t2qs+xI020s6W5dt614QqCHV+7W9dg64=,4acfcaddfe2aa53e1e643ea13ff3534a2fca1e043d008ab5bba5a0910db1f7c2 +github.com/IBM/go-sdk-core,v1.0.1,h1:vF9Lsoih6fxrAxzJp2fWqnO6Mg8x8O8fzwQAdFoUdok=,e063c8f79f94936a165355f61bdc6f2c404975472ad6be5e47bfdb87fb393c72 +github.com/Jeffail/gabs,v1.4.0,h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo=,cb193b1477109c19b0d2521fc61735619202e58ac4699605f72313d70884ca9e +github.com/Joker/hpp,v0.0.0-20180418125244-6893e659854a,h1:PiDAizhfJbwZMISZ1Itx1ZTFeOFCml89Ofmz3V8rhoU=,4e99372a7576c587c107fb16d1ae0e8662111e2ca5e5127f7cd93bb01cd02076 +github.com/Joker/jade,v1.0.0,h1:lOCEPvTAtWfLpSZYMOv/g44MGQFAolbKh2khHHGu0Kc=,c4a7f39e7483446ff7b0d7e213a4cd813c783108d6d2e7c6e9a8e968789b18bc +github.com/Knetic/govaluate,v3.0.1-0.20171022003610-9aa49832a739+incompatible,h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw=,d1d4ac5b4f5759726368f68b0d47f3c17c6d8689243ec66272311359d28a865b +github.com/Kodeworks/golang-image-ico,v0.0.0-20141118225523-73f0f4cfade9,h1:1ltqoej5GtaWF8jaiA49HwsZD459jqm9YFz9ZtMFpQA=,1d677069e35c4a3e4f290e68c6e2391f6237aee9ce3f39448ed09a2ddab274b0 +github.com/Kubuxu/go-os-helper,v0.0.1,h1:EJiD2VUQyh5A9hWJLmc6iWg6yIcJ7jpBcwC8GMGXfDk=,90a16f95a8a238910ab0dc9004cb6e56242a10810bf1e296a263d2e385f002e0 +github.com/KyleBanks/depth,v1.2.1,h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=,8f3e9af2e038f561d9c34b631fddc7db39e39992a121fd087f0bf980026464d9 +github.com/MakeNowJust/heredoc,v1.0.0,h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=,062afe6e11aa3c3ac0035d08907b80d5e5b7563905603391ee774bda440abf16 +github.com/Masterminds/goutils,v1.1.0,h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg=,b9520e8d2775ac1ff3fbf18c93dbc4b921133f957ae274f5b047965e9359d27d +github.com/Masterminds/semver,v1.5.0,h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=,15f6b54a695c15ffb205d5719e5ed50fab9ba9a739e1b4bdf3a0a319f51a7202 
+github.com/Masterminds/semver/v3,v3.0.1,h1:2kKm5lb7dKVrt5TYUiAavE6oFc1cFT0057UVGT+JqLk=,f1eef1a1b6489d895eb32326f3369bd1615812a4c5fbfe60b2b6cc774c6340f0 +github.com/Masterminds/sprig,v2.22.0+incompatible,h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=,1b4d772334cc94e5703291b5f0fe4ac4965ac265424b1060baf18ef5ff9d845c +github.com/Masterminds/squirrel,v1.1.0,h1:baP1qLdoQCeTw3ifCdOq2dkYc6vGcmRdaociKLbEJXs=,cede1b0a054e000a5e6a8000cb02de7ab64ddca9e0f4153732274627adeed0ae +github.com/Microsoft/go-winio,v0.4.14,h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=,7a86644691d3c86c77ae0b639fa27029706552f00cd51b445389a61694576f6b +github.com/Microsoft/hcsshim,v0.8.6,h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA=,900feaaec1c41d4e111a66bbde330b41fc78902c70c0af37d611505bf42e0632 +github.com/NYTimes/gziphandler,v1.1.1,h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=,2948d9f70e4388f13f4ed9400df41dca60841059f7dcc30cf909c82796cc705a +github.com/NaverCloudPlatform/ncloud-sdk-go,v0.0.0-20180110055012-c2e73f942591,h1:/P9HCl71+Eh6vDbKNyRu+rpIIR70UCZWNOGexVV3e6k=,2e9eacfe3e6785beef75391bcebc14a6a082687c0f0582bc441c3d0106b8bf5c +github.com/NebulousLabs/entropy-mnemonics,v0.0.0-20181203154559-bc7e13c5ccd8,h1:wPFCU8DwC4k5C2LfJc/rVp4cmTqzF3vyydxRR3b3HhQ=,6a65ca779cd216db7bf326ebbb5a26a87d85ff6a6ba832eec281c5c09a8294e3 +github.com/Netflix/go-expect,v0.0.0-20180615182759-c93bf25de8e8,h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw=,fbe7b2f58ecb0e1067a6670bbcf0718d54ec407aab81790cc9e58db9a6774775 +github.com/NickBall/go-aes-key-wrap,v0.0.0-20170929221519-1c3aa3e4dfc5,h1:5BIUS5hwyLM298mOf8e8TEgD3cCYqc86uaJdQCYZo/o=,fd78212ec77052b032b9fc308c028e8fc166de3d6ae4494f5eb3254930728a0b +github.com/Nvveen/Gotty,v0.0.0-20120604004816-cd527374f1e5,h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=,362ac7b59d74231419471b65b60079d167785b97fd4aa0de71575088cd192b1e +github.com/OneOfOne/xxhash,v1.2.5,h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI=,7ab3c6a0e7c16c987a589e50a9a353e8877cfffea02bf9e04e370fd26a0c85e1 
+github.com/OpenBazaar/jsonpb,v0.0.0-20171123000858-37d32ddf4eef,h1:+aqKrHtCJTRp8ziyrjfHbTF5puPQZfgRt65+iM7FD2w=,5f6ea1466b9d27f016c1bf2650669c788db623142cdc8a1794bc1784fc80fc4e +github.com/OpenBazaar/wallet-interface,v0.0.0-20190807004547-aa8e214acd9b,h1:KjQH45msWRtDhb5JAbBW+eU4M/9xIm11rsOSgAaqDOs=,f7ac40d665241766533b1a49a726068d9dfea5e02c7fd426df81f9e390a7003e +github.com/OpenDNS/vegadns2client,v0.0.0-20180418235048-a3fa4a771d87,h1:xPMsUicZ3iosVPSIP7bW5EcGUzjiiMl1OYTe14y/R24=,b73d6b37d519c7bf181e502b92962f1bf961bb0ca3a9ef7057c3d9a8a3c2f3cd +github.com/OwnLocal/goes,v1.0.0,h1:81QQ3z6dvLhgXlkNpLkaYhk8jiKS7saFG01xy039KaU=,ebb6c7e2c12577c590d2d5546b7a4b4e6fa75c9a408ae5244b5ba2cf09dec1d6 +github.com/PuerkitoBio/goquery,v1.5.0,h1:uGvmFXOA73IKluu/F84Xd1tt/z07GYm8X49XKHP7EJk=,f0064ad35f21c2b9d1377b94f09ead56ec1862da3807e78c26b99c4b3a04f5e6 +github.com/PuerkitoBio/purell,v1.1.1,h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=,59e636760d7f2ab41c2f80c1784b1c73d381d44888d1999228dedd634ddcf5ed +github.com/PuerkitoBio/urlesc,v0.0.0-20170810143723-de5bf2ad4578,h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=,1793124273dd94e7089e95716d40529bcf70b9e87162d60218f68dde4d6aeb9d +github.com/Quasilyte/inltest,v0.7.0,h1:yHvFAaoXn+6iK2uKtb8mXB9KURz6SDPyszoyBAC0Xk4=,8fb4273cea3514742aec06ed58f20cea1214cc542799c70c331a80865aaf3988 +github.com/RangelReale/osin,v1.0.1,h1:JcqBe8ljQq9WQJPtioXGxBWyIcfuVMw0BX6yJ9E4HKw=,edbcc6208879bffa533369bbf417db41c1322193ca05d0deecf13075972c9d57 +github.com/RangelReale/osincli,v0.0.0-20160924135400-fababb0555f2,h1:x8Brv0YNEe6jY3V/hQglIG2nd8g5E2Zj5ubGKkPQctQ=,82fc65bad3da9fc26cc77b485e10ee117459e830547ce89592c41d92871e1129 +github.com/Rican7/retry,v0.1.0,h1:FqK94z34ly8Baa6K+G8Mmza9rYWTKOJk+yckIBB5qVk=,c0e956967f2f632ffc889eeae5b82e437f30e9be409870cdd1e7998def458843 +github.com/RoaringBitmap/roaring,v0.4.7,h1:eGUudvFzvF7Kxh7JjYvXfI1f7l22/2duFby7r5+d4oc=,515892d9b8e4350e5ac5b7a487da94d5d9ab9641071e002b778dd864b7a31c2a 
+github.com/SAP/go-hdb,v0.14.1,h1:hkw4ozGZ/i4eak7ZuGkY5e0hxiXFdNUBNhr4AvZVNFE=,273de28a254c39e9f24293b864c1d664488e4a5d44d535755a5e5b68ae7eed8d +github.com/Sereal/Sereal,v0.0.0-20190529075751-4d99287c2c28,h1:kmfzzWpCZIrVhxx4V/2oSGhGnhtX+/JijVIlPuKYfHg=,eebfe79e62b5a07f98a367d8a84bcf33ed69818c031e70c3ebc6e9fc34361466 +github.com/SermoDigital/jose,v0.0.0-20180104203859-803625baeddc,h1:LkkwnbY+S8WmwkWq1SVyRWMH9nYWO1P5XN3OD1tts/w=,1711f20ec5b1498c98e46b96e578f39b723557ab50183d644702d40f44a1a345 +github.com/Shopify/go-lua,v0.0.0-20181106184032-48449c60c0a9,h1:+2M9NEk3+xSg0+bWzt1kxsL6EtoEg7sgtT11CZjGwq8=,3e399584ff4a876314243c01be3cba5b98b46bba483d6996dd2d0e7f161b7ad8 +github.com/Shopify/goreferrer,v0.0.0-20181106222321-ec9c9a553398,h1:WDC6ySpJzbxGWFh4aMxFFC28wwGp5pEuoTtvA4q/qQ4=,e47cdf750e6aa39707b90e62f4f87e97abb8d64b2525a16c021c82efb24f9969 +github.com/Shopify/sarama,v1.24.1,h1:svn9vfN3R1Hz21WR2Gj0VW9ehaDGkiOS+VqlIcZOkMI=,c5e06f9c835846eeb5cbbbc540ab949f9775ff37c08cab503dd820b858b1f2e7 +github.com/Shopify/toxiproxy,v2.1.4+incompatible,h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=,9427e70698ee6a906904dfa0652624f640619acef40652a1e5490e13b31e7f61 +github.com/Sirupsen/logrus,v1.0.6,h1:HCAGQRk48dRVPA5Y+Yh0qdCSTzPOyU1tBJ7Q9YzotII=,dc69c77019152ace477a7f5c0cd97fd25d6ab866e01e1dd06f391722f4f9fba9 +github.com/StackExchange/wmi,v0.0.0-20190523213315-cbe66965904d,h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=,68f499ad4c3f45fc6c286fd2a5966e8e15c0f3abc1f96fbf4a979245df936e16 +github.com/Stebalien/go-bitfield,v0.0.1,h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo=,9b17a2749922c810f3598606b87b5f2ba0f3c6abc70966911a8c32f0533ee827 +github.com/Telmate/proxmox-api-go,v0.0.0-20190815172943-ef9222844e60,h1:iEmbIRk4brAP3wevhCr5MGAqxHUbbIDHvE+6D1/7pRA=,55dd16e2cd8e6c1464c6456007cdc5d8676b8b096e90230312daa8c84b57b34d 
+github.com/TheThingsNetwork/api,v0.0.0-20190522113053-d844e8c040fc,h1:hDk+SAT2tV584ye1hqMN5+NHL6RHJDIbe97cNot6/WQ=,7931f7c4699cd1019c950a68361e3b3fec8bbb8e9204c65c08b50bd588ac506a +github.com/TheThingsNetwork/go-account-lib,v2.0.3+incompatible,h1:pnDIalIqac/VlXenPr+L1XEEf3gIq1eIoZ78S5AP1/s=,e62dcb784cbd28bcec55cf332f7dc06779c75c7df66299f4ce542cd6852358b4 +github.com/TheThingsNetwork/go-cayenne-lib,v1.0.0,h1:be7h6E/69+qaYs1iwQ2xjGjSFPXzvU3q6AWBCWayG2Y=,17091b77ac39b8e73ca6ac3f39f34909bc6a3770098ff2dd534b59a10f6e66ad +github.com/TheThingsNetwork/go-utils,v0.0.0-20190813113035-8715cf82e887,h1:DF/1gkOPk3jtwWa9dFd5tUtwb6z3bLw9tZ/UALbS5Ck=,be4c7c2955630b63300f21773efcdc991d5ff201a53b01d92b2f20fede77065c +github.com/TheThingsNetwork/ttn,v2.10.1+incompatible,h1:LQw+g+kinajii5DHJ6I2o82ObaU/Ws+YYgdLkF5eF54=,4a803fe23636f9c99926e6b5a2b956fb42e84ca38c98458a12bd7fe1c22f7439 +github.com/TheThingsNetwork/ttn/api,v0.0.0-20190516081709-034d40b328bd,h1:vCjDYImJDdW+39EXwij00yzDi1pd3TmP6XtCteDJBd0=,9cda2f899f15f57e8f649bbffb955a8153d6c25a13a4e969df8898bb61559a44 +github.com/TheThingsNetwork/ttn/core/types,v0.0.0-20191015060859-00a6f7874bb9,h1:tlWwCxI3/Zu4vJ4dLWb2wMOYSkeMBvLAxQGwJDCFXi8=,7b3895805d6ac341e5df44c2c8154b374b4bed4c8f54e248ea967abfc37186e7 +github.com/TheThingsNetwork/ttn/utils/errors,v0.0.0-20190516081709-034d40b328bd,h1:ITXOJpmUR4Jhp3Xb/xNUIJH4WR0h2/NsxZkSDzFIFiU=,d64decf456c10fdbbb887212ea63749b495264c40bb5ac047b9f0e5ccd7e540b +github.com/TheThingsNetwork/ttn/utils/random,v0.0.0-20190516081709-034d40b328bd,h1:zKTRK1r3K55XxHuUGxnqYg9aiPDduYeilHUEHua+F+Y=,c504030254919a902b3957267b7ce1870f909cbdd65f0f927819f60710e41d9b +github.com/TheThingsNetwork/ttn/utils/security,v0.0.0-20190516081709-034d40b328bd,h1:og10Wq5S/QC+f4ziON4vrxlYKv9gfEKxG8v/MDs00xw=,e6f013adee3a7a212a6f892db59c8efaf715fe49413e6dbca22229fa04f0d006 
+github.com/Unknwon/cae,v0.0.0-20160715032808-c6aac99ea2ca,h1:xU8R31tsvj6TesCBog973+UgI3TXjh/LqN5clki6hcc=,15a1394a603423c5bcd4659275be09d7696774990d5f127500f4156c1a78eb85 +github.com/Unknwon/com,v0.0.0-20190321035513-0fed4efef755,h1:1B7wb36fHLSwZfHg6ngZhhtIEHQjiC5H4p7qQGBEffg=,2cfba36da8f59c6dd8c7a20af59e5ccf9558f42bde7e0918a64a9b68dafcf271 +github.com/Unknwon/i18n,v0.0.0-20171114194641-b64d33658966,h1:Mp8GNJ/tdTZIEdLdZfykEJaL3mTyEYrSzYNcdoQKpJk=,a5ce1436582e797d60e967d853fd22458fc7edeb31bd390d6ace979133bedb78 +github.com/Unknwon/paginater,v0.0.0-20170405233947-45e5d631308e,h1:HnbTtNLKnRmwn85vBmyl7nNJCXUw4rh6X3UeIX5nvko=,60e3af4ba9b482892127f829ec7cc837977ca9e9e634be855d599cc08230e606 +github.com/VividCortex/ewma,v1.1.1,h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=,eebee7c0f20e96abbda1611ed2a3d26b4c2c10393caa6a2dfd1605763a5c1a12 +github.com/VividCortex/gohistogram,v1.0.0,h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE=,16ebeceeb7e4066f90edbfb90282cd90d4dad0f71339199551de3fbdc7e8c545 +github.com/Workiva/go-datastructures,v1.0.50,h1:slDmfW6KCHcC7U+LP3DDBbm4fqTwZGn1beOFPfGaLvo=,1ac8c9334b63ee2b089b7ecc3b6c8d45793cc4ef4c460f6ebbfd6ecea3ee83bc +github.com/a8m/mark,v0.1.1-0.20170507133748-44f2db618845,h1:hIjQrEARcc9LcH8igte3JBpWBZ7+SpinU70dOjU/afo=,048bfeb7427ff5622874d874a52d7215a2cea99f9741c031e9963348785103c2 +github.com/abbot/go-http-auth,v0.4.0,h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=,8204bca24734f55f179dd1c0b820ae5be83151268693a147086f33cd2d4d473c +github.com/abdullin/seq,v0.0.0-20160510034733-d5467c17e7af,h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14=,bcbe9a2c1e3ac0b981ee436cd1bbb2da8220527511b3cea6517a28a881636814 +github.com/abronan/valkeyrie,v0.0.0-20191010124425-1ae9442de16e,h1:4SrbWyef51DHDc957/8Ms/fDM4D+3bkbXqg6OTnIEAo=,553dce6f5ff57f7ccc5ed6a94e6bf29b38b8773236f3b85bb4025dc0d10d2a92 +github.com/aead/siphash,v1.0.1,h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg=,25da04ff418e0b2871b1193a3478977b4aa66c20737b9ca70a5040b876b6d3d9 
+github.com/aerogo/http,v1.0.12,h1:1o5QW6TQLNuutQLuPCX0Tn7g/sSH3JMHv79UGIBpvkw=,a58d344ff2010737d2418050f4188339087cfb369c903bd31e20ccba388304a1 +github.com/afex/hystrix-go,v0.0.0-20180502004556-fa1af6a1f4f5,h1:rFw4nCn9iMW+Vajsk51NtYIcwSTkXr+JGrMd36kTDJw=,c0e0ea63b57e95784eeeb18ab8988ac2c3d3a17dc729d557c963f391f372301c +github.com/agext/levenshtein,v1.2.2,h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE=,07caaae8fcdb7c83195a0afffc03c9df76275b1e9a7b69dabfe0d2f47729bc7c +github.com/agl/ed25519,v0.0.0-20170116200512-5312a6153412,h1:w1UutsfOrms1J05zt7ISrnJIXKzwaspym5BTKGx93EI=,98c1510ac20b7d61bf4e2c76e7184fcbd0a8b78b0fc667c2b772777912963d3f +github.com/agnivade/levenshtein,v1.0.1,h1:3oJU7J3FGFmyhn8KHjmVaZCN5hxTr7GxgRue+sxIXdQ=,cb0e7f070ba2b6a10e1c600d71f06508404801ff45046853001b83be6ebedac3 +github.com/ajg/form,v1.5.1,h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU=,b063b07639670ce9b6a0065b4dc35ef9e4cebc0c601be27f5494a3e6a87eb78b +github.com/ajstarks/svgo,v0.0.0-20190826172357-de52242f3d65,h1:kZegOsPGxfV9mM8WzfllNZOx3MvM5zItmhQlvITKVvA=,1459a44f9162f463b59eacf58e4bb8873e612c5b3df45fc6e34074310d2269ae +github.com/akamai/AkamaiOPEN-edgegrid-golang,v0.9.0,h1:rXPPPxDA4GCPN0YWwyVHMzcxVpVg8gai2uGhJ3VqOSs=,91c3a4743d959b3bb2bb7359790df4688021830e482d393ea6d4f3a27aebd63d +github.com/akavel/rsrc,v0.8.0,h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw=,13954a09edc3a680d633c5ea7b4be902df3a70ca1720b349faadca44dc0c7ecc +github.com/akyoto/assert,v0.2.3,h1:ftENRGDEK5AKuKmZb9LtbDIHeE8p8cIYI4M92CbA9nE=,f0a31d5859109c37568b8702fcf92cd3a49ec4892dace74d113df5fd49491975 +github.com/akyoto/color,v1.8.11,h1:uCQi+uRyngo1cJhJSv28PQmduGFiOAGNF6F9MFoRDek=,0fa16c51743ca03e108fde20eabb070d17d25111cb287e15f8567268d439098a +github.com/akyoto/colorable,v0.1.7,h1:ge91E25hiOiT/Zu47ij/rTO3cks7wMlTrcQspua1hFM=,07c2dd4d994d9ff1dad97ad2e2650ff60d90f50ecd52380f357e562efda99613 
+github.com/akyoto/stringutils,v0.2.6,h1:IP+7jtH8uofpan8MYlV/WMNaLDGBRbzgiTKYnxcAwkw=,802a3b54f91b930c1e8f2376bebf783b16894da626fc7c8064268b07ab567f7c +github.com/akyoto/tty,v0.1.3,h1:AdnLETzgooimWLvoBQLn5bT1j+i0yiB4E596BfFKnmA=,749381ec9dce8bc96bec66c5dfb0874db917a008f9685d9c65c15da43ede964c +github.com/alcortesm/tgz,v0.0.0-20161220082320-9c5fe88206d7,h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=,ccedffb2c46724216b787fb1a79ae33fb0dfdd672c669db000c4ed5a68b08014 +github.com/alecthomas/assert,v0.0.0-20170929043011-405dbfeb8e38,h1:smF2tmSOzy2Mm+0dGI2AIUHY+w0BUc+4tn40djz7+6U=,873d257170b1363142cbf5e16b49c6a21cccb3e4aaceb9d370c3b78b051a5663 +github.com/alecthomas/chroma,v0.6.8,h1:TW4JJaIdbAbMyUtGEd6BukFlOKYvVQz3vVhLBEUNwMU=,ebc5202e6a0ededc5a2c7396b01b76c050331bead9d047f31fe648cb63e68aa3 +github.com/alecthomas/colour,v0.0.0-20160524082231-60882d9e2721,h1:JHZL0hZKJ1VENNfmXvHbgYlbUOvpzYzvy2aZU5gXVeo=,334101c562d2e74338f6baab1de04f3bbff89021d24f4206c551ef47b96a2bfe +github.com/alecthomas/kingpin,v2.2.6+incompatible,h1:5svnBTFgJjZvGKyYBtMB0+m5wvrbUHiqye8wRJMlnYI=,a88daee47262ffeca1f6e348399c16c9be160f3c5e972c0b6c9dc275d85bcdc6 +github.com/alecthomas/kong,v0.2.1-0.20190708041108-0548c6b1afae,h1:C4Q9m+oXOxcSWwYk9XzzafY2xAVAaeubZbUHJkw3PlY=,4292d9b6903d67f060d3bd57ffca0a4ebca359824ce2d32a512ac1b963fa3dc0 +github.com/alecthomas/kong-hcl,v0.1.8-0.20190615233001-b21fea9723c8,h1:atLL+K8Hg0e8863K2X+k7qu+xz3M2a/mWFIACAPf55M=,21a34d6ee62e3419601d0e083b8829001a9833899dd3c2d27a82c794426fd0ee +github.com/alecthomas/log4go,v0.0.0-20180109082532-d146e6b86faa,h1:0zdYOLyuQ3TWIgWNgEH+LnmZNMmkO1ze3wriQt093Mk=,04bdaa7d57a681072316927175c21ca7c9e7a19bd7fee2102b5f40e5b01a7559 +github.com/alecthomas/repr,v0.0.0-20181024024818-d37bc2a10ba1,h1:GDQdwm/gAcJcLAKQQZGOJ4knlw+7rfEQQcmwTbt4p5E=,c01a833ec56f68113f6cd7ed82b7da9bfaec641a10e929e0e3e5e5dadb1a85ad 
+github.com/alecthomas/template,v0.0.0-20190718012654-fb15b899a751,h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=,25e3be7192932d130d0af31ce5bcddae887647ba4afcfb32009c3b9b79dbbdb3 +github.com/alecthomas/units,v0.0.0-20190924025748-f65c72e2690d,h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=,e6b0ccb38bfba85d90092d1c57671d5f7996757bd71f6f1970c6ae2f9dae3f6e +github.com/alicebob/gopher-json,v0.0.0-20180125190556-5a6b3ba71ee6,h1:45bxf7AZMwWcqkLzDAQugVEwedisr5nRJ1r+7LYnv0U=,2374b534198621157afb9466a52d361b6eed33dcf9bb0674019515e64b16129e +github.com/alicebob/miniredis,v0.0.0-20180911162847-3657542c8629,h1:gLoh8jzwIxdisBnHiWRIuReqtH9cpslSE2564UWXun0=,14b5e988ec6d8357a25ba19a7adbdb34920f5f91401b2b26eb25f04fed9893b0 +github.com/aliyun/alibaba-cloud-sdk-go,v0.0.0-20191031111935-12810c79403d,h1:CmGtZPPsr0C31ZBrzdP+D2oczTbyEBbO3bYg6z5EIDY=,4f0f25f45d954ab970b24783e31b716b619b128081acc9ed7b00727cd7c2d536 +github.com/aliyun/aliyun-oss-go-sdk,v2.0.3+incompatible,h1:724q2AmQ3m1mrdD9kYqK5+1+Zr77vS21jdQ9iF9t4b8=,47ede6a440ad4bb1a1c33d71bd12f76f44aa2487f676b8770152130be3021657 +github.com/aliyun/aliyun-tablestore-go-sdk,v4.1.2+incompatible,h1:ABQ7FF+IxSFHDMOTtjCfmMDMHiCq6EsAoCV/9sFinaM=,82c8ced9cd377462c6ea5070258f97c77ffddd66621e8960b08184eb58416846 +github.com/allegro/bigcache,v1.2.1,h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc=,9250edab8c7851cfa0c6c173e721cf70831e90742a7485c2eba1d6e2cc8c71eb +github.com/anacrolix/envpprof,v1.1.0,h1:hz8QWMN1fA01YNQsUtVvl9hBXQWWMxSnHHoOK9IdrNY=,97f2340bcb169956bad97c59fdc17bbd2eb7c0acefe4e2ae327c7d6bd5a5f6cf +github.com/anacrolix/log,v0.3.0,h1:Btxh7GkT4JYWvWJ1uKOwgobf+7q/1eFQaDdCUXCtssw=,e8bc14381d8746426c7e272228780047e0594d695d02e188269f1e86ef1644d4 +github.com/anacrolix/missinggo,v1.2.1,h1:0IE3TqX5y5D0IxeMwTyIgqdDew4QrzcXaaEnJQyjHvw=,2fb8cba1f6eaf69989ca5c522c2d4afd6c1071ad9459f940b6058dbfc2f3b285 
+github.com/anacrolix/missinggo/perf,v1.0.0,h1:7ZOGYziGEBytW49+KmYGTaNfnwUqP1HBsy6BqESAJVw=,f4271e6359cf3dd5cba81bcf1436e8abc5d0c96c11820b881544708caa131713 +github.com/anacrolix/sync,v0.0.0-20180808010631-44578de4e778,h1:XpCDEixzXOB8yaTW/4YBzKrJdMcFI0DzpPTYNv75wzk=,bef95f54e1b17e4e7666cbf552e541e670f29fc3fd354aba0ebeee73f744ea24 +github.com/anacrolix/tagflag,v1.0.1,h1:Yd3d5DaKbRA70k7CoFuBsbmfSWIsvtZ9t80xW/x4vQY=,8fc0a5b5607cde223bacd9e4fa3b26f6166c09a09bfabe2c2c803e45e17971fa +github.com/anacrolix/utp,v0.0.0-20180219060659-9e0e1d1d0572,h1:kpt6TQTVi6gognY+svubHfxxpq0DLU9AfTQyZVc3UOc=,35c47428844d10f077225195f9a6c7587c671b7fc70bbaf59ef74cd6d8834e32 +github.com/andreyvit/diff,v0.0.0-20170406064948-c7f18ee00883,h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=,d39614ff930006640ec15865bca0bb6bf8e1ed145bccf30bab08b88c1d90f670 +github.com/andybalholm/cascadia,v1.0.0,h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=,7fd82e560ca1a453974a64c9bf6514b17322d1b7392bad730a5006d929996906 +github.com/andygrunwald/go-jira,v1.5.0,h1:/1CyYLNdwus7TvB/DHyD3udb52K12aYL9m7WaGAO9m4=,3ee973941f400bf95005cada54e09e319cb4943cd6c8d66480243d3b40895821 +github.com/anmitsu/go-shlex,v0.0.0-20161002113705-648efa622239,h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=,3b8376ff631f30d47e0348a8f847050b97c3db89483f45d1cd8f11d23c7c56a2 +github.com/antchfx/htmlquery,v1.0.0,h1:O5IXz8fZF3B3MW+B33MZWbTHBlYmcfw0BAxgErHuaMA=,81c86507bf2a226d5a3d20db547503d490f1e3b77035f267056e80cd73e240e2 +github.com/antchfx/xmlquery,v1.0.0,h1:YuEPqexGG2opZKNc9JU3Zw6zFXwC47wNcy6/F8oKsrM=,969fc21438fe076aee032574578158ac7e030979153dcf7b5ff5c133cbfa4d86 +github.com/antchfx/xpath,v0.0.0-20190129040759-c8489ed3251e,h1:ptBAamGVd6CfRsUtyHD+goy2JGhv1QC32v3gqM8mYAM=,22cb767dc0cafecba39e1b0322cc8aebbc6fd912e4b0fcda8c2c1dde2d80c4d2 +github.com/antchfx/xquery,v0.0.0-20180515051857-ad5b8c7a47b0,h1:JaCC8jz0zdMLk2m+qCCVLLLM/PL93p84w4pK3aJWj60=,9ddc9d830f2d6c7a22604035f0c621228ffa4ed6ff1f1d34655ee477c203c899 
+github.com/antihax/optional,v0.0.0-20180407024304-ca021399b1a6,h1:uZuxRZCz65cG1o6K/xUqImNcYKtmk9ylqaH0itMSvzA=,7b0a2bf3eb029d9abe761db1874a501b60f267e675d72ae8c4b8c6f406ddcfd0 +github.com/apache/arrow/go/arrow,v0.0.0-20191024131854-af6fa24be0db,h1:nxAtV4VajJDhKysp2kdcJZsq8Ss1xSA0vZTkVHHJd0E=,4bd8443c24bc06843c0270df4f08f98b3eee6116604ff16d14dce34b242783cf +github.com/apache/thrift,v0.13.0,h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=,d75265e363da943c24e7ed69104bf018429024a50968421e48a6ab3e624733c2 +github.com/apex/log,v1.1.1,h1:BwhRZ0qbjYtTob0I+2M+smavV0kOC8XgcnGZcyL9liA=,5bb0f19e5c68b104ed32a311ea9c6f6e2a5e8fa597b342695e069468e2248d83 +github.com/aphistic/golf,v0.0.0-20180712155816-02c07f170c5a,h1:2KLQMJ8msqoPHIPDufkxVcoTtcmE5+1sL9950m4R9Pk=,a0ca77a50520037607c3a2a798b66aee1d5df63f4800b4236f51be2f1e3c1d70 +github.com/aphistic/gomol,v0.0.0-20190314031446-1546845ba714,h1:ml3df+ybkktxzxTLInLXEDqfoFQUMC8kQtdfv8iwI+M=,c2fd1a9db2fb7a5ca7ba9132fbddb5d8efd64babcff7c0f66d41d3cf97b8caab +github.com/aphistic/gomol-console,v0.0.0-20180111152223-9fa1742697a8,h1:tzgowv45TOFALtZLJ9y3k+krzOh2J8IkCvJ8T//6VAU=,26a1b99db9a92a7f5d088e529c43db6de957a3a1650c27d7a872495f73a52880 +github.com/aphistic/gomol-gelf,v0.0.0-20170516042314-573e82a82082,h1:PgPqI/JnStmzwTof+PtT53Pz53dlrz2BmF7cn5CAwQM=,e44d4de8d62391c1e0e70c3b27f4c341bb0398083f33b99be46e29144fad3c50 +github.com/aphistic/gomol-json,v1.1.0,h1:XJWwW8PxYOHf0f0FquuBWcgvZBvQ89nPxZsqQ9pfpro=,0e1ab66a46afe81c4662f8a49ca38042f0c6bc8645895336399adef1eedaff59 +github.com/aphistic/sweet,v0.2.0,h1:I4z+fAUqvKfvZV/CHi5dV0QuwbmIvYYFDjG0Ss5QpAs=,02bebcef905b02cf7195137d9b20920367bb5f8c635a6e5a112b787596414f51 +github.com/aphistic/sweet-junit,v0.0.0-20190314030539-8d7e248096c2,h1:qDCG/a4+mCcRqj+QHTc1RNncar6rpg0oGz9ynH4IRME=,6a3ab195b97bd1981f2ae87a172bc24ecfb44ffbd8d28428f97bfa46e66f559b +github.com/apparentlymart/go-cidr,v1.0.1,h1:NmIwLZ/KdsjIUlhf+/Np40atNXm/+lZ5txfTJ/SpF+U=,5af128e1ecdf5f2203fda104a653f13fb2e46acc3f68b2d7634a760a8f556ea0 
+github.com/apparentlymart/go-dump,v0.0.0-20190214190832-042adf3cf4a0,h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I=,3506757fd2dcbcf8e77aa962c923d9ceaf918538bf9b117f98aa562bc83c77ef +github.com/apparentlymart/go-textseg,v1.0.0,h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0=,2572a77af285125f1980e9b751e5a7c3ae59b73c4fc97e7c2407681609991142 +github.com/appc/spec,v0.8.11,h1:BFwMCTHSDwanDlAA3ONbsLllTw4pCW85kVm290dNrV4=,4a17d699b3e2c3cc8b301de260a45c8fc31054fbb5c689e567f24e3e63bf8f79 +github.com/apple/foundationdb/bindings/go,v0.0.0-20190411004307-cd5c9d91fad2,h1:VoHKYIXEQU5LWoambPBOvYxyLqZYHuj+rj5DVnMUc3k=,a2dc6bd23d9066d3acf174c9b33378c08ae4a95cfd017abc70a16388e74ea2c3 +github.com/approvals/go-approval-tests,v0.0.0-20160714161514-ad96e53bea43,h1:ePCAQPf5tUc5IMcUvu6euhSGna7jzs7eiXtJXHig6Zc=,e3b51ab88c4f3b1c4aea2fadd0b3d3e2ec178d37232066b9fe3b0177e1c6e9aa +github.com/aquasecurity/fanal,v0.0.0-20191031102512-c1c079886da6,h1:B84l/SNXzzcqwgIORAmEv7gs4K4l+DJkdliI6ib/zNw=,7247188e1746360364e7ff77aa0c531df69074c49b23e7f67d65134ca577b0e0 +github.com/aquasecurity/go-dep-parser,v0.0.0-20190819075924-ea223f0ef24b,h1:55Ulc/gvfWm4ylhVaR7MxOwujRjA6et7KhmUbSgUFf4=,73ce01b48b9aa56349d928a27bdd4b77c149541385e645951b2e25f1d6ab5d26 +github.com/araddon/dateparse,v0.0.0-20190622164848-0fb0a474d195,h1:c4mLfegoDw6OhSJXTd2jUEQgZUQuJWtocudb97Qn9EM=,3b88bff198316e2795d11340862ef873387cd7dba97eeb17f106f41deb00d602 +github.com/araddon/gou,v0.0.0-20190110011759-c797efecbb61,h1:Xz25cuW4REGC5W5UtpMU3QItMIImag615HiQcRbxqKQ=,936e20f4c9eaa45f54586ab86bce911f0b1f935d0410dd683dc647797ed7225d +github.com/aristanetworks/fsnotify,v1.4.2,h1:it2ydpY6k0aXB7qjb4vGhOYOL6YDC/sr8vhqwokFQwQ=,9c0dd5427e82f044a9e5808a3436b43472ff032f23ac853829e5c166171044a3 +github.com/aristanetworks/glog,v0.0.0-20180419172825-c15b03b3054f,h1:Gj+4e4j6g8zOhckHfGbZnpa0k8yDrc0XRmiyQj2jzlU=,496dd08756b324a7925b670a907328433f1477763a229b76a4eef8ed254c9683 
+github.com/aristanetworks/goarista,v0.0.0-20191023202215-f096da5361bb,h1:gXDS2cX8AS8KbnP32J6XMSjzC1FhHEdHfUUCy018VrA=,2c348fcdf827ac0d1238fb556f66ad1f13f05d8c5a6d2b3efe5f94be40af5021 +github.com/aristanetworks/splunk-hec-go,v0.3.3,h1:O7zlcm4ve7JvqTyEK3vSBh1LngLezraqcxv8Ya6tQFY=,545adec43ebdf1c9cdc65cd3d738d131f1b02706d25876de1fda65c4989195af +github.com/armon/circbuf,v0.0.0-20190214190532-5111143e8da2,h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs=,c8b7ba977844b5378a2413c123c3e55d0885fb67f64ad6cf06575a791a36b827 +github.com/armon/consul-api,v0.0.0-20180202201655-eb2c6b5be1b6,h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA=,091b79667f16ae245785956c490fe05ee26970a89f8ecdbe858ae3510d725088 +github.com/armon/go-metrics,v0.0.0-20190430140413-ec5e00d3c878,h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM=,3d48bc38dda0cff4dbf0b56b9b6e2e8fc3e6be2282f2a612a96a6702cc8a9fc5 +github.com/armon/go-proxyproto,v0.0.0-20190211145416-68259f75880e,h1:h0gP0hBU6DsA5IQduhLWGOEfIUKzJS5hhXQBSgHuF/g=,1004212be9a343c99e1849425845af1ec5e3e35cc4917483721cb03620982d58 +github.com/armon/go-radix,v1.0.0,h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=,df93c816505baf12c3efe61328dc6f8fa42438f68f80b0b3725cae957d021c90 +github.com/armon/go-socks5,v0.0.0-20160902184237-e75332964ef5,h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=,f473e6dce826a0552639833cf72cfaa8bc7141daa7b537622d7f78eacfd9dfb3 +github.com/asaskevich/govalidator,v0.0.0-20190424111038-f61b66f89f4a,h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=,b5dfb936e0256459bc633c8acf433f4a01a468868db9bd6e390a67f4678185f5 +github.com/asdine/storm,v2.1.2+incompatible,h1:dczuIkyqwY2LrtXPz8ixMrU/OFgZp71kbKTHGrXYt/Q=,ffea8b759006a871732554e1e0a42753fb9a5dd9884eb150e1b42806d51cd5fd +github.com/assetsadapterstore/tivalue-adapter,v1.0.3,h1:zcFcT1x1rWDYQEaA3wI7Hr7F25Cspy+O1cr+vUMjrks=,c42adddd544495ef0ebe1d8730bad20c4251c7646e1782542782bc946c839eca 
+github.com/astaxie/beego,v1.12.0,h1:MRhVoeeye5N+Flul5PoVfD9CslfdoH+xqC/xvSQ5u2Y=,1f14eb5d216170c027754bea1129bbcdafc06a035650e635375c61a17be6f316 +github.com/asticode/go-astilog,v1.0.0,h1:l9tek0K7KoQCmhZ7cvBTtVu0NsKpS9hB6jBLtQyxWYk=,49fe2b286073848e780a9326f7d37771372e61827ff07b80db89667e6ac4d1d4 +github.com/aws/amazon-ssm-agent,v0.0.0-20191011205301-04bb0617297b,h1:xv695CeRjoBS0baQSS5UfQkeo63GiMjmDwiAeY09bSw=,08ede8d7aa20210a4738e0ea033f1bf8fd1ce13bba6c375431c8c1e7a8565c37 +github.com/aws/aws-lambda-go,v1.13.2,h1:8lYuRVn6rESoUNZXdbCmtGB4bBk4vcVYojiHjE4mMrM=,05b1633366a8df9e313df4409d003a277ff7ae46f1079b3ad7f6b48c0dabfb75 +github.com/aws/aws-sdk-go,v1.25.25,h1:j3HLOqcDWjNox1DyvJRs+kVQF42Ghtv6oL6cVBfXS3U=,c34d718d97487766a9a8ac818d37dd135d75d747a8d191a616b75425c32456f2 +github.com/aybabtme/rgbterm,v0.0.0-20170906152045-cc83f3b3ce59,h1:WWB576BN5zNSZc/M9d/10pqEx5VHNhaQ/yOVAkmj5Yo=,a4456a42277e0c987de99e9c4ba141db064107ce737ad1dd2e050aeb1149b67e +github.com/aymerick/raymond,v2.0.2+incompatible,h1:VEp3GpgdAnv9B2GFyTvqgcKvY+mfKMjPOA3SbKLtnU0=,df6e22632cb314b76ab10dd6a1c2c66a79da44200bfec9f5e4f321100d90dc64 +github.com/baiyubin/aliyun-sts-go-sdk,v0.0.0-20180326062324-cfa1a18b161f,h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA=,0965da027355d9b385358331ec359cf729ec4571ec4ca86339da925364c13559 +github.com/bartekn/go-bip39,v0.0.0-20171116152956-a05967ea095d,h1:1aAija9gr0Hyv4KfQcRcwlmFIrhkDmIj2dz5bkg/s/8=,6a278508499838d4c57c1dbdafcfc9f9f909e7358c518a8699728053b695d0c5 +github.com/bazelbuild/buildtools,v0.0.0-20191024175656-9f3978593d3e,h1:QdfIPgk+fJY8AcfjVk2/tdc2dNtl6d+7x8dhVBP72Ik=,f768dd2a38a1dedc924740f9b7a3194ca68d8a24db8fb840c547aee3911162d3 +github.com/bbangert/toml,v0.0.0-20130821181452-a2063ce2e5cf,h1:SGoM2ypzNnI+hMs01svW6wRddndk7eWRs1Bx1zOGRTI=,63690dcb3fcf13b55193cfe263b4a4fdbbe2ee9d7f93440375815dac28d34cb9 
+github.com/bcext/cashutil,v0.0.0-20190126062106-1194a0af0582,h1:+sgikGWB0jvS9rzLlPww+SSFoieOLB8yieXyX9DRCF4=,4d5b42e5d472015edeef1b6bf54e253a85bab6df1ac16aabea7fd0dea4aa85e3 +github.com/bcext/gcash,v0.0.0-20190404152342-2e38815af4f2,h1:XVuqYNixmuo81vR/PnBRDDiTH7596mAwQlQ8BucvGnM=,6b24e00369a493c32e730a4d78d8c4fd122ffe0ce319c5d72f3c7d2f12ede4b7 +github.com/beego/goyaml2,v0.0.0-20130207012346-5545475820dd,h1:jZtX5jh5IOMu0fpOTC3ayh6QGSPJ/KWOv1lgPvbRw1M=,aaa4165412caaacbb2df4427207a206e09215c3f7a19f8309e9222ca9ff80691 +github.com/beego/x2j,v0.0.0-20131220205130-a0352aadc542,h1:nYXb+3jF6Oq/j8R/y90XrKpreCxIalBWfeyeKymgOPk=,f9a32026b2107f3cc3610ac6b75c4c64818646a316c35e648c8811d4276a9993 +github.com/beevik/etree,v1.1.0,h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs=,614a33736f8b9262a809f101df5bf71f47777879b1191165b6247d6b67c7468c +github.com/beevik/guid,v0.0.0-20170504223318-d0ea8faecee0,h1:oLd/YLOTOgA4D4aAUhIE8vhl/LAP1ZJrj0mDQpl7GB8=,5add94fcade6c7afa236112c8da300d47ec499ad1789a5e805c8198062dd0749 +github.com/beevik/ntp,v0.2.0,h1:sGsd+kAXzT0bfVfzJfce04g+dSRfrs+tbQW8lweuYgw=,42e14f30c23ba2f5ddaff76101016d87f0f0a0f1d96d3d20e42fd02842091c76 +github.com/beorn7/perks,v1.0.1,h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=,25bd9e2d94aca770e6dbc1f53725f84f6af4432f631d35dd2c46f96ef0512f1a +github.com/bep/debounce,v1.2.0,h1:wXds8Kq8qRfwAOpAxHrJDbCXgC5aHSzgQb/0gKsHQqo=,ddc0a77e4819b6b826d69fdf1a5a153f3f867a31e030cfe28296355b670adf21 +github.com/bep/gitmap,v1.1.1,h1:Nf8ySnC3I7/xPjuWeCwzukUFv185iTUQ6nOvLy9gCJA=,364163e67741ae331d164fd881964160f19fdbdfe094e0e762314cc37aac646a +github.com/bep/go-tocss,v0.6.0,h1:lJf+nIjsQDpifUr+NgHi9QMBnrr9cFvMvEBT+uV9Q9E=,40e7175da9564796e184e4383bfce703f63244b850999b5a54fd5792bfc5baf5 +github.com/bep/tmc,v0.5.0,h1:AP43LlBcCeJuXqwuQkVbTUOG6gQCo04Et4dHqOOx4hA=,f8e0be71fb845a4ca22825f5b9c51c1a66c29e9ccff723e063781ee64c664c66 
+github.com/bgentry/go-netrc,v0.0.0-20140422174119-9fd32a8b3d3d,h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=,59fbb1e8e307ccd7052f77186990d744284b186e8b1c5ebdfb12405ae8d7f935 +github.com/bgentry/speakeasy,v0.1.0,h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=,d4bfd48b9bf68c87f92c94478ac910bcdab272e15eb909d58f1fb939233f75f0 +github.com/bifurcation/mint,v0.0.0-20180715133206-93c51c6ce115,h1:fUjoj2bT6dG8LoEe+uNsKk8J+sLkDbQkJnB6Z1F02Bc=,40a4bd02b9e3477271638bc17ae8537e2675ace0a9b85d753820e979dbf97f36 +github.com/binance-chain/go-sdk,v1.0.8,h1:mC1Tai9diqIWuKTJmrFLal90OCsgtDvyLEItMvglaHA=,3d0f86f959b38f11174d8ee574e77e5d80d2c672d0720dee519f3708e873b0ca +github.com/binance-chain/ledger-cosmos-go,v0.9.9-binance.1,h1:8mAtw1Tp/BhhTrsXmXM60H1fihcvcKLfo2ZSxShaXKw=,f6dc2bfb4d29db01cad72815615301e089d727110d1d5a0de43e829953e45041 +github.com/biogo/hts,v0.0.0-20160420073057-50da7d4131a3,h1:3b+p838vN4sc37brz9W2HDphtSwZFcXZwFLyzm5Vk28=,93be93b79da8920fb5f02bb2e50a364e2b33dc831229d163e7be70c1010cdb9e +github.com/bitcoinsv/bsvd,v0.0.0-20190609155523-4c29707f7173,h1:2yTIV9u7H0BhRDGXH5xrAwAz7XibWJtX2dNezMeNsUo=,8e1e554ddc232e763fac27ddc0661cfe543163802b0d6bb9a2904bf24756ddc3 +github.com/bitcoinsv/bsvlog,v0.0.0-20181216181007-cb81b076bf2e,h1:6f+gRvaPE/4h0g39dqTNPr9/P4mikw0aB+dhiExaWN8=,89f0c34e6936d82a1629d5d255923ff27c0adeb99709269cf62071e48cb5fbd8 +github.com/bitcoinsv/bsvutil,v0.0.0-20181216182056-1d77cf353ea9,h1:hFI8rT84FCA0FFy3cFrkW5Nz4FyNKlIdCvEvvTNySKg=,4d4923e8743012e1f8ed1a1ef721786fc2d5249cc5dafd96fdd350c485378cfe +github.com/bitly/go-hostpool,v0.0.0-20171023180738-a3a6125de932,h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=,9a55584d7fa2c1639d0ea11cd5b437786c2eadc2401d825e699ad6445fc8e476 +github.com/bitly/go-simplejson,v0.5.0,h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=,53930281dc7fba8947c1b1f07c82952a38dcaefae23bd3c8e71d70a6daa6cb40 
+github.com/blackducksoftware/horizon,v0.0.0-20190625151958-16cafa9109a3,h1:noI1RY2cUFZfdZMIz1+1LzT8ZeuWK703gwmH/ZC2YnQ=,ece353e9e973ce03d131b29c6c00aea53f1b2e507960b389cdfeb2cc317897ef +github.com/blacktear23/go-proxyprotocol,v0.0.0-20180807104634-af7a81e8dd0d,h1:rQlvB2AYWme2bIB18r/SipGiMEVJYE9U0z+MGoU/LtQ=,123c82a455309b3a3118504c0a70771352292abced294dca39a570b89e48adba +github.com/blakesmith/ar,v0.0.0-20190502131153-809d4375e1fb,h1:m935MPodAbYS46DG4pJSv7WO+VECIWUQ7OJYSoTrMh4=,015878daba57ba5ce7228f772b843fffa847d99c7afeb308089bef77f433c510 +github.com/blang/semver,v3.5.1+incompatible,h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=,8d032399cf835b93f7cf641b5477a31a002059eed7888a775f97bd3e9677ad3c +github.com/blevesearch/bleve,v0.8.1,h1:20zBREtGe8dvBxCC+717SaxKcUVQOWk3/Fm75vabKpU=,58a5b5ade8123d54b7510e463c25e1e59e6cd3d98acdcb4d582c42db67c03519 +github.com/blevesearch/blevex,v0.0.0-20180227211930-4b158bb555a3,h1:U6vnxZrTfItfiUiYx0lf/LgHjRSfaKK5QHSom3lEbnA=,defa5966f802eab571cc8d9315323104b776751dd13caae9d8fc0476576d57ca +github.com/blevesearch/go-porterstemmer,v0.0.0-20141230013033-23a2c8e5cf1f,h1:J9ZVHbB2X6JNxbKw/f3Y4E9Xq+Ro+zPiivzgmi3RTvg=,e13cc37d08c58870cdbad544b726934cd62ca6aa2ae35f02598f72e30d7c0f59 +github.com/blevesearch/segment,v0.0.0-20160105220820-db70c57796cc,h1:7OfDAkuAGx71ruzOIFqCkHqGIsVZU0C7PMw5u1bIrwU=,21278826e6ba0f63024a953c480467bf41d6717ae4a87c3021a9f74d2f2ae618 +github.com/blocktree/arkecosystem-adapter,v1.0.4,h1:TkZWCzAgi20CjAMlOpwTDppt6XO7X8Fn5EjSUsuB6kI=,22346af6957b0b8fae47d982605f565e5e16e86bc52a9fdd234b023067896cf2 +github.com/blocktree/bitshares-adapter,v1.0.5,h1:mzYlpip0crtYaDaXbKqtGLAxad83p19HLTVa9LLW3fc=,03ce80398ab59af79feb92b054e5da02a290ef27aef5facb93cdd86de2e0df91 +github.com/blocktree/ddmchain-adapter,v1.0.5,h1:Lx8zD0lOHb9TJ7EcGJQhyvpDkYko6OoV8uwudKRKlJA=,81e65ed5692152fcaa1dbf997f71dc43192abc72dad6fb78b721d742c05c1a7a 
+github.com/blocktree/eosio-adapter,v1.0.0,h1:cncKE4QbQxDsr8B+HlhU7tywbCtZRsWMln2ek8I5lbc=,cc658d6f9fa5470c5affb4caad58e17637a6d46f8e5d1b3730ed06e570e61959 +github.com/blocktree/ethereum-adapter,v1.1.10,h1:PkmQeRT5ljyCOQZPT0diJo+4G9OqOcJsnRcXeF5fitU=,cf8465db958e214c8196e6311fd5db24f8d28265a37914c2cf9d2dac54a5fd1a +github.com/blocktree/futurepia-adapter,v1.0.12,h1:mL1rDvcM55hKwLhHOkg1v2GwnCEsDniUrqrMG3PK/+4=,ae29bbdb9a4a1ec345f7d220d6a736ed827a307454d0466bb065dacf3d94200d +github.com/blocktree/go-owcdrivers,v1.1.18,h1:KCNm+HczpDfxyUf+Wrvbj/iWwQDJ+ca/FBjm3H06rIY=,65bcca1918d8b9e1048bac14b1393dec246402320b6a5dde20ee6afe84585736 +github.com/blocktree/go-owcrypt,v1.0.3,h1:qfAwJsWYp7WaI26hAwPuFUrMXhD9bWwuGXYWBOLsVes=,f365daad6adfcc5aee14faa1455f772b5e39b1c9ff3598afb1c3645587cb6b2e +github.com/blocktree/moacchain-adapter,v1.0.3,h1:k9drMeekvBsXORortW/zJXaO6CokXVv2EL0/YK3c1/A=,66c87656369c5246bad1527f7385d1cb98bbeda431383f154a23e9bf821a05a2 +github.com/blocktree/nulsio-adapter,v1.1.7,h1:d0xuovBqodBAv8BE/CPZjfe5CNma6FFSP6W3ynJRD0U=,c814c05686483abc345bf4fb997fc61595e738b99cc8a8d36742414f93e948b0 +github.com/blocktree/ontology-adapter,v1.0.8,h1:Lej35ZPPgjS6nP5CEumIUskRNASMZswgrByYSxrWPe0=,fcdeb4c6d8f37a22f52e2938ebc51b4ff1f4cf4116eebce8ecc7591995236853 +github.com/blocktree/openwallet,v1.5.3,h1:6hNj61wLfzEGqbbY0ZOeqGAjSj9snoRSBikgSlWPqZI=,1f169b69cd3ec4a4f82836c2b2178eb162464d6413c09f8170f73a838d28650b +github.com/blocktree/ripple-adapter,v1.0.13,h1:zgJt7onq5+V6pvQ7Kl3xiiSkk3uxuCF07OpwCtJTM8w=,bb7f515a6573eb185da0bebb28bf57364d175ee7f937e18af5b2eac98741464f +github.com/blocktree/virtualeconomy-adapter,v1.1.5,h1:YJ2JKUifSsCjCneM0NUky3WbG0LEm7IKUBmf9EAmAXc=,7a5085b8b0b114e2491032ec6f95e300c28fa309ec5883044d8b954d7d4db06e +github.com/blocktree/waykichain-adapter,v1.0.3,h1:qY/Txh+n4iIJA49rDMj41qpIUj3McjBir8Ls+sX8c3w=,62ea2ff873c84a32d3482c8ec1687221a1e46055b9be18fcd25be584cf2cac5d 
+github.com/bluele/gcache,v0.0.0-20190518031135-bc40bd653833,h1:yCfXxYaelOyqnia8F/Yng47qhmfC9nKTRIbYRrRueq4=,334accb65479b1b18fb569b08d14eebceb6478ea16abe9fbad2f1c6b6586deb6 +github.com/bluele/slack,v0.0.0-20180528010058-b4b4d354a079,h1:dm7wU6Dyf+rVGryOAB8/J/I+pYT/9AdG8dstD3kdMWU=,2b0055c292b7baa49f56eb9fc710f35f005747ddbef16427d5c985617c3b697d +github.com/bmatcuk/doublestar,v1.1.5,h1:2bNwBOmhyFEFcoB3tGvTD5xanq+4kyOZlB8wFYbMjkk=,81f592b11277591e943b91522497c323fcf0c6b4f3099f495de10f83e8c3e697 +github.com/bmizerany/assert,v0.0.0-20160611221934-b7ed37b82869,h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=,2532a167df77ade7e8012f07c0e3db4d4c15abdb7ffa7b05e1d961408da9a539 +github.com/bmizerany/pat,v0.0.0-20170815010413-6226ea591a40,h1:y4B3+GPxKlrigF1ha5FFErxK+sr6sWxQovRMzwMhejo=,ed04bed4d193e25371ebc6524984da4af9ece5c107fcc82d5aa4914b726706d2 +github.com/bndr/gotabulate,v1.1.2,h1:yC9izuZEphojb9r+KYL4W9IJKO/ceIO8HDwxMA24U4c=,2c1ecc544368e40010082f800c1ee24eaf1b8e0f96fa76a56e4f61dda4cd0d60 +github.com/boltdb/bolt,v1.3.1,h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=,ecaf17b0dbe7c85a017704c72667b2526b492b1a753ce7302a27dd2fb2e6ee79 +github.com/boombuler/barcode,v1.0.0,h1:s1TvRnXwL2xJRaccrdcBQMZxq6X7DvsMogtmJeHDdrc=,ef3832c4d22a09377323980bacd9f5f2ab43d0d20da115e1cfb139e093d7bb9b +github.com/bradfitz/go-smtpd,v0.0.0-20170404230938-deb6d6237625,h1:ckJgFhFWywOx+YLEMIJsTb+NV6NexWICk5+AMSuz3ss=,0a06dd547fed38e2744800b5f4ebae5ac00ee08717ded281510a8d319b8db8f3 +github.com/bradfitz/gomemcache,v0.0.0-20190913173617-a41fca850d0b,h1:L/QXpzIa3pOvUGt1D1lA5KjYhPBAN/3iWdP7xeFS9F0=,eb71acfac0c4ce5f0b6537d8029de98902d83fd38fdcbfd757f06697c6323f78 +github.com/bradfitz/iter,v0.0.0-20190303215204-33e6a9893b0c,h1:FUUopH4brHNO2kJoNN3pV+OBEYmgraLT/KHZrMM69r0=,6883ce0960849ca9c024a4a4e7508ff521da2a3bb66d1974ea2f970a5265ea39 +github.com/bradfitz/latlong,v0.0.0-20140711231157-b74550508561,h1:mz4equOOUOnI4q5E7dyHlRx1x63YEaYwhlVluCDila4=,d1c124508f1825697a2bdb9fac48d2b8805b41f8e546d262fc487d8450962cec 
+github.com/bradhe/stopwatch,v0.0.0-20180424000511-fd55e776a960,h1:YJWTgxlTgeHlvhe7tZJm0yBcg2GhjDQs8zig5O5vup8=,c2926a4febee7eea0f523b3d4fcaa414c27effc2abc053137a3dbf0b3a4fa324 +github.com/briankassouf/jose,v0.9.2-0.20180619214549-d2569464773f,h1:ZMEzE7R0WNqgbHplzSBaYJhJi5AZWTCK9baU0ebzG6g=,c0b50157ec3c39fbd6ded9d5e6bc763890e6d909db38b337a72876124c2baeeb +github.com/brocaar/lorawan,v0.0.0-20190925120821-154a30dbdce2,h1:51WcQ+VAc/6jZ/8GBJiQ3B7FrT2aXI+YsUx2iG9tJlw=,0082cebaf26ed36c901f9b44b6d785eccc2a0c123088642eac7c9b5711b7d0ca +github.com/bsm/go-vlq,v0.0.0-20150828105119-ec6e8d4f5f4e,h1:D64GF/Xr5zSUnM3q1Jylzo4sK7szhP/ON+nb2DB5XJA=,61fc03674cd72d5a4c55413e8b58fc8eafc58fbb71fb89c719225650754b3469 +github.com/bsm/sarama-cluster,v2.1.15+incompatible,h1:RkV6WiNRnqEEbp81druK8zYhmnIgdOjqSVi0+9Cnl2A=,a8a4867f09704222362b75fa00c9894106a928dc7cf905f1b80ca7bbd1a3b8e5 +github.com/btcsuite/btclog,v0.0.0-20170628155309-84c8d2346e9f,h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=,74ad4defbabf48c98bbb547be1c40c11fa2c286f599412c774d1c5604dc1808d +github.com/btcsuite/btcutil,v0.0.0-20190425235716-9e5f4b9a998d,h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng=,de1ee450ff2cfec2df220fec0d3e265cc812f214892bfad601e142632e2cf3f9 +github.com/btcsuite/go-socks,v0.0.0-20170105172521-4720035b7bfd,h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw=,cc27776f56f7c58c2808af55781e9b3f7d0eb0dc08e4c19c38c6bdf2465ce0e7 +github.com/btcsuite/goleveldb,v1.0.0,h1:Tvd0BfvqX9o823q1j2UZ/epQo09eJh6dTcRp79ilIN4=,13e37462cb2fe5976221f57d357051c1c3cc63a9b0e67e6ed97f98af795d0815 +github.com/btcsuite/snappy-go,v1.0.0,h1:ZxaA6lo2EpxGddsA8JwWOcxlzRybb444sgmeJQMJGQE=,d136165bdbf91780ded5d3ebaba9026f900595e56c19aa0ef29896015eae9627 +github.com/btcsuite/websocket,v0.0.0-20150119174127-31079b680792,h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc=,d45ac16f59082ac369e61c7bbe23153e289cad03619ab8041963d54cd700d6f0 
+github.com/btcsuite/winsvc,v1.0.0,h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk=,6893f7a62faec17d7b0856c7464754cab14c4d913e27af5276f6a98b25f3c779 +github.com/buger/jsonparser,v0.0.0-20191004114745-ee4c978eae7e,h1:oJCXMss/3rg5F6Poy9wG3JQusc58Mzk5B9Z6wSnssNE=,7e2dda4c1b4217408903f3b4a1f2cdd93d71bc7682387ba860cfa0cc9fcf88be +github.com/bugsnag/bugsnag-go,v1.5.3,h1:yeRUT3mUE13jL1tGwvoQsKdVbAsQx9AJ+fqahKveP04=,8aaf02df2c1a4e8a5725eea1d91af69c4f9e157c2559a3452388f64a977534c0 +github.com/bugsnag/panicwrap,v1.2.0,h1:OzrKrRvXis8qEvOkfcxNcYbOd2O7xXS2nnKMEMABFQA=,75357d3a5cd89dc04f1f101e02686fc1ef33b4a4f67edb82b3fa63fded3f47e9 +github.com/bwmarrin/discordgo,v0.20.1,h1:Ihh3/mVoRwy3otmaoPDUioILBJq4fdWkpsi83oj2Lmk=,616d49cc107ccd85872b6008f028c4aca021f66381828bb921f15f9e8149988a +github.com/bwmarrin/snowflake,v0.0.0-20180412010544-68117e6bbede,h1:lTJlWdyhwqq7h29GtuIDHW/xi+sMN+JOLMgYAwQ5O74=,2e13ad82f7ae64821f9851a66b4800f1589e413b27b469f28d21970957a3c6da +github.com/c-bata/go-prompt,v0.2.2,h1:uyKRz6Z6DUyj49QVijyM339UJV9yhbr70gESwbNU3e0=,ffe765d86d90afdf8519def13cb027c94a1fbafea7a18e9625210786663436c4 +github.com/c2h5oh/datasize,v0.0.0-20171227191756-4eba002a5eae,h1:2Zmk+8cNvAGuY8AyvZuWpUdpQUAXwfom4ReVMe/CTIo=,b5543f3e104a84e35ac51780968282b455dd30c88730d0da166d8d6512301da6 +github.com/caarlos0/ctrlc,v1.0.0,h1:2DtF8GSIcajgffDFJzyG15vO+1PuBWOMUdFut7NnXhw=,e4b5e9dd37cee2d47ff1c5eeba9a4b6e2b778c349a3615ca9653531f035a3ca6 +github.com/cactus/go-statsd-client/statsd,v0.0.0-20191030180650-a68a2246f89c,h1:rrLWPlpOKwnBpVUXitbgM3+Nie1eBaFfBZqfiPpxVj8=,cbb94149ec688419a91406b374955946c3679b1dde0752d7c0ffdc87432cd0b3 +github.com/caddyserver/caddy,v1.0.3,h1:i9gRhBgvc5ifchwWtSe7pDpsdS9+Q0Rw9oYQmYUTw1w=,029f14052f1ec9937c4028f3231899bf5391d5eeb7f58795d5d470a6f4c338a7 +github.com/campoy/unique,v0.0.0-20180121183637-88950e537e7e,h1:V9a67dfYqPLAvzk5hMQOXYJlZ4SLIXgyKIE+ZiHzgGQ=,4bc20f70e0b170ecdabd740a5de012d05f4c9149e2882fbdb303dc1b1793a77e 
+github.com/casbin/casbin,v1.9.1,h1:ucjbS5zTrmSLtH4XogqOG920Poe6QatdXtz1FEbApeM=,e2ef71d15eb595374d27961d255941b50691f9eaa91b5590f081fe3a4ab195c2 +github.com/cavaliercoder/go-cpio,v0.0.0-20180626203310-925f9528c45e,h1:hHg27A0RSSp2Om9lubZpiMgVbvn39bsUmW9U5h0twqc=,08b68e1d424b545418828c05c46bce5d795bbb8b534871667650ec6b3e7b33a6 +github.com/cenk/backoff,v2.2.1+incompatible,h1:djdFT7f4gF2ttuzRKPbMOWgZajgesItGLwG5FTQKmmE=,e3d1c641f85f548370aedc6bae3d4b975b09e3b2d1d9060f0e72bd5e2710d4c9 +github.com/cenkalti/backoff,v2.2.1+incompatible,h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=,f8196815a1b4d25e5b8158029d5264801fc8aa5ff128ccf30752fd169693d43b +github.com/cenkalti/backoff/v3,v3.0.0,h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c=,c69bf77e7b43cb3935d763c24af3810d9869a664bbcd26ffad9d3dc1bf602006 +github.com/census-instrumentation/opencensus-proto,v0.2.1,h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=,b3c09f3e635d47b4138695a547d1f2c7138f382cbe5a8b5865b66a8e08233461 +github.com/centrify/cloud-golang-sdk,v0.0.0-20190214225812-119110094d0f,h1:gJzxrodnNd/CtPXjO3WYiakyNzHg3rtAi7rO74ejHYU=,dc3de1393d7ae63ce35393630417ff8c5421a2a03cbf1a20680c7d57a74cd311 +github.com/certifi/gocertifi,v0.0.0-20180118203423-deb3ae2ef261,h1:6/yVvBsKeAw05IUj4AzvrxaCnDjN4nUqKjW9+w5wixg=,054d6c3a6f8d78fba2f08fbc2f23ec839d5a4aead4a184270d87d095c80eb6dc +github.com/cespare/cp,v1.1.1,h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU=,25f2ed5bac9ac3c1891ff364b213f6b7b0ee2e7aed13510738ced93ea71860e3 +github.com/cespare/xxhash,v1.1.0,h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=,fe98c56670b21631f7fd3305a29a3b17e86a6cce3876a2119460717a18538e2e +github.com/cespare/xxhash/v2,v2.1.0,h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA=,655feb22a395d9f56315280770d386eb99cdca79a97970812dbd3b30a7940638 +github.com/chaseadamsio/goorgeous,v0.0.0-20170901132237-098da33fde5f,h1:REH9VH5ubNR0skLaOxK7TRJeRbE2dDfvaouQo8FsRcA=,f81f4ef8ac52852b232ea971d009ec88007f1258c29e10e49918a31a99c6c4cc 
+github.com/checkpoint-restore/go-criu,v0.0.0-20190109184317-bdb7599cd87b,h1:T4nWG1TXIxeor8mAu5bFguPJgSIGhZqv/f0z55KCrJM=,1d1f5c6e529c87259305d8ed6bf4d381dabbf85458de187981204339e251a5be +github.com/cheekybits/genny,v1.0.0,h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=,770f3e01425b9b0a87a5e0b29fc6ac2cfa67a3f1265aafb16c96a47bafc304e4 +github.com/cheekybits/is,v0.0.0-20150225183255-68e9c0620927,h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764=,f7bf9ac5b1fc574ef5a373382909af550ef1a7f01182469eaa12e18c7c5fc7cb +github.com/cheggaaa/pb,v2.0.7+incompatible,h1:gLKifR1UkZ/kLkda5gC0K6c8g+jU2sINPtBeOiNlMhU=,383b717f271a2471e57ac52f64dbb77304ec1c0b53c5efeb7a1392668f59d0b4 +github.com/cheggaaa/pb/v3,v3.0.1,h1:m0BngUk2LuSRYdx4fujDKNRXNDpbNCfptPfVT2m6OJY=,781be3118614dfaeb2df44d31d8af36c703c2aaed18e9ca49fa4ef9ba1539236 +github.com/chewxy/hm,v1.0.0,h1:zy/TSv3LV2nD3dwUEQL2VhXeoXbb9QkpmdRAVUFiA6k=,68ab03d9f8cb3d92d6c8234cfd879004be2fd69457d2c9fa6834d1c6ddb22b43 +github.com/chewxy/math32,v1.0.4,h1:dfqy3+BbCmet2zCkaDaIQv9fpMxnmYYlAEV2Iqe3DZo=,7885f637bb90729d04f125e030542b9a6999f9e5dffd3294baffbcdd548bbc3e +github.com/chrismalek/oktasdk-go,v0.0.0-20181212195951-3430665dfaa0,h1:CWU8piLyqoi9qXEUwzOh5KFKGgmSU5ZhktJyYcq6ryQ=,094a132bc1e950677f75e570b17a52f103edd6acd3ec1c0943cf9cda3cd6355a +github.com/chromedp/cdproto,v0.0.0-20191009033829-c22f49c9ff0a,h1:AuIGvB6IuWpMEdfKQ+t77D6dzLpNftzxAsktehYyWn8=,bf85eeebdc65b1e90d851b42f56a3dbf5bcff4923aa426692a1c0d0a1727a522 +github.com/chromedp/chromedp,v0.5.1,h1:PAqhoCWCHzRphYnmmxLSiYk7EEwDplCm4woTCCaV2cQ=,59cd1ab42eeb90e32cc60e77a8fbb19ca629603200d5bd40d611f780e646062b +github.com/chzyer/logex,v1.1.10,h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=,2c94771c1e335a2c58a96444b3768b8e00297747d6ce7e7c14bab2e8b39d91bd +github.com/chzyer/readline,v0.0.0-20180603132655-2972be24d48e,h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=,3dc842677887278fb33d25078d375ae6a7a94bb77a8d205ee2230b581b6947a6 
+github.com/chzyer/test,v0.0.0-20180213035817-a1ea475d72b1,h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=,ad8550bed3c4a94bbef57b9fc5bb15806eaceda00925716404320580d60e2f7d +github.com/cihub/seelog,v0.0.0-20170130134532-f561c5e57575,h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs=,fc279208e6094fb22c8ea651c6e9794844069693c9b916c225276c54f7e76bfe +github.com/circonus-labs/circonus-gometrics,v2.3.1+incompatible,h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=,d8081141497e3cd34844df66af016c7900d58b324fb689e17e57bc053d91c9ba +github.com/circonus-labs/circonusllhist,v0.1.3,h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=,4dc805d9735dd9ca9b8875c0ad23126abb5bc969c5a40c61b5bc891808dbdcb6 +github.com/clbanning/mxj,v1.8.4,h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I=,8947cf617bdd9efc62817c8ddb17bafe497f35abdf10a3c60f295e387f633f70 +github.com/client9/misspell,v0.3.4,h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=,a3af206372e131dd10a68ac470c66a1b18eaf51c6afacb55b2e2a06e39b90728 +github.com/cloudflare/backoff,v0.0.0-20161212185259-647f3cdfc87a,h1:8d1CEOF1xldesKds5tRG3tExBsMOgWYownMHNCsev54=,2aea6d1528c42cf5f111e035bba564fd0481cb4ddb3b50f783f2481d855947cb +github.com/cloudflare/cfssl,v1.4.0,h1:TdyQbj/bDUMUHf2IkcHU2EHUmzCmRLuJ3fFd8EYMg1E=,845fc5f4a7f4c2356d676916fdd7b4b2217b76c8f9b7a960290ab8884d6f8e0e +github.com/cloudflare/cloudflare-go,v0.10.4,h1:7C1D9mtcNFZLCqmhkHK2BlwKKm9fi4cBqY6qpYtQv5E=,e8f6ee817c9b807c98559ff87d4ed7a284738d9dc253b6db7520911d93bd81e3 +github.com/cloudflare/go-metrics,v0.0.0-20151117154305-6a9aea36fb41,h1:/8sZyuGTAU2+fYv0Sz9lBcipqX0b7i4eUl8pSStk/4g=,9176a680ad7a72cf717e3e01ee1ca6b292cb576b543e12ff1770cc58957bc222 +github.com/cloudflare/golz4,v0.0.0-20150217214814-ef862a3cdc58,h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg=,75832d1c2989b2a0d7eb8d2cec300f6d457254d42927a23f522b164833e791d4 
+github.com/cloudflare/redoctober,v0.0.0-20171127175943-746a508df14c,h1:p0Q1GvgWtVf46XpMMibupKiE7aQxPYUIb+/jLTTK2kM=,e69334393aec994f9ba55bbdfa8a65c0cfa46080230068c44ca16a85c0a74079 +github.com/cloudfoundry-community/go-cfclient,v0.0.0-20190201205600-f136f9222381,h1:rdRS5BT13Iae9ssvcslol66gfOOXjaLYwqerEn/cl9s=,f01d41c3c911b59bf717674690799c978f3a841ef695c7ee09f4afe5f7c96e64 +github.com/cloudfoundry-incubator/candiedyaml,v0.0.0-20170901234223-a41693b7b7af,h1:6Cpkahw28+gcBdnXQL7LcMTX488+6jl6hfoTMRT6Hm4=,325af9d6827b8d120a72992c38ba776187fbd947a39c9f1928a43a1a2b262453 +github.com/cloudfoundry/bosh-agent,v2.271.0+incompatible,h1:277mM9hsUzyrd5Qd/5e1LFwiobIYorE7vTBRZohRV8s=,42e253b855d03655ec2cf59ab01a14aa0037f25029517be595dda26ff9a2a552 +github.com/cloudfoundry/bosh-utils,v0.0.0-20191026100324-0b6803ec5382,h1:Rrpgz+K2Zso//XUmqbGlnYi9rw6EtYJ4uLlTNSnSBIw=,c08bbf97e510b2de271fd64f5b2acedfa011b4fd3f30092804992084c67b68b7 +github.com/cloudfoundry/gosigar,v1.1.0,h1:V/dVCzhKOdIU3WRB5inQU20s4yIgL9Dxx/Mhi0SF8eM=,53acb43e5111c6af6af138e1144907bb5f9bf8abc28e71a703502f92c13ba274 +github.com/cloudfoundry/sonde-go,v0.0.0-20171206171820-b33733203bb4,h1:cWfya7mo/zbnwYVio6eWGsFJHqYw4/k/uhwIJ1eqRPI=,6124fdcac54e1baf09703ed2b938a4e2bb55d9cd20f78451f25c16638a95f62d +github.com/cockroachdb/apd,v1.1.0,h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=,fef7ec2fae220f84bfacb17fbfc1b04a666ab7f6fc04f3ff6d2b1e05c380777d +github.com/cockroachdb/apd/v2,v2.0.1,h1:y1Rh3tEU89D+7Tgbw+lp52T6p/GJLpDmNvr10UWqLTE=,9f1c35b8118f70f08150bf5e9da225fa1201f5d0f8c22f326468ea22ab6b791d +github.com/cockroachdb/cockroach-go,v0.0.0-20190916165215-ad57a61cc915,h1:QX2Zc22B15gdWwDCwS7BXmbeD/SWdcRK12gOfZ5BsIs=,e3faa1cdf2a15357d1e2eb200b3bdb81dae3fb084cb04534e0caf27a68487a88 +github.com/cockroachdb/datadriven,v0.0.0-20190809214429-80d97fb3cbaa,h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=,170480bf3daa133144f2578e3f051f0fd98313666642cab64cef3359753a5c32 
+github.com/codahale/hdrhistogram,v0.0.0-20161010025455-3a0bb77429bd,h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=,e7e117da64da2f921b1f9dc57c524430a7f74a78c4b0bad718d85b08e8374e78 +github.com/codegangsta/inject,v0.0.0-20150114235600-33e0aa1cb7c0,h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q=,0a324d56992bffd288fa70a6d10eb9b8a9467665b0b1eb749ac6ae80e8977ee2 +github.com/codegangsta/negroni,v1.0.0,h1:+aYywywx4bnKXWvoWtRfJ91vC59NbEhEY03sZjQhbVY=,2e6301aa682a7c38305f2ee72b276181cd0990f224f9fe115a433a5beb138488 +github.com/codeskyblue/go-sh,v0.0.0-20190412065543-76bd3d59ff27,h1:HHUr4P/aKh4quafGxDT9LDasjGdlGkzLbfmmrlng3kA=,77348ab27860460a015d0e65d08f18ed2194c13981f5fd722143a6e0c2dbb589 +github.com/confluentinc/confluent-kafka-go,v1.1.0,h1:HIW7Nkm8IeKRotC34mGY06DwQMf9Mp9PZMyqDxid2wI=,bc9aee1c8052340809bc43bf015a183985ec3426d404c34acfa3970e3b245340 +github.com/container-storage-interface/spec,v1.2.0,h1:bD9KIVgaVKKkQ/UbVUY9kCaH/CJbhNxe0eeB4JeJV2s=,86ecb02d57af97c9a4de8f2f3cacbceb5c7f2f96ee007133e0cfb9525ce45177 +github.com/containerd/cgroups,v0.0.0-20191011165608-5fbad35c2a7e,h1:3bt+8T1I/CuYx+a5ww32+UT4fc9x8iRiXrhfduFTlBU=,4646f14f27a365ff08abb1266b7ca4dffc1acd5e8e74b57211acbba22b496d46 +github.com/containerd/console,v0.0.0-20181022165439-0650fd9eeb50,h1:WMpHmC6AxwWb9hMqhudkqG7A/p14KiMnl6d3r1iUMjU=,62a7f1da11b3be4c0ef4f9f03b99dcf59dc988f062749f35e4e6bb585fb4e4fe +github.com/containerd/containerd,v1.3.0,h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY=,e3f529147f2c909c85ac461126ad092a3c5d5a2abcc4f3c22600685af6dc2f08 +github.com/containerd/continuity,v0.0.0-20190827140505-75bee3e2ccb6,h1:NmTXa/uVnDyp0TY5MKi197+3HWcnYWfnHGyaFthlnGw=,ef1a3a4c2c1508d293eb2730e47e9601cba19d939393b1018d8e476b30dfd90b +github.com/containerd/fifo,v0.0.0-20190816180239-bda0ff6ed73c,h1:KFbqHhDeaHM7IfFtXHfUHMDaUStpM2YwBR+iJCIOsKk=,0c1b858ee9dd28bd915a3f7bd108b98b1d689be3c14535e7e8aee4a60c4a72c0 
+github.com/containerd/go-runc,v0.0.0-20190923131748-a2952bc25f51,h1:vmF3zULCGpZ4QJCCLsGUXX7tNXW+0x3r9owerRAmRaU=,76ce6296dc07f1f5957867e9a5925cf9e16c69ad2b635f74a4ec471e6672ee51 +github.com/containerd/ttrpc,v0.0.0-20191028202541-4f1b8fe65a5c,h1:+RqLdWzn0xFunb+sxXaEzHOg8NuEG/eaI+9C1xXX8Mw=,f43884f8f37259c4b50a4413092064f35abd03b9db3bbe2ca3264b5a4b591b04 +github.com/containerd/typeurl,v0.0.0-20190911142611-5eb25027c9fd,h1:bRLyitWw3PT/2YuVaCKTPg0cA5dOFKFwKtkfcP2dLsA=,aa4e0823acf7b686a9521617134a171c5b5813de302e3fba742cd3b7f43ba944 +github.com/containernetworking/cni,v0.7.1,h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE=,b83f1b8e9bba747e41512737383da57e517cf425beb1bd58882904dae9348b1d +github.com/containers/image,v3.0.2+incompatible,h1:B1lqAE8MUPCrsBLE86J0gnXleeRq8zJnQryhiiGQNyE=,dadc25bfff923d4f2c8b570471be3b0fd1449f42251fb6c318b68e04f6d47b3a +github.com/containers/storage,v1.12.13,h1:GtaLCY8p1Drlk1Oew581jGvB137UaO+kpz0HII67T0A=,08f5ee958be629b73ff02296eb11f4b0698dbd90e585ce019c5428a8e1d371d4 +github.com/containous/flaeg,v1.4.1,h1:VTouP7EF2JeowNvknpP3fJAJLUDsQ1lDHq/QQTQc1xc=,d097191570bb92f920cd15500a93205e6e93b5ee4723a51c9b8e3bfbcfaae505 +github.com/corbym/gocrest,v1.0.3,h1:gwEdq6RkTmq+09CTuM29DfKOCtZ7G7bcyxs3IZ6EVdU=,f13221d177442318b04f468fa57ea92bd9892d86e7cf7bb7299e0c58cea9df48 +github.com/coredns/coredns,v1.1.2,h1:bAFHrSsBeTeRG5W3Nf2su3lUGw7Npw2UKeCJm/3A638=,cbf720a9af4fdc5be08b0eea67fe219bb08c75292e22dca90095bf45cbd4a926 +github.com/coreos/bbolt,v1.3.3,h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=,63ea574f28bd03b6d2a82304e0f7c96dcb30fa048311a4c8c3ad512dbacc4630 +github.com/coreos/clair,v0.0.0-20180919182544-44ae4bc9590a,h1:glxUtT0RlaVJU86kg78ygzfhwW6D+uj5H+aOK01QDgI=,3bc8c4b06a61c5673fcc69d5278b3a5313633fca1166e94a7140c363399c3dc6 +github.com/coreos/etcd,v3.3.17+incompatible,h1:f/Z3EoDSx1yjaIjLQGo1diYUlQYSBrrAQ5vP8NjwXwo=,d7ca8db509166ce05482c9b3e80cfb8d1086691901e80202f571d152da912153 
+github.com/coreos/go-etcd,v2.0.0+incompatible,h1:bXhRBIXoTm9BYHS3gE0TtQuyNZyeEMux2sDi4oo5YOo=,4b226732835b9298af65db5d075024a5971aa11ef4b456899a3830bccd435b07 +github.com/coreos/go-iptables,v0.4.3,h1:jJg1aFuhCqWbgBl1VTqgTHG5faPM60A5JDMjQ2HYv+A=,4626df8f719f93e5d66bd995d586ae3540c24b2203c0d2aab7c6d5e60f89a3dc +github.com/coreos/go-oidc,v2.1.0+incompatible,h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM=,e2e123270614dd7d47d95ae1fce80a9102df019f9e820d4f5cf5c92c64e1ad91 +github.com/coreos/go-semver,v0.3.0,h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=,b2fc075395ffc34cff4b964681d0ae3cd22096cfcadd2970eeaa877596ceb210 +github.com/coreos/go-systemd,v0.0.0-20190719114852-fd7a80b32e1f,h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=,22237f0aed3ab6018a1025c65f4f45b4c05f9aa0c0bb9ec880294273b9a15bf2 +github.com/coreos/pkg,v0.0.0-20180928190104-399ea9e2e55f,h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=,7fe161d49439a9b4136c932233cb4b803b9e3ac7ee46f39ce247defc4f4ea8d7 +github.com/coreos/rkt,v1.30.0,h1:Kkt6sYeEGKxA3Y7SCrY+nHoXkWed6Jr2BBY42GqMymM=,436e294b735bada49407ad3c066ae251ef105ce59076ef8f0f732c586a72970e +github.com/cosiner/argv,v0.0.0-20170225145430-13bacc38a0a5,h1:rIXlvz2IWiupMFlC45cZCXZFvKX/ExBcSLrDy2G0Lp8=,deb11c1c7a2fa44b3497731d497b3d7be5a51cf696ed43280e01822e2eed9b96 +github.com/cosmos/cosmos-sdk,v0.35.0,h1:EPeie1aKHwnXtTzKggvabG7aAPN+DDmju2xquvjFwao=,ccc975b48e3b40f4eb054e28e9243ecb48c0d8ecdf52b9512da26a8200cc7c43 +github.com/cosmos/go-bip39,v0.0.0-20180819234021-555e2067c45d,h1:49RLWk1j44Xu4fjHb6JFYmeUnDORVwHNkDxaQ0ctCVU=,e41d7ea781b15421a4690bedf78543f2eaad00c36c439dd4973131dec1985177 +github.com/cosmos/ledger-cosmos-go,v0.10.3,h1:Qhi5yTR5Pg1CaTpd00pxlGwNl4sFRdtK1J96OTjeFFc=,f1089701d8868e4ff3fd9e9a4104476963f725a713ee2a476b4ef8094a0bca20 +github.com/cosmos/ledger-go,v0.9.2,h1:Nnao/dLwaVTk1Q5U9THldpUMMXU94BOTWPddSmVB6pI=,a77b2063a64133d8dda638d5d602071429d7e2500576bfff5c1763f8572a8517 
+github.com/couchbase/go-couchbase,v0.0.0-20191031153726-96c2e23d589a,h1:eKnoG+AQQQIxHEcBIbudmwLJv3S9UQU6oGHzvqhttqE=,5dd3e610f24adb44b31e7ecc6a80a8974b769bd622d569c69fb98bd02610bbef +github.com/couchbase/gomemcached,v0.0.0-20191004160342-7b5da2ec40b2,h1:vZryARwW4PSFXd9arwegEywvMTvPuXL3/oa+4L5NTe8=,5b9a280cd2d546cd0d70fbd6828e73fa0b07fb9d3c0b6bff88d8e23d8e4256f4 +github.com/couchbase/goutils,v0.0.0-20190315194238-f9d42b11473b,h1:bZ9rKU2/V8sY+NulSfxDOnXTWcs1rySqdF1sVepihvo=,a2820e0f01d8c944b70c70515b9924f41b450f3688d19ad4d506b2b9b367c433 +github.com/couchbase/vellum,v0.0.0-20190111184608-e91b68ff3efe,h1:2o6Y7KMjJNsuMTF8f2H2eTKRhqH7+bQbjr+D+LnhE5M=,06e3ca28a98c95bcdfd909168e1dcf45a6667ef59ad59112a01e6bbdcf591e84 +github.com/couchbaselabs/go-couchbase,v0.0.0-20190708161019-23e7ca2ce2b7,h1:1XjEY/gnjQ+AfXef2U6dxCquhiRzkEpxZuWqs+QxTL8=,3429eb55dd38b07bab5e9a57a3e2451449b49bdbc6f16585f8b7557067572499 +github.com/cpu/goacmedns,v0.0.1,h1:GeIU5chKys9zmHgOAgP+bstRaLqcGQ6HJh/hLw9hrus=,12acca48bb444f3832a87b8d238e573bbfa60e5c25dfcf6787a003dfacaf055d +github.com/cpuguy83/go-md2man,v1.0.10,h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=,b9b153bb97e2a702ec5c41f6815985d4295524cdf4f2a9e5633f98e9739f4d6e +github.com/cpuguy83/go-md2man/v2,v2.0.0,h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=,f2fdd06287a80f1bea5552f572d7f2314ec829285a3040b63469e0635f66fb6d +github.com/creack/goselect,v0.1.0,h1:4QiXIhcpSQF50XGaBsFzesjwX/1qOY5bOveQPmN9CXY=,24d8028970032b1a45091ad8ff9b9c280693def1433cb5948ed92c0c975226ea +github.com/creack/pty,v1.1.7,h1:6pwm8kMQKCmgUg0ZHTm5+/YvRK0s3THD/28+T6/kk4A=,e7ea3403784d186aefbe84caed958f8cba2e72a04f30cdb291ece19bec39c8f3 +github.com/cskr/pubsub,v1.0.2,h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0=,39e40a42c10058c188f331ed0bb660a0504d7c2ddd9e835a9970786fdc35feb0 +github.com/cupcake/rdb,v0.0.0-20161107195141-43ba34106c76,h1:Lgdd/Qp96Qj8jqLpq2cI1I1X7BJnu06efS+XkhRoLUQ=,019a246ac0d7f6fcf3758587a031767730cfb824003c311686a4eb552a1dcc57 
+github.com/cweill/gotests,v1.5.3,h1:k3t4wW/x/YNixWZJhUIn+mivmK5iV1tJVOwVYkx0UcU=,7ced96d4223a0afcd41922c4d3ae064493dd5bedbc72f6541716fce1cab24b7d +github.com/cxr29/aliyun-openapi-go-sdk,v0.0.0-20151123082822-0b043e4d1e0c,h1:WEWetvNRZlk7JW3M4fycSA3f/2xZGxRdrwmpgRkGoQc=,6c80128745e3acdd01f59bc6c6e3a1f24193e89eb627ad6dcc615e763878b6e4 +github.com/cyphar/filepath-securejoin,v0.2.2,h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=,d022873dbb9e8d3b7a43c9dedbea54dfc9a6c15f9632ba522a1257e8b948c100 +github.com/cznic/b,v0.0.0-20181122101859-a26611c4d92d,h1:SwD98825d6bdB+pEuTxWOXiSjBrHdOl/UVp75eI7JT8=,1c34b27ce98f70cb0e97c2bbe0bdae216cc1ea6b2617b0e984e2ce30adc06338 +github.com/cznic/fileutil,v0.0.0-20181122101858-4d67cfea8c87,h1:94XgeeTZ+3Xi9zsdgBjP1Byx/wywCImjF8FzQ7OaKdU=,109b4c91722a0f9a4f941d77eff34270684e53ca36e7d14ab2cd4a4e80841d73 +github.com/cznic/golex,v0.0.0-20181122101858-9c343928389c,h1:G8zTsaqyVfIHpgMFcGgdbhHSFhlNc77rAKkhVbQ9kQg=,d2b11a6e0e1de5125a2d550650b4cbb7bf44280ebf1cda74ef4a63e3cfa11012 +github.com/cznic/internal,v0.0.0-20181122101858-3279554c546e,h1:58AcyflCe84EONph4gkyo3eDOEQcW5HIPfQBrD76W68=,bc177d001529bca3f46aa84855db4e783a041c188d3ba237f68fa4522bdca74b +github.com/cznic/kv,v0.0.0-20181122101858-e9cdcade440e,h1:8ji4rZgRKWMQUJlPNEzfzCkX7yFAZFR829Mrh7PXxLA=,4f992bdaf6d17487c7b16669b6d55afa76b321e63f8e4b6a6d1126b44b18b0d9 +github.com/cznic/lldb,v1.1.0,h1:AIA+ham6TSJ+XkMe8imQ/g8KPzMUVWAwqUQQdtuMsHs=,ddec7228568547a5fbfbc6a91208cbcafeed4338a38c41d483448957e4bec186 +github.com/cznic/mathutil,v0.0.0-20181122101859-297441e03548,h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso=,8f69a36f60d885e011b0a90b91246a7e88223cb2883dc6e71eab3f42d653231b +github.com/cznic/parser,v0.0.0-20181122101858-d773202d5b1f,h1:DUtr2TvhM9rmiHKVJWoLqDY2+MdxljW9hlaS/oYoi1c=,18b746a4090720bd9dfe219d0f7bb7fb28565df70417208d7e99dfd79f1ea264 +github.com/cznic/ql,v1.2.0,h1:lcKp95ZtdF0XkWhGnVIXGF8dVD2X+ClS08tglKtf+ak=,05164e379d43eaada0efdd763a50a9ef8f4b7f73a5de7ab866093bb25a4fb747 
+github.com/cznic/sortutil,v0.0.0-20181122101858-f5f958428db8,h1:LpMLYGyy67BoAFGda1NeOBQwqlv7nUXpm+rIVHGxZZ4=,67783879c1ae4472fdabb377b1772e4e4c5ced181528c2fc4569b565cb47a57b +github.com/cznic/strutil,v0.0.0-20181122101858-275e90344537,h1:MZRmHqDBd0vxNwenEbKSQqRVT24d3C05ft8kduSwlqM=,867902276444cbffca84d9d5f63754e8b22092d93a94480d8dfebd234ac8ffbd +github.com/cznic/y,v0.0.0-20181122101901-b05e8c2e8d7b,h1:gvFsf4zJcnW6GRN+HPGTxwuw+7sTwzmoeoBQQCZDEnk=,8c84f5e4f9dc5f0809d8ad22d057e404c3e8644dc28e8fc52abbb1d2350f8d3e +github.com/cznic/zappy,v0.0.0-20181122101859-ca47d358d4b1,h1:ytLS5Cgkxq6jObotJ+a13nsejdqzLFPliDf8CQ8OkAA=,505c19b52924ee21b65611bc45640d3ff4671e50ee04f7c17c38342190645595 +github.com/d2g/dhcp4,v0.0.0-20170904100407-a1d1b6c41b1c,h1:Xo2rK1pzOm0jO6abTPIQwbAmqBIOj132otexc1mmzFc=,15df9468cf548a626e1319e92d550432512c4319cf555bf278ea9215de3504e3 +github.com/daaku/go.zipexe,v1.0.0,h1:VSOgZtH418pH9L16hC/JrgSNJbbAL26pj7lmD1+CGdY=,74d7a0242c03c3c03220e56a59da5f97d3478743250740df538e05e6b609f553 +github.com/danwakefield/fnmatch,v0.0.0-20160403171240-cbb64ac3d964,h1:y5HC9v93H5EPKqaS1UYVg1uYah5Xf51mBfIoWehClUQ=,f601e8d25a43ed32e00851e1686a93b0175dadea8f4e32c8af2f1533f20736bc +github.com/dave/jennifer,v1.2.0,h1:S15ZkFMRoJ36mGAQgWL1tnr0NQJh9rZ8qatseX/VbBc=,85b37a1b99b7d67664389b8c11b7174f521a396bb59d4e0e766df16336a7f112 +github.com/dave/services,v0.1.0,h1:7isGzpZHJWmOYTV+Pn3f6gpQUmrveJqsQpAkH0HXFbU=,e52a7ffba3aa07cca4888e08248771211abd139928b5cde9b228a61da88eddcc +github.com/davecgh/go-spew,v1.1.1,h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=,6b44a843951f371b7010c754ecc3cabefe815d5ced1c5b9409fb2d697e8a890d +github.com/davecgh/go-xdr,v0.0.0-20161123171359-e6a2ba005892,h1:qg9VbHo1TlL0KDM0vYvBG9EY0X0Yku5WYIPoFWt8f6o=,11cb87912b5288e13534cb396935694c257eb9164ffc20ce21e3bc9955edd82a +github.com/daviddengcn/go-colortext,v0.0.0-20180409174941-186a3d44e920,h1:d/cVoZOrJPJHKH1NdeUjyVAWKp4OpOT+Q+6T1sH7jeU=,159d727adf4f0763ec3dc6156fd46531a2afbffdc17feeb6b5ffe2eb54b35d41 
+github.com/davyxu/cellnet,v4.1.0+incompatible,h1:zDRqhkFRhBTD7ajra2888aoRLN1qlv8LV8+qHg/emO4=,f085f088b68b2e379a6dc37501ef2c9809836cfac147a30ed3025571c2d57df7 +github.com/davyxu/golog,v0.1.0,h1:SsV3m2x37sCzFaQzq5OHc5S+PE2VMiL7XUx34JCa7mo=,a3c240bc4b958fa4b4e73caa59c28fc658afbabdb1f28b237874803ca96dcb1f +github.com/dchest/blake256,v1.0.0,h1:6gUgI5MHdz9g0TdrgKqXsoDX+Zjxmm1Sc6OsoGru50I=,9a9ed00a3024f2f7480b59c7b2ee1013cae3026d7dc2f065ce225dcce8cf357e +github.com/dchest/siphash,v1.2.1,h1:4cLinnzVJDKxTCl9B01807Yiy+W7ZzVHj/KIroQRvT4=,877a468e533e28c777c59b3dfea175b38a1f0bc1f8551e3a9e1739b1821c7e3e +github.com/dchest/uniuri,v0.0.0-20160212164326-8902c56451e9,h1:74lLNRzvsdIlkTgfDSMuaPjBr4cf6k7pwQQANm/yLKU=,41db9fb52a841d11d8592a1d4f56e8a440e3991b699ae0f95ab5f5a7b2aeb24c +github.com/deckarep/golang-set,v1.7.1,h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ=,86606609df42529fda55a15475b495f993f0c1cc4be6e1e50a9165a514d1ed71 +github.com/decker502/dnspod-go,v0.2.0,h1:6dwhUFCYbC5bgpebLKn7PrI43e/5mn9tpUL9YcYCdTU=,381fb0bb29ac973f318db3d464f76e5d3016d4963c78ccd7df7dbc4231a68455 +github.com/decred/base58,v1.0.0,h1:BVi1FQCThIjZ0ehG+I99NJ51o0xcc9A/fDKhmJxY6+w=,75b1a2c78759ee2e8755156806ce770c9199464c2d58541388d5ec7c000c99e1 +github.com/decred/dcrd/chaincfg,v1.5.1,h1:u1Xbq0VTnAXIHW5ECqrWe0VYSgf5vWHqpSiwoLBzxAQ=,7344cd4dc90a82342c90811c8180b1fef6c79e9c49caa38135f271cf0ecb056f +github.com/decred/dcrd/chaincfg/chainhash,v1.0.2,h1:rt5Vlq/jM3ZawwiacWjPa+smINyLRN07EO0cNBV6DGU=,a8b24e2c4e64015430b8a6502f9e8c3eeea246021638884dc510508eccda31a0 +github.com/decred/dcrd/chaincfg/v2,v2.0.2,h1:VeGY52lHuYT01tIGbvYj+OO0GaGxGaJmnh+4vGca1+U=,906dec975cf574c55f2eb588dc91a4ddd6be273eaddfbeb45288ea6aebcc6306 +github.com/decred/dcrd/crypto/blake256,v1.0.0,h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=,cd8bbdae14641f0ba44430fc66990dd37bbfcf1e21a965a9fd1871d16cac127d 
+github.com/decred/dcrd/dcrec,v1.0.0,h1:W+z6Es+Rai3MXYVoPAxYr5U1DGis0Co33scJ6uH2J6o=,a1e16c5ef3633f2dfa23c052778552cf9300821197f5b2dc547e20dd9d45756b +github.com/decred/dcrd/dcrec/edwards,v1.0.0,h1:UDcPNzclKiJlWqV3x1Fl8xMCJrolo4PB4X9t8LwKDWU=,7ed52f3316f5a47c5925e23bebf5016ecfd75e7ac340714b4b94b0e25bdf0611 +github.com/decred/dcrd/dcrec/secp256k1,v1.0.2,h1:awk7sYJ4pGWmtkiGHFfctztJjHMKGLV8jctGQhAbKe0=,5fa2c17fd611665a39e6435283445ec3b46a5b52d14661e04bd1f7ef295ba9d3 +github.com/decred/dcrd/dcrutil,v1.4.0,h1:xD5aUqysGQnsnP1c9J0kGeW8lDIwFGC3ja/gE3HnpCs=,6de50428375fca174f4861f8aa45549360e7733bca0184a882448f0b9f94be2e +github.com/decred/dcrd/dcrutil/v2,v2.0.0,h1:HTqn2tZ8eqBF4y3hJwjyKBmJt16y7/HjzpE82E/crhY=,fa91eb7c5062e0f3f6e7d1b9d8e1a89698f6ee6e7f8f4941929f6d89a293ec76 +github.com/decred/dcrd/wire,v1.3.0,h1:X76I2/a8esUmxXmFpJpAvXEi014IA4twgwcOBeIS8lE=,e17b78d19d0056503627826a0e599ed14a7a4fc8aa2c31c47b12ffc1864aedb1 +github.com/decred/slog,v1.0.0,h1:Dl+W8O6/JH6n2xIFN2p3DNjCmjYwvrXsjlSJTQQ4MhE=,1c27399a3f38fb7b581f4dbe11a0b3e3d5d8afcc8109880771c0e44135388bb0 +github.com/denisenkom/go-mssqldb,v0.0.0-20191001013358-cfbb681360f0,h1:epsH3lb7KVbXHYk7LYGN5EiE0MxcevHU85CKITJ0wUY=,ff2349c73cee9e54cd61e85af75d7d0537fb5f070da5a737b5abede1f7d579ac +github.com/denkhaus/bitshares,v0.6.1-0.20190502142618-5ae8c00cb394,h1:PpFS6pvAoRwH13WlqnX/mrxesu6LNFtiVwoWgfNLCeY=,af76695d3e546cad6a8b56d9d5e431bfeb12bfce643a395fb45d8827409dd9ff +github.com/denkhaus/gojson,v1.0.0,h1:p1hAlN/yAvRvzbdO1HNDQvmBslfyk64IMt3O3DtftPU=,5c0d8d98a53be88e2801d90124e28ba781d2c6a09aaf9a57272df92c5c0e0fe2 +github.com/denkhaus/logging,v0.0.0-20180714213349-14bfb935047c,h1:imM7UU8JD1sNuk2tVEk3QvrY2RZ5f/DOB+UA7c5ThGs=,5a1bb81f35dc7847b0cb8efe3f1e3bac3a34c9f11950a7c7643115c952fa3166 +github.com/denverdino/aliyungo,v0.0.0-20170926055100-d3308649c661,h1:lrWnAyy/F72MbxIxFUzKmcMCdt9Oi8RzpAxzTNQHD7o=,e6ca432bab5a7b1d233c9c1495d32668d31b18803d65f3af27f1d8240b6547d4 
+github.com/detached/gorocket,v0.0.0-20170629192631-d44bbd3f26d2,h1:zwp9mAr+YvsgLCFIVJ3/m61Z+NRX35jbD0HBa62ryHY=,f54c9dc20ba925f0b2a726cc1a22466c6e05d7e0080f6e4b5f26e60c15938712 +github.com/detailyang/go-fallocate,v0.0.0-20180908115635-432fa640bd2e,h1:lj77EKYUpYXTd8CD/+QMIf8b6OIOTsfEBSXiAzuEHTU=,dcc45102d034d78825d1aa9d2f61720b4b0d9f76314a7a53b32cf032713a0bde +github.com/devfeel/dotweb,v1.7.3,h1:tt7YtCIp9JPmAS2yksVIsw6CiUkUSz3kVLSiCzRaWDw=,7cdb6d4872bb4c82fc333722fb2be3e39fe391b121550421d240d3008c8e00a0 +github.com/devigned/tab,v0.1.1,h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA=,528e21b578f28a998453551c51abfdeed154c981486d49a8ad7c149743ea450f +github.com/dghubble/oauth1,v0.6.0,h1:m1yC01Ohc/eF38jwZ8JUjL1a+XHHXtGQgK+MxQbmSx0=,6d4be6cfc2771fab15e47d2aa9c40d347dab7166f2cae3c248aeb51b10c88b4a +github.com/dghubble/sling,v1.3.0,h1:pZHjCJq4zJvc6qVQ5wN1jo5oNZlNE0+8T/h0XeXBUKU=,880e7f44ee68eae979a34afb2f95ab1c7555712153c45be01d15cbc5991a5fe6 +github.com/dgraph-io/badger,v1.6.0,h1:DshxFxZWXUcO0xX476VJC07Xsr6ZCBVRHKZ93Oh7Evo=,8329ae390aebec6ae360356e77a2743357ad4e0d0bd4c3ae03b7d17e01ad70aa +github.com/dgraph-io/dgo,v1.0.0,h1:DRuI66G+j0XWDOXly4v5PSk2dGkbIopAZIirRjq7lzI=,dae0ee7690b0c58d72be328263d55394f88a4924a8274017021736d702be9cee +github.com/dgrijalva/jwt-go,v3.2.0+incompatible,h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=,26b028eb2d9ee3aef26a96d6790e101f4088ef901008ebab17096966bf6522ad +github.com/dgryski/go-farm,v0.0.0-20190423205320-6a90982ecee2,h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=,d1fb60f1ce562acb07569d53b43353b73f439911c27eecef716305cd2d730258 +github.com/dgryski/go-jump,v0.0.0-20170409065014-e1f439676b57,h1:qZNIK8jjHgLFHAW2wzCWPEv0ZIgcBhU7X3oDt/p3Sv0=,92666f8caf4843c5a9b6bdb0f48f261922595683351958b0909884adf064cfb2 +github.com/dgryski/go-metro,v0.0.0-20180109044635-280f6062b5bc,h1:8WFBn63wegobsYAX0YjD+8suexZDga5CctH4CCTx2+8=,3f97b3cdeaee7b4fbf4fa06b7c52e3ee6bca461a100077892e861c6c8fc03722 
+github.com/dgryski/go-sip13,v0.0.0-20190329191031-25c5027a8c7b,h1:Yqiad0+sloMPdd/0Fg22actpFx0dekpzt1xJmVNVkU0=,81d318bf94b85b240278c35d7ef6015510751e31ffa89eb6287d6d236493551e +github.com/digitalocean/go-libvirt,v0.0.0-20190626172931-4d226dd6c437,h1:phR13shVFOIpa1pnLBmewI9p16NEladLPvVylLPeexo=,7748e819d19524170969d2a470c212bb3936778ff630f833adc286e8c21e37cc +github.com/digitalocean/go-qemu,v0.0.0-20181112162955-dd7bb9c771b8,h1:N7nH2py78LcMqYY3rZjjrsX6N7uCN7sjvaosgpXN9Ow=,7530507881e53214ed3c0fb770fb3faed36a57ca6eb376bd2cec91a0e5d575a6 +github.com/digitalocean/godo,v1.11.1,h1:OsTh37YFKk+g6DnAOrkXJ9oDArTkRx5UTkBJ2EWAO38=,5d1ad5b25ad252fb1a02366087fe6e94845ec2dce64dc6e875ed3253a7e0f8ff +github.com/dimchansky/utfbom,v1.1.0,h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=,27fed73a62fcf06d4ceb28846e5d40786b7e81213aa0d1f4d840e89d25f285f7 +github.com/dimfeld/httppath,v0.0.0-20170720192232-ee938bf73598,h1:MGKhKyiYrvMDZsmLR/+RGffQSXwEkXgfLSA08qDn9AI=,ff59ff07643eccf8a166cc9693fbd18c42869e0bfcc0a9c979435847a7ae4fb1 +github.com/dimfeld/httptreemux,v5.0.1+incompatible,h1:Qj3gVcDNoOthBAqftuD596rm4wg/adLLz5xh5CmpiCA=,031da29a128234db595fdce84301cfe5ff13b4be03c1e344cfe7daadb68559e9 +github.com/disintegration/gift,v1.2.1,h1:Y005a1X4Z7Uc+0gLpSAsKhWi4qLtsdEcMIbbdvdZ6pc=,d9a688a552dc8f5b2319325541e2bbc5c0af66b6e78273058893b259fcca5a0f +github.com/disintegration/imaging,v1.6.1,h1:JnBbK6ECIZb1NsWIikP9pd8gIlTIRx7fuDNpU9fsxOE=,209474c4c0348672c6747a7a73ff887a6d9458b67df78ff342ee3fd628156412 +github.com/djherbis/atime,v1.0.0,h1:ySLvBAM0EvOGaX7TI4dAM5lWj+RdJUCKtGSEHN8SGBg=,fe677e5c1a8bb168904c0856010bed33a770d49eda9edc6dc1b567940bf20afc +github.com/dlclark/regexp2,v1.2.0,h1:8sAhBGEM0dRWogWqWyQeIJnxjWO6oIjl8FKqREDsGfk=,61054c243455e034d7a81e2f6a888cab5a81056a0cc43463cb3536b42cfe7cc1 +github.com/dmotylev/goproperties,v0.0.0-20140630191356-7cbffbaada47,h1:sP2APvSdZpfBiousrppBZNOvu+TE79Myq4kkmmrtSuI=,8afdf7b2989dff361cc80e560c1bd17e5c4ad37826b5caf4b65af8e152cdc6cb 
+github.com/dnaeon/go-vcr,v1.0.1,h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=,8f586f95ce5567ef2ae702cf98e56a09ea0cc6171f5cd959e6fcf7502e00dabc +github.com/dnsimple/dnsimple-go,v0.30.0,h1:IBIrn9jMKRMwporIRwdFyKdnHXVmwy6obnguB+ZMDIY=,5821d521b402f93dc19f6eb332d5f4159800336f53626c6dedd99ce4c351a55a +github.com/dnstap/golang-dnstap,v0.1.0,h1:hKtRrSTEHuTmG0vCLgKU8WJkXCARoAJMDrlXHTTPBK8=,fe23fd626917c7f45ead63cef4a4bd1bb366bb30ba5873d9ee5432e79b971349 +github.com/docker/cli,v0.0.0-20191031185610-968ce1ae4d45,h1:KJ4FsevlLR30Q2H1aCACmL3CEoUTAZf16PMAJj+ofXI=,145fef54aa162edc123d514ed7a20bc14564581ad95bb6aae7294c3c08df55fd +github.com/docker/distribution,v2.7.1+incompatible,h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=,be78bc43d74873b67afe05a6b244490088680dab75bdfaf26d0fd4d054595bc7 +github.com/docker/docker,v1.13.1,h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo=,1decea9f21d4165bc134de72c51055612ff6992409cd56f3c35b7f78f3b542bd +github.com/docker/docker-ce,v0.0.0-20180924210327-f53bd8bb8e43,h1:gZ4lWixV821UVbYtr+oz1ZPCHkbtE+ivfmHyZRgyl2Y=,d670d1c5faec51ee82dbc5d479a7fca60916c1b30547994c206622ab338a735a +github.com/docker/docker-credential-helpers,v0.6.3,h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=,4dd2971b28524442b7a01e118a8040c3ab90eca50d55a7a232af514d18187324 +github.com/docker/engine-api,v0.4.0,h1:D0Osr6+45yAlQqLyoczv5qJtAu+P0HB0rLCddck03wY=,0db5d01c8401192b4eee6d2f9c34aa297d1a892f25230b470efd73f8f7ab59a4 +github.com/docker/go,v1.5.1-1,h1:hr4w35acWBPhGBXlzPoHpmZ/ygPjnmFVxGxxGnMyP7k=,fd626ee84b1eaea11c2a374fda5ed5ca8ad820bb4746ee31519efeb5038077b5 +github.com/docker/go-connections,v0.4.0,h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=,570ebcee7e6fd844e00c89eeab2b1922081d6969df76078dfe4ffacd3db56ada +github.com/docker/go-events,v0.0.0-20190806004212-e31b211e4f1c,h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=,0f654eb0e7e07c237a229935ea3488728ddb5b082af2918b64452a1129dccae3 
+github.com/docker/go-metrics,v0.0.1,h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=,4efab3706215f5b2d29ba823d3991fd6e2f81c02ce45ef0c73c019ebc90e020b +github.com/docker/go-units,v0.4.0,h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=,0f2be7dce7b1a0ba6a4a786eb144a3398e9a61afc0eec5799a1520d9906fc58c +github.com/docker/libkv,v0.2.1,h1:PNXYaftMVCFS5CmnDtDWTg3wbBO61Q/cEo3KX1oKxto=,7a0c81782d38b550acc2c0ef0ce397adfc13716f483be6a47d0b97fbc6eea0d5 +github.com/docker/libnetwork,v0.5.6,h1:hnGiypBsZR6PW1I8lqaBHh06U6LCJbI3IhOvfsZiymY=,7aea42c405304c495bf159e5004674eb503eb0120eb4c5d1275fdba65d88cc53 +github.com/docker/libtrust,v0.0.0-20160708172513-aabc10ec26b7,h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=,bf1c1230a3b5c0dadb2c9366aabc99181e708369d735dc83c3eb89f597f42adb +github.com/docker/machine,v0.16.2,h1:jyF9k3Zg+oIGxxSdYKPScyj3HqFZ6FjgA/3sblcASiU=,1c13210831cafddba1abbf9ef034135233252c62927df396fee6fa0a45efcb43 +github.com/docker/notary,v0.6.1,h1:6BO5SNujR+CIuj2jwT2/yD6LdD+N9f5VbzR+nfzB5ZA=,439fd6664fb75323d78c5a362483f3375a6ac61a3dd08438a503df470a34f300 +github.com/docker/spdystream,v0.0.0-20160310174837-449fdfce4d96,h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=,70964f9eef29843634539b8d6e09c8b51ed6aa96b5deda28b7a44613327a22f2 +github.com/docker/swarmkit,v1.12.0,h1:vcbNXevt9xOod0miQxkp9WZ70IsOCe8geXkmFnXP2e0=,b9d09ff080beb0db2d4d4ebca93438dd080769266eb7aab6d5182e1ad7ba2c3a +github.com/docopt/docopt-go,v0.0.0-20180111231733-ee0de3bc6815,h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=,00aad861d150c62598ca4fb01cfbe15c2eefb5186df7e5d4a59286dcf09556c8 +github.com/documize/community,v3.2.0+incompatible,h1:ilePrhqxjc+BWpDRsXPyLyMEE1BrGlqCPMg3T577mzQ=,e9e06bdbef4500c0d2cc609164fe23bc05f8234c2c8483c8c9bc3ffffe22bbf7 +github.com/dogmatiq/dogma,v0.6.0,h1:HdJ0cTcORIxZRTB5Z7RdsBXEr18gB3so7FMIHYiAhEQ=,db91004377004aa3c5f0c462205beea995e93a0be13d7d99d3232dc03209f65c 
+github.com/donovanhide/eventsource,v0.0.0-20171031113327-3ed64d21fb0b,h1:eR1P/A4QMYF2/LpHRhYAts9wyYEtF7qNk/tVNiYCWc8=,2b911efc5101522ce50399cd7831ef931896541893955441168783666811a1d1 +github.com/dop251/goja,v0.0.0-20190912223329-aa89e6a4c733,h1:cyNc40Dx5YNEO94idePU8rhVd3dn+sd04Arh0kDBAaw=,485156ad52ca9651f728a6039af63f9f11c5bf49846e513635d5fa35d8d39097 +github.com/dotcloud/docker,v1.13.1,h1:jjwxeyQYDwROaGy/YEodF+srQW5hJAnNnaTcfcKoU+0=,83884e41d26b32eae2387080b245792ac8fc0200f645aef02656cb5e4b3d0595 +github.com/drone/go-scm,v1.6.0,h1:PZZWLeSHHwdc6zbSQpg9n0CNoRB+8DAINzX9X/wJifY=,e26d2bc63c53a66252ab24a1b45ced06825bb4101cbd746c581683cf39e520b6 +github.com/dsnet/compress,v0.0.0-20171208185109-cc9eb1d7ad76,h1:eX+pdPPlD279OWgdx7f6KqIRSONuK7egk+jDx7OM3Ac=,25f6bcccb4c1cf6d97ad69253a394bd0a52a633caa623d75b30729aed495a73d +github.com/dsnet/golib/unitconv,v0.0.0-20190531212259-571cdbcff553,h1:mE6azeVhLnKfk6DH3Zcg56L87yJ/uv9HZ5YJOQcPC4s=,603b60f7278fe7299f59d716da2bd287441f1321b5a663828d894e67bc274bed +github.com/duosecurity/duo_api_golang,v0.0.0-20190308151101-6c680f768e74,h1:2MIhn2R6oXQbgW5yHfS+d6YqyMfXiu2L55rFZC4UD/M=,75c90bdd92362e2cc36297193a543fe0cd75c07f82182940ad6158a1d470cc8b +github.com/dustin/go-humanize,v1.0.0,h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=,e01916e082a6646ea12d7800d77af43045c27284ff2a0a77e3484509989cc107 +github.com/dylanmei/iso8601,v0.1.0,h1:812NGQDBcqquTfH5Yeo7lwR0nzx/cKdsmf3qMjPURUI=,1e682968bfcac2115e1fd706ec6bd09a0b676d7d224514d8f8dff9cadbf87e79 +github.com/dylanmei/winrmtest,v0.0.0-20190225150635-99b7fe2fddf1,h1:r1oACdS2XYiAWcfF8BJXkoU8l1J71KehGR+d99yWEDA=,5607cb987ec0a699003eeec5952f0280792fd5db7099ca277bdfae26e93b0ef3 +github.com/eapache/go-resiliency,v1.1.0,h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=,a64ebe539335e126b30f79f0f00f39ffe083e794995500a67e0a2156b334788e 
+github.com/eapache/go-xerial-snappy,v0.0.0-20180814174437-776d5712da21,h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=,785264afffdcfe50573a1cb0df85ff4186e9e7e4e3a04513752f52d3da1054af +github.com/eapache/queue,v1.1.0,h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=,1dc1b4972e8505c4763c65424b19604c65c944911d16c18c5cbd35aae45626fb +github.com/eclipse/paho.mqtt.golang,v1.2.0,h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0=,d36337c4b5a2752b91bcd437bd74e0907bf6c9e6c611dab88407bcca8462e918 +github.com/edgexfoundry/go-mod-core-contracts,v0.1.33,h1:lQbLbRhymV0/QDDDGU26idZ9Kv+Q0IETn81hLpHxi68=,a7a8792a8692d64daea343577a49934be6ba64acbe114b3c24262537b5a9157f +github.com/edsrzf/mmap-go,v1.0.0,h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=,851a1d4d6e30f97ab23b7e4a6a7da9d1842f126d738f7386010c6ee7bf82518e +github.com/edwingeng/doublejump,v0.0.0-20190102103700-461a0155c7be,h1:FnUE/uuuegwvhGE9z61q9krL5km5Mnwlusq3BT06yy8=,a9cb92422f0bbdd56c80d9873a8f7af6fd2d8d8154a7a11d7cb9232d9146f07c +github.com/efarrer/iothrottler,v0.0.0-20141121142253-60e7e547c7fe,h1:WAx1vRufH0I2pTWldQkXPzpc+jndCOi2FH334LFQ1PI=,04291e6136b933fd2cdcc29f3af78090a9d678534a94823590eb63f1f318db1d +github.com/efritz/backoff,v1.0.0,h1:r1DfNhA1J7p8kZ185J/hLPz2Bl5ezTicUr9KamEAOYw=,064d92e7f3e46079d158cac717e1c9bf96a230a5f31bf28940bd4a99bb91657e +github.com/efritz/glock,v0.0.0-20181228234553-f184d69dff2c,h1:Q3HKbZogL9GGZVdO3PiVCOxZmRCsQAgV1xfelXJF/dY=,716200eb117905f4df509b7260869bb97bf8833c160d2ff1d328d01aa3874bc9 +github.com/eknkc/amber,v0.0.0-20171010120322-cdade1c07385,h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=,b1dde9f3713742ad0961825a2d962bd99d9390daf8596e7680dfb5f395e54e22 +github.com/elastic/go-sysinfo,v1.0.1,h1:lzGPX2sIXaETeMXitXL2XZU8K4B7k7JBhIKWxdOdUt8=,fe0cd64aa3ac73edbb4240dcbcb660c4ec004f07c36371be6d78543c3b215d92 +github.com/elastic/go-windows,v1.0.0,h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY=,e487e6f1e269766b5815c36e93614b87a185ddc33f7a6f4bf23e5ee6d0d0e3c1 
+github.com/elastic/gosigar,v0.10.5,h1:GzPQ+78RaAb4J63unidA/JavQRKrB6s8IOzN6Ib59jo=,a139252942b5ca82ddc3d9ced1daa262de0149a413149d3f0234b43dc3635acf +github.com/elazarl/go-bindata-assetfs,v1.0.0,h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk=,3aa225ae5ae4a8059a671fa656d8567f09861f88b88dbef9e06a291efd90013a +github.com/elazarl/goproxy,v0.0.0-20191011121108-aa519ddbe484,h1:pEtiCjIXx3RvGjlUJuCNxNOw0MNblyR9Wi+vJGBFh+8=,6c224ac5720959a46f6d88e0b15dda732c7eb180b3103a826cf6d5459a5e112f +github.com/elazarl/goproxy/ext,v0.0.0-20190711103511-473e67f1d7d2,h1:dWB6v3RcOy03t/bUadywsbyrQwCqZeNIEX6M1OtSZOM=,7244c1fe7490460503559e24e0e478540bc10481d1d8f3afd0a1f6b1a470b52f +github.com/emicklei/go-restful,v2.11.1+incompatible,h1:CjKsv3uWcCMvySPQYKxO8XX3f9zD4FeZRsW4G0B4ffE=,9befcac63629841301235124e728206a96170afd83c78b632d271acafc9acccf +github.com/emicklei/go-restful-swagger12,v0.0.0-20170926063155-7524189396c6,h1:V94anc0ZG3Pa/cAMwP2m1aQW3+/FF8Qmw/GsFyTJAp4=,07fd41dbe765b7d340df21d6353db8bef782f9b6742a93696b6f4133ef1d8955 +github.com/emicklei/proto,v1.6.15,h1:XbpwxmuOPrdES97FrSfpyy67SSCV/wBIKXqgJzh6hNw=,162ad34010e5f81ebed962a33c91ee6356e19631c7a7030bc9b173e85ca34678 +github.com/emirpasic/gods,v1.12.0,h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=,729ea0bda86bf801b61ff66eb019e5b9adc559cd217944abf10bb103fca573ee +github.com/endophage/gotuf,v0.0.0-20151124190824-3b700e20e376,h1:rPyHFhsuPZMEJAe1Oj2vpRC8277wpDJJ+aabkmlHF1A=,2cd5e6d0e748e0625e8c4a08a3b9f74e311e6654a1c5411fa3a9720f5f67cf40 +github.com/envoyproxy/go-control-plane,v0.9.0,h1:67WMNTvGrl7V1dWdKCeTwxDr7nio9clKoTlLhwIPnT4=,07b3a43081c9e1cdccb95c657cba7f483d5099f9ce07b5e3f3e28ce557687521 +github.com/envoyproxy/protoc-gen-validate,v0.1.0,h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=,ec5261f3bbc426d71e2be4c76063ba12460c5d27845d630763e9e911ec4768af +github.com/eoscanada/eos-go,v0.8.10,h1:QUwHRBHEFag/qyW4PR2S9++0se0V4LjPLk1/KsNtXlo=,f1c48e793d1c7864288871a944af4b4ee3363ad6ae5298e9c2f9f42202e6d77c 
+github.com/erikstmartin/go-testdb,v0.0.0-20160219214506-8d10e4a1bae5,h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=,471feb426b2a7ec1df29cc21c66aef34c9e7aabea751328644d1362593983d21 +github.com/ernesto-jimenez/gogen,v0.0.0-20180125220232-d7d4131e6607,h1:cTavhURetDkezJCvxFggiyLeP40Mrk/TtVg2+ycw1Es=,1f3030cfc89653ba791ae312b19e420dc8eaf1bef51f59dca6aa390f3cd1f3d0 +github.com/etcd-io/bbolt,v1.3.3,h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=,6630d7aad4b10f76aea88ee6d9086a1edffe371651cc2432edfd0de6beb99120 +github.com/ethantkoenig/rupture,v0.0.0-20180203182544-0a76f03a811a,h1:M1bRpaZAn4GSsqu3hdK2R8H0AH9O6vqCTCbm2oAFGfE=,8559344c496621c06b612453de587e8e4c45c0fbc348a955f8eda7ea2b3d09c8 +github.com/ethereum/go-ethereum,v1.9.6,h1:EacwxMGKZezZi+m3in0Tlyk0veDQgnfZ9BjQqHAaQLM=,778c9bf77dd96bfaf5c3ea84498611490999782fb37edf8257680e27dd8976e8 +github.com/euank/go-kmsg-parser,v2.0.0+incompatible,h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY=,43cadfa5ab226f89ca7a715add32ba23c554a5dfafd3a55449856a6b7012f946 +github.com/evanphx/json-patch,v4.5.0+incompatible,h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=,5508e810685a5081a3e880aeb24e501bd87920241baa317bfb5f3946b4fa417c +github.com/exoscale/egoscale,v0.18.1,h1:1FNZVk8jHUx0AvWhOZxLEDNlacTU0chMXUUNkm9EZaI=,8cb4f10504b54d31c71bc4a670171a074f7abbab67d939fd404b62ad36cb6aed +github.com/facebookgo/atomicfile,v0.0.0-20151019160806-2de1f203e7d5,h1:BBso6MBKW8ncyZLv37o+KNyy0HrrHgfnOaGQC2qvN+A=,3c9bdee73452cc12c2936b4050d638d36302a958091ceb49c45ffbaff8954218 +github.com/facebookgo/clock,v0.0.0-20150410010913-600d898af40a,h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=,5d6b671bd5afef8459fb7561d19bcf7c7f378da9943722d36676735b3c6272fa +github.com/facebookgo/ensure,v0.0.0-20160127193407-b4ab57deab51,h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=,a96c69c2b5902e0383139ee7089877a5ae2ddcd4eba42a595d13b570907d3fdc 
+github.com/facebookgo/freeport,v0.0.0-20150612182905-d4adf43b75b9,h1:wWke/RUCl7VRjQhwPlR/v0glZXNYzBHdNUzf/Am2Nmg=,0f717d7eb52e276aec2138a971b091cd04da95826c8f451a20e8e78c4bb8f915 +github.com/facebookgo/grace,v0.0.0-20160926231715-5729e484473f,h1:0mlfEUWnUDVZnqWEVHGerL5bKYDKMEmT/Qk/W/3nGuo=,79f9f73ef925d457d2b70d37b12c3cec97a2e84e73a932397d2f569ec8702ee7 +github.com/facebookgo/httpdown,v0.0.0-20160323221027-a3b1354551a2,h1:3Zvf9wRhl1cOhckN1oRGWPOkIhOketmEcrQ4TeFAoR4=,dbbccf963238c5f80c54edb19aeb016f486f42dcd922fc0be5b832af9449ca4b +github.com/facebookgo/inject,v0.0.0-20161006174721-cc1aa653e50f,h1:jK9r9Ofgc/Yzdlod77G23LfYtwqAmkQCZ9MaP6779OI=,6292702ff520e1fb14231f29bb2639d8f39edc08de479d76757ad97dafbb9174 +github.com/facebookgo/stack,v0.0.0-20160209184415-751773369052,h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=,0afd18a8394caa29e94bd58a42e0d2be07939f9daf190a9ba2a947f9cbd4ba1a +github.com/facebookgo/stats,v0.0.0-20151006221625-1b76add642e4,h1:0YtRCqIZs2+Tz49QuH6cJVw/IFqzo39gEqZ0iYLxD2M=,d87443825721dc1dd5c358cd9e55b917ee1c3b6b10ab9557375f59d563b628cb +github.com/facebookgo/structtag,v0.0.0-20150214074306-217e25fb9691,h1:KnnwHN59Jxec0htA2pe/i0/WI9vxXLQifdhBrP3lqcQ=,3a9c84e9dc2b9960f1de3cc7a61d91fe2978e64e4e4859a9383259092ec91c5e +github.com/facebookgo/subset,v0.0.0-20150612182917-8dac2c3c4870,h1:E2s37DuLxFhQDg5gKsWoLBOB0n+ZW8s599zru8FJ2/Y=,bb18c678177e1aaaae209a2de9c28b5b7acc34e58fe00517b847a9460bd42df2 +github.com/farsightsec/golang-framestream,v0.0.0-20190425193708-fa4b164d59b8,h1:/iPdQppoAsTfML+yqFSq2EBChiEMnRkh5WvhFgtWwcU=,084f0ac3684b180e3d87db3e7b36a412c750397fbf009579e126c304528c1738 +github.com/fatih/camelcase,v1.0.0,h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=,54664f64f1f24097b80c64b9f606cbe8d8bc410a755ce6cda4f45e46f1141984 +github.com/fatih/color,v1.7.0,h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=,6036f0b31167280b696b5efb43603e71bce31420fb3428afdf74a68bb3a3ebef 
+github.com/fatih/structs,v1.1.0,h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=,a361ecc95ad12000c66ee143d26b2aa0a4e5de3b045fd5d18a52564622a59148 +github.com/fatih/structtag,v1.0.0,h1:pTHj65+u3RKWYPSGaU290FpI/dXxTaHdVwVwbcPKmEc=,347fce3911900f5947735c12ccb4c6fbe0199c6df040bcaa4d74a8587af896d0 +github.com/fd/go-nat,v1.0.0,h1:DPyQ97sxA9ThrWYRPcWUz/z9TnpTIGRYODIQc/dy64M=,bdf011af97da57ef3c58a091ae760eb885a6322faa3539d3c37bf76d4fff536a +github.com/fernet/fernet-go,v0.0.0-20180830025343-9eac43b88a5e,h1:P10tZmVD2XclAaT9l7OduMH1OLFzTa1wUuUqHZnEdI0=,a484a3172222095507a7f1901a91ab741c28278ea6b878c21c1151c0fd40f46d +github.com/flosch/pongo2,v0.0.0-20190707114632-bbf5a6c351f4,h1:GY1+t5Dr9OKADM64SYnQjw/w99HMYvQ0A8/JoUkxVmc=,814b52f668d2e2528fe9af917506cda4894d22c927283cfb8aaf6857503dfc5a +github.com/flynn/go-shlex,v0.0.0-20150515145356-3f9db97f8568,h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=,ea68a1d391e59ebc04ce986b88e000327bb141e5e8e80ef93af950bca42bb4cc +github.com/fogleman/gg,v1.3.0,h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8=,792f7a3ea9eea31b7947dabaf9d5a307389245069078e4bf435d76cb0505439c +github.com/forestgiant/sliceutil,v0.0.0-20160425183142-94783f95db6c,h1:pBgVXWDXju1m8W4lnEeIqTHPOzhTUO81a7yknM/xQR4=,bedd47c23670847642576777cc8b53b9dd8a5a8e7b0a6f2299ebc6fa3b7b6f00 +github.com/fortytw2/leaktest,v1.3.0,h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=,867e6d131510751ba6055c51e7746b0056a6b3dcb1a1b2dfdc694251cd7eb8b3 +github.com/francoispqt/gojay,v1.2.13,h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=,f41e3e4f3086400448dbce1c06c59f5848a6c5983e5466689965e3a2cabcba7c +github.com/frankban/quicktest,v1.5.0,h1:Tb4jWdSpdjKzTUicPnY61PZxKbDoGa7ABbrReT3gQVY=,515b5b2b9320b2982193ad6bd118907aaab9ff62189870e00be459cc4097073c +github.com/fsnotify/fsnotify,v1.4.7,h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=,1d09ad8f3dc41cb6e0288023b47272c1c9393ca411e48f4b5009bca6662dc3ad 
+github.com/fsouza/fake-gcs-server,v1.2.0,h1:FZUL/EJlyAlHxpUWZs23ae4zNwBwmHM1p5TykkoP85A=,83b547a0780693f154c30137b1eeaf0c0e9628798ae4b7e1d74ebfb8efaf61fc +github.com/fsouza/go-dockerclient,v1.5.0,h1:7OtayOe5HnoG+KWMHgyyPymwaodnB2IDYuVfseKyxbA=,c7025b816e0ba28041a88b1063003f4e31097346d06cf69811f9d55505d3d46c +github.com/fullsailor/pkcs7,v0.0.0-20190404230743-d7302db945fa,h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU=,ba36a8fc855d6eecef329d26f8e82132e38d45d06f79f88d3b0bde6d718c8fb2 +github.com/fuyufjh/splunk-hec-go,v0.3.3,h1:7PLVIODblK9FXfuAy8iPZg0lcw1YNzSQHfC+0NYgUxU=,9517f63386f64e0dceca9352f45eb7f160452682a07fa04d3c1ff90eb19ac83d +github.com/gabriel-samfira/sys,v0.0.0-20150608132119-9ddc60d56b51,h1:rUp9t/FbeJM3R3BSYkJfViN3CNQcmk44H20SqkJ/y+k=,1be262d101bd9079bb859639ad6d5eaee80646b6db0fcbeb7146d9381949d2a8 +github.com/gammazero/deque,v0.0.0-20190130191400-2afb3858e9c7,h1:D2LrfOPgGHQprIxmsTpxtzhpmF66HoM6rXSmcqaX7h8=,a1fe4ec3258f68685ee45b68e1d9188d79726af46a1b93281cf11ddc6045a864 +github.com/gammazero/workerpool,v0.0.0-20190406235159-88d534f22b56,h1:VzbudKn/nvxYKOdzgkEBS6SSreRjAgoJ+ZeS4wPFkgc=,cbb92fdf8d457e27923dc6515af4458a55af932ccf468415c8b36bf49845fc00 +github.com/garyburd/go-oauth,v0.0.0-20180319155456-bca2e7f09a17,h1:GOfMz6cRgTJ9jWV0qAezv642OhPnKEG7gtUjJSdStHE=,be051ba0d52eaced1c1985ebdf2dece3f7127ad392645b42fd06c2af9c9caea2 +github.com/garyburd/redigo,v1.6.0,h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc=,68f0d2b454f7a9a000c3335fc0f409123637e4711c6461a4c75e2f128f68f283 +github.com/gavv/monotime,v0.0.0-20161010190848-47d58efa6955,h1:gmtGRvSexPU4B1T/yYo0sLOKzER1YT+b4kPxPpm0Ty4=,c97324768edc8170e05b8925b0551778909c8e15817d4327ac405a4e0b6071f4 +github.com/gcash/bchd,v0.14.7,h1:n3gMXCT4VhU/emiCq61kmKBPADLxBzpX5IlXPnGuR2c=,871644f504d6c3f19dcfc8a7a6e6aa623e6642275a48dfffe770ec61368c2032 +github.com/gcash/bchlog,v0.0.0-20180913005452-b4f036f92fa6,h1:3pZvWJ8MSfWstGrb8Hfh4ZpLyZNcXypcGx2Ju4ZibVM=,d400c8e944edf2a67f46e75335f55c14170c523691804ea71e1a348ad45bc7e7 
+github.com/gcash/bchutil,v0.0.0-20191012211144-98e73ec336ba,h1:KVa96lSrJGMYZ414NtYuAlbtCgrmW9kDnjvYXcLrr5A=,7b829a35d22ead0ee82d8a98b1e06da5e63fd07b2798fce8ba87c8da670ef04a +github.com/gcla/gowid,v1.0.0,h1:78Xf5G9+lb4/g3KCB3hX8UJ8VorymMH5PXu9Npvwf8s=,eaa7e0b7bb0912c6b24c98dee0073a2de754c24e1347ce7c5bfc63397ccf0fa6 +github.com/gdamore/encoding,v1.0.0,h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko=,638a9832e2f62d118d7c511d86bdae1622a51f331de48a01d929fd24ebe6a2a6 +github.com/gdamore/tcell,v1.3.0,h1:r35w0JBADPZCVQijYebl6YMWWtHRqVEGt7kL2eBADRM=,97c1e828ff9de0cef3a5bbdb3f3def8a351ad6ca65a780d4dd4141b0ee23c88e +github.com/genuinetools/pkg,v0.0.0-20180910213200-1c141f661797,h1:SGpZXDd/CFeDIY4Rq5cFO8K/uqDblHUxjlzOmjFpvRg=,c15cbe95e0a7e38cc0a790b0098170c103ba84d56e7cbaf744a6df10c00efa45 +github.com/genuinetools/reg,v0.16.0,h1:ZhLZPT+aUGHLfy45Ub5FLWik+3Dij1iwaj8A/GyAZBw=,a505ff5357d6095540c89ee27d207a3a4dc7c73840fb6bc9a2f0f3a81e498341 +github.com/gernest/wow,v0.1.0,h1:g9xdwCwP0+xgVYlA2sopI0gZHqXe7HjI/7/LykG4fks=,b49d5efc34e19469e7319df09b35438de307ba7cd8c9333ecba190f457ca8e22 +github.com/getgauge/common,v0.0.0-20190514095629-619e107433ce,h1:/ofMj8gIhPYdb/JEXKj8iYe5Yxl3mrK8YA7yl/06t6Y=,04ab4fb7e8dcf693c3b79028693130cd51fe54f5a16f12622975a7c3eb7705f7 +github.com/getlantern/context,v0.0.0-20190109183933-c447772a6520,h1:NRUJuo3v3WGC/g5YiyF790gut6oQr5f3FBI88Wv0dx4=,27515ae761018c4cfc83043194904170bef0cac037c48ff96fc497502b9bab14 +github.com/getlantern/errors,v0.0.0-20190325191628-abdb3e3e36f7,h1:6uJ+sZ/e03gkbqZ0kUG6mfKoqDb4XMAzMIwlajq19So=,a48d7684463e8c496fea4a2595ca71012c3b222bc77de7c2ddfbe78bc4595ac5 +github.com/getlantern/fdcount,v0.0.0-20170105153814-6a6cb5839bc5,h1:8Q9iN/V24EG01IgXEKVScth/rTXpplBxCYio/yIKtUw=,b24c26d5ede197fd6b7f981cf5db300124e22f48667942c948a9750f7a908c94 +github.com/getlantern/golog,v0.0.0-20190830074920-4ef2e798c2d7,h1:guBYzEaLz0Vfc/jv0czrr2z7qyzTOGC9hiQ0VC+hKjk=,1eeabfbc56105f3d751e1947405f5296db5ded7e25900209fe7327f1b5d785e6 
+github.com/getlantern/hex,v0.0.0-20190417191902-c6586a6fe0b7,h1:micT5vkcr9tOVk1FiH8SWKID8ultN44Z+yzd2y/Vyb0=,ea5a13f98a82c1919c59b655de531cbb35ac7dfff3c99072b43b8bfd1c29b774 +github.com/getlantern/hidden,v0.0.0-20190325191715-f02dbb02be55,h1:XYzSdCbkzOC0FDNrgJqGRo8PCMFOBFL9py72DRs7bmc=,c901f2e702114d6268446a381a27737c6123e50191197fd84f17b339238191b4 +github.com/getlantern/idletiming,v0.0.0-20190529182719-d2fbc83372a5,h1:laM1s/bxUH8xbbC9TBGWsOc7A0KCAPZMa4pdwO5e6Vw=,35de51b383e926042d3f8f4859e2d961582cf9964d3b7bb513ac4733cc43162f +github.com/getlantern/mockconn,v0.0.0-20190403061815-a8ffa60494a6,h1:+aO65ByJw74kV8vXqvkj49P5RtIqyUObyeRTIxMz218=,a4a1ccdc9ec68dea571d9603d4a36150b6ccaea447ca88965e088ff0b9eeaa0d +github.com/getlantern/mtime,v0.0.0-20170117193331-ba114e4a82b0,h1:1VNkP55LM/W2IwWN+qi+5X3gZcEQHfj8X9E+FNxVgM4=,5af0b20838a808b86a2a9c87c254d47185d38d5935780dade3bc7a54dc2880f4 +github.com/getlantern/netx,v0.0.0-20190110220209-9912de6f94fd,h1:mn98vs69Kqw56iKhR82mjk16Q1q5aDFFW0E89/QbXkQ=,cb386d0527fb6f549fa0266c770a68d7d83a88bab2194d25b55355f59198fdf0 +github.com/getlantern/ops,v0.0.0-20190325191751-d70cb0d6f85f,h1:wrYrQttPS8FHIRSlsrcuKazukx/xqO/PpLZzZXsF+EA=,321694d3d2f31415653a7b9d97a4a701f36f10ccfbbdb94449f1211137d6f215 +github.com/getsentry/raven-go,v0.2.0,h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=,eaffe69939612cd05f95e1846b8ddb4043655571be34cdb6412a66b41b6826eb +github.com/gf-third/mysql,v1.4.2,h1:f1M5CNFUG3WkE07UOomtu4o0n/KJKeuUUf5Nc9ZFXs4=,14a08134ce02bd0d07667da91a89c9098d18bad8c790414e37aba906895a5a3e +github.com/gf-third/yaml,v1.0.1,h1:pqD4ix+65DqGphU1MDnToPZfGYk0tuuwRzuTSl3g0d0=,6354a95d7faa222d2e653485bc9dd555aad61a75eb5a5f970de531391ed77a2f +github.com/ghetzel/go-defaults,v1.2.0,h1:U1T64bxhBc6nVZ68QXch1hoHq43h6isqgbvG7kxY9Uc=,f339e441d08af3af184a21f518227db7c705851be82f3fcea611e762ebb633a1 +github.com/ghetzel/go-stockutil,v1.8.6,h1:VgqpePUGGXMHjgArUH5mSAYFC35aiFgkU/TdTU/ts80=,aa0cce06af82b7d1f98a20deaafd6997fa7c3d36fba9a204a34e5d91a2096fa0 
+github.com/ghetzel/testify,v1.4.1,h1:wpJirdM+znAnxWruGDBdIys5aU+wGJHNUTkgEo4PYwk=,90206efc10ad71a33bf314ef768d16c6186d23ccb5aa8172663437d497dbfdd7 +github.com/ghetzel/uuid,v0.0.0-20171129191014-dec09d789f3d,h1:YVJe7KwVYazt90hCc/q2dYJVS3062AY6QdT6iHd+Kh8=,924f39fe83589fa269e652c8ca4f7b0dbc59023baada8a55c24692fe5223b67a +github.com/ghodss/yaml,v1.0.1-0.20190212211648-25d852aebe32,h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew=,9771720da98bbdd80dacdefb47b9a0e36faa75caa4745149d150325ba5390e4b +github.com/gin-contrib/gzip,v0.0.1,h1:ezvKOL6jH+jlzdHNE4h9h8q8uMpDQjyl0NN0Jd7jozc=,e994ecc5881938978d6d031e3d0c1bc5968bfe5de2a307aed7c63aecba459ecd +github.com/gin-contrib/sse,v0.1.0,h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=,512c8672f26405172077e764c4817ed8f66edc632d1bed205b5e1b8d282816ab +github.com/gin-gonic/gin,v1.4.0,h1:3tMoCCfM7ppqsR0ptz/wi1impNpT7/9wQtMZ8lr1mCQ=,b9bc661bf658179d53fee9e7c587eba4df8326d0c26ad29f785739a78313fc4b +github.com/glacjay/goini,v0.0.0-20161120062552-fd3024d87ee2,h1:+SEORW3KptcFnlhTbn7N0drG3AFnrcmBDWDyQ3Bt06o=,061319068788a9eeef67d4e5cf84a87c4649005aaa4f37c983a868c357e3df3c +github.com/gliderlabs/ssh,v0.2.2,h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=,f9f12d766ceeab9e2134504520de75819d1eeb6733b8b619b7bcd4aac4cca983 +github.com/globalsign/mgo,v0.0.0-20181015135952-eeefdecb41b8,h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is=,c07f09e0c93e6410076edfd621d2decbd361361c536c3e33ba097fa51708f360 +github.com/glycerine/go-unsnap-stream,v0.0.0-20180323001048-9f0cb55181dd,h1:r04MMPyLHj/QwZuMJ5+7tJcBr1AQjpiAK/rZWRrQT7o=,9a66d6f9bb1a268f4b824d6fe7adcd55dc17ed504683bdf2dbf67b32028d9b88 +github.com/glycerine/goconvey,v0.0.0-20190315024820-982ee783a72e,h1:SiEs4J3BKVIeaWrH3tKaz3QLZhJ68iJ/A4xrzIoE5+Y=,344fb699344a5ab09464c0283a65402ae0fe6bd6fac7d40e9c4d403cf4a7714f +github.com/gmallard/stompngo,v1.0.12,h1:uj1Bl9o+dqn0qSR33xHmaKw21W5LzhWo4Q4hS1MCpQU=,88498e4da4e0f7f3923d758a464d53f550921617b1047643def2a973c86dfd03 
+github.com/go-aah/forge,v0.8.0,h1:sk4Z523B9ay3JQF4At97U7kecB5yTIm0J2UM/qRVXbQ=,e883adcfb380d6187de84c59a0f8bb3b34931487151873d7a326a1b4df556e48 +github.com/go-acme/lego,v2.7.2+incompatible,h1:ThhpPBgf6oa9X/vRd0kEmWOsX7+vmYdckmGZSb+FEp0=,1a597873ff61c0fbdab6b4f1027141d2e8dbe739bd2018473559bec954f3e651 +github.com/go-acme/lego/v3,v3.1.0,h1:yanYFoYW8azFkCvJfIk7edWWfjkYkhDxe45ZsxoW4Xk=,fbb3cfe2619281c3ccd456b213b5f8c7bf695f82ecac6c97f747dc4159dfe4b2 +github.com/go-ble/ble,v0.0.0-20190521171521-147700f13610,h1:eWay3GzFqTJUEYN1BrbqdDTFeFUGmYLps8SQkn1D7Yo=,a5fb6440935dd7ef8bb3569bc7260bd1ad44e01d41bbb684dbb96cc677fb2234 +github.com/go-chassis/foundation,v0.0.0-20190621030543-c3b63f787f4c,h1:p+Y6yq7RwHmYjEr/vwdVYGacBqFCc2lPQfNRIC3vRIs=,db38c108455e57b3f8f062c22872554d5af9dfa03a723c9fea263a009f3002e6 +github.com/go-chassis/go-archaius,v0.24.0,h1:ubNgs3Rv067PI7t37ZJoIMaPPHIBWV+ni/e7XAdW1hU=,37b0c60692eaed91abd3d2c6a0fc9366a54882f3a6b5ef81f3cc20d14882a13d +github.com/go-chassis/go-chassis,v1.7.3,h1:7fcfaE9Ij+oBbf2lHoHHIvxT9objtt1EHpwRPBUkDhw=,38f8393558528b0212674268f6dc507d5db716fc5745eff09bccf1cd98b86eb7 +github.com/go-chassis/go-chassis-config,v0.14.0,h1:OnM9sx2GalDC7vEIhPecRpQlVa8hz10NOB41+9tii5A=,afc7506eec8591a5ccbb08f073ba19312bc03d87ec15c1532f5daba02f090e00 +github.com/go-chassis/go-restful-swagger20,v1.0.1,h1:HdGto0xroWGK504XN0Um7JBc0OPMHDlWwedkd2mTGII=,2c41388f71dc766088fc3e47e91a2f8c2d7936e40f6a64afff53a12ef73e0d05 +github.com/go-chassis/paas-lager,v1.0.2-0.20190328010332-cf506050ddb2,h1:iORWPbIQ81tJPKWs9TNvcjCQnqvyTlL41F9ILgiTcyM=,a74c06554cf6835e98c4fa548a4aa3dcc317ca93567af893b89a4dba88b783af +github.com/go-chat-bot/bot,v0.0.0-20191022130543-3da6cae45477,h1:JfUELmxvEz/MXI3/iSn2UcB/5CCAvMsxKi88j783ssk=,a059cd1d050747bd0adcd3d4ba91e12b0ace2c038187484726c3d551169d9fa4 +github.com/go-chat-bot/plugins,v0.0.0-20181006134258-491b3f9878d6,h1:qNYjVQnDwznjLk+OnNdczA5SXwEa/RwjPTZSQCKofF4=,b19527108aef487fa1f4856e354f4777644a574248cc7e891bacf1bfb38bd12d 
+github.com/go-chat-bot/plugins-br,v0.0.0-20170316122923-eb41b30907dc,h1:v/poG4Y4O/z1cUm2cWxiIkFFgRsT3Fe1u1A33evx89g=,6b613e62d3f389f3d6f8f262903bc31c4f1eb4b3ca8d192606f78199b1af0d43 +github.com/go-check/check,v0.0.0-20190902080502-41f04d3bba15,h1:xJdCV5uP69sUzCIIzmhAw6EKKdVk3Tu48oLzM86+XPI=,93bbc1f982dd553e279fb4c7fbc060032096e2b5d0537385ae80247492a6433e +github.com/go-chi/chi,v4.0.2+incompatible,h1:maB6vn6FqCxrpz4FqWdh4+lwpyZIQS7YEAUcHlgXVRs=,25c94ccd43f18002c2dd07e87da1dc393ff87d615441e559bda425ea0979715b +github.com/go-cmd/cmd,v1.0.5,h1:IK23uTRWxq6UJnNWp8nKO7mVCwnPfbaxA2lhzEKfNj0=,2623aa43dbf68c24362bcfb7a216b83c2e7473d4a3e49e7955c3fa5f28b4974c +github.com/go-delve/delve,v1.3.2,h1:K8VjV+Q2YnBYlPq0ctjrvc9h7h03wXszlszzfGW5Tog=,b8a250f2b3ef87da34fbfc655bb23a051b43672bea7a8abc4e083a2b214faf09 +github.com/go-errors/errors,v1.0.1,h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=,bdbee3143e1798eadff4df919479c28ec2d3299a97d445917bc64d6eb6a3b95a +github.com/go-gl/gl,v0.0.0-20181026044259-55b76b7df9d2,h1:78Hza2KHn2PX1jdydQnffaU2A/xM0g3Nx1xmMdep9Gk=,499822d1b3bcc34b82df0fcc13ac9a0ea273c5d68b3e183e18fa76dab9793954 +github.com/go-gl/glfw,v0.0.0-20190409004039-e6da0acd62b1,h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=,96c694c42e7b866ea8e26dc48b612c4daa8582ce61fdeefbe92c1a4c46163169 +github.com/go-gl/mathgl,v0.0.0-20190713194549-592312d8590a,h1:yoAEv7yeWqfL/l9A/J5QOndXIJCldv+uuQB1DSNQbS0=,39948d90a5672c7866b5b1c01e9e8ce6c80c099306ed80e9e138350840f82110 +github.com/go-ini/ini,v1.49.0,h1:ymWFBUkwN3JFPjvjcJJ5TSTwh84M66QrH+8vOytLgRY=,4820559fd3640c6b5361a7077e8b5c1a4318a06a59df7a095cbf96514d46d432 +github.com/go-kit/kit,v0.9.0,h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=,f3da9b35b100dd32e7b10c37a0630af60d54afa37c61291e7df94bc0ac31ed03 +github.com/go-ldap/ldap,v3.0.3+incompatible,h1:HTeSZO8hWMS1Rgb2Ziku6b8a7qRIZZMHjsvuZyatzwk=,4197e5fbebc7a1805be236cf75dea301f0b8e15a857e2373653b76157c649f93 
+github.com/go-log/log,v0.1.0,h1:wudGTNsiGzrD5ZjgIkVZ517ugi2XRe9Q/xRCzwEO4/U=,ec5845d33a6d7ede81970833cfc3179d53b99019da1ebffef5e71005ff94be43 +github.com/go-logfmt/logfmt,v0.4.0,h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=,d678198dc0eeaed28736e0d71b919a0bd98501b7275c69a7917122f6de9e0d1c +github.com/go-logr/logr,v0.1.0,h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=,4c14b7c05eaa48b7f8dbf2ca38c3603dce446f4184a4c0af2f569b046d66201e +github.com/go-logr/zapr,v0.1.0,h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54=,7b60c74f722b8f215711503dd63576845987eff81ef5f9dc052fc9158d1c57e2 +github.com/go-macaron/binding,v1.0.0,h1:ILEIP1e9GaXz//fZIl1zXgHVbM9j1SN89aTGOq8340Y=,3887f50d442cd8f9eeeb0e7710c7cba41c185d8e5a82404ff33e7cbd4e16d0c7 +github.com/go-macaron/cache,v0.0.0-20151013081102-561735312776,h1:UYIHS1r0WotqB5cIa0PAiV0m6GzD9rDBcn4alp5JgCw=,a854b7844fff9ec69025db12a2b03834a2eac570a366962c4eb83984813a9fdb +github.com/go-macaron/captcha,v0.0.0-20190710000913-8dc5911259df,h1:MdgvtI3Y1u/DHNj7xUGOqAv+KGoTikjy8xQtCm12L78=,fb1c643c72ba9ef2c5d613e324e47dbb17ce45a28cbee8cb540ea48a0b3d6a23 +github.com/go-macaron/cors,v0.0.0-20190418220122-6fd6a9bfe14e,h1:auESkcVctNZnNl4EH0TuoCSJMJ7Q7ShU8FS6lDEsAC4=,0f3043631d54efca5615fe7ed819523bbe0c18726ce9e4b0cdc0ef2879aa6044 +github.com/go-macaron/csrf,v0.0.0-20180426211211-503617c6b372,h1:acrx8CnDmlKl+BPoOOLEK9Ko+SrWFB5pxRuGkKj4iqo=,90b5cbd86ff3708d41be70ad3cde77fdedd5ef485b960cc3a9ffea6f0a14902c +github.com/go-macaron/gzip,v0.0.0-20191101043656-b5609500c6fc,h1:z3gfrCJUPhdRHtd8kftnNBzI5ayZ1zQhWARPeL83JNQ=,dfcc1200b66bcb581c6984da9fa4aefc92facc3a07d182c7c37f0978b41b868f +github.com/go-macaron/i18n,v0.0.0-20160612092837-ef57533c3b0f,h1:wDKrZFc9pYJlqFOf7EzGbFMrSFFtyHt3plr2uTdo8Rg=,6c1d5fe7ed23e05ca1af7462e6deac2d993ddacd099ad794faad5c685337742d +github.com/go-macaron/inject,v0.0.0-20160627170012-d8a0b8677191,h1:NjHlg70DuOkcAMqgt0+XA+NHwtu66MkTVVgR4fFWbcI=,666bb04a5df1271326b4fcdbbdc3276400ae7e54f4ed6233792cd6e519676491 
+github.com/go-macaron/session,v0.0.0-20191101041208-c5d57a35f512,h1:7ndsXTX42iYHryQz98zUsBJfStJ0kXFKgDrPmRvR400=,3581a7eb19a2a60d41aba7e85afa576c35a97659e162b83292ff67396f899845 +github.com/go-macaron/toolbox,v0.0.0-20180818072302-a77f45a7ce90,h1:3wYKrRg9IjUMfaf3H0Hh7M5Li9ge79Y7aw2yujHa2jQ=,43f2a06502408404c3b1231c3642693632cf20bc4f2cb45881bd2292b1eed714 +github.com/go-martini/martini,v0.0.0-20170121215854-22fa46961aab,h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk=,0561a4dadd68dbc1b38c09ed95bbfc5073b0a7708b9a787d38533ebd48040ec2 +github.com/go-mesh/openlogging,v1.0.1,h1:6raaXo8SK+wuQX1VoNi6QJCSf1fTOFWh7f5f6b2ZEmY=,3606bad571f959cc24382381f7d50fb321819958df37911f6ad6aa5ac3e02181 +github.com/go-ole/go-ole,v1.2.4,h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=,c8b3ef1187d2d7dbfddc4badefcc992c029cd377ae07bff2fa05ec8972836612 +github.com/go-openapi/analysis,v0.19.5,h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI=,22e5ff3f88802059aa86835d8f7c25386afed1159d4e951ef0f87ef62ab4a253 +github.com/go-openapi/errors,v0.19.2,h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY=,e02e448e5a2c1ff2a011f74d41d505a2f32b369551064940630d6660c600bf3d +github.com/go-openapi/inflect,v0.19.0,h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4=,fbcca36e347a2f560f50ac1c9c63f7d6cd97c8dff9800f08f370b5ce09b77c57 +github.com/go-openapi/jsonpointer,v0.19.3,h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=,1fe6122c9c9d10837439398976a2ff55e8ed905fa7e4a66f3fb0e857c6e06582 +github.com/go-openapi/jsonreference,v0.19.2,h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w=,00b2457c2d091a9817f91f55655a334bed8f75b2d6499ba9192f12564dd51dd9 +github.com/go-openapi/loads,v0.19.4,h1:5I4CCSqoWzT+82bBkNIvmLc0UOsoKKQ4Fz+3VxOB7SY=,adffcd0e2900bf0cca893e6bf014db55ebf161476367ac4dd365f8481c12616f +github.com/go-openapi/runtime,v0.19.7,h1:b2zcE9GCjDVtguugU7+S95vkHjwQEjz/lB+8LOuA9Nw=,4017d9c69d9d2789d0a3b50c6af509831c0f24bfc545f1b43224df2fc5194dbd 
+github.com/go-openapi/spec,v0.19.4,h1:ixzUSnHTd6hCemgtAJgluaTSGYpLNpJY4mA2DIkdOAo=,7c12cf07de1b65175474fdde12110716ab237fa862694e4e5051eb15541a964e +github.com/go-openapi/strfmt,v0.19.3,h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA=,07b9c9b2da9dffc0a830e6536b705282fd17023fe8d04aa909fe1e4e3b6306f5 +github.com/go-openapi/swag,v0.19.5,h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=,54aec6bdc63d1d6609c32b140fe74d099f8b9628d362689556537506724eaeda +github.com/go-openapi/validate,v0.19.4,h1:LGjO87VyXY3bIKjlYpXSFuLRG2mTeuYlZyeNwFFWpyM=,2b1b2612db93ed3fb411cc798150821af5c031b120097bbe6578dc4ce2d6d1df +github.com/go-playground/locales,v0.13.0,h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=,9c4b65273e135b1bdb9bafc7c0b5180a6c5936f54edecbc8807c57a9d107c6b9 +github.com/go-playground/overalls,v0.0.0-20180201144345-22ec1a223b7c,h1:3bjbKXoj7jBYdHpQFbKL2546c4dtltTHzjo+5i4CHBU=,7972d7c49470ee2e187868b30d3157ca58201f50a934caa75ce4d5b134a2a644 +github.com/go-playground/universal-translator,v0.16.0,h1:X++omBR/4cE2MNg91AoC3rmGrCjJ8eAeUP/K/EKx4DM=,316fba5fa26a586e39fc11698c16e67edabd122efe26f7fff71091a00a59883a +github.com/go-redis/redis,v6.15.6+incompatible,h1:H9evprGPLI8+ci7fxQx6WNZHJSb7be8FqJQRhdQZ5Sg=,e277bbc2acb8462aca5e20ef7569a733501bc765f65303a6e5153a86e6e3090c +github.com/go-sourcemap/sourcemap,v2.1.2+incompatible,h1:0b/xya7BKGhXuqFESKM4oIiRo9WOt2ebz7KxfreD6ug=,1bdaec84a31896eee149acb563f8af0b3ce7899d916383e0b597d6b480b6a622 +github.com/go-sql-driver/mysql,v1.4.1,h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=,f128045df19d340743a155ef282116130d27e27cbc62de160b6072c751b435ba +github.com/go-stack/stack,v1.8.0,h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=,78c2667c710f811307038634ffa43af442619acfeaf1efb593aa4e0ded9df48f +github.com/go-swagger/go-swagger,v0.20.1,h1:37XFujv7lYHLOKawfzLDg4STwwgB5zhPjodN33asJto=,79cc2c57c4e9d03a9399577b942eface46073ee6fa289b86651f1c5d0c513484 
+github.com/go-swagger/scan-repo-boundary,v0.0.0-20180623220736-973b3573c013,h1:l9rI6sNaZgNC0LnF3MiE+qTmyBA/tZAg1rtyrGbUMK0=,51aed4b67bce9d988d64ca6be9de2169f709a29d5ea83e78ffb1c2432b346ec6 +github.com/go-telegram-bot-api/telegram-bot-api,v4.6.4+incompatible,h1:2cauKuaELYAEARXRkq2LrJ0yDDv1rW7+wrTEdVL3uaU=,a0d2549e07c67e066337cc6eadd8be2a961d13b493d4325603010d4e35e519df +github.com/go-test/deep,v1.0.3,h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=,d199ce762552766bd3baf37ae4b0255bb6a6fecf144e8ae5fa3a94f1ce30a180 +github.com/go-xorm/builder,v0.3.4,h1:FxkeGB4Cggdw3tPwutLCpfjng2jugfkg6LDMrd/KsoY=,81028f69e261c29566c24f4717458d04dbe92aebc4eb93a41c1cfeef13b7c5dd +github.com/go-xorm/core,v0.6.0,h1:tp6hX+ku4OD9khFZS8VGBDRY3kfVCtelPfmkgCyHxL0=,8a8c43c039422f38e1775a835bda46e62f4a055b4b38d57967c0e7a6c9b21d23 +github.com/go-xorm/sqlfiddle,v0.0.0-20180821085327-62ce714f951a,h1:9wScpmSP5A3Bk8V3XHWUcJmYTh+ZnlHVyc+A4oZYS3Y=,e539a37b8fb0d23c21e9eb1fe34db0ffcf19e5e4ae3d3b7049bb23c722c4b382 +github.com/go-xorm/xorm,v0.7.9,h1:LZze6n1UvRmM5gpL9/U9Gucwqo6aWlFVlfcHKH10qA0=,8836904c60cf227804fc843c707cd3e99122b95a97801d09dd2bddce4ed5a29f +github.com/go-yaml/yaml,v2.1.0+incompatible,h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o=,842989ea2e54ba8e4ef49cca914a5cd37176c44ccd3bb3e8c44fcbc10cb7832e +github.com/gobuffalo/attrs,v0.1.0,h1:LY6/rbhPD/hfa+AfviaMXBmZBGb0fGHF12yg7f3dPQA=,06c6c210a26c85ae291efe9d54cab9cab26fd1453f4f48962e04c89760e775d0 +github.com/gobuffalo/buffalo,v0.15.0,h1:VsxIcfJaDm4u2UirLHGgMfQpfHVwJP3JoDmGyeeNnc0=,f4553c8809a6764cefac8eefce9a868a42ee7538bdef4eadfcc06075b865a087 +github.com/gobuffalo/buffalo-docker,v1.0.7,h1:kj+AfChcev54v4N8N6PzNFWyiVSenzu6djrgxTBvbTk=,d84d8bea93f017e3ff07eddab57e0fd7007cf2516250d6fea86c8811c36cf786 +github.com/gobuffalo/buffalo-plugins,v1.14.1,h1:ZL22sNZif+k/0I9X7LB8cpVMWh7zcVjfpiqxFlH4xSY=,556641c2c1b3a9d679a3fc46727d41da225f33c63cfbf1ff721203b24e0a9b82 
+github.com/gobuffalo/buffalo-pop,v1.23.1,h1:AnxJQZu/ZN7HCm3L8YBJoNWc2UiwSe6UHv5S4DfXUDA=,00dea8b0e63d3f4110b8bd9d32c086163229f56845a9f8b221e0093876065a05 +github.com/gobuffalo/clara,v0.9.1,h1:LYjwmKG0VwwW/nOG2f5jNamvAcfdm2Ysokc/eoVhtZ8=,319f607092c02686dfed2eb047d500c332ddd962341012bdcd91202bb46d37a9 +github.com/gobuffalo/depgen,v0.2.0,h1:CYuqsR8sq+L9G9+A6uUcTEuaK8AGenAjtYOm238fN3M=,efb3db0d05f712580bc8d3dce2967bd09d6c90140ac7bca1fbd5c5c4a28e1836 +github.com/gobuffalo/envy,v1.7.1,h1:OQl5ys5MBea7OGCdvPbBJWRgnhC/fGona6QKfvFeau8=,14ac6a5cd617dc05abfcb136586800f05f861d4a03d8fa66819a18c0d9eddeec +github.com/gobuffalo/events,v1.4.0,h1:Vje/vgTWs+dyhIS0U03oLpvx1SUdAqutv/hDWIz2ErM=,f6d99c722115631805f04fcf22e8edb7a4116bc65d698ac05c58b6a7f768efdc +github.com/gobuffalo/fizz,v1.9.5,h1:Qh0GkP7MYtJs9RZwBkPJ0CzEXynVowdNfrjg8b+TOxA=,2f645d789550f8f97039e1c4ce3e3f09dfeec28d85c8977c2b20caa06cd75b0c +github.com/gobuffalo/flect,v0.1.6,h1:D7KWNRFiCknJKA495/e1BO7oxqf8tbieaLv/ehoZ/+g=,a7011c8d3f59bac18512c76de610bf1a1f022a01ac6695e0c5af7498d33be613 +github.com/gobuffalo/genny,v0.4.1,h1:ylgRyFoVGtfq92Ziq0kyi0Sdwh//pqWEwg+vD3eK1ZA=,4ecf29587a8cbe069fc6b298d9a3cb674a8008ca4e08233904a8cba91d1ba21b +github.com/gobuffalo/gitgen,v0.0.0-20190315122116-cc086187d211,h1:mSVZ4vj4khv+oThUfS+SQU3UuFIZ5Zo6UNcvK8E8Mz8=,c79975f91dd2fd691d70e29678034eb2dc94b5da2f01b0790a919de9d2a632ac +github.com/gobuffalo/github_flavored_markdown,v1.1.0,h1:8Zzj4fTRl/OP2R7sGerzSf6g2nEJnaBEJe7UAOiEvbQ=,2d73a2baad09dc0d0f0c01549c35e83ab0c18c97f859191e54a632c2fb0eaad2 +github.com/gobuffalo/gogen,v0.2.0,h1:Xx7NCe+/y++eII2aWAFZ09/81MhDCsZwvMzIFJoQRnU=,f60900e595a3779b95b299ca9e74c517523860994a0477b360ac447d3318ccbd +github.com/gobuffalo/helpers,v0.4.0,h1:DR/iYihrVCXv1cYeIGSK3EZz2CljO+DqDLQPWZAod9c=,17ae2b069c0ca73b11b4ace6793617e0620f8d8ef171b0010b91e243c4a3bbe3 +github.com/gobuffalo/here,v0.2.3,h1:1xamq7i4CKjGgICCXY0qpxPeXGdB8oVNSevkpqwd5X4=,3808d0fbc11c58cfb0e7b430b9fc30024ba3781febe8e2601a8e2b8f76e48c00 
+github.com/gobuffalo/httptest,v1.4.0,h1:DaoTl/2iFRTk9Uau6b0Lh644tcbRtBNMHcWg6WhieS8=,9d1b48f3e525ab4661d02b3fac86f89fe27f648b1ff8e607f39a353c60c0f315 +github.com/gobuffalo/licenser,v1.4.0,h1:S8WY0nLT9zkBTjFYcbJ0E9MEK7SgE86aMfjsnuThQjY=,3e126adeb06dcaee29376804b463ed33af2b821579162039e8a16e45d0334cdc +github.com/gobuffalo/logger,v1.0.1,h1:ZEgyRGgAm4ZAhAO45YXMs5Fp+bzGLESFewzAVBMKuTg=,43510255e52f7472ec17a76847ca42cebab6efe0b573a5dcfd8261e00d86d3b7 +github.com/gobuffalo/makr,v1.2.0,h1:TA6ThoZEcq0F9FCrc/7xS1ycdCIL0K6Ux+5wmwYV7BY=,113259ce8e945acf3dd184534ab6135240fde6b57d5c6ee3787e7c124e313502 +github.com/gobuffalo/mapi,v1.1.0,h1:VEhxtd2aoPXFqVmliLXGSmqPh541OprxYYZFwgNcjn4=,162640cc01d04543030d55ed51841d673cb8257fd78b069a79010e52ec996b73 +github.com/gobuffalo/meta,v0.2.0,h1:QSDlR2nbGewl0OVL9kqtU8SeKq6zSonrKWB6G3EgADs=,6a44e2a02126c65d2e2f09de5f732327001ac05d542abcabb8dc286422469e9a +github.com/gobuffalo/mw-basicauth,v1.0.7,h1:9zTxCpu0ozzwpwvw5MO31w8nEoySNRNfZwM1YAWfGZs=,da5e2767a9d91e14efb25209c9b9dcf5ad07b551d6d54670c43c6225c8e94084 +github.com/gobuffalo/mw-contenttype,v0.0.0-20190129203934-2554e742333b,h1:6LKJWRvshByPo/dvV4B1E2wvsqXp1uoynVndvuuOZZc=,f9e2f7cce4e88ff8d6f86bc61076179b4f23a85eb5fd0a5f28793ef1e7889fab +github.com/gobuffalo/mw-csrf,v0.0.0-20190129204204-25460a055517,h1:pOOXwl1xPLLP8oZw3e3t2wwrc/KSzmlRBcaQwGpG9oo=,b47a0879eadba5c6774ad37c66afea4998767d9df1295b7b17f3469282cc92f2 +github.com/gobuffalo/mw-forcessl,v0.0.0-20180802152810-73921ae7a130,h1:v94+IGhlBro0Lz1gOR3lrdAVSZ0mJF2NxsdppKd7FnI=,533187beeb18b977c8436d0a5596c1bd420b30cce55589cb11af592df063470c +github.com/gobuffalo/mw-i18n,v0.0.0-20190129204410-552713a3ebb4,h1:c1fFPCxA7SozZPqMhpfZoOVa3wUpCl11gyCEZ4nYqUE=,96a1754eff9c9a75c6b48fc3bc9ab102bbf5d23c103b37a82cc88c666c0dbf9b +github.com/gobuffalo/mw-paramlogger,v0.0.0-20190129202837-395da1998525,h1:2QoD5giw2UrYJu65UKDEo9HFcz9yun387twL2zzn+/Q=,d2e3b1baa234032585cc0e7dc1950681dbc05d960ee958578e470df9fa3b8f18 
+github.com/gobuffalo/mw-tokenauth,v0.0.0-20190129201951-95847f29c5c8,h1:dqwRMSzfhe3rL0vMDaRvc2ozLqxapWFBEDH6/f0nQT0=,eb6f82200a81da34baa366475479069f08ed797d5edd4976c9f2af1027d37f1c +github.com/gobuffalo/nulls,v0.1.0,h1:pR3SDzXyFcQrzyPreZj+OzNHSxI4DphSOFaQuidxrfw=,a77a09fd75234e7e5589640fae5d261c03ede9ab5ec626406f24c89dfeba2b38 +github.com/gobuffalo/packd,v0.3.0,h1:eMwymTkA1uXsqxS0Tpoop3Lc0u3kTfiMBE6nKtQU4g4=,c7a9263fd464b9f5629bf161521f420b2c40f7780ed6a9ce88184dc4136787a5 +github.com/gobuffalo/packr,v1.30.1,h1:hu1fuVR3fXEZR7rXNW3h8rqSML8EVAf6KNm0NKO/wKg=,20aeea726f6db2ffc8b6dd90b1dce8991f0fd66152a270efdd21c0905b12d5f5 +github.com/gobuffalo/packr/v2,v2.7.1,h1:n3CIW5T17T8v4GGK5sWXLVWJhCz7b5aNLSxW6gYim4o=,60cd83772938a617b37c26a4924ee1f95008d53481724f801eee647e68ce22b1 +github.com/gobuffalo/plush,v3.8.3+incompatible,h1:kzvUTnFPhwyfPEsx7U7LI05/IIslZVGnAlMA1heWub8=,312e219c9827bb7d2dfc954f03fcaa275a3d9eb70687a62ecebad84ede4c51a7 +github.com/gobuffalo/plushgen,v0.1.2,h1:s4yAgNdfNMyMQ7o+Is4f1VlH2L1tKosT+m7BF28C8H4=,0efa90fac0c464409201fa74cace63c4307ac3700a23b3df7c9a9c1c976f0875 +github.com/gobuffalo/pop,v4.12.2+incompatible,h1:WFHMzzHbVLulZnEium1VlYRnWkzHz39FzVLov6rZdDI=,de2837b63e54b15d99234202839e0394183c4ff7c45b9d99162a407c95574003 +github.com/gobuffalo/release,v1.14.0,h1:+Jy7eLN5md6Fg+AMuFRUiK4sTNq4+zXxRho7/wJe1HU=,a0f34f0d3f02ea43434436936766f185b97204a073a605e720190c433c30aaa5 +github.com/gobuffalo/shoulders,v1.2.0,h1:XcPmWbzN7944VXS/I//R7o2eupUHEp3mLFWbUlk1Sco=,4c129ae195bd14520a38c608ba3a27aca674745c1f79fbcce03dacf829802ac6 +github.com/gobuffalo/syncx,v0.0.0-20190224160051-33c29581e754,h1:tpom+2CJmpzAWj5/VEHync2rJGi+epHNIeRSWjzGA+4=,ad9a571b43d72ecce24b8bed85636091710f22d8b06051e1e19ef2051f3e00da +github.com/gobuffalo/tags,v2.1.6+incompatible,h1:xaWOM48Xz8lBh+C8l5R7vSmLAZJK4KeWcLo+0pJ516g=,99bd74d4144bcdfba45fa501cd8d6dec78dc5b0404bbbfebf5bced5b976bb911 
+github.com/gobuffalo/uuid,v2.0.5+incompatible,h1:c5uWRuEnYggYCrT9AJm0U2v1QTG7OVDAvxhj8tIV5Gc=,6ab82616cbb02ddd78b9b7db14f580e2e212ceeadcfccff387a973b04be8db37 +github.com/gobuffalo/validate,v2.0.3+incompatible,h1:6f4JCEz11Zi6iIlexMv7Jz10RBPvgI795AOaubtCwTE=,53d876ba454e5e0604ab8078bfb1fca54dcd3ddd859c850cafce757c5f40153d +github.com/gobuffalo/x,v0.0.0-20190224155809-6bb134105960,h1:DoUD23uwnzKJ3t5HH2SeTIszWmc13AV9TAdMhtXQts8=,2435ac54f3ea5c024aea1d4db42a87011bb877f18f0f273f7b3e19b7093c3cfd +github.com/gobwas/glob,v0.2.3,h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=,0cfe486cd63d45ed4cb5863ff1cbd14b15e4b9380dcbf80ff26991b4049f4fdf +github.com/gobwas/httphead,v0.0.0-20180130184737-2c6c146eadee,h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0=,5a43ed4a7cd2b063b634f0df5311c0dfa6576683bfc1339f2c5b1b1127fc392b +github.com/gobwas/pool,v0.2.0,h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8=,52604b1456b92bb310461167a3e6515562f0f4214f01ed6440e3105f78be188f +github.com/gobwas/ws,v1.0.2,h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo=,f9e5c26e83278f19958c68be7b76ad6711c806b6dae766fad7692d2af867bedd +github.com/gocolly/colly,v1.2.0,h1:qRz9YAn8FIH0qzgNUw+HT9UN7wm1oF9OBAilwEWpyrI=,82f210242c4efda461bb6d2cd0543bbadf322c23b840043f236dc1fd74af9325 +github.com/gocql/gocql,v0.0.0-20191018090344-07ace3bab0f8,h1:ZyxBBeTImqFLu9mLtQUnXrO8K/SryXE/xjG/ygl0DxQ=,d38e5bd51d411bc942f295950d87d80e607a8eb186d51b445cc6c2b985681b18 +github.com/godbus/dbus,v4.1.0+incompatible,h1:WqqLRTsQic3apZUK9qC5sGNfXthmPXzUZ7nQPrNITa4=,107ef979cca9f2720633f118263afeb9acb0bf0703cc1e860098d5ec48efccb8 +github.com/gofrs/flock,v0.7.1,h1:DP+LD/t0njgoPBvT5MJLeliUIVQR03hiKR6vezdwHlc=,ee433032ec18df1e38d2385d7f9448820c5a017d895cb930cd8801401940137c +github.com/gofrs/uuid,v3.2.0+incompatible,h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=,4139fd148a7a9389629659253722b302791146583e0db94e351a325ecd06abbf 
+github.com/gogf/gf,v1.9.10,h1:lPBf0EOxv6AXVWN46EKLID0GMHDGOrs4ZAi/RUJbt+c=,83a8cf0cc2557c1e1b3cdb2112953ca303a09cb6d457d2102b3921db1bfd6fe5 +github.com/gogits/chardet,v0.0.0-20150115103509-2404f7772561,h1:deE7ritpK04PgtpyVOS2TYcQEld9qLCD5b5EbVNOuLA=,4b5c6d4b26d381d37b9a5538b9f2dc29d11f422653b19a2047e439a268c3f5ba +github.com/gogits/cron,v0.0.0-20160810035002-7f3990acf183,h1:EBTlva3AOSb80G3JSwY6ZMdILEZJ1JKuewrbqrNjWuE=,746b3b98243fc5ae7127c5102f9ba4f0b88238d081e9cb113d61be2ec16a6241 +github.com/gogo/googleapis,v1.3.0,h1:M695OaDJ5ipWvDPcoAg/YL9c3uORAegkEfBqTQF/fTQ=,ee9e1dda02a5a415c41b5bdff7f6835e929ea89ff3dc1c766510ee909e03c6c3 +github.com/gogo/protobuf,v1.3.1,h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=,4b63e18981e30565f60b7305e3de71ff9aa42cfccf15b88b3813dd2ba6c27be1 +github.com/gogs/chardet,v0.0.0-20150115103509-2404f7772561,h1:aBzukfDxQlCTVS0NBUjI5YA3iVeaZ9Tb5PxNrrIP1xs=,53b6234983c0828d620ba418be5b4e467ef8c9d634bb3d0a2bd4056e3dfa38b3 +github.com/gogs/cron,v0.0.0-20171120032916-9f6c956d3e14,h1:yXtpJr/LV6PFu4nTLgfjQdcMdzjbqqXMEnHfq0Or6p8=,913889f3018853808015c9198e6d3a25f586d88d88493c3de36530eef967664c +github.com/gogs/git-module,v0.8.2,h1:fCi0Lt8VZuFgjCXeLpkhC3irKLArK4oZ69gFvrDXx/s=,e4010dd8fdfe88a65fa8af6ecf97d7e16d4235d0eeb6a0b4b1f4e4d201c70d23 +github.com/gogs/go-gogs-client,v0.0.0-20190710002546-4c3c18947c15,h1:tgEyCCe4+o8A2K/PEi9lF0QMA6XK+Y/j/WN01LnNbbo=,cc5dcea1cca3d3d3e90a0ad548a660250b1299a61519f6dda5dcd7f2f1412daf +github.com/gogs/go-libravatar,v0.0.0-20161120025154-cd1abbd55d09,h1:UdOSIHZpkYcajRbfebBYzFDsL3SuqObH3bvKYBqgKmI=,f81991af4a649aa273bc0c3e7251f107ba0967f5d83553f5a18ed688d937eff0 +github.com/gogs/gogs,v0.11.91,h1:p8kTD9Sn6a/14u6ain6j0dPENMZ0gVEiM7phSIAL29E=,b41695c115f4e2dfc96bfbc7443fa6f91a6d2c8b32d32db4262e6977f5d55fa7 +github.com/gogs/minwinsvc,v0.0.0-20170301035411-95be6356811a,h1:8DZwxETOVWIinYxDK+i6L+rMb7eGATGaakD6ZucfHVk=,fb48a56a9f610b061af186008072fbd6e51055a12c168e1e347ecf9a05f25767 
+github.com/gohugoio/hugo,v0.59.1,h1:nxaeKEY52cdpx3wZN/EcY6dEqbgeFsZaeNkDL8azeZ8=,508257b11bfc1ec77d3993a13929de63fa08e70ae26cd7c53f03857b3db9bbdf +github.com/gohugoio/testmodBuilder/mods,v0.0.0-20190520184928-c56af20f2e95,h1:sgew0XCnZwnzpWxTt3V8LLiCO7OQi3C6dycaE67wfkU=,0d6eabbeb381b08c84e7191fcecc49027ad3382997441180b2d6eea3fafc81b6 +github.com/goji/httpauth,v0.0.0-20160601135302-2da839ab0f4d,h1:lBXNCxVENCipq4D1Is42JVOP4eQjlB8TQ6H69Yx5J9Q=,8467ed1df8ffba8da7ead144b656b6281469ab4d122adf3edf496175ad870192 +github.com/goki/freetype,v0.0.0-20181231101311-fa8a33aabaff,h1:W71vTCKoxtdXgnm1ECDFkfQnpdqAO00zzGXLA5yaEX8=,80884151cd73d38904e4370afba3b870345a883a77c395194582202d805d7d74 +github.com/goki/ki,v0.9.8,h1:SzVTxJrd0ZcnkRTinZdbc41nIFmocJ7pyllEyBzNmys=,ce62e162090d566e2f9cb5b1659327a84c646dced32729e24b420cde4d5cb714 +github.com/goki/prof,v0.0.0-20180502205428-54bc71b5d09b,h1:3zU6niF8uvEaNtRBhOkmgbE/Fx7D6xuALotArTpycNc=,f46b93b6c42a97f06a2f658e49243972f4bd469b296f1010609c8d649163b73f +github.com/golang-collections/collections,v0.0.0-20130729185459-604e922904d3,h1:zN2lZNZRflqFyxVaTIU61KNKQ9C0055u9CAfpmqUvo4=,7847b09c355215616db6309597757ff6be2cf44781d800cdad1628f141dc82ee +github.com/golang-migrate/migrate/v3,v3.5.2,h1:SUWSv6PD8Lr2TGx1lmVW7W2lRoQiVny3stM4He6jczQ=,5086537ee116e958cf9647e28f843a0ac17f5de75ab642e5aef1fe2b360b0e30 +github.com/golang-sql/civil,v0.0.0-20190719163853-cb61b32ac6fe,h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=,22fcd1e01cabf6ec75c6b6c8e443de029611c9dd5cc4673818d52dac465ac688 +github.com/golang/freetype,v0.0.0-20170609003504-e2365dfdc4a0,h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=,cdcb9e6a14933dcbf167b44dcd5083fc6a2e52c4fae8fb79747c691efeb7d84e +github.com/golang/gddo,v0.0.0-20180828051604-96d2a289f41e,h1:8sV50nrSGwclVxkCGHxgWfJhY6cyXS2plGjGvUzrMIw=,9a0683005c7700bb1b7ac155597592d15d02f510a0d2c334f8564c43b9072107 
+github.com/golang/glog,v0.0.0-20160126235308-23def4e6c14b,h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=,36b3c522c8102dfe74ca96e474c4c361750bf2bb85bc3cefe4f074c07d6825a9 +github.com/golang/groupcache,v0.0.0-20191027212112-611e8accdfc9,h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE=,a4815d7048e9a1dd79a72a09d4c9a946ccff837695d046c7f0f5c24037ce18b3 +github.com/golang/lint,v0.0.0-20180702182130-06c8688daad7,h1:2hRPrmiwPrp3fQX967rNJIhQPtiGXdlQWAxKbKw3VHA=,66e95adf2c1feb4de316d2c0ba9e04a22322df010a67b1054ad3d4fb2f9a1791 +github.com/golang/mock,v1.3.1,h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=,3209f2030646855a3644736b5d7ce2cd9076856cac2f50360805a19c38b7bc45 +github.com/golang/protobuf,v1.3.2,h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=,a004ba3acb85e012cb9e468e1d445a81cfeeb4b4db7e9802f30aa500a8341851 +github.com/golang/snappy,v0.0.1,h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=,0a9a73d55340a8e6d17e72684cf90618b275b6034ce83299abb55ed8fb3860bd +github.com/golangplus/bytes,v0.0.0-20160111154220-45c989fe5450,h1:7xqw01UYS+KCI25bMrPxwNYkSns2Db1ziQPpVq99FpE=,2904c49772d1bade7c81ddae2fa70e42bdce7b006c871c8106d1feb14fe2982b +github.com/golangplus/fmt,v0.0.0-20150411045040-2a5d6d7d2995,h1:f5gsjBiF9tRRVomCvrkGMMWI8W1f2OBFar2c5oakAP0=,2afd341a4d32c84532d6d44574718e1b8000aa57cfc21ced284612fc92b61217 +github.com/golangplus/testing,v0.0.0-20180327235837-af21d9c3145e,h1:KhcknUwkWHKZPbFy2P7jH5LKJ3La+0ZeknkkmrSgqb0=,fc111aa59d03741dad00f05ce869fcb44f5d75b841413e21e7301bc538a0255e +github.com/gomodule/redigo,v2.0.0+incompatible,h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0=,77342da7b962489363b3661803ee2fba72b23b8e97af0241877ce6ab8a95d194 +github.com/gonum/blas,v0.0.0-20181208220705-f22b278b28ac,h1:Q0Jsdxl5jbxouNs1TQYt0gxesYMU4VXRbsTlgDloZ50=,bfcad082317ace0d0bdc0832f0835d95aaa90f91cf3fce5d2d81ccdd70c38620 +github.com/gonum/floats,v0.0.0-20181209220543-c233463c7e82,h1:EvokxLQsaaQjcWVWSV38221VAK7qc2zhaO17bKys/18=,52afb5e33a03b027f8f451e23618c2decbe4443f996a203e332858c1a348a627 
+github.com/gonum/graph,v0.0.0-20190426092945-678096d81a4b,h1:LilU5ERRFWL+2D6yR1PL2oeS4n+xyTq1vfv39LFVaeE=,411fd86d898ad7ea8c1145610a27f0f13153c86b3ef5e78cb80431125082b5a6 +github.com/gonum/internal,v0.0.0-20181124074243-f884aa714029,h1:8jtTdc+Nfj9AR+0soOeia9UZSvYBvETVHZrugUowJ7M=,e7f40a97eee3574c826a1e75f80ecd94c27853feaab5c43fde7dd95ba516c9dc +github.com/gonum/lapack,v0.0.0-20181123203213-e4cdc5a0bff9,h1:7qnwS9+oeSiOIsiUMajT+0R7HR6hw5NegnKPmn/94oI=,f38b72e072728121b9acf5ae26d947aacc0024dddc09d19e382bacd8669f5997 +github.com/gonum/matrix,v0.0.0-20181209220409-c518dec07be9,h1:V2IgdyerlBa/MxaEFRbV5juy/C3MGdj4ePi+g6ePIp4=,9cea355e35e3f5718b2c69f65712b2c08a1bec13b3cfadf168d98b41b043dd63 +github.com/google/btree,v1.0.0,h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=,8dbcb36f92c7a6dc5f6aef5c26358d98b72caee69829b5b33dddabada2047785 +github.com/google/cadvisor,v0.34.0,h1:No7G6U/TasplR9uNqyc5Jj0Bet5VSYsK5xLygOf4pUw=,5a3807f43a14e6a03b7ceb9ea11f8ac241a42286be90c3b2cba49ee811111848 +github.com/google/certificate-transparency-go,v1.0.21,h1:Yf1aXowfZ2nuboBsg7iYGLmwsOARdV86pfH3g95wXmE=,7ddb21b272632236d5fb35b35c837f39d38390ea8dcb97c9f0f5d5aa561c3366 +github.com/google/flatbuffers,v1.11.0,h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A=,ff61e5077ecc7d46a2020c1b42e0a6405b50271f396d4dcc50c683345059af76 +github.com/google/go-cmp,v0.3.2-0.20191028172631-481baca67f93,h1:VvBteXw2zOXEgm0o3PgONTWf+bhUGsCaiNn3pbkU9LA=,6682f890f076aaa03f2c2afb6bc7304c9d602b9e23ff212f8a9a64f44f432dbc +github.com/google/go-containerregistry,v0.0.0-20191029173801-50b26ee28691,h1:9fkqC5Bq8l2FQgcW6FQbPDUeZvExyg7okl+s4Gg9Jrs=,7bef2c87f7ca8a39e04c770b38160dd5cfdd508546f96fab427225d12d40d85a +github.com/google/go-github,v17.0.0+incompatible,h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=,9831222a466bec73a21627e0c3525da9cadd969468e31d10ecae8580b0568d0e +github.com/google/go-github/v21,v21.0.0,h1:tn4/tmCgPAsezJFwZcMnE7U0R9/AtKRBGX4s4LFdDzI=,0b25aebca5386cdb52515402b81a8e0a676ac30f9843feb0a47a1944b7c8b527 
+github.com/google/go-github/v24,v24.0.1,h1:KCt1LjMJEey1qvPXxa9SjaWxwTsCWSq6p2Ju57UR4Q4=,4dd0a57a527a1cc52e6619e9d2e1936534439426f0eb065bfbe1e7c03b60d465 +github.com/google/go-github/v28,v28.1.1,h1:kORf5ekX5qwXO2mGzXXOjMe/g6ap8ahVe0sBEulhSxo=,621cca7f4889897317c18ed021fe0f55c279769f11357d90eb21a29c5ea78d04 +github.com/google/go-querystring,v1.0.0,h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=,1c0a0b81b921ee270e47e05cf0bf8df4475de850671e553c07740849068d4f9f +github.com/google/go-replayers/grpcreplay,v0.1.0,h1:eNb1y9rZFmY4ax45uEEECSa8fsxGRU+8Bil52ASAwic=,794ad7fb2669ea1d1305cf7717a1329146635637739bf2e26d858a318e87f99b +github.com/google/go-replayers/httpreplay,v0.1.0,h1:AX7FUb4BjrrzNvblr/OlgwrmFiep6soj5K2QSDW7BGk=,cf6d3e2262e94db5bad86d944f2f97507b1ffc2943e4385f140eb6f9a01f8e7b +github.com/google/go-tpm,v0.2.0,h1:3Z5ZjNRQ0CsUj3yWXtbbx4Vfb/sQapdSeZJvuaKuQzc=,7e90cb155fa3e7759caa1fe5df1ca43520a7f8e1a31e540573cc8290ff523a23 +github.com/google/go-tpm-tools,v0.0.0-20190906225433-1614c142f845,h1:2WNNKKRI+a5OZi5xiJVfDoOiUyfK/BU1D4w+N6967F4=,2e41ca1e24a1ba5eedf980331527d6a5ad09b8ef653bbd040321572899eff8a2 +github.com/google/gofuzz,v1.0.0,h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=,752570262575bbcb5f0107dbd80a463abacaf51e94e15f96f5bc4166ff2d33e1 +github.com/google/gopacket,v1.1.17,h1:rMrlX2ZY2UbvT+sdz3+6J+pp2z+msCq9MxTU6ymxbBY=,008645038244e12a1bfbda2317372ec34a514250741139b8e4842de7f98639d4 +github.com/google/gxui,v0.0.0-20151028112939-f85e0a97b3a4,h1:OL2d27ueTKnlQJoqLW2fc9pWYulFnJYLWzomGV7HqZo=,be209ad45b16077b010faef4a7bcbf0723dfbe47869a6f4c0aacd534e7fcbfb1 +github.com/google/martian,v2.1.1-0.20190517191504-25dcb96d9e51+incompatible,h1:xmapqc1AyLoB+ddYT6r04bD9lIjlOqGaREovi0SzFaE=,dfc5eac3877863c1f231457f96c54c915ea1c86f86c590710b7477f96e1ba0f3 +github.com/google/netstack,v0.0.0-20191031000057-4787376a6744,h1:wKeh74w+ydKcE1Eo44WDzIOcPHWmxxmtAzkAL0Mlspc=,dd74d0c9fadfb29db3bd09da657cb95300255d562ce596e88c865a71ee5d2519 
+github.com/google/pprof,v0.0.0-20191028172815-5e965273ee43,h1:59gkLC5pLENSgzw9Gx73BQQho5i//80XwgIIYWxZjp4=,667012da0f67eb7822d16f532e850091a58c1efebeef5047df9a02e972112484 +github.com/google/readahead,v0.0.0-20161222183148-eaceba169032,h1:6Be3nkuJFyRfCgr6qTIzmRp8y9QwDIbqy/nYr9WDPos=,3a2435123538463dc3412a2eb1be033b7cf8105775c1ff3524351ec405fa1469 +github.com/google/renameio,v0.1.0,h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=,b8510bb34078691a20b8e4902d371afe0eb171b2daf953f67cb3960d1926ccf3 +github.com/google/rpmpack,v0.0.0-20191101142923-13d81472ccfe,h1:P1WflKHEgTAYe39btxYzeds84DhxQSLj4hfoNn0tCyQ=,5144bdeda051f10f407f1f798502ec0d7599f9c4a7e0a79c3711fe2b79f5cae4 +github.com/google/shlex,v0.0.0-20181106134648-c34317bd91bf,h1:7+FW5aGwISbqUtkfmIpZJGRgNFg2ioYPvFaUxdqpDsg=,250fc48c105475c54cc8c9fe5c110e31986590433de2608740d6592d0dc0a4c6 +github.com/google/subcommands,v1.0.1,h1:/eqq+otEXm5vhfBrbREPCSVQbvofip6kIz+mX5TUH7k=,de4249d9823a0509df32ebad2787d5e54c9b53c1059592bd9a3bb0c4cf58034d +github.com/google/uuid,v1.1.1,h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=,2b0cbc45fb0e21c8bfebbae9b04babc196d9f06d9f3b9dec5e2adc8cfd0c1b81 +github.com/google/wire,v0.3.0,h1:imGQZGEVEHpje5056+K+cgdO72p0LQv2xIIFXNGUf60=,38eb402dbe84aee2f891df0e62623f9ff5615dfeb1e4f631eaac5cf1859c9ea6 +github.com/googleapis/gax-go,v2.0.2+incompatible,h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww=,36fe8c993c8f90067bffbba78f1325ff45ae60c8a85b778d798c56067e55c19e +github.com/googleapis/gax-go/v2,v2.0.5,h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=,846b017e21fc01f408774051d4a10bfccd7c294e10a1ad5d725278889d5f1d42 +github.com/googleapis/gnostic,v0.3.1,h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk=,33277bd9aab84cf04d058a5e2e1dbb5f3c023ba30c6127b4cc8a6662a776de53 +github.com/gopackage/ddp,v0.0.0-20170117053602-652027933df4,h1:4EZlYQIiyecYJlUbVkFXCXHz1QPhVXcHnQKAzBTPfQo=,76b2493aae8a5513b707e4f6c529f57175cca6c834dd19072a51ed3974cd77bc 
+github.com/gophercloud/gophercloud,v0.6.0,h1:Xb2lcqZtml1XjgYZxbeayEemq7ASbeTp09m36gQFpEU=,f5be75a3b128c9de7385dd7e2a8ec9fba18fb46dcf57624d88249ae99e188ed2 +github.com/gophercloud/utils,v0.0.0-20190128072930-fbb6ab446f01,h1:OgCNGSnEalfkRpn//WGJHhpo7fkP+LhTpvEITZ7CkK4=,c98b6d529b47679302d175f04d7b635824c292edc8a5ede807f9ba8145517ce7 +github.com/gopherjs/gopherjs,v0.0.0-20190915194858-d3ddacdb130f,h1:TyqzGm2z1h3AGhjOoRYyeLcW4WlW81MDQkWa+rx/000=,ff395ad20350783713974a6b4d03254b811d83c0c0caa13bcb329462a7263f70 +github.com/gopherjs/jquery,v0.0.0-20180404123100-3ba2b901425e,h1:Tf0PnEo36tq56/JezxbbiFpEce0pmK6tY7hS6PNS7tI=,26fb481ef7f7010ec901990527d7ef7b06bc18c38cb617db77f8b61263b5b453 +github.com/gopherjs/jsbuiltin,v0.0.0-20180426082241-50091555e127,h1:atBEgNR1C5+LFkl8ipQtLee9RStheS8YeCSkiYqBhOg=,603151a77e4be25c8389014b06449520c2ad5856f0161590a5de5f01bee28912 +github.com/goreleaser/goreleaser,v0.120.5,h1:N3VirNAK9u30Wj7xulfE9/cCvptO0vl+CLhaMEVGbGs=,9c516d6e8db8c6800102ca68e3f674a62dd42877d7785607f56c22f6dc9b5a9e +github.com/goreleaser/nfpm,v1.1.2,h1:9+hnNm/h/ANQWLxZixNO562w4tIO/8VlgCwOKwwZTX4=,9781a05527458d352a744a524c94473d2a72694fd54bc559a5888158bb4fa1fb +github.com/gorhill/cronexpr,v0.0.0-20180427100037-88b0669f7d75,h1:f0n1xnMSmBLzVfsMMvriDyA75NB/oBgILX2GcHXIQzY=,742d8957d3f9fe773150fb3164868a755b2af5b705b38c72c45ca5386715c617 +github.com/gorilla/context,v1.1.1,h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=,4ec8e01fe741a931edeebdee9348ffb49b5cc565ca245551d0d20b67062e6f0b +github.com/gorilla/csrf,v1.6.0,h1:60oN1cFdncCE8tjwQ3QEkFND5k37lQPcRjnlvm7CIJ0=,6fa6b9d34ba1c2409e6575db396f57607c5283e397d38a271b6930c666f166b0 +github.com/gorilla/handlers,v1.4.2,h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg=,9e47491112a46d32e372be827899e8678a881f6407f290564c63e8725b5e9a19 +github.com/gorilla/mux,v1.7.3,h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=,9ffc6c6c1194cb2b9f39237ff90b20eb4a55273404c97364ed9a6500e9571fe3 
+github.com/gorilla/pat,v1.0.1,h1:OeSoj6sffw4/majibAY2BAUsXjNP7fEE+w30KickaL4=,e0dedacf6f405854b94932a59b410bbda64d4fff8111b674db987ce242bc9d57 +github.com/gorilla/rpc,v1.1.0,h1:marKfvVP0Gpd/jHlVBKCQ8RAoUPdX7K1Nuh6l1BNh7A=,0e83ae0cbc4164cdaf0b808413f97fed7a90e2096095c14f5495b6dbfaa34266 +github.com/gorilla/schema,v1.1.0,h1:CamqUDOFUBqzrvxuz2vEwo8+SUdwsluFh7IlzJh30LY=,42a6d7dc873e8ba1822551b4e15304d5654a11f6da3cccdc270be847148bbfaf +github.com/gorilla/securecookie,v1.1.1,h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=,dd83a4230e11568159756bbea4d343c88df0cd1415bbbc7cd5badad6cd2ed903 +github.com/gorilla/sessions,v1.2.0,h1:S7P+1Hm5V/AT9cjEcUD5uDaQSX0OE577aCXgoaKpYbQ=,8753d00ae6cf8ea0e28c195d4b87875384e2ed79df7eba4cf210fdf9ab0294df +github.com/gorilla/websocket,v1.4.1,h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=,86eb427567de9e2dc84da52ee4f4315496c5ffc2152928df0e3ac4ce8a359ff7 +github.com/gosimple/slug,v1.9.0,h1:r5vDcYrFz9BmfIAMC829un9hq7hKM4cHUrsv36LbEqs=,0f72d897e3decea434cdc68c7d0226afbda7d6b1908e955bf406333e7d6bb4a7 +github.com/gosuri/uitable,v0.0.3,h1:9ZY4qCODg6JL1Ui4dL9LqCF4ghWnAOSV2h7xG98SkHE=,1316f88b6b2689d941a4727889818705a289c72d7f1f4d2d9cf5cd06fecd0b7b +github.com/gotestyourself/gotestyourself,v2.2.0+incompatible,h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=,653f8ec3ed62f8d235ab67cfc56e7c814d4ac6f56f24000802b32728523c074c +github.com/gotnospirit/makeplural,v0.0.0-20180622080156-a5f48d94d976,h1:b70jEaX2iaJSPZULSUxKtm73LBfsCrMsIlYCUgNGSIs=,5750c916115b851f4881b76d90128802d090558958aa821c691d4fa378018093 +github.com/gotnospirit/messageformat,v0.0.0-20180622080451-0eab1176a3fb,h1:akgcoKcMcMOlzb6fdycEck1Vc3+y7ubUjO6hgAOyqC8=,7189231c806aa1988b50a82019c5f972a5f1b82e61c94776999728ec1894cd29 +github.com/graarh/golang-socketio,v0.0.0-20170510162725-2c44953b9b5f,h1:utzdm9zUvVWGRtIpkdE4+36n+Gv60kNb7mFvgGxLElY=,f41faefdf625d1c04113636d467a9fa47fe083148d7393fa65c0f08e3a4078c3 
+github.com/grafana/globalconf,v0.0.0-20181214112547-7a1aae0695d9,h1:2/Bz5A5zR4TMGd9yvgGMal7nhQwHBt5/dfp0sbJFfes=,0393f4fa690096ea26c76373e99f9d9f3bfc9b34e5acd08d639b68f68af7b5e2 +github.com/grandcat/zeroconf,v0.0.0-20190424104450-85eadb44205c,h1:svzQzfVE9t7Y1CGULS5PsMWs4/H4Au/ZTJzU/0CKgqc=,2d364bea1939e3ec55b732cae452feb3182fc1d8ffa30f35aa42c0181709d138 +github.com/graph-gophers/graphql-go,v0.0.0-20190225005345-3e8838d4614c,h1:YyFUsspLqAt3noyPCLz7EFK/o1LpC1j/6MjU0bSVOQ4=,fad60e1061e15848aff79c6620f1cf55a9dd87d58ca2f57fea50c35322c817ac +github.com/graphql-go/graphql,v0.7.9-0.20190403165646-199d20bbfed7,h1:E45QFM7IqRdFnuyFk8GSamb42EckUSyJ55rtVB/w8VQ=,6e9d51c4dc431d2d7c1348fa2b3358ed8e57338a07750177698bde29c913e786 +github.com/gravitational/trace,v0.0.0-20190726142706-a535a178675f,h1:68WxnfBzJRYktZ30fmIjGQ74RsXYLoeH2/NITPktTMY=,6fb8317692ac3aa8280cd4b4749970ec6652ecbe2c629cd43b52005f9a992197 +github.com/graymeta/stow,v0.2.4,h1:qDGstknYXqcnmBQ5TRJtxD9Qv1MuRbYRhLoSMeUDs7U=,67b4e728448b89c2233da14c22f18fe6c720e88a858dff2cd3c7405c7ea10493 +github.com/gregjones/httpcache,v0.0.0-20190611155906-901d90724c79,h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=,73d773791d582cad0d90942e7d92f52d82f13119dd78e849bbd77fae2acc0276 +github.com/grokify/html-strip-tags-go,v0.0.0-20190921062105-daaa06bf1aaf,h1:wIOAyJMMen0ELGiFzlmqxdcV1yGbkyHBAB6PolcNbLA=,0bb5eaff16e4119a9251bb0a26b4190a8e36cbacce8daee8c77df76022e1087c +github.com/grpc-ecosystem/go-grpc-middleware,v1.1.0,h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg=,def2c3ec1d07264489b79fa0e8e7a5c23545f16ba3c6e613f5cdba2ae8fe2768 +github.com/grpc-ecosystem/go-grpc-prometheus,v1.2.1-0.20191002090509-6af20e3a5340,h1:uGoIog/wiQHI9GAxXO5TJbT0wWKH3O9HhOJW1F9c3fY=,bca256c9eee3d43fe310c205866c69de454e71346f18ea2b05a32bd2f6018c84 +github.com/grpc-ecosystem/grpc-gateway,v1.11.3,h1:h8+NsYENhxNTuq+dobk3+ODoJtwY4Fu0WQXsxJfL8aM=,d96a88c820576b8b6989944cbe15f4f2d94d2884f29f2f683b975a03a5bdc5fc 
+github.com/grpc-ecosystem/grpc-opentracing,v0.0.0-20180507213350-8e809c8a8645,h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=,0606bde24e978e9cd91ae45ca9e5222ce695c21a07ae02e77546496bf23b1c62 +github.com/gucumber/gucumber,v0.0.0-20180127021336-7d5c79e832a2,h1:iR8wSrr/JCzL1Ul+dRVxtIOnP8DGg/m02nHZJ9PH6P0=,4feb5116e650552868f056ee74d179e91239bf166d365267f32e903ccc495dbb +github.com/guptarohit/asciigraph,v0.4.1,h1:YHmCMN8VH81BIUIgTg2Fs3B52QDxNZw2RQ6j5pGoSxo=,976279cdbc5425609c272b2116a92fb5871a40164ae64c51dedffea7b550d2d4 +github.com/guregu/null,v2.1.3-0.20151024101046-79c5bd36b615+incompatible,h1:SZmF1M6CdAm4MmTPYYTG+x9EC8D3FOxUq9S4D37irQg=,1adcbf87f6c55963b0d020ccbac0ebd07e8aca5e0ff22469ac708c6574d7333f +github.com/gxed/go-shellwords,v1.0.3,h1:2TP32H4TAklZUdz84oj95BJhVnIrRasyx2j1cqH5K38=,c63674c66949c0442402bceca8b7768684875a667140ea0b32afdd46fc094a7f +github.com/gxed/hashland/keccakpg,v0.0.1,h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU=,c77522ff0820feb7b5be4e1c74d7c64b3aa5afe3452e1dd2f54d1ffa067c6b2d +github.com/gxed/hashland/murmur3,v0.0.1,h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc=,4576d7ae9b5d2f4ebd238de84f3b52b9d4ae4d41822ac0eabd404d346eace067 +github.com/gxed/pubsub,v0.0.0-20180201040156-26ebdf44f824,h1:TF4mX7zXpeyz/xintezebSa7ZDxAGBnqDwcoobvaz2o=,718b183cca4e30a97d3fa06457060b4d3be66742838d98a39b02ea710693d9eb +github.com/h2non/filetype,v1.0.8,h1:le8gpf+FQA0/DlDABbtisA1KiTS0Xi+YSC/E8yY3Y14=,534a477c811032fceb0c8e1ad7a15f35ff95f1d038d41164bb4d265860cc42c3 +github.com/h2non/gock,v1.0.9,h1:17gCehSo8ZOgEsFKpQgqHiR7VLyjxdAG3lkhVvO9QZU=,ab5679329b0c26b523254dd728cad1b4e6e2e7bf11569df73a1dcaa468a46cd6 +github.com/h2non/parth,v0.0.0-20190131123155-b4df798d6542,h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=,3b7b7e4bb3c2d0e22075e13443af78d03fb2ed54b3eb5bb1fa6f528c7ebe3ac0 +github.com/hailocab/go-hostpool,v0.0.0-20160125115350-e80d13ce29ed,h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=,faf2b985681cda77ab928976b620b790585e364b6aff351483227d474db85e9a 
+github.com/hanwen/go-fuse,v1.0.0,h1:GxS9Zrn6c35/BnfiVsZVWmsG803xwE7eVRDvcf/BEVc=,4b94d038e80959f816a18b34cdcbb5244e87b73956b220aac213483999b54c84 +github.com/hashicorp/aws-sdk-go-base,v0.4.0,h1:zH9hNUdsS+2G0zJaU85ul8D59BGnZBaKM+KMNPAHGwk=,967c057aecede32de140c88b6527149d2441216569620b9d9350522d0f309bdc +github.com/hashicorp/consul,v1.6.1,h1:ISPgwOO8/vPYrCXQNyx63eJAYjPGRnmFsXK7aj2XICs=,0ca8c5046df99a7a6607ab68b6604340af58d1696c7901088adfd9618850629f +github.com/hashicorp/consul/api,v1.2.0,h1:oPsuzLp2uk7I7rojPKuncWbZ+m5TMoD4Ivs+2Rkeh4Y=,2833a78c39a4fa869a928e1218f3aa83130e4f5c03b4d4e355fb76b91fa75946 +github.com/hashicorp/consul/sdk,v0.2.0,h1:GWFYFmry/k4b1hEoy7kSkmU8e30GAyI4VZHk0fRxeL4=,3f0b677061f7e79191cc0d2f8184895c20051166959566a2e48e511b1fab222c +github.com/hashicorp/errwrap,v1.0.0,h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=,ccdf4c90f894d8a5fde4e79d5828c5d27a13e9f7ce3006dd72ce76e6e17cdeb2 +github.com/hashicorp/go-azure-helpers,v0.0.0-20190129193224-166dfd221bb2,h1:VBRx+yPYUZaobnn5ANBcOUf4hhWpTHSQgftG4TcDkhI=,dd17ed56e4b541cffa69679557074071372ab70682f695d8b61126c9393f92dc +github.com/hashicorp/go-bexpr,v0.1.2,h1:ijMXI4qERbzxbCnkxmfUtwMyjrrk3y+Vt0MxojNCbBs=,ac79086a2900ebf2f5414fe54b5799f24b3ddf953a28299f46831a11b10b1df0 +github.com/hashicorp/go-checkpoint,v0.5.0,h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU=,1baf63010271d6c8abc0f4edc9e9d41483cb55218e4e399ca4c70ef225415f36 +github.com/hashicorp/go-cleanhttp,v0.5.1,h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=,e3cc9964b0bc80c6156d6fb064abcb62ff8c00df8be8009b6f6d3aefc2776a23 +github.com/hashicorp/go-discover,v0.0.0-20190403160810-22221edb15cd,h1:SynRxs8h2h7lLSA5py5a3WWkYpImhREtju0CuRd97wc=,c58ed5375890c98a836234f5166cf88b73ad7595899edaa43c775d650043b4b3 +github.com/hashicorp/go-gcp-common,v0.5.0,h1:kkIQTjNTopn4eXQ1+lCiHYZXUtgIZvbc6YtAQkMnTos=,a1fee55619b3579e5fe89b6f944dce87e190b8ea1526f24622ba5941d664b639 
+github.com/hashicorp/go-getter,v1.4.0,h1:ENHNi8494porjD0ZhIrjlAHnveSFhY7hvOJrV/fsKkw=,cbae7b8a5f018c78bb304c47840c390b3c3be98b712b90b33d16304f1b427eb1 +github.com/hashicorp/go-hclog,v0.9.2,h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=,e1a873d9fa828038b5b2c93e0f49f9e8187b4f5255d0a3d7989d3ac178807af4 +github.com/hashicorp/go-immutable-radix,v1.1.0,h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc=,c23ca92f0fb7dce35b86d35ccf9cfa871db97379d2ca8a0fcc15fde32ff369bb +github.com/hashicorp/go-memdb,v1.0.4,h1:sIdJHAEtV3//iXcUb4LumSQeorYos5V0ptvqvQvFgDA=,c3eedd68e60f3db16499dff27fe4d4e874978c250bab152044965a475cb47c72 +github.com/hashicorp/go-msgpack,v0.5.5,h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=,fb47605669b0ddd75292aac788208475fecd54e0ea3e9a282d8a98ae8c60d1f5 +github.com/hashicorp/go-multierror,v1.0.0,h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=,a66a1b9dff26a9a7fcaa5aa5e658c13f94c0daeb572536b1ecc7ebe51f4d0be7 +github.com/hashicorp/go-oracle-terraform,v0.0.0-20181016190316-007121241b79,h1:RKu7yAXZTaQsxj1K9GDsh+QVw0+Wu1SWHxtbFN0n+hE=,5b3ab30e1aef56e38d750a5dc344f1ab996859408a6b76a9f48f5f75747fd712 +github.com/hashicorp/go-plugin,v1.0.1,h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE=,0853effcccdb7bfac1c122f72cd3a1241b4e0934609541c409e9f59b441ae01e +github.com/hashicorp/go-raftchunking,v0.6.2,h1:imj6CVkwXj6VzgXZQvzS+fSrkbFCzlJ2t00F3PacnuU=,f5c55a3679c8a8f63d798d2b67552bfcd198dc5b9473d81c3ce1b353a055bc5c +github.com/hashicorp/go-retryablehttp,v0.6.3,h1:tuulM+WnToeqa05z83YLmKabZxrySOmJAd4mJ+s2Nfg=,69cb67f4821e97ca8f04b0cb710c61a5acfaa948dda59b949b40fd6fae8e7dec +github.com/hashicorp/go-rootcerts,v1.0.1,h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8=,3f558b1a436ed6fb15872383545109227f9552bf5daa95583e9402bbd3a24fff +github.com/hashicorp/go-safetemp,v1.0.0,h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=,6843a6b60d650ae9be836add0ab5ac1b1719a101bf12fe4ca6678fcd87baa19a 
+github.com/hashicorp/go-slug,v0.4.0,h1:YSz3afoEZZJVVB46NITf0+opd2cHpaYJ1XSojOyP0x8=,b6a027a2d69ae8786a6830239a79ceac487463237b49e03250a9b1e116f0a5ac +github.com/hashicorp/go-sockaddr,v1.0.2,h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=,50c1b60863b0cd31d03b26d3975f76cab55466666c067cd1823481a61f19af33 +github.com/hashicorp/go-syslog,v1.0.0,h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=,a0ca8b61ea365e9ecdca513b94f200aef3ff68b4c95d9dabc88ca25fcb33bce6 +github.com/hashicorp/go-tfe,v0.3.25,h1:4rPk/9rSYuRoujKk5FsxSvtC/AjJCQphLS/57yr6wUM=,5ade1d16517697c7bd04b556f852264eef33906c52d32bd6702c47838c1c1c04 +github.com/hashicorp/go-uuid,v1.0.1,h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=,a05417b988b047d55fca8ad4fec6bde56c3907f679fece48f97d608e61e82a5c +github.com/hashicorp/go-version,v1.2.0,h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E=,a3231adb6bf029750970de2955e82e41e4c062b94eb73683e9111aa0c0841008 +github.com/hashicorp/go.net,v0.0.1,h1:sNCoNyDEvN1xa+X0baata4RdcpKwcMS6DH+xwfqPgjw=,71564aa3cb6e2820ee31e4d9e264e4ed889c7916f958b2f54c6f3004d4fcd8d2 +github.com/hashicorp/golang-lru,v0.5.3,h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=,ac6e8bdc76a1275e3496f1ab2484e28ab4be2c81e2da78b8cdd1c2d269b931e4 +github.com/hashicorp/hcl,v1.0.0,h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=,54149a2e5121b3e81f961c79210e63d6798eb63de28d2599ee59ade1fa76c82b +github.com/hashicorp/hcl/v2,v2.0.0,h1:efQznTz+ydmQXq3BOnRa3AXzvCeTq1P4dKj/z5GLlY8=,6275e2af8b3247c6de72baab13b3be531431f695e001e4d36c920e412a715032 +github.com/hashicorp/hcl2,v0.0.0-20191002203319-fb75b3253c80,h1:PFfGModn55JA0oBsvFghhj0v93me+Ctr3uHC/UmFAls=,42811f77c4da1d31371c51076cbcecc99042fc7a74c6e2622b11bea96043a777 +github.com/hashicorp/hil,v0.0.0-20190212112733-ab17b08d6590,h1:2yzhWGdgQUWZUCNK+AoO35V+HTsgEmcM4J9IkArh7PI=,cb2b110c86a312b7c60094c9b11853ae288945c34fa5861b67ff2d97edaab292 
+github.com/hashicorp/logutils,v1.0.0,h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=,0e88424578d1d6b7793b63d30c180a353ce8041701d25dc7c3bcd9841c36db5b +github.com/hashicorp/mdns,v1.0.1,h1:XFSOubp8KWB+Jd2PDyaX5xUd5bhSP/+pTDZVDMzZJM8=,0f4b33961638b1273ace80b64c6fc7e54a1064484b2a1e182ab3d38a35dbc94f +github.com/hashicorp/memberlist,v0.1.5,h1:AYBsgJOW9gab/toO5tEB8lWetVgDKZycqkebJ8xxpqM=,51054573cad1655b1b349553a8d455eedc15b49f0277edd2e693bc5d0503af62 +github.com/hashicorp/net-rpc-msgpackrpc,v0.0.0-20151116020338-a14192a58a69,h1:lc3c72qGlIMDqQpQH82Y4vaglRMMFdJbziYWriR4UcE=,b0c3a5ec955b0dfb85b39a6aa1d10fe0e810dd78493c0a14ea5760bac1cadd32 +github.com/hashicorp/nomad/api,v0.0.0-20190412184103-1c38ced33adf,h1:U/40PQvWkaXCDdK9QHKf1pVDVcA+NIDVbzzonFGkgIA=,b9e994cd47eed80531b93d9f64be426cbdc6fc6e58323f6b26ae53b1fd692bbd +github.com/hashicorp/packer,v1.4.4,h1:ee+jewbEfTKV77+YtRR0m2Q8suTiXnr010bBFt5vJSA=,d2fc7c22b3528a4acb321fda24575cf2f88df8f5085b3b5da559e44d8b12295a +github.com/hashicorp/raft,v1.1.1,h1:HJr7UE1x/JrJSc9Oy6aDBHtNHUUBHjcQjTgvUVihoZs=,b6a10aa04b5f45486a6111d4a50cb65ee179b091f04a047e316b85f38ebbf873 +github.com/hashicorp/raft-boltdb,v0.0.0-20191021154308-4207f1bf0617,h1:CJDRE/2tBNFOrcoexD2nvTRbQEox3FDxl4NxIezp1b8=,e2008570aed06ba72cd783d6bc729b67b7e0cecd2219a8420dd24dcef82e64f8 +github.com/hashicorp/raft-snapshot,v1.0.1,h1:cx002JsTEAfAP0pIuANlDtTXg/pi2Db6YbRRmLQTQKw=,3d40d03f6793fe87464359f28b136b920daf7aa8544a98270470d04cef132a77 +github.com/hashicorp/serf,v0.8.5,h1:ZynDUIQiA8usmRgPdGPHFdPnb1wgGI9tK3mO9hcAJjc=,88623d0f1a155bb2fe254210f68f1603b42162f031fbf51256f1465b36bc7769 +github.com/hashicorp/terraform,v0.12.13,h1:LACXUTZvAGf8W/6wehHjOgi6YEMN7ejDUpnpll2qbJ0=,4dbe6d0c15f4d934fd583fc20bec55326ffc79cf0d5b7fd28978ba14d178fe8d +github.com/hashicorp/terraform-config-inspect,v0.0.0-20190821133035-82a99dc22ef4,h1:fTkL0YwjohGyN7AqsDhz6bwcGBpT+xBqi3Qhpw58Juw=,1261dc9b65805f9be029f6a42d9e0ddccc89c4d0c50e5fa2895b1b53198195c3 
+github.com/hashicorp/terraform-svchost,v0.0.0-20191011084731-65d371908596,h1:hjyO2JsNZUKT1ym+FAdlBEkGPevazYsmVgIMw7dVELg=,8055e9f82b0484eb70594ca682bcf4401d2286c2021ffc72c6c3b6ad9ac9a024 +github.com/hashicorp/vault,v1.2.0-rc1,h1:GFYP6ck5f0EaJsGMD4PARIX5HaHREUxMbTaVPy+dFEg=,89c84474c97b1400ca858fe1b6e0eb3bd91dac17a4aff4336bd95104381e8b2b +github.com/hashicorp/vault-plugin-auth-alicloud,v0.5.2-0.20190725165955-db428c1b0976,h1:f+r1gXVvQJ0+2pfxgBDP1zZUC6lUmPNM0xp7AKupyBg=,3bc95606713215c3ae25f9be06ed2186f8f2e5e9ad8e025fafd332d97045ed09 +github.com/hashicorp/vault-plugin-auth-azure,v0.5.2-0.20190725170003-0fc4bd518aa2,h1:Ua6AFhJYkdNGC5s4uDL7EGVBD/jPUOcnubDkPsaG7K8=,bfa988cf4a3e33e7db3caf2ecad55c2f7c2e3f39088243767927ac8ed8d2556e +github.com/hashicorp/vault-plugin-auth-centrify,v0.5.2-0.20190725170023-9d8d5e0b03e6,h1:UXM3yxzNaruvgaccRjFXKcKnsTTHzp213MJ045wto6A=,165cf5f7daa0e4c286bad12f09eeac648062554399f18890bf35789b6b16e9c7 +github.com/hashicorp/vault-plugin-auth-gcp,v0.5.2-0.20190725170032-0aa7d1c92039,h1:uqYbah1dntV8OccHCbY3bBzYX/zLtjmG0ZIZPV+x6EM=,e9f9ccc7ca02c40291bb27012f2dd1fead86d2de2ab85a6d07b0aa7d98533f49 +github.com/hashicorp/vault-plugin-auth-jwt,v0.5.2-0.20190725170041-1cfee03e8d3a,h1:zdhacnLMH4P47PdSPJo0omNh+IkSvPj0LbiHLQu0aVk=,2b04c80c6d2000558b63ced2c9ba60a4a2ffe4c76d2d58d5fe121f714a3cf291 +github.com/hashicorp/vault-plugin-auth-kubernetes,v0.5.2-0.20190725170047-354505be0ecf,h1:il4UUQC9zfsSRNR2EAQVqC+DzrvzZpFmJReQ7p6/bKw=,d95794ab78e644a95799a3505a83af58a83c6bf775e69832b3660ca47c042d5a +github.com/hashicorp/vault-plugin-auth-oci,v0.0.0-20190904175623-97c0c0187c5c,h1:z6LQZvs1OtoVy2XgbgNhiDgp0U62Xbstn7/cgNZvh6g=,b23f2afa7fab5368d83a01be865e2dddf7ba6c7e8804ac205ccc1701a9239d51 +github.com/hashicorp/vault-plugin-auth-pcf,v0.0.0-20190725170053-826a135618c1,h1:mPyQ1+jB/ztcqebEdmNhSuYq4XVOpB5TUyyi0118T40=,c444159df670a1aba7e59029bf928989091886fa45970f751fe644d243d43744 
+github.com/hashicorp/vault-plugin-database-elasticsearch,v0.0.0-20190725170059-5c0d558eb59d,h1:VUD1T3aI5GL8uoSSDhHncHP8ksgepZsvSLhsRG8MJ3s=,b151c27f632b8e05686473b4936b480cec694498c1e446dc5208b4db05c559f1 +github.com/hashicorp/vault-plugin-secrets-ad,v0.5.3-0.20190725170108-e1b17ad0c772,h1:N219G3MUxPRhtOBMFVdsSQWU47MrvivSHLmTAPpHcs4=,c8801ee5f030fa6cb36045f1e321d964224f9a2b4a17100140cafca7a6d8daf5 +github.com/hashicorp/vault-plugin-secrets-alicloud,v0.5.2-0.20190725170114-7d66a3fa0600,h1:kyHR0JOKFDAaC4sjQ3iD1lTH6uaIfmTk4rQ+JOGW5Zo=,f816c029601c9e7235f798e9591246ac935d3b1e330abfb23af59afa6bc08e0d +github.com/hashicorp/vault-plugin-secrets-azure,v0.5.2-0.20190725170121-541440395211,h1:hZ21h0DWWKkoeMW7zkYaPVLxGZtKfYyIcE9G8xug4YQ=,5d71ad3ef26fd40b3afaf852913f38e5be0a1db8844bf21cb787e204cdbc48e4 +github.com/hashicorp/vault-plugin-secrets-gcp,v0.5.3-0.20190725170127-aa49df112140,h1:gSvWU9aYAsHxqKU0ohJD9njlNQ1/qLFPRs85u+xJFv4=,9b209e3ef7b8d7c41e823705cc190699540bbd2076f82344a83c106fa7e4ac98 +github.com/hashicorp/vault-plugin-secrets-gcpkms,v0.5.2-0.20190725170135-aaf270943731,h1:zP2vqetYhON59Mf5FTV9KmyKSnY1cLFzdNW0YYnNKbo=,5d3bc6de4bdad4725c4348a0d6861bce3e80a9eb13d4b05179cd663b47f46545 +github.com/hashicorp/vault-plugin-secrets-kv,v0.5.2-0.20190725170141-1c4dac87f383,h1:4IqT7JQt/GyYKr0HGemkUlYpF45ZALHSN9rHy7Sipos=,10f03c6d8a51714692b43ab69c2cb5f041ac611210aa9804237a9345e930f018 +github.com/hashicorp/vault/api,v1.0.4,h1:j08Or/wryXT4AcHj1oCbMd7IijXcKzYUGw59LGu9onU=,a885d16e067a5586e55914cd8e40f250a28fe94b3b864de47d495ad1f71c4251 +github.com/hashicorp/vault/sdk,v0.1.14-0.20190909201848-e0fbf9b652e2,h1:b65cSyZqljnCPzzsUXvR4P0eXypo1xahQyG809+IySk=,0aca8708570b724605514cab6dbbc9cc7bce5d27786a4b2da87553c437c42463 +github.com/hashicorp/vic,v1.5.1-0.20190403131502-bbfe86ec9443,h1:O/pT5C1Q3mVXMyuqg7yuAWUg/jMZR1/0QTzTRdNR6Uw=,9c09a35b14d797812e6714073471b3472c16f9cb4deb430f9e2dd15fa8d25e32 
+github.com/hashicorp/yamux,v0.0.0-20190923154419-df201c70410d,h1:W+SIwDdl3+jXWeidYySAgzytE3piq6GumXeBjFBG67c=,d8a888d6a4ecbc09f2f3663cb47aa2d064298eeb1491f4761a43ae95e93ba035 +github.com/herenow/go-crate,v0.0.0-20190617151714-6f2215a33eca,h1:kk1qCxy+FS5McLJ69dSpB6Y6kHCMa23UwHyglIzJ/bk=,aa618858b9c03e47962afb2a4098ad6cca8ecd09904cdbc5eb62c5a1d74befca +github.com/hetznercloud/hcloud-go,v1.15.1,h1:G8Q+xyAqQ5IUY7yq4HKZgkabFa0S/VXJXq3TGCeT8JM=,028402928c1bc1db686cab5738e6fb91a61252c1236258e2d911dd8da21f8af5 +github.com/hinshun/vt10x,v0.0.0-20180616224451-1954e6464174,h1:WlZsjVhE8Af9IcZDGgJGQpNflI3+MJSBhsgT5PCtzBQ=,4afc77bd4950db746c68d23e6ed681d31cd952559d712c1400da476084567cf6 +github.com/hjfreyer/taglib-go,v0.0.0-20151027170453-0ef8bba9c41b,h1:Q4OOFmH18aIjnDJlvYm4BXmpHKXk1zTJP0QZ0otNwPs=,e7735f2cdbb7441dbe6bbc303cff9b9a20d9845dc901e31f6e29e3ef83613390 +github.com/howeyc/fsnotify,v0.9.0,h1:0gtV5JmOKH4A8SsFxG2BczSeXWWPvcMT0euZt5gDAxY=,a72f2f092433c8b53e095d6db3d3e18517db1a5a9814a78ed97194239145740f +github.com/howeyc/gopass,v0.0.0-20190910152052-7cb4b85ec19c,h1:aY2hhxLhjEAbfXOx2nRJxCXezC6CO2V/yN+OCr1srtk=,83560b6c9a6220bcbb4ad2f043e5a190ab11a013b77c1bbff9a3a67ed74d4b37 +github.com/hpcloud/tail,v1.0.0,h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=,3cba484748e2e2919d72663599b8cc6454058976fbca96f9ac78d84f195b922a +github.com/huandu/xstrings,v1.2.0,h1:yPeWdRnmynF7p+lLYz0H2tthW9lqhMJrQV/U7yy4wX0=,fe7011ad569e464d6ff81bdb1d80c4ebdb5baac5c89d17c1644a23cac0c48828 +github.com/huin/goupnp,v1.0.0,h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo=,9685536729d9860766846ad4e56fb961b246d5afa209e4058ee0d021aec37827 +github.com/huin/goutil,v0.0.0-20170803182201-1ca381bf3150,h1:vlNjIqmUZ9CMAWsbURYl3a6wZbw7q5RHVvlXTNS/Bs8=,d887199bd2f388075ff7aaf1d3061b13b92c20e01ccd6337c864fd409fe78831 +github.com/hybridgroup/go-ardrone,v0.0.0-20140402002621-b9750d8d7b78,h1:7of6LJZ4LF9AvF4bTiMr2I72KxodBf1BXrSD9Tz0lWU=,997e0efef1b73cc1930ad67cd649268ff864393fa85dedf32672ecca78647021 
+github.com/hybridgroup/mjpeg,v0.0.0-20140228234708-4680f319790e,h1:xCcwD5FOXul+j1dn8xD16nbrhJkkum/Cn+jTd/u1LhY=,d9134203da596f895c55c3a9fd0aea32ad26501ca88e646cbe9f82136f592c0f +github.com/hyperledger/fabric,v1.4.3,h1:6MmYhcDbxhd0TvpvHLR3c5m3fVjaX97690H8TRjpJNA=,067d2bd69094dc9f693d9b00c8bea810f61f6a8a3d0ac640830b468934e22023 +github.com/hyperonecom/h1-client-go,v0.0.0-20190122232013-cf38e8387775,h1:MIteIoIQ5nFoOmwEHPDsqng8d0dtKj3lCnQCwGvtxXc=,135625f81c1c6c62b296269829a74f1266928600545fedec0825cb97284264f6 +github.com/iancoleman/strcase,v0.0.0-20190422225806-e506e3ef7365,h1:ECW73yc9MY7935nNYXUkK7Dz17YuSUI9yqRqYS8aBww=,f93e74faf2e05699180c40ef21204629a1c6bd382658f1059c80631c377c5246 +github.com/ianlancetaylor/demangle,v0.0.0-20181102032728-5e5cf60278f6,h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c=,73ae40ed96af2703f85cd4c552cf6b14551ceb782348be8185b730f44c842ab9 +github.com/iij/doapi,v0.0.0-20190504054126-0bbf12d6d7df,h1:MZf03xP9WdakyXhOWuAD5uPK3wHh96wCsqe3hCMKh8E=,7e33155961c2cba072047deb34d19a7d863a713e502abe8bdc31ab91424bd226 +github.com/ijc/Gotty,v0.0.0-20170406111628-a8b993ba6abd,h1:anPrsicrIi2ColgWTVPk+TrN42hJIWlfPHSBP9S0ZkM=,b8b9a99b3632feb3449d1fb8950d292333f8a7f494b182320ecdb0479d78442f +github.com/imdario/mergo,v0.3.8,h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ=,579cad1ed913cfcb424deb97e7016749abcc9d585bad07d14f19550df052cec5 +github.com/imkira/go-interpol,v1.1.0,h1:KIiKr0VSG2CUW1hl1jpiyuzuJeKUUpC8iM1AIE7N1Vk=,de5111f7694700ea056beeb7c1ca1a827075d423422f251076ee17bd869477d9 +github.com/improbable-eng/grpc-web,v0.9.1,h1:tenDg9Lg+zYXeS/ojbKyfwVO5TVYh5FFGsrXNAblF1o=,3a287ae758b41feea9f26ec1b8757628d4742b87376fa40b29d878ee651bfe62 +github.com/imroc/req,v0.2.3,h1:ElMCifcqg/1GonGloyyTUrj6D6IITL6EiNEKHUl4xZM=,951172f0969fa0bad31ebbe9b17699ea3909b09eaf8df39ccd78e48097682c78 +github.com/inconshreveable/go-update,v0.0.0-20160112193335-8152e7eb6ccf,h1:WfD7VjIE6z8dIvMsI4/s+1qr5EL+zoIGev1BQj1eoJ8=,adf856fb49e7c5059b2edb42a31daf4a536dc698fe0728835b018150a884b678 
+github.com/inconshreveable/log15,v0.0.0-20180818164646-67afb5ed74ec,h1:CGkYB1Q7DSsH/ku+to+foV4agt2F2miquaLUgF6L178=,31875747bcd198c39714d38747ac77e585620f2f37d1b1e1a03b164af6762995 +github.com/inconshreveable/mousetrap,v1.0.0,h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=,c3fa0813e78f5cef10dc0e9912c43e68e06ff970a98e98c4050fe14dbbfd18c7 +github.com/influxdata/changelog,v1.1.0,h1:HXhmLZDrbuC+Ca5YX7g8B8cH5DmJpaOjd844d9Y7aTQ=,19e60d9b658aaecca4e075126c996c1abd5e369003c14bbe575edc4ba2b9c182 +github.com/influxdata/flux,v0.52.0,h1:R91uUXbHzoiyYF7Xhm+wP3a0iSnl43iYJrN93nBhuP0=,e0121889c46cc4ad22f1662e68df7dbdfbb361c3da6809add4d1409cef764be9 +github.com/influxdata/influxdb,v1.7.9,h1:uSeBTNO4rBkbp1Be5FKRsAmglM9nlx25TzVQRQt1An4=,b49a72374a14f726229e71152e74e8a132c2913137c4457f31bae8c7735e812c +github.com/influxdata/influxdb1-client,v0.0.0-20190809212627-fc22c7df067e,h1:txQltCyjXAqVVSZDArPEhUTg35hKwVIuXwtQo7eAMNQ=,fc41ea93bf2b06b231823b116dc11b0ed89badf1ce6a4c848a33c77dcf2c123a +github.com/influxdata/influxql,v1.0.1,h1:6PGG0SunRmptIMIreNRolhQ38Sq4qDfi2dS3BS1YD8Y=,2a697984d1cd82656f69901bfe1771676493411c1370d77271bde3ab3c917a1e +github.com/influxdata/line-protocol,v0.0.0-20180522152040-32c6aa80de5e,h1:/o3vQtpWJhvnIbXley4/jwzzqNeigJK9z+LZcJZ9zfM=,6111b5e459106f7003477186aa2e34423dbe0c53983944a07d8b835ff8c7757c +github.com/influxdata/promql/v2,v2.12.0,h1:kXn3p0D7zPw16rOtfDR+wo6aaiH8tSMfhPwONTxrlEc=,b928626f2eb81eed0046ef23a83a77a28dd140d369a0d2538c94e85d1055877f +github.com/influxdata/tdigest,v0.0.0-20181121200506-bf2b5ad3c0a9,h1:MHTrDWmQpHq/hkq+7cw9oYAt2PqUw52TZazRA0N7PGE=,5d6b056d98d1e7e9cd884aea4e73934cc8ea89218eb43ee1d5140d3ccb34ed52 +github.com/influxdb/influxdb,v1.7.9,h1:KMBwwvyJyBppIwrg5t0662p+Yei/ucnIkqUl8txiQdQ=,ad251d4cc00aec767465dc60d6b702a3635b68402123a4ee5d1ee2b5006310b3 +github.com/iotexproject/go-pkgs,v0.1.1,h1:AyWJf8jqOg4aMSrxi+MInFFBZhTvSm0LCu1o08heijk=,c5099edde7450b4f8b9a0f49c42697f5e9bcb92d2bf58395aa0681f3ef6b583d 
+github.com/iotexproject/iotex-address,v0.2.1,h1:ZJH2ajx5OBrbaRJ0ZWlWUo685zr5kjWijVjtmUrm42E=,53c7ce4d7fbc55ee79e92e9e0b31ee3b3ba0e6e5d3e24cd43e0a58c766568c9d +github.com/iotexproject/iotex-proto,v0.2.5,h1:SYdl9Lqb0LYfFf3sfw92fN8GY3bthfCvGmltz+2uvDQ=,546cb070e92286601aee16d03383712172061c8fe78e53cf04498a9358470a78 +github.com/ipfs/bbloom,v0.0.4,h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs=,92993c175552cc626ef6b1ab6cf887f0f640311748c47e7615df29a966c1b774 +github.com/ipfs/go-bitswap,v0.1.3,h1:jAl9Z/TYObpGeGATUemnOZ7RYb0F/kzNVlhcYZesz+0=,ee26d57b2765f808ebebca8aa18695bfa02b738f47b4b5db5efce5c91f28fbcd +github.com/ipfs/go-block-format,v0.0.2,h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE=,02ad9fa29f97073ece45a5da7a92e59e6c6b856e9a03bd853361b8107296c020 +github.com/ipfs/go-blockservice,v0.1.2,h1:fqFeeu1EG0lGVrqUo+BVJv7LZV31I4ZsyNthCOMAJRc=,31c5ff02d71ee454bebea3944d7e06c2ffd6f1c4cfdddf71c5122e982f261c7d +github.com/ipfs/go-cid,v0.0.3,h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms=,f8bd60f8bbd79ed1fa5c8c113f6e17addb12257b0d925d3327ee7c25a7733591 +github.com/ipfs/go-datastore,v0.1.1,h1:F4k0TkTAZGLFzBOrVKDAvch6JZtuN4NHkfdcEZL50aI=,be724f5e3a459cf6ae9e68d2fa14e27cc92c53ae775979f2412b4f5b3f2b0336 +github.com/ipfs/go-detect-race,v0.0.1,h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=,c00c97cacb355cb0569bee75775eff6b656d95dd7d0855ed97c2ee44666b72cd +github.com/ipfs/go-ds-badger,v0.0.7,h1:NMyh88Q50HG6/S2YD58DLkq0c0/ZQPMbSojONH+PRf4=,26a453fc19eb26fe6077f12310ff1ad7230fe31b31a0c17fb47abba75379ee61 +github.com/ipfs/go-ds-leveldb,v0.1.0,h1:OsCuIIh1LMTk4WIQ1UJH7e3j01qlOP+KWVhNS6lBDZY=,43085f79b999edef0b8b49dea1ed35d47cc1c453ef401634825c0be5b62ac6d9 +github.com/ipfs/go-hamt-ipld,v0.0.13,h1:Jbt5ALTYnrzbcOBka11kAkgn3auvkQBGkKWjGRsQrio=,e16acbc3f203616ccd9119415b9db28a6f18c72f053259842f7db50aa1193cf8 +github.com/ipfs/go-ipfs-blockstore,v0.1.0,h1:V1GZorHFUIB6YgTJQdq7mcaIpUfCM3fCyVi+MTo9O88=,19a45734b2615632b180b59032d39c04c50fc735c7f9fd27c5547b0facb4ef8f 
+github.com/ipfs/go-ipfs-blocksutil,v0.0.1,h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ=,3fcf4221d4d59af5807040f209ff0d28d81f6974d61ac279b43a44b2f46d8182 +github.com/ipfs/go-ipfs-chunker,v0.0.1,h1:cHUUxKFQ99pozdahi+uSC/3Y6HeRpi9oTeUHbE27SEw=,02a0e4766162345a5bea8962c315b4bab8f2550aa1b760dcece96794b3ba22ef +github.com/ipfs/go-ipfs-config,v0.0.11,h1:5/4nas2CQXiKr2/MLxU24GDGTBvtstQIQezuk7ltOQQ=,e26bdd6db98c4ccf932440aa22a1aa2d550903a0f6f9da82f1ff5902ebbe260e +github.com/ipfs/go-ipfs-delay,v0.0.1,h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ=,bc3a4494d27cd7fabdeb7036e2edadd27f0edbd2b7d3cf49d14e3402c17c3ab6 +github.com/ipfs/go-ipfs-ds-help,v0.0.1,h1:QBg+Ts2zgeemK/dB0saiF/ykzRGgfoFMT90Rzo0OnVU=,52d0d886ebb65366abb35f19b76c4f6f349464eaedf092da95c661a451b2bf06 +github.com/ipfs/go-ipfs-exchange-interface,v0.0.1,h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM=,0a593df65586ff592255eb69923a43d413b24ad56454e14e94f5e722756fb102 +github.com/ipfs/go-ipfs-exchange-offline,v0.0.1,h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew=,04b69dc6dd34a2c5c2d1f0df8777fcaa8590aa528b960cc26178af6f609e29cf +github.com/ipfs/go-ipfs-files,v0.0.6,h1:sMRtPiSmDrTA2FEiFTtk1vWgO2Dkg7bxXKJ+s8/cDAc=,442fa790aba0beff3a79503064a35dceab2a29dc4ab8edcca690c7f61ef6c6c0 +github.com/ipfs/go-ipfs-flags,v0.0.1,h1:OH5cEkJYL0QgA+bvD55TNG9ud8HA2Nqaav47b2c/UJk=,61ac13bc74f89286ac30db2ce79b26adfba63a0676cbc430ad750df2d516565a +github.com/ipfs/go-ipfs-posinfo,v0.0.1,h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs=,149f52f33d8ffd4f82056b4ea1dae2f25024a2e8df0ff555789c549468d998e7 +github.com/ipfs/go-ipfs-pq,v0.0.1,h1:zgUotX8dcAB/w/HidJh1zzc1yFq6Vm8J7T2F4itj/RU=,4eda59f4f898933265b82d381cc1ea5a3d3c75752618f46496a2d150c09aeb2d +github.com/ipfs/go-ipfs-routing,v0.1.0,h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ=,e2281e568eed0ee5621886d28802eefd8a9d0806cbd1db80c01550ad59ec54c7 +github.com/ipfs/go-ipfs-util,v0.0.1,h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50=,6d3af4d6dcb95047b64fc74972cdcd84f199c6bad467a7de3543c3eaa0d4ee49 
+github.com/ipfs/go-ipld-cbor,v0.0.3,h1:ENsxvybwkmke7Z/QJOmeJfoguj6GH3Y0YOaGrfy9Q0I=,4087b9930a8e2899c3540e61bd8e7f04c8bdd8670f68ddbfcf10f45a0e619cef +github.com/ipfs/go-ipld-format,v0.0.2,h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs=,3da08ede588080b6ec81c5ad8fbfb1c9ea306a038be41dc06b1f3a1a101ebe50 +github.com/ipfs/go-log,v0.0.1,h1:9XTUN/rW64BCG1YhPK9Hoy3q8nr4gOmHHBpgFdfw6Lc=,9165a91716b11b432f8bf303d59fc019bf91872f1b9ca7e12d666c11ba6e6676 +github.com/ipfs/go-merkledag,v0.2.4,h1:ZSHQSe9BENfixUjT+MaLeHEeZGxrZQfgo3KT3SLosF8=,ed269e045c613cc7b9bba3593797fe09cdf84c906726bef9261c74bd8c470404 +github.com/ipfs/go-metrics-interface,v0.0.1,h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg=,e83f0c01b084000492db0c0e1a28ff900c3f6d11eea8defdbe8bdd1a04c33fd0 +github.com/ipfs/go-mfs,v0.1.1,h1:tjYEWFIl0W6vRFuM/EnySHaaYzPmDcQWwTjtYWMGQ1A=,1db35113aff60e645544cc64cbbddbf0608332b1f2208615744098af59b97fee +github.com/ipfs/go-path,v0.0.7,h1:H06hKMquQ0aYtHiHryOMLpQC1qC3QwXwkahcEVD51Ho=,96c607c0253c24ed0cb37016007f34420a3a83c37cdd68b6d4391126418835c4 +github.com/ipfs/go-peertaskqueue,v0.1.1,h1:+gPjbI+V3NktXZOqJA1kzbms2pYmhjgQQal0MzZrOAY=,5fa92b0302d8e72e8c4d74517a68fad04c1d89d90f0a6314a5e30662dda5d359 +github.com/ipfs/go-unixfs,v0.2.2,h1:eTkDT9F0dn4qHmBMVRMZbziwyqLRcogjtPYqMgZYmQs=,77f7f6b2de604b592018dd914a6606084069d22efa70ea95e0dd623a04e4453c +github.com/ipfs/go-verifcid,v0.0.1,h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E=,1f808a29fcd38406325435c7a6a02b253aee28832704f0032600c2b41ef3b8f1 +github.com/ipfs/interface-go-ipfs-core,v0.2.4,h1:oQiJ3Mj3rqVJohdi316K3+VSyiADto3Z35ukj7z+UGg=,e1030de5fc1ee1868a87386708be313fc0fcbbe137d5a71d71f28621393f70a2 +github.com/ipfs/iptb,v1.4.0,h1:YFYTrCkLMRwk/35IMyC6+yjoQSHTEcNcefBStLJzgvo=,0b00d0279c700ad687cfbba073f504cc4c8a17ff731550c3784fcb3e24b0c6d5 +github.com/iris-contrib/blackfriday,v2.0.0+incompatible,h1:o5sHQHHm0ToHUlAJSTjW9UWicjJSDDauOOQ2AHuIVp4=,936679f49251da75fde84b8f38884dbce89747b96f8206f7a4675bfcc7dd165d 
+github.com/iris-contrib/formBinder,v5.0.0+incompatible,h1:jL+H+cCSEV8yzLwVbBI+tLRN/PpVatZtUZGK9ldi3bU=,6f1fef9e533a1f57a8b033f8c0a135ed038524d7535dd16ba22e9494e3096e3b +github.com/iris-contrib/go.uuid,v2.0.0+incompatible,h1:XZubAYg61/JwnJNbZilGjf3b3pB80+OQg2qf6c8BfWE=,c6bae86643c2d6047c68c25226a1e75c5331c03466532ee6c943705743949bd9 +github.com/issue9/assert,v1.3.2,h1:IaTa37u4m1fUuTH9K9ldO5IONKVDXjLiUO1T9vj0OF0=,f4349cbd5af134fce10b399717aa4b455b5c73df6c20c1057c6e45973f24a06d +github.com/issue9/identicon,v0.0.0-20160320065130-d36b54562f4c,h1:A/PDn117UYld5mlxe58EpMguqpkeTMw5/FCo0ZPS/Ko=,5a837560a10469ab524b185a092edf67be85aff5ed794e1fcaaa084cf4540336 +github.com/itchio/arkive,v0.0.0-20190702114012-1bb6c7241ec3,h1:UcZnU7qzWTmZf8v7F3mC79H98I0b77pZz+99vqHFwtI=,dad4e3a988e6834d4ce1c3fb650a8dbb9aedd116ba8b3556c8f8babecdd17ead +github.com/itchio/dskompress,v0.0.0-20190702113811-5e6f499be697,h1:u3Q2WkrIPYlGEw4fjcImSOrkivWd6SVb0BF0Ehoih9c=,d8379b7e4219f001b61e5c2b3b34b2a6b69f8a55dc1acde2919be3050a7c84f5 +github.com/itchio/go-brotli,v0.0.0-20190702114328-3f28d645a45c,h1:Jf20xV/yR/O6eSUqLTuXhka/+54YR59sGwN7b3MkxYk=,6bab2adfb10a8ae7132e02ed10823df2e91c42dd08a1f3e1835679390ea69927 +github.com/itchio/headway,v0.0.0-20190702175331-a4c65c5306de,h1:RQW9xPqYtvjdHHRZR95XsaEA9B4URCuNHK78IuJcc+Y=,54e63fd6f25217e272e196f6213915515196a5b17a1923a666c05b4f49c82ef3 +github.com/itchio/httpkit,v0.0.0-20190702184704-639fe5edf1f1,h1:mViP/A8hAP04YWbbZR7Kcm7rTkUeT2HLcn3BBiK+CwM=,e56bf70a53a305f6866631d1272a3f3543abd45a68c50d202ddc80796b58c461 +github.com/itchio/kompress,v0.0.0-20190702090658-5e2558a00102,h1:QXEwRXrrx+7CxU+Y+G4GpDk4mUeHbP7grMXHhydk8qU=,cadec4996aed4026c0e0321f90b5bb11d9b8d1de3665752b2e862c6ddbfa229d +github.com/itchio/ox,v0.0.0-20190925154941-b613e528fc7d,h1:EcmVffUYduCSFCEM12YpSXoVXvyeq8Ro4Q+rwc60TIo=,81c3dfe8e91eb13815bf5b7f159f24a3cb1bd7028a395f691e9cefc1c3a71d01 
+github.com/itchio/randsource,v0.0.0-20190702184213-a7635a4cb94b,h1:fG+9RlMeggMG/C2FH80HTfJmm+eOjAve2pFSv6Uio8A=,2375b07785c2738527c864dfb1bee0082b1f89c51e200157165bcd36f5c2933f +github.com/itchio/savior,v0.0.0-20190925162935-b92976a0b402,h1:a51wRxkLoJWu5NqnVDkI6cE50S0mDpJfOXkCp4ltvr8=,1454524a51fa6492ee593fb7d648dc037fec7c32d90e2d21c149b6e724a74838 +github.com/iwind/TeaGo,v0.0.0-20191007090339-daba0bb6607e,h1:bxD34HpyJWx6bnGdahZo6uN6XnuOvMa8LrzfC+eZqes=,bec78c179e2676d51bb1a07122896661d4ae7727d325e9fa91682361e0321161 +github.com/jackc/chunkreader,v1.0.0,h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=,e204c917e2652ffe047f5c8b031192757321f568654e3df8408bf04178df1408 +github.com/jackc/chunkreader/v2,v2.0.0,h1:DUwgMQuuPnS0rhMXenUtZpqZqrR/30NWY+qQvTpSvEs=,cae1df6cc4f52abdf31d9c7c9869714f5c2e2dddc8047eb6d335409489e76031 +github.com/jackc/fake,v0.0.0-20150926172116-812a484cc733,h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=,bf8b5b51ae03f572a70a0582dc663c5733bba9aca785d39bb0367797148e6d64 +github.com/jackc/pgconn,v1.0.1,h1:ZANo4pIkeHKIVD1cQMcxu8fwrwIICLblzi9HCjooZeQ=,4b7e033c80207f032275845f7d366b51b46e3434cafebd13599a351f01f68b86 +github.com/jackc/pgio,v1.0.0,h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=,1a83c03d53f6a40339364cafcbbabb44238203c79ca0c9b98bf582d0df0e0468 +github.com/jackc/pgmock,v0.0.0-20190831213851-13a1b77aafa2,h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA=,5d8117d8fb79d3a41998bec8dca93d450eba9edf3cf0b8c36881e0ea6140b406 +github.com/jackc/pgpassfile,v1.0.0,h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=,1cc79fb0b80f54b568afd3f4648dd1c349f746ad7c379df8d7f9e0eb1cac938b +github.com/jackc/pgproto3,v1.1.0,h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A=,e3766bee50ed74e49a067b2c4797a2c69015cf104bf3f3624cd483a9e940b4ee +github.com/jackc/pgproto3/v2,v2.0.0,h1:FApgMJ/GtaXfI0s8Lvd0kaLaRwMOhs4VH92pwkwQQvU=,22635755552d1363817a9c9f192cf464034dfc31593e4975982a85de8295dcf4 
+github.com/jackc/pgtype,v0.0.0-20190828014616-a8802b16cc59,h1:xOamcCJ9MFJTxR5bvw3ZXmiP8evQMohdt2VJ57C0W8Q=,30822259b27010e41850fde5f75166abc90028b9c57e2a77976cab119e01295f +github.com/jackc/pgx,v3.6.0+incompatible,h1:bJeo4JdVbDAW8KB2m8XkFeo8CPipREoG37BwEoKGz+Q=,07a0cc87069e38acac988cc48e5a6cfd1bfd02b4b843d0e8931e48bb8c25d821 +github.com/jackc/pgx/v4,v4.0.0-pre1.0.20190824185557-6972a5742186,h1:ZQM8qLT/E/CGD6XX0E6q9FAwxJYmWpJufzmLMaFuzgQ=,1782863d2118cd0e63cc50cca24bd79cbea5674bac3b798bf12148400590128d +github.com/jackc/puddle,v0.0.0-20190608224051-11cab39313c9,h1:KLBBPU++1T3DHtm1B1QaIHy80Vhu0wNMErIFCNgAL8Y=,a780306bb3ad76174eca1d83a6d925fb3f7a13981cda6249e51be64476c76f15 +github.com/jackmordaunt/icns,v0.0.0-20181231085925-4f16af745526,h1:NfuKjkj/Xc2z1xZIj+EmNCm5p1nKJPyw3F4E20usXvg=,06f511df7637fd1424b6f099d7ce7ecf7378e62adc9d13133ce7df419e51faf0 +github.com/jackpal/gateway,v1.0.5,h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc=,adab846630d73763e5a3b984c8264d6503c8cb0b2914df559dacd41f6380e4ef +github.com/jackpal/go-nat-pmp,v1.0.1,h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA=,d7f2409f72895a01e0d11b457eac015dbcd94c2657f95d508e53867ca6b07db1 +github.com/jacobsa/crypto,v0.0.0-20190317225127-9f44e2d11115,h1:YuDUUFNM21CAbyPOpOP8BicaTD/0klJEKt5p8yuw+uY=,ec4d2a1fc28e1d99c68557e38cd77527df5a9f5090aa12876ab4aa6f9137a3d5 +github.com/jacobsa/oglematchers,v0.0.0-20150720000706-141901ea67cd,h1:9GCSedGjMcLZCrusBZuo4tyKLpKUPenUUqi34AkuFmA=,bcd70357107c45c3177c913b718624376b692d39672c157708fe2cd9aa78fcb5 +github.com/jacobsa/oglemock,v0.0.0-20150831005832-e94d794d06ff,h1:2xRHTvkpJ5zJmglXLRqHiZQNjUoOkhUyhTAhEQvPAWw=,5159f5f22d0e130b1fbfdbc96eb9d4653b32bd463439cb0f3c98e179de5daf80 +github.com/jacobsa/ogletest,v0.0.0-20170503003838-80d50a735a11,h1:BMb8s3ENQLt5ulwVIHVDWFHp8eIXmbfSExkvdn9qMXI=,69d96e3ea6e055d68ed46c0c1044a5dfa18064c9d45bc68d5946aa55e048af6b 
+github.com/jacobsa/reqtrace,v0.0.0-20150505043853-245c9e0234cb,h1:uSWBjJdMf47kQlXMwWEfmc864bA1wAC+Kl3ApryuG9Y=,a7efb54142e39f4acab39d22db692d5734f818723783646f6727269228deea83 +github.com/jaegertracing/jaeger,v1.14.0,h1:C0En+gfcxf3NsAriMAvQ6LcSFrQ5VQGXddqfty1EpTI=,5f6245d1b0c986c44cc37c7c950f3cf9c2cfd1e0d540905cd4fab9a164684ecd +github.com/jarcoal/httpmock,v1.0.4,h1:jp+dy/+nonJE4g4xbVtl9QdrUNbn6/3hDT5R4nDIZnA=,5c7d051f237633573a168713760758005724c268242484d982cb0c76dc3f3ee7 +github.com/jaytaylor/html2text,v0.0.0-20190408195923-01ec452cbe43,h1:jTkyeF7NZ5oIr0ESmcrpiDgAfoidCBF4F5kJhjtaRwE=,2369830967f1c18c382cbee77a510431b42275f1f368e3b5cbbdaa782ae24c0d +github.com/jbenet/go-base58,v0.0.0-20150317085156-6237cf65f3a6,h1:4zOlv2my+vf98jT1nQt4bT/yKWUImevYPJ2H344CloE=,e686d369d490d6728f6e63b1680db3b567c9e884545f8c47ca656f0d944299b7 +github.com/jbenet/go-cienv,v0.1.0,h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc=,3de5dadf2add50bf7fbdf88db4e6d008ba1848516585f7f9dfbf53cb6dc1705c +github.com/jbenet/go-context,v0.0.0-20150711004518-d14ea06fba99,h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=,4cd0955abeea43dc4b5a08b8769e696109e0376f2a113a9b8eff13cc90cac1c7 +github.com/jbenet/go-temp-err-catcher,v0.0.0-20150120210811-aac704a3f4f2,h1:vhC1OXXiT9R2pczegwz6moDvuRpggaroAXhPIseh57A=,9299671a264400f8f0e145da442aa3216394f324c50f045ef2ed2b898b3945c9 +github.com/jbenet/goprocess,v0.1.3,h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10=,026bb36c2d4316ad327f8b2e623f172c01140f699d57ec8609f702df5cdf021d +github.com/jcmturner/gofork,v1.0.0,h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=,5e015dd9b038f1dded0b2ded77e529d2f6ba0bed228a98831af5a3610eefcb52 +github.com/jdcloud-api/jdcloud-sdk-go,v1.9.1-0.20190605102154-3d81a50ca961,h1:a2/K4HRhg31A5vafiz5yYiGMjaCxwRpyjJStfVquKds=,93754c3fe6c00591fcd499cf73ad7f66e4ed864619579ff726872a2f50b53dfa +github.com/jdkato/prose,v1.1.0,h1:LpvmDGwbKGTgdCH3a8VJL56sr7p/wOFPw/R4lM4PfFg=,4e07b4f2012b46465fcc262d907b1cb81699bc61e6fb7a59ee47ea262e4986d1 
+github.com/jeffchao/backoff,v0.0.0-20140404060208-9d7fd7aa17f2,h1:mex1izRBCD+7WjieGgRdy7e651vD/lvB1bD9vNE/3K4=,e6daeed2ffbf793cbdab5e21e9ba47ced708e7c594d4155e1964109903bd199f +github.com/jefferai/isbadcipher,v0.0.0-20190226160619-51d2077c035f,h1:E87tDTVS5W65euzixn7clSzK66puSt1H4I5SC0EmHH4=,c438b15316e4af2487ba2c818288aa15ba19e39b3bf2f83651dcc9d451af6c5b +github.com/jefferai/jsonx,v1.0.0,h1:Xoz0ZbmkpBvED5W9W1B5B/zc3Oiq7oXqiW7iRV3B6EI=,e8ccf27ffc8d4560e7db02f8a1663fd4605c5996a025f90721f8157fde332be7 +github.com/jellevandenhooff/dkim,v0.0.0-20150330215556-f50fe3d243e1,h1:ujPKutqRlJtcfWk6toYVYagwra7HQHbXOaS171b4Tg8=,8a3ba94d93fb61070bee24ffca5043eb32b4a6aafa9b84e4950a5f8f34328659 +github.com/jessevdk/go-flags,v1.4.0,h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA=,a26e72c3f4c220df8b65ac6eb3d358a8ad2efc300b212318582893ea882726f9 +github.com/jfrazelle/go,v1.5.1-1,h1:EJWkn/L/VOoena+VQryO7xEkxz7J6lHvPXAe+Z3Q6Gc=,ff67181f47086da85e0d0896aeffb52142f6f45bd3bbf75b94cd7546365bf140 +github.com/jfrog/gofrog,v1.0.5,h1:pEJmKZ9XgvQH2a8WCqAEeUDSXBCKBMN90QzOiOhBTIs=,bb6267655de882922977dca0860020c4c781bf7b3d6aba3fddc206a21c13784c +github.com/jfrog/jfrog-client-go,v0.5.5,h1:dYoajyMXcmc13YpZ/NLye0KL7r+QfpP9l8+WriZNZbE=,3d62cf613d821eb41b8b62ff01e09d8d4eed781f4deb52d3dd96e5a636967732 +github.com/jhump/protoreflect,v1.5.0,h1:NgpVT+dX71c8hZnxHof2M7QDK7QtohIJ7DYycjnkyfc=,a6f0926d31ed98d63d04f2aa60a5579cca471e7544cb701202ba5a5fd3134256 +github.com/jimstudt/http-authentication,v0.0.0-20140401203705-3eca13d6893a,h1:BcF8coBl0QFVhe8vAMMlD+CV8EISiu9MGKLoj6ZEyJA=,0bcf35e1ca69658b70fe05050f436b18ae141a08863cf6011afb39edef5c4013 +github.com/jinzhu/copier,v0.0.0-20190924061706-b57f9002281a,h1:zPPuIq2jAWWPTrGt70eK/BSch+gFAGrNzecsoENgu2o=,c05742c031370bace7c0d5b4101d437e59ad4613bb707fda49c365b3e6af8ad2 +github.com/jinzhu/gorm,v1.9.11,h1:gaHGvE+UnWGlbWG4Y3FUwY1EcZ5n6S9WtqBA/uySMLE=,87f36225e1108c93f299d9b7e4cda23c2f9469ce3db0de59df90691c1e740565 
+github.com/jinzhu/inflection,v1.0.0,h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=,cf1087a6f6653ed5f366f85cf0110bbbf581d4e9bc8a4d1a9b56765d94b546c3 +github.com/jinzhu/now,v1.0.1,h1:HjfetcXq097iXP0uoPCdnM4Efp5/9MsM0/M+XOTeR3M=,5900b34a1d8daa959798e342e684c4237f60ffaebd1aa4201e29a7d3a98d32b7 +github.com/jlaffaye/ftp,v0.0.0-20190126081051-8019e6774408,h1:9AeqmB6KVEJ7GQU985MGQc7Mtxz1+C+JZkgqBnUWqMU=,b1b8b0e10084219eaf1a829778c1b53c049eeb77249a5660b62291cc3b454e6b +github.com/jmcvetta/neoism,v1.3.1,h1:GCFSl/90OYwEQH5LML/Vy6UlwK4SZ2OIO278UI4K7DE=,93e9ce5946ab71d9d0970e3709716a2b9cc96b4d03cfc708dfba8f062e870885 +github.com/jmcvetta/randutil,v0.0.0-20150817122601-2bb1b664bcff,h1:6NvhExg4omUC9NfA+l4Oq3ibNNeJUdiAF3iBVB0PlDk=,742cb157c8eb74da05a7972de646034cf0ddaba7c89d8aac625ed73027e778c1 +github.com/jmespath/go-jmespath,v0.0.0-20180206201540-c2b33e8439af,h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=,5c18f15c2bcfbbdb4fd15c0598ea5d3a373991a7b46a8f2405d00ac8b6121629 +github.com/jmhodges/clock,v0.0.0-20160418191101-880ee4c33548,h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4=,f66a541ce3f97b4696d65282a332e8d08dee3f15271b7c2066050aeb5b7334b7 +github.com/jmhodges/levigo,v1.0.0,h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U=,7f43feb409c9650336152a959d7dc4d8e5a260c92e0212b1d2e0f0a7d3de6d87 +github.com/jmoiron/sqlx,v1.2.0,h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA=,c8000fe80e86eea575e0d3dd0737f6399c1880a420ce2a9d833ca0e0cfc9c875 +github.com/joefitzgerald/rainbow-reporter,v0.1.0,h1:AuMG652zjdzI0YCCnXAqATtRBpGXMcAnrajcaTrSeuo=,889ea7a751c043bd0ea0ee31734011938be19ecbf08e652d53fc41f3eade9435 +github.com/joeshaw/multierror,v0.0.0-20140124173710-69b34d4ec901,h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4=,e31f735c5f42ac65aef51a70ba1a32b5ac34067a7ba0624192dd41e5ea03aa1e +github.com/joho/godotenv,v1.3.0,h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=,acef5a394fbd1193f52d0d19690b0bfe82728d18dd3bf67730dc5031c22d563f 
+github.com/jonas-p/go-shp,v0.1.1,h1:LY81nN67DBCz6VNFn2kS64CjmnDo9IP8rmSkTvhO9jE=,ac1706c486b7ea7e83eecd1f773259098569d2fe3ad2a53cc32ff89a68915a8f +github.com/jonboulle/clockwork,v0.1.0,h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=,930d355d1ced60a668bcbca6154bb5671120ba11a34119505d1c0677f7bbbf97 +github.com/joncalhoun/qson,v0.0.0-20170526102502-8a9cab3a62b1,h1:lnrOS18wZBYrzdDmnUeg1OVk+kQ3rxG8mZWU89DpMIA=,062b14a6986be3fb833eb9dd907acb7e563d5b6cfcaee1a04120a9b1fcc2d451 +github.com/josephspurrier/goversioninfo,v0.0.0-20190124120936-8611f5a5ff3f,h1:wBb8/KQrr2tWYffdugrpxOdWyOPSBRNzAR76aF9Nn3Y=,50be4b48f9fb8fbe79a013a791c015c13d7294c5de8f9bee586eaadd6f479459 +github.com/joyent/triton-go,v0.0.0-20190112182421-51ffac552869,h1:BvV6PYcRz0yGnWXNZrd5wginNT1GfFfPvvWpPbjfFL8=,5e875a04efd7f844211b68657d21313ae16b479cb01dc7161811c2c39ac19b18 +github.com/jpillora/backoff,v1.0.0,h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=,f856692c725143c49b9cceabfbca8bc93d3dbde84a0aaa53fb26ed3774c220cc +github.com/jrick/logrotate,v1.0.0,h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI=,b87ee434f9e2cfda719b639cd5bd0a52523f920f64d23336f88070e9d3765d54 +github.com/jsimonetti/rtnetlink,v0.0.0-20190606172950-9527aa82566a,h1:84IpUNXj4mCR9CuCEvSiCArMbzr/TMbuPIadKDwypkI=,97d995d4ca858da8955aefcead01425d12a91188d6f9b36b5cb63aa35a4ea674 +github.com/json-iterator/go,v1.1.8,h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=,0de8f316729fb05ba608361323b178aa32944154e77aa208ad2818848b0628e2 +github.com/jstemmer/go-junit-report,v0.0.0-20190106144839-af01ea7f8024,h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc=,b623acfae0dcc440f81ae14f3c5bc3ca40b1a674660ad549127980f892ab165e +github.com/jteeuwen/go-bindata,v3.0.7+incompatible,h1:91Uy4d9SYVr1kyTJ15wJsog+esAZZl7JmEfTkwmhJts=,03f794b47c49da98a4eab6c3a7cc49d286f012d64ab832f783b76b9fcd3bd8b2 +github.com/jtolds/gls,v4.20.0+incompatible,h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=,2f51f8cb610e846dc4bd9b3c0fbf6bebab24bb06d866db7804e123a61b0bd9ec 
+github.com/jtolds/go-luar,v0.0.0-20170419063437-0786921db8c0,h1:UyVaeqfY1fLPMt1iUTaWsxUNxYAzZVyK+7G+a3sRfhk=,1ed97930b5dfc7f89c84ff3c5ea5a7de9964ccca970f45853d42a13a138b644e +github.com/jtolds/monkit-hw,v0.0.0-20190108155550-0f753668cf20,h1:XK96humQhnPbQ24uKtSHKbdShDgrKYqlWBNKJTcIKbg=,5d84e6f3f559b67e00b08a5e93e1017866695a4590b97ccb23a82e3ce792ad04 +github.com/juju/ansiterm,v0.0.0-20180109212912-720a0952cc2a,h1:FaWFmfWdAUKbSCtOU2QjDaorUexogfaMgbipgYATUMU=,17d1e05fd6f1c8fdce7ba7495af54f4dac1e155febff56bd6450593b016655c2 +github.com/juju/clock,v0.0.0-20190205081909-9c5c9712527c,h1:3UvYABOQRhJAApj9MdCN+Ydv841ETSoy6xLzdmmr/9A=,f57579c0c104add5228b279c4673f592d5756033d33b085185ef72a3d2f83bfe +github.com/juju/cmd,v0.0.0-20190815094254-0c5c82a8dfc6,h1:rPqkdymtMRLcCSYKOeIxuw5mmd8dWx8jSq+t9EGBgtA=,c603f2311cf6524a74535eb9d416a83959c03e6ad52ab4ec081cbc7343734af6 +github.com/juju/collections,v0.0.0-20180717171555-9be91dc79b7c,h1:m/Uo8B7nrH3K6nvk66Y67T7cbHcyY101rW24vGuMON8=,18275066d75835f37845565980c0ac818f9c29145f756b1eeacb6496dac3ebd3 +github.com/juju/errors,v0.0.0-20190930114154-d42613fe1ab9,h1:hJix6idebFclqlfZCHE7EUX7uqLCyb70nHNHH1XKGBg=,2519c885f89cfba663da3bd9a1ff2532e3ae948bdea3e44b42603d8f91cc0796 +github.com/juju/gnuflag,v0.0.0-20171113085948-2ce1bb71843d,h1:c93kUJDtVAXFEhsCh5jSxyOJmFHuzcihnslQiX8Urwo=,47cdfb1bf94a2719e97e03caf4e0dc1cb89ba27c35ed7ce7020701fe8ee2c353 +github.com/juju/gojsonpointer,v0.0.0-20150204194629-afe8b77aa08f,h1:QzpKmMsaP06HVZnYNlcy1CLIXPytsj2NuzfCHitxuus=,0e75303c5dc230f30a629963589376030c3c2a1152a40b9e2075a084224eb173 +github.com/juju/gojsonreference,v0.0.0-20150204194633-f0d24ac5ee33,h1:huRsqE0iXmVPTML75YvFBOiaNj4ZiCZgKVnkRQ06d3w=,d1648b2f71dfbb02acc4a18c55711c721e0f6b50a5280d852cd9c0a639e8ebe6 +github.com/juju/gojsonschema,v0.0.0-20150312170016-e1ad140384f2,h1:VqIDC6dRE0C7wEtTdT6zx2zP5omaoJiZXp2g/dBHRcE=,a9f736e7cb462ccf3b2cb03aa8a133db13dc8d938a2753329a7a1274bdca2656 
+github.com/juju/httpprof,v0.0.0-20141217160036-14bf14c30767,h1:COsaGcfAONDdIDnGS8yFdxOyReP7zKQEr7jFzCHKDkM=,9a8c77f887765536c312c89d73d7568126393c7d38c473a50addbec30f8c80ec +github.com/juju/httprequest,v2.0.0+incompatible,h1:+WtiSbRkEwdqKRBi+4JH8PTdNxBa/h8U8RIzdYaMENI=,0d2ae765c01f7956da6896b7c7d8bb1ad4065e960b93c09a644e2e61a0acaa52 +github.com/juju/jsonschema,v0.0.0-20161102181919-a0ef8b74ebcf,h1:SGTxyCG74uh2dYdBJCUJOo2FSx0fRHP7nMRH7s5JVeQ=,a5681c88d87b34d10dcf701b10d149303fa6d152ee224dc1bd7bd7680da80bfa +github.com/juju/loggo,v0.0.0-20190526231331-6e530bcce5d8,h1:UUHMLvzt/31azWTN/ifGWef4WUqvXk0iRqdhdy/2uzI=,3db058c07ced25b8689f5d3e462d344ffb965c6f371eabc0396ce94d927e6206 +github.com/juju/lru,v0.0.0-20190314140547-92a0afabdc41,h1:/ucixsNZ+l94agL5LZioJ4ECyOz7kOYY+DKb/0NN6ME=,8f41907249beb66ba4dee5df1f53c37f387506e21568cc89db4b72493b970e85 +github.com/juju/os,v0.0.0-20191022170002-da411304426c,h1:iJZl5krsl2AqkgU7IiJ2/jNAchctLFa3BiKdyOUvK+g=,b236cb3d90b3fae0f83e767feef3a17b472ab0fe238ac08810c4f9c1d683c14d +github.com/juju/proxy,v0.0.0-20180523025733-5f8741c297b4,h1:y2eoq0Uof/dWLAXRyKKGOJuF0TEkauPscQI7Q1XQqvM=,443cd58a22392e66576d883b9d04c17faebafa37a406a346b671f7e994436c34 +github.com/juju/pubsub,v0.0.0-20190419131051-c1f7536b9cc6,h1:2aARJxmMC2IF9GqVtt5PYcIy4jyuAcR44byqwXKTK0o=,b908f7985f6250270708c2c46ca0ccfc17a3705fea4a27da6f1277a9f6b5404c +github.com/juju/qthttptest,v0.0.1,h1:pR8nTl6Uo/iI6/ynQf5Cxy9FEICXzaa83NtrBdGMCVQ=,4ba292a46e27af468c181118214f7eb1bfc015f289e90841d7746b954f20ba49 +github.com/juju/ratelimit,v1.0.1,h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY=,c9af5c6719ce3b6912579a029cb2a651707aa25daa1921488f9cae9c4f8ed334 +github.com/juju/retry,v0.0.0-20180821225755-9058e192b216,h1:/eQL7EJQKFHByJe3DeE8Z36yqManj9UY5zppDoQi4FU=,c5b2437ff128cf13f2d6f3cc3b7e226f2c0119e22caed286946245150b9428e7 +github.com/juju/schema,v1.0.0,h1:sZvJ7iQXHhMw/lJ4YfUmq+fe7R2ZSUzZzd/eSokaB3M=,746bcab557bed4e05456419e5012573dc8481dc8740309100e4bd901ff282a39 
+github.com/juju/testing,v0.0.0-20191001232224-ce9dec17d28b,h1:Rrp0ByJXEjhREMPGTt3aWYjoIsUGCbt21ekbeJcTWv0=,317de254f343f9aff6e1226b4ea225cab92fee84667db8e72d541667715ea610 +github.com/juju/txn,v0.0.0-20190612234757-afeb83d59782,h1:FcaMWAFKHuxS7UAaB/GuLWrqI9L7f20m6aXaxg+t5lY=,4656c1c5f0e3dac641999feba77879c7206aff1d606513d7bdb3be7d17a6635c +github.com/juju/utils,v0.0.0-20180820210520-bf9cc5bdd62d,h1:irPlN9z5VCe6BTsqVsxheCZH99OFSmqSVyTigW4mEoY=,8edd8a74c692eb717156a2bb689e1e24a446656677760dc7dc06b761ee451df5 +github.com/juju/version,v0.0.0-20180108022336-b64dbd566305,h1:lQxPJ1URr2fjsKnJRt/BxiIxjLt9IKGvS+0injMHbag=,73312c50c8b4f6f8644aaccc09b71a2235c8083cfc6c99425540f3c0a3c29e64 +github.com/juju/webbrowser,v1.0.0,h1:JLdmbFtCGY6Qf2jmS6bVaenJFGIFkdF1/BjUm76af78=,7b38f053656e4a883bc122589994e4ec34eae3f833e899450650752d5b72eec8 +github.com/juliangruber/go-intersect,v1.0.0,h1:0XNPNaEoPd7PZljVNZLk4qrRkR153Sjk2ZL1426zFQ0=,e7f539e6b13470da34009d3ab44c6ba84a6b9bb9f6e92d315551919287a25e3c +github.com/julienschmidt/httprouter,v1.3.0,h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=,e457dccd7015f340664e3b8cfd41997471382da2f4a743ee55be539abc6ca1f9 +github.com/jung-kurt/gofpdf,v1.0.3-0.20190309125859-24315acbbda5,h1:PJr+ZMXIecYc1Ey2zucXdR73SMBtgjPgwa31099IMv0=,f0fa70ade137185bbff2f016831a2a456eaadc8d14bc7bf24f0229211820c078 +github.com/justinas/alice,v0.0.0-20171023064455-03f45bd4b7da,h1:5y58+OCjoHCYB8182mpf/dEsq0vwTKPOo4zGfH0xW9A=,3d6623831901bb973db882bbaffcff3f55849724100ee72c5bf8d0fdfa927ae4 +github.com/jzelinskie/whirlpool,v0.0.0-20170603002051-c19460b8caa6,h1:RyOL4+OIUc6u5ac2LclitlZvFES6k+sg18fBMfxFUUs=,ca0115fcfaaa03f1973f65d05c6d6aefdbdeca6507cdda4359fdf55fd0be2c48 +github.com/k0kubun/colorstring,v0.0.0-20150214042306-9440f1994b88,h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM=,32a2eac0ffb69c6882b32ccfcdd76968cb9dfee9d9dc3d469fc405775399167c 
+github.com/k0kubun/pp,v3.0.1+incompatible,h1:3tqvf7QgUnZ5tXO6pNAZlrvHgl6DvifjDrd9g2S9Z40=,2b91f559df17a49554094e4befd7e1c7d32ba4519417b1b36796d9b49d7328c5 +github.com/kami-zh/go-capturer,v0.0.0-20171211120116-e492ea43421d,h1:cVtBfNW5XTHiKQe7jDaDBSh/EVM4XLPutLAGboIXuM0=,fb1ef7d18f4cec39e9115fb200fbf7d5cff65674afe6ecc63ad57d413f503830 +github.com/kamilsk/retry/v4,v4.3.1,h1:hNQmK1xAgybAVsadNAGvCNutFLS2h+Ycpw317u4d+i0=,74181d82f9bba5b7c313c6b338f127668fffbede70f5495a4a2ef8fddaa6c20f +github.com/kardianos/osext,v0.0.0-20190222173326-2bc1f35cddc0,h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=,10976c39b58f218a6e29687d19763845e7650d04ac86096cd67ace58f4e56346 +github.com/karrick/godirwalk,v1.13.0,h1:GJq8GHQEAPsjwqfGhLNXBO5P0dS2HYdDRVWe+P4E/EQ=,9652ac9eb85bf13594ba9c41a86864ec5236e429a65f6bbb19c6897d1e335092 +github.com/kataras/golog,v0.0.9,h1:J7Dl82843nbKQDrQM/abbNJZvQjS6PfmkkffhOTXEpM=,bb4d1476d5cbe33088190116a5af7b355fd62858127a8ea9d30d77701279350e +github.com/kataras/iris,v11.1.1+incompatible,h1:c2iRKvKLpTYMXKdVB8YP/+A67NtZFt9kFFy+ZwBhWD0=,9aba6b1128d42ee2b63a9319e28c1b665b7e82dde1b10763ee7510bcc6427a25 +github.com/kataras/pio,v0.0.0-20190103105442-ea782b38602d,h1:V5Rs9ztEWdp58oayPq/ulmlqJJZeJP6pP79uP3qjcao=,70a50855f07ff59d96db9633a0cf729280a8b9f7af72b936fe8a28e48406432f +github.com/kavu/go_reuseport,v1.4.0,h1:YIp/96RZ3sJfn0LN+FFkkXIq3H3dfVOdRUtNejhDcxc=,b08d4f774766e1136fd256484f2584d42cd568b5edc7dbc7b19e1259b5dbb75c +github.com/kballard/go-shellquote,v0.0.0-20180428030007-95032a82bc51,h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=,ae4cb7b097dc4eb0c248dff00ed3bbf0f36984c4162ad1d615266084e58bd6cc +github.com/kellydunn/golang-geo,v0.7.0,h1:A5j0/BvNgGwY6Yb6inXQxzYwlPHc6WVZR+MrarZYNNg=,4f4699636a450e20bd107fb81894fcdcc8ceeddbac7062e9457c67326c1fb036 +github.com/kelseyhightower/envconfig,v1.4.0,h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=,af674112c38290862e5f59fc2867b81f7b0e623ec2fd1465cd3812e538b351d3 
+github.com/kennygrant/sanitize,v1.2.4,h1:gN25/otpP5vAsO2djbMhF/LQX6R7+O1TB4yv8NzpJ3o=,733211913a22ff6eb5843455345fde8c0c3cff25cc5e8e8225c330fb4c6a72df +github.com/kevinburke/ssh_config,v0.0.0-20190725054713-01f96b0aa0cd,h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY=,ebd98d4bfd0deb1825d9a54689560b42a17d87385222971117ad72e7ad2f36fa +github.com/keybase/go-crypto,v0.0.0-20190403132359-d65b6b94177f,h1:Gsc9mVHLRqBjMgdQCghN9NObCcRncDqxJvBvEaIIQEo=,a839bacd8eb0a61a72f84678d568d8df899b512510a326e06db0f191e8c1c5a1 +github.com/kisielk/errcheck,v1.2.0,h1:reN85Pxc5larApoH1keMBiu2GWtPqXQ1nc9gx+jOU+E=,709eeca978804f41720a94bc69ee3cfa8277f7d15016478a3ebda86606a286c5 +github.com/kisielk/gotool,v1.0.0,h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=,089dbba6e3aa09944fdb40d72acc86694e8bdde01cfc0f40fe0248309eb80a3f +github.com/kisielk/sqlstruct,v0.0.0-20150923205031-648daed35d49,h1:o/c0aWEP/m6n61xlYW2QP4t9424qlJOsxugn5Zds2Rg=,dbff9241f676de69e88bc006004da6087576433457b306f53cb952d0313ccb78 +github.com/kisom/goutils,v1.1.0,h1:z4HEOgAnFq+e1+O4QdVsyDPatJDu5Ei/7w7DRbYjsIA=,a0b58731f8e1144c013107294885891c44b7fd3235da0ec20776f4d644b4eaa4 +github.com/kkdai/bstream,v1.0.0,h1:Se5gHwgp2VT2uHfDrkbbgbgEvV9cimLELwrPJctSjg8=,dc1d546e0df6ef040963bc9d483834d6e56c77e0e4f6c48e574ac360e7723121 +github.com/klauspost/compress,v1.8.2,h1:Bx0qjetmNjdFXASH02NSAREKpiaDwkO1DRZ3dV2KCcs=,4dc2632696a9cd93cc32c1564e1a6aa4aecfcb5c995a077d45c6f92116e1711d +github.com/klauspost/cpuid,v1.2.1,h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=,8367d6c97e74f88b149ba9de708ff321273e0114aeb71a45e62e5ac296412420 +github.com/klauspost/crc32,v0.0.0-20161016154125-cb6bfca970f6,h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg=,6b632853a19f039138f251f94dbbdfdb72809adc3a02da08e4301d3d48275b06 +github.com/klauspost/pgzip,v1.2.1,h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=,a482336aa4b0e4e9368b15d75629ae741b44ef290b7d16430ba05ce561846213 
+github.com/klauspost/reedsolomon,v1.9.2,h1:E9CMS2Pqbv+C7tsrYad4YC9MfhnMVWhMRsTi7U0UB18=,ea8a4d6d994088dae0308843fd6bddb7541cf36306463a696fd4a29097496705 +github.com/knative/pkg,v0.0.0-20191031171713-d4ce00139499,h1:ha5eqzJaPg1CZroomqWxHqspOqpqpRMO3fDtgF1fvIM=,a8d19fc2196a1aec7869ca45df44ba9c5de5b81b6094f0579d25989eb7967660 +github.com/kniren/gota,v0.9.0,h1:ywFrdNxkBD5Xypk5BxjCaKiH507oQVXIf31pTvRhC4I=,062182a345c456c9c0fd7ce9644900708f7f9c08707d64fe2438b9d295dad6dd +github.com/knq/sysutil,v0.0.0-20191005231841-15668db23d08,h1:V0an7KRw92wmJysvFvtqtKMAPmvS5O0jtB0nYo6t+gs=,81ec4ac93dba6a6161264a0575f20235d8932abab0cd6b9777b4be936f5c2af5 +github.com/knqyf263/berkeleydb,v0.0.0-20190501065933-fafe01fb9662,h1:UGS0RbPHwXJkq8tcba8OD0nvVUWLf2h7uUJznuHPPB0=,1e575b5fdc170e0318ab06841873ae6d115978fbaffc3779290d7ba3aadbdf0e +github.com/knqyf263/go-deb-version,v0.0.0-20190517075300-09fca494f03d,h1:X4cedH4Kn3JPupAwwWuo4AzYp16P0OyLO9d7OnMZc/c=,4a09d0533768cf6f9d929858aa2e79b6942685569c2db00b8d4688590a89ba3d +github.com/knqyf263/go-rpmdb,v0.0.0-20190501070121-10a1c42a10dc,h1:pumO9pqmRAjvic6oove22RGh9wDZQnj96XQjJSbSEPs=,33a3568289d22672dfcb0ba7c5b8aa7f9223d5303003368e7dbe8c9718a803b4 +github.com/knqyf263/nested,v0.0.1,h1:Sv26CegUMhjt19zqbBKntjwESdxe5hxVPSk0+AKjdUc=,c0e123844a174b1e9929d4368d8a8bb2f5ecef578ee9dee692c5971a47a633ff +github.com/koki/structurederrors,v0.0.0-20180506174113-6b997eb5e2ca,h1:KmXUVzyPjXzd3kY0feNFsWOGVDYFT4MjjgG8QJx0m6k=,1efa717c181722fd1c6807919571dc559b48d17120f5eeb4638a322fb882411a +github.com/kolo/xmlrpc,v0.0.0-20190717152603-07c4ee3fd181,h1:TrxPzApUukas24OMMVDUMlCs1XCExJtnGaDEiIAR4oQ=,9d37c94f50784536aa8ef9a7623ec7bcac9e5bc67b18f7a801efc7cbbe6b1ab0 +github.com/konsorten/go-windows-terminal-sequences,v1.0.2,h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=,4d00d71b8de60bcaf454f8f867210ebcd05e75c0a7c2725904f71aa2f20fb08e 
+github.com/koron/go-ssdp,v0.0.0-20180514024734-4a0ed625a78b,h1:wxtKgYHEncAU00muMD06dzLiahtGM1eouRNOzVV7tdQ=,3a99f050b7a668291942cada4e38213965fa0ae3794469bb29ad0d6d9677db23 +github.com/kr/fs,v0.1.0,h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=,d376bd98e81aea34585fc3b04bab76363e9e87cde69383964e57e9779f2af81e +github.com/kr/logfmt,v0.0.0-20140226030751-b84e30acd515,h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=,ebd95653aaca6182184a1b9b309a65d55eb4c7c833c5e790aee11efd73d4722c +github.com/kr/pretty,v0.1.0,h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=,06063d21457e06dc2aba4a5bd09771147ec3d8ab40b224f26e55c5a76089ca43 +github.com/kr/pty,v1.1.8,h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI=,d66e6fbc65e772289a7ff8c58ab2cdfb886253053b0cea11ba3ca1738b2d6bc6 +github.com/kr/text,v0.1.0,h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=,9363a4c8f1f3387a36014de51b477b831a13981fc59a5665f9d21609bea9e77c +github.com/kshvakov/clickhouse,v1.3.4,h1:p/yqvOmeDRH+KyCH6NtwExelr4rimLBBfKW2a/wBN94=,01a0d1a90e0545da94350319a52c051257fee64c838e2632ec40ef8d89a2f153 +github.com/kylelemons/go-gypsy,v0.0.0-20160905020020-08cad365cd28,h1:mkl3tvPHIuPaWsLtmHTybJeoVEW7cbePK73Ir8VtruA=,321087246482a680bd3f06de64075fb843430da544596ad216a4a63d5b8dafa3 +github.com/kylelemons/godebug,v1.1.0,h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=,dbbd0ce8c2f4932bb03704d73026b21af12bd68d5b8f4798dbf10a487a2b6d13 +github.com/kyokomi/emoji,v2.1.0+incompatible,h1:+DYU2RgpI6OHG4oQkM5KlqD3Wd3UPEsX8jamTo1Mp6o=,0721a2fc643e49e002bd8a3e604b5d2f0f3e242cc279d14d76f90a55f8aeebf7 +github.com/labbsr0x/bindman-dns-webhook,v1.0.2,h1:I7ITbmQPAVwrDdhd6dHKi+MYJTJqPCK0jE6YNBAevnk=,d1a327ab22f62486250f50f98990c0d9e1a5fdece6a496fbbb85d4e123df3244 +github.com/labbsr0x/goh,v1.0.1,h1:97aBJkDjpyBZGPbQuOK5/gHcSFbcr5aRsq3RSRJFpPk=,84c91135623961c7c400bf8b646da76c0ce2941fe8706d5aef5650be9a5e37dd 
+github.com/labstack/echo,v3.3.10+incompatible,h1:pGRcYk231ExFAyoAjAfD85kQzRJCRI8bbnE7CX5OEgg=,29634743cf44c47079b74812ecf5aa7074630507886c4ff40b60c397c45af524 +github.com/labstack/gommon,v0.3.0,h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=,2783ed1c24d09a5539bc35954f71f41d270d78dc656be256c98a8ede2cbbe451 +github.com/lafriks/xormstore,v1.0.0,h1:P/IJzNSIpjXl/Up3o2Td5ZU/x4v6DEKLMaPQJGtmJCk=,0e347e24ab91f62e1b69bab5d78cbba77569f087b483569ef37761e1f93a3f46 +github.com/lann/builder,v0.0.0-20180802200727-47ae307949d0,h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=,1fe7a88079ff2bbe90fb4724fb5c353ecb6af4cd7e011440354c804f678895ee +github.com/lann/ps,v0.0.0-20150810152359-62de8c46ede0,h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=,76756d46634f44edd3facdb01e7271ddf23a1b51a8423de55d3a2bf685ff032a +github.com/leanovate/gopter,v0.2.4,h1:U4YLBggDFhJdqQsG4Na2zX7joVTky9vHaj/AGEwSuXU=,99b27788411d478764bf7c51e4f6e84e5ccd60f3959a88a03e96b2a1d519a45d +github.com/leodido/go-urn,v1.2.0,h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=,8a854d784171000a69b79babb2cd3da9b8fccb1e1b6bb102c7a6d2b52380d08a +github.com/lestrrat-go/jspointer,v0.0.0-20181205001929-82fadba7561c,h1:pGh5EFIfczeDHwgMHgfwjhZzL+8/E3uZF6T7vER/W8c=,a64de11dd2840c3251906c5fe5f61719713af52a41287411007434684745af39 +github.com/lestrrat-go/jsref,v0.0.0-20181205001954-1b590508f37d,h1:1eeFdKL5ySmmYevvKv7iECIc4dTATeKTtBqP4/nXxDk=,1acee9b59501460f5063a82bc2c05f1a11cd24077198fc08ba100ee642d3db72 +github.com/lestrrat-go/jsschema,v0.0.0-20181205002244-5c81c58ffcc3,h1:TSKrrGm89gmmVlrG34ZzCIOMNVk5kkSV1P88Dt38DiE=,1b7552a5ecd193bdd07995226f58fe48de0aadedbcb42f3a5b135fd7b3538ea4 +github.com/lestrrat-go/jsval,v0.0.0-20181205002323-20277e9befc0,h1:w4rIjeCV/gQpxtn3i1voyF6Hd7v1mRGIB63F7RZOk1U=,f060af1b36e0f156546436dcc9b1569600871185d69e9daf214f24e0e2934784 +github.com/lestrrat-go/pdebug,v0.0.0-20180220043849-39f9a71bcabe,h1:S7XSBlgc/eI2v47LkPPVa+infH3FuTS4tPJbqCtJovo=,17690c72219264e0a195dac69ae6ed12bbadf309242dbaa21609339dfa74b3a5 
+github.com/lestrrat-go/structinfo,v0.0.0-20190212233437-acd51874663b,h1:YUFRoeHK/mvRjBR0bBRDC7ZGygYchoQ8j1xMENlObro=,8dd77f51595dea974553558e0d249059b9047a39354548b5bbd88b32cf3df75a +github.com/lestrrat/go-jsschema,v0.0.0-20181205002244-5c81c58ffcc3,h1:UaOmzcaCH2ziMcSbQFBq/3Iuz/E/Jr/GOGtV80jpFII=,ce0f1e04d70eadcc75f96d70703b53231e1c5be7d9fd832c144e0135bfd5afb4 +github.com/lib/pq,v1.2.0,h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=,cb1028c395747cacafb6c3c6ad5fa244563ce641aae45cf7742f98b6764b1fde +github.com/libp2p/go-addr-util,v0.0.1,h1:TpTQm9cXVRVSKsYbgQ7GKc3KbbHVTnbostgGaDEP+88=,d49a37e15540c8b95f845dde6cdf802e7af490bc13fd88fec3da318d08464f7b +github.com/libp2p/go-buffer-pool,v0.0.2,h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs=,fef932705b72198df3d50befd9d2aa157aea1b5f3d23712b09d627d02cfe841e +github.com/libp2p/go-conn-security,v0.0.1,h1:4kMMrqrt9EUNCNjX1xagSJC+bq16uqjMe9lk1KBMVNs=,e7b58f887c8a8a2ed0178d2f0d6b4ad36bdd7b8cf52ca4d66bafc108b80d095c +github.com/libp2p/go-conn-security-multistream,v0.1.0,h1:aqGmto+ttL/uJgX0JtQI0tD21CIEy5eYd1Hlp0juHY0=,597b249bd51de097142815318b13c339752532f15131887492d9d3e3407ab92e +github.com/libp2p/go-eventbus,v0.1.0,h1:mlawomSAjjkk97QnYiEmHsLu7E136+2oCWSHRUvMfzQ=,1b02c8340d2740f99d67078a8c8823c6b9212b92dd9ca7eaf2a38adf2bfd6b56 +github.com/libp2p/go-flow-metrics,v0.0.1,h1:0gxuFd2GuK7IIP5pKljLwps6TvcuYgvG7Atqi3INF5s=,f783542a7fce8382de9cea6940049b106cc35f9714126a1e3d61925c29db8617 +github.com/libp2p/go-libp2p-autonat,v0.1.0,h1:aCWAu43Ri4nU0ZPO7NyLzUvvfqd0nE3dX0R/ZGYVgOU=,11e86ef0b36125a7cd6aa447ffe488f7f3ab00e441bdf8cf30a832a41da4342c +github.com/libp2p/go-libp2p-blankhost,v0.1.4,h1:I96SWjR4rK9irDHcHq3XHN6hawCRTPUADzkJacgZLvk=,9cd5abe8ad2f137c13309a9dbdd213376bbec03f9685cf8cde7fbfe2e5783e7d +github.com/libp2p/go-libp2p-circuit,v0.1.0,h1:eniLL3Y9aq/sryfyV1IAHj5rlvuyj3b7iz8tSiZpdhY=,24ee6c7851f4f0072922ae497c230718a0f44beab890d0403261b38a2946a866 
+github.com/libp2p/go-libp2p-core,v0.2.4,h1:Et6ykkTwI6PU44tr8qUF9k43vP0aduMNniShAbUJJw8=,d521cc1bffba8afc8b8057901cf22c2f6ffd88faec0274426e13c4e7c12c756c +github.com/libp2p/go-libp2p-crypto,v0.1.0,h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ=,14ef1867bd8b0ef8fc528f5069ef267270dd0de8cf89a235beb9fbd79e4bed8d +github.com/libp2p/go-libp2p-discovery,v0.2.0,h1:1p3YSOq7VsgaL+xVHPi8XAmtGyas6D2J6rWBEfz/aiY=,d1c0800b601cbe6833522727b249567379422e9f324b7d0a0866bd86c74fb930 +github.com/libp2p/go-libp2p-host,v0.1.0,h1:OZwENiFm6JOK3YR5PZJxkXlJE8a5u8g4YvAUrEV2MjM=,d26bf1db299917f080a13ace37ef4363c08c2407c126cee59e642b1372d2b211 +github.com/libp2p/go-libp2p-interface-connmgr,v0.0.5,h1:KG/KNYL2tYzXAfMvQN5K1aAGTYSYUMJ1prgYa2/JI1E=,fe1e74365cc5c155161e5500671a8e9a85a90efdad9f5630bdfdc15bdfc52fe5 +github.com/libp2p/go-libp2p-interface-pnet,v0.0.1,h1:7GnzRrBTJHEsofi1ahFdPN9Si6skwXQE9UqR2S+Pkh8=,9767f78f87f54bdf3fb1f0f9b5f67e907463b445a00a566402bacca85749c8fe +github.com/libp2p/go-libp2p-loggables,v0.1.0,h1:h3w8QFfCt2UJl/0/NW4K829HX/0S4KD31PQ7m8UXXO8=,351c87c02c2b147193fac5c441d8767d2b247cd3f3c420fa205da2ccd1c3f00f +github.com/libp2p/go-libp2p-metrics,v0.1.0,h1:v7YMUTHNobFaQeqaMfJJMbnK3EPlZeb6/KFm4gE9dks=,a86fe0ae6cda820fd6a0e576bcd94a22360439819f344a9121086b31c651caaf +github.com/libp2p/go-libp2p-mplex,v0.2.1,h1:E1xaJBQnbSiTHGI1gaBKmKhu1TUKkErKJnE8iGvirYI=,f11961ef5114e57eb176740a066e1535132c8c238bd444ed53d94fad36ba7708 +github.com/libp2p/go-libp2p-nat,v0.0.4,h1:+KXK324yaY701On8a0aGjTnw8467kW3ExKcqW2wwmyw=,4c3db4e0f7f714439364ca0853f63d426bba67924da6fd050fd0184abdfec2df +github.com/libp2p/go-libp2p-net,v0.1.0,h1:3t23V5cR4GXcNoFriNoZKFdUZEUDZgUkvfwkD2INvQE=,4140afd418393c2a4ecccca97d80b4752d20da6f34fac15fbdc4f0566f7b8cea +github.com/libp2p/go-libp2p-netutil,v0.1.0,h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ=,c98ad0a3ffab37b6a0bc80aefba4e4cb442b09c01277a7dcc0086c4a004e649a 
+github.com/libp2p/go-libp2p-peer,v0.2.0,h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY=,5b400d6b6337cc759846d7ddf50ec2d761148e1447a86982696687ef1e792c1a +github.com/libp2p/go-libp2p-peerstore,v0.1.4,h1:d23fvq5oYMJ/lkkbO4oTwBp/JP+I/1m5gZJobNXCE/k=,1606c0bb56c31d0249980b8a0c0e5dda9212687b1994ef47cfd42039d4cf1847 +github.com/libp2p/go-libp2p-protocol,v0.1.0,h1:HdqhEyhg0ToCaxgMhnOmUO8snQtt/kQlcjVk3UoJU3c=,4560018136a73817e03eed49af46d97dd561b3eeffb1ff00559152acf9a74627 +github.com/libp2p/go-libp2p-pubsub,v0.2.0,h1:4UXcjpQdpam/RsGhfWyT/4u5f6F42ods/WgDAaocYxA=,bde7bb50d950b8ea7902c523a696eff4ad7b5d0daac808356358ff6d53aecb14 +github.com/libp2p/go-libp2p-record,v0.1.1,h1:ZJK2bHXYUBqObHX+rHLSNrM3M8fmJUlUHrodDPPATmY=,27a3e94e144b893cbb5ceaddfa7a4e456052e173f807db52945e06920f62d0b3 +github.com/libp2p/go-libp2p-routing,v0.1.0,h1:hFnj3WR3E2tOcKaGpyzfP4gvFZ3t8JkQmbapN0Ct+oU=,4241980dadf216e937a42a572a9c5b5eb28ff62458380ad37892c5b5095de270 +github.com/libp2p/go-libp2p-secio,v0.2.0,h1:ywzZBsWEEz2KNTn5RtzauEDq5RFEefPsttXYwAWqHng=,979a82829f3188d4ca8d20d194923c5620ff12a161d13c945c1630b7b9d050ff +github.com/libp2p/go-libp2p-swarm,v0.2.2,h1:T4hUpgEs2r371PweU3DuH7EOmBIdTBCwWs+FLcgx3bQ=,b920f69fbfaa8805047b958c6d45d944a195181dd6dddab36bead5fe68f2f1e4 +github.com/libp2p/go-libp2p-testing,v0.1.0,h1:WaFRj/t3HdMZGNZqnU2pS7pDRBmMeoDx7/HDNpeyT9U=,e1c7fa467d88b33f2fc519542cc19aa48bcade304f579f10ab402a19c38d0aa6 +github.com/libp2p/go-libp2p-transport,v0.0.5,h1:pV6+UlRxyDpASSGD+60vMvdifSCby6JkJDfi+yUMHac=,df7bc96a5d76c351fd3a6ee29995f4974013d9709904edd9608b86f4fa089ad2 +github.com/libp2p/go-libp2p-transport-upgrader,v0.1.1,h1:PZMS9lhjK9VytzMCW3tWHAXtKXmlURSc3ZdvwEcKCzw=,60ea73fa42536178798c3d4a36c5f9cffb185b6c1629c23c3faff5919f9e9cad +github.com/libp2p/go-libp2p-yamux,v0.2.1,h1:Q3XYNiKCC2vIxrvUJL+Jg1kiyeEaIDNKLjgEjo3VQdI=,849f0097fd7203b5c6d590463b7fb17573af8d12136413768706188a39b34b21 
+github.com/libp2p/go-maddr-filter,v0.0.5,h1:CW3AgbMO6vUvT4kf87y4N+0P8KUl2aqLYhrGyDUbLSg=,19c76e021879aab85a8858b53d706220e9e3277a96dead161db152f5a1d17219 +github.com/libp2p/go-mplex,v0.1.0,h1:/nBTy5+1yRyY82YaO6HXQRnO5IAGsXTjEJaR3LdTPc0=,3340a423ea89310360810973a77a97c217fe7b35e1c18189a3628e35fe1275e0 +github.com/libp2p/go-msgio,v0.0.4,h1:agEFehY3zWJFUHK6SEMR7UYmk2z6kC3oeCM7ybLhguA=,ec22f703203a2a443c57896b2082c02fe9c54d372aad091cdca144709d244721 +github.com/libp2p/go-nat,v0.0.3,h1:l6fKV+p0Xa354EqQOQP+d8CivdLM4kl5GxC1hSc/UeI=,d642c9dd697176ec69c4a5faeff1fc3b5472ef9f32c2c40e21c42f81ceef86b9 +github.com/libp2p/go-openssl,v0.0.3,h1:wjlG7HvQkt4Fq4cfH33Ivpwp0omaElYEi9z26qaIkIk=,f2eb05d710fe960ba12d5f640cefe7d31d24f1fab0d9a52faf5f2923a19c6f13 +github.com/libp2p/go-reuseport,v0.0.1,h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FWpFLw=,274ade934c7f26ffae86d3f4d34352371c3eca7ead080392f6f35698ec5f0a3f +github.com/libp2p/go-reuseport-transport,v0.0.2,h1:WglMwyXyBu61CMkjCCtnmqNqnjib0GIEjMiHTwR/KN4=,866f45bfa6c2e65d563955a28050bcfbc6ed11df6ded7c551e92ff98ba98a2d8 +github.com/libp2p/go-stream-muxer,v0.1.0,h1:3ToDXUzx8pDC6RfuOzGsUYP5roMDthbUKRdMRRhqAqY=,d42dab9fb102b3e56cc555eb9aacb742e4230120dd356078cc723f8817200d43 +github.com/libp2p/go-stream-muxer-multistream,v0.2.0,h1:714bRJ4Zy9mdhyTLJ+ZKiROmAFwUHpeRidG+q7LTQOg=,a4ca5d0422d55ee7b4e74b040ca85799365b05684b7b6687adfa79a345049a9d +github.com/libp2p/go-tcp-transport,v0.1.1,h1:yGlqURmqgNA2fvzjSgZNlHcsd/IulAnKM8Ncu+vlqnw=,147dc8d50aab944666c1a7a371ba3e351480506313be298ebf8dcdb9dc51b1b4 +github.com/libp2p/go-testutil,v0.1.0,h1:4QhjaWGO89udplblLVpgGDOQjzFlRavZOjuEnz2rLMc=,9fa6fa5741f541a6309e8a5fa6031c51f97fcd3086fe3a3b371b74f9d8e9a4b8 +github.com/libp2p/go-ws-transport,v0.1.0,h1:F+0OvvdmPTDsVc4AjPHjV7L7Pk1B7D5QwtDcKE2oag4=,30cfd8011bb8de03c23680d2249120ea9ba29879e855ca5c35311f8fa874d094 +github.com/libp2p/go-yamux,v1.2.3,h1:xX8A36vpXb59frIzWFdEgptLMsOANMFq2K7fPRlunYI=,97947a07c9430184c3be45e87580abcdea18c9b7435adb8048b08aebce0fea50 
+github.com/liggitt/tabwriter,v0.0.0-20181228230101-89fcab3d43de,h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=,41b6869255915ffdfd32575ba14d52732d62d34b47d904df4890e165489ec77d +github.com/linkedin/goavro,v2.1.0+incompatible,h1:DV2aUlj2xZiuxQyvag8Dy7zjY69ENjS66bWkSfdpddY=,25d4ccde4ece770196fbf6f09ca4184df581944224be5d64a263eb2c7f9a24fc +github.com/linode/linodego,v0.10.0,h1:AMdb82HVgY8o3mjBXJcUv9B+fnJjfDMn2rNRGbX+jvM=,4c4e8829c0290c473e36bacdce8b490833d1f6247b1a4290062db30ba2b21568 +github.com/liquidweb/liquidweb-go,v1.6.0,h1:vIj1I/Wf97fUnyirD+bi6Y63c0GiXk9nKI1+sFFl3G0=,19e08fe2aa62655eb3cb209b37d532a267dd3078e5d262c4c45e7e09134b079c +github.com/lithammer/dedent,v1.1.0,h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY=,4ec56a3fef0d7dd1536046e540827e60419a935dde49d87d21f5856174cadba2 +github.com/logrusorgru/aurora,v0.0.0-20180419164547-d694e6f975a9,h1:KQdwUNlTDGyS6e+2rjAxfHSpBFIOHXqgDceNDqb55+4=,3b9d5caeede8553ead48405de57cd25bf6276b12531dae582c3ee089474aaf95 +github.com/loov/hrtime,v0.0.0-20181214195526-37a208e8344e,h1:UC+nLCm+w3WL+ibAW/wsWbQC3KAz7LLawR2hgX0eR9s=,f077796a9f39c579d356ac8f99831c56b3b2c52b70526f97730eccdc5ce558b2 +github.com/loov/plot,v0.0.0-20180510142208-e59891ae1271,h1:51ToN6N0TDtCruf681gufYuEhO9qFHQzM3RFTS/n6XE=,eb57dc24113d92cda1d0eecd6280603a2f1a98eececde895db4b060a7208659a +github.com/lovoo/gcloud-opentracing,v0.3.0,h1:nAeKG70rIsog0TelcEtt6KU0Y1s5qXtsDLnHp0urPLU=,7bead4937d23976e07caf4bf7a7f302724cda9155aa8ac4de7baa2e10976eacc +github.com/lsegal/gucumber,v0.0.0-20180127021336-7d5c79e832a2,h1:Gg0dt1q5bB+3R3qu+BucR+1f5ZhKm3OzPPo53dZ3Hxs=,2e5cd235f8c80ae078b3115b41fb765682c796d62fa54ecbb2096b159b0294bd +github.com/lucas-clemente/aes12,v0.0.0-20171027163421-cd47fb39b79f,h1:sSeNEkJrs+0F9TUau0CgWTTNEwF23HST3Eq0A+QIx+A=,074a3c40044c8f07dbe93129fe30bfd4a12f6283f393e7300664d59924a8af2b +github.com/lucas-clemente/quic-clients,v0.1.0,h1:/P9n0nICT/GnQJkZovtBqridjxU0ao34m7DpMts79qY=,b916edbd87d45fd375b0f81f905453102eb4e7e724ca0fc8ac5be323fe5958b8 
+github.com/lucas-clemente/quic-go,v0.12.1,h1:BPITli+6KnKogtTxBk2aS4okr5dUHz2LtIDAP1b8UL4=,144443ffb6231cabbe6da1496c5851eb73f03fff33d7bd94aa394f8d1e3c73b3 +github.com/lucas-clemente/quic-go-certificates,v0.0.0-20160823095156-d2f86524cced,h1:zqEC1GJZFbGZA0tRyNZqRjep92K5fujFtFsu5ZW7Aug=,d9eff929a62711fc36f9655008e144863cd816ad2b59d25eb00a248c96178ce5 +github.com/lucasb-eyer/go-colorful,v1.0.2,h1:mCMFu6PgSozg9tDNMMK3g18oJBX7oYGrC09mS6CXfO4=,c0e388db91f217be87f8d508ac9f495adc5a33ffda78849e2d0a89a8e8dae28c +github.com/lunixbochs/struc,v0.0.0-20190916212049-a5c72983bc42,h1:PzBD7QuxXSgSu61TKXxRwVGzWO5d9QZ0HxFFpndZMCg=,8a7db31161ec3a3bcc7b52e25975d0299b9c0bb465f076014d303f112b5cb9e1 +github.com/lunixbochs/vtclean,v1.0.0,h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8=,4d73f9678abde21c67dd8cb4ed8d7f63bcdd9413b6093b53cec4d26ce1be5b88 +github.com/lunny/dingtalk_webhook,v0.0.0-20171025031554-e3534c89ef96,h1:uNwtsDp7ci48vBTTxDuwcoTXz4lwtDTe7TjCQ0noaWY=,b94d4c7cacca0c289b3fbbeae6cc9e66f2eec4a3210fbbfd208316337ff2f1e3 +github.com/lunny/levelqueue,v0.0.0-20190217115915-02b525a4418e,h1:GSprKUrG9wNgwQgROvjPGXmcZrg4OLslOuZGB0uJjx8=,8f62ece23811c3c2be0d1c8d10057ab564641b2f73dc5a9910dd5f8462954f19 +github.com/lunny/log,v0.0.0-20160921050905-7887c61bf0de,h1:nyxwRdWHAVxpFcDThedEgQ07DbcRc5xgNObtbTp76fk=,0d551b83dcb0c4a3e0f97febf74e8f69b58a419791e217a7d2fd3d79a1e5877b +github.com/lunny/nodb,v0.0.0-20160621015157-fc1ef06ad4af,h1:UaWHNBdukWrSG3DRvHFR/hyfg681fceqQDYVTBncKfQ=,a0f6632294f1eec60e2651fa2d4b3590f3a1a8e2f7692dcc77251b945906a701 +github.com/lusis/go-artifactory,v0.0.0-20160115162124-7e4ce345df82,h1:wnfcqULT+N2seWf6y4yHzmi7GD2kNx4Ute0qArktD48=,487d2ef1720bd49c5a36efc8893fdb0a76bd5f8b064c2a98974a78b3e35f5763 +github.com/lusis/go-slackbot,v0.0.0-20180109053408-401027ccfef5,h1:AsEBgzv3DhuYHI/GiQh2HxvTP71HCCE9E/tzGUzGdtU=,0bb7feaeb5a4e83486234c1c8fbe2f73b94213f511aaf6b8ef1f0fc96dd7b4fa 
+github.com/lusis/outputter,v0.0.0-20171130132426-5a3b464a163f,h1:JY0YSH+YvMGmq83g5qILMAkJDFv7qIiHalhlQXal9V0=,e3b54ad36707730681b10a3838d89c346bf2d2c52cb61a241b178bcb0fc96e0f +github.com/lusis/slack-test,v0.0.0-20190426140909-c40012f20018,h1:MNApn+Z+fIT4NPZopPfCc1obT6aY3SVM6DOctz1A9ZU=,019aa5a65d7fc369730c089a8af985f8d4760297a0058dd0c352fb662e8a0cfc +github.com/lyft/protoc-gen-star,v0.4.11,h1:zW6fJQBtCtVeSiO/Kbpzv32GO0J/Z8egSLeohES202w=,673c0c53ce301a5589d4aab2b389c6ab52c8312193bae9b491e75e4938475277 +github.com/lyft/protoc-gen-validate,v0.1.0,h1:NytKd9K7UW7Szxn+9PYNsaJ/98TL/WsDq4ro4ZVuh5o=,2e452d4298aa5f2be8d4eda3e55522a4c020d0f23dac6b33ecf9942be09bf082 +github.com/magefile/mage,v1.4.0,h1:RI7B1CgnPAuu2O9lWszwya61RLmfL0KCdo+QyyI/Bhk=,55862155e89367536d665080ac028decc98ce68c5651ccc4238d7e34ddf1cbc2 +github.com/magiconair/properties,v1.8.1,h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=,c0f0378f5949db2e7976d6822a0dfac1786acd34190e83ab253d6505542d0128 +github.com/mailgun/mailgun-go,v0.0.0-20171127222028-17e8bd11e87c,h1:5huPh/MfWW65cx8KWNVD4mCCnwIrNiX4bFJR5OeONg0=,33250edd00795e387f2de671003b8ef8f2d940d24b12a9ce90c6b49dd6094231 +github.com/mailgun/minheap,v0.0.0-20170619185613-3dbe6c6bf55f,h1:aOqSQstfwSx9+tcM/xiKTio3IVjs7ZL2vU8kI9bI6bM=,26930b2a6dc2f2b442e28ecc5dcbb22c2e7da3d151b3388d0bc604370bd9df77 +github.com/mailgun/multibuf,v0.0.0-20150714184110-565402cd71fb,h1:m2FGM8K2LC9Zyt/7zbQNn5Uvf/YV7vFWKtoMcC7hHU8=,7dbb280e8bc981732510ee72e124e931991d06c317531de709fd7922e38a5339 +github.com/mailgun/timetools,v0.0.0-20170619190023-f3a7b8ffff47,h1:jlyJPTyctWqANbaxi/nXRrxX4WeeAGMPaHPj9XlO0Rw=,a4d961cefbfbe858f4ba5a5824d91ad8713a736707f5c259cf0d7307a07ac83e +github.com/mailgun/ttlmap,v0.0.0-20170619185759-c1c17f74874f,h1:ZZYhg16XocqSKPGNQAe0aeweNtFxuedbwwb4fSlg7h4=,35308e95ed02635049d1804b85f16407f3109fc60c38df541f0401dbba66dc8d +github.com/mailru/easyjson,v0.7.0,h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=,c36c8ab36aab9ba2ca776d1c71cbd9c30fce7c4e8e62be6611f4c2d1e98e86ae 
+github.com/manucorporat/sse,v0.0.0-20160126180136-ee05b128a739,h1:ykXz+pRRTibcSjG1yRhpdSHInF8yZY/mfn+Rz2Nd1rE=,cd90f350cca3a6536432afb4cd2355ff25124ef89fc23a52392e5189733b0359 +github.com/manveru/faker,v0.0.0-20171103152722-9fbc68a78c4d,h1:Zj+PHjnhRYWBK6RqCDBcAhLXoi3TzC27Zad/Vn+gnVQ=,80bc3e8ca50e89d3a6139d1709fbf4680c26231079d297d237902d3c23f4c1e8 +github.com/manveru/gobdd,v0.0.0-20131210092515-f1a17fdd710b,h1:3E44bLeN8uKYdfQqVQycPnaVviZdBLbizFhU49mtbe4=,39811c3d6c7de66195a29a78b235dead57fb866e61082301fe68d51cf04a5200 +github.com/markbates/deplist,v1.3.0,h1:uPgoloPraPBPYtNSxj2UwZBh2EHW9TmMvQCP2FBiRlU=,e0b1903fb33c324721565076e2061d7f54e29ba098afb80af4fe2ccdd02ed178 +github.com/markbates/going,v1.0.3,h1:mY45T5TvW+Xz5A6jY7lf4+NLg9D8+iuStIHyR7M8qsE=,61efe687a56d3141284be7bdb83bb5ae86e1df694ababa5937c4d3e30f3b60f1 +github.com/markbates/goth,v1.49.0,h1:qQ4Ti4WaqAxNAggOC+4s5M85sMVfMJwQn/Xkp73wfgI=,39a0244d07f47d7b91215590900a7754c4700e875c0866b1e65568133471478a +github.com/markbates/grift,v1.1.0,h1:DsljFKUSK1ELpU22ZE+Gi93jiQI3cYD/RQ+vHM/PpY8=,29aa2fa782f9d8730bde2df024c40ba749f1812dd3bbab489b4197a1faa78627 +github.com/markbates/hmax,v1.1.0,h1:MswE0ks4Iv1UAQNlvAyFpsyFQSBHolckas95gRUkka4=,8c7557798a88c74594f27137be859e99195427e2e04f0835f48781b0bde5c73a +github.com/markbates/inflect,v1.0.4,h1:5fh1gzTFhfae06u3hzHYO9xe3l3v3nW5Pwt3naLTP5g=,0da6e75f6cd27672255a41f5dfab418d2746897239ad601e5d8d78d6354b5665 +github.com/markbates/oncer,v1.0.0,h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY=,9a774885bfa4c9a96c438fdb51768833e1c7003f35cd27961137ff4096b1a764 +github.com/markbates/refresh,v1.8.0,h1:ELMS9kKyO/H6cJrqFo6qCyE0cRx2JeHWC9yusDkVeM8=,7ac81390a898cfd1cdc097ffb1e05321c415183165b7341749de41160c47e504 +github.com/markbates/safe,v1.0.1,h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=,d5a98e8242318d4e88844ddbbfebe91f67f41e5aa1f6a96a58fa2fa94e0ae9ef 
+github.com/markbates/sigtx,v1.0.0,h1:y/xtkBvNPRjD4KeEplf4w9rJVSc23/xl+jXYGowTwy0=,e3b591a1a2b4dcec7b86d59e504b0bbf87ec3663efad818cd9b00471a33a0345 +github.com/markbates/willie,v1.0.9,h1:394PpHImWjScL9X2VRCDXJAcc77sHsSr3w3sOnL/DVc=,a6c3eda44d765eeb1370b0ddeb739df86e900b78eb365688da143f1c0c0e9bc0 +github.com/marstr/guid,v1.1.0,h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=,7db3cd8020c72ba260d1a20183bf5a030c696d6442eccaff2b31f72b194fc571 +github.com/marten-seemann/qpack,v0.1.0,h1:/0M7lkda/6mus9B8u34Asqm8ZhHAAt9Ho0vniNuVSVg=,46c42087e554edae4e19f79b785722d27316e23278889bf78a0c8f43fc387f2e +github.com/marten-seemann/qtls,v0.3.2,h1:O7awy4bHEzSX/K3h+fZig3/Vo03s/RxlxgsAk9sYamI=,ff5245b3d5a1e65754d4a740e09ff02c738e9043c6e2bc02c59d5851c1fc1e2d +github.com/martini-contrib/render,v0.0.0-20150707142108-ec18f8345a11,h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw=,2edd7f64b2f1f053f86a51856cd0f02b1f762af61a458a2e282dab76ad093d70 +github.com/martinlindhe/unit,v0.0.0-20190604142932-3b6be53d49af,h1:4bEyeobv/dO+lT1Qp1hr+/DcNjy6Ob8BDaSrxX6nQsQ=,ee5001e908fb9997e5918c909dcb0cc078f1a91719f4df3d62243d5e88dc07c6 +github.com/martinusso/go-docs,v0.0.0-20161215163720-81905d575a58,h1:VmcrkkMjTdCGOsuuMnn7P2X9dGh3meUNASx6kHIpe7A=,70ad43a3172287882f904657184af77133a578c6d1ec968c5ce3e27259100a06 +github.com/maruel/panicparse,v0.0.0-20171209025017-c0182c169410,h1:1ROIrlLvFoHKX+i48KdRauq21irSOXPyfQw4T/PrINY=,5fd98b2b0a8346ffcba1858775e93db0582ead6b3329b974595d5ab448c95f28 +github.com/maruel/ut,v1.0.0,h1:Tg5f5waOijrohsOwnMlr1bZmv+wHEbuMEacNBE8kQ7k=,a7c90a5020071c66efe2ccae7f3859c60f17840d4ae2972ee9c9a38ae071fb3e +github.com/masterzen/azure-sdk-for-go,v0.0.0-20161014135628-ee4f0065d00c,h1:FMUOnVGy8nWk1cvlMCAoftRItQGMxI0vzJ3dQjeZTCE=,de40198aee773ecaf502d59b8f29fe5d1564fb9a68900b6bfed2369e169e193a +github.com/masterzen/simplexml,v0.0.0-20190410153822-31eea3082786,h1:2ZKn+w/BJeL43sCxI2jhPLRv73oVVOjEKZjKkflyqxg=,a9e4548a5c7e098c89273c470e4e9d18cb0beb530629f2e512f6f105fd9cbc88 
+github.com/masterzen/winrm,v0.0.0-20190223112901-5e5c9a7fe54b,h1:/1RFh2SLCJ+tEnT73+Fh5R2AO89sQqs8ba7o+hx1G0Y=,28f8e69baadf7f220842a5cd4269ccebdb175a835c0b43819a6b15670ae5403c +github.com/matryer/moq,v0.0.0-20190312154309-6cfb0558e1bd,h1:HvFwW+cm9bCbZ/+vuGNq7CRWXql8c0y8nGeYpqmpvmk=,b9fb2bc3d0894dfaa3cc4298f49c97346ccb66f2f0e6911f4f224ffc9acc3972 +github.com/matryer/try,v0.0.0-20161228173917-9ac251b645a2,h1:JAEbJn3j/FrhdWA9jW8B5ajsLIjeuEHLi8xE4fk997o=,f1afa36a4bd0bf09a1290f3afef954058e334d6b275aae6a591d8dad276f5e2f +github.com/mattbaird/elastigo,v0.0.0-20170123220020-2fe47fd29e4b,h1:v29yPGHhOqw7VHEnTeQFAth3SsBrmwc8JfuhNY0G34k=,f6a94deccbe4d008d265bb4b5cbaee7893e5994a82bc49b44438675a0ca8d8f3 +github.com/mattbaird/jsonpatch,v0.0.0-20171005235357-81af80346b1a,h1:+J2gw7Bw77w/fbK7wnNJJDKmw1IbWft2Ul5BzrG1Qm8=,55abaf4d26d8ad7f81c230f38a6e482b6b416d9b5777a6c3b1a5c140465a5235 +github.com/mattermost/mattermost-server,v5.11.1+incompatible,h1:LPzKY0+2Tic/ik67qIg6VrydRCgxNXZQXOeaiJ2rMBY=,1f601d79e647a248f9e711891e015b1709f3af37e6a45d5e97827f074c40398e +github.com/mattn/go-colorable,v0.1.4,h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=,02ad42bc54adf7c52030b6ab903277af8fb7163aad4f7f8d8703ecfdc62597de +github.com/mattn/go-ieproxy,v0.0.0-20190805055040-f9202b1cfdeb,h1:hXqqXzQtJbENrsb+rsIqkVqcg4FUJL0SQFGw08Dgivw=,5914c18852b0be63008f7ccaf1bd3a8214a82fae78f8afe2e7d774ff96a410ff +github.com/mattn/go-isatty,v0.0.10,h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=,dca893515dccb58e21f9b08837470c5512e0ecd1275767ed996912bb46933c91 +github.com/mattn/go-mastodon,v0.0.5-0.20190517015615-8f6192e26b66,h1:TbnaLJhq+sFuqZ1wxdfF5Uk7A2J41iOobCCFnLI+RPE=,b290b77b6e5556bba70cf18ac815c13ed9a80ffa4cb03627d73187e99cd15d42 +github.com/mattn/go-oci8,v0.0.0-20190320171441-14ba190cf52d,h1:m+dSK37rFf2fqppZhg15yI2IwC9BtucBiRwSDm9VL8g=,eb3bd1fa93c8a341ad43176cb6e4d8540d7a91d3edd7eb98c1388cf2f4c3515c 
+github.com/mattn/go-runewidth,v0.0.5,h1:jrGtp51JOKTWgvLFzfG6OtZOJcK2sEnzc/U+zw7TtbA=,3b34033634b059bfa31ac552d2150d8c0d6e530dd1c0ead2ce0806e1d7cc754a +github.com/mattn/go-shellwords,v1.0.6,h1:9Jok5pILi5S1MnDirGVTufYGtksUs/V2BWUP3ZkeUUI=,374285b205f0659ab4be3f8ce346cfd3291cd42f47b12bda15174c42c462b1a6 +github.com/mattn/go-sqlite3,v1.11.0,h1:LDdKkqtYlom37fkvqs8rMPFKAMe8+SgjbwZ6ex1/A/Q=,7fec79c50206f5faa759d1b64500fb0d082e22ef23f10e2d4cbce24e4fc2d5c1 +github.com/mattn/go-tty,v0.0.0-20190424173100-523744f04859,h1:smQbSzmT3EHl4EUwtFwFGmGIpiYgIiiPeVv1uguIQEE=,76f28f59927667d2d750fa6ffdefeb3f0c41034cb593e4545a206995c76c619f +github.com/mattn/go-xmpp,v0.0.0-20190124093244-6093f50721ed,h1:A1hEQg5M0b3Wg06pm3q/B0wdZsPjVQ/a2IgauQ8wCZo=,2c39b78184ea27890be56f593353c8fe6b3d6efa53db20e800ff8793bc665199 +github.com/mattn/go-zglob,v0.0.1,h1:xsEx/XUoVlI6yXjqBK062zYhRTZltCNmYPx6v+8DNaY=,8decd6c1916188ab4fa1001e3da3f22d7c9fb6218215fd25053c901979930feb +github.com/mattn/goveralls,v0.0.2,h1:7eJB6EqsPhRVxvwEXGnqdO2sJI0PTsrWoTMXEk9/OQc=,3df5b7ebfb61edd9a098895aae7009a927a2fe91f73f38f48467a7b9e6c006f7 +github.com/matttproud/golang_protobuf_extensions,v1.0.1,h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=,e64dc58023f4b8c4472d05a44f2719b84d6c2cc364cc682820c9f72b233c9cdc +github.com/maxbrunsfeld/counterfeiter/v6,v6.2.2,h1:g+4J5sZg6osfvEfkRZxJ1em0VT95/UOZgi/l7zi1/oE=,c185793a7e749ff2557f4557628f5b5d8d9edbf72ca6bd2cb94503f4817c01d2 +github.com/mcuadros/go-version,v0.0.0-20190830083331-035f6764e8d2,h1:YocNLcTBdEdvY3iDK6jfWXvEaM5OCKkjxPKoJRdB3Gg=,ff2364bda8605ad94051c576ffa601e1a9aedabc8a1fda588eb04c3371a845ea +github.com/mdlayher/dhcp6,v0.0.0-20190311162359-2a67805d7d0b,h1:r12blE3QRYlW1WBiBEe007O6NrTb/P54OjR5d4WLEGk=,fba7b2f01311e2d41bb4ebe15409d4e0a605a79d2f05156bb0f4adbc20f557bc +github.com/mdlayher/netlink,v0.0.0-20191009155606-de872b0d824b,h1:W3er9pI7mt2gOqOWzwvx20iJ8Akiqz1mUMTxU6wdvl8=,9be201b393fe866f855e5ebb20ef33e86a0e6a99b6b76209531b93615fcbac7c 
+github.com/mesos/mesos-go,v0.0.10,h1:+M/7Zlkvw4MolkLvXHfj6hkDsLLHOOU54CmOkOUaNBc=,f18d5601dc6a5234b9c2d65cb96b8d30ab877e3117dd52dd47e31a353ed887d1 +github.com/mgutz/ansi,v0.0.0-20170206155736-9520e82c474b,h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=,d7c0ff88c53dfca384bb82108a6e5fdc9e11b358d68b67144ff6a285be20a16a +github.com/mgutz/logxi,v0.0.0-20161027140823-aebf8a7d67ab,h1:n8cgpHzJ5+EDyDri2s/GC7a9+qK3/YEGnBsd0uS/8PY=,0a7837d5246591fe1fd341e48a72786c0b61fff8d3ebfea0e9c789176c3e75d5 +github.com/mgutz/str,v1.2.0,h1:4IzWSdIz9qPQWLfKZ0rJcV0jcUDpxvP4JVZ4GXQyvSw=,bf640c2048957f183e72664ff08745ae3d016f64072a5967f5269ccb5fc4b318 +github.com/mholt/archiver,v3.1.1+incompatible,h1:1dCVxuqs0dJseYEhi5pl7MYPH9zDa1wBi7mF09cbNkU=,6cbad83ecd8a2bcb013fb1ac163a6551e6f948b103df9b258788612c72551184 +github.com/mholt/certmagic,v0.7.5,h1:1ZGHwUI4+zg1S17tPUj5Xxb9Q1ghTjLcUZE5G4yV5SM=,a85c14ecbb135636c8e4701a25b8d2884f091d948269c0a3187918af83e11db3 +github.com/michaelklishin/rabbit-hole,v1.5.0,h1:Bex27BiFDsijCM9D0ezSHqyy0kehpYHuNKaPqq/a4RM=,1fdb62e985c4b1be24632875668720ed687455ece54cb2c77079488784e06e69 +github.com/micro-plat/gmq,v1.0.1,h1:ai1PiCEfgBmiqzmZ4iWE3l2Vuz7rOTWOakqRWqi/Hgo=,63c4a02b87b31c0f5cfcdfee5df2fa05e77eeaa2aab93b0ef217c57f6b37b38a +github.com/micro-plat/lib4go,v0.2.1,h1:NBTIq0DvpRzTChnYShBagPmsYM4k1NgvkE8OYhgMDt8=,ae1056cc76eee3fccb14b0d8723b6444d8f31d2575a0caa1d3723bc54b91496b +github.com/micro/cli,v0.2.0,h1:ut3rV5JWqZjsXIa2MvGF+qMUP8DAUTvHX9Br5gO4afA=,09e532e4616aa7827d1a1f249bc80ebb01fe8c63978f4b14605246c6be596b82 +github.com/micro/go-log,v0.1.0,h1:szYSR+yyTsomZM2jyinJC5562DlqffSjHmTZFaeZ2vY=,5ec9ba1cfb781edd3695dc9c28afb520cced5e1cf7eabb5faafd4bd8db6953ea +github.com/micro/go-micro,v1.14.0,h1:lptn9DBbsNCB3RC3PMwxTJGqCUgU8Rf23nAMaRuOcOA=,2278cfa86f7bf97df81ea79535127cf87bf03aba29e7603f2feeb48b2d1a3334 +github.com/micro/go-rcache,v0.2.0,h1:g51QJW+lj+dAOXwRlYNZPQQ8ueHLptgoUzZE3iRwJMg=,fa96add40dac8fb14cf08f7a8c96d05c902da40b27b2c4e586cf3304e4ef6533 
+github.com/micro/h2c,v1.0.0,h1:ejw6MS5+WaUoMHRtqkVCCrrVzLMzOFEH52rEyd8Fl2I=,6fea0303cbaa2bc6c45098ce5ad0ae2aa7f9c54ce2ff90160549756f8c7a2b07 +github.com/micro/mdns,v0.3.0,h1:bYycYe+98AXR3s8Nq5qvt6C573uFTDPIYzJemWON0QE=,a40ecbd32a2170698f0f49f8961b39e88e7c3e958546a401a59653231b51f1b2 +github.com/micro/micro,v1.14.0,h1:Uol1+Yg5frzneACpzoHEDsyNTN+/+yLrlGMuxR3RVRQ=,0fd330788ad610cc2cb3eb2224f1ca403d9888ad40e78628f250c885373d739c +github.com/micro/util,v0.2.0,h1:6u0cPj1TeixEk5cAR9jbcVRUWDQsmCaZvDBiM3zFZuA=,3e61d5232a3a91d521ade483ab64b53a7b8760d0635978d72b4920eba52f8f79 +github.com/microcosm-cc/bluemonday,v1.0.2,h1:5lPfLTTAvAbtS0VqT+94yOtFnGfUWYyx0+iToC3Os3s=,9cfac37098da75ab1c278740e8f0f7741891d8843e14afb256574596ad786f83 +github.com/miekg/dns,v1.1.22,h1:Jm64b3bO9kP43ddLjL2EY3Io6bmy1qGb9Xxz6TqS6rc=,54f1f62de314150df163bbe1de91acc922cdce70c5c8a43dfeb7f4af24711d38 +github.com/miekg/mmark,v1.3.6,h1:t47x5vThdwgLJzofNsbsAl7gmIiJ7kbDQN5BxwBmwvY=,8d1b05ee1c0a28093c678af2ed9d0aac9dfc30dce728ccd21fe1506762b54cee +github.com/mindprince/gonvml,v0.0.0-20190828220739-9ebdce4bb989,h1:PS1dLCGtD8bb9RPKJrc8bS7qHL6JnW1CZvwzH9dPoUs=,6702f94187c4e2994ffbdc318c94a04d4bc67081a402e968a2c362a74c81263f +github.com/minio/blake2b-simd,v0.0.0-20160723061019-3f5f724cb5b1,h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=,ab10edfe994b513e2d03cdd8122b352f31a1eb246fe884617b3f2f6195a3ca0c +github.com/minio/cli,v1.22.0,h1:VTQm7lmXm3quxO917X3p+el1l0Ca5X3S4PM2ruUYO68=,33533a4e0a2b1a698d0f899cb5b84d9fc199e7723b971d1408e4b5ee797c9a50 +github.com/minio/dsync,v0.0.0-20180124070302-439a0961af70,h1:pRHQdPOlUhelWqNUF3icFrBSC6VYH1hvF6HigVfgMoI=,850e5b400afc4301a1860debf934c5e8e67565d4937ac45f9a37132b31a09941 +github.com/minio/highwayhash,v0.0.0-20180501080913-85fc8a2dacad,h1:L+8skVz2lusCbtlalLXmJp+TK8XaGAsZ3utSC3k5Jc0=,7393dfe736668f9ab98fcf2d264f9bd20bbf4f98538f02ff15df9604f747cdb1 
+github.com/minio/lsync,v0.0.0-20180328070428-f332c3883f63,h1:utJHim4C0K4CmD+Qgod/tgHvo7QNOlH6HN5O8QUvPEI=,417c4bdd4fc5d50da2d81e8890b03af4b80ce9fbd5e4c196731a3d76a09913c1 +github.com/minio/mc,v0.0.0-20180926130011-a215fbb71884,h1:co3kRW9cEI65yolYtcLcNxp2a9yk5T/eEt7gw14tJVs=,37300de5179e1085559c6f317b331d261cc4508ba0e4febbd93cbbfef42d7fc9 +github.com/minio/minio,v0.0.0-20180508161510-54cd29b51c38,h1:F7p0ZU9AQuxlA6SWwhXr0H/rYrA9fOiBk2OzOj7GtfM=,6421e5cf72b35a2948e5edd2b189f37ad1896b8637d5b9bcf7cd40b7ab63dfd4 +github.com/minio/minio-go,v6.0.14+incompatible,h1:fnV+GD28LeqdN6vT2XdGKW8Qe/IfjJDswNVuni6km9o=,3bc396d5e1c0c6f3497743140eaf16ebb97c5f1ca815ba12c4f431e804fb737d +github.com/minio/minio-go/v6,v6.0.27-0.20190529152532-de69c0e465ed,h1:g3DRJpu22jEjs14fSeJ7Crn9vdreiRsn4RtrEsXH/6A=,34d85b6b915ef5876f9c262f260583fabec147c37dcb82e1f42374dd088b9096 +github.com/minio/sha256-simd,v0.1.1,h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=,0ecfa6532265e139d5d9406c0a803c7ef45b1d8d0f0c1b1d55f7b81969294bfc +github.com/minio/sio,v0.0.0-20180327104954-6a41828a60f0,h1:ys4bbOlPvaUBlA0byjm6TqydsXZu614ZIUTfF+4MRY0=,6c46bc4a68353d7b41f6e91eb276c9b21560cad4f75419baaee01764927fb7e8 +github.com/mistifyio/go-zfs,v2.1.1+incompatible,h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=,545764e34ed40473380ea1b08af9f0aea1715d15a0a56fc937e6c3b1bda0d9a3 +github.com/mitchellh/cli,v1.0.0,h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=,74199f2c2e1735a45e9f5c2ca049d352b0cc73d945823540e54ca9975ce35752 +github.com/mitchellh/colorstring,v0.0.0-20190213212951-d06e56a500db,h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=,d0733284b20567055e374b420373f5508fa47e95204e59e4b8a66834e7e3964d +github.com/mitchellh/copystructure,v1.0.0,h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=,4a2c9eb367a7781864e8edbd3b11781897766bcf6120f77a717d54a575392eee +github.com/mitchellh/go-fs,v0.0.0-20180402234041-7b48fa161ea7,h1:PXPMDtfqV+rZJshQHOiwUFqlqErXaAcuWy+/ZmyRfNc=,21c34fee3df3dc1ddad5e774ddf9e05998061177420709fb68a958c6c113a90b 
+github.com/mitchellh/go-homedir,v1.1.0,h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=,fffec361fc7e776bb71433560c285ee2982d2c140b8f5bfba0db6033c0ade184 +github.com/mitchellh/go-linereader,v0.0.0-20190213213312-1b945b3263eb,h1:GRiLv4rgyqjqzxbhJke65IYUf4NCOOvrPOJbV/sPxkM=,7b83ef857c71fe8d4937b57923923176dd43c7b1b7632a9779bac411924e87e1 +github.com/mitchellh/go-ps,v0.0.0-20190716172923-621e5597135b,h1:9+ke9YJ9KGWw5ANXK6ozjoK47uI3uNbXv4YVINBnGm8=,06090b6c22dedf800259eb5d9b5f35bfb7b38e22888c0345631dc54366b21f89 +github.com/mitchellh/go-testing-interface,v1.0.0,h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=,255871a399420cd3513b12f50738d290e251637deb23e21a4332192584ecf9c7 +github.com/mitchellh/go-vnc,v0.0.0-20150629162542-723ed9867aed,h1:FI2NIv6fpef6BQl2u3IZX/Cj20tfypRF4yd+uaHOMtI=,2d65ac584e1a17421265fe97f83bd1cbff447ca6a911fa8d91414fa2115e3e74 +github.com/mitchellh/go-wordwrap,v1.0.0,h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=,9ea185f97dfe616da351b63b229a5a212b14ac0e23bd3f943e39590eadb38031 +github.com/mitchellh/gox,v1.0.1,h1:x0jD3dcHk9a9xPSDN6YEL4xL6Qz0dvNYm8yZqui5chI=,30a69e17ba5cafe6f1ac436bcc99368a5a34f0a0763926d2c6780a781f8e9e95 +github.com/mitchellh/hashstructure,v1.0.0,h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y=,3b79b07860631d05645ea3f54830b7e1997dbcf477e84a8adfe4979be3abdfde +github.com/mitchellh/iochan,v1.0.0,h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY=,f3eede01adb24c22945bf71b4f84ae25e3744a12b9d8bd7c016705adc0d778b8 +github.com/mitchellh/mapstructure,v1.1.2,h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=,cd86d8586cbc333de28f6a46989069487877fae437df4c2cc417668d203c7305 +github.com/mitchellh/panicwrap,v0.0.0-20190213213626-17011010aaa4,h1:jw9tsdJ1FQmUkyTXdIF/nByTX+mMnnp16glnvGZMsC4=,b9ab07bbacf733cc24f9f7f53eec19f9bf999cbb35180ad0b615fe437640de6e +github.com/mitchellh/pointerstructure,v0.0.0-20190430161007-f252a8fd71c8,h1:1CO5wil3HuiVLrUQ2ovSTO+6AfNOA5EMkHHVyHE9IwA=,658a3e14e4983f3c8a04c8da4a56d4d8a86e2b4fcaa6b1eefab150efcd742848 
+github.com/mitchellh/prefixedio,v0.0.0-20190213213902-5733675afd51,h1:eD92Am0Qf3rqhsOeA1zwBHSfRkoHrt4o6uORamdmJP8=,d3209d88b3b5b05ecd48f469bc16811666f786685c49273664a5496d5dd69018 +github.com/mitchellh/reflectwalk,v1.0.1,h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=,bf1d4540bf05ea244e65fca3e9f859d8129c381adaeebe7f22703959aadc4210 +github.com/mjibson/esc,v0.2.0,h1:k96hdaR9Z+nMcnDwNrOvhdBqtjyMrbVyxLpsRCdP2mA=,9f090786bd43dddb5c0d798b449d5e8aede4cb7d106f56dcac0aebd8fd1929cc +github.com/mndrix/ps,v0.0.0-20131111202200-33ddf69629c1,h1:kCroTjOY+wyp+iHA2lZOV5aJ6WfBVjGnW8bCYmXmLPo=,30b12b7a2467d4a1aa64aa31c715cb45d570d36e31ae70719101d686363d2685 +github.com/mndrix/tap-go,v0.0.0-20171203230836-629fa407e90b,h1:Ga1nclDSe8gOw37MVLMhfu2QKWtD6gvtQ298zsKVh8g=,c6f65bd8d977e53fa083d9d0309cffb0dbfaaae69a5a64a352fb2f7d079ce73d +github.com/modern-go/concurrent,v0.0.0-20180306012644-bacd9c7ef1dd,h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=,91ef49599bec459869d94ff3dec128871ab66bd2dfa61041f1e1169f9b4a8073 +github.com/modern-go/reflect2,v1.0.1,h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=,6af8268206d037428a4197bd421bbe5399c19450ef53ae8309a083f34fb7ac05 +github.com/mohae/deepcopy,v0.0.0-20170929034955-c48cc78d4826,h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=,41ba726508a213f4af89e7d58937263ff778e352d591edd422d3a3dc3272585c +github.com/mongodb/grip,v0.0.0-20191008181606-ee248dc03622,h1:pPoJByX3B56ydhWGUMard1QQ2skLNTw/s1W5VuLLAtA=,08fcfea928382f428dc1fceeada1c264e7f6dc7256dbe05c5c0ba41dca16a42c +github.com/monoculum/formam,v0.0.0-20190830100315-7ff9597b1407,h1:ZU5O9BawmEx9Mu1lxn9NLIwO9DrqRfjE+HWKU+e9GKQ=,5a04e3907fb1008c1e6640e8a0e9394c752aab4ebf7e3be01cd3ee55c2659121 +github.com/montanaflynn/stats,v0.5.0,h1:2EkzeTSqBB4V4bJwWrt5gIIrZmpJBcoIRGS2kWLgzmk=,05527945351f54f4e8c48666bce277fbace34026eed22ac7d88a50a6730767f1 +github.com/morikuni/aec,v1.0.0,h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=,c14eeff6945b854edd8b91a83ac760fbd95068f33dc17d102c18f2e8e86bcced 
+github.com/moul/anonuuid,v0.0.0-20160222162117-609b752a95ef,h1:E/seV1Rtsnr2juBw1Dfz4iDPT3/5s1H/BATx+ePmSyo=,ec103e75b93231b5b858a2fc9985da39d6b7c35644a689a20e60f3a6ad6b1396 +github.com/moul/gotty-client,v0.0.0-20180327180212-b26a57ebc215,h1:y6FZWUBBt1iPmJyGbGza3ncvVBMKzgd32oFChRZR7Do=,265c4cbad4789e267f283b9012ad174c89e378e59ad9c64ac28729402eb60afe +github.com/moul/http2curl,v0.0.0-20161031194548-4e24498b31db,h1:eZgFHVkk9uOTaOQLC6tgjkzdp7Ays8eEVecBcfHZlJQ=,2ff4e19b14d84f6d181afc79f28668c6171d6dea79c43a1918c0428a265137c1 +github.com/mozilla-services/heka,v0.10.0,h1:w+y6RPJkU6ZKeNbG1VvK9aSqJm0sru5TYcwOj6ejv8U=,f325891304f9acc654944d9a2297b8816a0a86440b2f035c4996ec38fcfa0eed +github.com/mozillazg/go-cos,v0.12.0,h1:b9hUd5HjrDe10BUfkyiLYI1+z4M2kAgKasktszx9pO4=,5376eaf13e10fed6d73b713fbabc4a159d204239579120c410ea74de33dd6d71 +github.com/mozillazg/go-httpheader,v0.2.1,h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ=,50b7a36360fc1ec1a85fd40fe45f8db02fc734fc2af0514a60a068f0a2708122 +github.com/mozillazg/go-unidecode,v0.1.1,h1:uiRy1s4TUqLbcROUrnCN/V85Jlli2AmDF6EeAXOeMHE=,812d3bc9f03cb6a8552bfadd9e0d1b44a57807a3af2e8667a42861510bb2b20c +github.com/mpvl/unique,v0.0.0-20150818121801-cbe035fff7de,h1:D5x39vF5KCwKQaw+OC9ZPiLVHXz3UFw2+psEX+gYcto=,af2bcc8a61a6881e0703afee2217dd1e75c8b34f4e49947c0d7f6e87af574e0e +github.com/mr-tron/base58,v1.1.2,h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78=,c2b362db55d8266ce02a161b7f73cad646432d2dae98511385b88481380c4e86 +github.com/mreiferson/go-httpclient,v0.0.0-20160630210159-31f0106b4474,h1:oKIteTqeSpenyTrOVj5zkiyCaflLa8B+CD0324otT+o=,e94cbe43c052831323c59ff186c830ea2e271065f7f8b2794ade7aaf88a37a85 +github.com/mrjones/oauth,v0.0.0-20180629183705-f4e24b6d100c,h1:3wkDRdxK92dF+c1ke2dtj7ZzemFWBHB9plnJOtlwdFA=,4c1fef02b34241008ba6bc33fb5d01b4cfb3b7e7544fb7f70823fe74b9b21362 +github.com/mrunalp/fileutils,v0.0.0-20171103030105-7d4729fb3618,h1:7InQ7/zrOh6SlFjaXFubv0xX0HsuC9qJsdqm7bNQpYM=,c32d691ce15012ba21fbe69db3558df0c97326426c14ef747b8a1e02652ca7b3 
+github.com/mschoch/smat,v0.0.0-20160514031455-90eadee771ae,h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY=,488e193897c7d8e3b3758cbeb8a5bc1b58b9619f3f14288a2ea9e0baa5ed9b3e +github.com/msteinert/pam,v0.0.0-20151204160544-02ccfbfaf0cc,h1:z1PgdCCmYYVL0BoJTUgmAq1p7ca8fzYIPsNyfsN3xAU=,315d911c41d88a22bf8831b174bbd15310bc403626507095f98b9780ddcf9174 +github.com/muesli/smartcrop,v0.0.0-20180228075044-f6ebaa786a12,h1:l0X/8IDy2UoK+oXcQFMRSIOcyuYb5iEPytPGplnM41Y=,5857e4d0ed238d8c6f8f41294b98771f1c21874a80ea5f2e75b4a49cbcf1d3e0 +github.com/multiformats/go-base32,v0.0.3,h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI=,658875e4980370db6180f99835b3a48158a697eef69e7c3eb86b0b4f5c1c19ed +github.com/multiformats/go-multiaddr,v0.1.1,h1:rVAztJYMhCQ7vEFr8FvxW3mS+HF2eY/oPbOMeS0ZDnE=,ba4849fc68453c3e812e850f40e6d5acef671060ed79f203c2d179d395d20fc5 +github.com/multiformats/go-multiaddr-dns,v0.0.2,h1:/Bbsgsy3R6e3jf2qBahzNHzww6usYaZ0NhNH3sqdFS8=,219f855f485aa198d36305f2f43012a73bd40f15caa3e606324cee9f117e5b89 +github.com/multiformats/go-multiaddr-fmt,v0.1.0,h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E=,d83537dc1f83185dfb60b190ea4b3c7b05c552a75ad7cfaddd0b987c00ff0cff +github.com/multiformats/go-multiaddr-net,v0.1.1,h1:jFFKUuXTXv+3ARyHZi3XUqQO+YWMKgBdhEvuGRfnL6s=,241c47d621bcb9a40d33284f407a7fdf458cb3f87ef02db68735cc6b9002afed +github.com/multiformats/go-multibase,v0.0.1,h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA=,ed39145efcf5e8c99deaa183071aed246239730f5781b291bad7de5d1fc12d81 +github.com/multiformats/go-multihash,v0.0.8,h1:wrYcW5yxSi3dU07n5jnuS5PrNwyHy0zRHGVoUugWvXg=,44fae6e8771331f54f267d9440a9d520e7daeb91817ff61e26b8494099ae046a +github.com/multiformats/go-multistream,v0.1.0,h1:UpO6jrsjqs46mqAK3n6wKRYFhugss9ArzbyUzU+4wkQ=,f720be6e29845f0a41c1241a24f19c08adf762f9e7e972b4096416776c603b15 +github.com/munnerz/goautoneg,v0.0.0-20191010083416-a7dc8b61c822,h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=,3d7ce17916779890be02ea6b3dd6345c3c30c1df502ad9d8b5b9b310e636afd9 
+github.com/mwitkow/go-conntrack,v0.0.0-20190716064945-2f068394615f,h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=,d6fc513490d5c73e3f64ede3cf18ba973a4f8ef4c39c9816cc6080e39c8c480a +github.com/mwitkow/go-grpc-middleware,v1.0.0,h1:XraEe8LhUuB33YeV4NWfLh2KUZicskSZ2lMhVRnDvTQ=,074f46f92d7a0043c5b283f1af224123cc48e21f96b259e62f77b6da72240812 +github.com/mxk/go-flowrate,v0.0.0-20140419014527-cca7078d478f,h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=,bd0701ef9115469a661c07a3e9c2e572114126eb2d098b01eda34ebf62548492 +github.com/myesui/uuid,v1.0.0,h1:xCBmH4l5KuvLYc5L7AS7SZg9/jKdIFubM7OVoLqaQUI=,3055c4b167daeb9984ccd7c8eeba154e3d84afa6fdf06a3151280ef120d1633d +github.com/myitcv/gobin,v0.0.8,h1:hQORun03Mlnm8yp/OgKX8UYSIVZQ8ebTWf3aahY1u+s=,015311e9db646cb9e5f63a0586c466c9eb5bc5f45661282644f8a5b549607e72 +github.com/myitcv/vbash,v0.0.2,h1:8R+91eSlfcgoRjEbnUgvbXYOmfh+p0+7i5klFOM5VMA=,08dcf62b94843e7fd115cd0605158d948fb361ca8c958db1958c5d2feef9c2d1 +github.com/namedotcom/go,v0.0.0-20180403034216-08470befbe04,h1:o6uBwrhM5C8Ll3MAAxrQxRHEu7FkapwTuI2WmL1rw4g=,0c6ea2c994e982c25e44ccba2ead1a9655cd2f253986eedb73253c30ad21b42f +github.com/naoina/go-stringutil,v0.1.0,h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks=,4cfea6f0ebfecb5e6297f8a6eee0e9ef9fe254883eb75dd6179133995a219c58 +github.com/naoina/toml,v0.1.1,h1:PT/lllxVVN0gzzSqSlHEmP8MJB4MY2U7STGxiouV4X8=,8e34d510563d9e8b3f2dbdf0927bf5108b669144bdbe2fda4fcb44e7e2e55268 +github.com/natefinch/lumberjack,v2.0.0+incompatible,h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM=,1f6e7c9e0b915c45151d8780a8711426b19d16d04c9cf0e7995b29035d6b500f +github.com/nats-io/gnatsd,v1.3.0,h1:+5d80klu3QaJgNbdavVBjWJP7cHd11U2CLnRTFM9ICI=,85fa90b3eaef17698734d398a9939b8bb94df1b9f35bc92c8d31cb7a349c1e97 +github.com/nats-io/go-nats,v1.6.0,h1:FznPwMfrVwGnSCh7JTXyJDRW0TIkD4Tr+M1LPJt9T70=,8c63be6f10479802a40c66c0999f724e492bcb9863d5517038c6472e585a76aa 
+github.com/nats-io/go-nats-streaming,v0.4.2,h1:e7Fs4yxvFTs8N5xKFoJyw0sVW2heJwYvrUWfdf9VQlE=,62dd1d6ba18f3b7686766116e3beaaf9f62b89b58a6efb0b8f1ad04d3ddfb026 +github.com/nats-io/jwt,v0.3.0,h1:xdnzwFETV++jNc4W1mw//qFyJGb2ABOombmZJQS4+Qo=,e131314c7cf6a714ec10ca3b6f95f8af6a41f5cdaf72a364f7c71b33e97314db +github.com/nats-io/nats,v1.6.0,h1:U5b2apHOTZlUou+NGfCRWG4ZEeivbt2hpsZO4kHKIVU=,12cc70ed3477472d110d4b4bc109fbe20218e8199629669ad5f617c199fbf9d2 +github.com/nats-io/nats-server/v2,v2.1.0,h1:Yi0+ZhRPtPAGeIxFn5erIeJIV9wXA+JznfSxK621Fbk=,a5897b8f5302ae38894de2c240f31d33ab7b2f3d4e88a2c212fc9b31f2d4f444 +github.com/nats-io/nats-streaming-server,v0.12.2,h1:EpyLfUBZgwu5c0mdSSytQsapm615AyitPssq7jgafdw=,48605f61f74903ba1322f11aa17806b57f71cebf2557b7dd8620d4193abc868d +github.com/nats-io/nats.go,v1.9.1,h1:ik3HbLhZ0YABLto7iX80pZLPw/6dx3T+++MZJwLnMrQ=,34a735d158d70685faad1fc3153f08da0ddc21c0ae42f6a0cb09430d638364b2 +github.com/nats-io/nkeys,v0.1.0,h1:qMd4+pRHgdr1nAClu+2h/2a5F2TmKcCzjCDazVgRoX4=,dbc82abacf752e532ffd67db230a97f52a5f92070b04b4028cb79534d2ab0ef6 +github.com/nats-io/nuid,v1.0.1,h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=,809d144fbd16f91651a433e28d2008d339e19dafc450c5995e2ed92f1c17c1f3 +github.com/nats-io/stan.go,v0.5.0,h1:ZaSPMb6jnDXsSlOACynJrUiB3Evleg3ZyyX+rnf3TlQ=,1dcb14e2ef8ad30dd1ee61a63b0a3bfbaa48e9c3d13f69458a149956a14bbab7 +github.com/nbio/st,v0.0.0-20140626010706-e9e8d9816f32,h1:W6apQkHrMkS0Muv8G/TipAy/FJl/rCYT0+EuS8+Z0z4=,e6cd27bd360be27d0f7efd3c4c41c4e14e659e60086b0bc4f09fb09cfd02a50d +github.com/ncw/swift,v1.0.49,h1:eQaKIjSt/PXLKfYgzg01nevmO+CMXfXGRhB1gOhDs7E=,b2be24cad8923c9171835547df2d621d2aa2029ceb9fa770d6ecf3bf70c2c029 +github.com/neelance/astrewrite,v0.0.0-20160511093645-99348263ae86,h1:D6paGObi5Wud7xg83MaEFyjxQB1W5bz5d0IFppr+ymk=,815811c2140669e55e99d59d4bdd2fcf4c810610a9d278fd25cc2c3480c002d4 
+github.com/neelance/sourcemap,v0.0.0-20151028013722-8c68805598ab,h1:eFXv9Nu1lGbrNbj619aWwZfVF5HBrm9Plte8aNptuTI=,ce5499f29779a604233bb76f36925c3326a8a8f270533df8d3dff1107b7aa066 +github.com/neurosnap/sentences,v1.0.6,h1:iBVUivNtlwGkYsJblWV8GGVFmXzZzak907Ci8aA0VTE=,9dbe86e291937eba92847454650d1c65338527ff89dec5daccb99aaf7e03865b +github.com/newrelic/go-agent,v2.15.0+incompatible,h1:IB0Fy+dClpBq9aEoIrLyQXzU34JyI1xVTanPLB/+jvU=,4c541c5f7b10055c37cf22843edbb9b0fcb06ad3504e8d6eae3d9c37ff3c64c6 +github.com/nf/cr2,v0.0.0-20140528043846-05d46fef4f2f,h1:nyKdx+jcykIdxGNrbgo/TGjdGi99EY9FKBCjYAUS4bU=,665afbe7830424dd9815cae42aa7762b657484686d671f88704257ea7c9736be +github.com/nfnt/resize,v0.0.0-20180221191011-83c6a9932646,h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ=,b8e97cb14e5e5ef29d762d2dff890f6279a125990ddf9cb7ae5c4d2a015b109c +github.com/ngaut/pools,v0.0.0-20180318154953-b7bc8c42aac7,h1:7KAv7KMGTTqSmYZtNdcNTgsos+vFzULLwyElndwn+5c=,26342833d7a5b91a52f8451e8e34bc9ffc5069d342666ab0b478628c41a86d44 +github.com/ngaut/sync2,v0.0.0-20141008032647-7a24ed77b2ef,h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k=,2635d6120b6172c190f84b57b5fc878f9158b768b4bd6bd4468bfa98a73061a4 +github.com/nicksnyder/go-i18n,v2.0.2+incompatible,h1:Xt6dluut3s2zBUha8/3sj6atWMQbFioi9OMqUGH9khg=,687be9dc953545d390761e5464e07c38f313d19c1f695f7d7702d954afcf6b66 +github.com/nicolai86/scaleway-sdk,v1.10.2-0.20180628010248-798f60e20bb2,h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s=,a2e992324edd4396f24e0b6a165c4d1057eeefdecdc9f7472b0de8a30f3be729 +github.com/niklasfasching/go-org,v0.1.6,h1:F521WcqRNl8OJumlgAnekZgERaTA2HpfOYYfVEKOeI8=,c938afb1ad7f567524686395c9de66da75220eaa60fe8917c02b97aa1e2cbbb1 +github.com/nkovacs/streamquote,v0.0.0-20170412213628-49af9bddb229,h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg=,679a789b4b1409ea81054cb12e5f8441199f5fb17d4a2d3510c51f3aa5f3f0cc 
+github.com/nlopes/slack,v0.6.0,h1:jt0jxVQGhssx1Ib7naAOZEZcGdtIhTzkP0nopK0AsRA=,048ddfddd4a66407f26b069a65d4d8f3d6d0368adcd52fd5a0dc6d86fe012f47 +github.com/nrdcg/auroradns,v1.0.0,h1:b+NpSqNG6HzMqX2ohGQe4Q/G0WQq8pduWCiZ19vdLY8=,81e3564b38ca27024b6e981a03ae70afcf435d5f8d35a2113321dfd3a220f00b +github.com/nrdcg/goinwx,v0.6.1,h1:AJnjoWPELyCtofhGcmzzcEMFd9YdF2JB/LgutWsWt/s=,8e1e3ea7d38f5b9b21603350d97a583c9108d380f5cc08bf93a4c69d6968dc8a +github.com/nrdcg/namesilo,v0.2.1,h1:kLjCjsufdW/IlC+iSfAqj0iQGgKjlbUUeDJio5Y6eMg=,e20a47d9257fcf7ce95254b14bb84ba290b5f4867e4d63027b669f5a55aaab6c +github.com/nsf/jsondiff,v0.0.0-20160203110537-7de28ed2b6e3,h1:OqFSgO6CJ8heZRAbXLpT+ojX+jnnGij4qZwUz/SJJ9I=,9652618358184592fb7a4657e2c51748cbe0bf5bbf97150a2c6e95ecf65b126b +github.com/nsf/termbox-go,v0.0.0-20190817171036-93860e161317,h1:hhGN4SFXgXo61Q4Sjj/X9sBjyeSa2kdpaOzCO+8EVQw=,a64e374836a25ab74ece4eb5314d79617d8b828bd6d13c654d95bed920c82784 +github.com/nsqio/go-nsq,v1.0.7,h1:O0pIZJYTf+x7cZBA0UMY8WxFG79lYTURmWzAAh48ljY=,5acb7902bf31355fa7d77f507ed42847368834eb378fbf407d82ae3e4211e248 +github.com/nu7hatch/gouuid,v0.0.0-20131221200532-179d4d0c4d8d,h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ=,0889a0ac13cfa9f32f986a88a82bb24380070932299131ae7d7180a389d08ca7 +github.com/nullstyle/go-xdr,v0.0.0-20180726165426-f4c839f75077,h1:A804awGqaW7i61y8KnbtHmh3scqbNuTJqcycq3u5ZAU=,0ab4f958f0420027d40b53c98bcb8f3cbe1e106dfb49d3e91415cb1c512a552c +github.com/nutmegdevelopment/sumologic,v0.0.0-20160817160817-42ed9b372fa3,h1:xOEJG5C3e8CvgAYsnkgoSBzCr0No+m++aB6v7A2WScY=,a33916e02e1159304145b621ffdf284120e50f618c684f38776a8bab7ae7b3fe +github.com/nwaples/rardecode,v0.0.0-20171029023500-e06696f847ae,h1:UF9xsJn7AeQ72TCus3eRO1lh08Id3AoF37vl+qigL/w=,5598a02308af3b04418b15854ff940be49cf31ce7238ce23c10409110364d40f +github.com/ogier/pflag,v0.0.1,h1:RW6JSWSu/RkSatfcLtogGfFgpim5p7ARQ10ECk5O750=,c4db0ecff32deb3205c705d72a616bce01e1f6a1948c851d30b52deeec3fbf91 
+github.com/oklog/run,v1.0.0,h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=,108d409b7d235d61b82cfb6e1df139501123fcd8fa68fe94ddb024b53335cb48 +github.com/oklog/ulid,v1.3.1,h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=,40e502c064a922d5eb7f2bc2cda9c6a2a929ec0fc76c9aae4db54fb7b6b611ae +github.com/olekukonko/tablewriter,v0.0.1,h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88=,7e5cc8a9b5a51126a0cb46ac96b274d92a8b1cc24b2321832c38d60c0ea4cc9c +github.com/oliamb/cutter,v0.2.2,h1:Lfwkya0HHNU1YLnGv2hTkzHfasrSMkgv4Dn+5rmlk3k=,9174c2374109a7d3aeb2c59b5f4b744ec5f65752aab797f0d50beb26cfc7d857 +github.com/olivere/elastic,v6.2.25+incompatible,h1:X34sPAlSpZVlnuSjOYwbMbiCMU+WKK7YUxrunuNSdG8=,bf3b4cc7ea89a716e91002a31b33f55ec3168ce5ab36ffe5c02ff68d94b9aad5 +github.com/olivere/env,v1.1.0,h1:owp/uwMwhru5668JjMDp8UTG3JGT27GTCk4ufYQfaTw=,f486deab73b3d7866e762e1ad34fe63c88e9ac38f41d811414361fb6490bbb2c +github.com/onsi/ginkgo,v1.10.3,h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY=,088314495acb90d1e520519b243f4dbdd17b43469e6fb83bd45d600796856e63 +github.com/onsi/gomega,v1.7.1,h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=,0a245e719f17cc2bc399aa7c2005cca84f1cfba5373b0c96f5c64673f758a712 +github.com/op/go-logging,v0.0.0-20160315200505-970db520ece7,h1:lDH9UUVJtmYCjyT0CI4q8xvlXPxeZ0gYCVvWbmPlp88=,c506eace74028656eb28677a4c162f9c023ce2f9c0207354ba80cca89f11b461 +github.com/openconfig/gnmi,v0.0.0-20190823184014-89b2bf29312c,h1:a380JP+B7xlMbEQOlha1buKhzBPXFqgFXplyWCEIGEY=,f52967c7b194daa57252042f6ccf9d26f8c599a7e13aca26043f948d5139b91a +github.com/openconfig/reference,v0.0.0-20190727015836-8dfd928c9696,h1:yHCGAHg2zMaW8olLrqEt3SAHGcEx2aJPEQWMRCyravY=,040cf32cee7256a08716313dd7ea4f8c44f1d644ae872ecf2dd381c35b12125c +github.com/opencontainers/go-digest,v1.0.0-rc1,h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=,25fd455029e8a1bbe15ed2eeafc67222372c6f305a47b4ec157d8a1a2849c15c 
+github.com/opencontainers/image-spec,v1.0.1,h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=,ebb2dca711a137fbfb717158b0368792f834000f4308d9ea259d06c6804c677c +github.com/opencontainers/runc,v0.1.1,h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=,aa212163f009190d0f4f3dbe64f71fcda06d7896b67863d7f7b185fee6a68ea6 +github.com/opencontainers/runtime-spec,v1.0.1,h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ=,1958458b00ce912425f5c7d2ee836431b296a3f9320d565512d8c96b107fffbf +github.com/opencontainers/runtime-tools,v0.9.0,h1:FYgwVsKRI/H9hU32MJ/4MLOzXWodKK5zsQavY8NPMkU=,53c720dbb7452cfb2fd3945e37c26b5a0140cb1012d35a2b72a5e035f28a32c4 +github.com/opencontainers/selinux,v1.3.0,h1:xsI95WzPZu5exzA6JzkLSfdr/DilzOhCJOqGe5TgR0g=,88286825b32cd46a0469e578f378a185032da2d5b03893623861ef3af59359d8 +github.com/openshift/client-go,v3.9.0+incompatible,h1:13k3Ok0B7TA2hA3bQW2aFqn6y04JaJWdk7ITTyg+Ek0=,661b7f28b4905f1936dd58e373374513d54663ec85aecafede1c7d9c260e9369 +github.com/openshift/library-go,v0.0.0-20191101161407-e7c97b468b83,h1:wwR+laNaFKVGiizoIDL/cAKIZVoKXJ9jbjUoUlq2p5I=,c74f8134013f978ef154d6accf9b4b0c5126941f2d45e6eb223db7098f7ab2a4 +github.com/opentracing-contrib/go-observer,v0.0.0-20170622124052-a52f23424492,h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU=,50023eee1ef04412410f43d8b5dcf3ef481c0fc39067add27799654705fa84b2 +github.com/opentracing-contrib/go-stdlib,v0.0.0-20190519235532-cf7a6c988dc9,h1:QsgXACQhd9QJhEmRumbsMQQvBtmdS0mafoVEBplWXEg=,966cdf6d869ff62c35edf1ea00113465cc9b90f34838c6a6990a1f776e7d1152 +github.com/opentracing/basictracer-go,v1.0.0,h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo=,a908957c8e55b7b036b4761fb64c643806fcb9b59d4e7c6fcd03fca1105a9156 +github.com/opentracing/opentracing-go,v1.1.0,h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=,3e0f42d035019fa037991d340da9677a802f8182792770c38e87906d33e06629 
+github.com/openzipkin-contrib/zipkin-go-opentracing,v0.4.4,h1:bzTJRoOZEN7uI1gq594S5HhMYNSud4FKUEwd4aFbsEI=,8a4688f80cd67140aa4edb91506d440ecea4d8ec01634caab5c95991af011c5d +github.com/openzipkin/zipkin-go,v0.2.2,h1:nY8Hti+WKaP0cRsSeQ026wU03QsM762XBeCXBb9NAWI=,dfc610dc52d9299df49172a9e61fcc772d85450b6b6f82e8f43cf23562232a4c +github.com/oracle/oci-go-sdk,v7.0.0+incompatible,h1:oj5ESjXwwkFRdhZSnPlShvLWYdt/IZ65RQxveYM3maA=,941cd26813b22873477f1c6bb86fed929bdc85379d435bd9707d923f57d070dc +github.com/orcaman/concurrent-map,v0.0.0-20190826125027-8c72a8bb44f6,h1:lNCW6THrCKBiJBpz8kbVGjC7MgdCGKwuvBgc7LoD6sw=,ec80830c751199283290a8d398ebf28ca5169e866a70347b39856d2c1178f2cb +github.com/ory/dockertest,v3.3.4+incompatible,h1:VrpM6Gqg7CrPm3bL4Wm1skO+zFWLbh7/Xb5kGEbJRh8=,cbcc7ba21c846d38229aa06a2d7cf35b99ac219eb2694bd9a1ceeac89667e475 +github.com/ory/herodot,v0.6.2,h1:zOb5MsuMn7AH9/Ewc/EK83yqcNViK1m1l3C2UuP3RcA=,caf465ffb73c7537212ba4fd58a4c2c41fe7ca69737404a28e84ceff90c340ea +github.com/otiai10/copy,v0.0.0-20180813032824-7e9a647135a1,h1:A7kMXwDPBTfIVRv2l6XV3U6Su3SzLUzZjxnDDQVZDIY=,67d0e4f6ba369653e30257882fbbb20c28b560bc837e1847a42c48e868f1c81c +github.com/otiai10/curr,v0.0.0-20150429015615-9b4961190c95,h1:+OLn68pqasWca0z5ryit9KGfp3sUsW4Lqg32iRMJyzs=,7cf2143067d9bb3e7d54d2906766bb24c11d76f1bb0b0c5069574e9a0d8ae93d +github.com/otiai10/mint,v1.2.3,h1:PsrRBmrxR68kyNu6YlqYHbNlItc5vOkuS6LBEsNttVA=,0b82a05ca43810c9aa8299ddae1663feeb178d699aeb5242c3bdeb61cb5a54fb +github.com/outscale/osc-go,v0.0.1,h1:hvBtORyu7sWSKW1norGlfIP8C7c2aegI2Vkq75SRPCE=,2a988384c564fdba8b8c496024aafc212140e4b996654be7a92a3b0c7a962632 +github.com/ovh/go-ovh,v0.0.0-20181109152953-ba5adb4cf014,h1:37VE5TYj2m/FLA9SNr4z0+A0JefvTmR60Zwf8XSEV7c=,0fa35e8026a9b3aebd804739f31ffe07e553b84e2b8ea145b2f2ebaa0dd7c08f +github.com/oxtoacart/bpool,v0.0.0-20190530202638-03653db5a59c,h1:rp5dCmg/yLR3mgFuSOe4oEnDDmGLROTvMragMUXpTQw=,6816ec3a6f197cbee0ba6ddb9ec70958bc28870e59864b24e43da0c858079a1b 
+github.com/packer-community/winrmcp,v0.0.0-20180921204643-0fd363d6159a,h1:A3QMuteviunoaY/8ex+RKFqwhcZJ/Cf3fCW3IwL2wx4=,4a48fa503853d129e7e32ca81f069b9e09a9e3249739781f61fae70bb02d098b +github.com/packethost/packngo,v0.1.1-0.20180711074735-b9cb5096f54c,h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE=,6dac4e55c104df58ace636ef31d5dd6173a36747c4fd79299252ba8826127491 +github.com/parnurzeal/gorequest,v0.2.16,h1:T/5x+/4BT+nj+3eSknXmCTnEVGSzFzPGdpqmUVVZXHQ=,cc7b7d56e2e4c3fa0709e0e547875807746ac067b2a5c4b740b3088c1fdf941d +github.com/pascaldekloe/goe,v0.1.0,h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=,37b73886f1eec9b093143e7b03f547b90ab55d8d5c9aa3966e90f9df2d07353c +github.com/patrickmn/go-cache,v2.1.0+incompatible,h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=,d5d1c13e3c9cfeb04a943f656333ec68627dd6ce136af67e2aa5881ad7353c55 +github.com/pborman/uuid,v1.2.0,h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=,b888ff5d33651a1f5f6b8094acc434dd6dc284e2fe5052754a7993cebd539437 +github.com/pelletier/go-buffruneio,v0.2.0,h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWooScCR7aA=,70593688607f4d48192776fe257ab9298689267ebcdd7b155bfe40d893735f38 +github.com/pelletier/go-toml,v1.6.0,h1:aetoXYr0Tv7xRU/V4B4IZJ2QcbtMUFoNb3ORp7TzIK4=,cc6dce19df6c6c30abd67594d17ea6015d1210aa6dd8c6096c6429eec06fdab4 +github.com/peterbourgon/diskv,v2.0.1+incompatible,h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=,1eeff260bd1ad71cd1611078995db99e1c7eba28628e7d6f24c79039536ea1cb +github.com/peterbourgon/g2s,v0.0.0-20170223122336-d4e7ad98afea,h1:sKwxy1H95npauwu8vtF95vG/syrL0p8fSZo/XlDg5gk=,41526f42b4fe3019581ab3745afea18271d7f037eb55a6e9fb3e32fd09ff9b8d +github.com/petergtz/pegomock,v2.7.0+incompatible,h1:42rJ5wIOBAg9OGdkLaPW9PlF/RtqDc5aGl6PcTCXl3o=,dc93e4483e8de4eb429e007aad17348822197ea7a3adde283b7752bc4544dfbb +github.com/peterh/liner,v1.1.0,h1:f+aAedNJA6uk7+6rXsYBnhdo4Xux7ESLe+kcuVUF5os=,5cdc45c19901db8d8295c139bb382d7eea150e8fd96bd26de10384685728a461 
+github.com/peterhellberg/link,v1.0.0,h1:mUWkiegowUXEcmlb+ybF75Q/8D2Y0BjZtR8cxoKhaQo=,d320f4204fbe886e1cefc0b677af2bfaba855e9e6556a6e92e43bcd80c3bb7a5 +github.com/petermattis/goid,v0.0.0-20180202154549-b0b1615b78e5,h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=,5134a176e306f9b973ff670a33c7536b59bf4114d83fd94f74c736ff0cc10ef0 +github.com/phayes/freeport,v0.0.0-20180830031419-95f893ade6f2,h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc=,4ac97358de55a9b1ac60f13fdb223c5309a129fb3fb7bf731062f9c095a0796c +github.com/philhofer/fwd,v1.0.0,h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=,b4e79b1f5fdfe8c44bf6dae3dd593c62862930114411a30968f304084de1d0b3 +github.com/pierrec/lz4,v2.3.0+incompatible,h1:CZzRn4Ut9GbUkHlQ7jqBXeZQV41ZSKWFc302ZU6lUTk=,775487f2be5ddf23034b59bc862cb0d5767155c5e08d1186665d117092ceb50f +github.com/pingcap/check,v0.0.0-20190102082844-67f458068fc8,h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg=,b8eeddacc35915d8c40b42e9af4db468ed309a506412a767ba6bb03bb7ce4627 +github.com/pingcap/errors,v0.11.4,h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=,df62e548162429501a88d936a3e8330f2379ddfcd4d23c22b78bc1b157e05b97 +github.com/pingcap/gofail,v0.0.0-20181217135706-6a951c1e42c3,h1:04yuCf5NMvLU8rB2m4Qs3rynH7EYpMno3lHkewIOdMo=,444866a53b7429e80a8a16791e39555de8103c7514cd322fe191c902b8071360 +github.com/pingcap/goleveldb,v0.0.0-20171020122428-b9ff6c35079e,h1:P73/4dPCL96rGrobssy1nVy2VaVpNCuLpCbr+FEaTA8=,08ec0ffe5d0d74bdc543695f975316af6a63c17e36644ae56d42e30b0d1f8777 +github.com/pingcap/kvproto,v0.0.0-20191101062931-76b56d6eb466,h1:C5nV9osqA+R/R2fxYxVfqAUlCi3Oo5yJ/JSKDeHSAOk=,0d834c10c217c5de2c9ef79049891a69e73e102c4dbcd130173c3650e96da570 +github.com/pingcap/log,v0.0.0-20191012051959-b742a5d432e9,h1:AJD9pZYm72vMgPcQDww9rkZ1DnWfl0pXV3BOWlkYIjA=,eaece6f27792a39ccff08152050d4eb7905c250bf36877cacdd7e74c79d80472 
+github.com/pingcap/parser,v0.0.0-20191101070347-94a5ef60f10b,h1:TLljHrSTC9MCTiUA6nMhV68my/D/FI3VNkUs94Wo3DE=,94e6857f4d2bf653edf4c2881cb8fb6b3abdf9efaec7d1f49159deec77580df2 +github.com/pingcap/pd,v2.1.17+incompatible,h1:mpfJYffRC14jeAfiq0jbHkqXVc8ZGNV0Lr2xG1sJslw=,b75266cd20abe6b1ccbb777a2f71d74dfcf231a06276b602df08bf27a9ea36f1 +github.com/pingcap/tidb-tools,v2.1.4+incompatible,h1:dkB4FMJcSk9GYRB2ICupU/lsTLf4mHLfkBE6fAsLdJ4=,c5c8e2b5c69c21bba2050c75d3a4582eda26308a355557036f058365d4583e5f +github.com/pingcap/tipb,v0.0.0-20191030045153-07a0962bbc64,h1:wUSHIp4dura5/YAepdgDBEdf2zz20MHXyNtMi1TcaDE=,8ac8e775e3d5fd255b7a8f07460f3b19bebb04cb50a3c0f5d6f64cc2fd585177 +github.com/pkg/browser,v0.0.0-20180916011732-0a3d74bf9ce4,h1:49lOXmGaUpV9Fz3gd7TFZY106KVlPVa5jcYD1gaQf98=,b845f84fbf08bba75401a4eff94c01c9e2c668fa1b43016e835bd60c6a8b4e87 +github.com/pkg/errors,v0.8.1,h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=,4e47d021340b7396a7dee454f527552faf7360a9fc34038b1dc32ba3b5a951d8 +github.com/pkg/profile,v1.3.0,h1:OQIvuDgm00gWVWGTf4m4mCt6W1/0YqU7Ntg0mySWgaI=,5f20c007ac81019900f06cf1e4d451ce8e1d981460e39e04794fbcc60639f851 +github.com/pkg/sftp,v1.10.1,h1:VasscCm72135zRysgrJDKsntdmPN+OuU3+nnHYA9wyc=,4e30f0455865434be7b83d4010ab97667217dafd0017caa651faafa2cc6aed64 +github.com/pkg/term,v0.0.0-20180730021639-bffc007b7fd5,h1:tFwafIEMf0B7NlcxV/zJ6leBIa81D3hgGSgsE5hCkOQ=,165bb00eeab26fe65c64e0e13bc29abc7ea18ac28d288e2218c137cd0bd91d9b +github.com/plaid/plaid-go,v0.0.0-20161222051224-02b6af68061b,h1:Don6I/E8nLCT6gdBi1sKB9hYxkx/24YD7XWwSly8IEo=,bd900ff0acd2968150f60770ab4e870d9f6b92c129a49eac0c9620a8043f901e +github.com/pmezard/go-difflib,v1.0.0,h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=,de04cecc1a4b8d53e4357051026794bcbc54f2e6a260cfac508ce69d5d6457a0 +github.com/polydawn/refmt,v0.0.0-20190408063855-01bf1e26dd14,h1:2m16U/rLwVaRdz7ANkHtHTodP3zTP3N451MADg64x5k=,a92440a944006fd3e0b6f1717fce4c2ea490cf2c4af93b56675216204f138c3a 
+github.com/portworx/kvdb,v0.0.0-20190911174000-a0108bddd091,h1:DqGiNhvCpvhWW/HJ1naJa0DudtlckvzQ9hEXSsOyv8Y=,d6fa957e1469a1b47ccbebc805034bafc5ed24798a1bef8675f751f9c4ed961e +github.com/portworx/sched-ops,v0.0.0-20191101005636-ded833c86f1e,h1:emQnaLwLEYN3Hner2ekVuZfrcChdN3H3J4Lxu5mPe64=,43ff366e97ff640a34a566c81dd7d63537c2864da85d33b49d5261417cd8d4b0 +github.com/posener/complete,v1.2.1,h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI=,a97f73829e0b71ae7a8f17a4884d5dcbb2c3499d8d3a077c2a8d7c2596f68d37 +github.com/pquerna/cachecontrol,v0.0.0-20180517163645-1555304b9b35,h1:J9b7z+QKAmPf4YLrFg6oQUotqHQeUNWwkvo7jZp1GLU=,0e5185ab4dab1bb2241e9e23e36ebde5713f3fb1e47767c3eb44001b7e17644f +github.com/pquerna/ffjson,v0.0.0-20190930134022-aa0246cd15f7,h1:xoIK0ctDddBMnc74udxJYBqlo9Ylnsp1waqjLsnef20=,377b4667540f620eae19722b5346f6f1efdea5688f9eedda97f2c659dad131f9 +github.com/pquerna/otp,v1.1.0,h1:q2gMsMuMl3JzneUaAX1MRGxLvOG6bzXV51hivBaStf0=,d46d289853f801387dfc514fd50133de30b684a6af34031b27caa877cbb7f687 +github.com/profitbricks/profitbricks-sdk-go,v4.0.2+incompatible,h1:ZoVHH6voxW9Onzo6z2yLtocVoN6mBocyDoqoyAMHokE=,b0baf185752eb96f8890f3e9adf856b13f5c43b5346387b659e2b1deb1d087c7 +github.com/project-flogo/core,v0.9.3,h1:uZXHR9j1Byqt+x3faNnOqB8NlEfwE2gpCh40iQ+44oA=,d1c43e3bc517bb438a9d313d976e327ba219232418064d439fb20671341832a2 +github.com/projectcalico/libcalico-go,v1.7.3,h1:qcbxAhsq/5zqZqpHE24VqMHfmoBVdXZV0Kf82+5rbqU=,4f638d56eb47ff8e1763f65131050294f7d2c828139276fe86127a803245ae8c +github.com/prometheus/alertmanager,v0.18.0,h1:sPppYFge7kdf9O96KIh3fd093D1xN8JxIp03wW6yAEE=,45e122e7c2ac69577d63844313798060673a28b2e86ec8a0197f330c584b379b +github.com/prometheus/client_golang,v1.2.1,h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI=,174c921fe3e154adddd8e0dc572323dd04901bcad0965de614174241981da57c +github.com/prometheus/client_model,v0.0.0-20190812154241-14fe0d1b01d4,h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=,5d4719be47f4f69ab5bf36a04c75eb078a0f69b43a335f400c2d688ac9e61795 
+github.com/prometheus/common,v0.7.0,h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY=,f2640a94b18b115552df41ee33effa013e10536aca51e09a971d1503a20e186a +github.com/prometheus/procfs,v0.0.5,h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8=,f45b90c72f8c2e4c84e5314092ee1ccf7d6ace1cc14b2f483c82f7c1e6d0d0d4 +github.com/prometheus/prom2json,v1.1.0,h1:/fEL2DK7EEyHVeGMG4TV+gSS9Sw53yYKt//QRL0IIYE=,166f5f98c62d0b90139947d1464ee747f8143772b9e926c7b51c53a4420380ff +github.com/prometheus/prometheus,v2.5.0+incompatible,h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg=,ede73f6ccabd60365549986a6c7ae152c1952129006c8ae521c86ff45c4aadcc +github.com/prometheus/tsdb,v0.10.0,h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic=,34e98f0e9ba55e7290774ee40569737745b395e32811e5940d2ed124a20f927c +github.com/pyinx/gorocket,v0.0.0-20170810024322-78ae1353729f,h1:N1r6pSlez3lLsqaNHbtrHW9ZuzrilETIabr9jPNj3Zs=,dcd920b789a98157bbe1ed7fff249255c1dd4d2fd80f7edc39b3a49fc08db13a +github.com/qor/admin,v0.0.0-20191021122103-f3db8244d2d2,h1:IWw22+hlihdss/qI93QH48jTBUEOD/fsBqj+0z61z/Y=,722e243550878791adcdb3bcea66ab8e7a185637c8a7ad0a752713951aef2a91 +github.com/qor/assetfs,v0.0.0-20170713023933-ff57fdc13a14,h1:JRpyNNSRAkwNHd4WgyPcalTAhxOCh3eFNMoQkxWhjSw=,7fe36875e7e59afd9154f827babbffaa7f67ac54b7790df5a4a4a376c78b2282 +github.com/qor/middlewares,v0.0.0-20170822143614-781378b69454,h1:+WCc1IigwWpWBxMFsmLUsIF230TakGHstDajd8aKDAc=,4c2ed9a2f7b24dfa64464091b2c01ce9fc947524bb834d77aeb9ceaf8610e5fc +github.com/qor/qor,v0.0.0-20191022064424-b3deff729f68,h1:MSbP9P4HnmEyH+uGQAW+V0HoTzlZ9SRq7kdCaRiZEmU=,9053796b8a7afe21483262affaf5b35bac8bf3387e24531448a4833d7b758978 +github.com/qor/render,v1.1.1,h1:DaGaKlf0OzpOB+hJUEiOTbZ40mg+n+LlSJx20/KUfes=,8f957a13173ef1a22d0caeea1cc6d198b064d242676444e00e2f597c405928c9 +github.com/qor/responder,v0.0.0-20171031032654-b6def473574f,h1:sKELSAyL+z5BRHFe97Bx71z197cBEobVJ6rASKTMSqU=,b69784649ec65ec2580d7640af25ec66973d59d82ec5391498cfe4c3076e5f6f 
+github.com/qor/roles,v0.0.0-20171127035124-d6375609fe3e,h1:F0BNcPJKfubM/+IIILu/GbrH9v2vPZWQ5/StSRKUfK4=,1a35a5480c7169e86025eb19dbcddc13fd00472e6b4ade7574e62c290cf09100 +github.com/qor/session,v0.0.0-20170907035918-8206b0adab70,h1:8l21EEdlZ9R0AA3FbeUAANc5NAx8Y3tn1VKbyAgjYlI=,7c759bc736c4936a602ca1f0ebad9a324d8332ffd342e1e3acd80355180fc858 +github.com/qor/validations,v0.0.0-20171228122639-f364bca61b46,h1:dRlsVUhwD1pwrasuVbNooGQITYjKzmXK5eYoEEvBGQI=,b29360c4a4e9cc8d0ff682d8bf1f446a5d61d5a4f8d3cf2fc6d8cc077e5d810f +github.com/racker/perigee,v0.1.0,h1:8RjBm1YGJKVVjUfO02Uok+npegz8lSSEVqjimDqlFYc=,d43613102ed67445c9fc81b621959b58f827c187189b09cec236c3bac5ce1ccb +github.com/raff/goble,v0.0.0-20190909174656-72afc67d6a99,h1:JtoVdxWJ3tgyqtnPq3r4hJ9aULcIDDnPXBWxZsdmqWU=,ef5dde1af55d451c37ddf13e17ae339d299903cb7e67567fc6d1e69688a789e1 +github.com/rai-project/config,v0.0.0-20190926180509-3bd01e698aad,h1:o0056EwcQBeyaVb2my+T0TvMR5FpEY0CGNgWkbj/xEo=,27c2311ad1fdc185e08f2e1703893482b7d26caf64854ed371bf38f3a9303f92 +github.com/rai-project/godotenv,v0.0.0-20180619160704-a501614c3b8d,h1:reVy+ViZcrx1ILo+L8wa3dGf6hSd4qlY62VqxZxEgWs=,f4d9ecb56f20667fbb09bd5256d0c6b81b9e8cbca8f6476240c5d1800ffb07ed +github.com/rai-project/logger,v0.0.0-20190701163301-49978a80bf96,h1:GeXSVSRfXOBN5XHNA+wq5G+sfq99mhpwn6U5kcGwSeg=,53d7677e7d7dab6b1f83591ec10491301289752e337641403f8413c0749b84d8 +github.com/rai-project/utils,v0.0.0-20180619204045-c582bb171808,h1:cHOS6oMEt8wi93zm5V7cHVnWgOhaAUCpjRDEZHBsckg=,6d43ccc901ad2f19744696f6c3d04ee28b4496cef7fe72ce7eccb89af0d8bfac +github.com/rai-project/vipertags,v0.0.0-20190404224953-d63b0a674aa9,h1:3o86f/tK0DBZdPcUBjzFu1mEZsRCzjSgi5PNHope4AQ=,9aa8cdd1a3369382d28bad0f4581250fbecae51602aa8566cbe68dfadc8f7785 +github.com/raintank/schema,v1.0.0,h1:tK0zKHceZd5nkCUI5Soip1pA2BAvoc4qzloVEsK0y+Q=,9ffc30e882b1cfed3152bab9c8c95e00c984dc0d8895426c95d96a184e09ffe3 
+github.com/rainycape/memcache,v0.0.0-20150622160815-1031fa0ce2f2,h1:dq90+d51/hQRaHEqRAsQ1rE/pC1GUS4sc2rCbbFsAIY=,2d42bb018c6b0531f93e2dc862c87374966c64c9a88863612ab5e676a32661fa +github.com/rainycape/unidecode,v0.0.0-20150907023854-cb7f23ec59be,h1:ta7tUOvsPHVHGom5hKW5VXNc2xZIkfCKP8iaqOyYtUQ=,0ab56010a3ef93c20bb6d8c486e3b447b4004052053e280ea6eabf2a5138bdce +github.com/rakyll/statik,v0.1.6,h1:uICcfUXpgqtw2VopbIncslhAmE5hwc4g20TEyEENBNs=,58cc0c07f8e9dd17ad5c4e0f89c03d8a3ed420aac0e76b79adf7ebd1d48c5893 +github.com/rcrowley/go-metrics,v0.0.0-20190826022208-cac0b30c2563,h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=,22e944d960aec1a1e62e8cc2daaa70abefbbe989dd9c233060ab533de5f6e724 +github.com/remyoudompheng/bigfft,v0.0.0-20190512091148-babf20351dd7,h1:FUL3b97ZY2EPqg2NbXKuMHs5pXJB9hjj1fDHnF2vl28=,73f78c7e36c32822221f9f676b65ebe7ccb92ab6ff221035ace35c184e165c0d +github.com/renier/xmlrpc,v0.0.0-20170708154548-ce4a1a486c03,h1:Wdi9nwnhFNAlseAOekn6B5G/+GMtks9UKbvRU/CMM/o=,f9c07652c6de1aecf5baaa3b93c1e6c23379458e30553400d5f96ac8b3ea85c4 +github.com/renstrom/fuzzysearch,v0.0.0-20160331204855-2d205ac6ec17,h1:4qPms2txLWMLXKzqlnYSulKRS4cS9aYgPtAEpUelQok=,01782a5d1682a72614126da402171253030c0de60485bc18a3e63b07d977c094 +github.com/retr0h/go-gilt,v0.0.0-20190206215556-f73826b37af2,h1:vZ42M1tDiMLtirFA1K5k2QVFhWRqR4BjdSw0IMclzH4=,e7956b01b3ccea41395f1f641a0f9045f214c1075d7ecc25553b72383009274e +github.com/revel/config,v0.21.0,h1:Bw4iXLGAuD/Di2HEhPSOyDywrTlFIXUMbds91lXTtTU=,22842698f6c646b9b89649b432d0f24deae1c5a3779c49819ec99c5db6e4b5a0 +github.com/revel/log15,v2.11.20+incompatible,h1:JkA4tbwIo/UGEMumY50zndKq816RQW3LQ0wIpRc+32U=,28e4263b0320a07dd2ae71ba09aef1f9b4af44258a8c0f1dfb1d63300f93c401 +github.com/revel/pathtree,v0.0.0-20140121041023-41257a1839e9,h1:/d6kfjzjyx19ieWqMOXHSTLFuRxLOH15ZubtcAXExKw=,de658b8de908c9c090343447e66e6bbdfe99656fcfa5889997486b0594c2a719 
+github.com/revel/revel,v0.21.0,h1:E6kDJmpJSDb0F8XwbyG5h4ayzpZ+8Wcw2IiPZW/2qSc=,c66570c338f37e95626646909af1086f0bf31d8432fe982d24c415d14bc1dc9c +github.com/rivo/tview,v0.0.0-20191018125527-685bf6da76c2,h1:GVXSfgXOMAeLvFH7IrpY3yYM8H3YekZEFcZ14q9gQXM=,000538d9517bd5f28cfe377e63183f7093043acf8bb913eb493adb29518eb6b8 +github.com/rivo/uniseg,v0.1.0,h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY=,cb701df81f36acfbb2627a78662fdcaa150ee1ac00d0796a7f3eafbdb6218128 +github.com/rjeczalik/notify,v0.9.2,h1:MiTWrPj55mNDHEiIX5YUSKefw/+lCQVoAFmD6oQm5w8=,e8b9b93870f7ed17f30c617acb55f5fa78e7931518c88999c3d1b5b048f51482 +github.com/rkt/rkt,v1.30.0,h1:ZI5RQtSibfjicSttV/HLiHuWreYClEJA2Or5XKAdJb0=,ca2e00335dbeae7e0fbe2c45535d2bb8fce72c2bb6045b0bdf25bc6b8b59179e +github.com/robertkrimen/otto,v0.0.0-20180617131154-15f95af6e78d,h1:1VUlQbCfkoSGv7qP7Y+ro3ap1P1pPZxgdGVqiTVy5C4=,7adbe73b0db5319bae0421a0ed7fc5619002d6e9a2be87dc8c673c8541dfd949 +github.com/robfig/cron,v1.2.0,h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=,0811a1a5a4e1f45824ac520deb2002326a659dbb4918cdfea47d80560a23211d +github.com/robfig/cron/v3,v3.0.0,h1:kQ6Cb7aHOHTSzNVNEhmp8EcWKLb4CbiMW9h9VyIhO4E=,5e29b4f7f4ba62293420b918fb2309823523a583c2adaf6eddb059f525f05496 +github.com/rogpeppe/fastuuid,v1.2.0,h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=,f9b8293f5e20270e26fb4214ca7afec864de92c73d03ff62b5ee29d1db4e72a1 +github.com/rogpeppe/go-charset,v0.0.0-20180617210344-2471d30d28b4,h1:BN/Nyn2nWMoqGRA7G7paDNDqTXE30mXGqzzybrfo05w=,a28b06534aa71873d08578d69b08512dab54caa0ffd9e2943b3479166049eddd +github.com/rogpeppe/go-internal,v1.4.0,h1:LUa41nrWTQNGhzdsZ5lTnkwbNjj6rXTdazA1cSdjkOY=,fb7d843253301d3ea9793f90e6bea16a8f2970a01b361f490ee66b36f81e03a5 +github.com/rpcx-ecosystem/quic-conn,v0.0.0-20190920095804-3967ef162525,h1:Awv5A28rrxuHf1+9+N08cnBa6JuKbhHswmNdfj65Bzo=,b40886ad7129eff9e517187b527467330db3705207349cdaa8f35c0dc8445c08 
+github.com/rs/cors,v1.7.0,h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=,67815316761fddc4acfaad852965cf04ec88674abe3a05c6c332519556c55855 +github.com/rs/xhandler,v0.0.0-20160618193221-ed27b6fd6521,h1:3hxavr+IHMsQBrYUPQM5v0CgENFktkkbg1sfpgM3h20=,665ae95533e1a046cf470c7341c59e64b3e2a795cdaaf307368f69a0ba547f2c +github.com/rs/xid,v1.2.1,h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=,4abdedc4de69adcb9a4575f99c59d8ab542191e1800b6a91e12a4e9ea8da0026 +github.com/rs/zerolog,v1.16.0,h1:AaELmZdcJHT8m6oZ5py4213cdFK8XGXkB3dFdAQ+P7Q=,64e248c1fa3c62e2d904868b49acf906d0cb04a00a323d2562ea9ce7c6f154e1 +github.com/rubenv/sql-migrate,v0.0.0-20191025130928-9355dd04f4b3,h1:lwDYefgiwhjuAuVnMVUYknoF+Yg9CBUykYGvYoPCNnQ=,4d4e9e2c7387542b26a1cd9fbfcbdab7b75dce807877d5a0a501180b584c60f2 +github.com/rubyist/circuitbreaker,v2.2.1+incompatible,h1:KUKd/pV8Geg77+8LNDwdow6rVCAYOp8+kHUyFvL6Mhk=,fc1125d9260a471d349c94a251340c437f98743b42324706482596f303c28b11 +github.com/russross/blackfriday,v2.0.0+incompatible,h1:cBXrhZNUf9C+La9/YpS+UHpUT8YD6Td9ZMSU9APFcsk=,836047aa9cbd223efba85b892e6897cf7a3b5ee3f2e6ad36b189d40842f703df +github.com/russross/blackfriday/v2,v2.0.1,h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=,496079bbc8c4831cd0507213e059a925d2c22bd1ea9ada4dd85815d51b485228 +github.com/rwcarlsen/goexif,v0.0.0-20190401172101-9e8deecbddbd,h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc=,98e8ce7bf484716bdf272f31ee01354599f4ec4b4ece7c04156c15b264d8f6ec +github.com/ryanuber/columnize,v2.1.0+incompatible,h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s=,ff687e133db2e470640e511c90cf474154941537a94cd97bb0cf7a28a7d00dc7 +github.com/ryanuber/go-glob,v1.0.0,h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=,2084f36ead38a505489fdb46329502fb627f568224dcc22ef11ec173b61fc2cf +github.com/ryszard/goskiplist,v0.0.0-20150312221310-2dfbae5fcf46,h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=,12c65729fc31d5a9bf246eb387bd4c268d0d68bf33b913cccd81bebd47d6f80d 
+github.com/sacloud/libsacloud,v1.26.1,h1:td3Kd7lvpSAxxHEVpnaZ9goHmmhi0D/RfP0Rqqf/kek=,4f0e24194ce3566707df5862177cb0f697debe3d5b799decb2685ee8d07dbe11 +github.com/saintfish/chardet,v0.0.0-20120816061221-3af4cd4741ca,h1:NugYot0LIVPxTvN8n+Kvkn6TrbMyxQiuvKdEwFdR9vI=,d9cb0e35c88fbf91a409db0626f2e8ae9db305cf95dc3469dc7d089a8432c9c3 +github.com/samuel/go-zookeeper,v0.0.0-20190923202752-2cc03de413da,h1:p3Vo3i64TCLY7gIfzeQaUJ+kppEO5WQG3cL8iE8tGHU=,499f8144de8a6839b2d70c8869d88f294604188ec501e928ca17446043147d40 +github.com/sanity-io/litter,v1.1.0,h1:BllcKWa3VbZmOZbDCoszYLk7zCsKHz5Beossi8SUcTc=,c4bbddbf1bd7bb4ef74a3c2cac98f4a78a2a3a5a6b8dd140bd31a5d38c459217 +github.com/santhosh-tekuri/jsonschema,v1.2.4,h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis=,1c946415ee3395181090664a37779c296b540ca7eec58844ad0283fef11fec00 +github.com/sasha-s/go-deadlock,v0.2.0,h1:lMqc+fUb7RrFS3gQLtoQsJ7/6TV/pAIFvBsqX73DK8Y=,6c3f90c7947da1090f545438f4b3fd461cfeec79ee1c6e5e83a0eed7258622b1 +github.com/sassoftware/go-rpmutils,v0.0.0-20190420191620-a8f1baeba37b,h1:+gCnWOZV8Z/8jehJ2CdqB47Z3S+SREmQcuXkRFLNsiI=,88264dbd268c88bc8a57e4b4a261f22058fa6e03eb2883b0a82375f854e15188 +github.com/satori/go.uuid,v1.2.0,h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=,4f741306a0cbe97581e34a638531bcafe3c2848150539a2ec2ba12c5e3e6cbdd +github.com/satori/uuid,v1.2.0,h1:6TFY4nxn5XwBx0gDfzbEMCNT6k4N/4FNIuN8RACZ0KI=,bfd4d3d619e3ad4dd915e05fec5bf10949d8af9bc5c19b840db35ec0f21172ad +github.com/scaleway/scaleway-cli,v0.0.0-20180921094345-7b12c9699d70,h1:DaqC32ZwOuO4ctgg9qAdKnlQxwFPkKmCOEqwSNwYy7c=,05566d6711de08738803132b8522f7051fccd3b3bf2c739dde421fffdfa75eaf +github.com/sclevine/agouti,v3.0.0+incompatible,h1:8IBJS6PWz3uTlMP3YBIR5f+KAldcGuOeFkFbUWfBgK4=,b20c8a6a2c1fda0ae6a9cd6d319e78a7a5afea4bc90810cd46b99246d8219d23 +github.com/sclevine/spec,v1.2.0,h1:1Jwdf9jSfDl9NVmt8ndHqbTZ7XCCPbh1jI3hkDBHVYA=,582017cd824cf3cdf6803ec7db2250304f66efea705feb69cbabab416928b8f4 
+github.com/sean-/conswriter,v0.0.0-20180208195008-f5ae3917a627,h1:Tn2Iev07a4oOcAuFna8AJxDOF/M+6OkNbpEZLX30D6M=,0637d2fc0eb4627827e4b73dbe3a72479708641df8fc71a06e7bc481f6a7f39b +github.com/sean-/pager,v0.0.0-20180208200047-666be9bf53b5,h1:D07EBYJLI26GmLRKNtrs47p8vs/5QqpUX3VcwsAPkEo=,a4288f9116ea01c34efd65b7dce4357ba6f9c02ad984ca758fea0d0aebb605c9 +github.com/sean-/seed,v0.0.0-20170313163322-e2103e2c3529,h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=,0bc8e6e0a07e554674b0bb92ef4eb7de1650056b50878eed8d5d631aec9b6362 +github.com/sebest/xff,v0.0.0-20150611211316-7a36e3a787b5,h1:MqIPVG2sHTgcQxFwZ+iHZSQ869PVP42SgEEeI1+X4Y8=,8cbe518a78ab7998550c509bd9fadc95a1aef8e86b1022cb3d265348ad370cde +github.com/seccomp/libseccomp-golang,v0.9.1,h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=,5989692d87ef4c377fbc60d441795a90d9453b9e357d019e44d9033ab39ca888 +github.com/segmentio/go-loggly,v0.5.1-0.20171222203950-eb91657e62b2,h1:S4OC0+OBKz6mJnzuHioeEat74PuQ4Sgvbf8eus695sc=,5e071d0b6923a0fa78895bf7e673f5a4e482d39d4603b7dabd4056a506923ca7 +github.com/segmentio/go-prompt,v1.2.1-0.20161017233205-f0d19b6901ad,h1:EqOdoSJGI7CsBQczPcIgmpm3hJE7X8Hj3jrgI002whs=,b86fcda4b8afd5a3893ea333431368e60ea5ebee302a3014aee6d2020233bf31 +github.com/segmentio/kafka-go,v0.1.0,h1:IXCHG+sXPNiIR5pC/vTEItZduPKu4cnpr85YgxpxlW0=,e0b749b974d3277438d09dd6178928c3ad6c3760313f7ad45ec5cd88d8eb14b9 +github.com/serenize/snaker,v0.0.0-20171204205717-a683aaf2d516,h1:ofR1ZdrNSkiWcMsRrubK9tb2/SlZVWttAfqUjJi6QYc=,67272dde9cf92af80704869dea59346be1c37098373200dd8eea6e0e034079b4 +github.com/sergi/go-diff,v1.0.0,h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=,287218ffcd136dbb28ce99a2f162048d8dfa6f97b524c17797964aacde2f8f52 +github.com/serialx/hashring,v0.0.0-20180504054112-49a4782e9908,h1:RRpyb4kheanCQVyYfOhkZoD/cwClvn12RzHex2ZmHxw=,4184e14faf8e39222109eb2b7fa3aee2e0a544b66785ad0b7058318483ff76bb 
+github.com/sethgrid/pester,v0.0.0-20190127155807-68a33a018ad0,h1:X9XMOYjxEfAYSy3xK1DzO5dMkkWhs9E9UCcS1IERx2k=,ddcaf31e63aaf1ac003af97e667bedaa0fc89956e19aeb032c5658629da29800 +github.com/shiena/ansicolor,v0.0.0-20151119151921-a422bbe96644,h1:X+yvsM2yrEktyI+b2qND5gpH8YhURn0k8OCaeRnkINo=,60da6dc53662eb72063784f3bf609edb7aa317c552f81651164bc657754902a6 +github.com/shirou/gopsutil,v2.19.10+incompatible,h1:lA4Pi29JEVIQIgATSeftHSY0rMGI9CLrl2ZvDLiahto=,e5afa6f0b690ecc3ff12458663c6337920a759f27c3d9692a0836644337e4e85 +github.com/shirou/w32,v0.0.0-20160930032740-bb4de0191aa4,h1:udFKJ0aHUL60LboW/A+DfgoHVedieIzIXE8uylPue0U=,3ed6741a7e1470feffb50031ecf9919f30b5f573f993683b6574488756ef65c1 +github.com/shopspring/decimal,v0.0.0-20191009025716-f1972eb1d1f5,h1:Gojs/hac/DoYEM7WEICT45+hNWczIeuL5D21e5/HPAw=,91a0ee539fb6f3de1550cdf93c73434fc8a16bab37be693997b20317510331a9 +github.com/shurcooL/component,v0.0.0-20170202220835-f88ec8f54cc4,h1:Fth6mevc5rX7glNLpbAMJnqKlfIkcTjZCSHEeqvKbcI=,2dd1cfac518def9fc8c6ac69022a85b0413269caf93d9532f77dca7375e1d645 +github.com/shurcooL/events,v0.0.0-20181021180414-410e4ca65f48,h1:vabduItPAIz9px5iryD5peyx7O3Ya8TBThapgXim98o=,1dcade8d00ba3945f5d1bc56c09a84e2d51fa20d20ef4fa6f867e5e4cd918e9d +github.com/shurcooL/github_flavored_markdown,v0.0.0-20181002035957-2122de532470,h1:qb9IthCFBmROJ6YBS31BEMeSYjOscSiG+EO+JVNTz64=,d984dc45e823f4c99e89841d675e34d2d35d3b334f1b3690fde05de30a66929f +github.com/shurcooL/githubv4,v0.0.0-20191006152017-6d1ea27df521,h1:ARaYJO1zp2afVv0s28fq7uxgee4WLop35FWrOoSZyak=,7f5c88b38760c5090bffe582a40abe7dc17a789f9041549e5c17e3d71df2d75d +github.com/shurcooL/go,v0.0.0-20180423040247-9e1955d9fb6e,h1:MZM7FHLqUHYI0Y/mQAt3d2aYa0SiNms/hFqC9qJYolM=,350e4c547dbeb657bb3b2eab428f1c29a80808e8096ff87324fd84744f914766 +github.com/shurcooL/go-goon,v0.0.0-20170922171312-37c2f522c041,h1:llrF3Fs4018ePo4+G/HV/uQUqEI1HMDjCeOf2V6puPc=,31cb3f736521597c56f962b9d7d21073620fbb1da845305aba743960f09e4115 
+github.com/shurcooL/gofontwoff,v0.0.0-20180329035133-29b52fc0a18d,h1:Yoy/IzG4lULT6qZg62sVC+qyBL8DQkmD2zv6i7OImrc=,685dedb79602bb41403a7b5198f5c9d0ffbc99a68d7f99160ecf08a71475e5f4 +github.com/shurcooL/gopherjslib,v0.0.0-20160914041154-feb6d3990c2c,h1:UOk+nlt1BJtTcH15CT7iNO7YVWTfTv/DNwEAQHLIaDQ=,ea6c396c92724a8028793bde957dbe9a1c594b8af085035e652d4335e6aa30e1 +github.com/shurcooL/graphql,v0.0.0-20181231061246-d48a9a75455f,h1:tygelZueB1EtXkPI6mQ4o9DQ0+FKW41hTbunoXZCTqk=,eb1b45dc90aed0edcfc4cacffdc2645121dda8155702440eada1bcafefddcbba +github.com/shurcooL/highlight_diff,v0.0.0-20170515013008-09bb4053de1b,h1:vYEG87HxbU6dXj5npkeulCS96Dtz5xg3jcfCgpcvbIw=,b4bcb7f3e50a99623d5f39c4e054964fc60d5e4b34543408582a0a984a67b630 +github.com/shurcooL/highlight_go,v0.0.0-20181028180052-98c3abbbae20,h1:7pDq9pAMCQgRohFmd25X8hIH8VxmT3TaDm+r9LHxgBk=,9f879b051c8eadb6dc063ca3ff6856d0e64cd30b5ad545e580b77b4f8ef9ddd7 +github.com/shurcooL/home,v0.0.0-20181020052607-80b7ffcb30f9,h1:MPblCbqA5+z6XARjScMfz1TqtJC7TuTRj0U9VqIBs6k=,0042d859afa3221fd4b4049b350a2d6ffcc674e4c4177bb0c232dc120b410ee6 +github.com/shurcooL/htmlg,v0.0.0-20190503024804-b6326af49ef6,h1:kXXs9Xnfv5gU7KLKiOE3AQgaRUUXchcXnO2rP3fZ5Ao=,52485f17bba8920b37a70124b90eea9d43037a9764a785c97a7e531ca09ed5a5 +github.com/shurcooL/httperror,v0.0.0-20190506043526-2e76094aa70e,h1:QTph/PpT1aDtFHk0sVJoVG/Vfox0YZkq70sW/tvXJM0=,7807129d1577611bdf803b7a4dd3253f45e4b63a77c1a73bed48a0c838c463c6 +github.com/shurcooL/httpfs,v0.0.0-20190707220628-8d4bc4ba7749,h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=,a2079dbd8c236262ecbb22312467265fbbddd9b5ee789531c5f7f24fbdda174b +github.com/shurcooL/httpgzip,v0.0.0-20190720172056-320755c1c1b0,h1:mj/nMDAwTBiaCqMEs4cYCqF7pO6Np7vhy1D1wcQGz+E=,70ef73fce2f89d622f828cb439fd6c7b48a7fe63600410a8c0a936042c0e4631 +github.com/shurcooL/issues,v0.0.0-20190705005435-6a96395fbb66,h1:kls/E9JqtKEj8tWx2PwKCWqEWmwzsX7cnj9QkaEhUpM=,dd1ace2ad69b6c130a9294c3eb4032090e73c3b7dace098a5a7e1ad154f8e911 
+github.com/shurcooL/issuesapp,v0.0.0-20180602232740-048589ce2241,h1:Y+TeIabU8sJD10Qwd/zMty2/LEaT9GNDaA6nyZf+jgo=,ac947684d3f13beef9433724deddc2c7ddb6d19921d6902f4789dd4ce1af5f3c +github.com/shurcooL/notifications,v0.0.0-20181111060504-bcc2b3082a7a,h1:bQX0+HfDylIQCtf1tzyrxQ+BqIV08ZjkjgspFWiIYhc=,c1c77700f490d0211cec00fd5fd0ee80debf66e0e41de1dc68b24dc726db5409 +github.com/shurcooL/octicon,v0.0.0-20190930024621-43309dfb482e,h1:C2+alklsN4yRHXaOX3v9TuCGlTSwZQjSnN88nLGVhg8=,88953a9951a14e24afd2d1040e9de0b4fbe194805fdc7ec9d9d9bbcd8c2f3448 +github.com/shurcooL/reactions,v0.0.0-20181222204718-145cd5e7f3d1,h1:hHIhW4KrmPQ/hJ7AuKNNvVPVE2k/LVE5NTFsQ68taBw=,fd5f9a0c6e7e292bdfa81fcad767f61c95dc84f18bf4f9f02a4fe02f75327d37 +github.com/shurcooL/sanitized_anchor_name,v1.0.0,h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=,0af034323e0627a9e94367f87aa50ce29e5b165d54c8da2926cbaffd5834f757 +github.com/shurcooL/users,v0.0.0-20180125191416-49c67e49c537,h1:YGaxtkYjb8mnTvtufv2LKLwCQu2/C7qFB7UtrOlTWOY=,3f17089e996438a88a478d38807ce4f3c045a91114830946a1bdc760eb2b7c58 +github.com/shurcooL/vfsgen,v0.0.0-20181202132449-6a9ea43bcacd,h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0=,8a093681b21159514a1742b1a49e88fa2cf562673a5a0055e9abeb7ff590ee19 +github.com/shurcooL/webdavfs,v0.0.0-20170829043945-18c3829fa133,h1:JtcyT0rk/9PKOdnKQzuDR+FSjh7SGtJwpgVpfZBRKlQ=,bb70104152800cbb490c480bead0d2ef24176be9e1304e6701ab161115484863 +github.com/siddontang/go,v0.0.0-20180604090527-bdc77568d726,h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM=,ef97fabc8a96a758fac273b01dff6be7957ed44c4b6c6a8316f43741329a0049 +github.com/siddontang/go-snappy,v0.0.0-20140704025258-d8f7bb82a96d,h1:qQWKKOvHN7Q9c6GdmUteCef2F9ubxMpxY1IKwpIKz68=,faf83d6459d06f5f4a9acd09e23e284e11792d14de331bd7b87852b18f9cf5c3 +github.com/siddontang/ledisdb,v0.0.0-20190202134119-8ceb77e66a92,h1:qvsJwGToa8rxb42cDRhkbKeX2H5N8BH+s2aUikGt8mI=,dab81c0bdfc62063a340f61dfab19c065d2d10b1245cd56cc04832130a6bbea5 
+github.com/siddontang/rdb,v0.0.0-20150307021120-fc89ed2e418d,h1:NVwnfyR3rENtlz62bcrkXME3INVUa4lcdGt+opvxExs=,93bf89960d84b8732e648cb413dced692c1d3d9000997e99826538a5f20b1d82 +github.com/sigurn/crc8,v0.0.0-20160107002456-e55481d6f45c,h1:hk0Jigjfq59yDMgd6bzi22Das5tyxU0CtOkh7a9io84=,12916a0da94e747b99653138a25112e24b082db53bc0d5cffe62214ce3fb884d +github.com/sigurn/utils,v0.0.0-20190728110027-e1fefb11a144,h1:ccb8W1+mYuZvlpn/mJUMAbsFHTMCpcJBS78AsBQxNcY=,694bb4cbe9dd17447c1e0054ef327eebd9bed8682aa39f5f4d282fb9b1717299 +github.com/sirupsen/logrus,v1.4.2,h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=,9a8e55830261a4b1c9350d7c45db029c8586c0b2d934d1224cde469425031edd +github.com/skratchdot/open-golang,v0.0.0-20190402232053-79abb63cd66e,h1:VAzdS5Nw68fbf5RZ8RDVlUvPXNU6Z3jtPCK/qvm4FoQ=,242db3338b172ecb58bdf3406b4cafecfa738cfb7b8cd71698d23831aedd94b0 +github.com/skyrings/skyring-common,v0.0.0-20160929130248-d1c0bb1cbd5e,h1:jrZSSgPUDtBeJbGXqgGUeupQH8I+ZvGXfhpIahye2Bc=,d5010d4900d7417c05d4863399e5509e82dfaca9c09c31ac9e5ebdcaf109e833 +github.com/smallnest/libkv-etcdv3-store,v0.0.0-20191101045330-f92940446965,h1:YQtdLz+7JQdKn7f5cG+xSrSbI7X4jObx0Jy6ZzffGew=,b9fb22d7d67e16cd3a1d7c7a5b2faf6c35c690ae1c3bcf70dbf77813db7dc563 +github.com/smallnest/rpcx,v0.0.0-20191101045608-2a801682117a,h1:Fzp1HLqyYg8koEELgwfSEUgkE6QPvrN9qCkHZ8tikFY=,0d2255c9ffc429e32936dbb9e51c79bbf2b76a7dec95c5d9dc1668053d5642bc +github.com/smallnest/valkeyrie,v0.0.0-20191030064635-54a884e4b303,h1:NDOAHb1sE8pYWd0Dge8W6bGQ63FHfa0/QjClXG2hrgw=,b846d492aaf7053115b2e143b7c7696299b852ec670d261bd78b5cd996eacde3 +github.com/smartystreets/assertions,v1.0.1,h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w=,2e3d9f61f68cdf7b48653582640ef88744c1a3bdd4257ac68f621579a2f807dd +github.com/smartystreets/go-aws-auth,v0.0.0-20180515143844-0c1422d1fdb9,h1:hp2CYQUINdZMHdvTdXtPOY2ainKl4IoMcpAXEf2xj3Q=,d9441cfbef2c680269ced67f8e1d99af9cf649e11a7f133a5b0685be0277ca7d 
+github.com/smartystreets/goconvey,v0.0.0-20190731233626-505e41936337,h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8=,fd90be078397b45806e4dfaca367235aef6d6133871c8a6cc6d3d579280d8d03 +github.com/smartystreets/gunit,v1.0.0,h1:RyPDUFcJbvtXlhJPk7v+wnxZRY2EUokhEYl2EJOPToI=,36cf43529cfadeb297ce1537c7d0fca8373a95936806121ce7ce0bf653e959ee +github.com/smola/gocompat,v0.2.0,h1:6b1oIMlUXIpz//VKEDzPVBK8KG7beVwmHIUEBIs/Pns=,7812934f407beeab20aa289b0056234ae6637b30b301ebf97a5d7a9fd8e665fc +github.com/snikch/goodman,v0.0.0-20171125024755-10e37e294daa,h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY=,ab939c56cb7afcff213aef4568f40c9ddeae30166e34a2fa7f5718a47227c2e1 +github.com/softlayer/softlayer-go,v0.0.0-20180806151055-260589d94c7d,h1:bVQRCxQvfjNUeRqaY/uT0tFuvuFY0ulgnczuR684Xic=,63ad57bc2d4c27db3dcab7cf545a075bb4d7ea66aba57c284c07a2c938220f8c +github.com/soheilhy/cmux,v0.1.4,h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=,6d6cadade0e186f84b5f8e7ddf8f4256601b21e49b0ca49fd003a7e570ae1885 +github.com/songtianyi/rrframework,v0.0.0-20180901111106-4caefe307b3f,h1:o3QHyJEW1U+8oyEZeaXFcYqdhhiZjrs25/8AZmsWjiU=,b1cf04474a48de1ed7ae535ae4a2d5b17a0df4ce0d3b953c5268f42ee34cb17d +github.com/soniakeys/unit,v1.0.0,h1:UMIgu6dxDQaK6tYaQV6dJn5oovB6035KRxCS0O7Jiec=,565c64fe777e1140d82422e9b8d29ce8de82d7916e50dac2f7591d2c6f2d79e7 +github.com/sony/gobreaker,v0.4.1,h1:oMnRNZXX5j85zso6xCPRNPtmAycat+WcoKbklScLDgQ=,eab9bf8f98b16b051d7d13c4f5c70d6d1039347e380e0a12cb9ff6e33200d784 +github.com/sourcegraph/annotate,v0.0.0-20160123013949-f4cad6c6324d,h1:yKm7XZV6j9Ev6lojP2XaIshpT4ymkqhMeSghO5Ps00E=,2a58cbf2485b2e97e49d7c3e83e81385d1418bfbab2b846dabec041a3d402b3e +github.com/sourcegraph/syntaxhighlight,v0.0.0-20170531221838-bd320f5d308e,h1:qpG93cPwA5f7s/ZPBJnGOYQNK/vKsaDaseuKT5Asee8=,c0e6323ed7a5dcddcdd7686f2d7c68dff44a8ecbfd6818db3bdb33a7af422792 
+github.com/spacemonkeygo/errors,v0.0.0-20171212215202-9064522e9fd1,h1:xHQewZjohU9/wUsyC99navCjQDNHtTgUOM/J1jAbzfw=,b360a46f9534dd46d2b2c27c84ba8bbe3942832e74aa4ceb16acaa6ba30620be +github.com/spacemonkeygo/monotime,v0.0.0-20180824235756-e3f48a95f98a,h1:8+cCjxhToanKmxLIbuyBNe2EnpgwhiivsIaRJstDRFA=,4a55e556811ab93b23b46907b354e53fc553eb93314cf0b524933f37ac1437f8 +github.com/spacemonkeygo/openssl,v0.0.0-20181017203307-c2dcc5cca94a,h1:/eS3yfGjQKG+9kayBkj0ip1BGhq6zJ3eaVksphxAaek=,23031c8d37bbaa5aace338eed65af68c7d72bf134d7d0e09c963ed4974c56e58 +github.com/spacemonkeygo/spacelog,v0.0.0-20180420211403-2296661a0572,h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU=,91eb98e80c44d42e6f3ff7ddf84f825d20eb55669452d752fb8ed3adeb723be7 +github.com/spaolacci/murmur3,v1.1.0,h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=,60bd43ada88cc70823b31fd678a8b906d48631b47145300544d45219ee6a17bc +github.com/spf13/afero,v1.2.2,h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=,81d51799397212c9adb2cea6cf3a96a2b50f1baff8aff7bd410128a84f2a9e73 +github.com/spf13/cast,v1.3.0,h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=,001ed519a3ec007e76e639f72bd9560be70497d499acbf1a32ccf32dc4647d91 +github.com/spf13/cobra,v0.0.5,h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=,6c6739f11d69fa1e5b60ba1e04529f355f8a30e1aa2b137ba26260de8fa7a647 +github.com/spf13/fsync,v0.9.0,h1:f9CEt3DOB2mnHxZaftmEOFWjABEvKM/xpf3cUwJrGOY=,d470c73c6e821d6c8f47ce05be3360f4d686d9079dd5af1585420c73e4725c56 +github.com/spf13/jwalterweatherman,v1.1.0,h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=,43cc5f056caf66dc8225dca36637bfc18509521b103a69ca76fbc2b6519194a3 +github.com/spf13/pflag,v1.0.5,h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=,fc6e704f2f6a84ddcdce6de0404e5340fa20c8676181bf5d381b17888107ba84 +github.com/spf13/viper,v1.5.0,h1:GpsTwfsQ27oS/Aha/6d1oD7tpKIqWnOA6tgOX9HHkt4=,7f3513d0a1186b765937c788f0ac751076067b7a0abc82420171b6f262787ac5 
+github.com/src-d/envconfig,v1.0.0,h1:/AJi6DtjFhZKNx3OB2qMsq7y4yT5//AeSZIe7rk+PX8=,c694b1440b6969dfd4ebcba669faea8a05bdc7791ac78dcfbe29f153b0a8f0cd +github.com/src-d/gcfg,v1.4.0,h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=,2aa52404cbeec89c0a976d333448d1a4a6e113f03e000a715ce9006c84eb2e32 +github.com/srwiley/oksvg,v0.0.0-20190829233741-58e08c8fe40e,h1:LJUrNHytcMXWKxnULIHPe5SCb1jDpO9o672VB1x2EuQ=,e29e85accb2169d2f0f4dc90c22c446c24d244d68e0bbe038ba9df63381916c5 +github.com/srwiley/rasterx,v0.0.0-20181219215540-696f7edb7a7e,h1:FFotfUvew9Eg02LYRl8YybAnm0HCwjjfY5JlOI1oB00=,8a4b0686258a3e1b4f8b3e5f25efbaaefe7919d4e47e89eb36a6779504f8b116 +github.com/ssdb/gossdb,v0.0.0-20180723034631-88f6b59b84ec,h1:q6XVwXmKvCRHRqesF3cSv6lNqqHi0QWOvgDlSohg8UA=,2c20531d93416fa34ee9039308166c869c72c16fff715c73c05a3977157fdc2d +github.com/ssor/bom,v0.0.0-20170718123548-6386211fdfcf,h1:pvbZ0lM0XWPBqUKqFU8cmavspvIl9nulOYwdy6IFRRo=,7622ce25bbc5d5376ccb113f267f3d68bf2363963b02d04c053dfbc252f62c4a +github.com/steakknife/bloomfilter,v0.0.0-20180922174646-6819c0d2a570,h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE=,fb001f6df1197d462e7dfdbeded863aebd85bb904da5075117174a027a1b8cb1 +github.com/steakknife/hamming,v0.0.0-20180906055917-c99c65617cd3,h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=,e42bd1bc7073772613c2b4879110dd5330fded46a8cdf9269ff03cb6a82d1108 +github.com/stellar/go,v0.0.0-20191031165136-ed88b67b723d,h1:0pucQZ9fngYUl/tIGO/H96N3F5NL5ySjM3fuz+XEFSY=,d9d23bd5fc8cae6e65d4bb0d87e3cb582bc684eac1a519ca787b187a175999a5 +github.com/stellar/go-xdr,v0.0.0-20180917104419-0bc96f33a18e,h1:n/hfey8pO+RYMoGXyvyzuw5pdO8IFDoyAL/g5OiCesY=,5122e57a861bd0c38a3a3607f13576a150face8cacf9cafaf24e21e38a104b87 +github.com/stellar/throttled,v2.2.3-0.20190823235211-89d75816f59d+incompatible,h1:jMXXAcz6xTarGDQ4VtVbtERogcmDQw4RaE85Cr9CgoQ=,a89e929d8d8ba24e621c479708378263714861d8fce137085108da9f0cc8805a 
+github.com/steveyen/gtreap,v0.0.0-20150807155958-0abe01ef9be2,h1:JNEGSiWg6D3lcBCMCBqN3ELniXujt+0QNHLhNnO0w3s=,64b6a1f094784f1a843a6787bd159a103b9bebd2e85cc09a7e8445cc9e3ffc03 +github.com/streadway/amqp,v0.0.0-20190827072141-edfb9018d271,h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw=,66bd109504bf565a4a777c20a8cf6a1c5d05cd87b59baa50da8b6f2b0da4c494 +github.com/stretchr/objx,v0.2.0,h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=,5517d43cfb7e628b9c2c64010b934e346cd24726e3d6eaf02b7f86e10752e968 +github.com/stretchr/testify,v1.4.0,h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=,0400c42ab95389bb4f4577bc09917a040a97f0f4251db2a54a7f6f5e65065b73 +github.com/stripe/stripe-go,v66.1.1+incompatible,h1:D8qUD1rxv+RdXi2qo+IdDELkDevxYUQDfje20bGQPiw=,471de64dbc99da2b83fc1822ff9b4627b1b0738a8e3ee9ffb038510ce84e4baf +github.com/struCoder/pidusage,v0.1.2,h1:fFPTThlcWFQyizv3xKs5Lyq1lpG5lZ36arEGNhWz2Vs=,6ae03cd6cab9014ca7c0326fc233b27d942556c9753d2da87a93dd0fecbb9986 +github.com/stumble/gorocksdb,v0.0.3,h1:9UU+QA1pqFYJuf9+5p7z1IqdE5k0mma4UAeu2wmX8kA=,8bf18874189196133dabeb8fb7444633a0961e8983f8b2d8588d522d6aa679de +github.com/subosito/gotenv,v1.2.0,h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=,21474df92536f36de6f91dfbf466995289445cc4e5a5900d9c40ae8776b8b0cf +github.com/svanharmelen/jsonapi,v0.0.0-20180618144545-0c0828c3f16d,h1:Z4EH+5EffvBEhh37F0C0DnpklTMh00JOkjW5zK3ofBI=,482b13f426a15f3cb64ae5cb1a5fd2f27ca142465a174e24a2cc356812a3ed28 +github.com/swaggo/files,v0.0.0-20190704085106-630677cd5c14,h1:PyYN9JH5jY9j6av01SpfRMb+1DWg/i3MbGOKPxJ2wjM=,e1fe1ffca3a181bede3787e75797345bc69a583a67d8bb10b934f7a140516162 +github.com/swaggo/gin-swagger,v1.2.0,h1:YskZXEiv51fjOMTsXrOetAjrMDfFaXD79PEoQBOe2W0=,7ba6476ca79affa95429821a187b7cb3458305737ac2d1b86340814c3f276f71 +github.com/swaggo/swag,v1.6.3,h1:N+uVPGP4H2hXoss2pt5dctoSUPKKRInr6qcTMOm0usI=,1adbe98538a3f1b5e64fdf08f86cea4502a2c0d0cf1b047a27af6acf764f8c17 
+github.com/syndtr/gocapability,v0.0.0-20180916011248-d98352740cb2,h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=,ece41bcca6ca06202649ccee0d2ab62667217ceb70f3a84794c3751c16b75cee +github.com/syndtr/goleveldb,v1.0.1-0.20190318030020-c3a204f8e965,h1:1oFLiOyVl+W7bnBzGhf7BbIv9loSFQcieWWYIjLqcAw=,b0dbd1bdec73ea70eb1db85322046d202bcbfe901bc821d6a50ffc182c276306 +github.com/tarm/serial,v0.0.0-20180830185346-98f6abe2eb07,h1:UyzmZLoiDWMRywV4DUYb9Fbt8uiOSooupjTq10vpvnU=,cd962b3b9ef46158abad455c95ed92f2632cdb9217df2b7690171cc3db507add +github.com/tatsushid/go-fastping,v0.0.0-20160109021039-d7bb493dee3e,h1:nt2877sKfojlHCTOBXbpWjBkuWKritFaGIfgQwbQUls=,1c25333d4ca05ca13828835e07876c0efdd90a1c32a715527aa722b3c63c2d48 +github.com/tchap/go-patricia,v2.3.0+incompatible,h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=,19db63cf16ba944ea853c18397e4336342f1e95e4b2cb12127405bb64c67cf73 +github.com/tdewolff/minify,v2.3.6+incompatible,h1:2hw5/9ZvxhWLvBUnHE06gElGYz+Jv9R4Eys0XUzItYo=,8cabb8163bd65e43b42c5842b700d55e2daeae60c82b019007aceb1ae63638d5 +github.com/tdewolff/minify/v2,v2.5.2,h1:If/q1brvT+91oWiWnIMEGuFcwWtpB6AtLTxba78tvMs=,0af37ec252d094917a1ff4178659fe9f4539fdc3dca108bbeb9c0c2f86499eb9 +github.com/tdewolff/parse,v2.3.4+incompatible,h1:x05/cnGwIMf4ceLuDMBOdQ1qGniMoxpP46ghf0Qzh38=,f290dda8150ebdc2b9586f509770a6c82093ac9027329aeb9f3004a0b26de8e9 +github.com/tdewolff/parse/v2,v2.3.9,h1:d8/K6XOLy5JVpLTG9Kx+SxA72rlm5OowFmVSVgtOlmM=,5f517cbecd071b97ed822e8f88f96ba7d8b5a8accc49fc515298210ac088e7ef +github.com/tdewolff/test,v1.0.4,h1:ih38SXuQJ32Hng5EtSW32xqEsVeMnPp6nNNRPhBBDE8=,807205136d8f39bb7533d10b72932a183f15b45c385cd5464ae9d06e4af43337 +github.com/tealeg/xlsx,v1.0.5,h1:+f8oFmvY8Gw1iUXzPk+kz+4GpbDZPK1FhPiQRd+ypgE=,ff32f4336aed03df7c9cb7a4df9f1f42a1c64fe5d17c34566159511943d24bde +github.com/tecbot/gorocksdb,v0.0.0-20181010114359-8752a9433481,h1:HOxvxvnntLiPn123Fk+twfUhCQdMDaqmb0cclArW0T0=,26c0e94162340c7b4d1da3ee4c71ca03f9d6638711cf440d6835e1a8f07e4fb4 
+github.com/technoweenie/multipartstreamer,v1.0.1,h1:XRztA5MXiR1TIRHxH2uNxXxaIkKQDeX7m2XsSOlQEnM=,5a9aff85522275b125767b746869d24f4e2f776d5031631bf6e29641d99344dc +github.com/tedsuo/ifrit,v0.0.0-20191009134036-9a97d0632f00,h1:mujcChM89zOHwgZBBNr5WZ77mBXP1yR+gLThGCYZgAg=,1c502a5584dfbce25ff99c1a5689e2d106a138989e4a03249221ca4818674098 +github.com/tedsuo/rata,v1.0.0,h1:Sf9aZrYy6ElSTncjnGkyC2yuVvz5YJetBIUKJ4CmeKE=,f6745fd8ef8ee098410b31b1219def2c4e86c337ba6ff1319f086419b928f134 +github.com/temoto/robotstxt,v1.1.1,h1:Gh8RCs8ouX3hRSxxK7B1mO5RFByQ4CmJZDwgom++JaA=,c37f16f826a27512b7ae683ed32be5124a0252d1c7a8c4a00fd4e27d01c563d4 +github.com/templexxx/cpufeat,v0.0.0-20180724012125-cef66df7f161,h1:89CEmDvlq/F7SJEOqkIdNDGJXrQIhuIx9D2DBXjavSU=,c29bd644943d69b238da1936593421373d2db675a0fce54090d1c8b7eab7397b +github.com/templexxx/xor,v0.0.0-20181023030647-4e92f724b73b,h1:mnG1fcsIB1d/3vbkBak2MM0u+vhGhlQwpeimUi7QncM=,578ab42785a74d1a5dd3e65bf0979138b3a98bf877de4767b8eae5701a2342e1 +github.com/tencentcloud/tencentcloud-sdk-go,v3.0.71+incompatible,h1:9sIWfe6ZC7xoSlshYWNGicPqomK7N+CsHMa1YFWBCWU=,33a9526ee0244844270e532358a22616d821cc7f8f0638e33c60f722f84c5e42 +github.com/tendermint/btcd,v0.1.1,h1:0VcxPfflS2zZ3RiOAHkBiFUcPvbtRj5O7zHmcJWHV7s=,1967aa3cbabfb9e9780c0371a5359cc21ed77e5673b64e7dd5b234e838c82e62 +github.com/tendermint/crypto,v0.0.0-20180820045704-3764759f34a5,h1:u8i49c+BxloX3XQ55cvzFNXplizZP/q00i+IlttUjAU=,49ad334d452402d59757d3a415602f57bd7b66962d6115262f5c7413112d61bb +github.com/tendermint/ed25519,v0.0.0-20171027050219-d8387025d2b9,h1:zccWau0P8FELSb4HTDJ88hRo+WVNMbIbg27rFqDrhCE=,7c4a6e57c787df7c6e990c35bb31df3f4a5aa89f45c3b3df4a25dfb70c01f7e3 +github.com/tendermint/go-amino,v0.15.1,h1:D2uk35eT4iTsvJd9jWIetzthE5C0/k2QmMFkCN+4JgQ=,e91cde0d10d5a8ea6ab726fbab02ef737ba52f47e207cf675440f625153d3205 +github.com/tendermint/iavl,v0.12.2,h1:Ls5p5VINCM1HRT9g5Vvs2zmDOCU/CCIvIHzd/pZ8P0E=,a56011434929c4003fd735cbef8147e8aca3d241983c5fa7a006f5753e123020 
+github.com/tendermint/tendermint,v0.32.7,h1:Szu5Fm1L3pvn3t4uQxPAcP+7ndZEQKgLie/yokM56rU=,495a31dc762d79a689ce00cdd52f66b6b4071fc2b738ce4b3d1c2a9447389ecc +github.com/tendermint/tm-db,v0.2.0,h1:rJxgdqn6fIiVJZy4zLpY1qVlyD0TU6vhkT4kEf71TQQ=,99b7c1a00ee483b97e73126a25327b75da9a5bc6e34bf9fb1ecd6b83832fe13e +github.com/tent/http-link-go,v0.0.0-20130702225549-ac974c61c2f9,h1:/Bsw4C+DEdqPjt8vAqaC9LAqpAQnaCQQqmolqq3S1T4=,a4fe19fdbf8fbc30fe866e2cbb8761ee179f4a83bda63a0a6d30a651f3700ec2 +github.com/terraform-providers/terraform-provider-openstack,v1.15.0,h1:adpjqej+F8BAX9dHmuPF47sUIkgifeqBu6p7iCsyj0Y=,9c7419845747d0c4e3a9432f50788d8adec7ed6fca93ec9ffbf99e8c8b1cf0c3 +github.com/testcontainers/testcontainers-go,v0.0.8,h1:71E+jJpE9dSgydCfn5aWESVM7+l8giw/DBWaTy35TTU=,bceec8989a3beb9f14802c13c496c9158509f6b4cee6f855c0fb06b01e7da150 +github.com/tevino/abool,v0.0.0-20170917061928-9b9efcf221b5,h1:hNna6Fi0eP1f2sMBe/rJicDmaHmoXGe1Ta84FPYHLuE=,924168edd97fe37d4af80990d69c1d11d06b8e9236ebae65b9b68ba0261baaf1 +github.com/tgulacsi/picago,v0.0.0-20171229130838-9e1ac2306c70,h1:elvpffAnrLcWnsunBkvTwxr+Q79bPSNT1+2/pOFkCj0=,68e0cb434718215eae670723ce9327ec16d462a6403007ca22b6af71346445c5 +github.com/thanos-io/thanos,v0.3.2,h1:gNWga6sqv5kZp6ltaA7oUIFj+tTG2ohq4W9SQ4YU6ds=,99491658e5ed421ba1563818dd7c01034803fa1e0c4e5d7c28b06f3d3ed2a570 +github.com/theplant/cldr,v0.0.0-20190423050709-9f76f7ce4ee8,h1:di0cR5qqo2DllBMwmP75kZpUX6dAXhsn1O2dshQfMaA=,214ea2cc1e66f278928d0b5b1b40a3e12358b7a71e0fa6d6ea606c4d687e8eef +github.com/theupdateframework/notary,v0.6.1,h1:7wshjstgS9x9F5LuB1L5mBI2xNMObWqjz+cjWoom6l0=,f921dfb3d54538118367d9018d9abacc3c0c026951442140d669443977180b66 +github.com/thoj/go-ircevent,v0.0.0-20180816043103-14f3614f28c3,h1:389FrrKIAlxqQMTscCQ7VH3JAVuxb/pe53v2LBiA7z8=,32edd7a9e219bdff36d2aac0c6c5f3ac982c2daf4869e6e0718e917efb23b3de 
+github.com/tiancaiamao/appdash,v0.0.0-20181126055449-889f96f722a2,h1:mbAskLJ0oJfDRtkanvQPiooDH8HvJ2FBh+iKT/OmiQQ=,a9961e6079339aec983f97fdb39d5d7258bf8d2031da68482e58e17b27a93a78 +github.com/tidwall/gjson,v1.3.3,h1:wM/XREVc9c0LbRLcNMgVcGpI16r0pbbTJpltR4jJjh0=,17da724ffc86cfb3132bd9c7ac3eb860ca43a2748be519a59aa50b436c147bc6 +github.com/tidwall/match,v1.0.1,h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=,a1b9d52b9a4c7574f46068665279522f2084be26bac71594630786f6ee9a70f2 +github.com/tidwall/pretty,v1.0.0,h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=,3b25a1a0fe7688989326aaa1ca1c74c972b30152ef2a756fbf2d217a827fc07d +github.com/tidwall/sjson,v1.0.4,h1:UcdIRXff12Lpnu3OLtZvnc03g4vH2suXDXhBwBqmzYg=,cb47595016d45d72e6ee0f5585a86247aaeb93d9efa74e07676d32f60e8a7398 +github.com/tildeleb/cuckoo,v0.0.0-20190627040100-71059d5a2b62,h1:rXSNik45VDd1hfRLUAZwDLCY0FWvn2KlCeXjbd1yAI0=,f81f44544ec771ab630ddd5d65f4735ba2acc7619e41ccbc4bfad2473c21dc2f +github.com/timewasted/linode,v0.0.0-20160829202747-37e84520dcf7,h1:CpHxIaZzVy26GqJn8ptRyto8fuoYOd1v0fXm9bG3wQ8=,9a3190b3751964a3d47449265d48e2d3a76b23c66a7cb402cc9bdf3d732d82b4 +github.com/tinylib/msgp,v1.1.0,h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU=,61bd58489c555b30abffbe1175565b6f8460583349118e9ee12025fd17b67ea4 +github.com/tj/assert,v0.0.0-20171129193455-018094318fb0,h1:Rw8kxzWo1mr6FSaYXjQELRe88y2KdfynXdnK72rdjtA=,59a81d1883aac9635ac15d8a6a6e0630cf0a4122328116f921289dab840374b7 +github.com/tj/cobra,v0.0.0-20160702192511-5e2db986a612,h1:eiUtRvCN5HSnOg9AyX5z5od5VWy/ukyJ2oTboInm9MM=,493ac2ac61730652fcdd0b9b4c1e0c63855666df7fcaa02821b63982a5a7ccdf +github.com/tj/go-elastic,v0.0.0-20171221160941-36157cbbebc2,h1:eGaGNxrtoZf/mBURsnNQKDR7u50Klgcf2eFDQEnc8Bc=,a0df933432e9c7ec276cbc0edbb941375726cf5a39c663aafe0e945f9ba3079f +github.com/tj/go-kinesis,v0.0.0-20171128231115-08b17f58cb1b,h1:m74UWYy+HBs+jMFR9mdZU6shPewugMyH5+GV6LNgW8w=,0885f4631d33a20b5447ebbe12a0d23eb5ea3394de4bbc849cfe54ad19cadb2a 
+github.com/tj/go-spin,v1.1.0,h1:lhdWZsvImxvZ3q1C5OIB7d72DuOwP4O2NdBg9PyzNds=,060d09c35b1db5992747cde71ccbdaefe596ada06a6fe146e0ef10dc67d817dd +github.com/tj/pflag,v0.0.0-20160702191705-e367e44eec04,h1:RAPJe7XUQhTjVUKvYegzhXnWkJd/1daXdoiXjvkSURU=,2156357bb17b30ccb893b8f7013168c85c1eb265b7156aca845d06fb35805257 +github.com/tjfoc/gmsm,v1.0.1,h1:R11HlqhXkDospckjZEihx9SW/2VW0RgdwrykyWMFOQU=,f8fe3c4d02f0dc90fd873278957d57c4c45f1c53b1fee3969216b67844efabb1 +github.com/tmc/grpc-websocket-proxy,v0.0.0-20190109142713-0ad062ec5ee5,h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=,dadf62266d259ffb6aa1d707892b97fa36c3f39df5cae99f54d3ef7682995376 +github.com/tomnomnom/linkheader,v0.0.0-20180905144013-02ca5825eb80,h1:nrZ3ySNYwJbSpD6ce9duiP+QkD3JuLCcWkdaehUS/3Y=,558504ea96d4312be0fe5faa6de13fb6abd8f1b2ac154123c67b623a5f219cdb +github.com/toqueteos/webbrowser,v1.2.0,h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ=,1227d3ebeab16d8232a304a10b087984a96ad30f7439b6687bab2f5747d308cf +github.com/transip/gotransip,v0.0.0-20190812104329-6d8d9179b66f,h1:clyOmELPZd2LuFEyuo1mP6RXpbAW75PwD+RfDj4kBm0=,38b593cbdeb59e64d042533c1ce6196d89662de3282373de0d3c0749fe4c4856 +github.com/trivago/tgo,v1.0.5,h1:ihzy8zFF/LPsd8oxsjYOE8CmyOTNViyFCy0EaFreUIk=,06dc60662735374365cd525e2f4f4d1580f348125546e1f3e0d92d2deca4fa9a +github.com/tstranex/u2f,v1.0.0,h1:HhJkSzDDlVSVIVt7pDJwCHQj67k7A5EeBgPmeD+pVsQ=,325e3db32035ce38a5981bfaa35fb6d9b5cb4b960cfa0285b92448d21d29f379 +github.com/tsuru/config,v0.0.0-20180418191556-87403ee7da02,h1:mHuZ6JOixltE9fJmS+W1xLi4t/uDuR6Nl7w/e4uj0+I=,0255268934770d67b9d101a030ed7ed578938e346a279a273ab3983b0eee53fb +github.com/ttacon/chalk,v0.0.0-20160626202418-22c06c80ed31,h1:OXcKh35JaYsGMRzpvFkLv/MEyPuL49CThT1pZ8aSml4=,325521131515e4840e0083bc62cd9553da0b8d2480820f7e92ca89ae324f4c23 +github.com/tus/tusd,v1.0.1,h1:jb0SDf8zCUvlWv5SuHalOuRn684aW6WIvhfWRHC/XB8=,9a91d59123262b9bb1c43d39588a26d7560513b9e3c18254cd321890e8975083 
+github.com/tv42/httpunix,v0.0.0-20150427012821-b75d8614f926,h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=,8246ebc82e0d9d3142f5aeb50d4fcd67f3f435fb5464120c356a4e5d57ef4aa0 +github.com/twinj/uuid,v1.0.0,h1:fzz7COZnDrXGTAOHGuUGYd6sG+JMq+AoE7+Jlu0przk=,842c314d6d2ef9cb95b0f3f1b4cf998715680e836cfab8c2a7f75e351765a345 +github.com/twitchtv/twirp,v5.8.0+incompatible,h1:DTfGS9u/jHbo34cBB+qhzVHRaAq+tRois71j8pvjQ5M=,a4137792083eedd9ac04e88918d8952a841120b11e71161d2d444065b8e65d79 +github.com/tyler-smith/go-bip39,v1.0.2,h1:+t3w+KwLXO6154GNJY+qUtIxLTmFjfUmpguQT1OlOT8=,6173ded455fa17cddd889bf3bc123be2343a09aeb60f83e2b63823dd9ce94e09 +github.com/tylerb/graceful,v1.2.15,h1:B0x01Y8fsJpogzZTkDg6BDi6eMf03s01lEKGdrv83oA=,770bd36defb9463ebe8b190f508e47c37bbb6bedf23a32c675066f8edbd7aa8d +github.com/u-root/dhcp4,v0.0.0-20190206235119-03363dc71ec8,h1:F9cRXeXZ95CzG7352mm+yfgloHFrjpr1L+CQFiCH/iU=,1db09816d65071cfc5dbf25d5dbf11b2b48c3442495d30777cc0714bb4cf4163 +github.com/u-root/u-root,v6.0.0+incompatible,h1:YqPGmRoRyYmeg17KIWFRSyVq6LX5T6GSzawyA6wG6EE=,f3ec29d4b285e50d7b3116e121caca0d722535346a0ddf189d4c7d8e7e0a07d3 +github.com/uber-go/atomic,v1.4.0,h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o=,f380292d46ebec89bf53939e4d7d19d617327cbcdf2978e30e6c39bc77df5e73 +github.com/uber/jaeger-client-go,v2.19.0+incompatible,h1:pbwbYfHUoaase0oPQOdZ1GcaUjImYGimUXSQ/+8+Z8Q=,d4928d51ce4440c825df67b4a54f851ead075701e67ece4b07fbc5c5857c091c +github.com/uber/jaeger-lib,v2.2.0+incompatible,h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=,496f63f6df32c28ceb6574959c70969da2b609abc8f9f3b3a709466f862054bf +github.com/uber/tchannel-go,v1.16.0,h1:B7dirDs15/vJJYDeoHpv3xaEUjuRZ38Rvt1qq9g7pSo=,64a37a5e89dd111ab943d94a1670f9addc0d2d41d34d630c95b0a756df916e01 +github.com/ucloud/ucloud-sdk-go,v0.8.7,h1:BmXOb5RivI0Uu4oZRpjI6SQ9/y7n/H9wxTGR1txIE8o=,d94766624c6f676880de354d4ed5c62c9ee7755c3d59cdf106ac0f5a070c0ece 
+github.com/ugorji/go,v1.1.7,h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=,d02959e71c59b273d5b099697c058426941a862feef66c191c63e2934db7a2ff +github.com/ugorji/go/codec,v1.1.7,h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=,8d482061c55b4c4fbf78de9fbf98a8d1b295f5904769679c73a2dc0b06a1a102 +github.com/ulikunitz/xz,v0.5.6,h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8=,19ebb331e7ae7a542ed58597d13ab703fc423acf93a1e3c4db86476b0322049a +github.com/unixpickle/anyvec,v0.0.0-20170908190750-59aa66ba0472,h1:eVBSKiY98Zth6cEYVzeu0CYagakYqbSWgpWqjZFiUvI=,4159f95762f7a99ee540397e78c7a60da788e6775ad9eca7fc1bd07d332a88f1 +github.com/unixpickle/autofunc,v0.0.0-20170112172612-f27a3f82164a,h1:ZUrHljv3rPkFyTYzUmBH8gBFjDwCIHc4a2DdPCWRjl0=,b39c092ab522c2ca3e8889dfbff281223628c08590b361242e72cc29015da9df +github.com/unixpickle/essentials,v0.0.0-20180916162721-ae02bc395f1d,h1:mRwAxGRBEFcoKSWDoX5CROMJo6xmXBh4rNqOmyhpRi0=,7aa26b2cbcbac91669e88903f1e05b7696b32a6d8194d66c0fe7d93c613c2f5f +github.com/unixpickle/num-analysis,v0.0.0-20161229165253-c45203c63047,h1:gipJz9DZGU3fgBjoaiNg+5CG9UdE7MmlBvSwNp1ulnY=,c1dac9bfeb72d39bb0b445f0f0b2af61753e5b11ff66e69bc196886189b7d50a +github.com/unixpickle/serializer,v0.0.0-20170723202158-c6c092dc55bb,h1:kdurEYFZ2P58xnfWtmxKWkVtFPyK80BMIaJ2zW5uskY=,2cbf6cce1b2a57307c2c675a283ce9b46adcb9d18c3a9317d3ee20772175ae40 +github.com/unknwon/com,v1.0.1,h1:3d1LTxD+Lnf3soQiD4Cp/0BRB+Rsa/+RTvz8GMMzIXs=,f6264780f210f130a0edeafe4ffb0753c64b5168771f2d6cd1613999a7b79cd1 +github.com/unrolled/render,v1.0.1,h1:VDDnQQVfBMsOsp3VaCJszSO0nkBIVEYoPWeRThk9spY=,5b0ace5c3798f8989322a32b75c3eeabce7f6568533f808065cacf92425dd867 +github.com/unrolled/secure,v0.0.0-20190103195806-76e6d4e9b90c,h1:ZY4dowVsuIAQtXXwKJ9ezfonDQ2YT7pcXRpPF2iAy3Y=,1aba4f13fe4199198f9b59bbfd337773d049bad06f68360483a5f4c5431bdce4 +github.com/urfave/cli,v1.22.1,h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=,116fc1fba7db091617cd47c2b83c78d22489deeaf8390a6d3509da7fc9217d57 
+github.com/urfave/cli/v2,v2.0.0-alpha.2,h1:2OVOKijPPhkA1cJA5SABACE8TT3Cwx9T0N6VtI8LJSI=,57250f97530fcb6fef7abc87cde3fbaf11ea45830adf98e3f1c986e2674e3b5f +github.com/urfave/negroni,v1.0.0,h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc=,7b50615961d34d748866565b8885edd7013e33812acdbaed47502d7cc73a4bbd +github.com/valyala/bytebufferpool,v1.0.0,h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=,7f59f32c568539afee9a21a665a4156962b019beaac8404e26ba37af056b4f1e +github.com/valyala/fasthttp,v1.6.0,h1:uWF8lgKmeaIewWVPwi4GRq2P6+R46IgYZdxWtM+GtEY=,b15a953ed5395599871097c94977d21c026205e6ca7ad6e340cd595096d5840e +github.com/valyala/fastrand,v1.0.0,h1:LUKT9aKer2dVQNUi3waewTbKV+7H17kvWFNKs2ObdkI=,ed2166483141b4f3d59ee07975a5d91990e4c17f36c919565b8063c0cb02f7ed +github.com/valyala/fasttemplate,v1.0.1,h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8=,b4d9f77c6c15a0404952925ad59b759102c0ff48426b6fc88d6bfd347fe243b8 +github.com/valyala/tcplisten,v0.0.0-20161114210144-ceec8f93295a,h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc=,07066d5b879a94d6bc1feed20ad4003c62865975dd1f4c062673178be406206a +github.com/vbatts/tar-split,v0.11.1,h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=,73136db95ff35c2547c49be43727aa3f67da2d8837e1475954db910b41b1fa18 +github.com/veandco/go-sdl2,v0.3.3,h1:4/TirgB2MQ7oww3pM3Yfgf1YbChMlAQAmiCPe5koK0I=,d19e162daa2a6cc72569eb052adfd3d757fd069ee461a64803e9e8f2e9bb87a7 +github.com/vektah/dataloaden,v0.2.1-0.20190515034641-a19b9a6e7c9e,h1:+w0Zm/9gaWpEAyDlU1eKOuk5twTjAjuevXqcJJw8hrg=,92fe72fa4962bb2f375fae83f7a44a804e398ec08818f7d018724e0a23394ae3 +github.com/vektah/gqlparser,v1.1.2,h1:ZsyLGn7/7jDNI+y4SEhI4yAxRChlv15pUHMjijT+e68=,cdd0119855b98641e7af60dce5b2848b31f8ef03dfcf097c06912309b86fc97c +github.com/viant/assertly,v0.4.8,h1:5x1GzBaRteIwTr5RAGFVG14uNeRFxVNbXPWrK2qAgpc=,253a5e53bb09bf94be7131d5034a6ba19c6eb1f9b8c7fa66182d577bd7b2d6cd 
+github.com/viant/toolbox,v0.24.0,h1:6TteTDQ68CjgcCe8wH3D3ZhUQQOJXMTbj/D9rkk2a1k=,d6773a06b59de043eff2003bb97567056a1910eb0fd514f5503873b8f23309f4 +github.com/vimeo/go-util,v1.2.0,h1:YHzwOnM+V2tc6r67K9fXpYqUiRwXp0TgFKuyj+A5bsg=,85e52371bcf8299d47d8242546bc06e9e0c9c555b719008096889cd081a69173 +github.com/vincent-petithory/dataurl,v0.0.0-20160330182126-9a301d65acbb,h1:lyL3z7vYwTWXf4/bI+A01+cCSnfhKIBhy+SQ46Z/ml8=,5d5fa46ce0f88ba0734f52d0b0bcaa8a427770ef13cd1bfd7995e4d2a8439abb +github.com/vishvananda/netlink,v1.0.0,h1:bqNY2lgheFIu1meHUFSH3d7vG93AFyqg3oGbJCOJgSM=,6fb7184280eb1321e1857171862bdb624eae29876496f1cb56932fbc0064020f +github.com/vishvananda/netns,v0.0.0-20190625233234-7109fa855b0f,h1:nBX3nTcmxEtHSERBJaIo1Qa26VwRaopnZmfDQUXsF4I=,a99a67e03a35e1d02d1a17900185a1c38c513a79b2b325ad826553dc078a90de +github.com/vivint/infectious,v0.0.0-20190108171102-2455b059135b,h1:dLkqBELopfQNhe8S9ucnSf+HhiUCgK/hPIjVG0f9GlY=,f5d948bf34ac58786ad20df4fd6e99f990f72458dd2825558bf2e3c871f3f37a +github.com/vmihailenco/msgpack,v4.0.4+incompatible,h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI=,918f7dd7883105b9c55728c704a3bc54c80568b2b09583890b51508c03391356 +github.com/vmware/govmomi,v0.21.0,h1:jc8uMuxpcV2xMAA/cnEDlnsIjvqcMra5Y8onh/U3VuY=,75ca40f34da851e95d7e63685adbaf1ec5c7f659fb0b47096c85da44f098c4a3 +github.com/vmware/vic,v1.5.4,h1:y546pkye0aes2j2h2n6fWz++v8WxMZTLFl1mLOMzqYQ=,2a6f0c20be8acb3b467c78d3de18009ccd0ab2429a997266089d14341e43115c +github.com/vmware/vmw-guestinfo,v0.0.0-20170707015358-25eff159a728,h1:sH9mEk+flyDxiUa5BuPiuhDETMbzrt9A20I2wktMvRQ=,29c73ba44ac315461640797d6ebfda2d906c28dbe21c20656c6e5fa1f515f220 +github.com/vulcand/oxy,v1.0.0,h1:7vL5/pjDFzHGbtBEhmlHITUi6KLH4xXTDF33/wrdRKw=,148843b55ed01813f8920aab70a799aa10cfdccc0bbd55e270cde78e1ad23b88 +github.com/vulcand/predicate,v1.1.0,h1:Gq/uWopa4rx/tnZu2opOSBqHK63Yqlou/SzrbwdJiNg=,3dd716f2436651429ce7f5fdd59fa1a9944ab4d57fdbae5fef00ef01baf7c4be 
+github.com/vultr/govultr,v0.1.4,h1:UnNMixYFVO0p80itc8PcweoVENyo1PasfvwKhoasR9U=,7281fa718c076b84610b155fb0dec34503ea1ae5f2930cc714ed7772e475bb08 +github.com/warpfork/go-wish,v0.0.0-20190328234359-8b3e70f8e830,h1:8kxMKmKzXXL4Ru1nyhvdms/JjWt+3YLpvRb/bAjO/y0=,77a9eefa3edf38cb90eba443f282bd73ffcb6f1b87aebe8f891d8c8b38124d95 +github.com/weaveworks/common,v0.0.0-20190917143411-a2b2a6303c33,h1:UAh7j96ZXQID3shhQsrtfJsrQ2uO3tyRxCuXvh+kipw=,e1ceacd5b24c6414ae664f4b09e295dc25e48d3a1dcd5100d8c98dd405a0d162 +github.com/weaveworks/mesh,v0.0.0-20191031093817-8e3db2fe8f47,h1:RUdrWPah1Xu+efIGqN0YGTv7gQeyR5qwBq9uL4HloKw=,05f5d769f7ff6af1c098f0e42983227d6a86f5d8d1f8453cb0566450cad49358 +github.com/wellington/go-libsass,v0.9.3-0.20181113175235-c63644206701,h1:9vG9vvVNVupO4Y7uwFkRgIMNe9rdaJMCINDe8vhAhLo=,2ae95ed360950fab28eff3bedf1c1a6f5f81b73078000d3a0bd67443d38df87f +github.com/wendal/errors,v0.0.0-20130201093226-f66c77a7882b,h1:0Ve0/CCjiAiyKddUMUn3RwIGlq2iTW4GuVzyoKBYO/8=,f7722558c5c450fa02e800ce7bf4d0bc1d2a0e1696d3fc50ff1489bcd02ff3b3 +github.com/weppos/publicsuffix-go,v0.5.0,h1:rutRtjBJViU/YjcI5d80t4JAVvDltS6bciJg2K1HrLU=,bd8365c8501b307a1fbd62501bc3332ff97721bef51921a99e67a3f8b96318fc +github.com/whyrusleeping/cbor-gen,v0.0.0-20190910031516-c1cbffdb01bb,h1:8yBVx6dgk1GfkiWOQ+RbeDDBLCOZxOtmZ949O2uj5H4=,9d5ab8362eaffa07bc2700d9a9e967c1ecf394e3233a6e7141efb48970bfd4e5 +github.com/whyrusleeping/chunker,v0.0.0-20181014151217-fe64bd25879f,h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E=,b28fdb03b69be216c423967e9dee2481aa10c3e39c71d3bfc8911940dadb26a9 +github.com/whyrusleeping/go-keyspace,v0.0.0-20160322163242-5b898ac5add1,h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=,9416f8227e6c516294b9b938fcf2347bebe2cdab4377454150ba60dcd86c2990 +github.com/whyrusleeping/go-logging,v0.0.0-20170515211332-0457bb6b88fc,h1:9lDbC6Rz4bwmou+oE6Dt4Cb2BGMur5eR/GYptkKUVHo=,125b1a836936436354791583be42ae19f7c04a636b5c0c96135645d52aaa72ea 
+github.com/whyrusleeping/go-notifier,v0.0.0-20170827234753-097c5d47330f,h1:M/lL30eFZTKnomXY6huvM6G0+gVquFNf6mxghaWlFUg=,08dddb594554c3b35791893207e66dd3c04e4da24d0e0df001bb185f97dec6cc +github.com/whyrusleeping/go-smux-multiplex,v3.0.16+incompatible,h1:iqksILj8STw03EJQe7Laj4ubnw+ojOyik18cd5vPL1o=,e16e3da58e283e71955b21725c384d180a2999bc2a50cb0490b5e2f7a74b5fc6 +github.com/whyrusleeping/go-smux-multistream,v2.0.2+incompatible,h1:BdYHctE9HJZLquG9tpTdwWcbG4FaX6tVKPGjCGgiVxo=,9a783c4a1b69f6002ac4e0af684f4d5c4d360b7107fbbdde48faf38f7e23e998 +github.com/whyrusleeping/go-smux-yamux,v2.0.9+incompatible,h1:nVkExQ7pYlN9e45LcqTCOiDD0904fjtm0flnHZGbXkw=,3f44f41fc7b133085bba08d52e7615e9a8eb92f55fde6a07d3cd7804117e9985 +github.com/whyrusleeping/mafmt,v1.2.8,h1:TCghSl5kkwEE0j+sU/gudyhVMRlpBin8fMBBHg59EbA=,e5d5783d2bc35f7c23f2034fd52c5750ad0590773115c10b4e15360575322c69 +github.com/whyrusleeping/mdns,v0.0.0-20180901202407-ef14215e6b30,h1:nMCC9Pwz1pxfC1Y6mYncdk+kq8d5aLx0Q+/gyZGE44M=,fc2e4d2365ba40d52d03126ea490e712762b4ad398c8d6adb2a1a08699a10eb1 +github.com/whyrusleeping/multiaddr-filter,v0.0.0-20160516205228-e903e4adabd7,h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds=,14e8963464dab0f6277f596985be5ea419bc3bae8bf4f4f139cce456e1815faf +github.com/whyrusleeping/timecache,v0.0.0-20160911033111-cfcb2f1abfee,h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow=,c33dfc5ac935582261bf5ddbab31bb07febc471a9c26eb3e1a895eddd574d3e8 +github.com/whyrusleeping/yamux,v1.1.5,h1:4CK3aUUJQu0qpKZv5gEWJjNOQtdbdDhVVS6PJ+HimdE=,658f9e704cbe1cac295ed34471bb096a4d2713f69ffbb8140fbf50b8ff6420e0 +github.com/willf/bitset,v1.1.9,h1:GBtFynGY9ZWZmEC9sWuu41/7VBXPFCOAbCbqTflOg9c=,ddd687772ccfd6774e55e7e9d9e71dab86d85a64b98ce1d864d9661f5b0767e4 +github.com/x-cray/logrus-prefixed-formatter,v0.5.2,h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg=,00719eeb4f9eadb9431dd9f763fa4013dc52b37a8803a973c6d0c1ce8281e14b 
+github.com/xanzy/go-cloudstack,v0.0.0-20190526095453-42f262b63ed0,h1:NJrcIkdzq0C3I8ypAZwFE9RHtGbfp+mJvqIcoFATZuk=,34b46eae351e4916015ce2a43ed501403937e4079cf69dae98a9544bfeec8092 +github.com/xanzy/go-gitlab,v0.21.0,h1:Ru55sR4TBoDNsAKwCOpzeaGtbiWj7xTksVmzBJbLu6c=,12ae6fa35c19fffc31d1fa2891f386875caac8077d19f3f09f49b5e2e51b1755 +github.com/xanzy/ssh-agent,v0.2.1,h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=,7011c1771f8ad9b65795f8a85113e4518c9a2c7493029c4c988bc802b63d9e28 +github.com/xdg/scram,v0.0.0-20180814205039-7eeb5667e42c,h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=,33884d438b686676ceaa2a439634a108f7fe763ce974342d2aa811c22b34112c +github.com/xdg/stringprep,v1.0.0,h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=,2b262e4e8e9655100c98e2b7e75b517e3e83e2155818174c63ea09d3cce22721 +github.com/xeipuuv/gojsonpointer,v0.0.0-20180127040702-4e3ac2762d5f,h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=,5b1a4bcc8e003f214c92b3fa52959d9eb0e3af1c0c529efa55815db951146e48 +github.com/xeipuuv/gojsonreference,v0.0.0-20180127040603-bd5ef7bd5415,h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=,7ec98f4df894413f4dc58c8df330ca8b24ff425b05a8e1074c3028c99f7e45e7 +github.com/xeipuuv/gojsonschema,v1.2.0,h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=,55c8ce068257aa0d263aad7470113dafcd50f955ee754fc853c2fdcd31ad096f +github.com/xenolf/lego,v2.7.2+incompatible,h1:aGxxYqhnQLQ71HsvEAjJVw6ao14APwPpRk0mpFroPXk=,25c2495e4fc2f5fea8c70b442add86c049f2f8810235e1ee94f29d8e0267ad2c +github.com/xeonx/timeago,v1.0.0-rc4,h1:9rRzv48GlJC0vm+iBpLcWAr8YbETyN9Vij+7h2ammz4=,b06f4ede554b35387394827ca0350b628a72228a8002653817826991867e1fdd +github.com/xi2/xz,v0.0.0-20171230120015-48954b6210f8,h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo=,1ffe8f24af5118966084d41eca2c9bee7a831a07deb4356e4d707d208da22e8e +github.com/xiang90/probing,v0.0.0-20190116061207-43a291ad63a2,h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=,437bdc666239fda4581b592b068001f08269c68c70699a721bff9334412d4181 
+github.com/xlab/treeprint,v0.0.0-20181112141820-a009c3971eca,h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI=,d14ebea967caa835f25e4c3980c60719e07f0e36375b74dc48928613fca5b2ff +github.com/xo/dburl,v0.0.0-20191005012637-293c3298d6c0,h1:6DtWz8hNS4qbq0OCRPhdBMG9E2qKTSDKlwnP3dmZvuA=,1fb150cf2144a4b7a571360af52d9b22dfe53e2ba9ab3e56584fdb0eb282d315 +github.com/xordataexchange/crypt,v0.0.3-0.20170626215501-b2862e3d0a77,h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow=,46dc29ef77d77a2bc3e7bd70c94dbaeec0062dd3bd6fcacbaab785c15dcd625b +github.com/xtaci/kcp-go,v5.4.5+incompatible,h1:CdPonwNu3RKu7HcXSno5r0GXfTViDY2iFV2RDOao/4U=,98e77493d94b33bfec990bd5791d15a09add1a0ba2f3281f26bdc98c1815d9a7 +github.com/xtaci/lossyconn,v0.0.0-20190602105132-8df528c0c9ae,h1:J0GxkO96kL4WF+AIT3M4mfUVinOCPgf2uUWYFUzN0sM=,75cc8c3e14cf812dcc56a1e8cecafd8affd9b2843d39540ab67929f7ce3d1abc +github.com/xtgo/set,v1.0.0,h1:6BCNBRv3ORNDQ7fyoJXRv+tstJz3m1JVFQErfeZz2pY=,6b70026a5ea66bc0be7efb2247afa53ae970b9535c7a8541795750ef9b640217 +github.com/yalp/jsonpath,v0.0.0-20150812003900-31a79c7593bb,h1:06WAhQa+mYv7BiOk13B/ywyTlkoE/S7uu6TBKU6FHnE=,d2041be5f19a3dbcd4b384dbbf5782cdb96d80ad9c60c8c9b887f2c5170cb25f +github.com/yandex-cloud/go-genproto,v0.0.0-20190928220815-a36c849d0fc1,h1:GDyRNvsi/tOZj1ssPkk+kocO1djpbmLSpDKg4XeRPy4=,5502c680146902518514935af5ab5b554a80f5ebe2e79d491db3120911f5498d +github.com/yandex-cloud/go-sdk,v0.0.0-20190916101744-c781afa45829,h1:2FGwbx03GpP1Ulzg/L46tSoKh9t4yg8BhMKQl/Ff1x8=,4b375b871ce7501943a26ba02c348ad4fdf2cb112520513628566a15a98a4796 +github.com/yohcop/openid-go,v0.0.0-20160914080427-2c050d2dae53,h1:HsIQ6yAjfjQ3IxPGrTusxp6Qxn92gNVq2x5CbvQvx3w=,8c4f676193e3aa5ec012e0661d0e552a3e5d5d96086a73901dcfbf0bd4a6d2e9 +github.com/yookoala/realpath,v1.0.0,h1:7OA9pj4FZd+oZDsyvXWQvjn5oBdcHRTV44PpdMSuImQ=,9fe8b06f8efabb7df08608f18edc77d284e04ad06d490af9f55196e4184c339f 
+github.com/yosssi/ace,v0.0.5,h1:tUkIP/BLdKqrlrPwcmH0shwEEhTRHoGnc1wFIWmaBUA=,96157dbef72f2f69a900e09b3e58093ee24f7df341ac287bddfb15f8c3f530db +github.com/yosssi/gmq,v0.0.1,h1:GhlDVaAQoi3Mvjul/qJXXGfL4JBeE0GQwbWp3eIsja8=,d06bbe96ba0e8c3c79bfb0b9191a02a19d8d3d3c181eba62df6d94c0602c784e +github.com/youtube/vitess,v2.1.1+incompatible,h1:SE+P7DNX/jw5RHFs5CHRhZQjq402EJFCD33JhzQMdDw=,2eb3c516c8b24a72b8cb14f76f39562638acf0cd7fc3858002163d28047607f2 +github.com/yudai/gojsondiff,v0.0.0-20170107030110-7b1b7adf999d,h1:yJIizrfO599ot2kQ6Af1enICnwBD3XoxgX3MrMwot2M=,3f61230fe62a6fe2e93a75264d176bda3f62323063c1e9bfb87c0be31ac5d269 +github.com/yudai/golcs,v0.0.0-20150405163532-d1c525dea8ce,h1:888GrqRxabUce7lj4OaoShPxodm3kXOMpSa85wdYzfY=,ff1f3899e710574a08aaa51051a36c523ecf850180ad0564d55eec611c3cff72 +github.com/yudai/pp,v2.0.1+incompatible,h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI=,ecfda4152182e295f2b21a7b2726e2865a9415fc135a955ce42e039db29e7a20 +github.com/yuin/gopher-lua,v0.0.0-20190514113301-1cd887cd7036,h1:1b6PAtenNyhsmo/NKXVe34h7JEZKva1YB/ne7K7mqKM=,fd157d5d26c336c44837eceef5c6fc4b442a56b25931d4afae3c4080932a7aa7 +github.com/zach-klippenstein/goregen,v0.0.0-20160303162051-795b5e3961ea,h1:CyhwejzVGvZ3Q2PSbQ4NRRYn+ZWv5eS1vlaEusT+bAI=,6f523a11fcb80dca31c3bae99c8c4a59b7e5a4176e36cad0e3f1e64e1b9a7b11 +github.com/zclconf/go-cty,v1.1.0,h1:uJwc9HiBOCpoKIObTQaLR+tsEXx1HBHnOsOOpcdhZgw=,024660decfe11e74a9fab80f1447b79c61e328baf6418629a15c74e183b95e95 +github.com/zclconf/go-cty-yaml,v1.0.1,h1:up11wlgAaDvlAGENcFDnZgkn0qUJurso7k6EpURKNF8=,2502da37ac6d9105b07748c4252f970aa6a7ffc8929b92a0b85abb81b804e9b7 +github.com/zeebo/admission,v0.0.0-20180821192747-f24f2a94a40c,h1:WoYvMZp+keiJz+ZogLAhwsUZvWe81W+mCnpfdgEUOl4=,b62a80509cfa84e697b23dd6b1b314a264e6f68586661ecd84026625f7753cb1 +github.com/zeebo/assert,v1.0.0,h1:qw3LXzO7lbptWIQ6DsemJIUOoaqyKbgY3M8b8yvlaaY=,bb31d428cc59a322975ab6b5757832e62507655f3e2c467a88345b21d7431d98 
+github.com/zeebo/errs,v1.2.2,h1:5NFypMTuSdoySVTqlNs1dEoU21QVamMQJxW/Fii5O7g=,d2fa293e275c21bfb413e2968d79036931a55f503d8b62381563ed189b523cd2 +github.com/zeebo/float16,v0.1.0,h1:kRqxv5og6z1emEyz5FpW0/BVHe5VfxEAw6b1ljCZlUc=,ffc6b2a7bce5e37798bc3ac53448b6190039a77f2e7d589779680fbd3cb53a48 +github.com/zeebo/incenc,v0.0.0-20180505221441-0d92902eec54,h1:+cwNE5KJ3pika4HuzmDHkDlK5myo0G9Sv+eO7WWxnUQ=,141b997c5ece8f136f43644f5a2526305563128c4ecce280d9a54ce1ae506ba2 +github.com/zeebo/structs,v1.0.2,h1:kvcd7s2LqXuO9cdV5LqrGHCOAfCBXaZpKCA3jD9SJIc=,0495c69abfeb2ffa0911f4c44ba145d81b04ec76d2311e2eedfc2b3e2efd66c9 +github.com/zenazn/goji,v0.9.0,h1:RSQQAbXGArQ0dIDEq+PI6WqN6if+5KHu6x2Cx/GXLTQ=,0807a255d9d715d18427a6eedd8e4f5a22670b09e5f45fddd229c1ae38da25a9 +github.com/ziutek/mymysql,v1.5.4,h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=,1ea104186e0990a3d97a1e67fcd31177849c975de4abd9399270ab0a04c025de +github.com/zkfy/cron,v0.0.0-20170309132418-df38d32658d8,h1:jxPemXnLeekMXItoaw4jZtDfe8HmvFmviUm2L5tEBhE=,81a903448f6bc140e07bc4ff70762f0a46e750388f4b92f700d358331b1ca8d5 +github.com/zkfy/go-metrics,v0.0.0-20161128210544-1f30fe9094a5,h1:Rb2qQMbEon+BI3IXGh4eW3u/iTLPA3+Y6kNK+gHO32w=,07f9078cbc233559128dc4ae80d69505dd1a07d47d33135fc8f4969829fd6ee8 +github.com/zkfy/jwt-go,v3.0.0+incompatible,h1:5hZNIkrRRa0mrkRiXoPFdLJWpMDByIZ6VIbX9aWhwmk=,8306a4a65059e17be035dd47f45d83aac503c50c954716c83e481d0b6530aed6 +github.com/zkfy/log,v0.0.0-20180312054228-b2704c3ef896,h1:nktyhX5ycnu+WA489Ei7SUi00bF+LW8TF2N7se5gQ/o=,dd0acb5ccceb2225c89f0f50dc8eea9f1cae0971b731750ea7a1b186c194d9bc +github.com/zkfy/stompngo,v0.0.0-20170803022748-9378e70ca481,h1:dqbWcJVZJv06ZR7zK8yN9w8oNOHL23eylL4o9Xj9Zn0=,9e643fbfd166421cb186275742bafc663fc350da83e59e9d88c06feb12ec4462 +github.com/zmap/rc2,v0.0.0-20131011165748-24b9757f5521,h1:kKCF7VX/wTmdg2ZjEaqlq99Bjsoiz7vH6sFniF/vI4M=,fd70713ed40c95220e95c7c47f7e15051e8dc909d39253f403bb694f45fbe789 
+github.com/zmap/zcertificate,v0.0.0-20180516150559-0e3d58b1bac4,h1:17HHAgFKlLcZsDOjBOUrd5hDihb1ggf+1a5dTbkgkIY=,7dc2c0bedccfdeb9c42ef41ef502f404befa9ef073c35db3b15c99cae6697b41 +github.com/zmap/zcrypto,v0.0.0-20190729165852-9051775e6a2e,h1:mvOa4+/DXStR4ZXOks/UsjeFdn5O5JpLUtzqk9U8xXw=,871979cf16453ddb4db7f153f449af4e346f68b51355c74b5eee832225618ff0 +github.com/zmap/zlint,v0.0.0-20190806154020-fd021b4cfbeb,h1:vxqkjztXSaPVDc8FQCdHTaejm2x747f6yPbnu1h2xkg=,e62f5cd5f434d84f53d336261e3a6e50c8902152ce8f2f5ce918270d6d201cab +github.com/zondax/hid,v0.9.0,h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8=,9c72a6bdbf03d9465dfdf1ba876eabf5fa923d5bb9e726c9e4a994098dc9bd79 +github.com/zondax/ledger-go,v0.9.0,h1:oTrtFqPFA4VdCPRvqMaN45mQnJxkPc0JxoVZfCoUpjI=,6c6a7e036f9a621ce951939d7d13ae1f0c098f58829307c78f9312e02e78e438 +github.com/zquestz/grab,v0.0.0-20190224022517-abcee96e61b1,h1:1qKTeMTSIEvRIjvVYzgcRp0xVp0eoiRTTiHSncb5gD8=,4decd67f1252df4ee34968cb0cb4e7dc6010302b24ce8edd418f1c2520f1c351 +gitlab.com/NebulousLabs/errors,v0.0.0-20171229012116-7ead97ef90b8,h1:gZfMjx7Jr6N8b7iJO4eUjDsn6xJqoyXg8D+ogdoAfKY=,b355474f1a2ef2722ae450ef6df7209d223188ae413706be122b472fcc053c48 +gitlab.com/NebulousLabs/fastrand,v0.0.0-20181126182046-603482d69e40,h1:dizWJqTWjwyD8KGcMOwgrkqu1JIkofYgKkmDeNE7oAs=,a56acdda993c7a4795028fe38844d54de9b1877d22e8ae09f205e488ce2284bc +go.bug.st/serial.v1,v0.0.0-20180827123349-5f7892a7bb45,h1:mACY1anK6HNCZtm/DK2Rf2ZPHggVqeB0+7rY9Gl6wyI=,f0ea4cd4c51228f1a3cf14c6b92888169944f267e1ee778909512a4c8ac4762f +go.cryptoscope.co/luigi,v0.3.4,h1:eDrtCoUL5Vl2Atr5ty2dq0uFbzFCc6Pz1HEqU1e7I1I=,949612e92dcb2fc919e506740f36d0cfe0797c1f85579a98763aad0135a4580a +go.dedis.ch/fixbuf,v1.0.3,h1:hGcV9Cd/znUxlusJ64eAlExS+5cJDIyTyEG+otu5wQs=,dfa737543a5873b14cdfd0eec675c63044b16d3dbe481b2289c758ae4186ae95 +go.dedis.ch/kyber/v3,v3.0.8,h1:qnHzOBaxEO3+ZYuZAfwPTOPzX+F6QMmWGo8YJvENh68=,d69db17bd37bf14c4e508eb84974c3df9a82b8cb30b55ddc3ac0ee2784abcbac 
+go.dedis.ch/kyber/v4,v4.0.0-pre1,h1:1f5OPESkyxK6kPaCSV3J9BlpnoysIpbGLNujX9Ov8m4=,d082a41e2178f7e18c088e414e020928794245a9dae41d07da842ebb667a337e +go.dedis.ch/onet/v3,v3.0.26,h1:wQhVGB+SCdG7B0tbo6ZeZINQKWkU4u9TNMkGBH16EEM=,a41978897a3371f2eaaab5c84c354c95b4fdbd7b8207afa7c79f32b85f857d5d +go.elastic.co/apm,v1.5.0,h1:arba7i+CVc36Jptww3R1ttW+O10ydvnBtidyd85DLpg=,447a5954db3f7fc61575c83782be0b6d69e453f1e667b0534d3bf5336039238a +go.elastic.co/apm/module/apmhttp,v1.5.0,h1:sxntP97oENyWWi+6GAwXUo05oEpkwbiarZLqrzLRA4o=,1e6bc42b2e3ab10165036afd95a8a4d910acadce451c0b4e7c998cbb5c06da73 +go.elastic.co/apm/module/apmot,v1.5.0,h1:rPyHRI6Ooqjwny67au6e2eIxLZshqd7bJfAUpdgOw/4=,235fb0c1d0e107ffb7c5056e49226152063ac87ebc657428ea410d5170804d2e +go.elastic.co/fastjson,v1.0.0,h1:ooXV/ABvf+tBul26jcVViPT3sBir0PvXgibYB1IQQzg=,451e29b2854f9e09c58e3fe4c1b3a72d9b2ee293628ab4c4323e8192af015c6c +go.etcd.io/bbolt,v1.3.3,h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=,1ea076dbe18dabe78909e1fb7ec2954fc2d58cd72e7730ad69b35248a30049fd +go.etcd.io/etcd,v3.3.17+incompatible,h1:g8iRku1SID8QAW8cDlV0L/PkZlw63LSiYEHYHoE6j/s=,7bd292878f70e154a061ed6b85fc70502aa270fcf0072340cbde1a0cb35b0d2d +go.mongodb.org/mongo-driver,v1.1.2,h1:jxcFYjlkl8xaERsgLo+RNquI0epW6zuy/ZRQs6jnrFA=,6b3141ced32d7a41ebd0539df957b76331fc3efdca22eae68da54d41aad23fed +go.opencensus.io,v0.22.1,h1:8dP3SGL7MPB94crU3bEPplMPe83FI4EouesJUeFHv50=,b8d9a5fca5e714c4bf66f6497dd905992113cfd6aae948bb7fad5ce987a520ed +go.starlark.net,v0.0.0-20191021185836-28350e608555,h1:FhmD1D59MmncMfRVTRa889iERZG3jdaKj/1FtOQB1G0=,add124cd355e714f076a385eb3f2ddcfb8ce0c7c8e6611e2e03acc427a4c32bf +go.uber.org/atomic,v1.5.0,h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY=,7e32f8f75b2029aa53399c2cd6e581398ac4e971c17a763980377279ede95c77 +go.uber.org/automaxprocs,v1.2.0,h1:+RUihKM+nmYUoB9w0D0Ov5TJ2PpFO2FgenTxMJiZBZA=,4c7bf41eab5dd7781c69130aa37011427531dee231ffbdc3c9ed4267c06aa93c 
+go.uber.org/multierr,v1.3.0,h1:sFPn2GLc3poCkfrpIXGhBD2X0CMIo4Q/zSULXrj/+uc=,29b25df332dea2dbfaaa308013fc6d3673ecd3d9ee09c666c69df504533d0714 +go.uber.org/ratelimit,v0.1.0,h1:U2AruXqeTb4Eh9sYQSTrMhH8Cb7M0Ian2ibBOnBcnAw=,78f82854809625c784088b9dec5dfb4810fbbd09c24891b8aaf2c2679212dfd8 +go.uber.org/thriftrw,v1.20.2,h1:0JlCE7dOyWHEQdfDm0MWIbgTn6vXkiMA6LNIe8FQXjw=,148b93f97a6ab865e2dbe0eb09b9f9504248808efc437e20efc1bf9b7896de9a +go.uber.org/tools,v0.0.0-20190618225709-2cfd321de3ee,h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=,988dba9c5074080240d33d98e8ce511532f728698db7a9a4ac316c02c94030d6 +go.uber.org/zap,v1.12.0,h1:dySoUQPFBGj6xwjmBzageVL8jGi8uxc6bEmJQjA06bw=,d4b304046a3f9443e4abe217889b5b2a4ecef35d52f175bcacf2baff18646595 +go4.org,v0.0.0-20191010144846-132d2879e1e9,h1:zHLoVtbywceo2hE4Wqv8CmIufe7jDERQ2KJHZoSDfCU=,21811f50d48c55047df1d6bf68db778087afe9116f1f32faf79f8ca459d29d89 +gobot.io/x/gobot,v1.14.0,h1:IJv4A9f5/lUz4JQaS37UW8bRVl3lG+jCGUcNmJ2F0vE=,95ad64d1bf33ee46816b2c87edb10d7b3bfe118b6f7026bf4b5f762867d1e776 +gocloud.dev,v0.17.0,h1:UuDiCphYsiNhRNLtgHVL/eZheQeCt00hL3XjDfbt820=,0df8e26a2356735d596e8a3917ec4b69f61fb5e9f6f291b51f6145a51b646a9b +gocv.io/x/gocv,v0.21.0,h1:dVjagrupZrfCRY0qPEaYWgoNMRpBel6GYDH4mvQOK8Y=,9e1a70258d72b873d9605a2939b38f9e560650472d70b97f5dd0fc2657eaf35f +golang.org/x/arch,v0.0.0-20191101135251-a0d8588395bd,h1:e1iK2rWppIPlzzqtjXT/p6WR/+ritGZ8xkfL8uDZb0g=,daba41c9150ebf192ce54952d69ef12fe47c5c6250a33c01f0624befea35354e +golang.org/x/build,v0.0.0-20191031202223-0706ea4fce0c,h1:jjNoDZTS0vmbqBhqD5MPXauZW+kcGyflfDDFBNCPSVI=,a675f674bcee677f1dc9a15ca4d84bb2e842c29d745b165ba3e5423c09367d29 +golang.org/x/crypto,v0.0.0-20191029031824-8986dd9e96cf,h1:fnPsqIDRbCSgumaMCRpoIoF2s4qxv0xSSS0BVZUE/ss=,0a303100f9afba8628988bef45404b23c2e0c6aa73b5ad4ac9259af14a0e53ae +golang.org/x/exp,v0.0.0-20191030013958-a1ab85dbe136,h1:A1gGSx58LAGVHUUsOf7IiR0u8Xb6W51gRwfDBhkdcaw=,18ff05b39d29a3fd4c7f9071e7013264994ac18f7faa72f66b2f514fcdd141b0 
+golang.org/x/image,v0.0.0-20191009234506-e7c1f5e7dbb8,h1:hVwzHzIUGRjiF7EcUjqNxk3NCfkPxbDKRdnNE1Rpg0U=,aebca4c096dac7c20d9024b73bd0b4a87a85f4c6b50aae7615dec504c5f478c8 +golang.org/x/lint,v0.0.0-20190930215403-16217165b5de,h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=,91323fe1a77f13de722a0ce8efc5c5f2da4f26216d858acec64cb23c956fa163 +golang.org/x/mobile,v0.0.0-20191031020345-0945064e013a,h1:CrJ8+QyIm2tcw/zt9Rp/vGFsey+jndL1y5EnFwzgGOg=,5ee0c7eed83b64cc851d6ddb76346413d7c43213ea1241385b588c66e2169854 +golang.org/x/mod,v0.1.0,h1:sfUMP1Gu8qASkorDVjnMuvgJzwFbTZSeXFiGBYAVdl4=,e0d9b32f6f66103f777e8357b5b60f94a486330d46c6c8ea87789dab1a14cefa +golang.org/x/net,v0.0.0-20191101175033-0deb6923b6d9,h1:DPz9iiH3YoKiKhX/ijjoZvT0VFwK2c6CWYWQ7Zyr8TU=,b07094a5589a436fd98c6700cd5898f2094d9c02f8385f9331a7ace46305c7ae +golang.org/x/oauth2,v0.0.0-20190604053449-0f29369cfe45,h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=,f72b6c3c2b734ad053fadf5fa2adb2ad23024cfeacd567fec31a751526d1dfe0 +golang.org/x/perf,v0.0.0-20180704124530-6e6d33e29852,h1:xYq6+9AtI+xP3M4r0N1hCkHrInHDBohhquRgx9Kk6gI=,a2c7d02cc94c4ba767b6322f70ddcba4941cb5f60fed1bada3aa7a4d3a8128f1 +golang.org/x/sync,v0.0.0-20190911185100-cd5d95a43a6e,h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=,9c63fe51b0c533b258d3acc30d9319fe78679ce1a051109c9dea3105b93e2eef +golang.org/x/sys,v0.0.0-20191029155521-f43be2a4598c,h1:S/FtSvpNLtFBgjTqcKsRpsa6aVsI6iztaz1bQd9BJwE=,c5a8efb84e706e4ec1e1fa5cda44d1d571e8b3f46afe165d5e93b90e777a15fc +golang.org/x/text,v0.3.2,h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=,f755c0e7f4693f170e2f03c161f500b33f82accb8184a38dcfda63fed883f13c +golang.org/x/time,v0.0.0-20191024005414-555d28b269f0,h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=,e0ca5eceb4840bcc264237408ff8942044e19b503d6e8e5546ed9f7e1f4bf82e +golang.org/x/tools,v0.0.0-20191101200257-8dbcdeb83d3f,h1:+QO45yvqhfD79HVNFPAgvstYLFye8zA+rd0mHFsGV9s=,c3beb2acb726571e4cca3e922dd1eb037dcb6ef66ca562e9544716a53b6a1026 
+golang.org/x/xerrors,v0.0.0-20191011141410-1b5146add898,h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA=,5059c7b7e95f139b8c42d9001972fa5fa688b3581ef946c912c1dbc52415ff16 +gomodules.xyz/envconfig,v1.3.0,h1:w1laMNVtP05uOKqmRAY6Vx7HvfPL9yc388gcVtUiI/M=,ae5b4ee26eeb143c16bfb5316eb97e8ff4418bce379ae74e2a0bba367706d69c +gomodules.xyz/jsonpatch/v2,v2.0.1,h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0=,3c97ac5b7cfa3388f3dc157e20e6ad7b7a5789a4df1d5257a39589cf66edd462 +gonum.org/v1/gonum,v0.6.0,h1:DJy6UzXbahnGUf1ujUNkh/NEtK14qMo2nvlBPs4U5yw=,98857b431471c87facf3cd779eadc5d33760c9edee4b56a8228af4b383b90aa2 +gonum.org/v1/netlib,v0.0.0-20190331212654-76723241ea4e,h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts=,ed4dca5026c9ab5410d23bbe21c089433ca58a19bd2902311c6a91791142a687 +gonum.org/v1/plot,v0.0.0-20191004082913-159cd04f920c,h1:Ssc2Jy4xun3/JMt2asledr/xSPAvX7ZZ7HimX2Gwz1w=,9246b6f7a9299061b31d99e50b2ac2685853dc478a6c2c730fada016c7268ea1 +google.golang.org/api,v0.13.0,h1:Q3Ui3V3/CVinFWFiW39Iw0kMuVrRzYX0wN6OPFp0lTA=,4c853034281c673829b7a7f3e39c62640d01895d20a666f003f855ad5f55ec30 +google.golang.org/appengine,v1.6.5,h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=,24ddb4adf72189738dc8340b28f9493a385515e680eb0bfbffe08951412b6655 +google.golang.org/genproto,v0.0.0-20191028173616-919d9bdd9fe6,h1:UXl+Zk3jqqcbEVV7ace5lrt4YdA4tXiz3f/KbmD29Vo=,cb4eec9cf94aa450efbb0d131cf1484f6334f1e8c1e1475b76c3ab2dea76c72a +google.golang.org/grpc,v1.24.0,h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s=,eb4433685a85e20f934c2a98e35d104db2d77abe438a242d75d5aae9f78898fb +google.golang.org/protobuf,v0.0.0-20191101204728-ef19a2a99470,h1:wSgCzfaFwg6Q4Eh+T7XknFfgswhFaeYkEs8t5endA/c=,73a49a6e5fd3330de7364564ab0954146e25ad8bbdff0ea6180f8ace153b0c1b +gopkg.in/Acconut/lockfile.v1,v1.1.0,h1:c5AMZOxgM1y+Zl8eSbaCENzVYp/LCaWosbQSXzb3FVI=,66e89c98908e2b9295de1a32cdd90f626a2468c256ce6182d6339e6659548e71 
+gopkg.in/AlecAivazis/survey.v1,v1.8.7,h1:oBJqtgsyBLg9K5FK9twNUbcPnbCPoh+R9a+7nag3qJM=,c924df9f9d79f015cc619b1ecede52c92618c0ab8d020cd63e2c783f46b3907d +gopkg.in/DataDog/dd-trace-go.v1,v1.19.0,h1:aFSFd6oDMdvPYiToGqTv7/ERA6QrPhGaXSuueRCaM88=,f8eb14519d62c80eea88fca1daa69b274a0b492aa8b775890424b48d362c32b3 +gopkg.in/Shopify/sarama.v1,v1.18.0,h1:f9aTXuIEFEjVvLG9p+kMSk01dMfFumHsySRk1okTdqU=,beeb8546c4202289f282529630bc3db4452dc5f7eb69c3d8546196470c7d8be3 +gopkg.in/VividCortex/ewma.v1,v1.1.1,h1:tWHEKkKq802K/JT9RiqGCBU5fW3raAPnJGTE9ostZvg=,fe7800182ce944f2b28834d6cf60c620de0cbba1d691d9442f3473baf2a3d50d +gopkg.in/airbrake/gobrake.v2,v2.0.9,h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=,2db903664908e5a9afafefba94821b9579bbf271e2929c1f0b7b1fdd23f7bbcf +gopkg.in/alecthomas/gometalinter.v2,v2.0.12,h1:/xBWwtjmOmVxn8FXfIk9noV8m2E2Id9jFfUY/Mh9QAI=,7e6b56f4b985a08d11c1494f9dcc2b595676e787afe7a1caa9c522d41cab9487 +gopkg.in/alecthomas/kingpin.v2,v2.2.6,h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=,638080591aefe7d2642f2575b627d534c692606f02ea54ba89f42db112ba8839 +gopkg.in/alecthomas/kingpin.v3-unstable,v3.0.0-20180810215634-df19058c872c,h1:vTxShRUnK60yd8DZU+f95p1zSLj814+5CuEh7NjF2/Y=,0e35a5bb02770611e4c53c611529b95b96d0bc573f05d10bb43f7441abef2fde +gopkg.in/alexcesaro/quotedprintable.v3,v3.0.0-20150716171945-2caba252f4dc,h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=,1a310c5e55038937be3e69765276449601ca582f681129f7d9d47e052846cafc +gopkg.in/asn1-ber.v1,v1.0.0-20181015200546-f715ec2f112d,h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=,fee158570ba9cbfc11156afbe9b9ab0833ab00d0f1a2a2af29a6325984a79903 +gopkg.in/bblfsh/sdk.v1,v1.17.0,h1:Ez/4P0S0Zaq30iZKfiTlhOtqMx6dfQHMTYpqKFvnv4A=,172521b9f2bdd4180751ed5122971c9c37a8c0bca2e0710bc255bc0e5ff8c106 +gopkg.in/bblfsh/sdk.v2,v2.16.4,h1:Ta/kBVRGXf8UOBYDw/ih8mw13/8NND+AdR0JiXBQrOw=,eb7a8a7d08bd80cd0673a6b9c90fa524bda9db24242bd6ef82fb414941c4ef0f 
+gopkg.in/bsm/ratelimit.v1,v1.0.0-20160220154919-db14e161995a,h1:stTHdEoWg1pQ8riaP5ROrjS6zy6wewH/Q2iwnLCQUXY=,fea8af18591a0ac50d29c8db124d13a43da6bee7a624c411b7449a99ee87b489 +gopkg.in/bufio.v1,v1.0.0-20140618132640-567b2bfa514e,h1:wGA78yza6bu/mWcc4QfBuIEHEtc06xdiU0X8sY36yUU=,9d63fe986f79edba7fca9bcd3bee0c7dcff7787cd30b43b5f2ae8a59feae512c +gopkg.in/check.v1,v1.0.0-20190902080502-41f04d3bba15,h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=,004537cb19dbe45954ec1605f331705f6685ccc267eddd4289c1eb27513ab817 +gopkg.in/cheggaaa/pb.v1,v1.0.28,h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk=,39725f9f37aac44dd55bdc9ade65a2d066953a090456298d34203257fc7e8ee9 +gopkg.in/cheggaaa/pb.v2,v2.0.7,h1:beaAg8eacCdMQS9Y7obFEtkY7gQl0uZ6Zayb3ry41VY=,a6ba73f81893f0eca8c0a60c238a705a12bae499a44fe6217a4471687766ef02 +gopkg.in/clog.v1,v1.2.0,h1:BHfwHRNQy497iBNsRBassPixSAxRbn2z5KVkdBFbwxc=,51eb8901943d1cec850b55556a9989e21488a9636ac692d6f7575db057804f3d +gopkg.in/editorconfig/editorconfig-core-go.v1,v1.3.0,h1:oxOEwvhxLMpWpN+0pb2r9TWrM0DCFBHxbuIlS27tmFg=,b5371885f56b40c03da4fd05006c717fabdfb8ee9ea1ceef4cc5b7caeda35041 +gopkg.in/errgo.v1,v1.0.1,h1:oQFRXzZ7CkBGdm1XZm/EbQYaYNNEElNBOd09M6cqNso=,32f45f7cfacfc04ae9e7e8c9fc55a53812554799da7c2bd17b043068b5fd5171 +gopkg.in/errgo.v2,v2.1.0,h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=,6b8954819a20ec52982a206fd3eb94629ff53c5790aa77534e6d8daf7de01bee +gopkg.in/fatih/color.v1,v1.7.0,h1:bYGjb+HezBM6j/QmgBfgm1adxHpzzrss6bj4r9ROppk=,ed20c58de8c575144c2cc1c924121ee1a240e0621c77918231547b576d46d3ce +gopkg.in/fatih/set.v0,v0.2.1,h1:Xvyyp7LXu34P0ROhCyfXkmQCAoOUKb1E2JS9I7SE5CY=,d743141e21d20f6d5ae8e784dd4644c0947948103b63404a878b0298f14a9e62 +gopkg.in/fsnotify.v1,v1.4.7,h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=,ce003d540f42b3c0a3dec385deb387b255b536b25ea4438baa65b89458b28f75 +gopkg.in/fsnotify/fsnotify.v1,v1.4.7,h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo=,6f74f844c970ff3059d1639c8a850d9ba7029dd059b5d9a305f87bd307c05491 
+gopkg.in/gavv/httpexpect.v1,v1.0.0-20170111145843-40724cf1e4a0,h1:r5ptJ1tBxVAeqw4CrYWhXIMr0SybY3CDHuIbCg5CFVw=,4fe4a5e78a26ac5b60fc16405d3a5918d83cd645d36bd9dc0d558824136930b6 +gopkg.in/gcfg.v1,v1.2.3,h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs=,06cdad29610507bafb35e2e73d64fd7aa6c5c2ce1e5feff30a622af5475bca3b +gopkg.in/gemnasium/logrus-airbrake-hook.v2,v2.1.2,h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=,ce35c69d2a1f49d8672447bced4833c02cc7af036aa9df94d5a6a0f5d871cccd +gopkg.in/go-playground/assert.v1,v1.2.1,h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM=,11da2f608d82304df2384a2301e0155fe72e8414e1a17776f1966c3a4c403bc4 +gopkg.in/go-playground/validator.v8,v8.18.2,h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ=,fea7482c7122c2573d964b7d294a78f2162fa206ccd4b808d0c82f3d87b4d159 +gopkg.in/go-playground/validator.v9,v9.30.0,h1:Wk0Z37oBmKj9/n+tPyBHZmeL19LaCoK3Qq48VwYENss=,f4769db84ddc2db880bc190a5420762ef45f80ebbce678b622c4fa82b422b890 +gopkg.in/gobwas/glob.v0,v0.2.3,h1:uLMy+ys6BqRCutdUNyWLlmEnd7VULqh1nsxxV1kj0qQ=,3a5fe045be1ff9b47c5e21a9f97bdefaada31463f365503d6b176b76e18a0257 +gopkg.in/gographics/imagick.v3,v3.2.0,h1:eUwlkCw2fa20OGu47G39Im8c50S9n/CVkh8PwtOKExA=,99695d22cf7d5609887609cc9dc63ca1031b5a3238c26f6b779f32e39d572a01 +gopkg.in/gomail.v2,v2.0.0-20160411212932-81ebce5c23df,h1:n7WqCuqOuCbNr617RXOY0AWRXxgwEyPp2z+p0+hgMuE=,08b3372836aef3a403b0a01e6867a3a2252a07f65c28e0d33fe9c4b1b3ac517a +gopkg.in/gorp.v1,v1.7.2,h1:j3DWlAyGVv8whO7AcIWznQ2Yj7yJkn34B8s63GViAAw=,eaad3325e8b5358d5d54a1ca8b1e6aa19d16968a1f11f3dc45671588d914ef25 +gopkg.in/guregu/null.v3,v3.4.0,h1:AOpMtZ85uElRhQjEDsFx21BkXqFPwA7uoJukd4KErIs=,b38d62a816c5905933396a02eb11e23cbe2c17f8837563cc10794274e5af7e6e +gopkg.in/h2non/gentleman.v2,v2.0.3,h1:exsUPKJDFwNjJykboVj8+BKPWMNOxR/AmPL3f7Hutwo=,7a71dc2dd74e413832782e4478f85cc0617aed125e078e308b46207f34d6a500 +gopkg.in/h2non/gock.v1,v1.0.15,h1:SzLqcIlb/fDfg7UvukMpNcWsu7sI5tWwL+KCATZqks0=,c6a3d33e638b56ddd050c1dc6c1c6c8e9007c70cacfcc29e778fcf421f1fc029 
+gopkg.in/httprequest.v1,v1.2.0,h1:YTGV1oXzaoKI6oPzQ0knoIPcrrVzeRG3amkoxoP7Xng=,3960019870090d0de3fca818633111186d46a908b4bcac6d87e5f08e7fb58770 +gopkg.in/inconshreveable/log15.v2,v2.0.0-20180818164646-67afb5ed74ec,h1:RlWgLqCMMIYYEVcAR5MDsuHlVkaIPDAF+5Dehzg8L5A=,799307ed46ca30ca0ac2dc0332f3673814b8ff6cc1ee905a462ccfd438e8e695 +gopkg.in/inf.v0,v0.9.1,h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=,08abac18c95cc43b725d4925f63309398d618beab68b4669659b61255e5374a0 +gopkg.in/ini.v1,v1.49.0,h1:MW0aLMiezbm/Ray0gJJ+nQFE2uOC9EpK2p5zPN3NqpM=,579074067ceacbf11e938940d65647094da4f23f627645b5c58218bf05c060f0 +gopkg.in/jarcoal/httpmock.v1,v1.0.0-20181117152235-275e9df93516,h1:H6trpavCIuipdInWrab8l34Mf+GGVfphniHostMdMaQ=,5b896c9e5e44146260a066533409c1b86268458301a7155624ef27f784e5d94a +gopkg.in/jcmturner/aescts.v1,v1.0.1,h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw=,8bfd83c7204032fb16946202d5d643bd9a7e618005bd39578f29030a7d51dcf9 +gopkg.in/jcmturner/dnsutils.v1,v1.0.1,h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM=,4fb8b6a5471cb6dda1d0aabd1e01e4d54cb5ee83c395849916392b19153f5203 +gopkg.in/jcmturner/goidentity.v3,v3.0.0,h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=,1be44bee93d9080ce89f40827c57e8a396b7c801e2d19a1f5446a4325afa755e +gopkg.in/jcmturner/gokrb5.v7,v7.2.3,h1:hHMV/yKPwMnJhPuPx7pH2Uw/3Qyf+thJYlisUc44010=,3eec5b25adb89633174beb9798d8092e91ff4eed146a4b4cb950dd02414bd75e +gopkg.in/jcmturner/rpc.v1,v1.1.0,h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=,83d897b60ecb5a66d25232b775ed04c182ca8e02431f351b3768d4d2876d07ae +gopkg.in/jmcvetta/napping.v3,v3.2.0,h1:NpSZLAL6VgiyhdqaOkxwVtHXOLrQJZ6fFOMQgp7G8PQ=,887358529a8cd287b6a8232b43cc48636463fa266bac5ba48328cb0609d1dcb6 +gopkg.in/juju/charm.v6,v6.0.0-20191031115626-f595bfd8a049,h1:+isWLR3tDZyDacru13gHH0ooIuuDB28kuZJjSc8kOqU=,8d404b146f31d35148015de3f5bd4d25260f0a4b9f22a540a9167864d9e5d082 
+gopkg.in/juju/charmrepo.v3,v3.0.1,h1:mm7/CwCczsO7JYHlYkw4iCUYR7X8upEOaY5bYj7eUkw=,8f673109a6d98e4abe4ef612f85dea26bdbd7de5c66b6722c546a08aefb548fc +gopkg.in/juju/environschema.v1,v1.0.0,h1:51vT1bzbP9fntQ0I9ECSlku2p19Szj/N2beZFeIH2kM=,46ae8efc5a450745fea959dc8532d2a013aa741ab7193d3cea8b0735f09c6e8a +gopkg.in/juju/names.v2,v2.0.0-20190813004204-e057c73bd1be,h1:xDxN+Fe8olIH8sTqvFJBMsuflBYzeHVeYC4Iz97+f5M=,72ac554c125260751aadf6d41eb82d85de22ef8bff1d59c6602e9e0f5b84a28c +gopkg.in/juju/worker.v1,v1.0.0-20191018043616-19a698a7150f,h1:UAHa7z4EdrOcMN+9p5P+ojJshcIC34vwi0hCmEL6Qf8=,2e0da8053029ca9da961f8e6f1037a9d7ba12623e5c16fc5f88bf1a724c5dd23 +gopkg.in/karalabe/cookiejar.v2,v2.0.0-20150724131613-8dcd6a7f4951,h1:DMTcQRFbEH62YPRWwOI647s2e5mHda3oBPMHfrLs2bw=,07aae15601f54a5806705d218e313794118d54d9dda7addc1bf4bda4332dfc16 +gopkg.in/kothar/go-backblaze.v0,v0.0.0-20190520213052-702d4e7eb465,h1:DKgyTtKkmpZZesLue2fz/LxEhzBDUWg4N8u/BVRJqlA=,215300ce3726c40f51ee43c41a27c204441e756c8cb4f4b76b1a4dd08f509eef +gopkg.in/ldap.v2,v2.5.1,h1:wiu0okdNfjlBzg6UWvd1Hn8Y+Ux17/u/4nlk4CQr6tU=,4fd426691e674164a701ef3ec3548596574f95447cde1fa331018f7d73f8399b +gopkg.in/ldap.v3,v3.0.2,h1:R6RBtabK6e1GO0eQKtkyOFbAHO73QesLzI2w2DZ6b9w=,f79d1cb87a0a6d571e671c2028409056d65e6bfa7d3d0563ded0edbe8ff0998e +gopkg.in/macaron.v1,v1.3.4,h1:HvIscOwxhFhx3swWM/979wh2QMYyuXrNmrF9l+j3HZs=,f9aca15b099dada4382e47898516d500876aae45d36895314cde86700636c05c +gopkg.in/macaroon-bakery.v2,v2.1.0,h1:9Jw/+9XHBSutkaeVpWhDx38IcSNLJwWUICkOK98DHls=,0a12f46df7290b131ee74ec6a4d4760170192920a091939aa2d7a39a4d0fb310 +gopkg.in/macaroon-bakery.v2-unstable,v2.0.0-20171026135619-38b77b89a624,h1:FIOL4YpoNbXH6K+LnOoAEMa/1ebliK7B9mj5NuJHmiA=,51476e40e03bd1f64fd3cdf936d1cde4b8c1395884af9376ff65755041c247aa +gopkg.in/macaroon.v2,v2.1.0,h1:HZcsjBCzq9t0eBPMKqTN/uSN6JOm78ZJ2INbqcBQOUI=,ae47a93d20ce5c053eafc9d6a76c01b2b06784f9886137dc73a99302928046eb 
+gopkg.in/macaroon.v2-unstable,v2.0.0-20180319203259-5c9beabe0e9e,h1:yPxshueS06kvTVlsymSbHvk6VQ1WhX1Ou3hCqqWBp/s=,e09a1f8268d65e3dc28da85c75e78f15f1f742d1dcd31cce427fd885b1962bc4 +gopkg.in/mail.v2,v2.0.0-20180731213649-a0242b2233b4,h1:a3llQg4+Czqaf+QH4diHuHiKv4j1abMwuRXwaRNHTPU=,d7d60701b95fd7f62d3f83bc026f42c0fa69c3f16cc445d2b20497c9dd182ff6 +gopkg.in/mattes/migrate.v1,v1.3.2,h1:tWus4MPMhDY/htX+NCvASiQVRU2pj4Jyj4T8AIv6vUw=,c50f590108871c25d55631addd6bc267f311830d4306ff4d36a6feaad0b23255 +gopkg.in/mattn/go-colorable.v0,v0.1.0,h1:WYuADWvfvYC07fm8ygYB3LMcsc5CunpxfMGKawHkAos=,337a25f7f87a87097e5fb853313c1fac3d3126ed0eb9bb88511d52ba9a0eb4e0 +gopkg.in/mattn/go-isatty.v0,v0.0.4,h1:NtS1rQGQr4IaFWBGz4Cz4BhB///gyys4gDVtKA7hIsc=,18500935e08e5b74487537b8b78a30778a5b2304a138f53aa8758b86266773ff +gopkg.in/mattn/go-runewidth.v0,v0.0.4,h1:r0P71TnzQDlNIcizCqvPSSANoFa3WVGtcNJf3TWurcY=,e0307a435e39658f761b7526dda9149e7664b7250958494c1a4eebd14884b82d +gopkg.in/mcuadros/go-syslog.v2,v2.2.1,h1:60g8zx1BijSVSgLTzLCW9UC4/+i1Ih9jJ1DR5Tgp9vE=,1f444e24504b6a21c0d204441a84336ab1240f77a1280b60e48f68ea1b99da7b +gopkg.in/mgo.v2,v2.0.0-20190816093944-a6b53ec6cb22,h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw=,14edbec0d97107b0e0980b66166400f8a4c3844b03bd3240fc57be2b82734b16 +gopkg.in/natefinch/lumberjack.v2,v2.0.0,h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=,8c268e36660d6ce36af808d74b9be80207c05463679703e93d857e954c637aaa +gopkg.in/neurosnap/sentences.v1,v1.0.6,h1:v7ElyP020iEZQONyLld3fHILHWOPs+ntzuQTNPkul8E=,e3df38d6fc6097f9d1d76ee13e24fec69103c43248ca6a7f3ade2afec5e85bdd +gopkg.in/ns1/ns1-go.v2,v2.0.0-20190730140822-b51389932cbc,h1:GAcf+t0o8gdJAdSFYdE9wChu4bIyguMVqz0RHiFL5VY=,c51d0889ff5eb72df2f9e4adc28e9f3602e6eb567c3824bebb3c7d315a60710a +gopkg.in/olivere/elastic.v2,v2.0.61,h1:7cpl3MW8ysa4GYFBXklpo5mspe4NK0rpZTdyZ+QcD4U=,0a20d84f6003850343937ef79179cabe99feef9b038c281fd65ec32ec6c7e85c 
+gopkg.in/olivere/elastic.v5,v5.0.82,h1:QH7ere4lvOAWnnOd0VLJ54W8LzExZszoGIRijnb1h2Y=,3c66a7606b226d19f61651b3ad58aecda3155edc802029bd21cd4b8724bd0c9f +gopkg.in/ory-am/dockertest.v3,v3.3.4,h1:oen8RiwxVNxtQ1pRoV4e4jqh6UjNsOuIZ1NXns6jdcw=,73b01a1d025d30c8f11def182179b873410eae72f7b2fd9f9394b0fcf4683c93 +gopkg.in/redis.v2,v2.3.2,h1:GPVIIB/JnL1wvfULefy3qXmPu1nfNu2d0yA09FHgwfs=,abe2fa39afa36f8186ee287bcf82f9f4bc083aa35d17dd82a2ccbf5850ecdde8 +gopkg.in/redis.v3,v3.6.4,h1:u7XgPH1rWwsdZnR+azldXC6x9qDU2luydOIeU/l52fE=,749ef3e08eb4eda43969f88135040ae4517b450b27dbd48aefb9bf5e72465621 +gopkg.in/redis.v4,v4.2.4,h1:y3XbwQAiHwgNLUng56mgWYK39vsPqo8sT84XTEcxjr0=,6403d2b45edf2804bfd07b6d697184fc97377168589ad43ad19b2433e1dcee34 +gopkg.in/redis.v5,v5.2.9,h1:MNZYOLPomQzZMfpN3ZtD1uyJ2IDonTTlxYiV/pEApiw=,3c30e42670d1ef5f0b33876928b3bd5693ef3b5be1df6b2710d48c2667ca7133 +gopkg.in/resty.v1,v1.12.0,h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=,43487bb0bb40626d16502b1fe9e719cf751e7a5b4e4233276971873e7863d3cf +gopkg.in/robfig/cron.v2,v2.0.0-20150107220207-be2e0b0deed5,h1:E846t8CnR+lv5nE+VuiKTDG/v1U2stad0QzddfJC7kY=,b25da9b8747e664334044e581d1a8fb700237239e7f182fd226d6296e6180bc0 +gopkg.in/satori/go.uuid.v1,v1.2.0,h1:AH9uksa7bGe9rluapecRKBCpZvxaBEyu0RepitcD0Hw=,794cefc3062e09b17f4300eb6b02622ac348af9d368341ff71a655a15884547f +gopkg.in/sourcemap.v1,v1.0.5,h1:inv58fC9f9J3TK2Y2R1NPntXEn3/wjWHkonhIUODNTI=,05b5f382bfa60212f444c7207168e9eb0c722e26b57a688123cb8bbf234de692 +gopkg.in/spacemonkeygo/monkit.v2,v2.0.0-20190623001553-09813957f0a8,h1:nyw4hxw2zz4S0EHqr5nQfA3zGbMFJDRJlQPM4PCb7O4=,4a8e607c4f16b32bb9ee380627716979b19ac3df74ca2a4f80aefbaf0b411784 +gopkg.in/square/go-jose.v2,v2.4.0,h1:0kXPskUMGAXXWJlP05ktEMOV0vmzFQUWw6d+aZJQU8A=,d00c4af5a633ab9cf7645b68f6fa389c8f0d9ffebc486742c7a5292280cae84b +gopkg.in/src-d/go-billy.v4,v4.3.2,h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg=,c49871e1d08bba07b2261626b929096b6dc5c839e781adfc24fcc410067cc2bf 
+gopkg.in/src-d/go-cli.v0,v0.0.0-20181105080154-d492247bbc0d,h1:mXa4inJUuWOoA4uEROxtJ3VMELMlVkIxIfcR0HBekAM=,86042ffc0c8492845917453682c5bdba46beb2f0c067b61e495a92b9a8621076 +gopkg.in/src-d/go-errors.v1,v1.0.0,h1:cooGdZnCjYbeS1zb1s6pVAAimTdKceRrpn7aKOnNIfc=,f7d9f00c057d4b49bc6e57167561a7fb508ebb113a1946cb2b6f71dac5b14cfb +gopkg.in/src-d/go-git-fixtures.v3,v3.5.0,h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg=,282dc6474c5ecf62c1169d04ad1f6d75e6058922897b4709a16a1007a5f22eb7 +gopkg.in/src-d/go-git.v4,v4.13.1,h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE=,13364fe60f2316a179e912fb4eb6c576e2aabd67e8d390651a155e85c69146d2 +gopkg.in/src-d/go-log.v1,v1.0.1,h1:heWvX7J6qbGWbeFS/aRmiy1eYaT+QMV6wNvHDyMjQV4=,48f6c8a7bdc5436d296f388cd5d40ffb9c749e1e4ab1e455984efc61008fd5d7 +gopkg.in/stack.v0,v0.0.0-20141108040640-9b43fcefddd0,h1:lMH45EKqD8Nf6LwoF+43YOKjOAEEHQRVgDyG8RCV4MU=,a88c4cb4af34bb5c4dd69d0c771829331be7416d2f18d58ff599126f7b291984 +gopkg.in/stretchr/testify.v1,v1.2.2,h1:yhQC6Uy5CqibAIlk1wlusa/MJ3iAN49/BsR/dCCKz3M=,0126e73e5f2ce5687dec597bb276e11dc4031dbdf199e68de735bc67bf808149 +gopkg.in/telegram-bot-api.v3,v3.0.0,h1:Y6QmqOMwRKv5NUdlvzEBtEZChjsrqdTS6O858cvuCww=,03c58e32567a5cc4ec631cc226ecc99dd1113a7a98bab4778b02cde073ab5ed4 +gopkg.in/telegram-bot-api.v4,v4.6.4,h1:hpHWhzn4jTCsAJZZ2loNKfy2QWyPDRJVl3aTFXeMW8g=,01a91b240fb416bf83bcaaa07133cafac28fd8eb8f0f251f6a616beec88c92ac +gopkg.in/testfixtures.v2,v2.5.0,h1:N08B7l2GzFQenyYbzqthDnKAA+cmb17iAZhhFxr7JHw=,05baac4af6e2855d296a5c045b27deb1b33d0a04cd0df96f029927f0742765a3 +gopkg.in/tomb.v1,v1.0.0-20141024135613-dd632973f1e7,h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=,34898dc0e38ba7a792ab74a3e0fa113116313fd9142ffb444b011fd392762186 +gopkg.in/tomb.v2,v2.0.0-20161208151619-d5d1b5820637,h1:yiW+nvdHb9LVqSHQBXfZCieqV4fzYhNBql77zY0ykqs=,15d93d96e1e8b2d8daf7b9e57a2a9193c0e676a2c6b63d9325bf34b53e93db00 
+gopkg.in/tylerb/graceful.v1,v1.2.15,h1:1JmOyhKqAyX3BgTXMI84LwT6FOJ4tP2N9e2kwTCM0nQ=,0a8639cfe62508438ebf2cae721468b64d8cd2992fc0f80439c83c718f4608e0 +gopkg.in/urfave/cli.v1,v1.20.0,h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=,413704688402027dc0f51666bac42152eb1668a73fa0e33858c3d2123c0592e5 +gopkg.in/warnings.v0,v0.1.2,h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=,c412b1f704c1e8ba59b6cfdb1072f8be847c03f77d6507c692913d6d9454e51c +gopkg.in/yaml.v1,v1.0.0-20140924161607-9f9df34309c0,h1:POO/ycCATvegFmVuPpQzZFJ+pGZeX22Ufu6fibxDVjU=,7abff7973fdab7386de5a1e9e197d8dc50d41ded9d24ff914685900caa0eb742 +gopkg.in/yaml.v2,v2.2.4,h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=,815be785649ae218b51efd8e40b3b75de8f9b57dd43162386ffe3e76709f2a5d +gorgonia.org/tensor,v0.9.2,h1:bVTWB68apbLfdrAlz5Ev3daGhfOhKuPkVFacMSNzpHs=,17562e7c1c6477b8b530d6236ab9a61228edbabe01c1cfb9ba23286c2394ba4c +gorgonia.org/vecf32,v0.9.0,h1:PClazic1r+JVJ1dEzRXgeiVl4g1/Hf/w+wUSqnco1Xg=,618df2e604236a2d143958a3571f9939c8264ab2aaae7d8c71b897b728240a23 +gorgonia.org/vecf64,v0.9.0,h1:bgZDP5x0OzBF64PjMGC3EvTdOoMEcmfAh1VCUnZFm1A=,f57695832a12a6f1fbcc04cdaa267ed01fb6b8105f518590d64b2c63b9ac4c61 +gotest.tools,v2.2.0+incompatible,h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=,55fab831b2660201183b54d742602563d4e17e7125ee75788a309a4f6cb7285e +grpc.go4.org,v0.0.0-20170609214715-11d0a25b4919,h1:tmXTu+dfa+d9Evp8NpJdgOy6+rt8/x4yG7qPBrtNfLY=,58b5c3cccf3e765d0f42918d458cddcd03fc28ff5d701790783677513a8446e3 +h12.io/socks,v1.0.0,h1:oiFI7YXv4h/0kBNcmAb5EkkoFJgYsOF88EQjMBxjitc=,3bf83125284ccabf811aa238954b442e39f53e3e068d4ddb6bf679ba2be28bbe +honnef.co/go/js/dom,v0.0.0-20190526011328-ebc4cf92d81f,h1:b3Q9PqH+5NYHfIjNUEN+f8lYvBh9A25AX+kPh8dpYmc=,a65720d9c0339450c8818226693a85986549fb156ee4df65913682c350bd4d60 +honnef.co/go/js/util,v0.0.0-20150216223935-96b8dd9d1621,h1:QBApQyt1KyR3SvDWU8sHcIXeWTSCUamO7xQopvwuLWI=,db5638addc7638cc5cf2245cb9bcb19cf04a5912120330560149b54b4575ae50 
+honnef.co/go/js/xhr,v0.0.0-20150307031022-00e3346113ae,h1:2dIKMawnBWvHzZrS8STyu/KdhYIOpnKQpp1WZm+K7TE=,d2a4a85c43fb4ccd9b5be6521450d272406a1722f7547f188f4a1d0cc65c4e13 +honnef.co/go/tools,v0.0.1-2019.2.3,h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=,539825114c487680f99df80f6107410e1e53bbfd5deb931b84d1faf2d221638e +howett.net/plist,v0.0.0-20181124034731-591f970eefbb,h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=,58c94cd949be714c0ee320d1be0cff3116fc829c412b9e7b816b03fb3c85f463 +istio.io/api,v0.0.0-20191029012234-9fe6a7da3673,h1:wxFykuQoScKAnEtKujAPqjwR8Aqo2LNtkoIvodxyCSs=,9ba545b9c5411725b709287b590082d140d3b29d924be351e38942f46c33ff55 +istio.io/gogo-genproto,v0.0.0-20190930162913-45029607206a,h1:w7zILua2dnYo9CxImhpNW4NE/8ZxEoc/wfBfHrhUhrE=,3b5a81f1807f48117d6691c8d007402a94b648f45f4446841a3f56229aa94aba +istio.io/pkg,v0.0.0-20191029184635-5c2f5ef63692,h1:MT7e5hpQ8cGtKCeWIjtdluEVkIhkN2tw4iVkAzhWHYA=,887882f7e721e6d00dee301f0b029792bd04bd38c455ab7e5cf4f2bc5bf309df +k8s.io/api,v0.0.0-20191031065753-b19d8caf39be,h1:X0MqzqUHuZj50SrMQFExejJfy67RKPf30Vt2nnpa4AA=,00a67ed9b84be18f621701796b42cee630c770c858582753fe0eb9c146ef93ff +k8s.io/apiextensions-apiserver,v0.0.0-20191028232452-c47e10e6d5a3,h1:XxkWdWvPKTParJ1sXpUIvHJsJ2iIIj5Ebjxxy5YU1Zo=,2cb12eb8b2b0f95fb5d69b1f80b754b32ae46ef1f9636333fe27c6b17b1a6e19 +k8s.io/apimachinery,v0.0.0-20191030190112-bb31b70367b7,h1:81UYA9Qq3JXPpZMmRBnq6T3qU+b71Dvnm6sV3NSQTVk=,4c16a440acf7559b0974d99650c876969ad4811ddc76f9f5b7aa43afc34f66ec +k8s.io/apiserver,v0.0.0-20191031110436-8cb875160ee0,h1:BGkQMPpKpx07hvq9AW64gifbf+zbAh/xUbB5OYXPvQ0=,baefad9177a1f8077c94a2d88b7e85deb7df79317d3d6d6afe8ca0be8261b1ae +k8s.io/cli-runtime,v0.0.0-20191025231729-08207da42a69,h1:05z+vSvn9yPr7GTAt3MXpVc9VeU4D80HHwvJU6jC3D4=,46264219a6e1c8263acd610841a156086bbbdd43436a836effcd7285c37b0e8a +k8s.io/client-go,v11.0.0+incompatible,h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o=,70925f536d409accf4f6ae3f20dafd81370ac096f848e99009141bea971103c2 
+k8s.io/cloud-provider,v0.0.0-20191025232453-66dd06a864dd,h1:CxSfhPPmLwYFZquskmKvODMeEm82ZLc4eph47AdUp+o=,55623701c005f824ae847ecad409c6e61ef5fcfc6588b8dd2995a4a0991eeae8 +k8s.io/cluster-bootstrap,v0.0.0-20191025232351-410fafc3baf5,h1:P1mMVQngKW9pj1haVjyAtqViIBqkJmsITXsfuaHGRko=,ff50816340cdb73313e7233c3ff6df383c350102762b88ea0cc744910053c7bd +k8s.io/code-generator,v0.0.0-20191029223907-9f431a56fdbc,h1:klQ4aWfZ3uk4UiSLkZZt5qQDI+7DwSdvbvyL5QUBHsQ=,1f63d3191c255d8fcc47ad24b3bd979865a3c12eca678b39f278c194c2ae560a +k8s.io/component-base,v0.0.0-20191029070825-5e0e35147053,h1:W9/+uFw7olz+qQOCmSOG92c6j2YgIwagxqR9RWai/cE=,45bd9877048a57c3dfe7eb9c98bc1939775c73fdd6451afd055d7e4f7b9659bc +k8s.io/cri-api,v0.0.0-20191025232916-446748cffdda,h1:HVTA1bXCQek+NF0xTZkryScnkGYWHkoeYAQVEVs73r8=,7aea277309740df8d1fad1c620901d60633569d755d2d0715d97bc553988d7be +k8s.io/csi-api,v0.0.0-20190313123203-94ac839bf26c,h1:m3xih+9aI7l7Z/PvwzizV1J4vBvaUpkHrmagnGa5UNg=,0579fba2111dfd5b3cb62d7d234e52c54051176d9564ae3f0f2fdc69b31872b0 +k8s.io/gengo,v0.0.0-20191010091904-7fa3014cb28f,h1:eW/6wVuHNZgQJmFesyAxu0cvj0WAHHUuGaLbPcmNY3Q=,7fe69109e947204ee0b95705626e3c3b540faefb947d3426260f2991d1e4c036 +k8s.io/helm,v2.15.2+incompatible,h1:UjEb+c5BUZDGR9zU3dWG3OXASLIeqLeY0FCIx6ZyfTY=,377860d9db9fb1d45ffa90fe6ee79d7cfc4e91e5bc04183921480a823cf79ede +k8s.io/klog,v1.0.0,h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=,a564b06078ddf014c5b793a7d36643d6fda31fc131e36b95cdea94ff838b99be +k8s.io/kube-aggregator,v0.0.0-20191025230902-aa872b06629d,h1:ls0BmbSFkF5BhZN7grE+W5/X49QMU42RH6J9DWdP7UQ=,9a51f29a98f603acde33e3a10625e9ab603d7db29b3c2b2256d145adf7396393 +k8s.io/kube-controller-manager,v0.0.0-20191025232249-6de08162fd59,h1:bTAKwqwK2HvJVmpowb/ccyeV3wsxQZUtFQE1AqhMZ6I=,a1183bb172f19f6ae319da09c111a3f5dded663a9be85e18b9bceb131f03a342 +k8s.io/kube-openapi,v0.0.0-20190918143330-0270cf2f1c1d,h1:Xpe6sK+RY4ZgCTyZ3y273UmFmURhjtoJiwOMbQsXitY=,fb1dcd1508144991be0a243cea9a066944775ba4e9fa23c7ea038822e4e8e232 
+k8s.io/kubelet,v0.0.0-20191025232041-e9c3b1e8a9ed,h1:ISiRMWhiLjmSsx24QQ6NJSgW1oKmAN59LCFzB7llrSk=,8c409ac5922dfb4cb0b8e6bb52823b2132e82faa3756dc3a94cdeabeaa3ff51e +k8s.io/kubernetes,v1.11.10,h1:wCo67+wmguioiYv0ipIiTaXbVPfFBBjOTgIngeGGG+A=,7c8ca4ca473e9f2b5c6586a714209e98d99f193af84cdb3e8536a3d1e26be4bf +k8s.io/metrics,v0.0.0-20191026071343-a166cc0bce8f,h1:D4AcfwGLY2gFDQaeK2QVyb8g4fy4Xzs0GopdwAgfSGc=,f4243455881a38d4962483ba5f2888220743a6ddece179c2b8f706815b75778f +k8s.io/node-api,v0.0.0-20191025232816-761e5a80fde0,h1:V3FaBxwSQWPjPScXd5ioFx9+aREXGU24yFl8Gm7ib8w=,cb8718b6e148a66097f8cae4e4544dc55c674748202f9c21d07a2139b6e83fd1 +k8s.io/sample-apiserver,v0.0.0-20191030110742-cbfc6c263d7e,h1:9bsKcUCncu1Qg3A4pB5ZySTM0JMEZW4qgybjVhmaS4A=,91d701af12da2ff6cde6f07d53547885faafa32c2eadfa8d4614b4d814a854b9 +k8s.io/sample-controller,v0.0.0-20191025231305-d7b8b8302943,h1:ZYb6if7+Qa5kXFidUsQRLFDyZjCjRyG1sFf6GpZaA70=,07c3e3a95d0fac07a247a98c38448a8fc4ab0069ad599ec06ac9405df88b470b +k8s.io/utils,v0.0.0-20191030222137-2b95a09bc58d,h1:1P0iBJsBzxRmR+dIFnM+Iu4aLxnoa7lBqozW/0uHbT8=,e21be6d971127d4650bd13525a2d2627b2a98dbb8589f168b734a45d50f3ea22 +launchpad.net/gocheck,v0.0.0-20140225173054-000000000087,h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54=,1a1d9b10f2c69564e69993e4340d5299392a518d895ec06502e842e6c69f4857 +layeh.com/radius,v0.0.0-20190322222518-890bc1058917,h1:BDXFaFzUt5EIqe/4wrTc4AcYZWP6iC6Ult+jQWLh5eU=,5eb6b6a05a5f89bc114f37085deda268f895a46621aee2e36649b8d80061357e +leb.io/aeshash,v0.0.0-20190627052759-9e6b40329b3b,h1:MG17Tc0pA3XmFTsPwklMMEfcos3pTFnVYM4A0YfVSbU=,a78b48ac18e98ea68dacce16cd94c9074688a0b125f824f047313a33b264ea88 +leb.io/hashland,v0.0.0-20171003003232-07375b562dea,h1:s9IkzZTqYqw77voO6taUZHc0C1B096h4T/kQtujGApE=,0698177f24cbde0a7b45495e7fe976fe7623f2b9205995b7d91fd2e7b0f0e243 +leb.io/hrff,v0.0.0-20170927164517-757f8bd43e20,h1:9CHS8LIq9MDwUsAaCHUsbUq7zb5lSjLQYWlJ/AbMZKg=,538008712599401a903a7982714c0a9ae745221042d3dfb1437bc508d8fb9e96 
+modernc.org/cc,v1.0.0,h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ=,24711e9b28b0d79dd32438eeb7debd86b850350f5f7749b7af640422ecf6b93b +modernc.org/golex,v1.0.0,h1:wWpDlbK8ejRfSyi0frMyhilD3JBvtcx2AdGDnU+JtsE=,335133038991d7feaba5349ac2385db7b49601bba0904abf680803ee2d3c99df +modernc.org/mathutil,v1.0.0,h1:93vKjrJopTPrtTNpZ8XIovER7iCIH1QU7wNbOQXC60I=,766ad95195543fe1ac217ce9f54e1fb43119c25db2b89013b9ef5477ad2dd9d1 +modernc.org/strutil,v1.0.0,h1:XVFtQwFVwc02Wk+0L/Z/zDDXO81r5Lhe6iMKmGX3KhE=,4bbca362df97450c6f24b90b7dc80b97ecf19e5f0f5954655b26f335a0b8f378 +modernc.org/xc,v1.0.0,h1:7ccXrupWZIS3twbUGrtKmHS2DXY6xegFua+6O3xgAFU=,ef80e60acacc023cd294eef2555bd348f74c1bcd22c8cfbbd2472cb91e35900d +moul.io/http2curl,v1.0.0,h1:6XwpyZOYsgZJrU8exnG87ncVkU1FVCcTRpwzOkTDUi8=,422e2b8833089b001da02c6d7235ecb4c0591bb585fee125cbd0d72b1371dba5 +mvdan.cc/sh,v2.6.0+incompatible,h1:BLDuJ+D75OCaBF7W70+2oALi8aKAjcAiDBNmmwR8BQA=,c5c335f4ae8f1c4228a01710b84ba8f847709b1920d2beeddc4648e62cdd25f7 +mvdan.cc/sh/v3,v3.0.0-alpha1,h1:ao/4li6H9nZe5HDXA14cynXoq90+DLZz0HmjZE/qjhA=,5da16556569786a039c24229b55eb0f76049c2293ac96a9b978cede87676962e +mvdan.cc/xurls/v2,v2.0.0,h1:r1zSOSNS/kqtpmATyMMMvaZ4/djsesbYz5kr0+qMRWc=,67e609a744e93b7ba05adee985d7e3471e6d414cea611ac73206e007a5e03082 +myitcv.io,v0.0.0-20190927111909-7837eed0ff8e,h1:aTqeLMcNZAhWxtvBgs0fbjTxg5BuNvHYnLo1lhSq9hE=,0d734b4e576c5c34dd9788481761864faef6cacdd735296d22f885b211fe9c70 +pack.ag/amqp,v0.11.2,h1:cuNDWLUTbKRtEZwhB0WQBXf9pGbm87pUBXQhvcFxBWg=,7cdc81d1aeef4ad24c4a49f6227aac060ee193587c95d48bfe4437beaf08310a +periph.io/x/periph,v3.6.2+incompatible,h1:B9vqhYVuhKtr6bXua8N9GeBEvD7yanczCvE0wU2LEqw=,aeb77a51a9e20e0414e7ea7c9a3a30302fcb5ffc5cf4dd41c3455ec0c3d7b1bc +perkeep.org,v0.0.0-20190926184543-d342b0e26632,h1:6ZKRr0VZtsfdHyYDJ/G9rCy7z8jGfrpmYANf0BR+vJM=,fd9e06dfc30d3bcb49399fd062094dfdf364a8344d409541896cb96d36465ade 
+rsc.io/binaryregexp,v0.2.0,h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=,b3e706aa278fa7f880d32fa1cc40ef8282d1fc7d6e00356579ed0db88f3b0047 +rsc.io/goversion,v1.2.0,h1:SPn+NLTiAG7w30IRK/DKp1BjvpWabYgxlLp/+kx5J8w=,f8426f6078b1d1b4e29a8c6223603680169c7c0a8789d2aee7e401a46ff6343f +rsc.io/pdf,v0.1.1,h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=,79bf310e399cf0e2d8aa61536750d2a6999c5ca884e7a27faf88d3701cd5ba8f +rsc.io/qr,v0.1.0,h1:M/sAxsU2J5mlQ4W84Bxga2EgdQqOaAliipcjPmMUM5Q=,fd09c124eb71d01dab3a0116eac47a6fce78f34bbdd84620b2dc01b90582b11c +sigs.k8s.io/cluster-api,v0.2.7,h1:WjhtuvyjnMgo62kKlVizhI/nYs4DJxHNf+ZMSk/uUsM=,1e3767e7d0f655b72a52eab40e122779ccd1f734c06b9c6488ea9615a3db7b24 +sigs.k8s.io/controller-runtime,v0.3.0,h1:ZtdgqJXVHsIytjdmDuk0QjagnzyLq9FjojXRqIp+dU4=,f37a21668e57315e7248169bec6d4a71f86bcf53d7528c9752e7b459ee74efe0 +sigs.k8s.io/kustomize,v2.0.3+incompatible,h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=,e0f6ad3aaaf7160abcb5e7b16f711c13aebe876833ae9e6ad6f858f31641bf62 +sigs.k8s.io/structured-merge-diff,v0.0.0-20191023203907-336d3378ca53,h1:WCMuuk4OLJ1WdEK3fx+hroiutCODdAGwDlL2Dj4mpa0=,b389a2eafcce0dcef4ca1052942980f26b62030da007b3a84a653de5c0f91668 +sigs.k8s.io/testing_frameworks,v0.1.2-0.20190130140139-57f07443c2d4,h1:GtDhkj3cF4A4IW+A9LScsuxvJqA9DE7G7PGH1f8B07U=,bfb65beb3dda386efc0c0ff9237a07877cec71922f4d3dc1f4a40d5fcaa090a9 +sigs.k8s.io/yaml,v1.1.0,h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=,a0d39252e8665a428a8cb9d4dfc9cbea07b7ae90ae62e7cf3651be719adf515a +sofastack.io/sofa-mosn,v0.0.0-20191101130505-becc7a6dc50c,h1:8IAozA6SkwfqCAF7fVyy8gu4FdyJvH5iBC12WhiocB8=,0ed9cc5b20e6233051bb4de2ffee5c7f3365704fe01d28e87237d9e8041a786d +sourcegraph.com/sourcegraph/appdash,v0.0.0-20190107175209-d9ea5c54f7dc,h1:lmf242UNy8ucQUSUse9oXtyxHb6kaF82XRLqeVDXXhA=,49e3fd73d6218c97f49266f0e32bbdab1b6352f2f40da8d1aa98ee8dfdeec072 
+sourcegraph.com/sourcegraph/appdash-data,v0.0.0-20151005221446-73f23eafcf67,h1:e1sMhtVq9AfcEy8AXNb8eSg6gbzfdpYhoNqnPJa+GzI=,382adefecd62bb79172e2552bcfb7d45f47122f9bd22259b0566b26fb2627b87 +sourcegraph.com/sourcegraph/go-diff,v0.5.0,h1:eTiIR0CoWjGzJcnQ3OkhIl/b9GJovq4lSAVRt0ZFEG8=,2c5eaad1d3743b3d4bd6de70459a413e62d1753673d5b96402dda27508454b3b +sourcegraph.com/sqs/pbtypes,v0.0.0-20180604144634-d3ebe8f20ae4,h1:JPJh2pk3+X4lXAkZIk2RuE/7/FoK9maXw+TNPJhVS/c=,6750f8618ecbde1668de332800ec01d713debb145dee395c23fc9a373c207fe3 +storj.io/drpc,v0.0.7-0.20191021224058-08e7133752cd,h1:Oh7Nww1cgFA3fhrCOheDwQ0VcUKFcO1LsBSJEgiGgUQ=,51befd9e6e3aa6cfb9f5b56e47b3cd59715dbe656d0a12cfbb0282609b5456dd +storj.io/storj,v0.24.5,h1:dWqApMsdhPoUufrljPQC1gZWkYcSTjRr5AoZ7mrSjCw=,ce0628bdcce2b8f0241d27993431d343d212b2e55323510bf657928001c2fb26 +strk.kbt.io/projects/go/libravatar,v0.0.0-20160628055650-5eed7bff870a,h1:8q33ShxKXRwQ7JVd1ZnhIU3hZhwwn0Le+4fTeAackuM=,be48b3949775d6ba0dd3105d7d31d338fede9fbd1471b41fe861f1cfcabbf85c +v.io/x/lib,v0.1.4,h1:PCDfluqBeRbA7OgDIs9tIpT+z6ZNZ5VMeR+t7h/K2ig=,411c5ded56ba1b69269c37748d184954089c320f43ee76beb0c53f7c598baeaf +vbom.ml/util,v0.0.0-20180919145318-efcd4e0f9787,h1:O69FD9pJA4WUZlEwYatBEEkRWKQ5cKodWpdKTrCS/iQ=,abbc7a9ac1d820f336ccbe247404800d0f79859b4e4412f0d107aebbb564f920 +vitess.io/vitess,v2.1.1+incompatible,h1:nuuGHiWYWpudD3gOCLeGzol2EJ25e/u5Wer2wV1O130=,8f823ede6775b4f5b3f6cd4c04b3b6be453416e124362a8d68fa2e829429fa68 +xorm.io/builder,v0.3.6,h1:ha28mQ2M+TFx96Hxo+iq6tQgnkC9IZkM6D8w9sKHHF8=,8f16bb96bf2f75b4813be77072a966d1f2248a38f2c7afff4132b666876310a7 +xorm.io/core,v0.7.2,h1:mEO22A2Z7a3fPaZMk6gKL/jMD80iiyNwRrX5HOv3XLw=,24c93962a78b2a177ff5c66cd43921eb1e8b13290d0e8a4d87c6f075a81c4531 diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go new file mode 100644 index 
0000000000000000000000000000000000000000..16cc1457058933366d1826c5ecefa08522f00078 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modfetch/zip_sum_test/zip_sum_test.go @@ -0,0 +1,232 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package zip_sum_test tests that the module zip files produced by modfetch +// have consistent content sums. Ideally the zip files themselves are also +// stable over time, though this is not strictly necessary. +// +// This test loads a table from testdata/zip_sums.csv. The table has columns +// for module path, version, content sum, and zip file hash. The table +// includes a large number of real modules. The test downloads these modules +// in direct mode and verifies the zip files. +// +// This test is very slow, and it depends on outside modules that change +// frequently, so this is a manual test. To enable it, pass the -zipsum flag. 
+package zip_sum_test + +import ( + "context" + "crypto/sha256" + "encoding/csv" + "encoding/hex" + "flag" + "fmt" + "internal/testenv" + "io" + "os" + "path/filepath" + "strings" + "testing" + + "cmd/go/internal/cfg" + "cmd/go/internal/modfetch" + "cmd/go/internal/modload" + + "golang.org/x/mod/module" +) + +var ( + updateTestData = flag.Bool("u", false, "when set, tests may update files in testdata instead of failing") + enableZipSum = flag.Bool("zipsum", false, "enable TestZipSums") + debugZipSum = flag.Bool("testwork", false, "when set, TestZipSums will preserve its test directory") + modCacheDir = flag.String("zipsumcache", "", "module cache to use instead of temp directory") + shardCount = flag.Int("zipsumshardcount", 1, "number of shards to divide TestZipSums into") + shardIndex = flag.Int("zipsumshard", 0, "index of TestZipSums shard to test (0 <= zipsumshard < zipsumshardcount)") +) + +const zipSumsPath = "testdata/zip_sums.csv" + +type zipSumTest struct { + m module.Version + wantSum, wantFileHash string +} + +func TestZipSums(t *testing.T) { + if !*enableZipSum { + // This test is very slow and heavily dependent on external repositories. + // Only run it explicitly. + t.Skip("TestZipSum not enabled with -zipsum") + } + if *shardCount < 1 { + t.Fatal("-zipsumshardcount must be a positive integer") + } + if *shardIndex < 0 || *shardCount <= *shardIndex { + t.Fatal("-zipsumshard must be between 0 and -zipsumshardcount") + } + + testenv.MustHaveGoBuild(t) + testenv.MustHaveExternalNetwork(t) + testenv.MustHaveExecPath(t, "bzr") + testenv.MustHaveExecPath(t, "git") + // TODO(jayconrod): add hg, svn, and fossil modules to testdata. + // Could not find any for now. 
+ + tests, err := readZipSumTests() + if err != nil { + t.Fatal(err) + } + + if *modCacheDir != "" { + cfg.BuildContext.GOPATH = *modCacheDir + } else { + tmpDir, err := os.MkdirTemp("", "TestZipSums") + if err != nil { + t.Fatal(err) + } + if *debugZipSum { + fmt.Fprintf(os.Stderr, "TestZipSums: modCacheDir: %s\n", tmpDir) + } else { + defer os.RemoveAll(tmpDir) + } + cfg.BuildContext.GOPATH = tmpDir + } + + cfg.GOPROXY = "direct" + cfg.GOSUMDB = "off" + modload.Init() + + // Shard tests by downloading only every nth module when shard flags are set. + // This makes it easier to test small groups of modules quickly. We avoid + // testing similarly named modules together (the list is sorted by module + // path and version). + if *shardCount > 1 { + r := *shardIndex + w := 0 + for r < len(tests) { + tests[w] = tests[r] + w++ + r += *shardCount + } + tests = tests[:w] + } + + // Download modules with a rate limit. We may run out of file descriptors + // or cause timeouts without a limit. + needUpdate := false + for i := range tests { + test := &tests[i] + name := fmt.Sprintf("%s@%s", strings.ReplaceAll(test.m.Path, "/", "_"), test.m.Version) + t.Run(name, func(t *testing.T) { + t.Parallel() + ctx := context.Background() + + zipPath, err := modfetch.DownloadZip(ctx, test.m) + if err != nil { + if *updateTestData { + t.Logf("%s: could not download module: %s (will remove from testdata)", test.m, err) + test.m.Path = "" // mark for deletion + needUpdate = true + } else { + t.Errorf("%s: could not download module: %s", test.m, err) + } + return + } + + sum := modfetch.Sum(ctx, test.m) + if sum != test.wantSum { + if *updateTestData { + t.Logf("%s: updating content sum to %s", test.m, sum) + test.wantSum = sum + needUpdate = true + } else { + t.Errorf("%s: got content sum %s; want sum %s", test.m, sum, test.wantSum) + return + } + } + + h := sha256.New() + f, err := os.Open(zipPath) + if err != nil { + t.Errorf("%s: %v", test.m, err) + } + defer f.Close() + if _, err := 
io.Copy(h, f); err != nil { + t.Errorf("%s: %v", test.m, err) + } + zipHash := hex.EncodeToString(h.Sum(nil)) + if zipHash != test.wantFileHash { + if *updateTestData { + t.Logf("%s: updating zip file hash to %s", test.m, zipHash) + test.wantFileHash = zipHash + needUpdate = true + } else { + t.Errorf("%s: got zip file hash %s; want hash %s (but content sum matches)", test.m, zipHash, test.wantFileHash) + } + } + }) + } + + if needUpdate { + // Remove tests marked for deletion + r, w := 0, 0 + for r < len(tests) { + if tests[r].m.Path != "" { + tests[w] = tests[r] + w++ + } + r++ + } + tests = tests[:w] + + if err := writeZipSumTests(tests); err != nil { + t.Error(err) + } + } +} + +func readZipSumTests() ([]zipSumTest, error) { + f, err := os.Open(filepath.FromSlash(zipSumsPath)) + if err != nil { + return nil, err + } + defer f.Close() + r := csv.NewReader(f) + + var tests []zipSumTest + for { + line, err := r.Read() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } else if len(line) != 4 { + return nil, fmt.Errorf("%s:%d: malformed line", f.Name(), len(tests)+1) + } + test := zipSumTest{m: module.Version{Path: line[0], Version: line[1]}, wantSum: line[2], wantFileHash: line[3]} + tests = append(tests, test) + } + return tests, nil +} + +func writeZipSumTests(tests []zipSumTest) (err error) { + f, err := os.Create(filepath.FromSlash(zipSumsPath)) + if err != nil { + return err + } + defer func() { + if cerr := f.Close(); err == nil && cerr != nil { + err = cerr + } + }() + w := csv.NewWriter(f) + line := make([]string, 0, 4) + for _, test := range tests { + line = append(line[:0], test.m.Path, test.m.Version, test.wantSum, test.wantFileHash) + if err := w.Write(line); err != nil { + return err + } + } + w.Flush() + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modget/get.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modget/get.go new file mode 100644 index 
0000000000000000000000000000000000000000..d9f0c6ae68ce1a814ad85fe4b4c8d7b2d3f24564 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modget/get.go @@ -0,0 +1,1929 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package modget implements the module-aware “go get” command. +package modget + +// The arguments to 'go get' are patterns with optional version queries, with +// the version queries defaulting to "upgrade". +// +// The patterns are normally interpreted as package patterns. However, if a +// pattern cannot match a package, it is instead interpreted as a *module* +// pattern. For version queries such as "upgrade" and "patch" that depend on the +// selected version of a module (or of the module containing a package), +// whether a pattern denotes a package or module may change as updates are +// applied (see the example in mod_get_patchmod.txt). +// +// There are a few other ambiguous cases to resolve, too. A package can exist in +// two different modules at the same version: for example, the package +// example.com/foo might be found in module example.com and also in module +// example.com/foo, and those modules may have independent v0.1.0 tags — so the +// input 'example.com/foo@v0.1.0' could syntactically refer to the variant of +// the package loaded from either module! (See mod_get_ambiguous_pkg.txt.) +// If the argument is ambiguous, the user can often disambiguate by specifying +// explicit versions for *all* of the potential module paths involved. 
+ +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "sync" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/gover" + "cmd/go/internal/imports" + "cmd/go/internal/modfetch" + "cmd/go/internal/modload" + "cmd/go/internal/par" + "cmd/go/internal/search" + "cmd/go/internal/toolchain" + "cmd/go/internal/work" + + "golang.org/x/mod/modfile" + "golang.org/x/mod/module" +) + +var CmdGet = &base.Command{ + // Note: flags below are listed explicitly because they're the most common. + // Do not send CLs removing them because they're covered by [get flags]. + UsageLine: "go get [-t] [-u] [-v] [build flags] [packages]", + Short: "add dependencies to current module and install them", + Long: ` +Get resolves its command-line arguments to packages at specific module versions, +updates go.mod to require those versions, and downloads source code into the +module cache. + +To add a dependency for a package or upgrade it to its latest version: + + go get example.com/pkg + +To upgrade or downgrade a package to a specific version: + + go get example.com/pkg@v1.2.3 + +To remove a dependency on a module and downgrade modules that require it: + + go get example.com/mod@none + +To upgrade the minimum required Go version to the latest released Go version: + + go get go@latest + +To upgrade the Go toolchain to the latest patch release of the current Go toolchain: + + go get toolchain@patch + +See https://golang.org/ref/mod#go-get for details. + +In earlier versions of Go, 'go get' was used to build and install packages. +Now, 'go get' is dedicated to adjusting dependencies in go.mod. 'go install' +may be used to build and install commands instead. When a version is specified, +'go install' runs in module-aware mode and ignores the go.mod file in the +current directory. 
For example: + + go install example.com/pkg@v1.2.3 + go install example.com/pkg@latest + +See 'go help install' or https://golang.org/ref/mod#go-install for details. + +'go get' accepts the following flags. + +The -t flag instructs get to consider modules needed to build tests of +packages specified on the command line. + +The -u flag instructs get to update modules providing dependencies +of packages named on the command line to use newer minor or patch +releases when available. + +The -u=patch flag (not -u patch) also instructs get to update dependencies, +but changes the default to select patch releases. + +When the -t and -u flags are used together, get will update +test dependencies as well. + +The -x flag prints commands as they are executed. This is useful for +debugging version control commands when a module is downloaded directly +from a repository. + +For more about modules, see https://golang.org/ref/mod. + +For more about using 'go get' to update the minimum Go version and +suggested Go toolchain, see https://go.dev/doc/toolchain. + +For more about specifying packages, see 'go help packages'. + +This text describes the behavior of get using modules to manage source +code and dependencies. If instead the go command is running in GOPATH +mode, the details of get's flags and effects change, as does 'go help get'. +See 'go help gopath-get'. + +See also: go build, go install, go clean, go mod. + `, +} + +var HelpVCS = &base.Command{ + UsageLine: "vcs", + Short: "controlling version control with GOVCS", + Long: ` +The 'go get' command can run version control commands like git +to download imported code. This functionality is critical to the decentralized +Go package ecosystem, in which code can be imported from any server, +but it is also a potential security problem, if a malicious server finds a +way to cause the invoked version control command to run unintended code. 
+ +To balance the functionality and security concerns, the 'go get' command +by default will only use git and hg to download code from public servers. +But it will use any known version control system (bzr, fossil, git, hg, svn) +to download code from private servers, defined as those hosting packages +matching the GOPRIVATE variable (see 'go help private'). The rationale behind +allowing only Git and Mercurial is that these two systems have had the most +attention to issues of being run as clients of untrusted servers. In contrast, +Bazaar, Fossil, and Subversion have primarily been used in trusted, +authenticated environments and are not as well scrutinized as attack surfaces. + +The version control command restrictions only apply when using direct version +control access to download code. When downloading modules from a proxy, +'go get' uses the proxy protocol instead, which is always permitted. +By default, the 'go get' command uses the Go module mirror (proxy.golang.org) +for public packages and only falls back to version control for private +packages or when the mirror refuses to serve a public package (typically for +legal reasons). Therefore, clients can still access public code served from +Bazaar, Fossil, or Subversion repositories by default, because those downloads +use the Go module mirror, which takes on the security risk of running the +version control commands using a custom sandbox. + +The GOVCS variable can be used to change the allowed version control systems +for specific packages (identified by a module or import path). +The GOVCS variable applies when building package in both module-aware mode +and GOPATH mode. When using modules, the patterns match against the module path. +When using GOPATH, the patterns match against the import path corresponding to +the root of the version control repository. + +The general form of the GOVCS setting is a comma-separated list of +pattern:vcslist rules. 
The pattern is a glob pattern that must match +one or more leading elements of the module or import path. The vcslist +is a pipe-separated list of allowed version control commands, or "all" +to allow use of any known command, or "off" to disallow all commands. +Note that if a module matches a pattern with vcslist "off", it may still be +downloaded if the origin server uses the "mod" scheme, which instructs the +go command to download the module using the GOPROXY protocol. +The earliest matching pattern in the list applies, even if later patterns +might also match. + +For example, consider: + + GOVCS=github.com:git,evil.com:off,*:git|hg + +With this setting, code with a module or import path beginning with +github.com/ can only use git; paths on evil.com cannot use any version +control command, and all other paths (* matches everything) can use +only git or hg. + +The special patterns "public" and "private" match public and private +module or import paths. A path is private if it matches the GOPRIVATE +variable; otherwise it is public. + +If no rules in the GOVCS variable match a particular module or import path, +the 'go get' command applies its default rule, which can now be summarized +in GOVCS notation as 'public:git|hg,private:all'. + +To allow unfettered use of any version control system for any package, use: + + GOVCS=*:all + +To disable all use of version control, use: + + GOVCS=*:off + +The 'go env -w' command (see 'go help env') can be used to set the GOVCS +variable for future go command invocations. +`, +} + +var ( + getD = CmdGet.Flag.Bool("d", true, "") + getF = CmdGet.Flag.Bool("f", false, "") + getFix = CmdGet.Flag.Bool("fix", false, "") + getM = CmdGet.Flag.Bool("m", false, "") + getT = CmdGet.Flag.Bool("t", false, "") + getU upgradeFlag + getInsecure = CmdGet.Flag.Bool("insecure", false, "") + // -v is cfg.BuildV +) + +// upgradeFlag is a custom flag.Value for -u. 
+type upgradeFlag struct { + rawVersion string + version string +} + +func (*upgradeFlag) IsBoolFlag() bool { return true } // allow -u + +func (v *upgradeFlag) Set(s string) error { + if s == "false" { + v.version = "" + v.rawVersion = "" + } else if s == "true" { + v.version = "upgrade" + v.rawVersion = "" + } else { + v.version = s + v.rawVersion = s + } + return nil +} + +func (v *upgradeFlag) String() string { return "" } + +func init() { + work.AddBuildFlags(CmdGet, work.OmitModFlag) + CmdGet.Run = runGet // break init loop + CmdGet.Flag.Var(&getU, "u", "") +} + +func runGet(ctx context.Context, cmd *base.Command, args []string) { + switch getU.version { + case "", "upgrade", "patch": + // ok + default: + base.Fatalf("go: unknown upgrade flag -u=%s", getU.rawVersion) + } + // TODO(#43684): in the future (Go 1.20), warn that -d is a no-op. + if !*getD { + base.Fatalf("go: -d flag may not be disabled") + } + if *getF { + fmt.Fprintf(os.Stderr, "go: -f flag is a no-op when using modules\n") + } + if *getFix { + fmt.Fprintf(os.Stderr, "go: -fix flag is a no-op when using modules\n") + } + if *getM { + base.Fatalf("go: -m flag is no longer supported") + } + if *getInsecure { + base.Fatalf("go: -insecure flag is no longer supported; use GOINSECURE instead") + } + + modload.ForceUseModules = true + + // Do not allow any updating of go.mod until we've applied + // all the requested changes and checked that the result matches + // what was requested. + modload.ExplicitWriteGoMod = true + + // Allow looking up modules for import paths when outside of a module. + // 'go get' is expected to do this, unlike other commands. + modload.AllowMissingModuleImports() + + // 'go get' no longer builds or installs packages, so there's nothing to do + // if there's no go.mod file. + // TODO(#40775): make modload.Init return ErrNoModRoot instead of exiting. + // We could handle that here by printing a different message. 
+ modload.Init() + if !modload.HasModRoot() { + base.Fatalf("go: go.mod file not found in current directory or any parent directory.\n" + + "\t'go get' is no longer supported outside a module.\n" + + "\tTo build and install a command, use 'go install' with a version,\n" + + "\tlike 'go install example.com/cmd@latest'\n" + + "\tFor more information, see https://golang.org/doc/go-get-install-deprecation\n" + + "\tor run 'go help get' or 'go help install'.") + } + + dropToolchain, queries := parseArgs(ctx, args) + opts := modload.WriteOpts{ + DropToolchain: dropToolchain, + } + for _, q := range queries { + if q.pattern == "toolchain" { + opts.ExplicitToolchain = true + } + } + + r := newResolver(ctx, queries) + r.performLocalQueries(ctx) + r.performPathQueries(ctx) + + for { + r.performWildcardQueries(ctx) + r.performPatternAllQueries(ctx) + + if changed := r.resolveQueries(ctx, queries); changed { + // 'go get' arguments can be (and often are) package patterns rather than + // (just) modules. A package can be provided by any module with a prefix + // of its import path, and a wildcard can even match packages in modules + // with totally different paths. Because of these effects, and because any + // change to the selected version of a module can bring in entirely new + // module paths as dependencies, we need to reissue queries whenever we + // change the build list. + // + // The result of any version query for a given module — even "upgrade" or + // "patch" — is always relative to the build list at the start of + // the 'go get' command, not an intermediate state, and is therefore + // deterministic and therefore cacheable, and the constraints on the + // selected version of each module can only narrow as we iterate. + // + // "all" is functionally very similar to a wildcard pattern. 
The set of + // packages imported by the main module does not change, and the query + // result for the module containing each such package also does not change + // (it is always relative to the initial build list, before applying + // queries). So the only way that the result of an "all" query can change + // is if some matching package moves from one module in the build list + // to another, which should not happen very often. + continue + } + + // When we load imports, we detect the following conditions: + // + // - missing transitive dependencies that need to be resolved from outside the + // current build list (note that these may add new matches for existing + // pattern queries!) + // + // - transitive dependencies that didn't match any other query, + // but need to be upgraded due to the -u flag + // + // - ambiguous import errors. + // TODO(#27899): Try to resolve ambiguous import errors automatically. + upgrades := r.findAndUpgradeImports(ctx, queries) + if changed := r.applyUpgrades(ctx, upgrades); changed { + continue + } + + r.findMissingWildcards(ctx) + if changed := r.resolveQueries(ctx, r.wildcardQueries); changed { + continue + } + + break + } + + r.checkWildcardVersions(ctx) + + var pkgPatterns []string + for _, q := range queries { + if q.matchesPackages { + pkgPatterns = append(pkgPatterns, q.pattern) + } + } + r.checkPackageProblems(ctx, pkgPatterns) + + // Everything succeeded. Update go.mod. + oldReqs := reqsFromGoMod(modload.ModFile()) + + if err := modload.WriteGoMod(ctx, opts); err != nil { + // A TooNewError can happen for 'go get go@newversion' + // when all the required modules are old enough + // but the command line is not. + // TODO(bcmills): modload.EditBuildList should catch this instead, + // and then this can be changed to base.Fatal(err). 
+ toolchain.SwitchOrFatal(ctx, err) + } + + newReqs := reqsFromGoMod(modload.ModFile()) + r.reportChanges(oldReqs, newReqs) + + if gowork := modload.FindGoWork(base.Cwd()); gowork != "" { + wf, err := modload.ReadWorkFile(gowork) + if err == nil && modload.UpdateWorkGoVersion(wf, modload.MainModules.GoVersion()) { + modload.WriteWorkFile(gowork, wf) + } + } +} + +// parseArgs parses command-line arguments and reports errors. +// +// The command-line arguments are of the form path@version or simply path, with +// implicit @upgrade. path@none is "downgrade away". +func parseArgs(ctx context.Context, rawArgs []string) (dropToolchain bool, queries []*query) { + defer base.ExitIfErrors() + + for _, arg := range search.CleanPatterns(rawArgs) { + q, err := newQuery(arg) + if err != nil { + base.Error(err) + continue + } + + if q.version == "none" { + switch q.pattern { + case "go": + base.Errorf("go: cannot use go@none") + continue + case "toolchain": + dropToolchain = true + continue + } + } + + // If there were no arguments, CleanPatterns returns ".". Set the raw + // string back to "" for better errors. + if len(rawArgs) == 0 { + q.raw = "" + } + + // Guard against 'go get x.go', a common mistake. + // Note that package and module paths may end with '.go', so only print an error + // if the argument has no version and either has no slash or refers to an existing file. 
+ if strings.HasSuffix(q.raw, ".go") && q.rawVersion == "" { + if !strings.Contains(q.raw, "/") { + base.Errorf("go: %s: arguments must be package or module paths", q.raw) + continue + } + if fi, err := os.Stat(q.raw); err == nil && !fi.IsDir() { + base.Errorf("go: %s exists as a file, but 'go get' requires package arguments", q.raw) + continue + } + } + + queries = append(queries, q) + } + + return dropToolchain, queries +} + +type resolver struct { + localQueries []*query // queries for absolute or relative paths + pathQueries []*query // package path literal queries in original order + wildcardQueries []*query // path wildcard queries in original order + patternAllQueries []*query // queries with the pattern "all" + + // Indexed "none" queries. These are also included in the slices above; + // they are indexed here to speed up noneForPath. + nonesByPath map[string]*query // path-literal "@none" queries indexed by path + wildcardNones []*query // wildcard "@none" queries + + // resolvedVersion maps each module path to the version of that module that + // must be selected in the final build list, along with the first query + // that resolved the module to that version (the “reason”). + resolvedVersion map[string]versionReason + + buildList []module.Version + buildListVersion map[string]string // index of buildList (module path → version) + + initialVersion map[string]string // index of the initial build list at the start of 'go get' + + missing []pathSet // candidates for missing transitive dependencies + + work *par.Queue + + matchInModuleCache par.ErrCache[matchInModuleKey, []string] +} + +type versionReason struct { + version string + reason *query +} + +type matchInModuleKey struct { + pattern string + m module.Version +} + +func newResolver(ctx context.Context, queries []*query) *resolver { + // LoadModGraph also sets modload.Target, which is needed by various resolver + // methods. 
+ mg, err := modload.LoadModGraph(ctx, "") + if err != nil { + toolchain.SwitchOrFatal(ctx, err) + } + + buildList := mg.BuildList() + initialVersion := make(map[string]string, len(buildList)) + for _, m := range buildList { + initialVersion[m.Path] = m.Version + } + + r := &resolver{ + work: par.NewQueue(runtime.GOMAXPROCS(0)), + resolvedVersion: map[string]versionReason{}, + buildList: buildList, + buildListVersion: initialVersion, + initialVersion: initialVersion, + nonesByPath: map[string]*query{}, + } + + for _, q := range queries { + if q.pattern == "all" { + r.patternAllQueries = append(r.patternAllQueries, q) + } else if q.patternIsLocal { + r.localQueries = append(r.localQueries, q) + } else if q.isWildcard() { + r.wildcardQueries = append(r.wildcardQueries, q) + } else { + r.pathQueries = append(r.pathQueries, q) + } + + if q.version == "none" { + // Index "none" queries to make noneForPath more efficient. + if q.isWildcard() { + r.wildcardNones = append(r.wildcardNones, q) + } else { + // All "@none" queries for the same path are identical; we only + // need to index one copy. + r.nonesByPath[q.pattern] = q + } + } + } + + return r +} + +// initialSelected returns the version of the module with the given path that +// was selected at the start of this 'go get' invocation. +func (r *resolver) initialSelected(mPath string) (version string) { + v, ok := r.initialVersion[mPath] + if !ok { + return "none" + } + return v +} + +// selected returns the version of the module with the given path that is +// selected in the resolver's current build list. +func (r *resolver) selected(mPath string) (version string) { + v, ok := r.buildListVersion[mPath] + if !ok { + return "none" + } + return v +} + +// noneForPath returns a "none" query matching the given module path, +// or found == false if no such query exists. 
+func (r *resolver) noneForPath(mPath string) (nq *query, found bool) { + if nq = r.nonesByPath[mPath]; nq != nil { + return nq, true + } + for _, nq := range r.wildcardNones { + if nq.matchesPath(mPath) { + return nq, true + } + } + return nil, false +} + +// queryModule wraps modload.Query, substituting r.checkAllowedOr to decide +// allowed versions. +func (r *resolver) queryModule(ctx context.Context, mPath, query string, selected func(string) string) (module.Version, error) { + current := r.initialSelected(mPath) + rev, err := modload.Query(ctx, mPath, query, current, r.checkAllowedOr(query, selected)) + if err != nil { + return module.Version{}, err + } + return module.Version{Path: mPath, Version: rev.Version}, nil +} + +// queryPackages wraps modload.QueryPackage, substituting r.checkAllowedOr to +// decide allowed versions. +func (r *resolver) queryPackages(ctx context.Context, pattern, query string, selected func(string) string) (pkgMods []module.Version, err error) { + results, err := modload.QueryPackages(ctx, pattern, query, selected, r.checkAllowedOr(query, selected)) + if len(results) > 0 { + pkgMods = make([]module.Version, 0, len(results)) + for _, qr := range results { + pkgMods = append(pkgMods, qr.Mod) + } + } + return pkgMods, err +} + +// queryPattern wraps modload.QueryPattern, substituting r.checkAllowedOr to +// decide allowed versions. 
+func (r *resolver) queryPattern(ctx context.Context, pattern, query string, selected func(string) string) (pkgMods []module.Version, mod module.Version, err error) { + results, modOnly, err := modload.QueryPattern(ctx, pattern, query, selected, r.checkAllowedOr(query, selected)) + if len(results) > 0 { + pkgMods = make([]module.Version, 0, len(results)) + for _, qr := range results { + pkgMods = append(pkgMods, qr.Mod) + } + } + if modOnly != nil { + mod = modOnly.Mod + } + return pkgMods, mod, err +} + +// checkAllowedOr is like modload.CheckAllowed, but it always allows the requested +// and current versions (even if they are retracted or otherwise excluded). +func (r *resolver) checkAllowedOr(requested string, selected func(string) string) modload.AllowedFunc { + return func(ctx context.Context, m module.Version) error { + if m.Version == requested { + return modload.CheckExclusions(ctx, m) + } + if (requested == "upgrade" || requested == "patch") && m.Version == selected(m.Path) { + return nil + } + return modload.CheckAllowed(ctx, m) + } +} + +// matchInModule is a caching wrapper around modload.MatchInModule. +func (r *resolver) matchInModule(ctx context.Context, pattern string, m module.Version) (packages []string, err error) { + return r.matchInModuleCache.Do(matchInModuleKey{pattern, m}, func() ([]string, error) { + match := modload.MatchInModule(ctx, pattern, m, imports.AnyTags()) + if len(match.Errs) > 0 { + return match.Pkgs, match.Errs[0] + } + return match.Pkgs, nil + }) +} + +// queryNone adds a candidate set to q for each module matching q.pattern. +// Each candidate set has only one possible module version: the matched +// module at version "none". +// +// We interpret arguments to 'go get' as packages first, and fall back to +// modules second. However, no module exists at version "none", and therefore no +// package exists at that version either: we know that the argument cannot match +// any packages, and thus it must match modules instead. 
+func (r *resolver) queryNone(ctx context.Context, q *query) { + if search.IsMetaPackage(q.pattern) { + panic(fmt.Sprintf("internal error: queryNone called with pattern %q", q.pattern)) + } + + if !q.isWildcard() { + q.pathOnce(q.pattern, func() pathSet { + hasModRoot := modload.HasModRoot() + if hasModRoot && modload.MainModules.Contains(q.pattern) { + v := module.Version{Path: q.pattern} + // The user has explicitly requested to downgrade their own module to + // version "none". This is not an entirely unreasonable request: it + // could plausibly mean “downgrade away everything that depends on any + // explicit version of the main module”, or “downgrade away the + // package with the same path as the main module, found in a module + // with a prefix of the main module's path”. + // + // However, neither of those behaviors would be consistent with the + // plain meaning of the query. To try to reduce confusion, reject the + // query explicitly. + return errSet(&modload.QueryMatchesMainModulesError{MainModules: []module.Version{v}, Pattern: q.pattern, Query: q.version}) + } + + return pathSet{mod: module.Version{Path: q.pattern, Version: "none"}} + }) + } + + for _, curM := range r.buildList { + if !q.matchesPath(curM.Path) { + continue + } + q.pathOnce(curM.Path, func() pathSet { + if modload.HasModRoot() && curM.Version == "" && modload.MainModules.Contains(curM.Path) { + return errSet(&modload.QueryMatchesMainModulesError{MainModules: []module.Version{curM}, Pattern: q.pattern, Query: q.version}) + } + return pathSet{mod: module.Version{Path: curM.Path, Version: "none"}} + }) + } +} + +func (r *resolver) performLocalQueries(ctx context.Context) { + for _, q := range r.localQueries { + q.pathOnce(q.pattern, func() pathSet { + absDetail := "" + if !filepath.IsAbs(q.pattern) { + if absPath, err := filepath.Abs(q.pattern); err == nil { + absDetail = fmt.Sprintf(" (%s)", absPath) + } + } + + // Absolute paths like C:\foo and relative paths like ../foo... 
are + // restricted to matching packages in the main module. + pkgPattern, mainModule := modload.MainModules.DirImportPath(ctx, q.pattern) + if pkgPattern == "." { + modload.MustHaveModRoot() + var modRoots []string + for _, m := range modload.MainModules.Versions() { + modRoots = append(modRoots, modload.MainModules.ModRoot(m)) + } + var plural string + if len(modRoots) != 1 { + plural = "s" + } + return errSet(fmt.Errorf("%s%s is not within module%s rooted at %s", q.pattern, absDetail, plural, strings.Join(modRoots, ", "))) + } + + match := modload.MatchInModule(ctx, pkgPattern, mainModule, imports.AnyTags()) + if len(match.Errs) > 0 { + return pathSet{err: match.Errs[0]} + } + + if len(match.Pkgs) == 0 { + if q.raw == "" || q.raw == "." { + return errSet(fmt.Errorf("no package to get in current directory")) + } + if !q.isWildcard() { + modload.MustHaveModRoot() + return errSet(fmt.Errorf("%s%s is not a package in module rooted at %s", q.pattern, absDetail, modload.MainModules.ModRoot(mainModule))) + } + search.WarnUnmatched([]*search.Match{match}) + return pathSet{} + } + + return pathSet{pkgMods: []module.Version{mainModule}} + }) + } +} + +// performWildcardQueries populates the candidates for each query whose pattern +// is a wildcard. +// +// The candidates for a given module path matching (or containing a package +// matching) a wildcard query depend only on the initial build list, but the set +// of modules may be expanded by other queries, so wildcard queries need to be +// re-evaluated whenever a potentially-matching module path is added to the +// build list. 
+func (r *resolver) performWildcardQueries(ctx context.Context) { + for _, q := range r.wildcardQueries { + q := q + r.work.Add(func() { + if q.version == "none" { + r.queryNone(ctx, q) + } else { + r.queryWildcard(ctx, q) + } + }) + } + <-r.work.Idle() +} + +// queryWildcard adds a candidate set to q for each module for which: +// - some version of the module is already in the build list, and +// - that module exists at some version matching q.version, and +// - either the module path itself matches q.pattern, or some package within +// the module at q.version matches q.pattern. +func (r *resolver) queryWildcard(ctx context.Context, q *query) { + // For wildcard patterns, modload.QueryPattern only identifies modules + // matching the prefix of the path before the wildcard. However, the build + // list may already contain other modules with matching packages, and we + // should consider those modules to satisfy the query too. + // We want to match any packages in existing dependencies, but we only want to + // resolve new dependencies if nothing else turns up. + for _, curM := range r.buildList { + if !q.canMatchInModule(curM.Path) { + continue + } + q.pathOnce(curM.Path, func() pathSet { + if _, hit := r.noneForPath(curM.Path); hit { + // This module is being removed, so it will no longer be in the build list + // (and thus will no longer match the pattern). 
+ return pathSet{} + } + + if modload.MainModules.Contains(curM.Path) && !versionOkForMainModule(q.version) { + if q.matchesPath(curM.Path) { + return errSet(&modload.QueryMatchesMainModulesError{ + MainModules: []module.Version{curM}, + Pattern: q.pattern, + Query: q.version, + }) + } + + packages, err := r.matchInModule(ctx, q.pattern, curM) + if err != nil { + return errSet(err) + } + if len(packages) > 0 { + return errSet(&modload.QueryMatchesPackagesInMainModuleError{ + Pattern: q.pattern, + Query: q.version, + Packages: packages, + }) + } + + return r.tryWildcard(ctx, q, curM) + } + + m, err := r.queryModule(ctx, curM.Path, q.version, r.initialSelected) + if err != nil { + if !isNoSuchModuleVersion(err) { + // We can't tell whether a matching version exists. + return errSet(err) + } + // There is no version of curM.Path matching the query. + + // We haven't checked whether curM contains any matching packages at its + // currently-selected version, or whether curM.Path itself matches q. If + // either of those conditions holds, *and* no other query changes the + // selected version of curM, then we will fail in checkWildcardVersions. + // (This could be an error, but it's too soon to tell.) + // + // However, even then the transitive requirements of some other query + // may downgrade this module out of the build list entirely, in which + // case the pattern will no longer include it and it won't be an error. + // + // Either way, punt on the query rather than erroring out just yet. + return pathSet{} + } + + return r.tryWildcard(ctx, q, m) + }) + } + + // Even if no modules matched, we shouldn't query for a new module to provide + // the pattern yet: some other query may yet induce a new requirement that + // will match the wildcard. Instead, we'll check in findMissingWildcards. +} + +// tryWildcard returns a pathSet for module m matching query q. +// If m does not actually match q, tryWildcard returns an empty pathSet. 
+func (r *resolver) tryWildcard(ctx context.Context, q *query, m module.Version) pathSet { + mMatches := q.matchesPath(m.Path) + packages, err := r.matchInModule(ctx, q.pattern, m) + if err != nil { + return errSet(err) + } + if len(packages) > 0 { + return pathSet{pkgMods: []module.Version{m}} + } + if mMatches { + return pathSet{mod: m} + } + return pathSet{} +} + +// findMissingWildcards adds a candidate set for each query in r.wildcardQueries +// that has not yet resolved to any version containing packages. +func (r *resolver) findMissingWildcards(ctx context.Context) { + for _, q := range r.wildcardQueries { + if q.version == "none" || q.matchesPackages { + continue // q is not “missing” + } + r.work.Add(func() { + q.pathOnce(q.pattern, func() pathSet { + pkgMods, mod, err := r.queryPattern(ctx, q.pattern, q.version, r.initialSelected) + if err != nil { + if isNoSuchPackageVersion(err) && len(q.resolved) > 0 { + // q already resolved one or more modules but matches no packages. + // That's ok: this pattern is just a module pattern, and we don't + // need to add any more modules to satisfy it. + return pathSet{} + } + return errSet(err) + } + + return pathSet{pkgMods: pkgMods, mod: mod} + }) + }) + } + <-r.work.Idle() +} + +// checkWildcardVersions reports an error if any module in the build list has a +// path (or contains a package) matching a query with a wildcard pattern, but +// has a selected version that does *not* match the query. +func (r *resolver) checkWildcardVersions(ctx context.Context) { + defer base.ExitIfErrors() + + for _, q := range r.wildcardQueries { + for _, curM := range r.buildList { + if !q.canMatchInModule(curM.Path) { + continue + } + if !q.matchesPath(curM.Path) { + packages, err := r.matchInModule(ctx, q.pattern, curM) + if len(packages) == 0 { + if err != nil { + reportError(q, err) + } + continue // curM is not relevant to q. 
+ } + } + + rev, err := r.queryModule(ctx, curM.Path, q.version, r.initialSelected) + if err != nil { + reportError(q, err) + continue + } + if rev.Version == curM.Version { + continue // curM already matches q. + } + + if !q.matchesPath(curM.Path) { + m := module.Version{Path: curM.Path, Version: rev.Version} + packages, err := r.matchInModule(ctx, q.pattern, m) + if err != nil { + reportError(q, err) + continue + } + if len(packages) == 0 { + // curM at its original version contains a path matching q.pattern, + // but at rev.Version it does not, so (somewhat paradoxically) if + // we changed the version of curM it would no longer match the query. + var version any = m + if rev.Version != q.version { + version = fmt.Sprintf("%s@%s (%s)", m.Path, q.version, m.Version) + } + reportError(q, fmt.Errorf("%v matches packages in %v but not %v: specify a different version for module %s", q, curM, version, m.Path)) + continue + } + } + + // Since queryModule succeeded and either curM or one of the packages it + // contains matches q.pattern, we should have either selected the version + // of curM matching q, or reported a conflict error (and exited). + // If we're still here and the version doesn't match, + // something has gone very wrong. + reportError(q, fmt.Errorf("internal error: selected %v instead of %v", curM, rev.Version)) + } + } +} + +// performPathQueries populates the candidates for each query whose pattern is +// a path literal. +// +// The candidate packages and modules for path literals depend only on the +// initial build list, not the current build list, so we only need to query path +// literals once. +func (r *resolver) performPathQueries(ctx context.Context) { + for _, q := range r.pathQueries { + q := q + r.work.Add(func() { + if q.version == "none" { + r.queryNone(ctx, q) + } else { + r.queryPath(ctx, q) + } + }) + } + <-r.work.Idle() +} + +// queryPath adds a candidate set to q for the package with path q.pattern. 
+// The candidate set consists of all modules that could provide q.pattern +// and have a version matching q, plus (if it exists) the module whose path +// is itself q.pattern (at a matching version). +func (r *resolver) queryPath(ctx context.Context, q *query) { + q.pathOnce(q.pattern, func() pathSet { + if search.IsMetaPackage(q.pattern) || q.isWildcard() { + panic(fmt.Sprintf("internal error: queryPath called with pattern %q", q.pattern)) + } + if q.version == "none" { + panic(`internal error: queryPath called with version "none"`) + } + + if search.IsStandardImportPath(q.pattern) { + stdOnly := module.Version{} + packages, _ := r.matchInModule(ctx, q.pattern, stdOnly) + if len(packages) > 0 { + if q.rawVersion != "" { + return errSet(fmt.Errorf("can't request explicit version %q of standard library package %s", q.version, q.pattern)) + } + + q.matchesPackages = true + return pathSet{} // No module needed for standard library. + } + } + + pkgMods, mod, err := r.queryPattern(ctx, q.pattern, q.version, r.initialSelected) + if err != nil { + return errSet(err) + } + return pathSet{pkgMods: pkgMods, mod: mod} + }) +} + +// performPatternAllQueries populates the candidates for each query whose +// pattern is "all". +// +// The candidate modules for a given package in "all" depend only on the initial +// build list, but we cannot follow the dependencies of a given package until we +// know which candidate is selected — and that selection may depend on the +// results of other queries. We need to re-evaluate the "all" queries whenever +// the module for one or more packages in "all" are resolved. 
+func (r *resolver) performPatternAllQueries(ctx context.Context) { + if len(r.patternAllQueries) == 0 { + return + } + + findPackage := func(ctx context.Context, path string, m module.Version) (versionOk bool) { + versionOk = true + for _, q := range r.patternAllQueries { + q.pathOnce(path, func() pathSet { + pkgMods, err := r.queryPackages(ctx, path, q.version, r.initialSelected) + if len(pkgMods) != 1 || pkgMods[0] != m { + // There are candidates other than m for the given path, so we can't + // be certain that m will actually be the module selected to provide + // the package. Don't load its dependencies just yet, because they + // might no longer be dependencies after we resolve the correct + // version. + versionOk = false + } + return pathSet{pkgMods: pkgMods, err: err} + }) + } + return versionOk + } + + r.loadPackages(ctx, []string{"all"}, findPackage) + + // Since we built up the candidate lists concurrently, they may be in a + // nondeterministic order. We want 'go get' to be fully deterministic, + // including in which errors it chooses to report, so sort the candidates + // into a deterministic-but-arbitrary order. + for _, q := range r.patternAllQueries { + sort.Slice(q.candidates, func(i, j int) bool { + return q.candidates[i].path < q.candidates[j].path + }) + } +} + +// findAndUpgradeImports returns a pathSet for each package that is not yet +// in the build list but is transitively imported by the packages matching the +// given queries (which must already have been resolved). +// +// If the getU flag ("-u") is set, findAndUpgradeImports also returns a +// pathSet for each module that is not constrained by any other +// command-line argument and has an available matching upgrade. 
func (r *resolver) findAndUpgradeImports(ctx context.Context, queries []*query) (upgrades []pathSet) {
	// Only queries that matched at least one package contribute load patterns.
	patterns := make([]string, 0, len(queries))
	for _, q := range queries {
		if q.matchesPackages {
			patterns = append(patterns, q.pattern)
		}
	}
	if len(patterns) == 0 {
		return nil
	}

	// mu guards concurrent writes to upgrades, which will be sorted
	// (to restore determinism) after loading.
	var mu sync.Mutex

	findPackage := func(ctx context.Context, path string, m module.Version) (versionOk bool) {
		version := "latest"
		if m.Path != "" {
			if getU.version == "" {
				// The user did not request that we upgrade transitive dependencies.
				return true
			}
			if _, ok := r.resolvedVersion[m.Path]; ok {
				// We cannot upgrade m implicitly because its version is determined by
				// an explicit pattern argument.
				return true
			}
			version = getU.version
		}

		// Unlike other queries, the "-u" flag upgrades relative to the build list
		// after applying changes so far, not the initial build list.
		// This is for two reasons:
		//
		//   - The "-u" flag intentionally applies to transitive dependencies,
		//     which may not be known or even resolved in advance of applying
		//     other version changes.
		//
		//   - The "-u" flag, unlike other arguments, does not cause version
		//     conflicts with other queries. (The other query always wins.)

		// Note that pkgMods is scanned before err is examined: presumably
		// queryPackages can return candidates alongside an error, and an
		// already-appropriate selection short-circuits — TODO confirm against
		// queryPackages' contract.
		pkgMods, err := r.queryPackages(ctx, path, version, r.selected)
		for _, u := range pkgMods {
			if u == m {
				// The selected package version is already upgraded appropriately; there
				// is no need to change it.
				return true
			}
		}

		if err != nil {
			if isNoSuchPackageVersion(err) || (m.Path == "" && module.CheckPath(path) != nil) {
				// We can't find the package because it doesn't — or can't — even exist
				// in any module at the latest version. (Note that invalid module paths
				// could in general exist due to replacements, so we at least need to
				// run the query to check those.)
				//
				// There is no version change we can make to fix the package, so leave
				// it unresolved. Either some other query (perhaps a wildcard matching a
				// newly-added dependency for some other missing package) will fill in
				// the gaps, or we will report an error (with a better import stack) in
				// the final LoadPackages call.
				return true
			}
		}

		mu.Lock()
		upgrades = append(upgrades, pathSet{path: path, pkgMods: pkgMods, err: err})
		mu.Unlock()
		return false
	}

	r.loadPackages(ctx, patterns, findPackage)

	// Since we built up the candidate lists concurrently, they may be in a
	// nondeterministic order. We want 'go get' to be fully deterministic,
	// including in which errors it chooses to report, so sort the candidates
	// into a deterministic-but-arbitrary order.
	sort.Slice(upgrades, func(i, j int) bool {
		return upgrades[i].path < upgrades[j].path
	})
	return upgrades
}

// loadPackages loads the packages matching the given patterns, invoking the
// findPackage function for each package that may require a change to the
// build list.
//
// loadPackages invokes the findPackage function for each package loaded from a
// module outside the main module. If the module or version that supplies that
// package needs to be changed due to a query, findPackage may return false
// and the imports of that package will not be loaded.
//
// loadPackages also invokes the findPackage function for each imported package
// that is neither present in the standard library nor in any module in the
// build list.
func (r *resolver) loadPackages(ctx context.Context, patterns []string, findPackage func(ctx context.Context, path string, m module.Version) (versionOk bool)) {
	opts := modload.PackageOpts{
		Tags:                     imports.AnyTags(),
		VendorModulesInGOROOTSrc: true,
		LoadTests:                *getT,
		AssumeRootsImported:      true, // After 'go get foo', imports of foo should build.
		SilencePackageErrors:     true, // May be fixed by subsequent upgrades or downgrades.
		Switcher:                 new(toolchain.Switcher),
	}

	opts.AllowPackage = func(ctx context.Context, path string, m module.Version) error {
		if m.Path == "" || m.Version == "" {
			// Packages in the standard library and main modules are already at their
			// latest (and only) available versions.
			return nil
		}
		if ok := findPackage(ctx, path, m); !ok {
			// Returning the sentinel stops the loader from following this
			// package's imports; we recognize it again in the loop below.
			return errVersionChange
		}
		return nil
	}

	_, pkgs := modload.LoadPackages(ctx, opts, patterns...)
	for _, path := range pkgs {
		const (
			parentPath  = ""
			parentIsStd = false
		)
		_, _, err := modload.Lookup(parentPath, parentIsStd, path)
		if err == nil {
			continue
		}
		if errors.Is(err, errVersionChange) {
			// We already added candidates during loading.
			continue
		}

		var (
			importMissing *modload.ImportMissingError
			ambiguous     *modload.AmbiguousImportError
		)
		if !errors.As(err, &importMissing) && !errors.As(err, &ambiguous) {
			// The package, which is a dependency of something we care about, has some
			// problem that we can't resolve with a version change.
			// Leave the error for the final LoadPackages call.
			continue
		}

		path := path // shadow for the asynchronous closure below
		r.work.Add(func() {
			// The zero module.Version asks findPackage to resolve the missing
			// package at "latest".
			findPackage(ctx, path, module.Version{})
		})
	}
	<-r.work.Idle()
}

// errVersionChange is a sentinel error indicating that a module's version needs
// to be updated before its dependencies can be loaded.
var errVersionChange = errors.New("version change needed")

// resolveQueries resolves candidate sets that are attached to the given
// queries and/or needed to provide the given missing-package dependencies.
//
// resolveQueries starts by resolving one module version from each
// unambiguous pathSet attached to the given queries.
//
// If no unambiguous query results in a change to the build list,
// resolveQueries revisits the ambiguous query candidates and resolves them
// arbitrarily in order to guarantee forward progress.
//
// If all pathSets are resolved without any changes to the build list,
// resolveQueries returns with changed=false.
func (r *resolver) resolveQueries(ctx context.Context, queries []*query) (changed bool) {
	defer base.ExitIfErrors()

	// Note: this is O(N²) with the number of pathSets in the worst case.
	//
	// We could perhaps get it down to O(N) if we were to index the pathSets
	// by module path, so that we only revisit a given pathSet when the
	// version of some module in its containingPackage list has been determined.
	//
	// However, N tends to be small, and most candidate sets will include only one
	// candidate module (so they will be resolved in the first iteration), so for
	// now we'll stick to the simple O(N²) approach.

	resolved := 0
	for {
		prevResolved := resolved

		// If we found modules that were too new, find the max of the required versions
		// and then try to switch to a newer toolchain.
		var sw toolchain.Switcher
		for _, q := range queries {
			for _, cs := range q.candidates {
				sw.Error(cs.err)
			}
		}
		// Only switch if we need a newer toolchain.
		// Otherwise leave the cs.err for reporting later.
		if sw.NeedSwitch() {
			sw.Switch(ctx)
			// If NeedSwitch is true and Switch returns, Switch has failed to locate a newer toolchain.
			// It printed the errors along with one more about not finding a good toolchain.
			base.Exit()
		}

		for _, q := range queries {
			// Reuse the backing array of q.candidates to accumulate the
			// still-ambiguous candidates in place.
			unresolved := q.candidates[:0]

			for _, cs := range q.candidates {
				if cs.err != nil {
					reportError(q, cs.err)
					resolved++
					continue
				}

				filtered, isPackage, m, unique := r.disambiguate(cs)
				if !unique {
					unresolved = append(unresolved, filtered)
					continue
				}

				if m.Path == "" {
					// The query is not viable. Choose an arbitrary candidate from
					// before filtering and “resolve” it to report a conflict.
					isPackage, m = r.chooseArbitrarily(cs)
				}
				if isPackage {
					q.matchesPackages = true
				}
				r.resolve(q, m)
				resolved++
			}

			q.candidates = unresolved
		}

		base.ExitIfErrors()
		if resolved == prevResolved {
			break // No unambiguous candidate remains.
		}
	}

	if resolved > 0 {
		if changed = r.updateBuildList(ctx, nil); changed {
			// The build list has changed, so disregard any remaining ambiguous queries:
			// they might now be determined by requirements in the build list, which we
			// would prefer to use instead of arbitrary versions.
			return true
		}
	}

	// The build list will be the same on the next iteration as it was on this
	// iteration, so any ambiguous queries will remain so. In order to make
	// progress, resolve them arbitrarily but deterministically.
	//
	// If that results in conflicting versions, the user can re-run 'go get'
	// with additional explicit versions for the conflicting packages or
	// modules.
	resolvedArbitrarily := 0
	for _, q := range queries {
		for _, cs := range q.candidates {
			isPackage, m := r.chooseArbitrarily(cs)
			if isPackage {
				q.matchesPackages = true
			}
			r.resolve(q, m)
			resolvedArbitrarily++
		}
	}
	if resolvedArbitrarily > 0 {
		changed = r.updateBuildList(ctx, nil)
	}
	return changed
}

// applyUpgrades disambiguates candidate sets that are needed to upgrade (or
// provide) transitive dependencies imported by previously-resolved packages.
//
// applyUpgrades modifies the build list by adding one module version from each
// pathSet in upgrades, then downgrading (or further upgrading) those modules as
// needed to maintain any already-resolved versions of other modules.
// applyUpgrades does not mark the new versions as resolved, so they can still
// be further modified by other queries (such as wildcards).
//
// If all pathSets are resolved without any changes to the build list,
// applyUpgrades returns with changed=false.
func (r *resolver) applyUpgrades(ctx context.Context, upgrades []pathSet) (changed bool) {
	defer base.ExitIfErrors()

	// Arbitrarily add a "latest" version that provides each missing package, but
	// do not mark the version as resolved: we still want to allow the explicit
	// queries to modify the resulting versions.
	var tentative []module.Version
	for _, cs := range upgrades {
		if cs.err != nil {
			base.Error(cs.err)
			continue
		}

		filtered, _, m, unique := r.disambiguate(cs)
		if !unique {
			_, m = r.chooseArbitrarily(filtered)
		}
		if m.Path == "" {
			// There is no viable candidate for the missing package.
			// Leave it unresolved.
			continue
		}
		tentative = append(tentative, m)
	}
	base.ExitIfErrors()

	changed = r.updateBuildList(ctx, tentative)
	return changed
}

// disambiguate eliminates candidates from cs that conflict with other module
// versions that have already been resolved. If there is only one (unique)
// remaining candidate, disambiguate returns that candidate, along with
// an indication of whether that result interprets cs.path as a package
//
// Note: we're only doing very simple disambiguation here. The goal is to
// reproduce the user's intent, not to find a solution that a human couldn't.
// In the vast majority of cases, we expect only one module per pathSet,
// but we want to give some minimal additional tools so that users can add an
// extra argument or two on the command line to resolve simple ambiguities.
func (r *resolver) disambiguate(cs pathSet) (filtered pathSet, isPackage bool, m module.Version, unique bool) {
	if len(cs.pkgMods) == 0 && cs.mod.Path == "" {
		panic("internal error: resolveIfUnambiguous called with empty pathSet")
	}

	// First pass: filter cs.pkgMods against versions already pinned by other
	// queries, returning early when some pinned version forces a unique answer.
	for _, m := range cs.pkgMods {
		if _, ok := r.noneForPath(m.Path); ok {
			// A query with version "none" forces the candidate module to version
			// "none", so we cannot use any other version for that module.
			continue
		}

		if modload.MainModules.Contains(m.Path) {
			if m.Version == "" {
				return pathSet{}, true, m, true
			}
			// A main module can only be set to its own version.
			continue
		}

		vr, ok := r.resolvedVersion[m.Path]
		if !ok {
			// m is a viable answer to the query, but other answers may also
			// still be viable.
			filtered.pkgMods = append(filtered.pkgMods, m)
			continue
		}

		if vr.version != m.Version {
			// Some query forces the candidate module to a version other than this
			// one.
			//
			// The command could be something like
			//
			//	go get example.com/foo/bar@none example.com/foo/bar/baz@latest
			//
			// in which case we *cannot* resolve the package from
			// example.com/foo/bar (because it is constrained to version
			// "none") and must fall through to module example.com/foo@latest.
			continue
		}

		// Some query forces the candidate module *to* the candidate version.
		// As a result, this candidate is the only viable choice to provide
		// its package(s): any other choice would result in an ambiguous import
		// for this path.
		//
		// For example, consider the command
		//
		//	go get example.com/foo@latest example.com/foo/bar/baz@latest
		//
		// If modules example.com/foo and example.com/foo/bar both provide
		// package example.com/foo/bar/baz, then we *must* resolve the package
		// from example.com/foo: if we instead resolved it from
		// example.com/foo/bar, we would have two copies of the package.
		return pathSet{}, true, m, true
	}

	// The module-only candidate survives filtering only if it is unpinned or
	// pinned to exactly this version.
	if cs.mod.Path != "" {
		vr, ok := r.resolvedVersion[cs.mod.Path]
		if !ok || vr.version == cs.mod.Version {
			filtered.mod = cs.mod
		}
	}

	if len(filtered.pkgMods) == 1 &&
		(filtered.mod.Path == "" || filtered.mod == filtered.pkgMods[0]) {
		// Exactly one viable module contains the package with the given path
		// (by far the common case), so we can resolve it unambiguously.
		return pathSet{}, true, filtered.pkgMods[0], true
	}

	if len(filtered.pkgMods) == 0 {
		// All modules that could provide the path as a package conflict with other
		// resolved arguments. If it can refer to a module instead, return that;
		// otherwise, this pathSet cannot be resolved (and we will return the
		// zero module.Version).
		return pathSet{}, false, filtered.mod, true
	}

	// The query remains ambiguous: there are at least two different modules
	// to which cs.path could refer.
	return filtered, false, module.Version{}, false
}

// chooseArbitrarily returns an arbitrary (but deterministic) module version
// from among those in the given set.
//
// chooseArbitrarily prefers module paths that were already in the build list at
// the start of 'go get', prefers modules that provide packages over those that
// do not, and chooses the first module meeting those criteria (so biases toward
// longer paths).
func (r *resolver) chooseArbitrarily(cs pathSet) (isPackage bool, m module.Version) {
	// Prefer to upgrade some module that was already in the build list.
	for _, m := range cs.pkgMods {
		if r.initialSelected(m.Path) != "none" {
			return true, m
		}
	}

	// Otherwise, arbitrarily choose the first module that provides the package.
	if len(cs.pkgMods) > 0 {
		return true, cs.pkgMods[0]
	}

	return false, cs.mod
}

// checkPackageProblems reloads packages for the given patterns and reports
// missing and ambiguous package errors. It also reports retractions and
// deprecations for resolved modules and modules needed to build named packages.
// It also adds a sum for each updated module in the build list if we had one
// before and didn't get one while loading packages.
//
// We skip missing-package errors earlier in the process, since we want to
// resolve pathSets ourselves, but at that point, we don't have enough context
// to log the package-import chains leading to each error.
func (r *resolver) checkPackageProblems(ctx context.Context, pkgPatterns []string) {
	defer base.ExitIfErrors()

	// Gather information about modules we might want to load retractions and
	// deprecations for. Loading this metadata requires at least one version
	// lookup per module, and we don't want to load information that's neither
	// relevant nor actionable.
	type modFlags int
	const (
		resolved modFlags = 1 << iota // version resolved by 'go get'
		named                         // explicitly named on command line or provides a named package
		hasPkg                        // needed to build named packages
		direct                        // provides a direct dependency of the main module
	)
	relevantMods := make(map[module.Version]modFlags)
	for path, reason := range r.resolvedVersion {
		m := module.Version{Path: path, Version: reason.version}
		relevantMods[m] |= resolved
	}

	// Reload packages, reporting errors for missing and ambiguous imports.
	if len(pkgPatterns) > 0 {
		// LoadPackages will print errors (since it has more context) but will not
		// exit, since we need to load retractions later.
		pkgOpts := modload.PackageOpts{
			VendorModulesInGOROOTSrc: true,
			LoadTests:                *getT,
			ResolveMissingImports:    false,
			AllowErrors:              true,
			SilenceNoGoErrors:        true,
		}
		matches, pkgs := modload.LoadPackages(ctx, pkgOpts, pkgPatterns...)
		for _, m := range matches {
			if len(m.Errs) > 0 {
				base.SetExitStatus(1)
				break
			}
		}
		for _, pkg := range pkgs {
			if dir, _, err := modload.Lookup("", false, pkg); err != nil {
				if dir != "" && errors.Is(err, imports.ErrNoGo) {
					// Since dir is non-empty, we must have located source files
					// associated with either the package or its test — ErrNoGo must
					// indicate that none of those source files happen to apply in this
					// configuration. If we are actually building the package (no -d
					// flag), we will report the problem then; otherwise, assume that the
					// user is going to build or test this package in some other
					// configuration and suppress the error.
					continue
				}

				base.SetExitStatus(1)
				if ambiguousErr := (*modload.AmbiguousImportError)(nil); errors.As(err, &ambiguousErr) {
					// Record every module involved in the ambiguity so we can
					// surface retractions for any of them below.
					for _, m := range ambiguousErr.Modules {
						relevantMods[m] |= hasPkg
					}
				}
			}
			if m := modload.PackageModule(pkg); m.Path != "" {
				relevantMods[m] |= hasPkg
			}
		}
		for _, match := range matches {
			for _, pkg := range match.Pkgs {
				m := modload.PackageModule(pkg)
				relevantMods[m] |= named
			}
		}
	}

	reqs := modload.LoadModFile(ctx)
	for m := range relevantMods {
		if reqs.IsDirect(m.Path) {
			relevantMods[m] |= direct
		}
	}

	// Load retractions for modules mentioned on the command line and modules
	// needed to build named packages. We care about retractions of indirect
	// dependencies, since we might be able to upgrade away from them.
	type modMessage struct {
		m       module.Version
		message string
	}
	retractions := make([]modMessage, 0, len(relevantMods))
	for m, flags := range relevantMods {
		if flags&(resolved|named|hasPkg) != 0 {
			retractions = append(retractions, modMessage{m: m})
		}
	}
	sort.Slice(retractions, func(i, j int) bool { return retractions[i].m.Path < retractions[j].m.Path })
	for i := range retractions {
		i := i // shadow for the asynchronous closure (pre-Go-1.22 loop semantics)
		r.work.Add(func() {
			err := modload.CheckRetractions(ctx, retractions[i].m)
			if retractErr := (*modload.ModuleRetractedError)(nil); errors.As(err, &retractErr) {
				retractions[i].message = err.Error()
			}
		})
	}

	// Load deprecations for modules mentioned on the command line. Only load
	// deprecations for indirect dependencies if they're also direct dependencies
	// of the main module. Deprecations of purely indirect dependencies are
	// not actionable.
	deprecations := make([]modMessage, 0, len(relevantMods))
	for m, flags := range relevantMods {
		if flags&(resolved|named) != 0 || flags&(hasPkg|direct) == hasPkg|direct {
			deprecations = append(deprecations, modMessage{m: m})
		}
	}
	sort.Slice(deprecations, func(i, j int) bool { return deprecations[i].m.Path < deprecations[j].m.Path })
	for i := range deprecations {
		i := i // shadow for the asynchronous closure
		r.work.Add(func() {
			deprecation, err := modload.CheckDeprecation(ctx, deprecations[i].m)
			if err != nil || deprecation == "" {
				return
			}
			deprecations[i].message = modload.ShortMessage(deprecation, "")
		})
	}

	// Load sums for updated modules that had sums before. When we update a
	// module, we may update another module in the build list that provides a
	// package in 'all' that wasn't loaded as part of this 'go get' command.
	// If we don't add a sum for that module, builds may fail later.
	// Note that an incidentally updated package could still import packages
	// from unknown modules or from modules in the build list that we didn't
	// need previously. We can't handle that case without loading 'all'.
	sumErrs := make([]error, len(r.buildList))
	for i := range r.buildList {
		i := i // shadow for the asynchronous closure
		m := r.buildList[i]
		// Resolve any replacement for both the new and old versions so the
		// sum check operates on the modules actually fetched.
		mActual := m
		if mRepl := modload.Replacement(m); mRepl.Path != "" {
			mActual = mRepl
		}
		old := module.Version{Path: m.Path, Version: r.initialVersion[m.Path]}
		if old.Version == "" {
			continue
		}
		oldActual := old
		if oldRepl := modload.Replacement(old); oldRepl.Path != "" {
			oldActual = oldRepl
		}
		if mActual == oldActual || mActual.Version == "" || !modfetch.HaveSum(oldActual) {
			continue
		}
		r.work.Add(func() {
			if _, err := modfetch.DownloadZip(ctx, mActual); err != nil {
				verb := "upgraded"
				if gover.ModCompare(m.Path, m.Version, old.Version) < 0 {
					verb = "downgraded"
				}
				replaced := ""
				if mActual != m {
					replaced = fmt.Sprintf(" (replaced by %s)", mActual)
				}
				err = fmt.Errorf("%s %s %s => %s%s: error finding sum for %s: %v", verb, m.Path, old.Version, m.Version, replaced, mActual, err)
				sumErrs[i] = err
			}
		})
	}

	// Wait for all of the retraction, deprecation, and sum checks queued above.
	<-r.work.Idle()

	// Report deprecations, then retractions, then errors fetching sums.
	// Only errors fetching sums are hard errors.
	for _, mm := range deprecations {
		if mm.message != "" {
			fmt.Fprintf(os.Stderr, "go: module %s is deprecated: %s\n", mm.m.Path, mm.message)
		}
	}
	// retractPath holds the single retracted module path, or "" if zero or
	// more than one module was retracted (the @latest hint only makes sense
	// for exactly one).
	var retractPath string
	for _, mm := range retractions {
		if mm.message != "" {
			fmt.Fprintf(os.Stderr, "go: warning: %v\n", mm.message)
			if retractPath == "" {
				retractPath = mm.m.Path
			} else {
				retractPath = ""
			}
		}
	}
	if retractPath != "" {
		fmt.Fprintf(os.Stderr, "go: to switch to the latest unretracted version, run:\n\tgo get %s@latest\n", retractPath)
	}
	for _, err := range sumErrs {
		if err != nil {
			base.Error(err)
		}
	}
	base.ExitIfErrors()
}

// reportChanges logs version changes to os.Stderr.
//
// reportChanges only logs changes to modules named on the command line and to
// explicitly required modules in go.mod. Most changes to indirect requirements
// are not relevant to the user and are not logged.
//
// reportChanges should be called after WriteGoMod.
func (r *resolver) reportChanges(oldReqs, newReqs []module.Version) {
	type change struct {
		path, old, new string
	}
	changes := make(map[string]change)

	// Collect changes in modules matched by command line arguments.
	for path, reason := range r.resolvedVersion {
		if gover.IsToolchain(path) {
			continue
		}
		old := r.initialVersion[path]
		new := reason.version
		if old != new && (old != "" || new != "none") {
			changes[path] = change{path, old, new}
		}
	}

	// Collect changes to explicit requirements in go.mod.
	for _, req := range oldReqs {
		if gover.IsToolchain(req.Path) {
			continue
		}
		path := req.Path
		old := req.Version
		new := r.buildListVersion[path]
		if old != new {
			changes[path] = change{path, old, new}
		}
	}
	for _, req := range newReqs {
		if gover.IsToolchain(req.Path) {
			continue
		}
		path := req.Path
		old := r.initialVersion[path]
		new := req.Version
		if old != new {
			changes[path] = change{path, old, new}
		}
	}

	// Toolchain diffs are easier than requirements: diff old and new directly.
	toolchainVersions := func(reqs []module.Version) (goV, toolchain string) {
		for _, req := range reqs {
			if req.Path == "go" {
				goV = req.Version
			}
			if req.Path == "toolchain" {
				toolchain = req.Version
			}
		}
		return
	}
	oldGo, oldToolchain := toolchainVersions(oldReqs)
	newGo, newToolchain := toolchainVersions(newReqs)
	if oldGo != newGo {
		changes["go"] = change{"go", oldGo, newGo}
	}
	if oldToolchain != newToolchain {
		changes["toolchain"] = change{"toolchain", oldToolchain, newToolchain}
	}

	sortedChanges := make([]change, 0, len(changes))
	for _, c := range changes {
		sortedChanges = append(sortedChanges, c)
	}
	sort.Slice(sortedChanges, func(i, j int) bool {
		pi := sortedChanges[i].path
		pj := sortedChanges[j].path
		if pi == pj {
			return false
		}
		// go first; toolchain second
		switch {
		case pi == "go":
			return true
		case pj == "go":
			return false
		case pi == "toolchain":
			return true
		case pj == "toolchain":
			return false
		}
		return pi < pj
	})

	for _, c := range sortedChanges {
		if c.old == "" {
			fmt.Fprintf(os.Stderr, "go: added %s %s\n", c.path, c.new)
		} else if c.new == "none" || c.new == "" {
			fmt.Fprintf(os.Stderr, "go: removed %s %s\n", c.path, c.old)
		} else if gover.ModCompare(c.path, c.new, c.old) > 0 {
			fmt.Fprintf(os.Stderr, "go: upgraded %s %s => %s\n", c.path, c.old, c.new)
			if c.path == "go" && gover.Compare(c.old, gover.ExplicitIndirectVersion) < 0 && gover.Compare(c.new, gover.ExplicitIndirectVersion) >= 0 {
				fmt.Fprintf(os.Stderr, "\tnote: expanded dependencies to upgrade to go %s or higher; run 'go mod tidy' to clean up\n", gover.ExplicitIndirectVersion)
			}

		} else {
			fmt.Fprintf(os.Stderr, "go: downgraded %s %s => %s\n", c.path, c.old, c.new)
		}
	}

	// TODO(golang.org/issue/33284): attribute changes to command line arguments.
	// For modules matched by command line arguments, this probably isn't
	// necessary, but it would be useful for unmatched direct dependencies of
	// the main module.
}

// resolve records that module m must be at its indicated version (which may be
// "none") due to query q. If some other query forces module m to be at a
// different version, resolve reports a conflict error.
func (r *resolver) resolve(q *query, m module.Version) {
	if m.Path == "" {
		panic("internal error: resolving a module.Version with an empty path")
	}

	if modload.MainModules.Contains(m.Path) && m.Version != "" {
		reportError(q, &modload.QueryMatchesMainModulesError{
			MainModules: []module.Version{{Path: m.Path}},
			Pattern:     q.pattern,
			Query:       q.version,
		})
		return
	}

	vr, ok := r.resolvedVersion[m.Path]
	if ok && vr.version != m.Version {
		reportConflict(q, m, vr)
		return
	}
	r.resolvedVersion[m.Path] = versionReason{m.Version, q}
	q.resolved = append(q.resolved, m)
}

// updateBuildList updates the module loader's global build list to be
// consistent with r.resolvedVersion, and to include additional modules
// provided that they do not conflict with the resolved versions.
//
// If the additional modules conflict with the resolved versions, they will be
// downgraded to a non-conflicting version (possibly "none").
//
// If the resulting build list is the same as the one resulting from the last
// call to updateBuildList, updateBuildList returns with changed=false.
+func (r *resolver) updateBuildList(ctx context.Context, additions []module.Version) (changed bool) { + defer base.ExitIfErrors() + + resolved := make([]module.Version, 0, len(r.resolvedVersion)) + for mPath, rv := range r.resolvedVersion { + if !modload.MainModules.Contains(mPath) { + resolved = append(resolved, module.Version{Path: mPath, Version: rv.version}) + } + } + + changed, err := modload.EditBuildList(ctx, additions, resolved) + if err != nil { + if errors.Is(err, gover.ErrTooNew) { + toolchain.SwitchOrFatal(ctx, err) + } + + var constraint *modload.ConstraintError + if !errors.As(err, &constraint) { + base.Fatal(err) + } + + if cfg.BuildV { + // Log complete paths for the conflicts before we summarize them. + for _, c := range constraint.Conflicts { + fmt.Fprintf(os.Stderr, "go: %v\n", c.String()) + } + } + + // modload.EditBuildList reports constraint errors at + // the module level, but 'go get' operates on packages. + // Rewrite the errors to explain them in terms of packages. 
+ reason := func(m module.Version) string { + rv, ok := r.resolvedVersion[m.Path] + if !ok { + return fmt.Sprintf("(INTERNAL ERROR: no reason found for %v)", m) + } + return rv.reason.ResolvedString(module.Version{Path: m.Path, Version: rv.version}) + } + for _, c := range constraint.Conflicts { + adverb := "" + if len(c.Path) > 2 { + adverb = "indirectly " + } + firstReason := reason(c.Path[0]) + last := c.Path[len(c.Path)-1] + if c.Err != nil { + base.Errorf("go: %v %srequires %v: %v", firstReason, adverb, last, c.UnwrapModuleError()) + } else { + base.Errorf("go: %v %srequires %v, not %v", firstReason, adverb, last, reason(c.Constraint)) + } + } + return false + } + if !changed { + return false + } + + mg, err := modload.LoadModGraph(ctx, "") + if err != nil { + toolchain.SwitchOrFatal(ctx, err) + } + + r.buildList = mg.BuildList() + r.buildListVersion = make(map[string]string, len(r.buildList)) + for _, m := range r.buildList { + r.buildListVersion[m.Path] = m.Version + } + return true +} + +func reqsFromGoMod(f *modfile.File) []module.Version { + reqs := make([]module.Version, len(f.Require), 2+len(f.Require)) + for i, r := range f.Require { + reqs[i] = r.Mod + } + if f.Go != nil { + reqs = append(reqs, module.Version{Path: "go", Version: f.Go.Version}) + } + if f.Toolchain != nil { + reqs = append(reqs, module.Version{Path: "toolchain", Version: f.Toolchain.Name}) + } + return reqs +} + +// isNoSuchModuleVersion reports whether err indicates that the requested module +// does not exist at the requested version, either because the module does not +// exist at all or because it does not include that specific version. 
+func isNoSuchModuleVersion(err error) bool {
+	var noMatch *modload.NoMatchingVersionError
+	return errors.Is(err, os.ErrNotExist) || errors.As(err, &noMatch)
+}
+
+// isNoSuchPackageVersion reports whether err indicates that the requested
+// package does not exist at the requested version, either because no module
+// that could contain it exists at that version, or because every such module
+// that does exist does not actually contain the package.
+func isNoSuchPackageVersion(err error) bool {
+	var noPackage *modload.PackageNotInModuleError
+	return isNoSuchModuleVersion(err) || errors.As(err, &noPackage)
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modget/query.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modget/query.go
new file mode 100644
index 0000000000000000000000000000000000000000..498ba6c2ff862b50710662cf7f10a72a4a83cafe
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modget/query.go
@@ -0,0 +1,358 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package modget
+
+import (
+	"fmt"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"sync"
+
+	"cmd/go/internal/base"
+	"cmd/go/internal/gover"
+	"cmd/go/internal/modload"
+	"cmd/go/internal/search"
+	"cmd/go/internal/str"
+	"cmd/internal/pkgpattern"
+
+	"golang.org/x/mod/module"
+)
+
+// A query describes a command-line argument and the modules and/or packages
+// to which that argument may resolve.
+type query struct {
+	// raw is the original argument, to be printed in error messages.
+	raw string
+
+	// rawVersion is the portion of raw corresponding to version, if any
+	rawVersion string
+
+	// pattern is the part of the argument before "@" (or the whole argument
+	// if there is no "@"), which may match either packages (preferred) or
+	// modules (if no matching packages).
+	//
+	// The pattern may also be "-u", for the synthetic query representing the -u
+	// (“upgrade”) flag.
+	pattern string
+
+	// patternIsLocal indicates whether pattern is restricted to match only paths
+	// local to the main module, such as absolute filesystem paths or paths
+	// beginning with './'.
+	//
+	// A local pattern must resolve to one or more packages in the main module.
+	patternIsLocal bool
+
+	// version is the part of the argument after "@", or an implied
+	// "upgrade" or "patch" if there is no "@". version specifies the
+	// module version to get.
+	version string
+
+	// matchWildcard, if non-nil, reports whether pattern, which must be a
+	// wildcard (with the substring "..."), matches the given package or module
+	// path.
+	matchWildcard func(path string) bool
+
+	// canMatchWildcardInModule, if non-nil, reports whether the module with the given
+	// path could lexically contain a package matching pattern, which must be a
+	// wildcard.
+	canMatchWildcardInModule func(mPath string) bool
+
+	// conflict is the first query identified as incompatible with this one.
+	// conflict forces one or more of the modules matching this query to a
+	// version that does not match version.
+	conflict *query
+
+	// candidates is a list of sets of alternatives for a path that matches (or
+	// contains packages that match) the pattern. The query can be resolved by
+	// choosing exactly one alternative from each set in the list.
+	//
+	// A path-literal query results in only one set: the path itself, which
+	// may resolve to either a package path or a module path.
+	//
+	// A wildcard query results in one set for each matching module path, each
+	// module for which the matching version contains at least one matching
+	// package, and (if no other modules match) one candidate set for the pattern
+	// overall if no existing match is identified in the build list.
+ // + // A query for pattern "all" results in one set for each package transitively + // imported by the main module. + // + // The special query for the "-u" flag results in one set for each + // otherwise-unconstrained package that has available upgrades. + candidates []pathSet + candidatesMu sync.Mutex + + // pathSeen ensures that only one pathSet is added to the query per + // unique path. + pathSeen sync.Map + + // resolved contains the set of modules whose versions have been determined by + // this query, in the order in which they were determined. + // + // The resolver examines the candidate sets for each query, resolving one + // module per candidate set in a way that attempts to avoid obvious conflicts + // between the versions resolved by different queries. + resolved []module.Version + + // matchesPackages is true if the resolved modules provide at least one + // package matching q.pattern. + matchesPackages bool +} + +// A pathSet describes the possible options for resolving a specific path +// to a package and/or module. +type pathSet struct { + // path is a package (if "all" or "-u" or a non-wildcard) or module (if + // wildcard) path that could be resolved by adding any of the modules in this + // set. For a wildcard pattern that so far matches no packages, the path is + // the wildcard pattern itself. + // + // Each path must occur only once in a query's candidate sets, and the path is + // added implicitly to each pathSet returned to pathOnce. + path string + + // pkgMods is a set of zero or more modules, each of which contains the + // package with the indicated path. Due to the requirement that imports be + // unambiguous, only one such module can be in the build list, and all others + // must be excluded. + pkgMods []module.Version + + // mod is either the zero Version, or a module that does not contain any + // packages matching the query but for which the module path itself + // matches the query pattern. 
+ // + // We track this module separately from pkgMods because, all else equal, we + // prefer to match a query to a package rather than just a module. Also, + // unlike the modules in pkgMods, this module does not inherently exclude + // any other module in pkgMods. + mod module.Version + + err error +} + +// errSet returns a pathSet containing the given error. +func errSet(err error) pathSet { return pathSet{err: err} } + +// newQuery returns a new query parsed from the raw argument, +// which must be either path or path@version. +func newQuery(raw string) (*query, error) { + pattern, rawVers, found := strings.Cut(raw, "@") + if found && (strings.Contains(rawVers, "@") || rawVers == "") { + return nil, fmt.Errorf("invalid module version syntax %q", raw) + } + + // If no version suffix is specified, assume @upgrade. + // If -u=patch was specified, assume @patch instead. + version := rawVers + if version == "" { + if getU.version == "" { + version = "upgrade" + } else { + version = getU.version + } + } + + q := &query{ + raw: raw, + rawVersion: rawVers, + pattern: pattern, + patternIsLocal: filepath.IsAbs(pattern) || search.IsRelativePath(pattern), + version: version, + } + if strings.Contains(q.pattern, "...") { + q.matchWildcard = pkgpattern.MatchPattern(q.pattern) + q.canMatchWildcardInModule = pkgpattern.TreeCanMatchPattern(q.pattern) + } + if err := q.validate(); err != nil { + return q, err + } + return q, nil +} + +// validate reports a non-nil error if q is not sensible and well-formed. +func (q *query) validate() error { + if q.patternIsLocal { + if q.rawVersion != "" { + return fmt.Errorf("can't request explicit version %q of path %q in main module", q.rawVersion, q.pattern) + } + return nil + } + + if q.pattern == "all" { + // If there is no main module, "all" is not meaningful. 
+ if !modload.HasModRoot() { + return fmt.Errorf(`cannot match "all": %v`, modload.ErrNoModRoot) + } + if !versionOkForMainModule(q.version) { + // TODO(bcmills): "all@none" seems like a totally reasonable way to + // request that we remove all module requirements, leaving only the main + // module and standard library. Perhaps we should implement that someday. + return &modload.QueryUpgradesAllError{ + MainModules: modload.MainModules.Versions(), + Query: q.version, + } + } + } + + if search.IsMetaPackage(q.pattern) && q.pattern != "all" { + if q.pattern != q.raw { + return fmt.Errorf("can't request explicit version of standard-library pattern %q", q.pattern) + } + } + + return nil +} + +// String returns the original argument from which q was parsed. +func (q *query) String() string { return q.raw } + +// ResolvedString returns a string describing m as a resolved match for q. +func (q *query) ResolvedString(m module.Version) string { + if m.Path != q.pattern { + if m.Version != q.version { + return fmt.Sprintf("%v (matching %s@%s)", m, q.pattern, q.version) + } + return fmt.Sprintf("%v (matching %v)", m, q) + } + if m.Version != q.version { + return fmt.Sprintf("%s@%s (%s)", q.pattern, q.version, m.Version) + } + return q.String() +} + +// isWildcard reports whether q is a pattern that can match multiple paths. +func (q *query) isWildcard() bool { + return q.matchWildcard != nil || (q.patternIsLocal && strings.Contains(q.pattern, "...")) +} + +// matchesPath reports whether the given path matches q.pattern. +func (q *query) matchesPath(path string) bool { + if q.matchWildcard != nil && !gover.IsToolchain(path) { + return q.matchWildcard(path) + } + return path == q.pattern +} + +// canMatchInModule reports whether the given module path can potentially +// contain q.pattern. 
+func (q *query) canMatchInModule(mPath string) bool { + if gover.IsToolchain(mPath) { + return false + } + if q.canMatchWildcardInModule != nil { + return q.canMatchWildcardInModule(mPath) + } + return str.HasPathPrefix(q.pattern, mPath) +} + +// pathOnce invokes f to generate the pathSet for the given path, +// if one is still needed. +// +// Note that, unlike sync.Once, pathOnce does not guarantee that a concurrent +// call to f for the given path has completed on return. +// +// pathOnce is safe for concurrent use by multiple goroutines, but note that +// multiple concurrent calls will result in the sets being added in +// nondeterministic order. +func (q *query) pathOnce(path string, f func() pathSet) { + if _, dup := q.pathSeen.LoadOrStore(path, nil); dup { + return + } + + cs := f() + + if len(cs.pkgMods) > 0 || cs.mod != (module.Version{}) || cs.err != nil { + cs.path = path + q.candidatesMu.Lock() + q.candidates = append(q.candidates, cs) + q.candidatesMu.Unlock() + } +} + +// reportError logs err concisely using base.Errorf. +func reportError(q *query, err error) { + errStr := err.Error() + + // If err already mentions all of the relevant parts of q, just log err to + // reduce stutter. Otherwise, log both q and err. + // + // TODO(bcmills): Use errors.As to unpack these errors instead of parsing + // strings with regular expressions. 
+ + patternRE := regexp.MustCompile("(?m)(?:[ \t(\"`]|^)" + regexp.QuoteMeta(q.pattern) + "(?:[ @:;)\"`]|$)") + if patternRE.MatchString(errStr) { + if q.rawVersion == "" { + base.Errorf("go: %s", errStr) + return + } + + versionRE := regexp.MustCompile("(?m)(?:[ @(\"`]|^)" + regexp.QuoteMeta(q.version) + "(?:[ :;)\"`]|$)") + if versionRE.MatchString(errStr) { + base.Errorf("go: %s", errStr) + return + } + } + + if qs := q.String(); qs != "" { + base.Errorf("go: %s: %s", qs, errStr) + } else { + base.Errorf("go: %s", errStr) + } +} + +func reportConflict(pq *query, m module.Version, conflict versionReason) { + if pq.conflict != nil { + // We've already reported a conflict for the proposed query. + // Don't report it again, even if it has other conflicts. + return + } + pq.conflict = conflict.reason + + proposed := versionReason{ + version: m.Version, + reason: pq, + } + if pq.isWildcard() && !conflict.reason.isWildcard() { + // Prefer to report the specific path first and the wildcard second. 
+ proposed, conflict = conflict, proposed + } + reportError(pq, &conflictError{ + mPath: m.Path, + proposed: proposed, + conflict: conflict, + }) +} + +type conflictError struct { + mPath string + proposed versionReason + conflict versionReason +} + +func (e *conflictError) Error() string { + argStr := func(q *query, v string) string { + if v != q.version { + return fmt.Sprintf("%s@%s (%s)", q.pattern, q.version, v) + } + return q.String() + } + + pq := e.proposed.reason + rq := e.conflict.reason + modDetail := "" + if e.mPath != pq.pattern { + modDetail = fmt.Sprintf("for module %s, ", e.mPath) + } + + return fmt.Sprintf("%s%s conflicts with %s", + modDetail, + argStr(pq, e.proposed.version), + argStr(rq, e.conflict.version)) +} + +func versionOkForMainModule(version string) bool { + return version == "upgrade" || version == "patch" +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/build.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/build.go new file mode 100644 index 0000000000000000000000000000000000000000..0b06373984190a53f1b2dc1f30e5f96662e0d404 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/build.go @@ -0,0 +1,955 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a lightly modified copy go/build/build.go with unused parts +// removed. + +package modindex + +import ( + "bytes" + "cmd/go/internal/fsys" + "cmd/go/internal/str" + "errors" + "fmt" + "go/ast" + "go/build" + "go/build/constraint" + "go/token" + "io" + "io/fs" + "path/filepath" + "sort" + "strings" + "unicode" + "unicode/utf8" +) + +// A Context specifies the supporting context for a build. 
+type Context struct { + GOARCH string // target architecture + GOOS string // target operating system + GOROOT string // Go root + GOPATH string // Go paths + + // Dir is the caller's working directory, or the empty string to use + // the current directory of the running process. In module mode, this is used + // to locate the main module. + // + // If Dir is non-empty, directories passed to Import and ImportDir must + // be absolute. + Dir string + + CgoEnabled bool // whether cgo files are included + UseAllFiles bool // use files regardless of //go:build lines, file names + Compiler string // compiler to assume when computing target paths + + // The build, tool, and release tags specify build constraints + // that should be considered satisfied when processing +build lines. + // Clients creating a new context may customize BuildTags, which + // defaults to empty, but it is usually an error to customize ToolTags or ReleaseTags. + // ToolTags defaults to build tags appropriate to the current Go toolchain configuration. + // ReleaseTags defaults to the list of Go releases the current release is compatible with. + // BuildTags is not set for the Default build Context. + // In addition to the BuildTags, ToolTags, and ReleaseTags, build constraints + // consider the values of GOARCH and GOOS as satisfied tags. + // The last element in ReleaseTags is assumed to be the current release. + BuildTags []string + ToolTags []string + ReleaseTags []string + + // The install suffix specifies a suffix to use in the name of the installation + // directory. By default it is empty, but custom builds that need to keep + // their outputs separate can set InstallSuffix to do so. For example, when + // using the race detector, the go command uses InstallSuffix = "race", so + // that on a Linux/386 system, packages are written to a directory named + // "linux_386_race" instead of the usual "linux_386". 
+ InstallSuffix string + + // By default, Import uses the operating system's file system calls + // to read directories and files. To read from other sources, + // callers can set the following functions. They all have default + // behaviors that use the local file system, so clients need only set + // the functions whose behaviors they wish to change. + + // JoinPath joins the sequence of path fragments into a single path. + // If JoinPath is nil, Import uses filepath.Join. + JoinPath func(elem ...string) string + + // SplitPathList splits the path list into a slice of individual paths. + // If SplitPathList is nil, Import uses filepath.SplitList. + SplitPathList func(list string) []string + + // IsAbsPath reports whether path is an absolute path. + // If IsAbsPath is nil, Import uses filepath.IsAbs. + IsAbsPath func(path string) bool + + // IsDir reports whether the path names a directory. + // If IsDir is nil, Import calls os.Stat and uses the result's IsDir method. + IsDir func(path string) bool + + // HasSubdir reports whether dir is lexically a subdirectory of + // root, perhaps multiple levels below. It does not try to check + // whether dir exists. + // If so, HasSubdir sets rel to a slash-separated path that + // can be joined to root to produce a path equivalent to dir. + // If HasSubdir is nil, Import uses an implementation built on + // filepath.EvalSymlinks. + HasSubdir func(root, dir string) (rel string, ok bool) + + // ReadDir returns a slice of fs.FileInfo, sorted by Name, + // describing the content of the named directory. + // If ReadDir is nil, Import uses ioutil.ReadDir. + ReadDir func(dir string) ([]fs.FileInfo, error) + + // OpenFile opens a file (not a directory) for reading. + // If OpenFile is nil, Import uses os.Open. + OpenFile func(path string) (io.ReadCloser, error) +} + +// joinPath calls ctxt.JoinPath (if not nil) or else filepath.Join. 
+func (ctxt *Context) joinPath(elem ...string) string { + if f := ctxt.JoinPath; f != nil { + return f(elem...) + } + return filepath.Join(elem...) +} + +// splitPathList calls ctxt.SplitPathList (if not nil) or else filepath.SplitList. +func (ctxt *Context) splitPathList(s string) []string { + if f := ctxt.SplitPathList; f != nil { + return f(s) + } + return filepath.SplitList(s) +} + +// isAbsPath calls ctxt.IsAbsPath (if not nil) or else filepath.IsAbs. +func (ctxt *Context) isAbsPath(path string) bool { + if f := ctxt.IsAbsPath; f != nil { + return f(path) + } + return filepath.IsAbs(path) +} + +// isDir calls ctxt.IsDir (if not nil) or else uses fsys.Stat. +func isDir(path string) bool { + fi, err := fsys.Stat(path) + return err == nil && fi.IsDir() +} + +// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses +// the local file system to answer the question. +func (ctxt *Context) hasSubdir(root, dir string) (rel string, ok bool) { + if f := ctxt.HasSubdir; f != nil { + return f(root, dir) + } + + // Try using paths we received. + if rel, ok = hasSubdir(root, dir); ok { + return + } + + // Try expanding symlinks and comparing + // expanded against unexpanded and + // expanded against expanded. + rootSym, _ := filepath.EvalSymlinks(root) + dirSym, _ := filepath.EvalSymlinks(dir) + + if rel, ok = hasSubdir(rootSym, dir); ok { + return + } + if rel, ok = hasSubdir(root, dirSym); ok { + return + } + return hasSubdir(rootSym, dirSym) +} + +// hasSubdir reports if dir is within root by performing lexical analysis only. +func hasSubdir(root, dir string) (rel string, ok bool) { + root = str.WithFilePathSeparator(filepath.Clean(root)) + dir = filepath.Clean(dir) + if !strings.HasPrefix(dir, root) { + return "", false + } + return filepath.ToSlash(dir[len(root):]), true +} + +// gopath returns the list of Go path directories. 
+func (ctxt *Context) gopath() []string { + var all []string + for _, p := range ctxt.splitPathList(ctxt.GOPATH) { + if p == "" || p == ctxt.GOROOT { + // Empty paths are uninteresting. + // If the path is the GOROOT, ignore it. + // People sometimes set GOPATH=$GOROOT. + // Do not get confused by this common mistake. + continue + } + if strings.HasPrefix(p, "~") { + // Path segments starting with ~ on Unix are almost always + // users who have incorrectly quoted ~ while setting GOPATH, + // preventing it from expanding to $HOME. + // The situation is made more confusing by the fact that + // bash allows quoted ~ in $PATH (most shells do not). + // Do not get confused by this, and do not try to use the path. + // It does not exist, and printing errors about it confuses + // those users even more, because they think "sure ~ exists!". + // The go command diagnoses this situation and prints a + // useful error. + // On Windows, ~ is used in short names, such as c:\progra~1 + // for c:\program files. + continue + } + all = append(all, p) + } + return all +} + +var defaultToolTags, defaultReleaseTags []string + +// NoGoError is the error used by Import to describe a directory +// containing no buildable Go source files. (It may still contain +// test files, files hidden by build tags, and so on.) +type NoGoError struct { + Dir string +} + +func (e *NoGoError) Error() string { + return "no buildable Go source files in " + e.Dir +} + +// MultiplePackageError describes a directory containing +// multiple buildable Go source files for multiple packages. +type MultiplePackageError struct { + Dir string // directory containing files + Packages []string // package names found + Files []string // corresponding files: Files[i] declares package Packages[i] +} + +func (e *MultiplePackageError) Error() string { + // Error string limited to two entries for compatibility. 
+ return fmt.Sprintf("found packages %s (%s) and %s (%s) in %s", e.Packages[0], e.Files[0], e.Packages[1], e.Files[1], e.Dir) +} + +func nameExt(name string) string { + i := strings.LastIndex(name, ".") + if i < 0 { + return "" + } + return name[i:] +} + +func fileListForExt(p *build.Package, ext string) *[]string { + switch ext { + case ".c": + return &p.CFiles + case ".cc", ".cpp", ".cxx": + return &p.CXXFiles + case ".m": + return &p.MFiles + case ".h", ".hh", ".hpp", ".hxx": + return &p.HFiles + case ".f", ".F", ".for", ".f90": + return &p.FFiles + case ".s", ".S", ".sx": + return &p.SFiles + case ".swig": + return &p.SwigFiles + case ".swigcxx": + return &p.SwigCXXFiles + case ".syso": + return &p.SysoFiles + } + return nil +} + +var errNoModules = errors.New("not using modules") + +func findImportComment(data []byte) (s string, line int) { + // expect keyword package + word, data := parseWord(data) + if string(word) != "package" { + return "", 0 + } + + // expect package name + _, data = parseWord(data) + + // now ready for import comment, a // or /* */ comment + // beginning and ending on the current line. 
+ for len(data) > 0 && (data[0] == ' ' || data[0] == '\t' || data[0] == '\r') { + data = data[1:] + } + + var comment []byte + switch { + case bytes.HasPrefix(data, slashSlash): + comment, _, _ = bytes.Cut(data[2:], newline) + case bytes.HasPrefix(data, slashStar): + var ok bool + comment, _, ok = bytes.Cut(data[2:], starSlash) + if !ok { + // malformed comment + return "", 0 + } + if bytes.Contains(comment, newline) { + return "", 0 + } + } + comment = bytes.TrimSpace(comment) + + // split comment into `import`, `"pkg"` + word, arg := parseWord(comment) + if string(word) != "import" { + return "", 0 + } + + line = 1 + bytes.Count(data[:cap(data)-cap(arg)], newline) + return strings.TrimSpace(string(arg)), line +} + +var ( + slashSlash = []byte("//") + slashStar = []byte("/*") + starSlash = []byte("*/") + newline = []byte("\n") +) + +// skipSpaceOrComment returns data with any leading spaces or comments removed. +func skipSpaceOrComment(data []byte) []byte { + for len(data) > 0 { + switch data[0] { + case ' ', '\t', '\r', '\n': + data = data[1:] + continue + case '/': + if bytes.HasPrefix(data, slashSlash) { + i := bytes.Index(data, newline) + if i < 0 { + return nil + } + data = data[i+1:] + continue + } + if bytes.HasPrefix(data, slashStar) { + data = data[2:] + i := bytes.Index(data, starSlash) + if i < 0 { + return nil + } + data = data[i+2:] + continue + } + } + break + } + return data +} + +// parseWord skips any leading spaces or comments in data +// and then parses the beginning of data as an identifier or keyword, +// returning that word and what remains after the word. +func parseWord(data []byte) (word, rest []byte) { + data = skipSpaceOrComment(data) + + // Parse past leading word characters. 
+ rest = data + for { + r, size := utf8.DecodeRune(rest) + if unicode.IsLetter(r) || '0' <= r && r <= '9' || r == '_' { + rest = rest[size:] + continue + } + break + } + + word = data[:len(data)-len(rest)] + if len(word) == 0 { + return nil, nil + } + + return word, rest +} + +var dummyPkg build.Package + +// fileInfo records information learned about a file included in a build. +type fileInfo struct { + name string // full name including dir + header []byte + fset *token.FileSet + parsed *ast.File + parseErr error + imports []fileImport + embeds []fileEmbed + directives []build.Directive + + // Additional fields added to go/build's fileinfo for the purposes of the modindex package. + binaryOnly bool + goBuildConstraint string + plusBuildConstraints []string +} + +type fileImport struct { + path string + pos token.Pos + doc *ast.CommentGroup +} + +type fileEmbed struct { + pattern string + pos token.Position +} + +var errNonSource = errors.New("non source file") + +// getFileInfo extracts the information needed from each go file for the module +// index. +// +// If Name denotes a Go program, matchFile reads until the end of the +// Imports and returns that section of the file in the FileInfo's Header field, +// even though it only considers text until the first non-comment +// for +build lines. +// +// getFileInfo will return errNonSource if the file is not a source or object +// file and shouldn't even be added to IgnoredFiles. 
+func getFileInfo(dir, name string, fset *token.FileSet) (*fileInfo, error) { + if strings.HasPrefix(name, "_") || + strings.HasPrefix(name, ".") { + return nil, nil + } + + i := strings.LastIndex(name, ".") + if i < 0 { + i = len(name) + } + ext := name[i:] + + if ext != ".go" && fileListForExt(&dummyPkg, ext) == nil { + // skip + return nil, errNonSource + } + + info := &fileInfo{name: filepath.Join(dir, name), fset: fset} + if ext == ".syso" { + // binary, no reading + return info, nil + } + + f, err := fsys.Open(info.name) + if err != nil { + return nil, err + } + + // TODO(matloob) should we decide whether to ignore binary only here or earlier + // when we create the index file? + var ignoreBinaryOnly bool + if strings.HasSuffix(name, ".go") { + err = readGoInfo(f, info) + if strings.HasSuffix(name, "_test.go") { + ignoreBinaryOnly = true // ignore //go:binary-only-package comments in _test.go files + } + } else { + info.header, err = readComments(f) + } + f.Close() + if err != nil { + return nil, fmt.Errorf("read %s: %v", info.name, err) + } + + // Look for +build comments to accept or reject the file. 
+ info.goBuildConstraint, info.plusBuildConstraints, info.binaryOnly, err = getConstraints(info.header) + if err != nil { + return nil, fmt.Errorf("%s: %v", name, err) + } + + if ignoreBinaryOnly && info.binaryOnly { + info.binaryOnly = false // override info.binaryOnly + } + + return info, nil +} + +func cleanDecls(m map[string][]token.Position) ([]string, map[string][]token.Position) { + all := make([]string, 0, len(m)) + for path := range m { + all = append(all, path) + } + sort.Strings(all) + return all, m +} + +var ( + bSlashSlash = []byte(slashSlash) + bStarSlash = []byte(starSlash) + bSlashStar = []byte(slashStar) + bPlusBuild = []byte("+build") + + goBuildComment = []byte("//go:build") + + errMultipleGoBuild = errors.New("multiple //go:build comments") +) + +func isGoBuildComment(line []byte) bool { + if !bytes.HasPrefix(line, goBuildComment) { + return false + } + line = bytes.TrimSpace(line) + rest := line[len(goBuildComment):] + return len(rest) == 0 || len(bytes.TrimSpace(rest)) < len(rest) +} + +// Special comment denoting a binary-only package. +// See https://golang.org/design/2775-binary-only-packages +// for more about the design of binary-only packages. +var binaryOnlyComment = []byte("//go:binary-only-package") + +func getConstraints(content []byte) (goBuild string, plusBuild []string, binaryOnly bool, err error) { + // Identify leading run of // comments and blank lines, + // which must be followed by a blank line. + // Also identify any //go:build comments. + content, goBuildBytes, sawBinaryOnly, err := parseFileHeader(content) + if err != nil { + return "", nil, false, err + } + + // If //go:build line is present, it controls, so no need to look for +build . + // Otherwise, get plusBuild constraints. 
+ if goBuildBytes == nil { + p := content + for len(p) > 0 { + line := p + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, p = line[:i], p[i+1:] + } else { + p = p[len(p):] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, bSlashSlash) || !bytes.Contains(line, bPlusBuild) { + continue + } + text := string(line) + if !constraint.IsPlusBuild(text) { + continue + } + plusBuild = append(plusBuild, text) + } + } + + return string(goBuildBytes), plusBuild, sawBinaryOnly, nil +} + +func parseFileHeader(content []byte) (trimmed, goBuild []byte, sawBinaryOnly bool, err error) { + end := 0 + p := content + ended := false // found non-blank, non-// line, so stopped accepting // +build lines + inSlashStar := false // in /* */ comment + +Lines: + for len(p) > 0 { + line := p + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, p = line[:i], p[i+1:] + } else { + p = p[len(p):] + } + line = bytes.TrimSpace(line) + if len(line) == 0 && !ended { // Blank line + // Remember position of most recent blank line. + // When we find the first non-blank, non-// line, + // this "end" position marks the latest file position + // where a // +build line can appear. + // (It must appear _before_ a blank line before the non-blank, non-// line. + // Yes, that's confusing, which is part of why we moved to //go:build lines.) + // Note that ended==false here means that inSlashStar==false, + // since seeing a /* would have set ended==true. 
+ end = len(content) - len(p) + continue Lines + } + if !bytes.HasPrefix(line, slashSlash) { // Not comment line + ended = true + } + + if !inSlashStar && isGoBuildComment(line) { + if goBuild != nil { + return nil, nil, false, errMultipleGoBuild + } + goBuild = line + } + if !inSlashStar && bytes.Equal(line, binaryOnlyComment) { + sawBinaryOnly = true + } + + Comments: + for len(line) > 0 { + if inSlashStar { + if i := bytes.Index(line, starSlash); i >= 0 { + inSlashStar = false + line = bytes.TrimSpace(line[i+len(starSlash):]) + continue Comments + } + continue Lines + } + if bytes.HasPrefix(line, bSlashSlash) { + continue Lines + } + if bytes.HasPrefix(line, bSlashStar) { + inSlashStar = true + line = bytes.TrimSpace(line[len(bSlashStar):]) + continue Comments + } + // Found non-comment text. + break Lines + } + } + + return content[:end], goBuild, sawBinaryOnly, nil +} + +// saveCgo saves the information from the #cgo lines in the import "C" comment. +// These lines set CFLAGS, CPPFLAGS, CXXFLAGS and LDFLAGS and pkg-config directives +// that affect the way cgo's C code is built. +func (ctxt *Context) saveCgo(filename string, di *build.Package, text string) error { + for _, line := range strings.Split(text, "\n") { + orig := line + + // Line is + // #cgo [GOOS/GOARCH...] LDFLAGS: stuff + // + line = strings.TrimSpace(line) + if len(line) < 5 || line[:4] != "#cgo" || (line[4] != ' ' && line[4] != '\t') { + continue + } + + // #cgo (nocallback|noescape) + if fields := strings.Fields(line); len(fields) == 3 && (fields[1] == "nocallback" || fields[1] == "noescape") { + continue + } + + // Split at colon. + line, argstr, ok := strings.Cut(strings.TrimSpace(line[4:]), ":") + if !ok { + return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig) + } + + // Parse GOOS/GOARCH stuff. 
+ f := strings.Fields(line) + if len(f) < 1 { + return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig) + } + + cond, verb := f[:len(f)-1], f[len(f)-1] + if len(cond) > 0 { + ok := false + for _, c := range cond { + if ctxt.matchAuto(c, nil) { + ok = true + break + } + } + if !ok { + continue + } + } + + args, err := splitQuoted(argstr) + if err != nil { + return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig) + } + for i, arg := range args { + if arg, ok = expandSrcDir(arg, di.Dir); !ok { + return fmt.Errorf("%s: malformed #cgo argument: %s", filename, arg) + } + args[i] = arg + } + + switch verb { + case "CFLAGS", "CPPFLAGS", "CXXFLAGS", "FFLAGS", "LDFLAGS": + // Change relative paths to absolute. + ctxt.makePathsAbsolute(args, di.Dir) + } + + switch verb { + case "CFLAGS": + di.CgoCFLAGS = append(di.CgoCFLAGS, args...) + case "CPPFLAGS": + di.CgoCPPFLAGS = append(di.CgoCPPFLAGS, args...) + case "CXXFLAGS": + di.CgoCXXFLAGS = append(di.CgoCXXFLAGS, args...) + case "FFLAGS": + di.CgoFFLAGS = append(di.CgoFFLAGS, args...) + case "LDFLAGS": + di.CgoLDFLAGS = append(di.CgoLDFLAGS, args...) + case "pkg-config": + di.CgoPkgConfig = append(di.CgoPkgConfig, args...) + default: + return fmt.Errorf("%s: invalid #cgo verb: %s", filename, orig) + } + } + return nil +} + +// expandSrcDir expands any occurrence of ${SRCDIR}, making sure +// the result is safe for the shell. +func expandSrcDir(str string, srcdir string) (string, bool) { + // "\" delimited paths cause safeCgoName to fail + // so convert native paths with a different delimiter + // to "/" before starting (eg: on windows). 
+ srcdir = filepath.ToSlash(srcdir) + + chunks := strings.Split(str, "${SRCDIR}") + if len(chunks) < 2 { + return str, safeCgoName(str) + } + ok := true + for _, chunk := range chunks { + ok = ok && (chunk == "" || safeCgoName(chunk)) + } + ok = ok && (srcdir == "" || safeCgoName(srcdir)) + res := strings.Join(chunks, srcdir) + return res, ok && res != "" +} + +// makePathsAbsolute looks for compiler options that take paths and +// makes them absolute. We do this because through the 1.8 release we +// ran the compiler in the package directory, so any relative -I or -L +// options would be relative to that directory. In 1.9 we changed to +// running the compiler in the build directory, to get consistent +// build results (issue #19964). To keep builds working, we change any +// relative -I or -L options to be absolute. +// +// Using filepath.IsAbs and filepath.Join here means the results will be +// different on different systems, but that's OK: -I and -L options are +// inherently system-dependent. +func (ctxt *Context) makePathsAbsolute(args []string, srcDir string) { + nextPath := false + for i, arg := range args { + if nextPath { + if !filepath.IsAbs(arg) { + args[i] = filepath.Join(srcDir, arg) + } + nextPath = false + } else if strings.HasPrefix(arg, "-I") || strings.HasPrefix(arg, "-L") { + if len(arg) == 2 { + nextPath = true + } else { + if !filepath.IsAbs(arg[2:]) { + args[i] = arg[:2] + filepath.Join(srcDir, arg[2:]) + } + } + } + } +} + +// NOTE: $ is not safe for the shell, but it is allowed here because of linker options like -Wl,$ORIGIN. +// We never pass these arguments to a shell (just to programs we construct argv for), so this should be okay. +// See golang.org/issue/6038. +// The @ is for OS X. See golang.org/issue/13720. +// The % is for Jenkins. See golang.org/issue/16959. +// The ! is because module paths may use them. See golang.org/issue/26716. +// The ~ and ^ are for sr.ht. See golang.org/issue/32260. 
+const safeString = "+-.,/0123456789=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz:$@%! ~^" + +func safeCgoName(s string) bool { + if s == "" { + return false + } + for i := 0; i < len(s); i++ { + if c := s[i]; c < utf8.RuneSelf && strings.IndexByte(safeString, c) < 0 { + return false + } + } + return true +} + +// splitQuoted splits the string s around each instance of one or more consecutive +// white space characters while taking into account quotes and escaping, and +// returns an array of substrings of s or an empty list if s contains only white space. +// Single quotes and double quotes are recognized to prevent splitting within the +// quoted region, and are removed from the resulting substrings. If a quote in s +// isn't closed err will be set and r will have the unclosed argument as the +// last element. The backslash is used for escaping. +// +// For example, the following string: +// +// a b:"c d" 'e''f' "g\"" +// +// Would be parsed as: +// +// []string{"a", "b:c d", "ef", `g"`} +func splitQuoted(s string) (r []string, err error) { + var args []string + arg := make([]rune, len(s)) + escaped := false + quoted := false + quote := '\x00' + i := 0 + for _, rune := range s { + switch { + case escaped: + escaped = false + case rune == '\\': + escaped = true + continue + case quote != '\x00': + if rune == quote { + quote = '\x00' + continue + } + case rune == '"' || rune == '\'': + quoted = true + quote = rune + continue + case unicode.IsSpace(rune): + if quoted || i > 0 { + quoted = false + args = append(args, string(arg[:i])) + i = 0 + } + continue + } + arg[i] = rune + i++ + } + if quoted || i > 0 { + args = append(args, string(arg[:i])) + } + if quote != 0 { + err = errors.New("unclosed quote") + } else if escaped { + err = errors.New("unfinished escaping") + } + return args, err +} + +// matchAuto interprets text as either a +build or //go:build expression (whichever works), +// reporting whether the expression matches the build context. 
+// +// matchAuto is only used for testing of tag evaluation +// and in #cgo lines, which accept either syntax. +func (ctxt *Context) matchAuto(text string, allTags map[string]bool) bool { + if strings.ContainsAny(text, "&|()") { + text = "//go:build " + text + } else { + text = "// +build " + text + } + x, err := constraint.Parse(text) + if err != nil { + return false + } + return ctxt.eval(x, allTags) +} + +func (ctxt *Context) eval(x constraint.Expr, allTags map[string]bool) bool { + return x.Eval(func(tag string) bool { return ctxt.matchTag(tag, allTags) }) +} + +// matchTag reports whether the name is one of: +// +// cgo (if cgo is enabled) +// $GOOS +// $GOARCH +// boringcrypto +// ctxt.Compiler +// linux (if GOOS == android) +// solaris (if GOOS == illumos) +// tag (if tag is listed in ctxt.BuildTags or ctxt.ReleaseTags) +// +// It records all consulted tags in allTags. +func (ctxt *Context) matchTag(name string, allTags map[string]bool) bool { + if allTags != nil { + allTags[name] = true + } + + // special tags + if ctxt.CgoEnabled && name == "cgo" { + return true + } + if name == ctxt.GOOS || name == ctxt.GOARCH || name == ctxt.Compiler { + return true + } + if ctxt.GOOS == "android" && name == "linux" { + return true + } + if ctxt.GOOS == "illumos" && name == "solaris" { + return true + } + if ctxt.GOOS == "ios" && name == "darwin" { + return true + } + if name == "unix" && unixOS[ctxt.GOOS] { + return true + } + if name == "boringcrypto" { + name = "goexperiment.boringcrypto" // boringcrypto is an old name for goexperiment.boringcrypto + } + + // other tags + for _, tag := range ctxt.BuildTags { + if tag == name { + return true + } + } + for _, tag := range ctxt.ToolTags { + if tag == name { + return true + } + } + for _, tag := range ctxt.ReleaseTags { + if tag == name { + return true + } + } + + return false +} + +// goodOSArchFile returns false if the name contains a $GOOS or $GOARCH +// suffix which does not match the current system. 
+// The recognized name formats are: +// +// name_$(GOOS).* +// name_$(GOARCH).* +// name_$(GOOS)_$(GOARCH).* +// name_$(GOOS)_test.* +// name_$(GOARCH)_test.* +// name_$(GOOS)_$(GOARCH)_test.* +// +// Exceptions: +// if GOOS=android, then files with GOOS=linux are also matched. +// if GOOS=illumos, then files with GOOS=solaris are also matched. +// if GOOS=ios, then files with GOOS=darwin are also matched. +func (ctxt *Context) goodOSArchFile(name string, allTags map[string]bool) bool { + name, _, _ = strings.Cut(name, ".") + + // Before Go 1.4, a file called "linux.go" would be equivalent to having a + // build tag "linux" in that file. For Go 1.4 and beyond, we require this + // auto-tagging to apply only to files with a non-empty prefix, so + // "foo_linux.go" is tagged but "linux.go" is not. This allows new operating + // systems, such as android, to arrive without breaking existing code with + // innocuous source code in "android.go". The easiest fix: cut everything + // in the name before the initial _. + i := strings.Index(name, "_") + if i < 0 { + return true + } + name = name[i:] // ignore everything before first _ + + l := strings.Split(name, "_") + if n := len(l); n > 0 && l[n-1] == "test" { + l = l[:n-1] + } + n := len(l) + if n >= 2 && knownOS[l[n-2]] && knownArch[l[n-1]] { + if allTags != nil { + // In case we short-circuit on l[n-1]. 
+ allTags[l[n-2]] = true + } + return ctxt.matchTag(l[n-1], allTags) && ctxt.matchTag(l[n-2], allTags) + } + if n >= 1 && (knownOS[l[n-1]] || knownArch[l[n-1]]) { + return ctxt.matchTag(l[n-1], allTags) + } + return true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/build_read.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/build_read.go new file mode 100644 index 0000000000000000000000000000000000000000..9137200123d17cfddd492749133c644026836469 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/build_read.go @@ -0,0 +1,594 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a lightly modified copy go/build/read.go with unused parts +// removed. + +package modindex + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "io" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +type importReader struct { + b *bufio.Reader + buf []byte + peek byte + err error + eof bool + nerr int + pos token.Position +} + +var bom = []byte{0xef, 0xbb, 0xbf} + +func newImportReader(name string, r io.Reader) *importReader { + b := bufio.NewReader(r) + // Remove leading UTF-8 BOM. + // Per https://golang.org/ref/spec#Source_code_representation: + // a compiler may ignore a UTF-8-encoded byte order mark (U+FEFF) + // if it is the first Unicode code point in the source text. 
+ if leadingBytes, err := b.Peek(3); err == nil && bytes.Equal(leadingBytes, bom) { + b.Discard(3) + } + return &importReader{ + b: b, + pos: token.Position{ + Filename: name, + Line: 1, + Column: 1, + }, + } +} + +func isIdent(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c >= utf8.RuneSelf +} + +var ( + errSyntax = errors.New("syntax error") + errNUL = errors.New("unexpected NUL in input") +) + +// syntaxError records a syntax error, but only if an I/O error has not already been recorded. +func (r *importReader) syntaxError() { + if r.err == nil { + r.err = errSyntax + } +} + +// readByte reads the next byte from the input, saves it in buf, and returns it. +// If an error occurs, readByte records the error in r.err and returns 0. +func (r *importReader) readByte() byte { + c, err := r.b.ReadByte() + if err == nil { + r.buf = append(r.buf, c) + if c == 0 { + err = errNUL + } + } + if err != nil { + if err == io.EOF { + r.eof = true + } else if r.err == nil { + r.err = err + } + c = 0 + } + return c +} + +// readByteNoBuf is like readByte but doesn't buffer the byte. +// It exhausts r.buf before reading from r.b. +func (r *importReader) readByteNoBuf() byte { + var c byte + var err error + if len(r.buf) > 0 { + c = r.buf[0] + r.buf = r.buf[1:] + } else { + c, err = r.b.ReadByte() + if err == nil && c == 0 { + err = errNUL + } + } + + if err != nil { + if err == io.EOF { + r.eof = true + } else if r.err == nil { + r.err = err + } + return 0 + } + r.pos.Offset++ + if c == '\n' { + r.pos.Line++ + r.pos.Column = 1 + } else { + r.pos.Column++ + } + return c +} + +// peekByte returns the next byte from the input reader but does not advance beyond it. +// If skipSpace is set, peekByte skips leading spaces and comments. 
+func (r *importReader) peekByte(skipSpace bool) byte { + if r.err != nil { + if r.nerr++; r.nerr > 10000 { + panic("go/build: import reader looping") + } + return 0 + } + + // Use r.peek as first input byte. + // Don't just return r.peek here: it might have been left by peekByte(false) + // and this might be peekByte(true). + c := r.peek + if c == 0 { + c = r.readByte() + } + for r.err == nil && !r.eof { + if skipSpace { + // For the purposes of this reader, semicolons are never necessary to + // understand the input and are treated as spaces. + switch c { + case ' ', '\f', '\t', '\r', '\n', ';': + c = r.readByte() + continue + + case '/': + c = r.readByte() + if c == '/' { + for c != '\n' && r.err == nil && !r.eof { + c = r.readByte() + } + } else if c == '*' { + var c1 byte + for (c != '*' || c1 != '/') && r.err == nil { + if r.eof { + r.syntaxError() + } + c, c1 = c1, r.readByte() + } + } else { + r.syntaxError() + } + c = r.readByte() + continue + } + } + break + } + r.peek = c + return r.peek +} + +// nextByte is like peekByte but advances beyond the returned byte. +func (r *importReader) nextByte(skipSpace bool) byte { + c := r.peekByte(skipSpace) + r.peek = 0 + return c +} + +var goEmbed = []byte("go:embed") + +// findEmbed advances the input reader to the next //go:embed comment. +// It reports whether it found a comment. +// (Otherwise it found an error or EOF.) +func (r *importReader) findEmbed(first bool) bool { + // The import block scan stopped after a non-space character, + // so the reader is not at the start of a line on the first call. + // After that, each //go:embed extraction leaves the reader + // at the end of a line. 
+ startLine := !first + var c byte + for r.err == nil && !r.eof { + c = r.readByteNoBuf() + Reswitch: + switch c { + default: + startLine = false + + case '\n': + startLine = true + + case ' ', '\t': + // leave startLine alone + + case '"': + startLine = false + for r.err == nil { + if r.eof { + r.syntaxError() + } + c = r.readByteNoBuf() + if c == '\\' { + r.readByteNoBuf() + if r.err != nil { + r.syntaxError() + return false + } + continue + } + if c == '"' { + c = r.readByteNoBuf() + goto Reswitch + } + } + goto Reswitch + + case '`': + startLine = false + for r.err == nil { + if r.eof { + r.syntaxError() + } + c = r.readByteNoBuf() + if c == '`' { + c = r.readByteNoBuf() + goto Reswitch + } + } + + case '\'': + startLine = false + for r.err == nil { + if r.eof { + r.syntaxError() + } + c = r.readByteNoBuf() + if c == '\\' { + r.readByteNoBuf() + if r.err != nil { + r.syntaxError() + return false + } + continue + } + if c == '\'' { + c = r.readByteNoBuf() + goto Reswitch + } + } + + case '/': + c = r.readByteNoBuf() + switch c { + default: + startLine = false + goto Reswitch + + case '*': + var c1 byte + for (c != '*' || c1 != '/') && r.err == nil { + if r.eof { + r.syntaxError() + } + c, c1 = c1, r.readByteNoBuf() + } + startLine = false + + case '/': + if startLine { + // Try to read this as a //go:embed comment. + for i := range goEmbed { + c = r.readByteNoBuf() + if c != goEmbed[i] { + goto SkipSlashSlash + } + } + c = r.readByteNoBuf() + if c == ' ' || c == '\t' { + // Found one! + return true + } + } + SkipSlashSlash: + for c != '\n' && r.err == nil && !r.eof { + c = r.readByteNoBuf() + } + startLine = true + } + } + } + return false +} + +// readKeyword reads the given keyword from the input. +// If the keyword is not present, readKeyword records a syntax error. 
+func (r *importReader) readKeyword(kw string) { + r.peekByte(true) + for i := 0; i < len(kw); i++ { + if r.nextByte(false) != kw[i] { + r.syntaxError() + return + } + } + if isIdent(r.peekByte(false)) { + r.syntaxError() + } +} + +// readIdent reads an identifier from the input. +// If an identifier is not present, readIdent records a syntax error. +func (r *importReader) readIdent() { + c := r.peekByte(true) + if !isIdent(c) { + r.syntaxError() + return + } + for isIdent(r.peekByte(false)) { + r.peek = 0 + } +} + +// readString reads a quoted string literal from the input. +// If an identifier is not present, readString records a syntax error. +func (r *importReader) readString() { + switch r.nextByte(true) { + case '`': + for r.err == nil { + if r.nextByte(false) == '`' { + break + } + if r.eof { + r.syntaxError() + } + } + case '"': + for r.err == nil { + c := r.nextByte(false) + if c == '"' { + break + } + if r.eof || c == '\n' { + r.syntaxError() + } + if c == '\\' { + r.nextByte(false) + } + } + default: + r.syntaxError() + } +} + +// readImport reads an import clause - optional identifier followed by quoted string - +// from the input. +func (r *importReader) readImport() { + c := r.peekByte(true) + if c == '.' { + r.peek = 0 + } else if isIdent(c) { + r.readIdent() + } + r.readString() +} + +// readComments is like io.ReadAll, except that it only reads the leading +// block of comments in the file. +func readComments(f io.Reader) ([]byte, error) { + r := newImportReader("", f) + r.peekByte(true) + if r.err == nil && !r.eof { + // Didn't reach EOF, so must have found a non-space byte. Remove it. + r.buf = r.buf[:len(r.buf)-1] + } + return r.buf, r.err +} + +// readGoInfo expects a Go file as input and reads the file up to and including the import section. +// It records what it learned in *info. +// If info.fset is non-nil, readGoInfo parses the file and sets info.parsed, info.parseErr, +// info.imports and info.embeds. 
+// +// It only returns an error if there are problems reading the file, +// not for syntax errors in the file itself. +func readGoInfo(f io.Reader, info *fileInfo) error { + r := newImportReader(info.name, f) + + r.readKeyword("package") + r.readIdent() + for r.peekByte(true) == 'i' { + r.readKeyword("import") + if r.peekByte(true) == '(' { + r.nextByte(false) + for r.peekByte(true) != ')' && r.err == nil { + r.readImport() + } + r.nextByte(false) + } else { + r.readImport() + } + } + + info.header = r.buf + + // If we stopped successfully before EOF, we read a byte that told us we were done. + // Return all but that last byte, which would cause a syntax error if we let it through. + if r.err == nil && !r.eof { + info.header = r.buf[:len(r.buf)-1] + } + + // If we stopped for a syntax error, consume the whole file so that + // we are sure we don't change the errors that go/parser returns. + if r.err == errSyntax { + r.err = nil + for r.err == nil && !r.eof { + r.readByte() + } + info.header = r.buf + } + if r.err != nil { + return r.err + } + + if info.fset == nil { + return nil + } + + // Parse file header & record imports. + info.parsed, info.parseErr = parser.ParseFile(info.fset, info.name, info.header, parser.ImportsOnly|parser.ParseComments) + if info.parseErr != nil { + return nil + } + + hasEmbed := false + for _, decl := range info.parsed.Decls { + d, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + for _, dspec := range d.Specs { + spec, ok := dspec.(*ast.ImportSpec) + if !ok { + continue + } + quoted := spec.Path.Value + path, err := strconv.Unquote(quoted) + if err != nil { + return fmt.Errorf("parser returned invalid quoted string: <%s>", quoted) + } + if path == "embed" { + hasEmbed = true + } + + doc := spec.Doc + if doc == nil && len(d.Specs) == 1 { + doc = d.Doc + } + info.imports = append(info.imports, fileImport{path, spec.Pos(), doc}) + } + } + + // Extract directives. 
+ for _, group := range info.parsed.Comments { + if group.Pos() >= info.parsed.Package { + break + } + for _, c := range group.List { + if strings.HasPrefix(c.Text, "//go:") { + info.directives = append(info.directives, build.Directive{Text: c.Text, Pos: info.fset.Position(c.Slash)}) + } + } + } + + // If the file imports "embed", + // we have to look for //go:embed comments + // in the remainder of the file. + // The compiler will enforce the mapping of comments to + // declared variables. We just need to know the patterns. + // If there were //go:embed comments earlier in the file + // (near the package statement or imports), the compiler + // will reject them. They can be (and have already been) ignored. + if hasEmbed { + var line []byte + for first := true; r.findEmbed(first); first = false { + line = line[:0] + pos := r.pos + for { + c := r.readByteNoBuf() + if c == '\n' || r.err != nil || r.eof { + break + } + line = append(line, c) + } + // Add args if line is well-formed. + // Ignore badly-formed lines - the compiler will report them when it finds them, + // and we can pretend they are not there to help go list succeed with what it knows. + embs, err := parseGoEmbed(string(line), pos) + if err == nil { + info.embeds = append(info.embeds, embs...) + } + } + } + + return nil +} + +// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns. +// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings. +// This is based on a similar function in cmd/compile/internal/gc/noder.go; +// this version calculates position information as well. 
+func parseGoEmbed(args string, pos token.Position) ([]fileEmbed, error) { + trimBytes := func(n int) { + pos.Offset += n + pos.Column += utf8.RuneCountInString(args[:n]) + args = args[n:] + } + trimSpace := func() { + trim := strings.TrimLeftFunc(args, unicode.IsSpace) + trimBytes(len(args) - len(trim)) + } + + var list []fileEmbed + for trimSpace(); args != ""; trimSpace() { + var path string + pathPos := pos + Switch: + switch args[0] { + default: + i := len(args) + for j, c := range args { + if unicode.IsSpace(c) { + i = j + break + } + } + path = args[:i] + trimBytes(i) + + case '`': + var ok bool + path, _, ok = strings.Cut(args[1:], "`") + if !ok { + return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args) + } + trimBytes(1 + len(path) + 1) + + case '"': + i := 1 + for ; i < len(args); i++ { + if args[i] == '\\' { + i++ + continue + } + if args[i] == '"' { + q, err := strconv.Unquote(args[:i+1]) + if err != nil { + return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args[:i+1]) + } + path = q + trimBytes(i + 1) + break Switch + } + } + if i >= len(args) { + return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args) + } + } + + if args != "" { + r, _ := utf8.DecodeRuneInString(args) + if !unicode.IsSpace(r) { + return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args) + } + } + list = append(list, fileEmbed{path, pathPos}) + } + return list, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/index_format.txt b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/index_format.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b1d2c6bc5b871d4828fca2aac31bda460ae8f26 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/index_format.txt @@ -0,0 +1,63 @@ +This file documents the index format that is read and written by this package. 
+The index format is an encoding of a series of RawPackage structs + +Field names refer to fields on RawPackage and rawFile. +The file uses little endian encoding for the uint32s. +Strings are written into the string table at the end of the file. +Each string is prefixed with a uvarint-encoded length. +Bools are written as uint32s: 0 for false and 1 for true. + +The following is the format for a full module: + +“go index v2\n” +str uint32 - offset of string table +n uint32 - number of packages +for each rawPackage: + dirname - string offset + package - offset where package begins +for each rawPackage: + error uint32 - string offset // error is produced by fsys.ReadDir or fmt.Errorf + dir uint32 - string offset (directory path relative to module root) + len(sourceFiles) uint32 + sourceFiles [n]uint32 - offset to source file (relative to start of index file) + for each sourceFile: + error - string offset // error is either produced by fmt.Errorf,errors.New or is io.EOF + parseError - string offset // if non-empty, a json-encoded parseError struct (see below). Is either produced by io.ReadAll,os.ReadFile,errors.New or is scanner.Error,scanner.ErrorList + synopsis - string offset + name - string offset + pkgName - string offset + ignoreFile - int32 bool // report the file in Ignored(Go|Other)Files because there was an error reading it or parsing its build constraints. 
+ binaryOnly uint32 bool + cgoDirectives string offset // the #cgo directive lines in the comment on import "C" + goBuildConstraint - string offset + len(plusBuildConstraints) - uint32 + plusBuildConstraints - [n]uint32 (string offsets) + len(imports) uint32 + for each rawImport: + path - string offset + position - file, offset, line, column - uint32 + len(embeds) uint32 + for each embed: + pattern - string offset + position - file, offset, line, column - uint32 + len(directives) uint32 + for each directive: + text - string offset + position - file, offset, line, column - uint32 +[string table] +0xFF (marker) + +The following is the format for a single indexed package: + +“go index v0\n” +str uint32 - offset of string table +for the single RawPackage: + [same RawPackage format as above] +[string table] + +The following is the definition of the json-serialized parseError struct: + +type parseError struct { + ErrorList *scanner.ErrorList // non-nil if the error was an ErrorList, nil otherwise + ErrorString string // non-empty for all other cases +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/index_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/index_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6bc62f393fad5fd0b20d6577b98d20cb61caace0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/index_test.go @@ -0,0 +1,104 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package modindex + +import ( + "encoding/hex" + "encoding/json" + "go/build" + "internal/diff" + "path/filepath" + "reflect" + "runtime" + "testing" +) + +func init() { + isTest = true + enabled = true // to allow GODEBUG=goindex=0 go test, when things are very broken +} + +func TestIndex(t *testing.T) { + src := filepath.Join(runtime.GOROOT(), "src") + checkPkg := func(t *testing.T, m *Module, pkg string, data []byte) { + p := m.Package(pkg) + bp, err := p.Import(build.Default, build.ImportComment) + if err != nil { + t.Fatal(err) + } + bp1, err := build.Default.Import(".", filepath.Join(src, pkg), build.ImportComment) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(bp, bp1) { + t.Errorf("mismatch") + t.Logf("index:\n%s", hex.Dump(data)) + + js, err := json.MarshalIndent(bp, "", "\t") + if err != nil { + t.Fatal(err) + } + js1, err := json.MarshalIndent(bp1, "", "\t") + if err != nil { + t.Fatal(err) + } + t.Logf("diff:\n%s", diff.Diff("index", js, "correct", js1)) + t.FailNow() + } + } + + // Check packages in increasing complexity, one at a time. + pkgs := []string{ + "crypto", + "encoding", + "unsafe", + "encoding/json", + "runtime", + "net", + } + var raws []*rawPackage + for _, pkg := range pkgs { + raw := importRaw(src, pkg) + raws = append(raws, raw) + t.Run(pkg, func(t *testing.T) { + data := encodeModuleBytes([]*rawPackage{raw}) + m, err := fromBytes(src, data) + if err != nil { + t.Fatal(err) + } + checkPkg(t, m, pkg, data) + }) + } + + // Check that a multi-package index works too. 
+ t.Run("all", func(t *testing.T) { + data := encodeModuleBytes(raws) + m, err := fromBytes(src, data) + if err != nil { + t.Fatal(err) + } + for _, pkg := range pkgs { + checkPkg(t, m, pkg, data) + } + }) +} + +func TestImportRaw_IgnoreNonGo(t *testing.T) { + path := filepath.Join("testdata", "ignore_non_source") + p := importRaw(path, ".") + + wantFiles := []string{"a.syso", "b.go", "c.c"} + + var gotFiles []string + for i := range p.sourceFiles { + gotFiles = append(gotFiles, p.sourceFiles[i].name) + } + + if !reflect.DeepEqual(gotFiles, wantFiles) { + t.Errorf("names of files in importRaw(testdata/ignore_non_source): got %v; want %v", + gotFiles, wantFiles) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/read.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/read.go new file mode 100644 index 0000000000000000000000000000000000000000..83d5faf28fbefafe201dc869e5be72a30a9c74f0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/read.go @@ -0,0 +1,1037 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modindex + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "go/build" + "go/build/constraint" + "go/token" + "internal/godebug" + "internal/goroot" + "path" + "path/filepath" + "runtime" + "runtime/debug" + "sort" + "strings" + "sync" + "time" + "unsafe" + + "cmd/go/internal/base" + "cmd/go/internal/cache" + "cmd/go/internal/cfg" + "cmd/go/internal/fsys" + "cmd/go/internal/imports" + "cmd/go/internal/par" + "cmd/go/internal/str" +) + +// enabled is used to flag off the behavior of the module index on tip. +// It will be removed before the release. +// TODO(matloob): Remove enabled once we have more confidence on the +// module index. +var enabled = godebug.New("#goindex").Value() != "0" + +// Module represents and encoded module index file. 
It is used to +// do the equivalent of build.Import of packages in the module and answer other +// questions based on the index file's data. +type Module struct { + modroot string + d *decoder + n int // number of packages +} + +// moduleHash returns an ActionID corresponding to the state of the module +// located at filesystem path modroot. +func moduleHash(modroot string, ismodcache bool) (cache.ActionID, error) { + // We expect modules stored within the module cache to be checksummed and + // immutable, and we expect released modules within GOROOT to change only + // infrequently (when the Go version changes). + if !ismodcache { + // The contents of this module may change over time. We don't want to pay + // the cost to detect changes and re-index whenever they occur, so just + // don't index it at all. + // + // Note that this is true even for modules in GOROOT/src: non-release builds + // of the Go toolchain may have arbitrary development changes on top of the + // commit reported by runtime.Version, or could be completely artificial due + // to lacking a `git` binary (like "devel gomote.XXXXX", as synthesized by + // "gomote push" as of 2022-06-15). (Release builds shouldn't have + // modifications, but we don't want to use a behavior for releases that we + // haven't tested during development.) + return cache.ActionID{}, ErrNotIndexed + } + + h := cache.NewHash("moduleIndex") + // TODO(bcmills): Since modules in the index are checksummed, we could + // probably improve the cache hit rate by keying off of the module + // path@version (perhaps including the checksum?) instead of the module root + // directory. + fmt.Fprintf(h, "module index %s %s %v\n", runtime.Version(), indexVersion, modroot) + return h.Sum(), nil +} + +const modTimeCutoff = 2 * time.Second + +// dirHash returns an ActionID corresponding to the state of the package +// located at filesystem path pkgdir. 
// dirHash returns an ActionID corresponding to the state of the package
// located at filesystem path pkgdir. The hash covers each regular file's
// name, size, and mtime, so any change to the directory's contents yields
// a new cache key. ErrNotIndexed is returned when the directory cannot be
// hashed reliably (unreadable, irregular files, or very recent writes).
func dirHash(modroot, pkgdir string) (cache.ActionID, error) {
	h := cache.NewHash("moduleIndex")
	fmt.Fprintf(h, "modroot %s\n", modroot)
	fmt.Fprintf(h, "package %s %s %v\n", runtime.Version(), indexVersion, pkgdir)
	entries, err := fsys.ReadDir(pkgdir)
	if err != nil {
		// pkgdir might not be a directory. give up on hashing.
		return cache.ActionID{}, ErrNotIndexed
	}
	cutoff := time.Now().Add(-modTimeCutoff)
	for _, info := range entries {
		if info.IsDir() {
			continue
		}

		// Irregular files (symlinks, devices, ...) may change without a
		// detectable metadata update, so refuse to hash the directory.
		if !info.Mode().IsRegular() {
			return cache.ActionID{}, ErrNotIndexed
		}
		// To avoid problems for very recent files where a new
		// write might not change the mtime due to file system
		// mtime precision, reject caching if a file was read that
		// is less than modTimeCutoff old.
		//
		// This is the same strategy used for hashing test inputs.
		// See hashOpen in cmd/go/internal/test/test.go for the
		// corresponding code.
		if info.ModTime().After(cutoff) {
			return cache.ActionID{}, ErrNotIndexed
		}

		fmt.Fprintf(h, "file %v %v %v\n", info.Name(), info.ModTime(), info.Size())
	}
	return h.Sum(), nil
}

// ErrNotIndexed reports that a module or package cannot be answered from
// the index and the caller should fall back to reading the filesystem.
var ErrNotIndexed = errors.New("not in module index")

var (
	errDisabled           = fmt.Errorf("%w: module indexing disabled", ErrNotIndexed)
	errNotFromModuleCache = fmt.Errorf("%w: not from module cache", ErrNotIndexed)
)

// GetPackage returns the IndexPackage for the package at the given path.
// It will return ErrNotIndexed if the directory should be read without
// using the index, for instance because the index is disabled, or the package
// is not in a module.
func GetPackage(modroot, pkgdir string) (*IndexPackage, error) {
	mi, err := GetModule(modroot)
	if err == nil {
		return mi.Package(relPath(pkgdir, modroot)), nil
	}
	if !errors.Is(err, errNotFromModuleCache) {
		return nil, err
	}
	if cfg.BuildContext.Compiler == "gccgo" && str.HasPathPrefix(modroot, cfg.GOROOTsrc) {
		return nil, err // gccgo has no sources for GOROOT packages.
	}
	// Not in the module cache, but a per-directory index may still apply.
	return openIndexPackage(modroot, pkgdir)
}

// GetModule returns the Module for the given modroot.
// It will return ErrNotIndexed if the directory should be read without
// using the index, for instance because the index is disabled, or the package
// is not in a module.
func GetModule(modroot string) (*Module, error) {
	if !enabled || cache.DefaultDir() == "off" {
		return nil, errDisabled
	}
	if modroot == "" {
		// NOTE(review): the message names GetPackage (the common entry point)
		// even though the panic fires in GetModule — confirm this is intended.
		panic("modindex.GetPackage called with empty modroot")
	}
	if cfg.BuildMod == "vendor" {
		// Even if the main module is in the module cache,
		// its vendored dependencies are not loaded from their
		// usual cached locations.
		return nil, errNotFromModuleCache
	}
	modroot = filepath.Clean(modroot)
	if str.HasFilePathPrefix(modroot, cfg.GOROOTsrc) || !str.HasFilePathPrefix(modroot, cfg.GOMODCACHE) {
		return nil, errNotFromModuleCache
	}
	return openIndexModule(modroot, true)
}

// mcache caches the decoded Module for each module root so an index is
// opened (and, if necessary, built) at most once per process.
var mcache par.ErrCache[string, *Module]

// openIndexModule returns the module index for modPath.
// It will return ErrNotIndexed if the module can not be read
// using the index because it contains symlinks.
func openIndexModule(modroot string, ismodcache bool) (*Module, error) {
	return mcache.Do(modroot, func() (*Module, error) {
		fsys.Trace("openIndexModule", modroot)
		id, err := moduleHash(modroot, ismodcache)
		if err != nil {
			return nil, err
		}
		data, _, err := cache.GetMmap(cache.Default(), id)
		if err != nil {
			// Couldn't read from modindex. Assume we couldn't read from
			// the index because the module hasn't been indexed yet.
			data, err = indexModule(modroot)
			if err != nil {
				return nil, err
			}
			if err = cache.PutBytes(cache.Default(), id, data); err != nil {
				return nil, err
			}
		}
		mi, err := fromBytes(modroot, data)
		if err != nil {
			return nil, err
		}
		return mi, nil
	})
}

// pcache caches decoded single-package indexes keyed by [modroot, pkgdir].
var pcache par.ErrCache[[2]string, *IndexPackage]

// openIndexPackage returns the single-package index for the package at
// pkgdir, building and caching it on first use.
func openIndexPackage(modroot, pkgdir string) (*IndexPackage, error) {
	return pcache.Do([2]string{modroot, pkgdir}, func() (*IndexPackage, error) {
		fsys.Trace("openIndexPackage", pkgdir)
		id, err := dirHash(modroot, pkgdir)
		if err != nil {
			return nil, err
		}
		data, _, err := cache.GetMmap(cache.Default(), id)
		if err != nil {
			// Couldn't read from index. Assume we couldn't read from
			// the index because the package hasn't been indexed yet.
			data = indexPackage(modroot, pkgdir)
			if err = cache.PutBytes(cache.Default(), id, data); err != nil {
				return nil, err
			}
		}
		pkg, err := packageFromBytes(modroot, data)
		if err != nil {
			return nil, err
		}
		return pkg, nil
	})
}

// errCorrupt is panicked with when index data fails a validity check;
// unprotect converts the panic into an error for callers.
var errCorrupt = errors.New("corrupt index")

// protect marks the start of a large section of code that accesses the index.
// It should be used as:
//
//	defer unprotect(protect(), &err)
//
// It should not be used for trivial accesses which would be
// dwarfed by the overhead of the defer.
func protect() bool {
	return debug.SetPanicOnFault(true)
}

// isTest is set to true by tests so that corrupt-index failures panic
// (and can be observed) instead of terminating via base.Fatalf.
var isTest = false

// unprotect marks the end of a large section of code that accesses the index.
// It should be used as:
//
//	defer unprotect(protect(), &err)
//
// unprotect looks for panics due to errCorrupt or bad mmap accesses.
// When it finds them, it adds explanatory text, consumes the panic, and sets *errp instead.
// If errp is nil, unprotect adds the explanatory text but then calls base.Fatalf.
func unprotect(old bool, errp *error) {
	// SetPanicOnFault's errors _may_ satisfy this interface. Even though it's not guaranteed
	// that all its errors satisfy this interface, we'll only check for these errors so that
	// we don't suppress panics that could have been produced from other sources.
	type addrer interface {
		Addr() uintptr
	}

	debug.SetPanicOnFault(old)

	if e := recover(); e != nil {
		if _, ok := e.(addrer); ok || e == errCorrupt {
			// This panic was almost certainly caused by SetPanicOnFault or our panic(errCorrupt).
			err := fmt.Errorf("error reading module index: %v", e)
			if errp != nil {
				*errp = err
				return
			}
			if isTest {
				panic(err)
			}
			base.Fatalf("%v", err)
		}
		// The panic was likely not caused by SetPanicOnFault.
		panic(e)
	}
}

// fromBytes returns a *Module given the encoded representation.
//
// Layout: the data begins with indexVersion+"\n", followed by two uint32s:
// the offset of the string table and the number of packages.
func fromBytes(moddir string, data []byte) (m *Module, err error) {
	if !enabled {
		panic("use of index")
	}

	defer unprotect(protect(), &err)

	if !bytes.HasPrefix(data, []byte(indexVersion+"\n")) {
		return nil, errCorrupt
	}

	const hdr = len(indexVersion + "\n")
	d := &decoder{data: data}
	str := d.intAt(hdr)
	if str < hdr+8 || len(d.data) < str {
		return nil, errCorrupt
	}
	// Split the mapping: everything before str is structured data,
	// everything from str on is the string table.
	d.data, d.str = data[:str], d.data[str:]
	// Check that string table looks valid.
	// First string is empty string (length 0),
	// and we leave a marker byte 0xFF at the end
	// just to make sure that the file is not truncated.
	if len(d.str) == 0 || d.str[0] != 0 || d.str[len(d.str)-1] != 0xFF {
		return nil, errCorrupt
	}

	n := d.intAt(hdr + 4)
	if n < 0 || n > (len(d.data)-8)/8 {
		return nil, errCorrupt
	}

	m = &Module{
		moddir,
		d,
		n,
	}
	return m, nil
}
// packageFromBytes returns a *IndexPackage given the encoded representation.
// The data must encode exactly one package.
func packageFromBytes(modroot string, data []byte) (p *IndexPackage, err error) {
	m, err := fromBytes(modroot, data)
	if err != nil {
		return nil, err
	}
	if m.n != 1 {
		return nil, fmt.Errorf("corrupt single-package index")
	}
	return m.pkg(0), nil
}

// pkgDir returns the dir string of the i'th package in the index.
// 12 + 8 skips the index header (indexVersion+"\n" plus the string-table
// offset and package count — see fromBytes); each package entry is two
// uint32s: the dir string offset and the package data offset.
func (m *Module) pkgDir(i int) string {
	if i < 0 || i >= m.n {
		panic(errCorrupt)
	}
	return m.d.stringAt(12 + 8 + 8*i)
}

// pkgOff returns the offset of the data for the i'th package in the index.
func (m *Module) pkgOff(i int) int {
	if i < 0 || i >= m.n {
		panic(errCorrupt)
	}
	return m.d.intAt(12 + 8 + 8*i + 4)
}

// Walk calls f for each package in the index, passing the path to that package relative to the module root.
func (m *Module) Walk(f func(path string)) {
	defer unprotect(protect(), nil)
	for i := 0; i < m.n; i++ {
		f(m.pkgDir(i))
	}
}

// relPath returns the path relative to the module's root.
func relPath(path, modroot string) string {
	return str.TrimFilePathPrefix(filepath.Clean(path), filepath.Clean(modroot))
}

// installgorootAll reports whether GODEBUG=installgoroot=all is set,
// requesting install targets for GOROOT packages (see Import below).
var installgorootAll = godebug.New("installgoroot").Value() == "all"

// Import is the equivalent of build.Import given the information in Module,
// answered from the index rather than by reading the filesystem. As with
// build.Import, p is non-nil even when an error is returned.
func (rp *IndexPackage) Import(bctxt build.Context, mode build.ImportMode) (p *build.Package, err error) {
	defer unprotect(protect(), &err)

	ctxt := (*Context)(&bctxt)

	p = &build.Package{}

	p.ImportPath = "."
	p.Dir = filepath.Join(rp.modroot, rp.dir)

	var pkgerr error
	switch ctxt.Compiler {
	case "gccgo", "gc":
	default:
		// Save error for end of function.
		pkgerr = fmt.Errorf("import %q: unknown compiler %q", p.Dir, ctxt.Compiler)
	}

	if p.Dir == "" {
		return p, fmt.Errorf("import %q: import of unknown directory", p.Dir)
	}

	// goroot and gopath
	inTestdata := func(sub string) bool {
		return strings.Contains(sub, "/testdata/") || strings.HasSuffix(sub, "/testdata") || str.HasPathPrefix(sub, "testdata")
	}
	var pkga string
	if !inTestdata(rp.dir) {
		// In build.go, p.Root should only be set in the non-local-import case, or in
		// GOROOT or GOPATH. Since module mode only calls Import with path set to "."
		// and the module index doesn't apply outside modules, the GOROOT case is
		// the only case where p.Root needs to be set.
		if ctxt.GOROOT != "" && str.HasFilePathPrefix(p.Dir, cfg.GOROOTsrc) && p.Dir != cfg.GOROOTsrc {
			p.Root = ctxt.GOROOT
			p.Goroot = true
			modprefix := str.TrimFilePathPrefix(rp.modroot, cfg.GOROOTsrc)
			p.ImportPath = rp.dir
			if modprefix != "" {
				p.ImportPath = filepath.Join(modprefix, p.ImportPath)
			}

			// Set GOROOT-specific fields (sometimes for modules in a GOPATH directory).
			// The fields set below (SrcRoot, PkgRoot, BinDir, PkgTargetRoot, and PkgObj)
			// are only set in build.Import if p.Root != "".
			var pkgtargetroot string
			suffix := ""
			if ctxt.InstallSuffix != "" {
				suffix = "_" + ctxt.InstallSuffix
			}
			switch ctxt.Compiler {
			case "gccgo":
				pkgtargetroot = "pkg/gccgo_" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
				dir, elem := path.Split(p.ImportPath)
				pkga = pkgtargetroot + "/" + dir + "lib" + elem + ".a"
			case "gc":
				pkgtargetroot = "pkg/" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
				pkga = pkgtargetroot + "/" + p.ImportPath + ".a"
			}
			p.SrcRoot = ctxt.joinPath(p.Root, "src")
			p.PkgRoot = ctxt.joinPath(p.Root, "pkg")
			p.BinDir = ctxt.joinPath(p.Root, "bin")
			if pkga != "" {
				// Always set PkgTargetRoot. It might be used when building in shared
				// mode.
				p.PkgTargetRoot = ctxt.joinPath(p.Root, pkgtargetroot)

				// Set the install target if applicable.
				if !p.Goroot || (installgorootAll && p.ImportPath != "unsafe" && p.ImportPath != "builtin") {
					p.PkgObj = ctxt.joinPath(p.Root, pkga)
				}
			}
		}
	}

	if rp.error != nil {
		if errors.Is(rp.error, errCannotFindPackage) && ctxt.Compiler == "gccgo" && p.Goroot {
			return p, nil
		}
		return p, rp.error
	}

	if mode&build.FindOnly != 0 {
		return p, pkgerr
	}

	// We need to do a second round of bad file processing.
	var badGoError error
	badGoFiles := make(map[string]bool)
	badGoFile := func(name string, err error) {
		if badGoError == nil {
			badGoError = err
		}
		if !badGoFiles[name] {
			p.InvalidGoFiles = append(p.InvalidGoFiles, name)
			badGoFiles[name] = true
		}
	}

	var Sfiles []string // files with ".S"(capital S)/.sx(capital s equivalent for case insensitive filesystems)
	var firstFile string
	embedPos := make(map[string][]token.Position)
	testEmbedPos := make(map[string][]token.Position)
	xTestEmbedPos := make(map[string][]token.Position)
	importPos := make(map[string][]token.Position)
	testImportPos := make(map[string][]token.Position)
	xTestImportPos := make(map[string][]token.Position)
	allTags := make(map[string]bool)
	for _, tf := range rp.sourceFiles {
		name := tf.name()
		// Check errors for go files and call badGoFiles to put them in
		// InvalidGoFiles if they do have an error.
		if strings.HasSuffix(name, ".go") {
			if error := tf.error(); error != "" {
				badGoFile(name, errors.New(tf.error()))
				continue
			} else if parseError := tf.parseError(); parseError != "" {
				badGoFile(name, parseErrorFromString(tf.parseError()))
				// Fall through: we still want to list files with parse errors.
			}
		}

		// Decide whether the file participates in the build: file-name
		// OS/arch filtering first, then //go:build, then // +build lines.
		var shouldBuild = true
		if !ctxt.goodOSArchFile(name, allTags) && !ctxt.UseAllFiles {
			shouldBuild = false
		} else if goBuildConstraint := tf.goBuildConstraint(); goBuildConstraint != "" {
			x, err := constraint.Parse(goBuildConstraint)
			if err != nil {
				return p, fmt.Errorf("%s: parsing //go:build line: %v", name, err)
			}
			shouldBuild = ctxt.eval(x, allTags)
		} else if plusBuildConstraints := tf.plusBuildConstraints(); len(plusBuildConstraints) > 0 {
			for _, text := range plusBuildConstraints {
				if x, err := constraint.Parse(text); err == nil {
					if !ctxt.eval(x, allTags) {
						shouldBuild = false
					}
				}
			}
		}

		ext := nameExt(name)
		if !shouldBuild || tf.ignoreFile() {
			if ext == ".go" {
				p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
			} else if fileListForExt(p, ext) != nil {
				p.IgnoredOtherFiles = append(p.IgnoredOtherFiles, name)
			}
			continue
		}

		// Going to save the file. For non-Go files, can stop here.
		switch ext {
		case ".go":
			// keep going
		case ".S", ".sx":
			// special case for cgo, handled at end
			Sfiles = append(Sfiles, name)
			continue
		default:
			if list := fileListForExt(p, ext); list != nil {
				*list = append(*list, name)
			}
			continue
		}

		pkg := tf.pkgName()
		if pkg == "documentation" {
			p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
			continue
		}
		isTest := strings.HasSuffix(name, "_test.go")
		isXTest := false
		if isTest && strings.HasSuffix(tf.pkgName(), "_test") && p.Name != tf.pkgName() {
			isXTest = true
			pkg = pkg[:len(pkg)-len("_test")]
		}

		if !isTest && tf.binaryOnly() {
			p.BinaryOnly = true
		}

		if p.Name == "" {
			p.Name = pkg
			firstFile = name
		} else if pkg != p.Name {
			// TODO(#45999): The choice of p.Name is arbitrary based on file iteration
			// order. Instead of resolving p.Name arbitrarily, we should clear out the
			// existing Name and mark the existing files as also invalid.
			badGoFile(name, &MultiplePackageError{
				Dir:      p.Dir,
				Packages: []string{p.Name, pkg},
				Files:    []string{firstFile, name},
			})
		}
		// Grab the first package comment as docs, provided it is not from a test file.
		if p.Doc == "" && !isTest && !isXTest {
			if synopsis := tf.synopsis(); synopsis != "" {
				p.Doc = synopsis
			}
		}

		// Record Imports and information about cgo.
		isCgo := false
		imports := tf.imports()
		for _, imp := range imports {
			if imp.path == "C" {
				if isTest {
					badGoFile(name, fmt.Errorf("use of cgo in test %s not supported", name))
					continue
				}
				isCgo = true
			}
		}
		if directives := tf.cgoDirectives(); directives != "" {
			if err := ctxt.saveCgo(name, p, directives); err != nil {
				badGoFile(name, err)
			}
		}

		// Route the file into the appropriate list (cgo, external test,
		// internal test, or ordinary) with the matching position maps.
		var fileList *[]string
		var importMap, embedMap map[string][]token.Position
		var directives *[]build.Directive
		switch {
		case isCgo:
			allTags["cgo"] = true
			if ctxt.CgoEnabled {
				fileList = &p.CgoFiles
				importMap = importPos
				embedMap = embedPos
				directives = &p.Directives
			} else {
				// Ignore Imports and Embeds from cgo files if cgo is disabled.
				fileList = &p.IgnoredGoFiles
			}
		case isXTest:
			fileList = &p.XTestGoFiles
			importMap = xTestImportPos
			embedMap = xTestEmbedPos
			directives = &p.XTestDirectives
		case isTest:
			fileList = &p.TestGoFiles
			importMap = testImportPos
			embedMap = testEmbedPos
			directives = &p.TestDirectives
		default:
			fileList = &p.GoFiles
			importMap = importPos
			embedMap = embedPos
			directives = &p.Directives
		}
		*fileList = append(*fileList, name)
		if importMap != nil {
			for _, imp := range imports {
				importMap[imp.path] = append(importMap[imp.path], imp.position)
			}
		}
		if embedMap != nil {
			for _, e := range tf.embeds() {
				embedMap[e.pattern] = append(embedMap[e.pattern], e.position)
			}
		}
		if directives != nil {
			*directives = append(*directives, tf.directives()...)
		}
	}

	p.EmbedPatterns, p.EmbedPatternPos = cleanDecls(embedPos)
	p.TestEmbedPatterns, p.TestEmbedPatternPos = cleanDecls(testEmbedPos)
	p.XTestEmbedPatterns, p.XTestEmbedPatternPos = cleanDecls(xTestEmbedPos)

	p.Imports, p.ImportPos = cleanDecls(importPos)
	p.TestImports, p.TestImportPos = cleanDecls(testImportPos)
	p.XTestImports, p.XTestImportPos = cleanDecls(xTestImportPos)

	for tag := range allTags {
		p.AllTags = append(p.AllTags, tag)
	}
	sort.Strings(p.AllTags)

	// .S/.sx files are only assembled when cgo is in use.
	if len(p.CgoFiles) > 0 {
		p.SFiles = append(p.SFiles, Sfiles...)
		sort.Strings(p.SFiles)
	} else {
		p.IgnoredOtherFiles = append(p.IgnoredOtherFiles, Sfiles...)
		sort.Strings(p.IgnoredOtherFiles)
	}

	if badGoError != nil {
		return p, badGoError
	}
	if len(p.GoFiles)+len(p.CgoFiles)+len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
		return p, &build.NoGoError{Dir: p.Dir}
	}
	return p, pkgerr
}

// IsStandardPackage reports whether path is a standard package
// for the goroot and compiler using the module index if possible,
// and otherwise falling back to internal/goroot.IsStandardPackage
func IsStandardPackage(goroot_, compiler, path string) bool {
	if !enabled || compiler != "gc" {
		return goroot.IsStandardPackage(goroot_, compiler, path)
	}

	reldir := filepath.FromSlash(path) // relative dir path in module index for package
	modroot := filepath.Join(goroot_, "src")
	if str.HasFilePathPrefix(reldir, "cmd") {
		reldir = str.TrimFilePathPrefix(reldir, "cmd")
		modroot = filepath.Join(modroot, "cmd")
	}
	if _, err := GetPackage(modroot, filepath.Join(modroot, reldir)); err == nil {
		// Note that goroot.IsStandardPackage doesn't check that the directory
		// actually contains any go files-- merely that it exists. GetPackage
		// returning a nil error is enough for us to know the directory exists.
		return true
	} else if errors.Is(err, ErrNotIndexed) {
		// Fall back because package isn't indexable. (Probably because
		// a file was modified recently)
		return goroot.IsStandardPackage(goroot_, compiler, path)
	}
	return false
}

// IsDirWithGoFiles is the equivalent of fsys.IsDirWithGoFiles using the information in the index.
func (rp *IndexPackage) IsDirWithGoFiles() (_ bool, err error) {
	// Index reads may panic on corrupt data; surface that as an error.
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("error reading module index: %v", e)
		}
	}()
	for _, sf := range rp.sourceFiles {
		if strings.HasSuffix(sf.name(), ".go") {
			return true, nil
		}
	}
	return false, nil
}
// keys returns the keys of m as a sorted slice.
func keys(m map[string]bool) []string {
	out := make([]string, 0, len(m))
	for name := range m {
		out = append(out, name)
	}
	sort.Strings(out)
	return out
}
// Package finds and returns the package with the given path (relative to the
// module root). If the package does not exist, Package returns an
// IndexPackage that will return an appropriate error from its methods.
func (m *Module) Package(path string) *IndexPackage {
	defer unprotect(protect(), nil)

	// Packages are stored ordered by directory, so binary-search for path.
	i, ok := sort.Find(m.n, func(i int) int {
		return strings.Compare(path, m.pkgDir(i))
	})
	if !ok {
		return &IndexPackage{error: fmt.Errorf("%w %q in:\n\t%s", errCannotFindPackage, path, filepath.Join(m.modroot, path))}
	}
	return m.pkg(i)
}

// pkg decodes and returns the i'th IndexPackage in m.
// Layout: error string, dir string, file count, then one offset per file.
func (m *Module) pkg(i int) *IndexPackage {
	r := m.d.readAt(m.pkgOff(i))
	p := new(IndexPackage)
	if errstr := r.string(); errstr != "" {
		p.error = errors.New(errstr)
	}
	p.dir = r.string()
	p.sourceFiles = make([]*sourceFile, r.int())
	for i := range p.sourceFiles {
		p.sourceFiles[i] = &sourceFile{
			d:   m.d,
			pos: r.int(),
		}
	}
	p.modroot = m.modroot
	return p
}

// sourceFile represents the information of a given source file in the module index.
type sourceFile struct {
	d               *decoder    // encoding of this source file
	pos             int         // start of sourceFile encoding in d
	onceReadImports sync.Once   // guards the lazy decode into savedImports
	savedImports    []rawImport // saved imports so that they're only read once
}
// Offsets for fields in the sourceFile.
// Each fixed field is one uint32; string-valued fields hold an offset
// into the index's string table (see decoder.stringAt).
const (
	sourceFileError = 4 * iota
	sourceFileParseError
	sourceFileSynopsis
	sourceFileName
	sourceFilePkgName
	sourceFileIgnoreFile
	sourceFileBinaryOnly
	sourceFileCgoDirectives
	sourceFileGoBuildConstraint
	sourceFileNumPlusBuildConstraints
)

// error returns the (non-parse) error recorded for the file, or "".
func (sf *sourceFile) error() string {
	return sf.d.stringAt(sf.pos + sourceFileError)
}

// parseError returns the serialized parse error for the file, or ""
// (see parseErrorToString / parseErrorFromString).
func (sf *sourceFile) parseError() string {
	return sf.d.stringAt(sf.pos + sourceFileParseError)
}

// synopsis returns the doc.Synopsis of the file's package comment, or "".
func (sf *sourceFile) synopsis() string {
	return sf.d.stringAt(sf.pos + sourceFileSynopsis)
}

// name returns the file's name.
func (sf *sourceFile) name() string {
	return sf.d.stringAt(sf.pos + sourceFileName)
}

// pkgName returns the package name declared by the file.
func (sf *sourceFile) pkgName() string {
	return sf.d.stringAt(sf.pos + sourceFilePkgName)
}

// ignoreFile reports whether the file should always be ignored.
func (sf *sourceFile) ignoreFile() bool {
	return sf.d.boolAt(sf.pos + sourceFileIgnoreFile)
}

// binaryOnly reports whether the file has a //go:binary-only-package comment.
func (sf *sourceFile) binaryOnly() bool {
	return sf.d.boolAt(sf.pos + sourceFileBinaryOnly)
}

// cgoDirectives returns the #cgo directive lines from the import "C" comment.
func (sf *sourceFile) cgoDirectives() string {
	return sf.d.stringAt(sf.pos + sourceFileCgoDirectives)
}

// goBuildConstraint returns the file's //go:build constraint, or "".
func (sf *sourceFile) goBuildConstraint() string {
	return sf.d.stringAt(sf.pos + sourceFileGoBuildConstraint)
}

// plusBuildConstraints returns the file's // +build constraint lines.
// They are stored as a count followed by one string offset each.
func (sf *sourceFile) plusBuildConstraints() []string {
	pos := sf.pos + sourceFileNumPlusBuildConstraints
	n := sf.d.intAt(pos)
	pos += 4
	ret := make([]string, n)
	for i := 0; i < n; i++ {
		ret[i] = sf.d.stringAt(pos)
		pos += 4
	}
	return ret
}

// importsOffset returns the offset of the imports section, which follows
// the plus-build constraint count and entries.
func (sf *sourceFile) importsOffset() int {
	pos := sf.pos + sourceFileNumPlusBuildConstraints
	n := sf.d.intAt(pos)
	// each build constraint is 1 uint32
	return pos + 4 + n*4
}

// embedsOffset returns the offset of the embeds section, which follows imports.
func (sf *sourceFile) embedsOffset() int {
	pos := sf.importsOffset()
	n := sf.d.intAt(pos)
	// each import is 5 uint32s (string + tokpos)
	return pos + 4 + n*(4*5)
}

// directivesOffset returns the offset of the directives section, which follows embeds.
func (sf *sourceFile) directivesOffset() int {
	pos := sf.embedsOffset()
	n := sf.d.intAt(pos)
	// each embed is 5 uint32s (string + tokpos)
	return pos + 4 + n*(4*5)
}

// imports returns the file's imports, decoding them on first use and
// caching the result in savedImports.
func (sf *sourceFile) imports() []rawImport {
	sf.onceReadImports.Do(func() {
		importsOffset := sf.importsOffset()
		r := sf.d.readAt(importsOffset)
		numImports := r.int()
		ret := make([]rawImport, numImports)
		for i := 0; i < numImports; i++ {
			ret[i] = rawImport{r.string(), r.tokpos()}
		}
		sf.savedImports = ret
	})
	return sf.savedImports
}

// embeds returns the //go:embed patterns in the file with their positions.
func (sf *sourceFile) embeds() []embed {
	embedsOffset := sf.embedsOffset()
	r := sf.d.readAt(embedsOffset)
	numEmbeds := r.int()
	ret := make([]embed, numEmbeds)
	for i := range ret {
		ret[i] = embed{r.string(), r.tokpos()}
	}
	return ret
}

// directives returns the comment directives recorded for the file.
func (sf *sourceFile) directives() []build.Directive {
	directivesOffset := sf.directivesOffset()
	r := sf.d.readAt(directivesOffset)
	numDirectives := r.int()
	ret := make([]build.Directive, numDirectives)
	for i := range ret {
		ret[i] = build.Directive{Text: r.string(), Pos: r.tokpos()}
	}
	return ret
}

// asString returns b's bytes as a string without copying.
// The result aliases b, so b must not be modified while the string is live.
func asString(b []byte) string {
	return unsafe.String(unsafe.SliceData(b), len(b))
}

// A decoder helps decode the index format.
type decoder struct {
	data []byte // data after header
	str  []byte // string table
}

// intAt returns the int at the given offset in d.data.
// Values are little-endian uint32s; any value not representable as a
// non-negative int32 indicates corruption.
func (d *decoder) intAt(off int) int {
	if off < 0 || len(d.data)-off < 4 {
		panic(errCorrupt)
	}
	i := binary.LittleEndian.Uint32(d.data[off : off+4])
	if int32(i)>>31 != 0 {
		panic(errCorrupt)
	}
	return int(i)
}

// boolAt returns the bool at the given offset in d.data.
func (d *decoder) boolAt(off int) bool {
	return d.intAt(off) != 0
}

// stringAt returns the string pointed at by the int at the given offset in d.data.
func (d *decoder) stringAt(off int) string {
	return d.stringTableAt(d.intAt(off))
}

// stringTableAt returns the string at the given offset in the string table d.str.
// Strings are encoded as a uvarint length followed by that many bytes.
func (d *decoder) stringTableAt(off int) string {
	if off < 0 || off >= len(d.str) {
		panic(errCorrupt)
	}
	s := d.str[off:]
	v, n := binary.Uvarint(s)
	if n <= 0 || v > uint64(len(s[n:])) {
		panic(errCorrupt)
	}
	return asString(s[n : n+int(v)])
}

// A reader reads sequential fields from a section of the index format.
type reader struct {
	d   *decoder
	pos int // current read position in d.data
}

// readAt returns a reader starting at the given position in d.
func (d *decoder) readAt(pos int) *reader {
	return &reader{d, pos}
}

// int reads the next int.
func (r *reader) int() int {
	i := r.d.intAt(r.pos)
	r.pos += 4
	return i
}

// string reads the next string.
func (r *reader) string() string {
	return r.d.stringTableAt(r.int())
}

// bool reads the next bool.
func (r *reader) bool() bool {
	return r.int() != 0
}

// tokpos reads the next token.Position.
func (r *reader) tokpos() token.Position {
	return token.Position{
		Filename: r.string(),
		Offset:   r.int(),
		Line:     r.int(),
		Column:   r.int(),
	}
}
package modindex

import (
	"cmd/go/internal/base"
	"cmd/go/internal/fsys"
	"cmd/go/internal/str"
	"encoding/json"
	"errors"
	"fmt"
	"go/build"
	"go/doc"
	"go/scanner"
	"go/token"
	"io/fs"
	"path/filepath"
	"strings"
)

// moduleWalkErr returns filepath.SkipDir if the directory isn't relevant
// when indexing a module or generating a filehash, ErrNotIndexed
// if the module shouldn't be indexed, and nil otherwise.
func moduleWalkErr(root string, path string, info fs.FileInfo, err error) error {
	if err != nil {
		return ErrNotIndexed
	}
	// stop at module boundaries
	if info.IsDir() && path != root {
		if fi, err := fsys.Stat(filepath.Join(path, "go.mod")); err == nil && !fi.IsDir() {
			return filepath.SkipDir
		}
	}
	if info.Mode()&fs.ModeSymlink != 0 {
		if target, err := fsys.Stat(path); err == nil && target.IsDir() {
			// return an error to make the module hash invalid.
			// Symlink directories in modules are tricky, so we won't index
			// modules that contain them.
			// TODO(matloob): perhaps don't return this error if the symlink leads to
			// a directory with a go.mod file.
			return ErrNotIndexed
		}
	}
	return nil
}

// indexModule indexes the module at the given directory and returns its
// encoded representation. It returns ErrNotIndexed if the module can't
// be indexed because it contains symlinks.
func indexModule(modroot string) ([]byte, error) {
	fsys.Trace("indexModule", modroot)
	var packages []*rawPackage

	// If the root itself is a symlink to a directory,
	// we want to follow it (see https://go.dev/issue/50807).
	// Add a trailing separator to force that to happen.
	root := str.WithFilePathSeparator(modroot)
	err := fsys.Walk(root, func(path string, info fs.FileInfo, err error) error {
		if err := moduleWalkErr(root, path, info, err); err != nil {
			return err
		}

		if !info.IsDir() {
			return nil
		}
		if !strings.HasPrefix(path, root) {
			panic(fmt.Errorf("path %v in walk doesn't have modroot %v as prefix", path, modroot))
		}
		rel := path[len(root):]
		packages = append(packages, importRaw(modroot, rel))
		return nil
	})
	if err != nil {
		return nil, err
	}
	return encodeModuleBytes(packages), nil
}

// indexPackage indexes the package at the given directory and returns its
// encoded representation. Unlike indexModule it does not return an error:
// problems reading the directory are recorded in the rawPackage itself
// (see importRaw's error field).
func indexPackage(modroot, pkgdir string) []byte {
	fsys.Trace("indexPackage", pkgdir)
	p := importRaw(modroot, relPath(pkgdir, modroot))
	return encodePackageBytes(p)
}

// rawPackage holds the information from each package that's needed to
// fill a build.Package once the context is available.
type rawPackage struct {
	error string // error encountered reading the package directory, if any
	dir   string // directory containing package sources, relative to the module root

	// Source files
	sourceFiles []*rawFile
}

// parseError is the JSON-serializable form of a file's parse error.
type parseError struct {
	ErrorList   *scanner.ErrorList // set when the error is a scanner.ErrorList
	ErrorString string             // string form of any other error
}

// parseErrorToString converts the error from parsing the file into a string
// representation. A nil error is converted to an empty string, and all other
// errors are converted to a JSON-marshalled parseError struct, with ErrorList
// set for errors of type scanner.ErrorList, and ErrorString set to the error's
// string representation for all other errors.
func parseErrorToString(err error) string {
	if err == nil {
		return ""
	}
	var p parseError
	if e, ok := err.(scanner.ErrorList); ok {
		p.ErrorList = &e
	} else {
		p.ErrorString = e.Error()
	}
	s, err := json.Marshal(p)
	if err != nil {
		panic(err) // This should be impossible because scanner.Error contains only strings and ints.
	}
	return string(s)
}

// parseErrorFromString converts a string produced by parseErrorToString back
// to an error. An empty string is converted to a nil error, and all
// other strings are expected to be JSON-marshalled parseError structs.
// The two functions are meant to preserve the structure of an
// error of type scanner.ErrorList in a round trip, but may not preserve the
// structure of other errors.
func parseErrorFromString(s string) error {
	if s == "" {
		return nil
	}
	var p parseError
	if err := json.Unmarshal([]byte(s), &p); err != nil {
		base.Fatalf(`go: invalid parse error value in index: %q. This indicates a corrupted index. Run "go clean -cache" to reset the module cache.`, s)
	}
	if p.ErrorList != nil {
		return *p.ErrorList
	}
	return errors.New(p.ErrorString)
}

// rawFile is the struct representation of the file holding all
// information in its fields.
type rawFile struct {
	error      string // error encountered reading the file, if any
	parseError string // serialized parse error (see parseErrorToString)

	name                 string
	synopsis             string // doc.Synopsis of package comment... Compute synopsis on all of these?
	pkgName              string
	ignoreFile           bool   // starts with _ or . or should otherwise always be ignored
	binaryOnly           bool   // cannot be rebuilt from source (has //go:binary-only-package comment)
	cgoDirectives        string // the #cgo directive lines in the comment on import "C"
	goBuildConstraint    string
	plusBuildConstraints []string
	imports              []rawImport
	embeds               []embed
	directives           []build.Directive
}

// rawImport records one import path and the position of its import spec.
type rawImport struct {
	path     string
	position token.Position
}

// embed records one //go:embed pattern and the position of its directive.
type embed struct {
	pattern  string
	position token.Position
}
+ // We can't do it earlier, because we want to gather partial information for the + // non-nil *build.Package returned when an error occurs. + // We need to do this before we return early on FindOnly flag. + if !isDir(absdir) { + // package was not found + p.error = fmt.Errorf("cannot find package in:\n\t%s", absdir).Error() + return p + } + + entries, err := fsys.ReadDir(absdir) + if err != nil { + p.error = err.Error() + return p + } + + fset := token.NewFileSet() + for _, d := range entries { + if d.IsDir() { + continue + } + if d.Mode()&fs.ModeSymlink != 0 { + if isDir(filepath.Join(absdir, d.Name())) { + // Symlinks to directories are not source files. + continue + } + } + + name := d.Name() + ext := nameExt(name) + + if strings.HasPrefix(name, "_") || strings.HasPrefix(name, ".") { + continue + } + info, err := getFileInfo(absdir, name, fset) + if err == errNonSource { + // not a source or object file. completely ignore in the index + continue + } else if err != nil { + p.sourceFiles = append(p.sourceFiles, &rawFile{name: name, error: err.Error()}) + continue + } else if info == nil { + p.sourceFiles = append(p.sourceFiles, &rawFile{name: name, ignoreFile: true}) + continue + } + rf := &rawFile{ + name: name, + goBuildConstraint: info.goBuildConstraint, + plusBuildConstraints: info.plusBuildConstraints, + binaryOnly: info.binaryOnly, + directives: info.directives, + } + if info.parsed != nil { + rf.pkgName = info.parsed.Name.Name + } + + // Going to save the file. For non-Go files, can stop here. + p.sourceFiles = append(p.sourceFiles, rf) + if ext != ".go" { + continue + } + + if info.parseErr != nil { + rf.parseError = parseErrorToString(info.parseErr) + // Fall through: we might still have a partial AST in info.Parsed, + // and we want to list files with parse errors anyway. 
+ } + + if info.parsed != nil && info.parsed.Doc != nil { + rf.synopsis = doc.Synopsis(info.parsed.Doc.Text()) + } + + var cgoDirectives []string + for _, imp := range info.imports { + if imp.path == "C" { + cgoDirectives = append(cgoDirectives, extractCgoDirectives(imp.doc.Text())...) + } + rf.imports = append(rf.imports, rawImport{path: imp.path, position: fset.Position(imp.pos)}) + } + rf.cgoDirectives = strings.Join(cgoDirectives, "\n") + for _, emb := range info.embeds { + rf.embeds = append(rf.embeds, embed{emb.pattern, emb.pos}) + } + + } + return p +} + +// extractCgoDirectives filters only the lines containing #cgo directives from the input, +// which is the comment on import "C". +func extractCgoDirectives(doc string) []string { + var out []string + for _, line := range strings.Split(doc, "\n") { + // Line is + // #cgo [GOOS/GOARCH...] LDFLAGS: stuff + // + line = strings.TrimSpace(line) + if len(line) < 5 || line[:4] != "#cgo" || (line[4] != ' ' && line[4] != '\t') { + continue + } + + out = append(out, line) + } + return out +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/syslist.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/syslist.go new file mode 100644 index 0000000000000000000000000000000000000000..41adcc5342d1ed0c8f358a3f778823ab090ac1ef --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/syslist.go @@ -0,0 +1,78 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a lightly modified copy go/build/syslist_test.go. + +package modindex + +// knownOS is the list of past, present, and future known GOOS values. +// Do not remove from this list, as it is used for filename matching. +// If you add an entry to this list, look at unixOS, below. 
+var knownOS = map[string]bool{ + "aix": true, + "android": true, + "darwin": true, + "dragonfly": true, + "freebsd": true, + "hurd": true, + "illumos": true, + "ios": true, + "js": true, + "linux": true, + "nacl": true, + "netbsd": true, + "openbsd": true, + "plan9": true, + "solaris": true, + "wasip1": true, + "windows": true, + "zos": true, +} + +// unixOS is the set of GOOS values matched by the "unix" build tag. +// This is not used for filename matching. +// This list also appears in cmd/dist/build.go. +var unixOS = map[string]bool{ + "aix": true, + "android": true, + "darwin": true, + "dragonfly": true, + "freebsd": true, + "hurd": true, + "illumos": true, + "ios": true, + "linux": true, + "netbsd": true, + "openbsd": true, + "solaris": true, +} + +// knownArch is the list of past, present, and future known GOARCH values. +// Do not remove from this list, as it is used for filename matching. +var knownArch = map[string]bool{ + "386": true, + "amd64": true, + "amd64p32": true, + "arm": true, + "armbe": true, + "arm64": true, + "arm64be": true, + "loong64": true, + "mips": true, + "mipsle": true, + "mips64": true, + "mips64le": true, + "mips64p32": true, + "mips64p32le": true, + "ppc": true, + "ppc64": true, + "ppc64le": true, + "riscv": true, + "riscv64": true, + "s390": true, + "s390x": true, + "sparc": true, + "sparc64": true, + "wasm": true, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/syslist_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/syslist_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1a61562deffafcfb3898cfddf6fde2d64c6de83d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/syslist_test.go @@ -0,0 +1,65 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a lightly modified copy go/build/syslist_test.go. 
+ +package modindex + +import ( + "go/build" + "runtime" + "testing" +) + +var ( + thisOS = runtime.GOOS + thisArch = runtime.GOARCH + otherOS = anotherOS() + otherArch = anotherArch() +) + +func anotherOS() string { + if thisOS != "darwin" && thisOS != "ios" { + return "darwin" + } + return "linux" +} + +func anotherArch() string { + if thisArch != "amd64" { + return "amd64" + } + return "386" +} + +type GoodFileTest struct { + name string + result bool +} + +var tests = []GoodFileTest{ + {"file.go", true}, + {"file.c", true}, + {"file_foo.go", true}, + {"file_" + thisArch + ".go", true}, + {"file_" + otherArch + ".go", false}, + {"file_" + thisOS + ".go", true}, + {"file_" + otherOS + ".go", false}, + {"file_" + thisOS + "_" + thisArch + ".go", true}, + {"file_" + otherOS + "_" + thisArch + ".go", false}, + {"file_" + thisOS + "_" + otherArch + ".go", false}, + {"file_" + otherOS + "_" + otherArch + ".go", false}, + {"file_foo_" + thisArch + ".go", true}, + {"file_foo_" + otherArch + ".go", false}, + {"file_" + thisOS + ".c", true}, + {"file_" + otherOS + ".c", false}, +} + +func TestGoodOSArch(t *testing.T) { + for _, test := range tests { + if (*Context)(&build.Default).goodOSArchFile(test.name, make(map[string]bool)) != test.result { + t.Fatalf("goodOSArchFile(%q) != %v", test.name, test.result) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/testdata/ignore_non_source/a.syso b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/testdata/ignore_non_source/a.syso new file mode 100644 index 0000000000000000000000000000000000000000..9527d05936c460a35a9b875e1dc722988d0079d6 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/testdata/ignore_non_source/a.syso @@ -0,0 +1 @@ +package ignore_non_source diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/testdata/ignore_non_source/b.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/testdata/ignore_non_source/b.go new 
file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/testdata/ignore_non_source/bar.json b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/testdata/ignore_non_source/bar.json new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/testdata/ignore_non_source/baz.log b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/testdata/ignore_non_source/baz.log new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/testdata/ignore_non_source/c.c b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/testdata/ignore_non_source/c.c new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/write.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/write.go new file mode 100644 index 0000000000000000000000000000000000000000..cd18ad96dd19c3f22e5abe502f379e574591944d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modindex/write.go @@ -0,0 +1,164 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modindex + +import ( + "cmd/go/internal/base" + "encoding/binary" + "go/token" + "sort" +) + +const indexVersion = "go index v2" // 11 bytes (plus \n), to align uint32s in index + +// encodeModuleBytes produces the encoded representation of the module index. +// encodeModuleBytes may modify the packages slice. 
+func encodeModuleBytes(packages []*rawPackage) []byte { + e := newEncoder() + e.Bytes([]byte(indexVersion + "\n")) + stringTableOffsetPos := e.Pos() // fill this at the end + e.Uint32(0) // string table offset + sort.Slice(packages, func(i, j int) bool { + return packages[i].dir < packages[j].dir + }) + e.Int(len(packages)) + packagesPos := e.Pos() + for _, p := range packages { + e.String(p.dir) + e.Int(0) + } + for i, p := range packages { + e.IntAt(e.Pos(), packagesPos+8*i+4) + encodePackage(e, p) + } + e.IntAt(e.Pos(), stringTableOffsetPos) + e.Bytes(e.stringTable) + e.Bytes([]byte{0xFF}) // end of string table marker + return e.b +} + +func encodePackageBytes(p *rawPackage) []byte { + return encodeModuleBytes([]*rawPackage{p}) +} + +func encodePackage(e *encoder, p *rawPackage) { + e.String(p.error) + e.String(p.dir) + e.Int(len(p.sourceFiles)) // number of source files + sourceFileOffsetPos := e.Pos() // the pos of the start of the source file offsets + for range p.sourceFiles { + e.Int(0) + } + for i, f := range p.sourceFiles { + e.IntAt(e.Pos(), sourceFileOffsetPos+4*i) + encodeFile(e, f) + } +} + +func encodeFile(e *encoder, f *rawFile) { + e.String(f.error) + e.String(f.parseError) + e.String(f.synopsis) + e.String(f.name) + e.String(f.pkgName) + e.Bool(f.ignoreFile) + e.Bool(f.binaryOnly) + e.String(f.cgoDirectives) + e.String(f.goBuildConstraint) + + e.Int(len(f.plusBuildConstraints)) + for _, s := range f.plusBuildConstraints { + e.String(s) + } + + e.Int(len(f.imports)) + for _, m := range f.imports { + e.String(m.path) + e.Position(m.position) + } + + e.Int(len(f.embeds)) + for _, embed := range f.embeds { + e.String(embed.pattern) + e.Position(embed.position) + } + + e.Int(len(f.directives)) + for _, d := range f.directives { + e.String(d.Text) + e.Position(d.Pos) + } +} + +func newEncoder() *encoder { + e := &encoder{strings: make(map[string]int)} + + // place the empty string at position 0 in the string table + e.stringTable = 
append(e.stringTable, 0) + e.strings[""] = 0 + + return e +} + +func (e *encoder) Position(position token.Position) { + e.String(position.Filename) + e.Int(position.Offset) + e.Int(position.Line) + e.Int(position.Column) +} + +type encoder struct { + b []byte + stringTable []byte + strings map[string]int +} + +func (e *encoder) Pos() int { + return len(e.b) +} + +func (e *encoder) Bytes(b []byte) { + e.b = append(e.b, b...) +} + +func (e *encoder) String(s string) { + if n, ok := e.strings[s]; ok { + e.Int(n) + return + } + pos := len(e.stringTable) + e.strings[s] = pos + e.Int(pos) + e.stringTable = binary.AppendUvarint(e.stringTable, uint64(len(s))) + e.stringTable = append(e.stringTable, s...) +} + +func (e *encoder) Bool(b bool) { + if b { + e.Uint32(1) + } else { + e.Uint32(0) + } +} + +func (e *encoder) Uint32(n uint32) { + e.b = binary.LittleEndian.AppendUint32(e.b, n) +} + +// Int encodes n. Note that all ints are written to the index as uint32s, +// and to avoid problems on 32-bit systems we require fitting into a 32-bit int. +func (e *encoder) Int(n int) { + if n < 0 || int(int32(n)) != n { + base.Fatalf("go: attempting to write an int to the index that overflows int32") + } + e.Uint32(uint32(n)) +} + +func (e *encoder) IntAt(n int, at int) { + if n < 0 || int(int32(n)) != n { + base.Fatalf("go: attempting to write an int to the index that overflows int32") + } + binary.LittleEndian.PutUint32(e.b[at:], uint32(n)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modinfo/info.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modinfo/info.go new file mode 100644 index 0000000000000000000000000000000000000000..b0adcbcfb3dc99528ca5afc7655f9da21b4c22d1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modinfo/info.go @@ -0,0 +1,85 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package modinfo + +import ( + "cmd/go/internal/modfetch/codehost" + "encoding/json" + "time" +) + +// Note that these structs are publicly visible (part of go list's API) +// and the fields are documented in the help text in ../list/list.go + +type ModulePublic struct { + Path string `json:",omitempty"` // module path + Version string `json:",omitempty"` // module version + Query string `json:",omitempty"` // version query corresponding to this version + Versions []string `json:",omitempty"` // available module versions + Replace *ModulePublic `json:",omitempty"` // replaced by this module + Time *time.Time `json:",omitempty"` // time version was created + Update *ModulePublic `json:",omitempty"` // available update (with -u) + Main bool `json:",omitempty"` // is this the main module? + Indirect bool `json:",omitempty"` // module is only indirectly needed by main module + Dir string `json:",omitempty"` // directory holding local copy of files, if any + GoMod string `json:",omitempty"` // path to go.mod file describing module, if any + GoVersion string `json:",omitempty"` // go version used in module + Retracted []string `json:",omitempty"` // retraction information, if any (with -retracted or -u) + Deprecated string `json:",omitempty"` // deprecation message, if any (with -u) + Error *ModuleError `json:",omitempty"` // error loading module + + Origin *codehost.Origin `json:",omitempty"` // provenance of module + Reuse bool `json:",omitempty"` // reuse of old module info is safe +} + +type ModuleError struct { + Err string // error text +} + +type moduleErrorNoMethods ModuleError + +// UnmarshalJSON accepts both {"Err":"text"} and "text", +// so that the output of go mod download -json can still +// be unmarshalled into a ModulePublic during -reuse processing. 
+func (e *ModuleError) UnmarshalJSON(data []byte) error { + if len(data) > 0 && data[0] == '"' { + return json.Unmarshal(data, &e.Err) + } + return json.Unmarshal(data, (*moduleErrorNoMethods)(e)) +} + +func (m *ModulePublic) String() string { + s := m.Path + versionString := func(mm *ModulePublic) string { + v := mm.Version + if len(mm.Retracted) == 0 { + return v + } + return v + " (retracted)" + } + + if m.Version != "" { + s += " " + versionString(m) + if m.Update != nil { + s += " [" + versionString(m.Update) + "]" + } + } + if m.Deprecated != "" { + s += " (deprecated)" + } + if m.Replace != nil { + s += " => " + m.Replace.Path + if m.Replace.Version != "" { + s += " " + versionString(m.Replace) + if m.Replace.Update != nil { + s += " [" + versionString(m.Replace.Update) + "]" + } + } + if m.Replace.Deprecated != "" { + s += " (deprecated)" + } + } + return s +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/build.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/build.go new file mode 100644 index 0000000000000000000000000000000000000000..5cf1487c3ede31302b89b65d7a08fe460a0b91de --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/build.go @@ -0,0 +1,460 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package modload + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/gover" + "cmd/go/internal/modfetch" + "cmd/go/internal/modfetch/codehost" + "cmd/go/internal/modindex" + "cmd/go/internal/modinfo" + "cmd/go/internal/search" + + "golang.org/x/mod/module" +) + +var ( + infoStart, _ = hex.DecodeString("3077af0c9274080241e1c107e6d618e6") + infoEnd, _ = hex.DecodeString("f932433186182072008242104116d8f2") +) + +func isStandardImportPath(path string) bool { + return findStandardImportPath(path) != "" +} + +func findStandardImportPath(path string) string { + if path == "" { + panic("findStandardImportPath called with empty path") + } + if search.IsStandardImportPath(path) { + if modindex.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) { + return filepath.Join(cfg.GOROOT, "src", path) + } + } + return "" +} + +// PackageModuleInfo returns information about the module that provides +// a given package. If modules are not enabled or if the package is in the +// standard library or if the package was not successfully loaded with +// LoadPackages or ImportFromFiles, nil is returned. +func PackageModuleInfo(ctx context.Context, pkgpath string) *modinfo.ModulePublic { + if isStandardImportPath(pkgpath) || !Enabled() { + return nil + } + m, ok := findModule(loaded, pkgpath) + if !ok { + return nil + } + + rs := LoadModFile(ctx) + return moduleInfo(ctx, rs, m, 0, nil) +} + +// PackageModRoot returns the module root directory for the module that provides +// a given package. If modules are not enabled or if the package is in the +// standard library or if the package was not successfully loaded with +// LoadPackages or ImportFromFiles, the empty string is returned. 
+func PackageModRoot(ctx context.Context, pkgpath string) string { + if isStandardImportPath(pkgpath) || !Enabled() || cfg.BuildMod == "vendor" { + return "" + } + m, ok := findModule(loaded, pkgpath) + if !ok { + return "" + } + root, _, err := fetch(ctx, m) + if err != nil { + return "" + } + return root +} + +func ModuleInfo(ctx context.Context, path string) *modinfo.ModulePublic { + if !Enabled() { + return nil + } + + if path, vers, found := strings.Cut(path, "@"); found { + m := module.Version{Path: path, Version: vers} + return moduleInfo(ctx, nil, m, 0, nil) + } + + rs := LoadModFile(ctx) + + var ( + v string + ok bool + ) + if rs.pruning == pruned { + v, ok = rs.rootSelected(path) + } + if !ok { + mg, err := rs.Graph(ctx) + if err != nil { + base.Fatal(err) + } + v = mg.Selected(path) + } + + if v == "none" { + return &modinfo.ModulePublic{ + Path: path, + Error: &modinfo.ModuleError{ + Err: "module not in current build", + }, + } + } + + return moduleInfo(ctx, rs, module.Version{Path: path, Version: v}, 0, nil) +} + +// addUpdate fills in m.Update if an updated version is available. +func addUpdate(ctx context.Context, m *modinfo.ModulePublic) { + if m.Version == "" { + return + } + + info, err := Query(ctx, m.Path, "upgrade", m.Version, CheckAllowed) + var noVersionErr *NoMatchingVersionError + if errors.Is(err, ErrDisallowed) || + errors.Is(err, fs.ErrNotExist) || + errors.As(err, &noVersionErr) { + // Ignore "not found" and "no matching version" errors. + // This means the proxy has no matching version or no versions at all. + // + // Ignore "disallowed" errors. This means the current version is + // excluded or retracted and there are no higher allowed versions. + // + // We should report other errors though. An attacker that controls the + // network shouldn't be able to hide versions by interfering with + // the HTTPS connection. 
An attacker that controls the proxy may still + // hide versions, since the "list" and "latest" endpoints are not + // authenticated. + return + } else if err != nil { + if m.Error == nil { + m.Error = &modinfo.ModuleError{Err: err.Error()} + } + return + } + + if gover.ModCompare(m.Path, info.Version, m.Version) > 0 { + m.Update = &modinfo.ModulePublic{ + Path: m.Path, + Version: info.Version, + Time: &info.Time, + } + } +} + +// mergeOrigin returns the union of data from two origins, +// returning either a new origin or one of its unmodified arguments. +// If the two origins conflict including if either is nil, +// mergeOrigin returns nil. +func mergeOrigin(m1, m2 *codehost.Origin) *codehost.Origin { + if m1 == nil || m2 == nil { + return nil + } + + if m2.VCS != m1.VCS || + m2.URL != m1.URL || + m2.Subdir != m1.Subdir { + return nil + } + + merged := *m1 + if m2.Hash != "" { + if m1.Hash != "" && m1.Hash != m2.Hash { + return nil + } + merged.Hash = m2.Hash + } + if m2.TagSum != "" { + if m1.TagSum != "" && (m1.TagSum != m2.TagSum || m1.TagPrefix != m2.TagPrefix) { + return nil + } + merged.TagSum = m2.TagSum + merged.TagPrefix = m2.TagPrefix + } + if m2.Ref != "" { + if m1.Ref != "" && m1.Ref != m2.Ref { + return nil + } + merged.Ref = m2.Ref + } + + switch { + case merged == *m1: + return m1 + case merged == *m2: + return m2 + default: + // Clone the result to avoid an alloc for merged + // if the result is equal to one of the arguments. + clone := merged + return &clone + } +} + +// addVersions fills in m.Versions with the list of known versions. +// Excluded versions will be omitted. If listRetracted is false, retracted +// versions will also be omitted. +func addVersions(ctx context.Context, m *modinfo.ModulePublic, listRetracted bool) { + // TODO(bcmills): Would it make sense to check for reuse here too? + // Perhaps that doesn't buy us much, though: we would always have to fetch + // all of the version tags to list the available versions anyway. 
+ + allowed := CheckAllowed + if listRetracted { + allowed = CheckExclusions + } + v, origin, err := versions(ctx, m.Path, allowed) + if err != nil && m.Error == nil { + m.Error = &modinfo.ModuleError{Err: err.Error()} + } + m.Versions = v + m.Origin = mergeOrigin(m.Origin, origin) +} + +// addRetraction fills in m.Retracted if the module was retracted by its author. +// m.Error is set if there's an error loading retraction information. +func addRetraction(ctx context.Context, m *modinfo.ModulePublic) { + if m.Version == "" { + return + } + + err := CheckRetractions(ctx, module.Version{Path: m.Path, Version: m.Version}) + var noVersionErr *NoMatchingVersionError + var retractErr *ModuleRetractedError + if err == nil || errors.Is(err, fs.ErrNotExist) || errors.As(err, &noVersionErr) { + // Ignore "not found" and "no matching version" errors. + // This means the proxy has no matching version or no versions at all. + // + // We should report other errors though. An attacker that controls the + // network shouldn't be able to hide versions by interfering with + // the HTTPS connection. An attacker that controls the proxy may still + // hide versions, since the "list" and "latest" endpoints are not + // authenticated. + return + } else if errors.As(err, &retractErr) { + if len(retractErr.Rationale) == 0 { + m.Retracted = []string{"retracted by module author"} + } else { + m.Retracted = retractErr.Rationale + } + } else if m.Error == nil { + m.Error = &modinfo.ModuleError{Err: err.Error()} + } +} + +// addDeprecation fills in m.Deprecated if the module was deprecated by its +// author. m.Error is set if there's an error loading deprecation information. 
+func addDeprecation(ctx context.Context, m *modinfo.ModulePublic) { + deprecation, err := CheckDeprecation(ctx, module.Version{Path: m.Path, Version: m.Version}) + var noVersionErr *NoMatchingVersionError + if errors.Is(err, fs.ErrNotExist) || errors.As(err, &noVersionErr) { + // Ignore "not found" and "no matching version" errors. + // This means the proxy has no matching version or no versions at all. + // + // We should report other errors though. An attacker that controls the + // network shouldn't be able to hide versions by interfering with + // the HTTPS connection. An attacker that controls the proxy may still + // hide versions, since the "list" and "latest" endpoints are not + // authenticated. + return + } + if err != nil { + if m.Error == nil { + m.Error = &modinfo.ModuleError{Err: err.Error()} + } + return + } + m.Deprecated = deprecation +} + +// moduleInfo returns information about module m, loaded from the requirements +// in rs (which may be nil to indicate that m was not loaded from a requirement +// graph). +func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) *modinfo.ModulePublic { + if m.Version == "" && MainModules.Contains(m.Path) { + info := &modinfo.ModulePublic{ + Path: m.Path, + Version: m.Version, + Main: true, + } + if v, ok := rawGoVersion.Load(m); ok { + info.GoVersion = v.(string) + } else { + panic("internal error: GoVersion not set for main module") + } + if modRoot := MainModules.ModRoot(m); modRoot != "" { + info.Dir = modRoot + info.GoMod = modFilePath(modRoot) + } + return info + } + + info := &modinfo.ModulePublic{ + Path: m.Path, + Version: m.Version, + Indirect: rs != nil && !rs.direct[m.Path], + } + if v, ok := rawGoVersion.Load(m); ok { + info.GoVersion = v.(string) + } + + // completeFromModCache fills in the extra fields in m using the module cache. 
+ completeFromModCache := func(m *modinfo.ModulePublic) { + if gover.IsToolchain(m.Path) { + return + } + + checksumOk := func(suffix string) bool { + return rs == nil || m.Version == "" || !mustHaveSums() || + modfetch.HaveSum(module.Version{Path: m.Path, Version: m.Version + suffix}) + } + + mod := module.Version{Path: m.Path, Version: m.Version} + + if m.Version != "" { + if old := reuse[mod]; old != nil { + if err := checkReuse(ctx, mod, old.Origin); err == nil { + *m = *old + m.Query = "" + m.Dir = "" + return + } + } + + if q, err := Query(ctx, m.Path, m.Version, "", nil); err != nil { + m.Error = &modinfo.ModuleError{Err: err.Error()} + } else { + m.Version = q.Version + m.Time = &q.Time + } + } + + if m.GoVersion == "" && checksumOk("/go.mod") { + // Load the go.mod file to determine the Go version, since it hasn't + // already been populated from rawGoVersion. + if summary, err := rawGoModSummary(mod); err == nil && summary.goVersion != "" { + m.GoVersion = summary.goVersion + } + } + + if m.Version != "" { + if checksumOk("/go.mod") { + gomod, err := modfetch.CachePath(ctx, mod, "mod") + if err == nil { + if info, err := os.Stat(gomod); err == nil && info.Mode().IsRegular() { + m.GoMod = gomod + } + } + } + if checksumOk("") { + dir, err := modfetch.DownloadDir(ctx, mod) + if err == nil { + m.Dir = dir + } + } + + if mode&ListRetracted != 0 { + addRetraction(ctx, m) + } + } + } + + if rs == nil { + // If this was an explicitly-versioned argument to 'go mod download' or + // 'go list -m', report the actual requested version, not its replacement. + completeFromModCache(info) // Will set m.Error in vendor mode. 
+ return info + } + + r := Replacement(m) + if r.Path == "" { + if cfg.BuildMod == "vendor" { + // It's tempting to fill in the "Dir" field to point within the vendor + // directory, but that would be misleading: the vendor directory contains + // a flattened package tree, not complete modules, and it can even + // interleave packages from different modules if one module path is a + // prefix of the other. + } else { + completeFromModCache(info) + } + return info + } + + // Don't hit the network to fill in extra data for replaced modules. + // The original resolved Version and Time don't matter enough to be + // worth the cost, and we're going to overwrite the GoMod and Dir from the + // replacement anyway. See https://golang.org/issue/27859. + info.Replace = &modinfo.ModulePublic{ + Path: r.Path, + Version: r.Version, + } + if v, ok := rawGoVersion.Load(m); ok { + info.Replace.GoVersion = v.(string) + } + if r.Version == "" { + if filepath.IsAbs(r.Path) { + info.Replace.Dir = r.Path + } else { + info.Replace.Dir = filepath.Join(replaceRelativeTo(), r.Path) + } + info.Replace.GoMod = filepath.Join(info.Replace.Dir, "go.mod") + } + if cfg.BuildMod != "vendor" { + completeFromModCache(info.Replace) + info.Dir = info.Replace.Dir + info.GoMod = info.Replace.GoMod + info.Retracted = info.Replace.Retracted + } + info.GoVersion = info.Replace.GoVersion + return info +} + +// findModule searches for the module that contains the package at path. +// If the package was loaded, its containing module and true are returned. +// Otherwise, module.Version{} and false are returned. +func findModule(ld *loader, path string) (module.Version, bool) { + if pkg, ok := ld.pkgCache.Get(path); ok { + return pkg.mod, pkg.mod != module.Version{} + } + return module.Version{}, false +} + +func ModInfoProg(info string, isgccgo bool) []byte { + // Inject an init function to set runtime.modinfo. + // This is only used for gccgo - with gc we hand the info directly to the linker. 
+ // The init function has the drawback that packages may want to + // look at the module info in their init functions (see issue 29628), + // which won't work. See also issue 30344. + if isgccgo { + return fmt.Appendf(nil, `package main +import _ "unsafe" +//go:linkname __set_debug_modinfo__ runtime.setmodinfo +func __set_debug_modinfo__(string) +func init() { __set_debug_modinfo__(%q) } +`, ModInfoData(info)) + } + return nil +} + +func ModInfoData(info string) []byte { + return []byte(string(infoStart) + info + string(infoEnd)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/buildlist.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/buildlist.go new file mode 100644 index 0000000000000000000000000000000000000000..d72a24f111449ad60fd40780074a770826a27e23 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/buildlist.go @@ -0,0 +1,1510 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modload + +import ( + "context" + "errors" + "fmt" + "os" + "reflect" + "runtime" + "runtime/debug" + "slices" + "strings" + "sync" + "sync/atomic" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/gover" + "cmd/go/internal/mvs" + "cmd/go/internal/par" + + "golang.org/x/mod/module" +) + +// A Requirements represents a logically-immutable set of root module requirements. +type Requirements struct { + // pruning is the pruning at which the requirement graph is computed. + // + // If unpruned, the graph includes all transitive requirements regardless + // of whether the requiring module supports pruning. + // + // If pruned, the graph includes only the root modules, the explicit + // requirements of those root modules, and the transitive requirements of only + // the root modules that do not support pruning. 
+ // + // If workspace, the graph includes only the workspace modules, the explicit + // requirements of the workspace modules, and the transitive requirements of + // the workspace modules that do not support pruning. + pruning modPruning + + // rootModules is the set of root modules of the graph, sorted and capped to + // length. It may contain duplicates, and may contain multiple versions for a + // given module path. The root modules of the graph are the set of main + // modules in workspace mode, and the main module's direct requirements + // outside workspace mode. + // + // The roots are always expected to contain an entry for the "go" module, + // indicating the Go language version in use. + rootModules []module.Version + maxRootVersion map[string]string + + // direct is the set of module paths for which we believe the module provides + // a package directly imported by a package or test in the main module. + // + // The "direct" map controls which modules are annotated with "// indirect" + // comments in the go.mod file, and may impact which modules are listed as + // explicit roots (vs. indirect-only dependencies). However, it should not + // have a semantic effect on the build list overall. + // + // The initial direct map is populated from the existing "// indirect" + // comments (or lack thereof) in the go.mod file. It is updated by the + // package loader: dependencies may be promoted to direct if new + // direct imports are observed, and may be demoted to indirect during + // 'go mod tidy' or 'go mod vendor'. + // + // The direct map is keyed by module paths, not module versions. When a + // module's selected version changes, we assume that it remains direct if the + // previous version was a direct dependency. That assumption might not hold in + // rare cases (such as if a dependency splits out a nested module, or merges a + // nested module back into a parent module). 
+ direct map[string]bool + + graphOnce sync.Once // guards writes to (but not reads from) graph + graph atomic.Pointer[cachedGraph] +} + +// A cachedGraph is a non-nil *ModuleGraph, together with any error discovered +// while loading that graph. +type cachedGraph struct { + mg *ModuleGraph + err error // If err is non-nil, mg may be incomplete (but must still be non-nil). +} + +// requirements is the requirement graph for the main module. +// +// It is always non-nil if the main module's go.mod file has been loaded. +// +// This variable should only be read from the loadModFile function, and should +// only be written in the loadModFile and commitRequirements functions. +// All other functions that need or produce a *Requirements should +// accept and/or return an explicit parameter. +var requirements *Requirements + +func mustHaveGoRoot(roots []module.Version) { + for _, m := range roots { + if m.Path == "go" { + return + } + } + panic("go: internal error: missing go root module") +} + +// newRequirements returns a new requirement set with the given root modules. +// The dependencies of the roots will be loaded lazily at the first call to the +// Graph method. +// +// The rootModules slice must be sorted according to gover.ModSort. +// The caller must not modify the rootModules slice or direct map after passing +// them to newRequirements. +// +// If vendoring is in effect, the caller must invoke initVendor on the returned +// *Requirements before any other method. 
+func newRequirements(pruning modPruning, rootModules []module.Version, direct map[string]bool) *Requirements {
+	mustHaveGoRoot(rootModules)
+
+	if pruning != workspace {
+		// Outside workspace pruning we must not actually be in workspace mode:
+		// a non-empty workFilePath means the caller passed inconsistent
+		// arguments. (Previously this guard appeared twice back-to-back; the
+		// redundant copy has been removed.)
+		if workFilePath != "" {
+			panic("in workspace mode, but pruning is not workspace in newRequirements")
+		}
+		for i, m := range rootModules {
+			// Main modules must have been trimmed out of the root list, and
+			// every remaining root must be a fully-specified path@version.
+			if m.Version == "" && MainModules.Contains(m.Path) {
+				panic(fmt.Sprintf("newRequirements called with untrimmed build list: rootModules[%v] is a main module", i))
+			}
+			if m.Path == "" || m.Version == "" {
+				panic(fmt.Sprintf("bad requirement: rootModules[%v] = %v", i, m))
+			}
+		}
+	}
+
+	rs := &Requirements{
+		pruning:        pruning,
+		rootModules:    rootModules,
+		maxRootVersion: make(map[string]string, len(rootModules)),
+		direct:         direct,
+	}
+
+	for i, m := range rootModules {
+		// The roots must arrive sorted per gover.ModSort; verify adjacency.
+		if i > 0 {
+			prev := rootModules[i-1]
+			if prev.Path > m.Path || (prev.Path == m.Path && gover.ModCompare(m.Path, prev.Version, m.Version) > 0) {
+				panic(fmt.Sprintf("newRequirements called with unsorted roots: %v", rootModules))
+			}
+		}
+
+		// Record only the highest version seen for each root path.
+		if v, ok := rs.maxRootVersion[m.Path]; ok && gover.ModCompare(m.Path, v, m.Version) >= 0 {
+			continue
+		}
+		rs.maxRootVersion[m.Path] = m.Version
+	}
+
+	if rs.maxRootVersion["go"] == "" {
+		panic(`newRequirements called without a "go" version`)
+	}
+	return rs
+}
+
+// String returns a string describing the Requirements for debugging.
+func (rs *Requirements) String() string {
+	return fmt.Sprintf("{%v %v}", rs.pruning, rs.rootModules)
+}
+
+// initVendor initializes rs.graph from the given list of vendored module
+// dependencies, overriding the graph that would normally be loaded from module
+// requirements.
+func (rs *Requirements) initVendor(vendorList []module.Version) { + rs.graphOnce.Do(func() { + roots := MainModules.Versions() + if inWorkspaceMode() { + // Use rs.rootModules to pull in the go and toolchain roots + // from the go.work file and preserve the invariant that all + // of rs.rootModules are in mg.g. + roots = rs.rootModules + } + mg := &ModuleGraph{ + g: mvs.NewGraph(cmpVersion, roots), + } + + if rs.pruning == pruned { + mainModule := MainModules.mustGetSingleMainModule() + // The roots of a single pruned module should already include every module in the + // vendor list, because the vendored modules are the same as those needed + // for graph pruning. + // + // Just to be sure, we'll double-check that here. + inconsistent := false + for _, m := range vendorList { + if v, ok := rs.rootSelected(m.Path); !ok || v != m.Version { + base.Errorf("go: vendored module %v should be required explicitly in go.mod", m) + inconsistent = true + } + } + if inconsistent { + base.Fatal(errGoModDirty) + } + + // Now we can treat the rest of the module graph as effectively “pruned + // out”, as though we are viewing the main module from outside: in vendor + // mode, the root requirements *are* the complete module graph. + mg.g.Require(mainModule, rs.rootModules) + } else { + // The transitive requirements of the main module are not in general available + // from the vendor directory, and we don't actually know how we got from + // the roots to the final build list. + // + // Instead, we'll inject a fake "vendor/modules.txt" module that provides + // those transitive dependencies, and mark it as a dependency of the main + // module. That allows us to elide the actual structure of the module + // graph, but still distinguishes between direct and indirect + // dependencies. 
+ vendorMod := module.Version{Path: "vendor/modules.txt", Version: ""} + if inWorkspaceMode() { + for _, m := range MainModules.Versions() { + reqs, _ := rootsFromModFile(m, MainModules.ModFile(m), omitToolchainRoot) + mg.g.Require(m, append(reqs, vendorMod)) + } + mg.g.Require(vendorMod, vendorList) + + } else { + mainModule := MainModules.mustGetSingleMainModule() + mg.g.Require(mainModule, append(rs.rootModules, vendorMod)) + mg.g.Require(vendorMod, vendorList) + } + } + + rs.graph.Store(&cachedGraph{mg, nil}) + }) +} + +// GoVersion returns the Go language version for the Requirements. +func (rs *Requirements) GoVersion() string { + v, _ := rs.rootSelected("go") + if v == "" { + panic("internal error: missing go version in modload.Requirements") + } + return v +} + +// rootSelected returns the version of the root dependency with the given module +// path, or the zero module.Version and ok=false if the module is not a root +// dependency. +func (rs *Requirements) rootSelected(path string) (version string, ok bool) { + if MainModules.Contains(path) { + return "", true + } + if v, ok := rs.maxRootVersion[path]; ok { + return v, true + } + return "", false +} + +// hasRedundantRoot returns true if the root list contains multiple requirements +// of the same module or a requirement on any version of the main module. +// Redundant requirements should be pruned, but they may influence version +// selection. +func (rs *Requirements) hasRedundantRoot() bool { + for i, m := range rs.rootModules { + if MainModules.Contains(m.Path) || (i > 0 && m.Path == rs.rootModules[i-1].Path) { + return true + } + } + return false +} + +// Graph returns the graph of module requirements loaded from the current +// root modules (as reported by RootModules). +// +// Graph always makes a best effort to load the requirement graph despite any +// errors, and always returns a non-nil *ModuleGraph. 
+// +// If the requirements of any relevant module fail to load, Graph also +// returns a non-nil error of type *mvs.BuildListError. +func (rs *Requirements) Graph(ctx context.Context) (*ModuleGraph, error) { + rs.graphOnce.Do(func() { + mg, mgErr := readModGraph(ctx, rs.pruning, rs.rootModules, nil) + rs.graph.Store(&cachedGraph{mg, mgErr}) + }) + cached := rs.graph.Load() + return cached.mg, cached.err +} + +// IsDirect returns whether the given module provides a package directly +// imported by a package or test in the main module. +func (rs *Requirements) IsDirect(path string) bool { + return rs.direct[path] +} + +// A ModuleGraph represents the complete graph of module dependencies +// of a main module. +// +// If the main module supports module graph pruning, the graph does not include +// transitive dependencies of non-root (implicit) dependencies. +type ModuleGraph struct { + g *mvs.Graph + loadCache par.ErrCache[module.Version, *modFileSummary] + + buildListOnce sync.Once + buildList []module.Version +} + +var readModGraphDebugOnce sync.Once + +// readModGraph reads and returns the module dependency graph starting at the +// given roots. +// +// The requirements of the module versions found in the unprune map are included +// in the graph even if they would normally be pruned out. +// +// Unlike LoadModGraph, readModGraph does not attempt to diagnose or update +// inconsistent roots. +func readModGraph(ctx context.Context, pruning modPruning, roots []module.Version, unprune map[module.Version]bool) (*ModuleGraph, error) { + mustHaveGoRoot(roots) + if pruning == pruned { + // Enable diagnostics for lazy module loading + // (https://golang.org/ref/mod#lazy-loading) only if the module graph is + // pruned. 
+ // + // In unpruned modules,we load the module graph much more aggressively (in + // order to detect inconsistencies that wouldn't be feasible to spot-check), + // so it wouldn't be useful to log when that occurs (because it happens in + // normal operation all the time). + readModGraphDebugOnce.Do(func() { + for _, f := range strings.Split(os.Getenv("GODEBUG"), ",") { + switch f { + case "lazymod=log": + debug.PrintStack() + fmt.Fprintf(os.Stderr, "go: read full module graph.\n") + case "lazymod=strict": + debug.PrintStack() + base.Fatalf("go: read full module graph (forbidden by GODEBUG=lazymod=strict).") + } + } + }) + } + + var graphRoots []module.Version + if inWorkspaceMode() { + graphRoots = roots + } else { + graphRoots = MainModules.Versions() + } + var ( + mu sync.Mutex // guards mg.g and hasError during loading + hasError bool + mg = &ModuleGraph{ + g: mvs.NewGraph(cmpVersion, graphRoots), + } + ) + + if pruning != workspace { + if inWorkspaceMode() { + panic("pruning is not workspace in workspace mode") + } + mg.g.Require(MainModules.mustGetSingleMainModule(), roots) + } + + type dedupKey struct { + m module.Version + pruning modPruning + } + var ( + loadQueue = par.NewQueue(runtime.GOMAXPROCS(0)) + loading sync.Map // dedupKey → nil; the set of modules that have been or are being loaded + ) + + // loadOne synchronously loads the explicit requirements for module m. + // It does not load the transitive requirements of m even if the go version in + // m's go.mod file indicates that it supports graph pruning. 
+ loadOne := func(m module.Version) (*modFileSummary, error) { + return mg.loadCache.Do(m, func() (*modFileSummary, error) { + summary, err := goModSummary(m) + + mu.Lock() + if err == nil { + mg.g.Require(m, summary.require) + } else { + hasError = true + } + mu.Unlock() + + return summary, err + }) + } + + var enqueue func(m module.Version, pruning modPruning) + enqueue = func(m module.Version, pruning modPruning) { + if m.Version == "none" { + return + } + + if _, dup := loading.LoadOrStore(dedupKey{m, pruning}, nil); dup { + // m has already been enqueued for loading. Since unpruned loading may + // follow cycles in the requirement graph, we need to return early + // to avoid making the load queue infinitely long. + return + } + + loadQueue.Add(func() { + summary, err := loadOne(m) + if err != nil { + return // findError will report the error later. + } + + // If the version in m's go.mod file does not support pruning, then we + // cannot assume that the explicit requirements of m (added by loadOne) + // are sufficient to build the packages it contains. We must load its full + // transitive dependency graph to be sure that we see all relevant + // dependencies. In addition, we must load the requirements of any module + // that is explicitly marked as unpruned. + nextPruning := summary.pruning + if pruning == unpruned { + nextPruning = unpruned + } + for _, r := range summary.require { + if pruning != pruned || summary.pruning == unpruned || unprune[r] { + enqueue(r, nextPruning) + } + } + }) + } + + mustHaveGoRoot(roots) + for _, m := range roots { + enqueue(m, pruning) + } + <-loadQueue.Idle() + + // Reload any dependencies of the main modules which are not + // at their selected versions at workspace mode, because the + // requirements don't accurately reflect the transitive imports. 
+ if pruning == workspace { + // hasDepsInAll contains the set of modules that need to be loaded + // at workspace pruning because any of their dependencies may + // provide packages in all. + hasDepsInAll := make(map[string]bool) + seen := map[module.Version]bool{} + for _, m := range roots { + hasDepsInAll[m.Path] = true + } + // This loop will terminate because it will call enqueue on each version of + // each dependency of the modules in hasDepsInAll at most once (and only + // calls enqueue on successively increasing versions of each dependency). + for { + needsEnqueueing := map[module.Version]bool{} + for p := range hasDepsInAll { + m := module.Version{Path: p, Version: mg.g.Selected(p)} + if !seen[m] { + needsEnqueueing[m] = true + continue + } + reqs, _ := mg.g.RequiredBy(m) + for _, r := range reqs { + s := module.Version{Path: r.Path, Version: mg.g.Selected(r.Path)} + if gover.ModCompare(r.Path, s.Version, r.Version) > 0 && !seen[s] { + needsEnqueueing[s] = true + } + } + } + // add all needs enqueueing to paths we care about + if len(needsEnqueueing) == 0 { + break + } + + for p := range needsEnqueueing { + enqueue(p, workspace) + seen[p] = true + hasDepsInAll[p.Path] = true + } + <-loadQueue.Idle() + } + } + + if hasError { + return mg, mg.findError() + } + return mg, nil +} + +// RequiredBy returns the dependencies required by module m in the graph, +// or ok=false if module m's dependencies are pruned out. +// +// The caller must not modify the returned slice, but may safely append to it +// and may rely on it not to be modified. +func (mg *ModuleGraph) RequiredBy(m module.Version) (reqs []module.Version, ok bool) { + return mg.g.RequiredBy(m) +} + +// Selected returns the selected version of the module with the given path. +// +// If no version is selected, Selected returns version "none". 
+func (mg *ModuleGraph) Selected(path string) (version string) {
+	return mg.g.Selected(path)
+}
+
+// WalkBreadthFirst invokes f once, in breadth-first order, for each module
+// version other than "none" that appears in the graph, regardless of whether
+// that version is selected.
+func (mg *ModuleGraph) WalkBreadthFirst(f func(m module.Version)) {
+	mg.g.WalkBreadthFirst(f)
+}
+
+// BuildList returns the selected versions of all modules present in the graph,
+// beginning with the main modules.
+//
+// The order of the remaining elements in the list is deterministic
+// but arbitrary.
+//
+// The caller must not modify the returned list, but may safely append to it
+// and may rely on it not to be modified.
+func (mg *ModuleGraph) BuildList() []module.Version {
+	// Compute the list at most once; Clip prevents callers' appends from
+	// writing into spare capacity shared with the underlying graph.
+	mg.buildListOnce.Do(func() {
+		mg.buildList = slices.Clip(mg.g.BuildList())
+	})
+	return mg.buildList
+}
+
+// findError searches the module graph for a requirement path that ends at a
+// module whose go.mod summary failed to load, and returns that failure wrapped
+// in an *mvs.BuildListError describing the path. It returns nil if no loaded
+// module recorded an error.
+func (mg *ModuleGraph) findError() error {
+	errStack := mg.g.FindPath(func(m module.Version) bool {
+		_, err := mg.loadCache.Get(m)
+		return err != nil && err != par.ErrCacheEntryNotFound
+	})
+	if len(errStack) > 0 {
+		_, err := mg.loadCache.Get(errStack[len(errStack)-1])
+		// noUpgrade is deliberately nil: no upgrade hint is offered here.
+		var noUpgrade func(from, to module.Version) bool
+		return mvs.NewBuildListError(err, errStack, noUpgrade)
+	}
+
+	return nil
+}
+
+// allRootsSelected reports whether every root module of the graph is at its
+// selected version (in workspace mode the roots are the main modules; otherwise
+// they are the single main module's direct requirements).
+func (mg *ModuleGraph) allRootsSelected() bool {
+	var roots []module.Version
+	if inWorkspaceMode() {
+		roots = MainModules.Versions()
+	} else {
+		roots, _ = mg.g.RequiredBy(MainModules.mustGetSingleMainModule())
+	}
+	for _, m := range roots {
+		if mg.Selected(m.Path) != m.Version {
+			return false
+		}
+	}
+	return true
+}
+
+// LoadModGraph loads and returns the graph of module dependencies of the main module,
+// without loading any packages.
+//
+// If the goVersion string is non-empty, the returned graph is the graph
+// as interpreted by the given Go version (instead of the version indicated
+// in the go.mod file).
+// +// Modules are loaded automatically (and lazily) in LoadPackages: +// LoadModGraph need only be called if LoadPackages is not, +// typically in commands that care about modules but no particular package. +func LoadModGraph(ctx context.Context, goVersion string) (*ModuleGraph, error) { + rs, err := loadModFile(ctx, nil) + if err != nil { + return nil, err + } + + if goVersion != "" { + v, _ := rs.rootSelected("go") + if gover.Compare(v, gover.GoStrictVersion) >= 0 && gover.Compare(goVersion, v) < 0 { + return nil, fmt.Errorf("requested Go version %s cannot load module graph (requires Go >= %s)", goVersion, v) + } + + pruning := pruningForGoVersion(goVersion) + if pruning == unpruned && rs.pruning != unpruned { + // Use newRequirements instead of convertDepth because convertDepth + // also updates roots; here, we want to report the unmodified roots + // even though they may seem inconsistent. + rs = newRequirements(unpruned, rs.rootModules, rs.direct) + } + + return rs.Graph(ctx) + } + + rs, mg, err := expandGraph(ctx, rs) + if err != nil { + return nil, err + } + requirements = rs + return mg, err +} + +// expandGraph loads the complete module graph from rs. +// +// If the complete graph reveals that some root of rs is not actually the +// selected version of its path, expandGraph computes a new set of roots that +// are consistent. (With a pruned module graph, this may result in upgrades to +// other modules due to requirements that were previously pruned out.) +// +// expandGraph returns the updated roots, along with the module graph loaded +// from those roots and any error encountered while loading that graph. +// expandGraph returns non-nil requirements and a non-nil graph regardless of +// errors. On error, the roots might not be updated to be consistent. 
+func expandGraph(ctx context.Context, rs *Requirements) (*Requirements, *ModuleGraph, error) {
+	mg, err := rs.Graph(ctx)
+	if err != nil {
+		// Without the full graph we cannot tell which versions of transitive
+		// dependencies would be selected, so the roots cannot be updated.
+		return rs, mg, err
+	}
+
+	if mg.allRootsSelected() {
+		// The roots already agree with the rest of the graph; nothing to do.
+		return rs, mg, nil
+	}
+
+	// The roots of rs are inconsistent with the rest of the graph: promote
+	// what were previously transitive requirements to roots. In an unpruned
+	// module this leaves the overall build list unchanged, but in a pruned
+	// module it may pull in previously-irrelevant transitive dependencies.
+	updated, uerr := updateRoots(ctx, rs.direct, rs, nil, nil, false)
+	if uerr != nil {
+		// The update failed — perhaps because of an error in a transitive
+		// dependency needed for it — so fall back to the original roots.
+		return rs, mg, uerr
+	}
+	rs = updated
+	mg, err = rs.Graph(ctx)
+	return rs, mg, err
+}
+
+// EditBuildList edits the global build list by first adding every module in add
+// to the existing build list, then adjusting versions (and adding or removing
+// requirements as needed) until every module in mustSelect is selected at the
+// given version.
+//
+// (Note that the newly-added modules might not be selected in the resulting
+// build list: they could be lower than existing requirements or conflict with
+// versions in mustSelect.)
+//
+// If the versions listed in mustSelect are mutually incompatible (due to one of
+// the listed modules requiring a higher version of another), EditBuildList
+// returns a *ConstraintError and leaves the build list in its previous state.
+//
+// On success, EditBuildList reports whether the selected version of any module
+// in the build list may have been changed (possibly to or from "none") as a
+// result.
+func EditBuildList(ctx context.Context, add, mustSelect []module.Version) (changed bool, err error) { + rs, changed, err := editRequirements(ctx, LoadModFile(ctx), add, mustSelect) + if err != nil { + return false, err + } + requirements = rs + return changed, err +} + +// OverrideRoots edits the global requirement roots by replacing the specific module versions. +func OverrideRoots(ctx context.Context, replace []module.Version) { + requirements = overrideRoots(ctx, requirements, replace) +} + +func overrideRoots(ctx context.Context, rs *Requirements, replace []module.Version) *Requirements { + drop := make(map[string]bool) + for _, m := range replace { + drop[m.Path] = true + } + var roots []module.Version + for _, m := range rs.rootModules { + if !drop[m.Path] { + roots = append(roots, m) + } + } + roots = append(roots, replace...) + gover.ModSort(roots) + return newRequirements(rs.pruning, roots, rs.direct) +} + +// A ConstraintError describes inconsistent constraints in EditBuildList +type ConstraintError struct { + // Conflict lists the source of the conflict for each version in mustSelect + // that could not be selected due to the requirements of some other version in + // mustSelect. + Conflicts []Conflict +} + +func (e *ConstraintError) Error() string { + b := new(strings.Builder) + b.WriteString("version constraints conflict:") + for _, c := range e.Conflicts { + fmt.Fprintf(b, "\n\t%s", c.Summary()) + } + return b.String() +} + +// A Conflict is a path of requirements starting at a root or proposed root in +// the requirement graph, explaining why that root either causes a module passed +// in the mustSelect list to EditBuildList to be unattainable, or introduces an +// unresolvable error in loading the requirement graph. +type Conflict struct { + // Path is a path of requirements starting at some module version passed in + // the mustSelect argument and ending at a module whose requirements make that + // version unacceptable. (Path always has len ≥ 1.) 
+ Path []module.Version + + // If Err is nil, Constraint is a module version passed in the mustSelect + // argument that has the same module path as, and a lower version than, + // the last element of the Path slice. + Constraint module.Version + + // If Constraint is unset, Err is an error encountered when loading the + // requirements of the last element in Path. + Err error +} + +// UnwrapModuleError returns c.Err, but unwraps it if it is a module.ModuleError +// with a version and path matching the last entry in the Path slice. +func (c Conflict) UnwrapModuleError() error { + me, ok := c.Err.(*module.ModuleError) + if ok && len(c.Path) > 0 { + last := c.Path[len(c.Path)-1] + if me.Path == last.Path && me.Version == last.Version { + return me.Err + } + } + return c.Err +} + +// Summary returns a string that describes only the first and last modules in +// the conflict path. +func (c Conflict) Summary() string { + if len(c.Path) == 0 { + return "(internal error: invalid Conflict struct)" + } + first := c.Path[0] + last := c.Path[len(c.Path)-1] + if len(c.Path) == 1 { + if c.Err != nil { + return fmt.Sprintf("%s: %v", first, c.UnwrapModuleError()) + } + return fmt.Sprintf("%s is above %s", first, c.Constraint.Version) + } + + adverb := "" + if len(c.Path) > 2 { + adverb = "indirectly " + } + if c.Err != nil { + return fmt.Sprintf("%s %srequires %s: %v", first, adverb, last, c.UnwrapModuleError()) + } + return fmt.Sprintf("%s %srequires %s, but %s is requested", first, adverb, last, c.Constraint.Version) +} + +// String returns a string that describes the full conflict path. 
+func (c Conflict) String() string { + if len(c.Path) == 0 { + return "(internal error: invalid Conflict struct)" + } + b := new(strings.Builder) + fmt.Fprintf(b, "%v", c.Path[0]) + if len(c.Path) == 1 { + fmt.Fprintf(b, " found") + } else { + for _, r := range c.Path[1:] { + fmt.Fprintf(b, " requires\n\t%v", r) + } + } + if c.Constraint != (module.Version{}) { + fmt.Fprintf(b, ", but %v is requested", c.Constraint.Version) + } + if c.Err != nil { + fmt.Fprintf(b, ": %v", c.UnwrapModuleError()) + } + return b.String() +} + +// tidyRoots trims the root dependencies to the minimal requirements needed to +// both retain the same versions of all packages in pkgs and satisfy the +// graph-pruning invariants (if applicable). +func tidyRoots(ctx context.Context, rs *Requirements, pkgs []*loadPkg) (*Requirements, error) { + mainModule := MainModules.mustGetSingleMainModule() + if rs.pruning == unpruned { + return tidyUnprunedRoots(ctx, mainModule, rs, pkgs) + } + return tidyPrunedRoots(ctx, mainModule, rs, pkgs) +} + +func updateRoots(ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) { + switch rs.pruning { + case unpruned: + return updateUnprunedRoots(ctx, direct, rs, add) + case pruned: + return updatePrunedRoots(ctx, direct, rs, pkgs, add, rootsImported) + case workspace: + return updateWorkspaceRoots(ctx, rs, add) + default: + panic(fmt.Sprintf("unsupported pruning mode: %v", rs.pruning)) + } +} + +func updateWorkspaceRoots(ctx context.Context, rs *Requirements, add []module.Version) (*Requirements, error) { + if len(add) != 0 { + // add should be empty in workspace mode because workspace mode implies + // -mod=readonly, which in turn implies no new requirements. 
The code path + // that would result in add being non-empty returns an error before it + // reaches this point: The set of modules to add comes from + // resolveMissingImports, which in turn resolves each package by calling + // queryImport. But queryImport explicitly checks for -mod=readonly, and + // return an error. + panic("add is not empty") + } + return rs, nil +} + +// tidyPrunedRoots returns a minimal set of root requirements that maintains the +// invariants of the go.mod file needed to support graph pruning for the given +// packages: +// +// 1. For each package marked with pkgInAll, the module path that provided that +// package is included as a root. +// 2. For all packages, the module that provided that package either remains +// selected at the same version or is upgraded by the dependencies of a +// root. +// +// If any module that provided a package has been upgraded above its previous +// version, the caller may need to reload and recompute the package graph. +// +// To ensure that the loading process eventually converges, the caller should +// add any needed roots from the tidy root set (without removing existing untidy +// roots) until the set of roots has converged. +func tidyPrunedRoots(ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) { + var ( + roots []module.Version + pathIsRoot = map[string]bool{mainModule.Path: true} + ) + if v, ok := old.rootSelected("go"); ok { + roots = append(roots, module.Version{Path: "go", Version: v}) + pathIsRoot["go"] = true + } + if v, ok := old.rootSelected("toolchain"); ok { + roots = append(roots, module.Version{Path: "toolchain", Version: v}) + pathIsRoot["toolchain"] = true + } + // We start by adding roots for every package in "all". + // + // Once that is done, we may still need to add more roots to cover upgraded or + // otherwise-missing test dependencies for packages in "all". 
For those test + // dependencies, we prefer to add roots for packages with shorter import + // stacks first, on the theory that the module requirements for those will + // tend to fill in the requirements for their transitive imports (which have + // deeper import stacks). So we add the missing dependencies for one depth at + // a time, starting with the packages actually in "all" and expanding outwards + // until we have scanned every package that was loaded. + var ( + queue []*loadPkg + queued = map[*loadPkg]bool{} + ) + for _, pkg := range pkgs { + if !pkg.flags.has(pkgInAll) { + continue + } + if pkg.fromExternalModule() && !pathIsRoot[pkg.mod.Path] { + roots = append(roots, pkg.mod) + pathIsRoot[pkg.mod.Path] = true + } + queue = append(queue, pkg) + queued[pkg] = true + } + gover.ModSort(roots) + tidy := newRequirements(pruned, roots, old.direct) + + for len(queue) > 0 { + roots = tidy.rootModules + mg, err := tidy.Graph(ctx) + if err != nil { + return nil, err + } + + prevQueue := queue + queue = nil + for _, pkg := range prevQueue { + m := pkg.mod + if m.Path == "" { + continue + } + for _, dep := range pkg.imports { + if !queued[dep] { + queue = append(queue, dep) + queued[dep] = true + } + } + if pkg.test != nil && !queued[pkg.test] { + queue = append(queue, pkg.test) + queued[pkg.test] = true + } + + if !pathIsRoot[m.Path] { + if s := mg.Selected(m.Path); gover.ModCompare(m.Path, s, m.Version) < 0 { + roots = append(roots, m) + pathIsRoot[m.Path] = true + } + } + } + + if len(roots) > len(tidy.rootModules) { + gover.ModSort(roots) + tidy = newRequirements(pruned, roots, tidy.direct) + } + } + + roots = tidy.rootModules + _, err := tidy.Graph(ctx) + if err != nil { + return nil, err + } + + // We try to avoid adding explicit requirements for test-only dependencies of + // packages in external modules. 
However, if we drop the explicit + // requirements, that may change an import from unambiguous (due to lazy + // module loading) to ambiguous (because lazy module loading no longer + // disambiguates it). For any package that has become ambiguous, we try + // to fix it by promoting its module to an explicit root. + // (See https://go.dev/issue/60313.) + q := par.NewQueue(runtime.GOMAXPROCS(0)) + for { + var disambiguateRoot sync.Map + for _, pkg := range pkgs { + if pkg.mod.Path == "" || pathIsRoot[pkg.mod.Path] { + // Lazy module loading will cause pkg.mod to be checked before any other modules + // that are only indirectly required. It is as unambiguous as possible. + continue + } + pkg := pkg + q.Add(func() { + skipModFile := true + _, _, _, _, err := importFromModules(ctx, pkg.path, tidy, nil, skipModFile) + if aie := (*AmbiguousImportError)(nil); errors.As(err, &aie) { + disambiguateRoot.Store(pkg.mod, true) + } + }) + } + <-q.Idle() + + disambiguateRoot.Range(func(k, _ any) bool { + m := k.(module.Version) + roots = append(roots, m) + pathIsRoot[m.Path] = true + return true + }) + + if len(roots) > len(tidy.rootModules) { + module.Sort(roots) + tidy = newRequirements(pruned, roots, tidy.direct) + _, err = tidy.Graph(ctx) + if err != nil { + return nil, err + } + // Adding these roots may have pulled additional modules into the module + // graph, causing additional packages to become ambiguous. Keep iterating + // until we reach a fixed point. + continue + } + + break + } + + return tidy, nil +} + +// updatePrunedRoots returns a set of root requirements that maintains the +// invariants of the go.mod file needed to support graph pruning: +// +// 1. The selected version of the module providing each package marked with +// either pkgInAll or pkgIsRoot is included as a root. +// Note that certain root patterns (such as '...') may explode the root set +// to contain every module that provides any package imported (or merely +// required) by any other module. 
+// 2. Each root appears only once, at the selected version of its path +// (if rs.graph is non-nil) or at the highest version otherwise present as a +// root (otherwise). +// 3. Every module path that appears as a root in rs remains a root. +// 4. Every version in add is selected at its given version unless upgraded by +// (the dependencies of) an existing root or another module in add. +// +// The packages in pkgs are assumed to have been loaded from either the roots of +// rs or the modules selected in the graph of rs. +// +// The above invariants together imply the graph-pruning invariants for the +// go.mod file: +// +// 1. (The import invariant.) Every module that provides a package transitively +// imported by any package or test in the main module is included as a root. +// This follows by induction from (1) and (3) above. Transitively-imported +// packages loaded during this invocation are marked with pkgInAll (1), +// and by hypothesis any transitively-imported packages loaded in previous +// invocations were already roots in rs (3). +// +// 2. (The argument invariant.) Every module that provides a package matching +// an explicit package pattern is included as a root. This follows directly +// from (1): packages matching explicit package patterns are marked with +// pkgIsRoot. +// +// 3. (The completeness invariant.) Every module that contributed any package +// to the build is required by either the main module or one of the modules +// it requires explicitly. This invariant is left up to the caller, who must +// not load packages from outside the module graph but may add roots to the +// graph, but is facilitated by (3). If the caller adds roots to the graph in +// order to resolve missing packages, then updatePrunedRoots will retain them, +// the selected versions of those roots cannot regress, and they will +// eventually be written back to the main module's go.mod file. 
+// +// (See https://golang.org/design/36460-lazy-module-loading#invariants for more +// detail.) +func updatePrunedRoots(ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) { + roots := rs.rootModules + rootsUpgraded := false + + spotCheckRoot := map[module.Version]bool{} + + // “The selected version of the module providing each package marked with + // either pkgInAll or pkgIsRoot is included as a root.” + needSort := false + for _, pkg := range pkgs { + if !pkg.fromExternalModule() { + // pkg was not loaded from a module dependency, so we don't need + // to do anything special to maintain that dependency. + continue + } + + switch { + case pkg.flags.has(pkgInAll): + // pkg is transitively imported by a package or test in the main module. + // We need to promote the module that maintains it to a root: if some + // other module depends on the main module, and that other module also + // uses a pruned module graph, it will expect to find all of our + // transitive dependencies by reading just our go.mod file, not the go.mod + // files of everything we depend on. + // + // (This is the “import invariant” that makes graph pruning possible.) + + case rootsImported && pkg.flags.has(pkgFromRoot): + // pkg is a transitive dependency of some root, and we are treating the + // roots as if they are imported by the main module (as in 'go get'). + + case pkg.flags.has(pkgIsRoot): + // pkg is a root of the package-import graph. (Generally this means that + // it matches a command-line argument.) We want future invocations of the + // 'go' command — such as 'go test' on the same package — to continue to + // use the same versions of its dependencies that we are using right now. + // So we need to bring this package's dependencies inside the pruned + // module graph. 
+ // + // Making the module containing this package a root of the module graph + // does exactly that: if the module containing the package supports graph + // pruning then it should satisfy the import invariant itself, so all of + // its dependencies should be in its go.mod file, and if the module + // containing the package does not support pruning then if we make it a + // root we will load all of its (unpruned) transitive dependencies into + // the module graph. + // + // (This is the “argument invariant”, and is important for + // reproducibility.) + + default: + // pkg is a dependency of some other package outside of the main module. + // As far as we know it's not relevant to the main module (and thus not + // relevant to consumers of the main module either), and its dependencies + // should already be in the module graph — included in the dependencies of + // the package that imported it. + continue + } + + if _, ok := rs.rootSelected(pkg.mod.Path); ok { + // It is possible that the main module's go.mod file is incomplete or + // otherwise erroneous — for example, perhaps the author forgot to 'git + // add' their updated go.mod file after adding a new package import, or + // perhaps they made an edit to the go.mod file using a third-party tool + // ('git merge'?) that doesn't maintain consistency for module + // dependencies. If that happens, ideally we want to detect the missing + // requirements and fix them up here. + // + // However, we also need to be careful not to be too aggressive. For + // transitive dependencies of external tests, the go.mod file for the + // module containing the test itself is expected to provide all of the + // relevant dependencies, and we explicitly don't want to pull in + // requirements on *irrelevant* requirements that happen to occur in the + // go.mod files for these transitive-test-only dependencies. (See the test + // in mod_lazy_test_horizon.txt for a concrete example). 
+ // + // The “goldilocks zone” seems to be to spot-check exactly the same + // modules that we promote to explicit roots: namely, those that provide + // packages transitively imported by the main module, and those that + // provide roots of the package-import graph. That will catch erroneous + // edits to the main module's go.mod file and inconsistent requirements in + // dependencies that provide imported packages, but will ignore erroneous + // or misleading requirements in dependencies that aren't obviously + // relevant to the packages in the main module. + spotCheckRoot[pkg.mod] = true + } else { + roots = append(roots, pkg.mod) + rootsUpgraded = true + // The roots slice was initially sorted because rs.rootModules was sorted, + // but the root we just added could be out of order. + needSort = true + } + } + + for _, m := range add { + if v, ok := rs.rootSelected(m.Path); !ok || gover.ModCompare(m.Path, v, m.Version) < 0 { + roots = append(roots, m) + rootsUpgraded = true + needSort = true + } + } + if needSort { + gover.ModSort(roots) + } + + // "Each root appears only once, at the selected version of its path ….” + for { + var mg *ModuleGraph + if rootsUpgraded { + // We've added or upgraded one or more roots, so load the full module + // graph so that we can update those roots to be consistent with other + // requirements. + if mustHaveCompleteRequirements() { + // Our changes to the roots may have moved dependencies into or out of + // the graph-pruning horizon, which could in turn change the selected + // versions of other modules. (For pruned modules adding or removing an + // explicit root is a semantic change, not just a cosmetic one.) 
+ return rs, errGoModDirty + } + + rs = newRequirements(pruned, roots, direct) + var err error + mg, err = rs.Graph(ctx) + if err != nil { + return rs, err + } + } else { + // Since none of the roots have been upgraded, we have no reason to + // suspect that they are inconsistent with the requirements of any other + // roots. Only look at the full module graph if we've already loaded it; + // otherwise, just spot-check the explicit requirements of the roots from + // which we loaded packages. + if rs.graph.Load() != nil { + // We've already loaded the full module graph, which includes the + // requirements of all of the root modules — even the transitive + // requirements, if they are unpruned! + mg, _ = rs.Graph(ctx) + } else if cfg.BuildMod == "vendor" { + // We can't spot-check the requirements of other modules because we + // don't in general have their go.mod files available in the vendor + // directory. (Fortunately this case is impossible, because mg.graph is + // always non-nil in vendor mode!) + panic("internal error: rs.graph is unexpectedly nil with -mod=vendor") + } else if !spotCheckRoots(ctx, rs, spotCheckRoot) { + // We spot-checked the explicit requirements of the roots that are + // relevant to the packages we've loaded. Unfortunately, they're + // inconsistent in some way; we need to load the full module graph + // so that we can fix the roots properly. + var err error + mg, err = rs.Graph(ctx) + if err != nil { + return rs, err + } + } + } + + roots = make([]module.Version, 0, len(rs.rootModules)) + rootsUpgraded = false + inRootPaths := make(map[string]bool, len(rs.rootModules)+1) + for _, mm := range MainModules.Versions() { + inRootPaths[mm.Path] = true + } + for _, m := range rs.rootModules { + if inRootPaths[m.Path] { + // This root specifies a redundant path. We already retained the + // selected version of this path when we saw it before, so omit the + // redundant copy regardless of its version. 
+ // + // When we read the full module graph, we include the dependencies of + // every root even if that root is redundant. That better preserves + // reproducibility if, say, some automated tool adds a redundant + // 'require' line and then runs 'go mod tidy' to try to make everything + // consistent, since the requirements of the older version are carried + // over. + // + // So omitting a root that was previously present may *reduce* the + // selected versions of non-roots, but merely removing a requirement + // cannot *increase* the selected versions of other roots as a result — + // we don't need to mark this change as an upgrade. (This particular + // change cannot invalidate any other roots.) + continue + } + + var v string + if mg == nil { + v, _ = rs.rootSelected(m.Path) + } else { + v = mg.Selected(m.Path) + } + roots = append(roots, module.Version{Path: m.Path, Version: v}) + inRootPaths[m.Path] = true + if v != m.Version { + rootsUpgraded = true + } + } + // Note that rs.rootModules was already sorted by module path and version, + // and we appended to the roots slice in the same order and guaranteed that + // each path has only one version, so roots is also sorted by module path + // and (trivially) version. + + if !rootsUpgraded { + if cfg.BuildMod != "mod" { + // The only changes to the root set (if any) were to remove duplicates. + // The requirements are consistent (if perhaps redundant), so keep the + // original rs to preserve its ModuleGraph. + return rs, nil + } + // The root set has converged: every root going into this iteration was + // already at its selected version, although we have have removed other + // (redundant) roots for the same path. + break + } + } + + if rs.pruning == pruned && reflect.DeepEqual(roots, rs.rootModules) && reflect.DeepEqual(direct, rs.direct) { + // The root set is unchanged and rs was already pruned, so keep rs to + // preserve its cached ModuleGraph (if any). 
+ return rs, nil + } + return newRequirements(pruned, roots, direct), nil +} + +// spotCheckRoots reports whether the versions of the roots in rs satisfy the +// explicit requirements of the modules in mods. +func spotCheckRoots(ctx context.Context, rs *Requirements, mods map[module.Version]bool) bool { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + work := par.NewQueue(runtime.GOMAXPROCS(0)) + for m := range mods { + m := m + work.Add(func() { + if ctx.Err() != nil { + return + } + + summary, err := goModSummary(m) + if err != nil { + cancel() + return + } + + for _, r := range summary.require { + if v, ok := rs.rootSelected(r.Path); ok && gover.ModCompare(r.Path, v, r.Version) < 0 { + cancel() + return + } + } + }) + } + <-work.Idle() + + if ctx.Err() != nil { + // Either we failed a spot-check, or the caller no longer cares about our + // answer anyway. + return false + } + + return true +} + +// tidyUnprunedRoots returns a minimal set of root requirements that maintains +// the selected version of every module that provided or lexically could have +// provided a package in pkgs, and includes the selected version of every such +// module in direct as a root. +func tidyUnprunedRoots(ctx context.Context, mainModule module.Version, old *Requirements, pkgs []*loadPkg) (*Requirements, error) { + var ( + // keep is a set of of modules that provide packages or are needed to + // disambiguate imports. + keep []module.Version + keptPath = map[string]bool{} + + // rootPaths is a list of module paths that provide packages directly + // imported from the main module. They should be included as roots. + rootPaths []string + inRootPaths = map[string]bool{} + + // altMods is a set of paths of modules that lexically could have provided + // imported packages. It may be okay to remove these from the list of + // explicit requirements if that removes them from the module graph. 
If they + // are present in the module graph reachable from rootPaths, they must not + // be at a lower version. That could cause a missing sum error or a new + // import ambiguity. + // + // For example, suppose a developer rewrites imports from example.com/m to + // example.com/m/v2, then runs 'go mod tidy'. Tidy may delete the + // requirement on example.com/m if there is no other transitive requirement + // on it. However, if example.com/m were downgraded to a version not in + // go.sum, when package example.com/m/v2/p is loaded, we'd get an error + // trying to disambiguate the import, since we can't check example.com/m + // without its sum. See #47738. + altMods = map[string]string{} + ) + if v, ok := old.rootSelected("go"); ok { + keep = append(keep, module.Version{Path: "go", Version: v}) + keptPath["go"] = true + } + if v, ok := old.rootSelected("toolchain"); ok { + keep = append(keep, module.Version{Path: "toolchain", Version: v}) + keptPath["toolchain"] = true + } + for _, pkg := range pkgs { + if !pkg.fromExternalModule() { + continue + } + if m := pkg.mod; !keptPath[m.Path] { + keep = append(keep, m) + keptPath[m.Path] = true + if old.direct[m.Path] && !inRootPaths[m.Path] { + rootPaths = append(rootPaths, m.Path) + inRootPaths[m.Path] = true + } + } + for _, m := range pkg.altMods { + altMods[m.Path] = m.Version + } + } + + // Construct a build list with a minimal set of roots. + // This may remove or downgrade modules in altMods. + reqs := &mvsReqs{roots: keep} + min, err := mvs.Req(mainModule, rootPaths, reqs) + if err != nil { + return nil, err + } + buildList, err := mvs.BuildList([]module.Version{mainModule}, reqs) + if err != nil { + return nil, err + } + + // Check if modules in altMods were downgraded but not removed. + // If so, add them to roots, which will retain an "// indirect" requirement + // in go.mod. See comment on altMods above. 
+ keptAltMod := false + for _, m := range buildList { + if v, ok := altMods[m.Path]; ok && gover.ModCompare(m.Path, m.Version, v) < 0 { + keep = append(keep, module.Version{Path: m.Path, Version: v}) + keptAltMod = true + } + } + if keptAltMod { + // We must run mvs.Req again instead of simply adding altMods to min. + // It's possible that a requirement in altMods makes some other + // explicit indirect requirement unnecessary. + reqs.roots = keep + min, err = mvs.Req(mainModule, rootPaths, reqs) + if err != nil { + return nil, err + } + } + + return newRequirements(unpruned, min, old.direct), nil +} + +// updateUnprunedRoots returns a set of root requirements that includes the selected +// version of every module path in direct as a root, and maintains the selected +// version of every module selected in the graph of rs. +// +// The roots are updated such that: +// +// 1. The selected version of every module path in direct is included as a root +// (if it is not "none"). +// 2. Each root is the selected version of its path. (We say that such a root +// set is “consistent”.) +// 3. Every version selected in the graph of rs remains selected unless upgraded +// by a dependency in add. +// 4. Every version in add is selected at its given version unless upgraded by +// (the dependencies of) an existing root or another module in add. +func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requirements, add []module.Version) (*Requirements, error) { + mg, err := rs.Graph(ctx) + if err != nil { + // We can't ignore errors in the module graph even if the user passed the -e + // flag to try to push past them. If we can't load the complete module + // dependencies, then we can't reliably compute a minimal subset of them. + return rs, err + } + + if mustHaveCompleteRequirements() { + // Instead of actually updating the requirements, just check that no updates + // are needed. 
+ if rs == nil { + // We're being asked to reconstruct the requirements from scratch, + // but we aren't even allowed to modify them. + return rs, errGoModDirty + } + for _, m := range rs.rootModules { + if m.Version != mg.Selected(m.Path) { + // The root version v is misleading: the actual selected version is higher. + return rs, errGoModDirty + } + } + for _, m := range add { + if m.Version != mg.Selected(m.Path) { + return rs, errGoModDirty + } + } + for mPath := range direct { + if _, ok := rs.rootSelected(mPath); !ok { + // Module m is supposed to be listed explicitly, but isn't. + // + // Note that this condition is also detected (and logged with more + // detail) earlier during package loading, so it shouldn't actually be + // possible at this point — this is just a defense in depth. + return rs, errGoModDirty + } + } + + // No explicit roots are missing and all roots are already at the versions + // we want to keep. Any other changes we would make are purely cosmetic, + // such as pruning redundant indirect dependencies. Per issue #34822, we + // ignore cosmetic changes when we cannot update the go.mod file. + return rs, nil + } + + var ( + rootPaths []string // module paths that should be included as roots + inRootPaths = map[string]bool{} + ) + for _, root := range rs.rootModules { + // If the selected version of the root is the same as what was already + // listed in the go.mod file, retain it as a root (even if redundant) to + // avoid unnecessary churn. (See https://golang.org/issue/34822.) + // + // We do this even for indirect requirements, since we don't know why they + // were added and they could become direct at any time. 
+ if !inRootPaths[root.Path] && mg.Selected(root.Path) == root.Version { + rootPaths = append(rootPaths, root.Path) + inRootPaths[root.Path] = true + } + } + + // “The selected version of every module path in direct is included as a root.” + // + // This is only for convenience and clarity for end users: in an unpruned module, + // the choice of explicit vs. implicit dependency has no impact on MVS + // selection (for itself or any other module). + keep := append(mg.BuildList()[MainModules.Len():], add...) + for _, m := range keep { + if direct[m.Path] && !inRootPaths[m.Path] { + rootPaths = append(rootPaths, m.Path) + inRootPaths[m.Path] = true + } + } + + var roots []module.Version + for _, mainModule := range MainModules.Versions() { + min, err := mvs.Req(mainModule, rootPaths, &mvsReqs{roots: keep}) + if err != nil { + return rs, err + } + roots = append(roots, min...) + } + if MainModules.Len() > 1 { + gover.ModSort(roots) + } + if rs.pruning == unpruned && reflect.DeepEqual(roots, rs.rootModules) && reflect.DeepEqual(direct, rs.direct) { + // The root set is unchanged and rs was already unpruned, so keep rs to + // preserve its cached ModuleGraph (if any). + return rs, nil + } + + return newRequirements(unpruned, roots, direct), nil +} + +// convertPruning returns a version of rs with the given pruning behavior. +// If rs already has the given pruning, convertPruning returns rs unmodified. +func convertPruning(ctx context.Context, rs *Requirements, pruning modPruning) (*Requirements, error) { + if rs.pruning == pruning { + return rs, nil + } else if rs.pruning == workspace || pruning == workspace { + panic("attempting to convert to/from workspace pruning and another pruning type") + } + + if pruning == unpruned { + // We are converting a pruned module to an unpruned one. 
The roots of a + // pruned module graph are a superset of the roots of an unpruned one, so + // we don't need to add any new roots — we just need to drop the ones that + // are redundant, which is exactly what updateUnprunedRoots does. + return updateUnprunedRoots(ctx, rs.direct, rs, nil) + } + + // We are converting an unpruned module to a pruned one. + // + // An unpruned module graph includes the transitive dependencies of every + // module in the build list. As it turns out, we can express that as a pruned + // root set! “Include the transitive dependencies of every module in the build + // list” is exactly what happens in a pruned module if we promote every module + // in the build list to a root. + mg, err := rs.Graph(ctx) + if err != nil { + return rs, err + } + return newRequirements(pruned, mg.BuildList()[MainModules.Len():], rs.direct), nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/edit.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/edit.go new file mode 100644 index 0000000000000000000000000000000000000000..63ee15c76452016c828ed6cf96a2abdf16138390 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/edit.go @@ -0,0 +1,855 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modload + +import ( + "cmd/go/internal/cfg" + "cmd/go/internal/gover" + "cmd/go/internal/mvs" + "cmd/go/internal/par" + "context" + "errors" + "fmt" + "maps" + "os" + "slices" + + "golang.org/x/mod/module" +) + +// editRequirements returns an edited version of rs such that: +// +// 1. Each module version in mustSelect is selected. +// +// 2. Each module version in tryUpgrade is upgraded toward the indicated +// version as far as can be done without violating (1). 
+// (Other upgrades are also allowed if they are caused by +// transitive requirements of versions in mustSelect or +// tryUpgrade.) +// +// 3. Each module version in rs.rootModules (or rs.graph, if rs is unpruned) +// is downgraded or upgraded from its original version only to the extent +// needed to satisfy (1) and (2). +// +// Generally, the module versions in mustSelect are due to the module or a +// package within the module matching an explicit command line argument to 'go +// get', and the versions in tryUpgrade are transitive dependencies that are +// either being upgraded by 'go get -u' or being added to satisfy some +// otherwise-missing package import. +// +// If pruning is enabled, the roots of the edited requirements include an +// explicit entry for each module path in tryUpgrade, mustSelect, and the roots +// of rs, unless the selected version for the module path is "none". +func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSelect []module.Version) (edited *Requirements, changed bool, err error) { + if rs.pruning == workspace { + panic("editRequirements cannot edit workspace requirements") + } + + orig := rs + // If we already know what go version we will end up on after the edit, and + // the pruning for that version is different, go ahead and apply it now. + // + // If we are changing from pruned to unpruned, then we MUST check the unpruned + // graph for conflicts from the start. (Checking only for pruned conflicts + // would miss some that would be introduced later.) + // + // If we are changing from unpruned to pruned, then we would like to avoid + // unnecessary downgrades due to conflicts that would be pruned out of the + // final graph anyway. + // + // Note that even if we don't find a go version in mustSelect, it is possible + // that we will switch from unpruned to pruned (but not the other way around!) 
+ // after applying the edits if we find a dependency that requires a high + // enough go version to trigger an upgrade. + rootPruning := orig.pruning + for _, m := range mustSelect { + if m.Path == "go" { + rootPruning = pruningForGoVersion(m.Version) + break + } else if m.Path == "toolchain" && pruningForGoVersion(gover.FromToolchain(m.Version)) == unpruned { + // We don't know exactly what go version we will end up at, but we know + // that it must be a version supported by the requested toolchain, and + // that toolchain does not support pruning. + // + // TODO(bcmills): 'go get' ought to reject explicit toolchain versions + // older than gover.GoStrictVersion. Once that is fixed, is this still + // needed? + rootPruning = unpruned + break + } + } + + if rootPruning != rs.pruning { + rs, err = convertPruning(ctx, rs, rootPruning) + if err != nil { + return orig, false, err + } + } + + // selectedRoot records the edited version (possibly "none") for each module + // path that would be a root in the edited requirements. + var selectedRoot map[string]string // module path → edited version + if rootPruning == pruned { + selectedRoot = maps.Clone(rs.maxRootVersion) + } else { + // In a module without graph pruning, modules that provide packages imported + // by the main module may either be explicit roots or implicit transitive + // dependencies. To the extent possible, we want to preserve those implicit + // dependencies, so we need to treat everything in the build list as + // potentially relevant — that is, as what would be a “root” in a module + // with graph pruning enabled. + mg, err := rs.Graph(ctx) + if err != nil { + // If we couldn't load the graph, we don't know what its requirements were + // to begin with, so we can't edit those requirements in a coherent way. 
+ return orig, false, err + } + bl := mg.BuildList()[MainModules.Len():] + selectedRoot = make(map[string]string, len(bl)) + for _, m := range bl { + selectedRoot[m.Path] = m.Version + } + } + + for _, r := range tryUpgrade { + if v, ok := selectedRoot[r.Path]; ok && gover.ModCompare(r.Path, v, r.Version) >= 0 { + continue + } + if cfg.BuildV { + fmt.Fprintf(os.Stderr, "go: trying upgrade to %v\n", r) + } + selectedRoot[r.Path] = r.Version + } + + // conflicts is a list of conflicts that we cannot resolve without violating + // some version in mustSelect. It may be incomplete, but we want to report + // as many conflicts as we can so that the user can solve more of them at once. + var conflicts []Conflict + + // mustSelectVersion is an index of the versions in mustSelect. + mustSelectVersion := make(map[string]string, len(mustSelect)) + for _, r := range mustSelect { + if v, ok := mustSelectVersion[r.Path]; ok && v != r.Version { + prev := module.Version{Path: r.Path, Version: v} + if gover.ModCompare(r.Path, v, r.Version) > 0 { + conflicts = append(conflicts, Conflict{Path: []module.Version{prev}, Constraint: r}) + } else { + conflicts = append(conflicts, Conflict{Path: []module.Version{r}, Constraint: prev}) + } + continue + } + + mustSelectVersion[r.Path] = r.Version + selectedRoot[r.Path] = r.Version + } + + // We've indexed all of the data we need and we've computed the initial + // versions of the roots. Now we need to load the actual module graph and + // restore the invariant that every root is the selected version of its path. + // + // For 'go mod tidy' we would do that using expandGraph, which upgrades the + // roots until their requirements are internally consistent and then drops out + // the old roots. However, here we need to do more: we also need to make sure + // the modules in mustSelect don't get upgraded above their intended versions. 
+ // To do that, we repeatedly walk the module graph, identify paths of + // requirements that result in versions that are too high, and downgrade the + // roots that lead to those paths. When no conflicts remain, we're done. + // + // Since we want to report accurate paths to each conflict, we don't drop out + // older-than-selected roots until the process completes. That might mean that + // we do some extra downgrades when they could be skipped, but for the benefit + // of being able to explain the reason for every downgrade that seems + // worthwhile. + // + // Graph pruning adds an extra wrinkle: a given node in the module graph + // may be reached from a root whose dependencies are pruned, and from a root + // whose dependencies are not pruned. It may be the case that the path from + // the unpruned root leads to a conflict, while the path from the pruned root + // prunes out the requirements that would lead to that conflict. + // So we need to track the two kinds of paths independently. + // They join back together at the roots of the graph: if a root r1 with pruned + // requirements depends on a root r2 with unpruned requirements, then + // selecting r1 would cause r2 to become a root and pull in all of its + // unpruned dependencies. + // + // The dqTracker type implements the logic for propagating conflict paths + // through the pruned and unpruned parts of the module graph. + // + // We make a best effort to fix incompatibilities, subject to two properties: + // + // 1. If the user runs 'go get' with a set of mutually-compatible module + // versions, we should accept those versions. + // + // 2. If we end up upgrading or downgrading a module, it should be + // clear why we did so. + // + // We don't try to find an optimal SAT solution, + // especially given the complex interactions with graph pruning. 
+ + var ( + roots []module.Version // the current versions in selectedRoot, in sorted order + rootsDirty = true // true if roots does not match selectedRoot + ) + + // rejectedRoot records the set of module versions that have been disqualified + // as roots of the module graph. When downgrading due to a conflict or error, + // we skip any version that has already been rejected. + // + // NOTE(bcmills): I am not sure that the rejectedRoot map is really necessary, + // since we normally only downgrade roots or accept indirect upgrades to + // known-good versions. However, I am having trouble proving that accepting an + // indirect upgrade never introduces a conflict that leads to further + // downgrades. I really want to be able to prove that editRequirements + // terminates, and the easiest way to prove it is to add this map. + // + // Then the proof of termination is this: + // On every iteration where we mark the roots as dirty, we add some new module + // version to the map. The universe of module versions is finite, so we must + // eventually reach a state in which we do not add any version to the map. + // In that state, we either report a conflict or succeed in the edit. + rejectedRoot := map[module.Version]bool{} + + for rootsDirty && len(conflicts) == 0 { + roots = roots[:0] + for p, v := range selectedRoot { + if v != "none" { + roots = append(roots, module.Version{Path: p, Version: v}) + } + } + gover.ModSort(roots) + + // First, we extend the graph so that it includes the selected version + // of every root. The upgraded roots are in addition to the original + // roots, so we will have enough information to trace a path to each + // conflict we discover from one or more of the original roots. 
+ mg, upgradedRoots, err := extendGraph(ctx, rootPruning, roots, selectedRoot) + if err != nil { + var tooNew *gover.TooNewError + if mg == nil || errors.As(err, &tooNew) { + return orig, false, err + } + // We're about to walk the entire extended module graph, so we will find + // any error then — and we will either try to resolve it by downgrading + // something or report it as a conflict with more detail. + } + + // extendedRootPruning is an index of the pruning used to load each root in + // the extended module graph. + extendedRootPruning := make(map[module.Version]modPruning, len(roots)+len(upgradedRoots)) + findPruning := func(m module.Version) modPruning { + if rootPruning == pruned { + summary, _ := mg.loadCache.Get(m) + if summary != nil && summary.pruning == unpruned { + return unpruned + } + } + return rootPruning + } + for _, m := range roots { + extendedRootPruning[m] = findPruning(m) + } + for m := range upgradedRoots { + extendedRootPruning[m] = findPruning(m) + } + + // Now check the resulting extended graph for errors and incompatibilities. + t := dqTracker{extendedRootPruning: extendedRootPruning} + mg.g.WalkBreadthFirst(func(m module.Version) { + if max, ok := mustSelectVersion[m.Path]; ok && gover.ModCompare(m.Path, m.Version, max) > 0 { + // m itself violates mustSelect, so it cannot appear in the module graph + // even if its transitive dependencies would be pruned out. + t.disqualify(m, pruned, dqState{dep: m}) + return + } + + summary, err := mg.loadCache.Get(m) + if err != nil && err != par.ErrCacheEntryNotFound { + // We can't determine the requirements of m, so we don't know whether + // they would be allowed. This may be a transient error reaching the + // repository, rather than a permanent error with the retrieved version. + // + // TODO(golang.org/issue/31730, golang.org/issue/30134): + // decide what to do based on the actual error. 
+ t.disqualify(m, pruned, dqState{err: err}) + return + } + + reqs, ok := mg.RequiredBy(m) + if !ok { + // The dependencies of m do not appear in the module graph, so they + // can't be causing any problems this time. + return + } + + if summary == nil { + if m.Version != "" { + panic(fmt.Sprintf("internal error: %d reqs present for %v, but summary is nil", len(reqs), m)) + } + // m is the main module: we are editing its dependencies, so it cannot + // become disqualified. + return + } + + // Before we check for problems due to transitive dependencies, first + // check m's direct requirements. A requirement on a version r that + // violates mustSelect disqualifies m, even if the requirements of r are + // themselves pruned out. + for _, r := range reqs { + if max, ok := mustSelectVersion[r.Path]; ok && gover.ModCompare(r.Path, r.Version, max) > 0 { + t.disqualify(m, pruned, dqState{dep: r}) + return + } + } + for _, r := range reqs { + if !t.require(m, r) { + break + } + } + }) + + // We have now marked all of the versions in the graph that have conflicts, + // with a path to each conflict from one or more roots that introduce it. + // Now we need to identify those roots and change their versions + // (if possible) in order to resolve the conflicts. + rootsDirty = false + for _, m := range roots { + path, err := t.path(m, extendedRootPruning[m]) + if len(path) == 0 && err == nil { + continue // Nothing wrong with m; we can keep it. + } + + // path leads to a module with a problem: either it violates a constraint, + // or some error prevents us from determining whether it violates a + // constraint. We might end up logging or returning the conflict + // information, so go ahead and fill in the details about it. 
+ conflict := Conflict{ + Path: path, + Err: err, + } + if err == nil { + var last module.Version = path[len(path)-1] + mustV, ok := mustSelectVersion[last.Path] + if !ok { + fmt.Fprintf(os.Stderr, "go: %v\n", conflict) + panic("internal error: found a version conflict, but no constraint it violates") + } + conflict.Constraint = module.Version{ + Path: last.Path, + Version: mustV, + } + } + + if v, ok := mustSelectVersion[m.Path]; ok && v == m.Version { + // m is in mustSelect, but is marked as disqualified due to a transitive + // dependency. + // + // In theory we could try removing module paths that don't appear in + // mustSelect (added by tryUpgrade or already present in rs) in order to + // get graph pruning to take effect, but (a) it is likely that 'go mod + // tidy' would re-add those roots and reintroduce unwanted upgrades, + // causing confusion, and (b) deciding which roots to try to eliminate + // would add a lot of complexity. + // + // Instead, we report the path to the conflict as an error. + // If users want to explicitly prune out nodes from the dependency + // graph, they can always add an explicit 'exclude' directive. + conflicts = append(conflicts, conflict) + continue + } + + // If m is not the selected version of its path, we have two options: we + // can either upgrade to the version that actually is selected (dropping m + // itself out of the bottom of the module graph), or we can try + // downgrading it. + // + // If the version we would be upgrading to is ok to use, we will just plan + // to do that and avoid the overhead of trying to find some lower version + // to downgrade to. + // + // However, it is possible that m depends on something that leads to its + // own upgrade, so if the upgrade isn't viable we should go ahead and try + // to downgrade (like with any other root). 
+ if v := mg.Selected(m.Path); v != m.Version { + u := module.Version{Path: m.Path, Version: v} + uPruning, ok := t.extendedRootPruning[m] + if !ok { + fmt.Fprintf(os.Stderr, "go: %v\n", conflict) + panic(fmt.Sprintf("internal error: selected version of root %v is %v, but it was not expanded as a new root", m, u)) + } + if !t.check(u, uPruning).isDisqualified() && !rejectedRoot[u] { + // Applying the upgrade from m to u will resolve the conflict, + // so plan to do that if there are no other conflicts to resolve. + continue + } + } + + // Figure out what version of m's path was present before we started + // the edit. We want to make sure we consider keeping it as-is, + // even if it wouldn't normally be included. (For example, it might + // be a pseudo-version or pre-release.) + origMG, _ := orig.Graph(ctx) + origV := origMG.Selected(m.Path) + + if conflict.Err != nil && origV == m.Version { + // This version of m.Path was already in the module graph before we + // started editing, and the problem with it is that we can't load its + // (transitive) requirements. + // + // If this conflict was just one step in a longer chain of downgrades, + // then we would want to keep going past it until we find a version + // that doesn't have that problem. However, we only want to downgrade + // away from an *existing* requirement if we can confirm that it actually + // conflicts with mustSelect. (For example, we don't want + // 'go get -u ./...' to incidentally downgrade some dependency whose + // go.mod file is unavailable or has a bad checksum.) + conflicts = append(conflicts, conflict) + continue + } + + // We need to downgrade m's path to some lower version to try to resolve + // the conflict. Find the next-lowest candidate and apply it. 
+ rejectedRoot[m] = true + prev := m + for { + prev, err = previousVersion(ctx, prev) + if gover.ModCompare(m.Path, m.Version, origV) > 0 && (gover.ModCompare(m.Path, prev.Version, origV) < 0 || err != nil) { + // previousVersion skipped over origV. Insert it into the order. + prev.Version = origV + } else if err != nil { + // We don't know the next downgrade to try. Give up. + return orig, false, err + } + if rejectedRoot[prev] { + // We already rejected prev in a previous round. + // To ensure that this algorithm terminates, don't try it again. + continue + } + pruning := rootPruning + if pruning == pruned { + if summary, err := mg.loadCache.Get(m); err == nil { + pruning = summary.pruning + } + } + if t.check(prev, pruning).isDisqualified() { + // We found a problem with prev this round that would also disqualify + // it as a root. Don't bother trying it next round. + rejectedRoot[prev] = true + continue + } + break + } + selectedRoot[m.Path] = prev.Version + rootsDirty = true + + // If this downgrade is potentially interesting, log the reason for it. + if conflict.Err != nil || cfg.BuildV { + var action string + if prev.Version == "none" { + action = fmt.Sprintf("removing %s", m) + } else if prev.Version == origV { + action = fmt.Sprintf("restoring %s", prev) + } else { + action = fmt.Sprintf("trying %s", prev) + } + fmt.Fprintf(os.Stderr, "go: %s\n\t%s\n", conflict.Summary(), action) + } + } + if rootsDirty { + continue + } + + // We didn't resolve any issues by downgrading, but we may still need to + // resolve some conflicts by locking in upgrades. Do that now. + // + // We don't do these upgrades until we're done downgrading because the + // downgrade process might reveal or remove conflicts (by changing which + // requirement edges are pruned out). 
+ var upgradedFrom []module.Version // for logging only + for p, v := range selectedRoot { + if _, ok := mustSelectVersion[p]; !ok { + if actual := mg.Selected(p); actual != v { + if cfg.BuildV { + upgradedFrom = append(upgradedFrom, module.Version{Path: p, Version: v}) + } + selectedRoot[p] = actual + // Accepting the upgrade to m.Path might cause the selected versions + // of other modules to fall, because they were being increased by + // dependencies of m that are no longer present in the graph. + // + // TODO(bcmills): Can removing m as a root also cause the selected + // versions of other modules to rise? I think not: we're strictly + // removing non-root nodes from the module graph, which can't cause + // any root to decrease (because they're roots), and the dependencies + // of non-roots don't matter because they're either always unpruned or + // always pruned out. + // + // At any rate, it shouldn't cost much to reload the module graph one + // last time and confirm that it is stable. + rootsDirty = true + } + } + } + if rootsDirty { + if cfg.BuildV { + gover.ModSort(upgradedFrom) // Make logging deterministic. + for _, m := range upgradedFrom { + fmt.Fprintf(os.Stderr, "go: accepting indirect upgrade from %v to %s\n", m, selectedRoot[m.Path]) + } + } + continue + } + break + } + if len(conflicts) > 0 { + return orig, false, &ConstraintError{Conflicts: conflicts} + } + + if rootPruning == unpruned { + // An unpruned go.mod file lists only a subset of the requirements needed + // for building packages. Figure out which requirements need to be explicit. + var rootPaths []string + + // The modules in mustSelect are always promoted to be explicit. 
+ for _, m := range mustSelect { + if m.Version != "none" && !MainModules.Contains(m.Path) { + rootPaths = append(rootPaths, m.Path) + } + } + + for _, m := range roots { + if v, ok := rs.rootSelected(m.Path); ok && (v == m.Version || rs.direct[m.Path]) { + // m.Path was formerly a root, and either its version hasn't changed or + // we believe that it provides a package directly imported by a package + // or test in the main module. For now we'll assume that it is still + // relevant enough to remain a root. If we actually load all of the + // packages and tests in the main module (which we are not doing here), + // we can revise the explicit roots at that point. + rootPaths = append(rootPaths, m.Path) + } + } + + roots, err = mvs.Req(MainModules.mustGetSingleMainModule(), rootPaths, &mvsReqs{roots: roots}) + if err != nil { + return nil, false, err + } + } + + changed = rootPruning != orig.pruning || !slices.Equal(roots, orig.rootModules) + if !changed { + // Because the roots we just computed are unchanged, the entire graph must + // be the same as it was before. Save the original rs, since we have + // probably already loaded its requirement graph. + return orig, false, nil + } + + // A module that is not even in the build list necessarily cannot provide + // any imported packages. Mark as direct only the direct modules that are + // still in the build list. (We assume that any module path that provided a + // direct import before the edit continues to do so after. There are a few + // edge cases where that can change, such as if a package moves into or out of + // a nested module or disappears entirely. If that happens, the user can run + // 'go mod tidy' to clean up the direct/indirect annotations.) + // + // TODO(bcmills): Would it make more sense to leave the direct map as-is + // but allow it to refer to modules that are no longer in the build list? + // That might complicate updateRoots, but it may be cleaner in other ways. 
+ direct := make(map[string]bool, len(rs.direct)) + for _, m := range roots { + if rs.direct[m.Path] { + direct[m.Path] = true + } + } + edited = newRequirements(rootPruning, roots, direct) + + // If we ended up adding a dependency that upgrades our go version far enough + // to activate pruning, we must convert the edited Requirements in order to + // avoid dropping transitive dependencies from the build list the next time + // someone uses the updated go.mod file. + // + // Note that it isn't possible to go in the other direction (from pruned to + // unpruned) unless the "go" or "toolchain" module is explicitly listed in + // mustSelect, which we already handled at the very beginning of the edit. + // That is because the virtual "go" module only requires a "toolchain", + // and the "toolchain" module never requires anything else, which means that + // those two modules will never be downgraded due to a conflict with any other + // constraint. + if rootPruning == unpruned { + if v, ok := edited.rootSelected("go"); ok && pruningForGoVersion(v) == pruned { + // Since we computed the edit with the unpruned graph, and the pruned + // graph is a strict subset of the unpruned graph, this conversion + // preserves the exact (edited) build list that we already computed. + // + // However, it does that by shoving the whole build list into the roots of + // the graph. 'go get' will check for that sort of transition and log a + // message reminding the user how to clean up this mess we're about to + // make. 😅 + edited, err = convertPruning(ctx, edited, pruned) + if err != nil { + return orig, false, err + } + } + } + return edited, true, nil +} + +// extendGraph loads the module graph from roots, and iteratively extends it by +// unpruning the selected version of each module path that is a root in rs or in +// the roots slice until the graph reaches a fixed point. 
+// +// The graph is guaranteed to converge to a fixed point because unpruning a +// module version can only increase (never decrease) the selected versions, +// and the set of versions for each module is finite. +// +// The extended graph is useful for diagnosing version conflicts: for each +// selected module version, it can provide a complete path of requirements from +// some root to that version. +func extendGraph(ctx context.Context, rootPruning modPruning, roots []module.Version, selectedRoot map[string]string) (mg *ModuleGraph, upgradedRoot map[module.Version]bool, err error) { + for { + mg, err = readModGraph(ctx, rootPruning, roots, upgradedRoot) + // We keep on going even if err is non-nil until we reach a steady state. + // (Note that readModGraph returns a non-nil *ModuleGraph even in case of + // errors.) The caller may be able to fix the errors by adjusting versions, + // so we really want to return as complete a result as we can. + + if rootPruning == unpruned { + // Everything is already unpruned, so there isn't anything we can do to + // extend it further. + break + } + + nPrevRoots := len(upgradedRoot) + for p := range selectedRoot { + // Since p is a root path, when we fix up the module graph to be + // consistent with the selected versions, p will be promoted to a root, + // which will pull in its dependencies. Ensure that its dependencies are + // included in the module graph. + v := mg.g.Selected(p) + if v == "none" { + // Version “none” always has no requirements, so it doesn't need + // an explicit node in the module graph. + continue + } + m := module.Version{Path: p, Version: v} + if _, ok := mg.g.RequiredBy(m); !ok && !upgradedRoot[m] { + // The dependencies of the selected version of p were not loaded. + // Mark it as an upgrade so that we will load its dependencies + // in the next iteration. 
+ // + // Note that we don't remove any of the existing roots, even if they are + // no longer the selected version: with graph pruning in effect this may + // leave some spurious dependencies in the graph, but it at least + // preserves enough of the graph to explain why each upgrade occurred: + // this way, we can report a complete path from the passed-in roots + // to every node in the module graph. + // + // This process is guaranteed to reach a fixed point: since we are only + // adding roots (never removing them), the selected version of each module + // can only increase, never decrease, and the set of module versions in the + // universe is finite. + if upgradedRoot == nil { + upgradedRoot = make(map[module.Version]bool) + } + upgradedRoot[m] = true + } + } + if len(upgradedRoot) == nPrevRoots { + break + } + } + + return mg, upgradedRoot, err +} + +type perPruning[T any] struct { + pruned T + unpruned T +} + +func (pp perPruning[T]) from(p modPruning) T { + if p == unpruned { + return pp.unpruned + } + return pp.pruned +} + +// A dqTracker tracks and propagates the reason that each module version +// cannot be included in the module graph. +type dqTracker struct { + // extendedRootPruning is the modPruning given the go.mod file for each root + // in the extended module graph. + extendedRootPruning map[module.Version]modPruning + + // dqReason records whether and why each each encountered version is + // disqualified in a pruned or unpruned context. + dqReason map[module.Version]perPruning[dqState] + + // requiring maps each not-yet-disqualified module version to the versions + // that would cause that module's requirements to be included in a pruned or + // unpruned context. If that version becomes disqualified, the + // disqualification will be propagated to all of the versions in the + // corresponding list. 
+ // + // This map is similar to the module requirement graph, but includes more + // detail about whether a given dependency edge appears in a pruned or + // unpruned context. (Other commands do not need this level of detail.) + requiring map[module.Version][]module.Version +} + +// A dqState indicates whether and why a module version is “disqualified” from +// being used in a way that would incorporate its requirements. +// +// The zero dqState indicates that the module version is not known to be +// disqualified, either because it is ok or because we are currently traversing +// a cycle that includes it. +type dqState struct { + err error // if non-nil, disqualified because the requirements of the module could not be read + dep module.Version // disqualified because the module is or requires dep +} + +func (dq dqState) isDisqualified() bool { + return dq != dqState{} +} + +func (dq dqState) String() string { + if dq.err != nil { + return dq.err.Error() + } + if dq.dep != (module.Version{}) { + return dq.dep.String() + } + return "(no conflict)" +} + +// require records that m directly requires r, in case r becomes disqualified. +// (These edges are in the opposite direction from the edges in an mvs.Graph.) +// +// If r is already disqualified, require propagates the disqualification to m +// and returns the reason for the disqualification. +func (t *dqTracker) require(m, r module.Version) (ok bool) { + rdq := t.dqReason[r] + rootPruning, isRoot := t.extendedRootPruning[r] + if isRoot && rdq.from(rootPruning).isDisqualified() { + // When we pull in m's dependencies, we will have an edge from m to r, and r + // is disqualified (it is a root, which causes its problematic dependencies + // to always be included). So we cannot pull in m's dependencies at all: + // m is completely disqualified. 
+ t.disqualify(m, pruned, dqState{dep: r}) + return false + } + + if dq := rdq.from(unpruned); dq.isDisqualified() { + t.disqualify(m, unpruned, dqState{dep: r}) + if _, ok := t.extendedRootPruning[m]; !ok { + // Since m is not a root, its dependencies can't be included in the pruned + // part of the module graph, and will never be disqualified from a pruned + // reason. We've already disqualified everything that matters. + return false + } + } + + // Record that m is a dependant of r, so that if r is later disqualified + // m will be disqualified as well. + if t.requiring == nil { + t.requiring = make(map[module.Version][]module.Version) + } + t.requiring[r] = append(t.requiring[r], m) + return true +} + +// disqualify records why the dependencies of m cannot be included in the module +// graph if reached from a part of the graph with the given pruning. +// +// Since the pruned graph is a subgraph of the unpruned graph, disqualifying a +// module from a pruned part of the graph also disqualifies it in the unpruned +// parts. +func (t *dqTracker) disqualify(m module.Version, fromPruning modPruning, reason dqState) { + if !reason.isDisqualified() { + panic("internal error: disqualify called with a non-disqualifying dqState") + } + + dq := t.dqReason[m] + if dq.from(fromPruning).isDisqualified() { + return // Already disqualified for some other reason; don't overwrite it. + } + rootPruning, isRoot := t.extendedRootPruning[m] + if fromPruning == pruned { + dq.pruned = reason + if !dq.unpruned.isDisqualified() { + // Since the pruned graph of m is a subgraph of the unpruned graph, if it + // is disqualified due to something in the pruned graph, it is certainly + // disqualified in the unpruned graph from the same reason. 
+ dq.unpruned = reason + } + } else { + dq.unpruned = reason + if dq.pruned.isDisqualified() { + panic(fmt.Sprintf("internal error: %v is marked as disqualified when pruned, but not when unpruned", m)) + } + if isRoot && rootPruning == unpruned { + // Since m is a root that is always unpruned, any other roots — even + // pruned ones! — that cause it to be selected would also cause the reason + // for is disqualification to be included in the module graph. + dq.pruned = reason + } + } + if t.dqReason == nil { + t.dqReason = make(map[module.Version]perPruning[dqState]) + } + t.dqReason[m] = dq + + if isRoot && (fromPruning == pruned || rootPruning == unpruned) { + // Either m is disqualified even when its dependencies are pruned, + // or m's go.mod file causes its dependencies to *always* be unpruned. + // Everything that depends on it must be disqualified. + for _, p := range t.requiring[m] { + t.disqualify(p, pruned, dqState{dep: m}) + // Note that since the pruned graph is a subset of the unpruned graph, + // disqualifying p in the pruned graph also disqualifies it in the + // unpruned graph. + } + // Everything in t.requiring[m] is now fully disqualified. + // We won't need to use it again. + delete(t.requiring, m) + return + } + + // Either m is not a root, or it is a pruned root but only being disqualified + // when reached from the unpruned parts of the module graph. + // Either way, the reason for this disqualification is only visible to the + // unpruned parts of the module graph. + for _, p := range t.requiring[m] { + t.disqualify(p, unpruned, dqState{dep: m}) + } + if !isRoot { + // Since m is not a root, its dependencies can't be included in the pruned + // part of the module graph, and will never be disqualified from a pruned + // reason. We've already disqualified everything that matters. + delete(t.requiring, m) + } +} + +// check reports whether m is disqualified in the given pruning context. 
+func (t *dqTracker) check(m module.Version, pruning modPruning) dqState { + return t.dqReason[m].from(pruning) +} + +// path returns the path from m to the reason it is disqualified, which may be +// either a module that violates constraints or an error in loading +// requirements. +// +// If m is not disqualified, path returns (nil, nil). +func (t *dqTracker) path(m module.Version, pruning modPruning) (path []module.Version, err error) { + for { + dq := t.dqReason[m].from(pruning) + if !dq.isDisqualified() { + return path, nil + } + path = append(path, m) + if dq.err != nil || dq.dep == m { + return path, dq.err // m itself is the conflict. + } + m = dq.dep + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/help.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/help.go new file mode 100644 index 0000000000000000000000000000000000000000..886ad62bd90cb6a64113732336d9affe117a98ef --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/help.go @@ -0,0 +1,64 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modload + +import "cmd/go/internal/base" + +var HelpModules = &base.Command{ + UsageLine: "modules", + Short: "modules, module versions, and more", + Long: ` +Modules are how Go manages dependencies. + +A module is a collection of packages that are released, versioned, and +distributed together. Modules may be downloaded directly from version control +repositories or from module proxy servers. + +For a series of tutorials on modules, see +https://golang.org/doc/tutorial/create-module. + +For a detailed reference on modules, see https://golang.org/ref/mod. + +By default, the go command may download modules from https://proxy.golang.org. +It may authenticate modules using the checksum database at +https://sum.golang.org. Both services are operated by the Go team at Google. 
+The privacy policies for these services are available at +https://proxy.golang.org/privacy and https://sum.golang.org/privacy, +respectively. + +The go command's download behavior may be configured using GOPROXY, GOSUMDB, +GOPRIVATE, and other environment variables. See 'go help environment' +and https://golang.org/ref/mod#private-module-privacy for more information. + `, +} + +var HelpGoMod = &base.Command{ + UsageLine: "go.mod", + Short: "the go.mod file", + Long: ` +A module version is defined by a tree of source files, with a go.mod +file in its root. When the go command is run, it looks in the current +directory and then successive parent directories to find the go.mod +marking the root of the main (current) module. + +The go.mod file format is described in detail at +https://golang.org/ref/mod#go-mod-file. + +To create a new go.mod file, use 'go mod init'. For details see +'go help mod init' or https://golang.org/ref/mod#go-mod-init. + +To add missing module requirements or remove unneeded requirements, +use 'go mod tidy'. For details, see 'go help mod tidy' or +https://golang.org/ref/mod#go-mod-tidy. + +To add, upgrade, downgrade, or remove a specific module requirement, use +'go get'. For details, see 'go help module-get' or +https://golang.org/ref/mod#go-get. + +To make other changes or to parse go.mod as JSON for use by other tools, +use 'go mod edit'. See 'go help mod edit' or +https://golang.org/ref/mod#go-mod-edit. + `, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/import.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/import.go new file mode 100644 index 0000000000000000000000000000000000000000..7cd5fcf36a67ace4635a88dd2a88454b3030b840 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/import.go @@ -0,0 +1,784 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package modload + +import ( + "context" + "errors" + "fmt" + "go/build" + "io/fs" + "os" + pathpkg "path" + "path/filepath" + "sort" + "strings" + + "cmd/go/internal/cfg" + "cmd/go/internal/fsys" + "cmd/go/internal/gover" + "cmd/go/internal/modfetch" + "cmd/go/internal/modindex" + "cmd/go/internal/par" + "cmd/go/internal/search" + "cmd/go/internal/str" + + "golang.org/x/mod/module" +) + +type ImportMissingError struct { + Path string + Module module.Version + QueryErr error + + ImportingMainModule module.Version + + // isStd indicates whether we would expect to find the package in the standard + // library. This is normally true for all dotless import paths, but replace + // directives can cause us to treat the replaced paths as also being in + // modules. + isStd bool + + // importerGoVersion is the version the module containing the import error + // specified. It is only set when isStd is true. + importerGoVersion string + + // replaced the highest replaced version of the module where the replacement + // contains the package. replaced is only set if the replacement is unused. + replaced module.Version + + // newMissingVersion is set to a newer version of Module if one is present + // in the build list. When set, we can't automatically upgrade. 
+ newMissingVersion string +} + +func (e *ImportMissingError) Error() string { + if e.Module.Path == "" { + if e.isStd { + msg := fmt.Sprintf("package %s is not in std (%s)", e.Path, filepath.Join(cfg.GOROOT, "src", e.Path)) + if e.importerGoVersion != "" { + msg += fmt.Sprintf("\nnote: imported by a module that requires go %s", e.importerGoVersion) + } + return msg + } + if e.QueryErr != nil && e.QueryErr != ErrNoModRoot { + return fmt.Sprintf("cannot find module providing package %s: %v", e.Path, e.QueryErr) + } + if cfg.BuildMod == "mod" || (cfg.BuildMod == "readonly" && allowMissingModuleImports) { + return "cannot find module providing package " + e.Path + } + + if e.replaced.Path != "" { + suggestArg := e.replaced.Path + if !module.IsZeroPseudoVersion(e.replaced.Version) { + suggestArg = e.replaced.String() + } + return fmt.Sprintf("module %s provides package %s and is replaced but not required; to add it:\n\tgo get %s", e.replaced.Path, e.Path, suggestArg) + } + + message := fmt.Sprintf("no required module provides package %s", e.Path) + if e.QueryErr != nil { + return fmt.Sprintf("%s: %v", message, e.QueryErr) + } + if e.ImportingMainModule.Path != "" && e.ImportingMainModule != MainModules.ModContainingCWD() { + return fmt.Sprintf("%s; to add it:\n\tcd %s\n\tgo get %s", message, MainModules.ModRoot(e.ImportingMainModule), e.Path) + } + return fmt.Sprintf("%s; to add it:\n\tgo get %s", message, e.Path) + } + + if e.newMissingVersion != "" { + return fmt.Sprintf("package %s provided by %s at latest version %s but not at required version %s", e.Path, e.Module.Path, e.Module.Version, e.newMissingVersion) + } + + return fmt.Sprintf("missing module for import: %s@%s provides %s", e.Module.Path, e.Module.Version, e.Path) +} + +func (e *ImportMissingError) Unwrap() error { + return e.QueryErr +} + +func (e *ImportMissingError) ImportPath() string { + return e.Path +} + +// An AmbiguousImportError indicates an import of a package found in multiple +// modules in 
the build list, or found in both the main module and its vendor +// directory. +type AmbiguousImportError struct { + importPath string + Dirs []string + Modules []module.Version // Either empty or 1:1 with Dirs. +} + +func (e *AmbiguousImportError) ImportPath() string { + return e.importPath +} + +func (e *AmbiguousImportError) Error() string { + locType := "modules" + if len(e.Modules) == 0 { + locType = "directories" + } + + var buf strings.Builder + fmt.Fprintf(&buf, "ambiguous import: found package %s in multiple %s:", e.importPath, locType) + + for i, dir := range e.Dirs { + buf.WriteString("\n\t") + if i < len(e.Modules) { + m := e.Modules[i] + buf.WriteString(m.Path) + if m.Version != "" { + fmt.Fprintf(&buf, " %s", m.Version) + } + fmt.Fprintf(&buf, " (%s)", dir) + } else { + buf.WriteString(dir) + } + } + + return buf.String() +} + +// A DirectImportFromImplicitDependencyError indicates a package directly +// imported by a package or test in the main module that is satisfied by a +// dependency that is not explicit in the main module's go.mod file. +type DirectImportFromImplicitDependencyError struct { + ImporterPath string + ImportedPath string + Module module.Version +} + +func (e *DirectImportFromImplicitDependencyError) Error() string { + return fmt.Sprintf("package %s imports %s from implicitly required module; to add missing requirements, run:\n\tgo get %s@%s", e.ImporterPath, e.ImportedPath, e.Module.Path, e.Module.Version) +} + +func (e *DirectImportFromImplicitDependencyError) ImportPath() string { + return e.ImporterPath +} + +// ImportMissingSumError is reported in readonly mode when we need to check +// if a module contains a package, but we don't have a sum for its .zip file. +// We might need sums for multiple modules to verify the package is unique. 
+// +// TODO(#43653): consolidate multiple errors of this type into a single error +// that suggests a 'go get' command for root packages that transitively import +// packages from modules with missing sums. load.CheckPackageErrors would be +// a good place to consolidate errors, but we'll need to attach the import +// stack here. +type ImportMissingSumError struct { + importPath string + found bool + mods []module.Version + importer, importerVersion string // optional, but used for additional context + importerIsTest bool +} + +func (e *ImportMissingSumError) Error() string { + var importParen string + if e.importer != "" { + importParen = fmt.Sprintf(" (imported by %s)", e.importer) + } + var message string + if e.found { + message = fmt.Sprintf("missing go.sum entry needed to verify package %s%s is provided by exactly one module", e.importPath, importParen) + } else { + message = fmt.Sprintf("missing go.sum entry for module providing package %s%s", e.importPath, importParen) + } + var hint string + if e.importer == "" { + // Importing package is unknown, or the missing package was named on the + // command line. Recommend 'go mod download' for the modules that could + // provide the package, since that shouldn't change go.mod. + if len(e.mods) > 0 { + args := make([]string, len(e.mods)) + for i, mod := range e.mods { + args[i] = mod.Path + } + hint = fmt.Sprintf("; to add:\n\tgo mod download %s", strings.Join(args, " ")) + } + } else { + // Importing package is known (common case). Recommend 'go get' on the + // current version of the importing package. 
+ tFlag := "" + if e.importerIsTest { + tFlag = " -t" + } + version := "" + if e.importerVersion != "" { + version = "@" + e.importerVersion + } + hint = fmt.Sprintf("; to add:\n\tgo get%s %s%s", tFlag, e.importer, version) + } + return message + hint +} + +func (e *ImportMissingSumError) ImportPath() string { + return e.importPath +} + +type invalidImportError struct { + importPath string + err error +} + +func (e *invalidImportError) ImportPath() string { + return e.importPath +} + +func (e *invalidImportError) Error() string { + return e.err.Error() +} + +func (e *invalidImportError) Unwrap() error { + return e.err +} + +// importFromModules finds the module and directory in the dependency graph of +// rs containing the package with the given import path. If mg is nil, +// importFromModules attempts to locate the module using only the main module +// and the roots of rs before it loads the full graph. +// +// The answer must be unique: importFromModules returns an error if multiple +// modules are observed to provide the same package. +// +// importFromModules can return a module with an empty m.Path, for packages in +// the standard library. +// +// importFromModules can return an empty directory string, for fake packages +// like "C" and "unsafe". +// +// If the package is not present in any module selected from the requirement +// graph, importFromModules returns an *ImportMissingError. +// +// If the package is present in exactly one module, importFromModules will +// return the module, its root directory, and a list of other modules that +// lexically could have provided the package but did not. +// +// If skipModFile is true, the go.mod file for the package is not loaded. This +// allows 'go mod tidy' to preserve a minor checksum-preservation bug +// (https://go.dev/issue/56222) for modules with 'go' versions between 1.17 and +// 1.20, preventing unnecessary go.sum churn and network access in those +// modules. 
+func importFromModules(ctx context.Context, path string, rs *Requirements, mg *ModuleGraph, skipModFile bool) (m module.Version, modroot, dir string, altMods []module.Version, err error) { + invalidf := func(format string, args ...interface{}) (module.Version, string, string, []module.Version, error) { + return module.Version{}, "", "", nil, &invalidImportError{ + importPath: path, + err: fmt.Errorf(format, args...), + } + } + + if strings.Contains(path, "@") { + return invalidf("import path %q should not have @version", path) + } + if build.IsLocalImport(path) { + return invalidf("%q is relative, but relative import paths are not supported in module mode", path) + } + if filepath.IsAbs(path) { + return invalidf("%q is not a package path; see 'go help packages'", path) + } + if search.IsMetaPackage(path) { + return invalidf("%q is not an importable package; see 'go help packages'", path) + } + + if path == "C" { + // There's no directory for import "C". + return module.Version{}, "", "", nil, nil + } + // Before any further lookup, check that the path is valid. + if err := module.CheckImportPath(path); err != nil { + return module.Version{}, "", "", nil, &invalidImportError{importPath: path, err: err} + } + + // Check each module on the build list. + var dirs, roots []string + var mods []module.Version + + // Is the package in the standard library? 
+ pathIsStd := search.IsStandardImportPath(path) + if pathIsStd && modindex.IsStandardPackage(cfg.GOROOT, cfg.BuildContext.Compiler, path) { + for _, mainModule := range MainModules.Versions() { + if MainModules.InGorootSrc(mainModule) { + if dir, ok, err := dirInModule(path, MainModules.PathPrefix(mainModule), MainModules.ModRoot(mainModule), true); err != nil { + return module.Version{}, MainModules.ModRoot(mainModule), dir, nil, err + } else if ok { + return mainModule, MainModules.ModRoot(mainModule), dir, nil, nil + } + } + } + dir := filepath.Join(cfg.GOROOTsrc, path) + modroot = cfg.GOROOTsrc + if str.HasPathPrefix(path, "cmd") { + modroot = filepath.Join(cfg.GOROOTsrc, "cmd") + } + dirs = append(dirs, dir) + roots = append(roots, modroot) + mods = append(mods, module.Version{}) + } + // -mod=vendor is special. + // Everything must be in the main modules or the main module's or workspace's vendor directory. + if cfg.BuildMod == "vendor" { + var mainErr error + for _, mainModule := range MainModules.Versions() { + modRoot := MainModules.ModRoot(mainModule) + if modRoot != "" { + dir, mainOK, err := dirInModule(path, MainModules.PathPrefix(mainModule), modRoot, true) + if mainErr == nil { + mainErr = err + } + if mainOK { + mods = append(mods, mainModule) + dirs = append(dirs, dir) + roots = append(roots, modRoot) + } + } + } + + if HasModRoot() { + vendorDir := VendorDir() + dir, vendorOK, _ := dirInModule(path, "", vendorDir, false) + if vendorOK { + readVendorList(vendorDir) + // TODO(#60922): It's possible for a package to manually have been added to the + // vendor directory, causing the dirInModule to succeed, but no vendorPkgModule + // to exist, causing an empty module path to be reported. Do better checking + // here. 
+ mods = append(mods, vendorPkgModule[path]) + dirs = append(dirs, dir) + roots = append(roots, vendorDir) + } + } + + if len(dirs) > 1 { + return module.Version{}, "", "", nil, &AmbiguousImportError{importPath: path, Dirs: dirs} + } + + if mainErr != nil { + return module.Version{}, "", "", nil, mainErr + } + + if len(dirs) == 0 { + return module.Version{}, "", "", nil, &ImportMissingError{Path: path} + } + + return mods[0], roots[0], dirs[0], nil, nil + } + + // Iterate over possible modules for the path, not all selected modules. + // Iterating over selected modules would make the overall loading time + // O(M × P) for M modules providing P imported packages, whereas iterating + // over path prefixes is only O(P × k) with maximum path depth k. For + // large projects both M and P may be very large (note that M ≤ P), but k + // will tend to remain smallish (if for no other reason than filesystem + // path limitations). + // + // We perform this iteration either one or two times. If mg is initially nil, + // then we first attempt to load the package using only the main module and + // its root requirements. If that does not identify the package, or if mg is + // already non-nil, then we attempt to load the package using the full + // requirements in mg. + for { + var sumErrMods, altMods []module.Version + for prefix := path; prefix != "."; prefix = pathpkg.Dir(prefix) { + if gover.IsToolchain(prefix) { + // Do not use the synthetic "go" module for "go/ast". + continue + } + var ( + v string + ok bool + ) + if mg == nil { + v, ok = rs.rootSelected(prefix) + } else { + v, ok = mg.Selected(prefix), true + } + if !ok || v == "none" { + continue + } + m := module.Version{Path: prefix, Version: v} + + root, isLocal, err := fetch(ctx, m) + if err != nil { + if sumErr := (*sumMissingError)(nil); errors.As(err, &sumErr) { + // We are missing a sum needed to fetch a module in the build list. 
+ // We can't verify that the package is unique, and we may not find + // the package at all. Keep checking other modules to decide which + // error to report. Multiple sums may be missing if we need to look in + // multiple nested modules to resolve the import; we'll report them all. + sumErrMods = append(sumErrMods, m) + continue + } + // Report fetch error. + // Note that we don't know for sure this module is necessary, + // but it certainly _could_ provide the package, and even if we + // continue the loop and find the package in some other module, + // we need to look at this module to make sure the import is + // not ambiguous. + return module.Version{}, "", "", nil, err + } + if dir, ok, err := dirInModule(path, m.Path, root, isLocal); err != nil { + return module.Version{}, "", "", nil, err + } else if ok { + mods = append(mods, m) + roots = append(roots, root) + dirs = append(dirs, dir) + } else { + altMods = append(altMods, m) + } + } + + if len(mods) > 1 { + // We produce the list of directories from longest to shortest candidate + // module path, but the AmbiguousImportError should report them from + // shortest to longest. Reverse them now. + for i := 0; i < len(mods)/2; i++ { + j := len(mods) - 1 - i + mods[i], mods[j] = mods[j], mods[i] + roots[i], roots[j] = roots[j], roots[i] + dirs[i], dirs[j] = dirs[j], dirs[i] + } + return module.Version{}, "", "", nil, &AmbiguousImportError{importPath: path, Dirs: dirs, Modules: mods} + } + + if len(sumErrMods) > 0 { + for i := 0; i < len(sumErrMods)/2; i++ { + j := len(sumErrMods) - 1 - i + sumErrMods[i], sumErrMods[j] = sumErrMods[j], sumErrMods[i] + } + return module.Version{}, "", "", nil, &ImportMissingSumError{ + importPath: path, + mods: sumErrMods, + found: len(mods) > 0, + } + } + + if len(mods) == 1 { + // We've found the unique module containing the package. + // However, in order to actually compile it we need to know what + // Go language version to use, which requires its go.mod file. 
+ // + // If the module graph is pruned and this is a test-only dependency + // of a package in "all", we didn't necessarily load that file + // when we read the module graph, so do it now to be sure. + if !skipModFile && cfg.BuildMod != "vendor" && mods[0].Path != "" && !MainModules.Contains(mods[0].Path) { + if _, err := goModSummary(mods[0]); err != nil { + return module.Version{}, "", "", nil, err + } + } + return mods[0], roots[0], dirs[0], altMods, nil + } + + if mg != nil { + // We checked the full module graph and still didn't find the + // requested package. + var queryErr error + if !HasModRoot() { + queryErr = ErrNoModRoot + } + return module.Version{}, "", "", nil, &ImportMissingError{Path: path, QueryErr: queryErr, isStd: pathIsStd} + } + + // So far we've checked the root dependencies. + // Load the full module graph and try again. + mg, err = rs.Graph(ctx) + if err != nil { + // We might be missing one or more transitive (implicit) dependencies from + // the module graph, so we can't return an ImportMissingError here — one + // of the missing modules might actually contain the package in question, + // in which case we shouldn't go looking for it in some new dependency. + return module.Version{}, "", "", nil, err + } + } +} + +// queryImport attempts to locate a module that can be added to the current +// build list to provide the package with the given import path. +// +// Unlike QueryPattern, queryImport prefers to add a replaced version of a +// module *before* checking the proxies for a version to add. +func queryImport(ctx context.Context, path string, rs *Requirements) (module.Version, error) { + // To avoid spurious remote fetches, try the latest replacement for each + // module (golang.org/issue/26241). + var mods []module.Version + if MainModules != nil { // TODO(#48912): Ensure MainModules exists at this point, and remove the check. 
+ for mp, mv := range MainModules.HighestReplaced() { + if !maybeInModule(path, mp) { + continue + } + if mv == "" { + // The only replacement is a wildcard that doesn't specify a version, so + // synthesize a pseudo-version with an appropriate major version and a + // timestamp below any real timestamp. That way, if the main module is + // used from within some other module, the user will be able to upgrade + // the requirement to any real version they choose. + if _, pathMajor, ok := module.SplitPathVersion(mp); ok && len(pathMajor) > 0 { + mv = module.ZeroPseudoVersion(pathMajor[1:]) + } else { + mv = module.ZeroPseudoVersion("v0") + } + } + mg, err := rs.Graph(ctx) + if err != nil { + return module.Version{}, err + } + if gover.ModCompare(mp, mg.Selected(mp), mv) >= 0 { + // We can't resolve the import by adding mp@mv to the module graph, + // because the selected version of mp is already at least mv. + continue + } + mods = append(mods, module.Version{Path: mp, Version: mv}) + } + } + + // Every module path in mods is a prefix of the import path. + // As in QueryPattern, prefer the longest prefix that satisfies the import. + sort.Slice(mods, func(i, j int) bool { + return len(mods[i].Path) > len(mods[j].Path) + }) + for _, m := range mods { + root, isLocal, err := fetch(ctx, m) + if err != nil { + if sumErr := (*sumMissingError)(nil); errors.As(err, &sumErr) { + return module.Version{}, &ImportMissingSumError{importPath: path} + } + return module.Version{}, err + } + if _, ok, err := dirInModule(path, m.Path, root, isLocal); err != nil { + return m, err + } else if ok { + if cfg.BuildMod == "readonly" { + return module.Version{}, &ImportMissingError{Path: path, replaced: m} + } + return m, nil + } + } + if len(mods) > 0 && module.CheckPath(path) != nil { + // The package path is not valid to fetch remotely, + // so it can only exist in a replaced module, + // and we know from the above loop that it is not. 
+ replacement := Replacement(mods[0]) + return module.Version{}, &PackageNotInModuleError{ + Mod: mods[0], + Query: "latest", + Pattern: path, + Replacement: replacement, + } + } + + if search.IsStandardImportPath(path) { + // This package isn't in the standard library, isn't in any module already + // in the build list, and isn't in any other module that the user has + // shimmed in via a "replace" directive. + // Moreover, the import path is reserved for the standard library, so + // QueryPattern cannot possibly find a module containing this package. + // + // Instead of trying QueryPattern, report an ImportMissingError immediately. + return module.Version{}, &ImportMissingError{Path: path, isStd: true} + } + + if (cfg.BuildMod == "readonly" || cfg.BuildMod == "vendor") && !allowMissingModuleImports { + // In readonly mode, we can't write go.mod, so we shouldn't try to look up + // the module. If readonly mode was enabled explicitly, include that in + // the error message. + // In vendor mode, we cannot use the network or module cache, so we + // shouldn't try to look up the module + var queryErr error + if cfg.BuildModExplicit { + queryErr = fmt.Errorf("import lookup disabled by -mod=%s", cfg.BuildMod) + } else if cfg.BuildModReason != "" { + queryErr = fmt.Errorf("import lookup disabled by -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason) + } + return module.Version{}, &ImportMissingError{Path: path, QueryErr: queryErr} + } + + // Look up module containing the package, for addition to the build list. + // Goal is to determine the module, download it to dir, + // and return m, dir, ImportMissingError. 
+ fmt.Fprintf(os.Stderr, "go: finding module for package %s\n", path) + + mg, err := rs.Graph(ctx) + if err != nil { + return module.Version{}, err + } + + candidates, err := QueryPackages(ctx, path, "latest", mg.Selected, CheckAllowed) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // Return "cannot find module providing package […]" instead of whatever + // low-level error QueryPattern produced. + return module.Version{}, &ImportMissingError{Path: path, QueryErr: err} + } else { + return module.Version{}, err + } + } + + candidate0MissingVersion := "" + for i, c := range candidates { + if v := mg.Selected(c.Mod.Path); gover.ModCompare(c.Mod.Path, v, c.Mod.Version) > 0 { + // QueryPattern proposed that we add module c.Mod to provide the package, + // but we already depend on a newer version of that module (and that + // version doesn't have the package). + // + // This typically happens when a package is present at the "@latest" + // version (e.g., v1.0.0) of a module, but we have a newer version + // of the same module in the build list (e.g., v1.0.1-beta), and + // the package is not present there. + if i == 0 { + candidate0MissingVersion = v + } + continue + } + return c.Mod, nil + } + return module.Version{}, &ImportMissingError{ + Path: path, + Module: candidates[0].Mod, + newMissingVersion: candidate0MissingVersion, + } +} + +// maybeInModule reports whether, syntactically, +// a package with the given import path could be supplied +// by a module with the given module path (mpath). +func maybeInModule(path, mpath string) bool { + return mpath == path || + len(path) > len(mpath) && path[len(mpath)] == '/' && path[:len(mpath)] == mpath +} + +var ( + haveGoModCache par.Cache[string, bool] // dir → bool + haveGoFilesCache par.ErrCache[string, bool] // dir → haveGoFiles +) + +// dirInModule locates the directory that would hold the package named by the given path, +// if it were in the module with module path mpath and root mdir. 
+// If path is syntactically not within mpath, +// or if mdir is a local file tree (isLocal == true) and the directory +// that would hold path is in a sub-module (covered by a go.mod below mdir), +// dirInModule returns "", false, nil. +// +// Otherwise, dirInModule returns the name of the directory where +// Go source files would be expected, along with a boolean indicating +// whether there are in fact Go source files in that directory. +// A non-nil error indicates that the existence of the directory and/or +// source files could not be determined, for example due to a permission error. +func dirInModule(path, mpath, mdir string, isLocal bool) (dir string, haveGoFiles bool, err error) { + // Determine where to expect the package. + if path == mpath { + dir = mdir + } else if mpath == "" { // vendor directory + dir = filepath.Join(mdir, path) + } else if len(path) > len(mpath) && path[len(mpath)] == '/' && path[:len(mpath)] == mpath { + dir = filepath.Join(mdir, path[len(mpath)+1:]) + } else { + return "", false, nil + } + + // Check that there aren't other modules in the way. + // This check is unnecessary inside the module cache + // and important to skip in the vendor directory, + // where all the module trees have been overlaid. + // So we only check local module trees + // (the main module, and any directory trees pointed at by replace directives). + if isLocal { + for d := dir; d != mdir && len(d) > len(mdir); { + haveGoMod := haveGoModCache.Do(d, func() bool { + fi, err := fsys.Stat(filepath.Join(d, "go.mod")) + return err == nil && !fi.IsDir() + }) + + if haveGoMod { + return "", false, nil + } + parent := filepath.Dir(d) + if parent == d { + // Break the loop, as otherwise we'd loop + // forever if d=="." and mdir=="". + break + } + d = parent + } + } + + // Now committed to returning dir (not ""). + + // Are there Go source files in the directory? + // We don't care about build tags, not even "go:build ignore". 
+ // We're just looking for a plausible directory. + haveGoFiles, err = haveGoFilesCache.Do(dir, func() (bool, error) { + // modindex.GetPackage will return ErrNotIndexed for any directories which + // are reached through a symlink, so that they will be handled by + // fsys.IsDirWithGoFiles below. + if ip, err := modindex.GetPackage(mdir, dir); err == nil { + return ip.IsDirWithGoFiles() + } else if !errors.Is(err, modindex.ErrNotIndexed) { + return false, err + } + return fsys.IsDirWithGoFiles(dir) + }) + + return dir, haveGoFiles, err +} + +// fetch downloads the given module (or its replacement) +// and returns its location. +// +// The isLocal return value reports whether the replacement, +// if any, is local to the filesystem. +func fetch(ctx context.Context, mod module.Version) (dir string, isLocal bool, err error) { + if modRoot := MainModules.ModRoot(mod); modRoot != "" { + return modRoot, true, nil + } + if r := Replacement(mod); r.Path != "" { + if r.Version == "" { + dir = r.Path + if !filepath.IsAbs(dir) { + dir = filepath.Join(replaceRelativeTo(), dir) + } + // Ensure that the replacement directory actually exists: + // dirInModule does not report errors for missing modules, + // so if we don't report the error now, later failures will be + // very mysterious. + if _, err := fsys.Stat(dir); err != nil { + // TODO(bcmills): We should also read dir/go.mod here and check its Go version, + // and return a gover.TooNewError if appropriate. + + if os.IsNotExist(err) { + // Semantically the module version itself “exists” — we just don't + // have its source code. Remove the equivalence to os.ErrNotExist, + // and make the message more concise while we're at it. 
+ err = fmt.Errorf("replacement directory %s does not exist", r.Path) + } else { + err = fmt.Errorf("replacement directory %s: %w", r.Path, err) + } + return dir, true, module.VersionError(mod, err) + } + return dir, true, nil + } + mod = r + } + + if mustHaveSums() && !modfetch.HaveSum(mod) { + return "", false, module.VersionError(mod, &sumMissingError{}) + } + + dir, err = modfetch.Download(ctx, mod) + return dir, false, err +} + +// mustHaveSums reports whether we require that all checksums +// needed to load or build packages are already present in the go.sum file. +func mustHaveSums() bool { + return HasModRoot() && cfg.BuildMod == "readonly" && !inWorkspaceMode() +} + +type sumMissingError struct { + suggestion string +} + +func (e *sumMissingError) Error() string { + return "missing go.sum entry" + e.suggestion +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/import_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/import_test.go new file mode 100644 index 0000000000000000000000000000000000000000..eb4f5d64d3a3c7dca78c66d9dfa901530c5dfaa9 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/import_test.go @@ -0,0 +1,97 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package modload + +import ( + "context" + "internal/testenv" + "regexp" + "strings" + "testing" + + "golang.org/x/mod/module" +) + +var importTests = []struct { + path string + m module.Version + err string +}{ + { + path: "golang.org/x/net/context", + m: module.Version{ + Path: "golang.org/x/net", + }, + }, + { + path: "golang.org/x/net", + err: `module golang.org/x/net@.* found \(v[01]\.\d+\.\d+\), but does not contain package golang.org/x/net`, + }, + { + path: "golang.org/x/text", + m: module.Version{ + Path: "golang.org/x/text", + }, + }, + { + path: "github.com/rsc/quote/buggy", + m: module.Version{ + Path: "github.com/rsc/quote", + Version: "v1.5.2", + }, + }, + { + path: "github.com/rsc/quote", + m: module.Version{ + Path: "github.com/rsc/quote", + Version: "v1.5.2", + }, + }, + { + path: "golang.org/x/foo/bar", + err: "cannot find module providing package golang.org/x/foo/bar", + }, +} + +func TestQueryImport(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + testenv.MustHaveExecPath(t, "git") + + oldAllowMissingModuleImports := allowMissingModuleImports + oldRootMode := RootMode + defer func() { + allowMissingModuleImports = oldAllowMissingModuleImports + RootMode = oldRootMode + }() + allowMissingModuleImports = true + RootMode = NoRoot + + ctx := context.Background() + rs := LoadModFile(ctx) + + for _, tt := range importTests { + t.Run(strings.ReplaceAll(tt.path, "/", "_"), func(t *testing.T) { + // Note that there is no build list, so Import should always fail. 
+ m, err := queryImport(ctx, tt.path, rs) + + if tt.err == "" { + if err != nil { + t.Fatalf("queryImport(_, %q): %v", tt.path, err) + } + } else { + if err == nil { + t.Fatalf("queryImport(_, %q) = %v, nil; expected error", tt.path, m) + } + if !regexp.MustCompile(tt.err).MatchString(err.Error()) { + t.Fatalf("queryImport(_, %q): error %q, want error matching %#q", tt.path, err, tt.err) + } + } + + if m.Path != tt.m.Path || (tt.m.Version != "" && m.Version != tt.m.Version) { + t.Errorf("queryImport(_, %q) = %v, _; want %v", tt.path, m, tt.m) + } + }) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/init.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/init.go new file mode 100644 index 0000000000000000000000000000000000000000..f4f4a68254b1663a3b3743eac90ecbb3bad16a6f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/init.go @@ -0,0 +1,2042 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modload + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "internal/lazyregexp" + "io" + "os" + "path" + "path/filepath" + "slices" + "strconv" + "strings" + "sync" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/fsys" + "cmd/go/internal/gover" + "cmd/go/internal/lockedfile" + "cmd/go/internal/modfetch" + "cmd/go/internal/search" + + "golang.org/x/mod/modfile" + "golang.org/x/mod/module" +) + +// Variables set by other packages. +// +// TODO(#40775): See if these can be plumbed as explicit parameters. +var ( + // RootMode determines whether a module root is needed. + RootMode Root + + // ForceUseModules may be set to force modules to be enabled when + // GO111MODULE=auto or to report an error when GO111MODULE=off. 
+ ForceUseModules bool + + allowMissingModuleImports bool + + // ExplicitWriteGoMod prevents LoadPackages, ListModules, and other functions + // from updating go.mod and go.sum or reporting errors when updates are + // needed. A package should set this if it would cause go.mod to be written + // multiple times (for example, 'go get' calls LoadPackages multiple times) or + // if it needs some other operation to be successful before go.mod and go.sum + // can be written (for example, 'go mod download' must download modules before + // adding sums to go.sum). Packages that set this are responsible for calling + // WriteGoMod explicitly. + ExplicitWriteGoMod bool +) + +// Variables set in Init. +var ( + initialized bool + + // These are primarily used to initialize the MainModules, and should be + // eventually superseded by them but are still used in cases where the module + // roots are required but MainModules hasn't been initialized yet. Set to + // the modRoots of the main modules. + // modRoots != nil implies len(modRoots) > 0 + modRoots []string + gopath string +) + +// EnterModule resets MainModules and requirements to refer to just this one module. +func EnterModule(ctx context.Context, enterModroot string) { + MainModules = nil // reset MainModules + requirements = nil + workFilePath = "" // Force module mode + modfetch.Reset() + + modRoots = []string{enterModroot} + LoadModFile(ctx) +} + +// Variable set in InitWorkfile +var ( + // Set to the path to the go.work file, or "" if workspace mode is disabled. + workFilePath string +) + +type MainModuleSet struct { + // versions are the module.Version values of each of the main modules. + // For each of them, the Path fields are ordinary module paths and the Version + // fields are empty strings. + // versions is clipped (len=cap). + versions []module.Version + + // modRoot maps each module in versions to its absolute filesystem path. 
+ modRoot map[module.Version]string + + // pathPrefix is the path prefix for packages in the module, without a trailing + // slash. For most modules, pathPrefix is just version.Path, but the + // standard-library module "std" has an empty prefix. + pathPrefix map[module.Version]string + + // inGorootSrc caches whether modRoot is within GOROOT/src. + // The "std" module is special within GOROOT/src, but not otherwise. + inGorootSrc map[module.Version]bool + + modFiles map[module.Version]*modfile.File + + modContainingCWD module.Version + + workFile *modfile.WorkFile + + workFileReplaceMap map[module.Version]module.Version + // highest replaced version of each module path; empty string for wildcard-only replacements + highestReplaced map[string]string + + indexMu sync.Mutex + indices map[module.Version]*modFileIndex +} + +func (mms *MainModuleSet) PathPrefix(m module.Version) string { + return mms.pathPrefix[m] +} + +// Versions returns the module.Version values of each of the main modules. +// For each of them, the Path fields are ordinary module paths and the Version +// fields are empty strings. +// Callers should not modify the returned slice. 
+func (mms *MainModuleSet) Versions() []module.Version { + if mms == nil { + return nil + } + return mms.versions +} + +func (mms *MainModuleSet) Contains(path string) bool { + if mms == nil { + return false + } + for _, v := range mms.versions { + if v.Path == path { + return true + } + } + return false +} + +func (mms *MainModuleSet) ModRoot(m module.Version) string { + if mms == nil { + return "" + } + return mms.modRoot[m] +} + +func (mms *MainModuleSet) InGorootSrc(m module.Version) bool { + if mms == nil { + return false + } + return mms.inGorootSrc[m] +} + +func (mms *MainModuleSet) mustGetSingleMainModule() module.Version { + if mms == nil || len(mms.versions) == 0 { + panic("internal error: mustGetSingleMainModule called in context with no main modules") + } + if len(mms.versions) != 1 { + if inWorkspaceMode() { + panic("internal error: mustGetSingleMainModule called in workspace mode") + } else { + panic("internal error: multiple main modules present outside of workspace mode") + } + } + return mms.versions[0] +} + +func (mms *MainModuleSet) GetSingleIndexOrNil() *modFileIndex { + if mms == nil { + return nil + } + if len(mms.versions) == 0 { + return nil + } + return mms.indices[mms.mustGetSingleMainModule()] +} + +func (mms *MainModuleSet) Index(m module.Version) *modFileIndex { + mms.indexMu.Lock() + defer mms.indexMu.Unlock() + return mms.indices[m] +} + +func (mms *MainModuleSet) SetIndex(m module.Version, index *modFileIndex) { + mms.indexMu.Lock() + defer mms.indexMu.Unlock() + mms.indices[m] = index +} + +func (mms *MainModuleSet) ModFile(m module.Version) *modfile.File { + return mms.modFiles[m] +} + +func (mms *MainModuleSet) WorkFile() *modfile.WorkFile { + return mms.workFile +} + +func (mms *MainModuleSet) Len() int { + if mms == nil { + return 0 + } + return len(mms.versions) +} + +// ModContainingCWD returns the main module containing the working directory, +// or module.Version{} if none of the main modules contain the working +// 
directory. +func (mms *MainModuleSet) ModContainingCWD() module.Version { + return mms.modContainingCWD +} + +func (mms *MainModuleSet) HighestReplaced() map[string]string { + return mms.highestReplaced +} + +// GoVersion returns the go version set on the single module, in module mode, +// or the go.work file in workspace mode. +func (mms *MainModuleSet) GoVersion() string { + if inWorkspaceMode() { + return gover.FromGoWork(mms.workFile) + } + if mms != nil && len(mms.versions) == 1 { + f := mms.ModFile(mms.mustGetSingleMainModule()) + if f == nil { + // Special case: we are outside a module, like 'go run x.go'. + // Assume the local Go version. + // TODO(#49228): Clean this up; see loadModFile. + return gover.Local() + } + return gover.FromGoMod(f) + } + return gover.DefaultGoModVersion +} + +// Toolchain returns the toolchain set on the single module, in module mode, +// or the go.work file in workspace mode. +func (mms *MainModuleSet) Toolchain() string { + if inWorkspaceMode() { + if mms.workFile != nil && mms.workFile.Toolchain != nil { + return mms.workFile.Toolchain.Name + } + return "go" + mms.GoVersion() + } + if mms != nil && len(mms.versions) == 1 { + f := mms.ModFile(mms.mustGetSingleMainModule()) + if f == nil { + // Special case: we are outside a module, like 'go run x.go'. + // Assume the local Go version. + // TODO(#49228): Clean this up; see loadModFile. + return gover.LocalToolchain() + } + if f.Toolchain != nil { + return f.Toolchain.Name + } + } + return "go" + mms.GoVersion() +} + +func (mms *MainModuleSet) WorkFileReplaceMap() map[module.Version]module.Version { + return mms.workFileReplaceMap +} + +var MainModules *MainModuleSet + +type Root int + +const ( + // AutoRoot is the default for most commands. modload.Init will look for + // a go.mod file in the current directory or any parent. If none is found, + // modules may be disabled (GO111MODULE=auto) or commands may run in a + // limited module mode. 
+ AutoRoot Root = iota + + // NoRoot is used for commands that run in module mode and ignore any go.mod + // file the current directory or in parent directories. + NoRoot + + // NeedRoot is used for commands that must run in module mode and don't + // make sense without a main module. + NeedRoot +) + +// ModFile returns the parsed go.mod file. +// +// Note that after calling LoadPackages or LoadModGraph, +// the require statements in the modfile.File are no longer +// the source of truth and will be ignored: edits made directly +// will be lost at the next call to WriteGoMod. +// To make permanent changes to the require statements +// in go.mod, edit it before loading. +func ModFile() *modfile.File { + Init() + modFile := MainModules.ModFile(MainModules.mustGetSingleMainModule()) + if modFile == nil { + die() + } + return modFile +} + +func BinDir() string { + Init() + if cfg.GOBIN != "" { + return cfg.GOBIN + } + if gopath == "" { + return "" + } + return filepath.Join(gopath, "bin") +} + +// InitWorkfile initializes the workFilePath variable for commands that +// operate in workspace mode. It should not be called by other commands, +// for example 'go mod tidy', that don't operate in workspace mode. +func InitWorkfile() { + workFilePath = FindGoWork(base.Cwd()) +} + +// FindGoWork returns the name of the go.work file for this command, +// or the empty string if there isn't one. +// Most code should use Init and Enabled rather than use this directly. +// It is exported mainly for Go toolchain switching, which must process +// the go.work very early at startup. 
+func FindGoWork(wd string) string { + if RootMode == NoRoot { + return "" + } + + switch gowork := cfg.Getenv("GOWORK"); gowork { + case "off": + return "" + case "", "auto": + return findWorkspaceFile(wd) + default: + if !filepath.IsAbs(gowork) { + base.Fatalf("go: invalid GOWORK: not an absolute path") + } + return gowork + } +} + +// WorkFilePath returns the absolute path of the go.work file, or "" if not in +// workspace mode. WorkFilePath must be called after InitWorkfile. +func WorkFilePath() string { + return workFilePath +} + +// Reset clears all the initialized, cached state about the use of modules, +// so that we can start over. +func Reset() { + initialized = false + ForceUseModules = false + RootMode = 0 + modRoots = nil + cfg.ModulesEnabled = false + MainModules = nil + requirements = nil + workFilePath = "" + modfetch.Reset() +} + +// Init determines whether module mode is enabled, locates the root of the +// current module (if any), sets environment variables for Git subprocesses, and +// configures the cfg, codehost, load, modfetch, and search packages for use +// with modules. +func Init() { + if initialized { + return + } + initialized = true + + // Keep in sync with WillBeEnabled. We perform extra validation here, and + // there are lots of diagnostics and side effects, so we can't use + // WillBeEnabled directly. + var mustUseModules bool + env := cfg.Getenv("GO111MODULE") + switch env { + default: + base.Fatalf("go: unknown environment setting GO111MODULE=%s", env) + case "auto": + mustUseModules = ForceUseModules + case "on", "": + mustUseModules = true + case "off": + if ForceUseModules { + base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'") + } + mustUseModules = false + return + } + + if err := fsys.Init(base.Cwd()); err != nil { + base.Fatal(err) + } + + // Disable any prompting for passwords by Git. + // Only has an effect for 2.3.0 or later, but avoiding + // the prompt in earlier versions is just too hard. 
+ // If user has explicitly set GIT_TERMINAL_PROMPT=1, keep + // prompting. + // See golang.org/issue/9341 and golang.org/issue/12706. + if os.Getenv("GIT_TERMINAL_PROMPT") == "" { + os.Setenv("GIT_TERMINAL_PROMPT", "0") + } + + // Disable any ssh connection pooling by Git. + // If a Git subprocess forks a child into the background to cache a new connection, + // that child keeps stdout/stderr open. After the Git subprocess exits, + // os/exec expects to be able to read from the stdout/stderr pipe + // until EOF to get all the data that the Git subprocess wrote before exiting. + // The EOF doesn't come until the child exits too, because the child + // is holding the write end of the pipe. + // This is unfortunate, but it has come up at least twice + // (see golang.org/issue/13453 and golang.org/issue/16104) + // and confuses users when it does. + // If the user has explicitly set GIT_SSH or GIT_SSH_COMMAND, + // assume they know what they are doing and don't step on it. + // But default to turning off ControlMaster. + if os.Getenv("GIT_SSH") == "" && os.Getenv("GIT_SSH_COMMAND") == "" { + os.Setenv("GIT_SSH_COMMAND", "ssh -o ControlMaster=no -o BatchMode=yes") + } + + if os.Getenv("GCM_INTERACTIVE") == "" { + os.Setenv("GCM_INTERACTIVE", "never") + } + if modRoots != nil { + // modRoot set before Init was called ("go mod init" does this). + // No need to search for go.mod. + } else if RootMode == NoRoot { + if cfg.ModFile != "" && !base.InGOFLAGS("-modfile") { + base.Fatalf("go: -modfile cannot be used with commands that ignore the current module") + } + modRoots = nil + } else if workFilePath != "" { + // We're in workspace mode, which implies module mode. 
+ if cfg.ModFile != "" { + base.Fatalf("go: -modfile cannot be used in workspace mode") + } + } else { + if modRoot := findModuleRoot(base.Cwd()); modRoot == "" { + if cfg.ModFile != "" { + base.Fatalf("go: cannot find main module, but -modfile was set.\n\t-modfile cannot be used to set the module root directory.") + } + if RootMode == NeedRoot { + base.Fatal(ErrNoModRoot) + } + if !mustUseModules { + // GO111MODULE is 'auto', and we can't find a module root. + // Stay in GOPATH mode. + return + } + } else if search.InDir(modRoot, os.TempDir()) == "." { + // If you create /tmp/go.mod for experimenting, + // then any tests that create work directories under /tmp + // will find it and get modules when they're not expecting them. + // It's a bit of a peculiar thing to disallow but quite mysterious + // when it happens. See golang.org/issue/26708. + fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in system temp root %v\n", os.TempDir()) + if RootMode == NeedRoot { + base.Fatal(ErrNoModRoot) + } + if !mustUseModules { + return + } + } else { + modRoots = []string{modRoot} + } + } + if cfg.ModFile != "" && !strings.HasSuffix(cfg.ModFile, ".mod") { + base.Fatalf("go: -modfile=%s: file does not have .mod extension", cfg.ModFile) + } + + // We're in module mode. Set any global variables that need to be set. + cfg.ModulesEnabled = true + setDefaultBuildMod() + list := filepath.SplitList(cfg.BuildContext.GOPATH) + if len(list) > 0 && list[0] != "" { + gopath = list[0] + if _, err := fsys.Stat(filepath.Join(gopath, "go.mod")); err == nil { + fmt.Fprintf(os.Stderr, "go: warning: ignoring go.mod in $GOPATH %v\n", gopath) + if RootMode == NeedRoot { + base.Fatal(ErrNoModRoot) + } + if !mustUseModules { + return + } + } + } +} + +// WillBeEnabled checks whether modules should be enabled but does not +// initialize modules by installing hooks. If Init has already been called, +// WillBeEnabled returns the same result as Enabled. 
+// +// This function is needed to break a cycle. The main package needs to know +// whether modules are enabled in order to install the module or GOPATH version +// of 'go get', but Init reads the -modfile flag in 'go get', so it shouldn't +// be called until the command is installed and flags are parsed. Instead of +// calling Init and Enabled, the main package can call this function. +func WillBeEnabled() bool { + if modRoots != nil || cfg.ModulesEnabled { + // Already enabled. + return true + } + if initialized { + // Initialized, not enabled. + return false + } + + // Keep in sync with Init. Init does extra validation and prints warnings or + // exits, so it can't call this function directly. + env := cfg.Getenv("GO111MODULE") + switch env { + case "on", "": + return true + case "auto": + break + default: + return false + } + + return FindGoMod(base.Cwd()) != "" +} + +// FindGoMod returns the name of the go.mod file for this command, +// or the empty string if there isn't one. +// Most code should use Init and Enabled rather than use this directly. +// It is exported mainly for Go toolchain switching, which must process +// the go.mod very early at startup. +func FindGoMod(wd string) string { + modRoot := findModuleRoot(wd) + if modRoot == "" { + // GO111MODULE is 'auto', and we can't find a module root. + // Stay in GOPATH mode. + return "" + } + if search.InDir(modRoot, os.TempDir()) == "." { + // If you create /tmp/go.mod for experimenting, + // then any tests that create work directories under /tmp + // will find it and get modules when they're not expecting them. + // It's a bit of a peculiar thing to disallow but quite mysterious + // when it happens. See golang.org/issue/26708. + return "" + } + return filepath.Join(modRoot, "go.mod") +} + +// Enabled reports whether modules are (or must be) enabled. 
+// If modules are enabled but there is no main module, Enabled returns true +// and then the first use of module information will call die +// (usually through MustModRoot). +func Enabled() bool { + Init() + return modRoots != nil || cfg.ModulesEnabled +} + +func VendorDir() string { + if inWorkspaceMode() { + return filepath.Join(filepath.Dir(WorkFilePath()), "vendor") + } + // Even if -mod=vendor, we could be operating with no mod root (and thus no + // vendor directory). As long as there are no dependencies that is expected + // to work. See script/vendor_outside_module.txt. + modRoot := MainModules.ModRoot(MainModules.mustGetSingleMainModule()) + if modRoot == "" { + panic("vendor directory does not exist when in single module mode outside of a module") + } + return filepath.Join(modRoot, "vendor") +} + +func inWorkspaceMode() bool { + if !initialized { + panic("inWorkspaceMode called before modload.Init called") + } + if !Enabled() { + return false + } + return workFilePath != "" +} + +// HasModRoot reports whether a main module is present. +// HasModRoot may return false even if Enabled returns true: for example, 'get' +// does not require a main module. +func HasModRoot() bool { + Init() + return modRoots != nil +} + +// MustHaveModRoot checks that a main module or main modules are present, +// and calls base.Fatalf if there are no main modules. +func MustHaveModRoot() { + Init() + if !HasModRoot() { + die() + } +} + +// ModFilePath returns the path that would be used for the go.mod +// file, if in module mode. ModFilePath calls base.Fatalf if there is no main +// module, even if -modfile is set. 
+func ModFilePath() string { + MustHaveModRoot() + return modFilePath(findModuleRoot(base.Cwd())) +} + +func modFilePath(modRoot string) string { + if cfg.ModFile != "" { + return cfg.ModFile + } + return filepath.Join(modRoot, "go.mod") +} + +func die() { + if cfg.Getenv("GO111MODULE") == "off" { + base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'") + } + if inWorkspaceMode() { + base.Fatalf("go: no modules were found in the current workspace; see 'go help work'") + } + if dir, name := findAltConfig(base.Cwd()); dir != "" { + rel, err := filepath.Rel(base.Cwd(), dir) + if err != nil { + rel = dir + } + cdCmd := "" + if rel != "." { + cdCmd = fmt.Sprintf("cd %s && ", rel) + } + base.Fatalf("go: cannot find main module, but found %s in %s\n\tto create a module there, run:\n\t%sgo mod init", name, dir, cdCmd) + } + base.Fatal(ErrNoModRoot) +} + +var ErrNoModRoot = errors.New("go.mod file not found in current directory or any parent directory; see 'go help modules'") + +type goModDirtyError struct{} + +func (goModDirtyError) Error() string { + if cfg.BuildModExplicit { + return fmt.Sprintf("updates to go.mod needed, disabled by -mod=%v; to update it:\n\tgo mod tidy", cfg.BuildMod) + } + if cfg.BuildModReason != "" { + return fmt.Sprintf("updates to go.mod needed, disabled by -mod=%s\n\t(%s)\n\tto update it:\n\tgo mod tidy", cfg.BuildMod, cfg.BuildModReason) + } + return "updates to go.mod needed; to update it:\n\tgo mod tidy" +} + +var errGoModDirty error = goModDirtyError{} + +func loadWorkFile(path string) (workFile *modfile.WorkFile, modRoots []string, err error) { + workDir := filepath.Dir(path) + wf, err := ReadWorkFile(path) + if err != nil { + return nil, nil, err + } + seen := map[string]bool{} + for _, d := range wf.Use { + modRoot := d.Path + if !filepath.IsAbs(modRoot) { + modRoot = filepath.Join(workDir, modRoot) + } + + if seen[modRoot] { + return nil, nil, fmt.Errorf("path %s appears multiple times in workspace", modRoot) + } + 
seen[modRoot] = true + modRoots = append(modRoots, modRoot) + } + + return wf, modRoots, nil +} + +// ReadWorkFile reads and parses the go.work file at the given path. +func ReadWorkFile(path string) (*modfile.WorkFile, error) { + workData, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + f, err := modfile.ParseWork(path, workData, nil) + if err != nil { + return nil, err + } + if f.Go != nil && gover.Compare(f.Go.Version, gover.Local()) > 0 && cfg.CmdName != "work edit" { + base.Fatal(&gover.TooNewError{What: base.ShortPath(path), GoVersion: f.Go.Version}) + } + return f, nil +} + +// WriteWorkFile cleans and writes out the go.work file to the given path. +func WriteWorkFile(path string, wf *modfile.WorkFile) error { + wf.SortBlocks() + wf.Cleanup() + out := modfile.Format(wf.Syntax) + + return os.WriteFile(path, out, 0666) +} + +// UpdateWorkGoVersion updates the go line in wf to be at least goVers, +// reporting whether it changed the file. +func UpdateWorkGoVersion(wf *modfile.WorkFile, goVers string) (changed bool) { + old := gover.FromGoWork(wf) + if gover.Compare(old, goVers) >= 0 { + return false + } + + wf.AddGoStmt(goVers) + + // We wrote a new go line. For reproducibility, + // if the toolchain running right now is newer than the new toolchain line, + // update the toolchain line to record the newer toolchain. + // The user never sets the toolchain explicitly in a 'go work' command, + // so this is only happening as a result of a go or toolchain line found + // in a module. + // If the toolchain running right now is a dev toolchain (like "go1.21") + // writing 'toolchain go1.21' will not be useful, since that's not an actual + // toolchain you can download and run. In that case fall back to at least + // checking that the toolchain is new enough for the Go version. 
+ toolchain := "go" + old + if wf.Toolchain != nil { + toolchain = wf.Toolchain.Name + } + if gover.IsLang(gover.Local()) { + toolchain = gover.ToolchainMax(toolchain, "go"+goVers) + } else { + toolchain = gover.ToolchainMax(toolchain, "go"+gover.Local()) + } + + // Drop the toolchain line if it is implied by the go line + // or if it is asking for a toolchain older than Go 1.21, + // which will not understand the toolchain line. + if toolchain == "go"+goVers || gover.Compare(gover.FromToolchain(toolchain), gover.GoStrictVersion) < 0 { + wf.DropToolchainStmt() + } else { + wf.AddToolchainStmt(toolchain) + } + return true +} + +// UpdateWorkFile updates comments on directory directives in the go.work +// file to include the associated module path. +func UpdateWorkFile(wf *modfile.WorkFile) { + missingModulePaths := map[string]string{} // module directory listed in file -> abspath modroot + + for _, d := range wf.Use { + if d.Path == "" { + continue // d is marked for deletion. + } + modRoot := d.Path + if d.ModulePath == "" { + missingModulePaths[d.Path] = modRoot + } + } + + // Clean up and annotate directories. + // TODO(matloob): update x/mod to actually add module paths. + for moddir, absmodroot := range missingModulePaths { + _, f, err := ReadModFile(filepath.Join(absmodroot, "go.mod"), nil) + if err != nil { + continue // Error will be reported if modules are loaded. + } + wf.AddUse(moddir, f.Module.Mod.Path) + } +} + +// LoadModFile sets Target and, if there is a main module, parses the initial +// build list from its go.mod file. +// +// LoadModFile may make changes in memory, like adding a go directive and +// ensuring requirements are consistent. The caller is responsible for ensuring +// those changes are written to disk by calling LoadPackages or ListModules +// (unless ExplicitWriteGoMod is set) or by calling WriteGoMod directly. 
+// +// As a side-effect, LoadModFile may change cfg.BuildMod to "vendor" if +// -mod wasn't set explicitly and automatic vendoring should be enabled. +// +// If LoadModFile or CreateModFile has already been called, LoadModFile returns +// the existing in-memory requirements (rather than re-reading them from disk). +// +// LoadModFile checks the roots of the module graph for consistency with each +// other, but unlike LoadModGraph does not load the full module graph or check +// it for global consistency. Most callers outside of the modload package should +// use LoadModGraph instead. +func LoadModFile(ctx context.Context) *Requirements { + rs, err := loadModFile(ctx, nil) + if err != nil { + base.Fatal(err) + } + return rs +} + +func loadModFile(ctx context.Context, opts *PackageOpts) (*Requirements, error) { + if requirements != nil { + return requirements, nil + } + + Init() + var workFile *modfile.WorkFile + if inWorkspaceMode() { + var err error + workFile, modRoots, err = loadWorkFile(workFilePath) + if err != nil { + return nil, fmt.Errorf("reading go.work: %w", err) + } + for _, modRoot := range modRoots { + sumFile := strings.TrimSuffix(modFilePath(modRoot), ".mod") + ".sum" + modfetch.WorkspaceGoSumFiles = append(modfetch.WorkspaceGoSumFiles, sumFile) + } + modfetch.GoSumFile = workFilePath + ".sum" + } else if len(modRoots) == 0 { + // We're in module mode, but not inside a module. + // + // Commands like 'go build', 'go run', 'go list' have no go.mod file to + // read or write. They would need to find and download the latest versions + // of a potentially large number of modules with no way to save version + // information. We can succeed slowly (but not reproducibly), but that's + // not usually a good experience. + // + // Instead, we forbid resolving import paths to modules other than std and + // cmd. 
Users may still build packages specified with .go files on the + // command line, but they'll see an error if those files import anything + // outside std. + // + // This can be overridden by calling AllowMissingModuleImports. + // For example, 'go get' does this, since it is expected to resolve paths. + // + // See golang.org/issue/32027. + } else { + modfetch.GoSumFile = strings.TrimSuffix(modFilePath(modRoots[0]), ".mod") + ".sum" + } + if len(modRoots) == 0 { + // TODO(#49228): Instead of creating a fake module with an empty modroot, + // make MainModules.Len() == 0 mean that we're in module mode but not inside + // any module. + mainModule := module.Version{Path: "command-line-arguments"} + MainModules = makeMainModules([]module.Version{mainModule}, []string{""}, []*modfile.File{nil}, []*modFileIndex{nil}, nil) + var ( + goVersion string + pruning modPruning + roots []module.Version + direct = map[string]bool{"go": true} + ) + if inWorkspaceMode() { + // Since we are in a workspace, the Go version for the synthetic + // "command-line-arguments" module must not exceed the Go version + // for the workspace. + goVersion = MainModules.GoVersion() + pruning = workspace + roots = []module.Version{ + mainModule, + {Path: "go", Version: goVersion}, + {Path: "toolchain", Version: gover.LocalToolchain()}, + } + } else { + goVersion = gover.Local() + pruning = pruningForGoVersion(goVersion) + roots = []module.Version{ + {Path: "go", Version: goVersion}, + {Path: "toolchain", Version: gover.LocalToolchain()}, + } + } + rawGoVersion.Store(mainModule, goVersion) + requirements = newRequirements(pruning, roots, direct) + if cfg.BuildMod == "vendor" { + // For issue 56536: Some users may have GOFLAGS=-mod=vendor set. + // Make sure it behaves as though the fake module is vendored + // with no dependencies. 
+ requirements.initVendor(nil) + } + return requirements, nil + } + + var modFiles []*modfile.File + var mainModules []module.Version + var indices []*modFileIndex + var errs []error + for _, modroot := range modRoots { + gomod := modFilePath(modroot) + var fixed bool + data, f, err := ReadModFile(gomod, fixVersion(ctx, &fixed)) + if err != nil { + if inWorkspaceMode() { + if tooNew, ok := err.(*gover.TooNewError); ok && !strings.HasPrefix(cfg.CmdName, "work ") { + // Switching to a newer toolchain won't help - the go.work has the wrong version. + // Report this more specific error, unless we are a command like 'go work use' + // or 'go work sync', which will fix the problem after the caller sees the TooNewError + // and switches to a newer toolchain. + err = errWorkTooOld(gomod, workFile, tooNew.GoVersion) + } else { + err = fmt.Errorf("cannot load module %s listed in go.work file: %w", + base.ShortPath(filepath.Dir(gomod)), err) + } + } + errs = append(errs, err) + continue + } + if inWorkspaceMode() && !strings.HasPrefix(cfg.CmdName, "work ") { + // Refuse to use workspace if its go version is too old. + // Disable this check if we are a workspace command like work use or work sync, + // which will fix the problem. + mv := gover.FromGoMod(f) + wv := gover.FromGoWork(workFile) + if gover.Compare(mv, wv) > 0 && gover.Compare(mv, gover.GoStrictVersion) >= 0 { + errs = append(errs, errWorkTooOld(gomod, workFile, mv)) + continue + } + } + + modFiles = append(modFiles, f) + mainModule := f.Module.Mod + mainModules = append(mainModules, mainModule) + indices = append(indices, indexModFile(data, f, mainModule, fixed)) + + if err := module.CheckImportPath(f.Module.Mod.Path); err != nil { + if pathErr, ok := err.(*module.InvalidPathError); ok { + pathErr.Kind = "module" + } + errs = append(errs, err) + } + } + if len(errs) > 0 { + return nil, errors.Join(errs...) 
+ } + + MainModules = makeMainModules(mainModules, modRoots, modFiles, indices, workFile) + setDefaultBuildMod() // possibly enable automatic vendoring + rs := requirementsFromModFiles(ctx, workFile, modFiles, opts) + + if cfg.BuildMod == "vendor" { + readVendorList(VendorDir()) + var indexes []*modFileIndex + var modFiles []*modfile.File + var modRoots []string + for _, m := range MainModules.Versions() { + indexes = append(indexes, MainModules.Index(m)) + modFiles = append(modFiles, MainModules.ModFile(m)) + modRoots = append(modRoots, MainModules.ModRoot(m)) + } + checkVendorConsistency(indexes, modFiles, modRoots) + rs.initVendor(vendorList) + } + + if inWorkspaceMode() { + // We don't need to update the mod file so return early. + requirements = rs + return rs, nil + } + + mainModule := MainModules.mustGetSingleMainModule() + + if rs.hasRedundantRoot() { + // If any module path appears more than once in the roots, we know that the + // go.mod file needs to be updated even though we have not yet loaded any + // transitive dependencies. + var err error + rs, err = updateRoots(ctx, rs.direct, rs, nil, nil, false) + if err != nil { + return nil, err + } + } + + if MainModules.Index(mainModule).goVersion == "" && rs.pruning != workspace { + // TODO(#45551): Do something more principled instead of checking + // cfg.CmdName directly here. + if cfg.BuildMod == "mod" && cfg.CmdName != "mod graph" && cfg.CmdName != "mod why" { + // go line is missing from go.mod; add one there and add to derived requirements. + v := gover.Local() + if opts != nil && opts.TidyGoVersion != "" { + v = opts.TidyGoVersion + } + addGoStmt(MainModules.ModFile(mainModule), mainModule, v) + rs = overrideRoots(ctx, rs, []module.Version{{Path: "go", Version: v}}) + + // We need to add a 'go' version to the go.mod file, but we must assume + // that its existing contents match something between Go 1.11 and 1.16. 
+ // Go 1.11 through 1.16 do not support graph pruning, but the latest Go + // version uses a pruned module graph — so we need to convert the + // requirements to support pruning. + if gover.Compare(v, gover.ExplicitIndirectVersion) >= 0 { + var err error + rs, err = convertPruning(ctx, rs, pruned) + if err != nil { + return nil, err + } + } + } else { + rawGoVersion.Store(mainModule, gover.DefaultGoModVersion) + } + } + + requirements = rs + return requirements, nil +} + +func errWorkTooOld(gomod string, wf *modfile.WorkFile, goVers string) error { + return fmt.Errorf("module %s listed in go.work file requires go >= %s, but go.work lists go %s; to update it:\n\tgo work use", + base.ShortPath(filepath.Dir(gomod)), goVers, gover.FromGoWork(wf)) +} + +// CreateModFile initializes a new module by creating a go.mod file. +// +// If modPath is empty, CreateModFile will attempt to infer the path from the +// directory location within GOPATH. +// +// If a vendoring configuration file is present, CreateModFile will attempt to +// translate it to go.mod directives. The resulting build list may not be +// exactly the same as in the legacy configuration (for example, we can't get +// packages at multiple versions from the same module). +func CreateModFile(ctx context.Context, modPath string) { + modRoot := base.Cwd() + modRoots = []string{modRoot} + Init() + modFilePath := modFilePath(modRoot) + if _, err := fsys.Stat(modFilePath); err == nil { + base.Fatalf("go: %s already exists", modFilePath) + } + + if modPath == "" { + var err error + modPath, err = findModulePath(modRoot) + if err != nil { + base.Fatal(err) + } + } else if err := module.CheckImportPath(modPath); err != nil { + if pathErr, ok := err.(*module.InvalidPathError); ok { + pathErr.Kind = "module" + // Same as build.IsLocalPath() + if pathErr.Path == "." || pathErr.Path == ".." 
|| + strings.HasPrefix(pathErr.Path, "./") || strings.HasPrefix(pathErr.Path, "../") { + pathErr.Err = errors.New("is a local import path") + } + } + base.Fatal(err) + } else if _, _, ok := module.SplitPathVersion(modPath); !ok { + if strings.HasPrefix(modPath, "gopkg.in/") { + invalidMajorVersionMsg := fmt.Errorf("module paths beginning with gopkg.in/ must always have a major version suffix in the form of .vN:\n\tgo mod init %s", suggestGopkgIn(modPath)) + base.Fatalf(`go: invalid module path "%v": %v`, modPath, invalidMajorVersionMsg) + } + invalidMajorVersionMsg := fmt.Errorf("major version suffixes must be in the form of /vN and are only allowed for v2 or later:\n\tgo mod init %s", suggestModulePath(modPath)) + base.Fatalf(`go: invalid module path "%v": %v`, modPath, invalidMajorVersionMsg) + } + + fmt.Fprintf(os.Stderr, "go: creating new go.mod: module %s\n", modPath) + modFile := new(modfile.File) + modFile.AddModuleStmt(modPath) + MainModules = makeMainModules([]module.Version{modFile.Module.Mod}, []string{modRoot}, []*modfile.File{modFile}, []*modFileIndex{nil}, nil) + addGoStmt(modFile, modFile.Module.Mod, gover.Local()) // Add the go directive before converted module requirements. + + rs := requirementsFromModFiles(ctx, nil, []*modfile.File{modFile}, nil) + rs, err := updateRoots(ctx, rs.direct, rs, nil, nil, false) + if err != nil { + base.Fatal(err) + } + requirements = rs + if err := commitRequirements(ctx, WriteOpts{}); err != nil { + base.Fatal(err) + } + + // Suggest running 'go mod tidy' unless the project is empty. Even if we + // imported all the correct requirements above, we're probably missing + // some sums, so the next build command in -mod=readonly will likely fail. + // + // We look for non-hidden .go files or subdirectories to determine whether + // this is an existing project. Walking the tree for packages would be more + // accurate, but could take much longer. 
+ empty := true + files, _ := os.ReadDir(modRoot) + for _, f := range files { + name := f.Name() + if strings.HasPrefix(name, ".") || strings.HasPrefix(name, "_") { + continue + } + if strings.HasSuffix(name, ".go") || f.IsDir() { + empty = false + break + } + } + if !empty { + fmt.Fprintf(os.Stderr, "go: to add module requirements and sums:\n\tgo mod tidy\n") + } +} + +// fixVersion returns a modfile.VersionFixer implemented using the Query function. +// +// It resolves commit hashes and branch names to versions, +// canonicalizes versions that appeared in early vgo drafts, +// and does nothing for versions that already appear to be canonical. +// +// The VersionFixer sets 'fixed' if it ever returns a non-canonical version. +func fixVersion(ctx context.Context, fixed *bool) modfile.VersionFixer { + return func(path, vers string) (resolved string, err error) { + defer func() { + if err == nil && resolved != vers { + *fixed = true + } + }() + + // Special case: remove the old -gopkgin- hack. + if strings.HasPrefix(path, "gopkg.in/") && strings.Contains(vers, "-gopkgin-") { + vers = vers[strings.Index(vers, "-gopkgin-")+len("-gopkgin-"):] + } + + // fixVersion is called speculatively on every + // module, version pair from every go.mod file. + // Avoid the query if it looks OK. + _, pathMajor, ok := module.SplitPathVersion(path) + if !ok { + return "", &module.ModuleError{ + Path: path, + Err: &module.InvalidVersionError{ + Version: vers, + Err: fmt.Errorf("malformed module path %q", path), + }, + } + } + if vers != "" && module.CanonicalVersion(vers) == vers { + if err := module.CheckPathMajor(vers, pathMajor); err != nil { + return "", module.VersionError(module.Version{Path: path, Version: vers}, err) + } + return vers, nil + } + + info, err := Query(ctx, path, vers, "", nil) + if err != nil { + return "", err + } + return info.Version, nil + } +} + +// AllowMissingModuleImports allows import paths to be resolved to modules +// when there is no module root. 
Normally, this is forbidden because it's slow +// and there's no way to make the result reproducible, but some commands +// like 'go get' are expected to do this. +// +// This function affects the default cfg.BuildMod when outside of a module, +// so it can only be called prior to Init. +func AllowMissingModuleImports() { + if initialized { + panic("AllowMissingModuleImports after Init") + } + allowMissingModuleImports = true +} + +// makeMainModules creates a MainModuleSet and associated variables according to +// the given main modules. +func makeMainModules(ms []module.Version, rootDirs []string, modFiles []*modfile.File, indices []*modFileIndex, workFile *modfile.WorkFile) *MainModuleSet { + for _, m := range ms { + if m.Version != "" { + panic("mainModulesCalled with module.Version with non empty Version field: " + fmt.Sprintf("%#v", m)) + } + } + modRootContainingCWD := findModuleRoot(base.Cwd()) + mainModules := &MainModuleSet{ + versions: slices.Clip(ms), + inGorootSrc: map[module.Version]bool{}, + pathPrefix: map[module.Version]string{}, + modRoot: map[module.Version]string{}, + modFiles: map[module.Version]*modfile.File{}, + indices: map[module.Version]*modFileIndex{}, + highestReplaced: map[string]string{}, + workFile: workFile, + } + var workFileReplaces []*modfile.Replace + if workFile != nil { + workFileReplaces = workFile.Replace + mainModules.workFileReplaceMap = toReplaceMap(workFile.Replace) + } + mainModulePaths := make(map[string]bool) + for _, m := range ms { + if mainModulePaths[m.Path] { + base.Errorf("go: module %s appears multiple times in workspace", m.Path) + } + mainModulePaths[m.Path] = true + } + replacedByWorkFile := make(map[string]bool) + replacements := make(map[module.Version]module.Version) + for _, r := range workFileReplaces { + if mainModulePaths[r.Old.Path] && r.Old.Version == "" { + base.Errorf("go: workspace module %v is replaced at all versions in the go.work file. 
To fix, remove the replacement from the go.work file or specify the version at which to replace the module.", r.Old.Path) + } + replacedByWorkFile[r.Old.Path] = true + v, ok := mainModules.highestReplaced[r.Old.Path] + if !ok || gover.ModCompare(r.Old.Path, r.Old.Version, v) > 0 { + mainModules.highestReplaced[r.Old.Path] = r.Old.Version + } + replacements[r.Old] = r.New + } + for i, m := range ms { + mainModules.pathPrefix[m] = m.Path + mainModules.modRoot[m] = rootDirs[i] + mainModules.modFiles[m] = modFiles[i] + mainModules.indices[m] = indices[i] + + if mainModules.modRoot[m] == modRootContainingCWD { + mainModules.modContainingCWD = m + } + + if rel := search.InDir(rootDirs[i], cfg.GOROOTsrc); rel != "" { + mainModules.inGorootSrc[m] = true + if m.Path == "std" { + // The "std" module in GOROOT/src is the Go standard library. Unlike other + // modules, the packages in the "std" module have no import-path prefix. + // + // Modules named "std" outside of GOROOT/src do not receive this special + // treatment, so it is possible to run 'go test .' in other GOROOTs to + // test individual packages using a combination of the modified package + // and the ordinary standard library. + // (See https://golang.org/issue/30756.) + mainModules.pathPrefix[m] = "" + } + } + + if modFiles[i] != nil { + curModuleReplaces := make(map[module.Version]bool) + for _, r := range modFiles[i].Replace { + if replacedByWorkFile[r.Old.Path] { + continue + } + var newV module.Version = r.New + if WorkFilePath() != "" && newV.Version == "" && !filepath.IsAbs(newV.Path) { + // Since we are in a workspace, we may be loading replacements from + // multiple go.mod files. Relative paths in those replacement are + // relative to the go.mod file, not the workspace, so the same string + // may refer to two different paths and different strings may refer to + // the same path. Convert them all to be absolute instead. 
+ // + // (We could do this outside of a workspace too, but it would mean that + // replacement paths in error strings needlessly differ from what's in + // the go.mod file.) + newV.Path = filepath.Join(rootDirs[i], newV.Path) + } + if prev, ok := replacements[r.Old]; ok && !curModuleReplaces[r.Old] && prev != newV { + base.Fatalf("go: conflicting replacements for %v:\n\t%v\n\t%v\nuse \"go work edit -replace %v=[override]\" to resolve", r.Old, prev, newV, r.Old) + } + curModuleReplaces[r.Old] = true + replacements[r.Old] = newV + + v, ok := mainModules.highestReplaced[r.Old.Path] + if !ok || gover.ModCompare(r.Old.Path, r.Old.Version, v) > 0 { + mainModules.highestReplaced[r.Old.Path] = r.Old.Version + } + } + } + } + return mainModules +} + +// requirementsFromModFiles returns the set of non-excluded requirements from +// the global modFile. +func requirementsFromModFiles(ctx context.Context, workFile *modfile.WorkFile, modFiles []*modfile.File, opts *PackageOpts) *Requirements { + var roots []module.Version + direct := map[string]bool{} + var pruning modPruning + if inWorkspaceMode() { + pruning = workspace + roots = make([]module.Version, len(MainModules.Versions()), 2+len(MainModules.Versions())) + copy(roots, MainModules.Versions()) + goVersion := gover.FromGoWork(workFile) + var toolchain string + if workFile.Toolchain != nil { + toolchain = workFile.Toolchain.Name + } + roots = appendGoAndToolchainRoots(roots, goVersion, toolchain, direct) + } else { + pruning = pruningForGoVersion(MainModules.GoVersion()) + if len(modFiles) != 1 { + panic(fmt.Errorf("requirementsFromModFiles called with %v modfiles outside workspace mode", len(modFiles))) + } + modFile := modFiles[0] + roots, direct = rootsFromModFile(MainModules.mustGetSingleMainModule(), modFile, withToolchainRoot) + } + + gover.ModSort(roots) + rs := newRequirements(pruning, roots, direct) + return rs +} + +type addToolchainRoot bool + +const ( + omitToolchainRoot addToolchainRoot = false + 
withToolchainRoot = true +) + +func rootsFromModFile(m module.Version, modFile *modfile.File, addToolchainRoot addToolchainRoot) (roots []module.Version, direct map[string]bool) { + direct = make(map[string]bool) + padding := 2 // Add padding for the toolchain and go version, added upon return. + if !addToolchainRoot { + padding = 1 + } + roots = make([]module.Version, 0, padding+len(modFile.Require)) + for _, r := range modFile.Require { + if index := MainModules.Index(m); index != nil && index.exclude[r.Mod] { + if cfg.BuildMod == "mod" { + fmt.Fprintf(os.Stderr, "go: dropping requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version) + } else { + fmt.Fprintf(os.Stderr, "go: ignoring requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version) + } + continue + } + + roots = append(roots, r.Mod) + if !r.Indirect { + direct[r.Mod.Path] = true + } + } + goVersion := gover.FromGoMod(modFile) + var toolchain string + if addToolchainRoot && modFile.Toolchain != nil { + toolchain = modFile.Toolchain.Name + } + roots = appendGoAndToolchainRoots(roots, goVersion, toolchain, direct) + return roots, direct +} + +func appendGoAndToolchainRoots(roots []module.Version, goVersion, toolchain string, direct map[string]bool) []module.Version { + // Add explicit go and toolchain versions, inferring as needed. + roots = append(roots, module.Version{Path: "go", Version: goVersion}) + direct["go"] = true // Every module directly uses the language and runtime. + + if toolchain != "" { + roots = append(roots, module.Version{Path: "toolchain", Version: toolchain}) + // Leave the toolchain as indirect: nothing in the user's module directly + // imports a package from the toolchain, and (like an indirect dependency in + // a module without graph pruning) we may remove the toolchain line + // automatically if the 'go' version is changed so that it implies the exact + // same toolchain. 
+ } + return roots +} + +// setDefaultBuildMod sets a default value for cfg.BuildMod if the -mod flag +// wasn't provided. setDefaultBuildMod may be called multiple times. +func setDefaultBuildMod() { + if cfg.BuildModExplicit { + if inWorkspaceMode() && cfg.BuildMod != "readonly" && cfg.BuildMod != "vendor" { + base.Fatalf("go: -mod may only be set to readonly or vendor when in workspace mode, but it is set to %q"+ + "\n\tRemove the -mod flag to use the default readonly value, "+ + "\n\tor set GOWORK=off to disable workspace mode.", cfg.BuildMod) + } + // Don't override an explicit '-mod=' argument. + return + } + + // TODO(#40775): commands should pass in the module mode as an option + // to modload functions instead of relying on an implicit setting + // based on command name. + switch cfg.CmdName { + case "get", "mod download", "mod init", "mod tidy", "work sync": + // These commands are intended to update go.mod and go.sum. + cfg.BuildMod = "mod" + return + case "mod graph", "mod verify", "mod why": + // These commands should not update go.mod or go.sum, but they should be + // able to fetch modules not in go.sum and should not report errors if + // go.mod is inconsistent. They're useful for debugging, and they need + // to work in buggy situations. 
+ cfg.BuildMod = "mod" + return + case "mod vendor", "work vendor": + cfg.BuildMod = "readonly" + return + } + if modRoots == nil { + if allowMissingModuleImports { + cfg.BuildMod = "mod" + } else { + cfg.BuildMod = "readonly" + } + return + } + + if len(modRoots) >= 1 { + var goVersion string + var versionSource string + if inWorkspaceMode() { + versionSource = "go.work" + if wfg := MainModules.WorkFile().Go; wfg != nil { + goVersion = wfg.Version + } + } else { + versionSource = "go.mod" + index := MainModules.GetSingleIndexOrNil() + if index != nil { + goVersion = index.goVersion + } + } + vendorDir := "" + if workFilePath != "" { + vendorDir = filepath.Join(filepath.Dir(workFilePath), "vendor") + } else { + if len(modRoots) != 1 { + panic(fmt.Errorf("outside workspace mode, but have %v modRoots", modRoots)) + } + vendorDir = filepath.Join(modRoots[0], "vendor") + } + if fi, err := fsys.Stat(vendorDir); err == nil && fi.IsDir() { + modGo := "unspecified" + if goVersion != "" { + if gover.Compare(goVersion, "1.14") < 0 { + // The go version is less than 1.14. Don't set -mod=vendor by default. + // Since a vendor directory exists, we should record why we didn't use it. + // This message won't normally be shown, but it may appear with import errors. + cfg.BuildModReason = fmt.Sprintf("Go version in "+versionSource+" is %s, so vendor directory was not used.", modGo) + } else { + vendoredWorkspace, err := modulesTextIsForWorkspace(vendorDir) + if err != nil { + base.Fatalf("go: reading modules.txt for vendor directory: %v", err) + } + if vendoredWorkspace != (versionSource == "go.work") { + if vendoredWorkspace { + cfg.BuildModReason = "Outside workspace mode, but vendor directory is for a workspace." + } else { + cfg.BuildModReason = "In workspace mode, but vendor directory is not for a workspace" + } + } else { + // The Go version is at least 1.14, a vendor directory exists, and + // the modules.txt was generated in the same mode the command is running in. 
+ // Set -mod=vendor by default. + cfg.BuildMod = "vendor" + cfg.BuildModReason = "Go version in " + versionSource + " is at least 1.14 and vendor directory exists." + return + } + } + modGo = goVersion + } + + } + } + + cfg.BuildMod = "readonly" +} + +func modulesTextIsForWorkspace(vendorDir string) (bool, error) { + f, err := fsys.Open(filepath.Join(vendorDir, "modules.txt")) + if errors.Is(err, os.ErrNotExist) { + // Some vendor directories exist that don't contain modules.txt. + // This mostly happens when converting to modules. + // We want to preserve the behavior that mod=vendor is set (even though + // readVendorList does nothing in that case). + return false, nil + } + if err != nil { + return false, err + } + var buf [512]byte + n, err := f.Read(buf[:]) + if err != nil && err != io.EOF { + return false, err + } + line, _, _ := strings.Cut(string(buf[:n]), "\n") + if annotations, ok := strings.CutPrefix(line, "## "); ok { + for _, entry := range strings.Split(annotations, ";") { + entry = strings.TrimSpace(entry) + if entry == "workspace" { + return true, nil + } + } + } + return false, nil +} + +func mustHaveCompleteRequirements() bool { + return cfg.BuildMod != "mod" && !inWorkspaceMode() +} + +// addGoStmt adds a go directive to the go.mod file if it does not already +// include one. The 'go' version added, if any, is the latest version supported +// by this toolchain. +func addGoStmt(modFile *modfile.File, mod module.Version, v string) { + if modFile.Go != nil && modFile.Go.Version != "" { + return + } + forceGoStmt(modFile, mod, v) +} + +func forceGoStmt(modFile *modfile.File, mod module.Version, v string) { + if err := modFile.AddGoStmt(v); err != nil { + base.Fatalf("go: internal error: %v", err) + } + rawGoVersion.Store(mod, v) +} + +var altConfigs = []string{ + ".git/config", +} + +func findModuleRoot(dir string) (roots string) { + if dir == "" { + panic("dir not set") + } + dir = filepath.Clean(dir) + + // Look for enclosing go.mod. 
+ for { + if fi, err := fsys.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() { + return dir + } + d := filepath.Dir(dir) + if d == dir { + break + } + dir = d + } + return "" +} + +func findWorkspaceFile(dir string) (root string) { + if dir == "" { + panic("dir not set") + } + dir = filepath.Clean(dir) + + // Look for enclosing go.mod. + for { + f := filepath.Join(dir, "go.work") + if fi, err := fsys.Stat(f); err == nil && !fi.IsDir() { + return f + } + d := filepath.Dir(dir) + if d == dir { + break + } + if d == cfg.GOROOT { + // As a special case, don't cross GOROOT to find a go.work file. + // The standard library and commands built in go always use the vendored + // dependencies, so avoid using a most likely irrelevant go.work file. + return "" + } + dir = d + } + return "" +} + +func findAltConfig(dir string) (root, name string) { + if dir == "" { + panic("dir not set") + } + dir = filepath.Clean(dir) + if rel := search.InDir(dir, cfg.BuildContext.GOROOT); rel != "" { + // Don't suggest creating a module from $GOROOT/.git/config + // or a config file found in any parent of $GOROOT (see #34191). + return "", "" + } + for { + for _, name := range altConfigs { + if fi, err := fsys.Stat(filepath.Join(dir, name)); err == nil && !fi.IsDir() { + return dir, name + } + } + d := filepath.Dir(dir) + if d == dir { + break + } + dir = d + } + return "", "" +} + +func findModulePath(dir string) (string, error) { + // TODO(bcmills): once we have located a plausible module path, we should + // query version control (if available) to verify that it matches the major + // version of the most recent tag. + // See https://golang.org/issue/29433, https://golang.org/issue/27009, and + // https://golang.org/issue/31549. + + // Cast about for import comments, + // first in top-level directory, then in subdirectories. 
+ list, _ := os.ReadDir(dir) + for _, info := range list { + if info.Type().IsRegular() && strings.HasSuffix(info.Name(), ".go") { + if com := findImportComment(filepath.Join(dir, info.Name())); com != "" { + return com, nil + } + } + } + for _, info1 := range list { + if info1.IsDir() { + files, _ := os.ReadDir(filepath.Join(dir, info1.Name())) + for _, info2 := range files { + if info2.Type().IsRegular() && strings.HasSuffix(info2.Name(), ".go") { + if com := findImportComment(filepath.Join(dir, info1.Name(), info2.Name())); com != "" { + return path.Dir(com), nil + } + } + } + } + } + + // Look for Godeps.json declaring import path. + data, _ := os.ReadFile(filepath.Join(dir, "Godeps/Godeps.json")) + var cfg1 struct{ ImportPath string } + json.Unmarshal(data, &cfg1) + if cfg1.ImportPath != "" { + return cfg1.ImportPath, nil + } + + // Look for vendor.json declaring import path. + data, _ = os.ReadFile(filepath.Join(dir, "vendor/vendor.json")) + var cfg2 struct{ RootPath string } + json.Unmarshal(data, &cfg2) + if cfg2.RootPath != "" { + return cfg2.RootPath, nil + } + + // Look for path in GOPATH. + var badPathErr error + for _, gpdir := range filepath.SplitList(cfg.BuildContext.GOPATH) { + if gpdir == "" { + continue + } + if rel := search.InDir(dir, filepath.Join(gpdir, "src")); rel != "" && rel != "." { + path := filepath.ToSlash(rel) + // gorelease will alert users publishing their modules to fix their paths. + if err := module.CheckImportPath(path); err != nil { + badPathErr = err + break + } + return path, nil + } + } + + reason := "outside GOPATH, module path must be specified" + if badPathErr != nil { + // return a different error message if the module was in GOPATH, but + // the module path determined above would be an invalid path. 
+ reason = fmt.Sprintf("bad module path inferred from directory in GOPATH: %v", badPathErr) + } + msg := `cannot determine module path for source directory %s (%s) + +Example usage: + 'go mod init example.com/m' to initialize a v0 or v1 module + 'go mod init example.com/m/v2' to initialize a v2 module + +Run 'go help mod init' for more information. +` + return "", fmt.Errorf(msg, dir, reason) +} + +var ( + importCommentRE = lazyregexp.New(`(?m)^package[ \t]+[^ \t\r\n/]+[ \t]+//[ \t]+import[ \t]+(\"[^"]+\")[ \t]*\r?\n`) +) + +func findImportComment(file string) string { + data, err := os.ReadFile(file) + if err != nil { + return "" + } + m := importCommentRE.FindSubmatch(data) + if m == nil { + return "" + } + path, err := strconv.Unquote(string(m[1])) + if err != nil { + return "" + } + return path +} + +// WriteOpts control the behavior of WriteGoMod. +type WriteOpts struct { + DropToolchain bool // go get toolchain@none + ExplicitToolchain bool // go get has set explicit toolchain version + + // TODO(bcmills): Make 'go mod tidy' update the go version in the Requirements + // instead of writing directly to the modfile.File + TidyWroteGo bool // Go.Version field already updated by 'go mod tidy' +} + +// WriteGoMod writes the current build list back to go.mod. +func WriteGoMod(ctx context.Context, opts WriteOpts) error { + requirements = LoadModFile(ctx) + return commitRequirements(ctx, opts) +} + +// commitRequirements ensures go.mod and go.sum are up to date with the current +// requirements. +// +// In "mod" mode, commitRequirements writes changes to go.mod and go.sum. +// +// In "readonly" and "vendor" modes, commitRequirements returns an error if +// go.mod or go.sum are out of date in a semantically significant way. +// +// In workspace mode, commitRequirements only writes changes to go.work.sum. 
+func commitRequirements(ctx context.Context, opts WriteOpts) (err error) { + if inWorkspaceMode() { + // go.mod files aren't updated in workspace mode, but we still want to + // update the go.work.sum file. + return modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, requirements, addBuildListZipSums), mustHaveCompleteRequirements()) + } + if MainModules.Len() != 1 || MainModules.ModRoot(MainModules.Versions()[0]) == "" { + // We aren't in a module, so we don't have anywhere to write a go.mod file. + return nil + } + mainModule := MainModules.mustGetSingleMainModule() + modFile := MainModules.ModFile(mainModule) + if modFile == nil { + // command-line-arguments has no .mod file to write. + return nil + } + modFilePath := modFilePath(MainModules.ModRoot(mainModule)) + + var list []*modfile.Require + toolchain := "" + goVersion := "" + for _, m := range requirements.rootModules { + if m.Path == "go" { + goVersion = m.Version + continue + } + if m.Path == "toolchain" { + toolchain = m.Version + continue + } + list = append(list, &modfile.Require{ + Mod: m, + Indirect: !requirements.direct[m.Path], + }) + } + + // Update go line. + // Every MVS graph we consider should have go as a root, + // and toolchain is either implied by the go line or explicitly a root. + if goVersion == "" { + base.Fatalf("go: internal error: missing go root module in WriteGoMod") + } + if gover.Compare(goVersion, gover.Local()) > 0 { + // We cannot assume that we know how to update a go.mod to a newer version. 
+ return &gover.TooNewError{What: "updating go.mod", GoVersion: goVersion} + } + wroteGo := opts.TidyWroteGo + if !wroteGo && modFile.Go == nil || modFile.Go.Version != goVersion { + alwaysUpdate := cfg.BuildMod == "mod" || cfg.CmdName == "mod tidy" || cfg.CmdName == "get" + if modFile.Go == nil && goVersion == gover.DefaultGoModVersion && !alwaysUpdate { + // The go.mod has no go line, the implied default Go version matches + // what we've computed for the graph, and we're not in one of the + // traditional go.mod-updating programs, so leave it alone. + } else { + wroteGo = true + forceGoStmt(modFile, mainModule, goVersion) + } + } + if toolchain == "" { + toolchain = "go" + goVersion + } + + // For reproducibility, if we are writing a new go line, + // and we're not explicitly modifying the toolchain line with 'go get toolchain@something', + // and the go version is one that supports switching toolchains, + // and the toolchain running right now is newer than the current toolchain line, + // then update the toolchain line to record the newer toolchain. + // + // TODO(#57001): This condition feels too complicated. Can we simplify it? + // TODO(#57001): Add more tests for toolchain lines. + toolVers := gover.FromToolchain(toolchain) + if wroteGo && !opts.DropToolchain && !opts.ExplicitToolchain && + gover.Compare(goVersion, gover.GoStrictVersion) >= 0 && + (gover.Compare(gover.Local(), toolVers) > 0 && !gover.IsLang(gover.Local())) { + toolchain = "go" + gover.Local() + toolVers = gover.FromToolchain(toolchain) + } + + if opts.DropToolchain || toolchain == "go"+goVersion || (gover.Compare(toolVers, gover.GoStrictVersion) < 0 && !opts.ExplicitToolchain) { + // go get toolchain@none or toolchain matches go line or isn't valid; drop it. + // TODO(#57001): 'go get' should reject explicit toolchains below GoStrictVersion. + modFile.DropToolchainStmt() + } else { + modFile.AddToolchainStmt(toolchain) + } + + // Update require blocks. 
+ if gover.Compare(goVersion, gover.SeparateIndirectVersion) < 0 { + modFile.SetRequire(list) + } else { + modFile.SetRequireSeparateIndirect(list) + } + modFile.Cleanup() + + index := MainModules.GetSingleIndexOrNil() + dirty := index.modFileIsDirty(modFile) + if dirty && cfg.BuildMod != "mod" { + // If we're about to fail due to -mod=readonly, + // prefer to report a dirty go.mod over a dirty go.sum + return errGoModDirty + } + + if !dirty && cfg.CmdName != "mod tidy" { + // The go.mod file has the same semantic content that it had before + // (but not necessarily the same exact bytes). + // Don't write go.mod, but write go.sum in case we added or trimmed sums. + // 'go mod init' shouldn't write go.sum, since it will be incomplete. + if cfg.CmdName != "mod init" { + if err := modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, requirements, addBuildListZipSums), mustHaveCompleteRequirements()); err != nil { + return err + } + } + return nil + } + if _, ok := fsys.OverlayPath(modFilePath); ok { + if dirty { + return errors.New("updates to go.mod needed, but go.mod is part of the overlay specified with -overlay") + } + return nil + } + + new, err := modFile.Format() + if err != nil { + return err + } + defer func() { + // At this point we have determined to make the go.mod file on disk equal to new. + MainModules.SetIndex(mainModule, indexModFile(new, modFile, mainModule, false)) + + // Update go.sum after releasing the side lock and refreshing the index. + // 'go mod init' shouldn't write go.sum, since it will be incomplete. + if cfg.CmdName != "mod init" { + if err == nil { + err = modfetch.WriteGoSum(ctx, keepSums(ctx, loaded, requirements, addBuildListZipSums), mustHaveCompleteRequirements()) + } + } + }() + + // Make a best-effort attempt to acquire the side lock, only to exclude + // previous versions of the 'go' command from making simultaneous edits. 
+ if unlock, err := modfetch.SideLock(ctx); err == nil { + defer unlock() + } + + errNoChange := errors.New("no update needed") + + err = lockedfile.Transform(modFilePath, func(old []byte) ([]byte, error) { + if bytes.Equal(old, new) { + // The go.mod file is already equal to new, possibly as the result of some + // other process. + return nil, errNoChange + } + + if index != nil && !bytes.Equal(old, index.data) { + // The contents of the go.mod file have changed. In theory we could add all + // of the new modules to the build list, recompute, and check whether any + // module in *our* build list got bumped to a different version, but that's + // a lot of work for marginal benefit. Instead, fail the command: if users + // want to run concurrent commands, they need to start with a complete, + // consistent module definition. + return nil, fmt.Errorf("existing contents have changed since last read") + } + + return new, nil + }) + + if err != nil && err != errNoChange { + return fmt.Errorf("updating go.mod: %w", err) + } + return nil +} + +// keepSums returns the set of modules (and go.mod file entries) for which +// checksums would be needed in order to reload the same set of packages +// loaded by the most recent call to LoadPackages or ImportFromFiles, +// including any go.mod files needed to reconstruct the MVS result +// or identify go versions, +// in addition to the checksums for every module in keepMods. +func keepSums(ctx context.Context, ld *loader, rs *Requirements, which whichSums) map[module.Version]bool { + // Every module in the full module graph contributes its requirements, + // so in order to ensure that the build list itself is reproducible, + // we need sums for every go.mod in the graph (regardless of whether + // that version is selected). + keep := make(map[module.Version]bool) + + // Add entries for modules in the build list with paths that are prefixes of + // paths of loaded packages. 
We need to retain sums for all of these modules — + // not just the modules containing the actual packages — in order to rule out + // ambiguous import errors the next time we load the package. + keepModSumsForZipSums := true + if ld == nil { + if gover.Compare(MainModules.GoVersion(), gover.TidyGoModSumVersion) < 0 && cfg.BuildMod != "mod" { + keepModSumsForZipSums = false + } + } else { + keepPkgGoModSums := true + if gover.Compare(ld.requirements.GoVersion(), gover.TidyGoModSumVersion) < 0 && (ld.Tidy || cfg.BuildMod != "mod") { + keepPkgGoModSums = false + keepModSumsForZipSums = false + } + for _, pkg := range ld.pkgs { + // We check pkg.mod.Path here instead of pkg.inStd because the + // pseudo-package "C" is not in std, but not provided by any module (and + // shouldn't force loading the whole module graph). + if pkg.testOf != nil || (pkg.mod.Path == "" && pkg.err == nil) || module.CheckImportPath(pkg.path) != nil { + continue + } + + // We need the checksum for the go.mod file for pkg.mod + // so that we know what Go version to use to compile pkg. + // However, we didn't do so before Go 1.21, and the bug is relatively + // minor, so we maintain the previous (buggy) behavior in 'go mod tidy' to + // avoid introducing unnecessary churn. + if keepPkgGoModSums { + r := resolveReplacement(pkg.mod) + keep[modkey(r)] = true + } + + if rs.pruning == pruned && pkg.mod.Path != "" { + if v, ok := rs.rootSelected(pkg.mod.Path); ok && v == pkg.mod.Version { + // pkg was loaded from a root module, and because the main module has + // a pruned module graph we do not check non-root modules for + // conflicts for packages that can be found in roots. So we only need + // the checksums for the root modules that may contain pkg, not all + // possible modules. 
+ for prefix := pkg.path; prefix != "."; prefix = path.Dir(prefix) { + if v, ok := rs.rootSelected(prefix); ok && v != "none" { + m := module.Version{Path: prefix, Version: v} + r := resolveReplacement(m) + keep[r] = true + } + } + continue + } + } + + mg, _ := rs.Graph(ctx) + for prefix := pkg.path; prefix != "."; prefix = path.Dir(prefix) { + if v := mg.Selected(prefix); v != "none" { + m := module.Version{Path: prefix, Version: v} + r := resolveReplacement(m) + keep[r] = true + } + } + } + } + + if rs.graph.Load() == nil { + // We haven't needed to load the module graph so far. + // Save sums for the root modules (or their replacements), but don't + // incur the cost of loading the graph just to find and retain the sums. + for _, m := range rs.rootModules { + r := resolveReplacement(m) + keep[modkey(r)] = true + if which == addBuildListZipSums { + keep[r] = true + } + } + } else { + mg, _ := rs.Graph(ctx) + mg.WalkBreadthFirst(func(m module.Version) { + if _, ok := mg.RequiredBy(m); ok { + // The requirements from m's go.mod file are present in the module graph, + // so they are relevant to the MVS result regardless of whether m was + // actually selected. + r := resolveReplacement(m) + keep[modkey(r)] = true + } + }) + + if which == addBuildListZipSums { + for _, m := range mg.BuildList() { + r := resolveReplacement(m) + if keepModSumsForZipSums { + keep[modkey(r)] = true // we need the go version from the go.mod file to do anything useful with the zipfile + } + keep[r] = true + } + } + } + + return keep +} + +type whichSums int8 + +const ( + loadedZipSumsOnly = whichSums(iota) + addBuildListZipSums +) + +// modkey returns the module.Version under which the checksum for m's go.mod +// file is stored in the go.sum file. 
+func modkey(m module.Version) module.Version { + return module.Version{Path: m.Path, Version: m.Version + "/go.mod"} +} + +func suggestModulePath(path string) string { + var m string + + i := len(path) + for i > 0 && ('0' <= path[i-1] && path[i-1] <= '9' || path[i-1] == '.') { + i-- + } + url := path[:i] + url = strings.TrimSuffix(url, "/v") + url = strings.TrimSuffix(url, "/") + + f := func(c rune) bool { + return c > '9' || c < '0' + } + s := strings.FieldsFunc(path[i:], f) + if len(s) > 0 { + m = s[0] + } + m = strings.TrimLeft(m, "0") + if m == "" || m == "1" { + return url + "/v2" + } + + return url + "/v" + m +} + +func suggestGopkgIn(path string) string { + var m string + i := len(path) + for i > 0 && (('0' <= path[i-1] && path[i-1] <= '9') || (path[i-1] == '.')) { + i-- + } + url := path[:i] + url = strings.TrimSuffix(url, ".v") + url = strings.TrimSuffix(url, "/v") + url = strings.TrimSuffix(url, "/") + + f := func(c rune) bool { + return c > '9' || c < '0' + } + s := strings.FieldsFunc(path, f) + if len(s) > 0 { + m = s[0] + } + + m = strings.TrimLeft(m, "0") + + if m == "" { + return url + ".v1" + } + return url + ".v" + m +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/list.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/list.go new file mode 100644 index 0000000000000000000000000000000000000000..ef93c25121acc8ebbf58ff8676a9a93a4c3146c7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/list.go @@ -0,0 +1,310 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package modload + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "runtime" + "strings" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/gover" + "cmd/go/internal/modfetch/codehost" + "cmd/go/internal/modinfo" + "cmd/go/internal/search" + "cmd/internal/pkgpattern" + + "golang.org/x/mod/module" +) + +type ListMode int + +const ( + ListU ListMode = 1 << iota + ListRetracted + ListDeprecated + ListVersions + ListRetractedVersions +) + +// ListModules returns a description of the modules matching args, if known, +// along with any error preventing additional matches from being identified. +// +// The returned slice can be nonempty even if the error is non-nil. +func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile string) ([]*modinfo.ModulePublic, error) { + var reuse map[module.Version]*modinfo.ModulePublic + if reuseFile != "" { + data, err := os.ReadFile(reuseFile) + if err != nil { + return nil, err + } + dec := json.NewDecoder(bytes.NewReader(data)) + reuse = make(map[module.Version]*modinfo.ModulePublic) + for { + var m modinfo.ModulePublic + if err := dec.Decode(&m); err != nil { + if err == io.EOF { + break + } + return nil, fmt.Errorf("parsing %s: %v", reuseFile, err) + } + if m.Origin == nil { + continue + } + m.Reuse = true + reuse[module.Version{Path: m.Path, Version: m.Version}] = &m + if m.Query != "" { + reuse[module.Version{Path: m.Path, Version: m.Query}] = &m + } + } + } + + rs, mods, err := listModules(ctx, LoadModFile(ctx), args, mode, reuse) + + type token struct{} + sem := make(chan token, runtime.GOMAXPROCS(0)) + if mode != 0 { + for _, m := range mods { + if m.Reuse { + continue + } + add := func(m *modinfo.ModulePublic) { + sem <- token{} + go func() { + if mode&ListU != 0 { + addUpdate(ctx, m) + } + if mode&ListVersions != 0 { + addVersions(ctx, m, mode&ListRetractedVersions != 0) + } + if mode&ListRetracted != 0 { + addRetraction(ctx, m) + } + if 
mode&ListDeprecated != 0 { + addDeprecation(ctx, m) + } + <-sem + }() + } + + add(m) + if m.Replace != nil { + add(m.Replace) + } + } + } + // Fill semaphore channel to wait for all tasks to finish. + for n := cap(sem); n > 0; n-- { + sem <- token{} + } + + if err == nil { + requirements = rs + // TODO(#61605): The extra ListU clause fixes a problem with Go 1.21rc3 + // where "go mod tidy" and "go list -m -u all" fight over whether the go.sum + // should be considered up-to-date. The fix for now is to always treat the + // go.sum as up-to-date during list -m -u. Probably the right fix is more targeted, + // but in general list -u is looking up other checksums in the checksum database + // that won't be necessary later, so it makes sense not to write the go.sum back out. + if !ExplicitWriteGoMod && mode&ListU == 0 { + err = commitRequirements(ctx, WriteOpts{}) + } + } + return mods, err +} + +func listModules(ctx context.Context, rs *Requirements, args []string, mode ListMode, reuse map[module.Version]*modinfo.ModulePublic) (_ *Requirements, mods []*modinfo.ModulePublic, mgErr error) { + if len(args) == 0 { + var ms []*modinfo.ModulePublic + for _, m := range MainModules.Versions() { + if gover.IsToolchain(m.Path) { + continue + } + ms = append(ms, moduleInfo(ctx, rs, m, mode, reuse)) + } + return rs, ms, nil + } + + needFullGraph := false + for _, arg := range args { + if strings.Contains(arg, `\`) { + base.Fatalf("go: module paths never use backslash") + } + if search.IsRelativePath(arg) { + base.Fatalf("go: cannot use relative path %s to specify module", arg) + } + if arg == "all" || strings.Contains(arg, "...") { + needFullGraph = true + if !HasModRoot() { + base.Fatalf("go: cannot match %q: %v", arg, ErrNoModRoot) + } + continue + } + if path, vers, found := strings.Cut(arg, "@"); found { + if vers == "upgrade" || vers == "patch" { + if _, ok := rs.rootSelected(path); !ok || rs.pruning == unpruned { + needFullGraph = true + if !HasModRoot() { + base.Fatalf("go: 
cannot match %q: %v", arg, ErrNoModRoot) + } + } + } + continue + } + if _, ok := rs.rootSelected(arg); !ok || rs.pruning == unpruned { + needFullGraph = true + if mode&ListVersions == 0 && !HasModRoot() { + base.Fatalf("go: cannot match %q without -versions or an explicit version: %v", arg, ErrNoModRoot) + } + } + } + + var mg *ModuleGraph + if needFullGraph { + rs, mg, mgErr = expandGraph(ctx, rs) + } + + matchedModule := map[module.Version]bool{} + for _, arg := range args { + if path, vers, found := strings.Cut(arg, "@"); found { + var current string + if mg == nil { + current, _ = rs.rootSelected(path) + } else { + current = mg.Selected(path) + } + if current == "none" && mgErr != nil { + if vers == "upgrade" || vers == "patch" { + // The module graph is incomplete, so we don't know what version we're + // actually upgrading from. + // mgErr is already set, so just skip this module. + continue + } + } + + allowed := CheckAllowed + if IsRevisionQuery(path, vers) || mode&ListRetracted != 0 { + // Allow excluded and retracted versions if the user asked for a + // specific revision or used 'go list -retracted'. + allowed = nil + } + info, err := queryReuse(ctx, path, vers, current, allowed, reuse) + if err != nil { + var origin *codehost.Origin + if info != nil { + origin = info.Origin + } + mods = append(mods, &modinfo.ModulePublic{ + Path: path, + Version: vers, + Error: modinfoError(path, vers, err), + Origin: origin, + }) + continue + } + + // Indicate that m was resolved from outside of rs by passing a nil + // *Requirements instead. + var noRS *Requirements + + mod := moduleInfo(ctx, noRS, module.Version{Path: path, Version: info.Version}, mode, reuse) + if vers != mod.Version { + mod.Query = vers + } + mod.Origin = info.Origin + mods = append(mods, mod) + continue + } + + // Module path or pattern. 
+ var match func(string) bool + if arg == "all" { + match = func(p string) bool { return !gover.IsToolchain(p) } + } else if strings.Contains(arg, "...") { + mp := pkgpattern.MatchPattern(arg) + match = func(p string) bool { return mp(p) && !gover.IsToolchain(p) } + } else { + var v string + if mg == nil { + var ok bool + v, ok = rs.rootSelected(arg) + if !ok { + // We checked rootSelected(arg) in the earlier args loop, so if there + // is no such root we should have loaded a non-nil mg. + panic(fmt.Sprintf("internal error: root requirement expected but not found for %v", arg)) + } + } else { + v = mg.Selected(arg) + } + if v == "none" && mgErr != nil { + // mgErr is already set, so just skip this module. + continue + } + if v != "none" { + mods = append(mods, moduleInfo(ctx, rs, module.Version{Path: arg, Version: v}, mode, reuse)) + } else if cfg.BuildMod == "vendor" { + // In vendor mode, we can't determine whether a missing module is “a + // known dependency” because the module graph is incomplete. + // Give a more explicit error message. + mods = append(mods, &modinfo.ModulePublic{ + Path: arg, + Error: modinfoError(arg, "", errors.New("can't resolve module using the vendor directory\n\t(Use -mod=mod or -mod=readonly to bypass.)")), + }) + } else if mode&ListVersions != 0 { + // Don't make the user provide an explicit '@latest' when they're + // explicitly asking what the available versions are. Instead, return a + // module with version "none", to which we can add the requested list. 
+ mods = append(mods, &modinfo.ModulePublic{Path: arg}) + } else { + mods = append(mods, &modinfo.ModulePublic{ + Path: arg, + Error: modinfoError(arg, "", errors.New("not a known dependency")), + }) + } + continue + } + + matched := false + for _, m := range mg.BuildList() { + if match(m.Path) { + matched = true + if !matchedModule[m] { + matchedModule[m] = true + mods = append(mods, moduleInfo(ctx, rs, m, mode, reuse)) + } + } + } + if !matched { + fmt.Fprintf(os.Stderr, "warning: pattern %q matched no module dependencies\n", arg) + } + } + + return rs, mods, mgErr +} + +// modinfoError wraps an error to create an error message in +// modinfo.ModuleError with minimal redundancy. +func modinfoError(path, vers string, err error) *modinfo.ModuleError { + var nerr *NoMatchingVersionError + var merr *module.ModuleError + if errors.As(err, &nerr) { + // NoMatchingVersionError contains the query, so we don't mention the + // query again in ModuleError. + err = &module.ModuleError{Path: path, Err: err} + } else if !errors.As(err, &merr) { + // If the error does not contain path and version, wrap it in a + // module.ModuleError. + err = &module.ModuleError{Path: path, Version: vers, Err: err} + } + + return &modinfo.ModuleError{Err: err.Error()} +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/load.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/load.go new file mode 100644 index 0000000000000000000000000000000000000000..51eb141d4b38b827d1233c33b26619d51d15181f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/load.go @@ -0,0 +1,2352 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modload + +// This file contains the module-mode package loader, as well as some accessory +// functions pertaining to the package import graph. 
+// +// There are two exported entry points into package loading — LoadPackages and +// ImportFromFiles — both implemented in terms of loadFromRoots, which itself +// manipulates an instance of the loader struct. +// +// Although most of the loading state is maintained in the loader struct, +// one key piece - the build list - is a global, so that it can be modified +// separate from the loading operation, such as during "go get" +// upgrades/downgrades or in "go mod" operations. +// TODO(#40775): It might be nice to make the loader take and return +// a buildList rather than hard-coding use of the global. +// +// Loading is an iterative process. On each iteration, we try to load the +// requested packages and their transitive imports, then try to resolve modules +// for any imported packages that are still missing. +// +// The first step of each iteration identifies a set of “root” packages. +// Normally the root packages are exactly those matching the named pattern +// arguments. However, for the "all" meta-pattern, the final set of packages is +// computed from the package import graph, and therefore cannot be an initial +// input to loading that graph. Instead, the root packages for the "all" pattern +// are those contained in the main module, and allPatternIsRoot parameter to the +// loader instructs it to dynamically expand those roots to the full "all" +// pattern as loading progresses. +// +// The pkgInAll flag on each loadPkg instance tracks whether that +// package is known to match the "all" meta-pattern. +// A package matches the "all" pattern if: +// - it is in the main module, or +// - it is imported by any test in the main module, or +// - it is imported by another package in "all", or +// - the main module specifies a go version ≤ 1.15, and the package is imported +// by a *test of* another package in "all". 
//
// When graph pruning is in effect, we want to spot-check the graph-pruning
// invariants — which depend on which packages are known to be in "all" — even
// when we are only loading individual packages, so we set the pkgInAll flag
// regardless of whether the "all" pattern is a root.
// (This is necessary to maintain the “import invariant” described in
// https://golang.org/design/36460-lazy-module-loading.)
//
// Because "go mod vendor" prunes out the tests of vendored packages, the
// behavior of the "all" pattern with -mod=vendor in Go 1.11–1.15 is the same
// as the "all" pattern (regardless of the -mod flag) in 1.16+.
// The loader uses the GoVersion parameter to determine whether the "all"
// pattern should close over tests (as in Go 1.11–1.15) or stop at only those
// packages transitively imported by the packages and tests in the main module
// ("all" in Go 1.16+ and "go mod vendor" in Go 1.11+).
//
// Note that it is possible for a loaded package NOT to be in "all" even when we
// are loading the "all" pattern. For example, packages that are transitive
// dependencies of other roots named on the command line must be loaded, but are
// not in "all". (The mod_notall test illustrates this behavior.)
// Similarly, if the LoadTests flag is set but the "all" pattern does not close
// over test dependencies, then when we load the test of a package that is in
// "all" but outside the main module, the dependencies of that test will not
// necessarily themselves be in "all". (That configuration does not arise in Go
// 1.11–1.15, but it will be possible in Go 1.16+.)
//
// Loading proceeds from the roots, using a parallel work-queue with a limit on
// the amount of active work (to avoid saturating disks, CPU cores, and/or
// network connections). Each package is added to the queue the first time it is
// imported by another package.
When we have finished identifying the imports of +// a package, we add the test for that package if it is needed. A test may be +// needed if: +// - the package matches a root pattern and tests of the roots were requested, or +// - the package is in the main module and the "all" pattern is requested +// (because the "all" pattern includes the dependencies of tests in the main +// module), or +// - the package is in "all" and the definition of "all" we are using includes +// dependencies of tests (as is the case in Go ≤1.15). +// +// After all available packages have been loaded, we examine the results to +// identify any requested or imported packages that are still missing, and if +// so, which modules we could add to the module graph in order to make the +// missing packages available. We add those to the module graph and iterate, +// until either all packages resolve successfully or we cannot identify any +// module that would resolve any remaining missing package. +// +// If the main module is “tidy” (that is, if "go mod tidy" is a no-op for it) +// and all requested packages are in "all", then loading completes in a single +// iteration. +// TODO(bcmills): We should also be able to load in a single iteration if the +// requested packages all come from modules that are themselves tidy, regardless +// of whether those packages are in "all". Today, that requires two iterations +// if those packages are not found in existing dependencies of the main module. 
+ +import ( + "context" + "errors" + "fmt" + "go/build" + "io/fs" + "os" + "path" + pathpkg "path" + "path/filepath" + "reflect" + "runtime" + "sort" + "strings" + "sync" + "sync/atomic" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/fsys" + "cmd/go/internal/gover" + "cmd/go/internal/imports" + "cmd/go/internal/modfetch" + "cmd/go/internal/modindex" + "cmd/go/internal/mvs" + "cmd/go/internal/par" + "cmd/go/internal/search" + "cmd/go/internal/str" + + "golang.org/x/mod/module" +) + +// loaded is the most recently-used package loader. +// It holds details about individual packages. +// +// This variable should only be accessed directly in top-level exported +// functions. All other functions that require or produce a *loader should pass +// or return it as an explicit parameter. +var loaded *loader + +// PackageOpts control the behavior of the LoadPackages function. +type PackageOpts struct { + // TidyGoVersion is the Go version to which the go.mod file should be updated + // after packages have been loaded. + // + // An empty TidyGoVersion means to use the Go version already specified in the + // main module's go.mod file, or the latest Go version if there is no main + // module. + TidyGoVersion string + + // Tags are the build tags in effect (as interpreted by the + // cmd/go/internal/imports package). + // If nil, treated as equivalent to imports.Tags(). + Tags map[string]bool + + // Tidy, if true, requests that the build list and go.sum file be reduced to + // the minimal dependencies needed to reproducibly reload the requested + // packages. + Tidy bool + + // TidyCompatibleVersion is the oldest Go version that must be able to + // reproducibly reload the requested packages. + // + // If empty, the compatible version is the Go version immediately prior to the + // 'go' version listed in the go.mod file. 
+ TidyCompatibleVersion string + + // VendorModulesInGOROOTSrc indicates that if we are within a module in + // GOROOT/src, packages in the module's vendor directory should be resolved as + // actual module dependencies (instead of standard-library packages). + VendorModulesInGOROOTSrc bool + + // ResolveMissingImports indicates that we should attempt to add module + // dependencies as needed to resolve imports of packages that are not found. + // + // For commands that support the -mod flag, resolving imports may still fail + // if the flag is set to "readonly" (the default) or "vendor". + ResolveMissingImports bool + + // AssumeRootsImported indicates that the transitive dependencies of the root + // packages should be treated as if those roots will be imported by the main + // module. + AssumeRootsImported bool + + // AllowPackage, if non-nil, is called after identifying the module providing + // each package. If AllowPackage returns a non-nil error, that error is set + // for the package, and the imports and test of that package will not be + // loaded. + // + // AllowPackage may be invoked concurrently by multiple goroutines, + // and may be invoked multiple times for a given package path. + AllowPackage func(ctx context.Context, path string, mod module.Version) error + + // LoadTests loads the test dependencies of each package matching a requested + // pattern. If ResolveMissingImports is also true, test dependencies will be + // resolved if missing. + LoadTests bool + + // UseVendorAll causes the "all" package pattern to be interpreted as if + // running "go mod vendor" (or building with "-mod=vendor"). + // + // This is a no-op for modules that declare 'go 1.16' or higher, for which this + // is the default (and only) interpretation of the "all" pattern in module mode. + UseVendorAll bool + + // AllowErrors indicates that LoadPackages should not terminate the process if + // an error occurs. 
+ AllowErrors bool + + // SilencePackageErrors indicates that LoadPackages should not print errors + // that occur while matching or loading packages, and should not terminate the + // process if such an error occurs. + // + // Errors encountered in the module graph will still be reported. + // + // The caller may retrieve the silenced package errors using the Lookup + // function, and matching errors are still populated in the Errs field of the + // associated search.Match.) + SilencePackageErrors bool + + // SilenceMissingStdImports indicates that LoadPackages should not print + // errors or terminate the process if an imported package is missing, and the + // import path looks like it might be in the standard library (perhaps in a + // future version). + SilenceMissingStdImports bool + + // SilenceNoGoErrors indicates that LoadPackages should not print + // imports.ErrNoGo errors. + // This allows the caller to invoke LoadPackages (and report other errors) + // without knowing whether the requested packages exist for the given tags. + // + // Note that if a requested package does not exist *at all*, it will fail + // during module resolution and the error will not be suppressed. + SilenceNoGoErrors bool + + // SilenceUnmatchedWarnings suppresses the warnings normally emitted for + // patterns that did not match any packages. + SilenceUnmatchedWarnings bool + + // Resolve the query against this module. + MainModule module.Version + + // If Switcher is non-nil, then LoadPackages passes all encountered errors + // to Switcher.Error and tries Switcher.Switch before base.ExitIfErrors. + Switcher gover.Switcher +} + +// LoadPackages identifies the set of packages matching the given patterns and +// loads the packages in the import graph rooted at that set. 
+func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (matches []*search.Match, loadedPackages []string) { + if opts.Tags == nil { + opts.Tags = imports.Tags() + } + + patterns = search.CleanPatterns(patterns) + matches = make([]*search.Match, 0, len(patterns)) + allPatternIsRoot := false + for _, pattern := range patterns { + matches = append(matches, search.NewMatch(pattern)) + if pattern == "all" { + allPatternIsRoot = true + } + } + + updateMatches := func(rs *Requirements, ld *loader) { + for _, m := range matches { + switch { + case m.IsLocal(): + // Evaluate list of file system directories on first iteration. + if m.Dirs == nil { + matchModRoots := modRoots + if opts.MainModule != (module.Version{}) { + matchModRoots = []string{MainModules.ModRoot(opts.MainModule)} + } + matchLocalDirs(ctx, matchModRoots, m, rs) + } + + // Make a copy of the directory list and translate to import paths. + // Note that whether a directory corresponds to an import path + // changes as the build list is updated, and a directory can change + // from not being in the build list to being in it and back as + // the exact version of a particular module increases during + // the loader iterations. + m.Pkgs = m.Pkgs[:0] + for _, dir := range m.Dirs { + pkg, err := resolveLocalPackage(ctx, dir, rs) + if err != nil { + if !m.IsLiteral() && (err == errPkgIsBuiltin || err == errPkgIsGorootSrc) { + continue // Don't include "builtin" or GOROOT/src in wildcard patterns. + } + + // If we're outside of a module, ensure that the failure mode + // indicates that. + if !HasModRoot() { + die() + } + + if ld != nil { + m.AddError(err) + } + continue + } + m.Pkgs = append(m.Pkgs, pkg) + } + + case m.IsLiteral(): + m.Pkgs = []string{m.Pattern()} + + case strings.Contains(m.Pattern(), "..."): + m.Errs = m.Errs[:0] + mg, err := rs.Graph(ctx) + if err != nil { + // The module graph is (or may be) incomplete — perhaps we failed to + // load the requirements of some module. 
This is an error in matching + // the patterns to packages, because we may be missing some packages + // or we may erroneously match packages in the wrong versions of + // modules. However, for cases like 'go list -e', the error should not + // necessarily prevent us from loading the packages we could find. + m.Errs = append(m.Errs, err) + } + matchPackages(ctx, m, opts.Tags, includeStd, mg.BuildList()) + + case m.Pattern() == "all": + if ld == nil { + // The initial roots are the packages in the main module. + // loadFromRoots will expand that to "all". + m.Errs = m.Errs[:0] + matchModules := MainModules.Versions() + if opts.MainModule != (module.Version{}) { + matchModules = []module.Version{opts.MainModule} + } + matchPackages(ctx, m, opts.Tags, omitStd, matchModules) + } else { + // Starting with the packages in the main module, + // enumerate the full list of "all". + m.Pkgs = ld.computePatternAll() + } + + case m.Pattern() == "std" || m.Pattern() == "cmd": + if m.Pkgs == nil { + m.MatchPackages() // Locate the packages within GOROOT/src. + } + + default: + panic(fmt.Sprintf("internal error: modload missing case for pattern %s", m.Pattern())) + } + } + } + + initialRS, err := loadModFile(ctx, &opts) + if err != nil { + base.Fatal(err) + } + + ld := loadFromRoots(ctx, loaderParams{ + PackageOpts: opts, + requirements: initialRS, + + allPatternIsRoot: allPatternIsRoot, + + listRoots: func(rs *Requirements) (roots []string) { + updateMatches(rs, nil) + for _, m := range matches { + roots = append(roots, m.Pkgs...) + } + return roots + }, + }) + + // One last pass to finalize wildcards. + updateMatches(ld.requirements, ld) + + // List errors in matching patterns (such as directory permission + // errors for wildcard patterns). 
+ if !ld.SilencePackageErrors { + for _, match := range matches { + for _, err := range match.Errs { + ld.error(err) + } + } + } + ld.exitIfErrors(ctx) + + if !opts.SilenceUnmatchedWarnings { + search.WarnUnmatched(matches) + } + + if opts.Tidy { + if cfg.BuildV { + mg, _ := ld.requirements.Graph(ctx) + for _, m := range initialRS.rootModules { + var unused bool + if ld.requirements.pruning == unpruned { + // m is unused if it was dropped from the module graph entirely. If it + // was only demoted from direct to indirect, it may still be in use via + // a transitive import. + unused = mg.Selected(m.Path) == "none" + } else { + // m is unused if it was dropped from the roots. If it is still present + // as a transitive dependency, that transitive dependency is not needed + // by any package or test in the main module. + _, ok := ld.requirements.rootSelected(m.Path) + unused = !ok + } + if unused { + fmt.Fprintf(os.Stderr, "unused %s\n", m.Path) + } + } + } + + keep := keepSums(ctx, ld, ld.requirements, loadedZipSumsOnly) + compatVersion := ld.TidyCompatibleVersion + goVersion := ld.requirements.GoVersion() + if compatVersion == "" { + if gover.Compare(goVersion, gover.GoStrictVersion) < 0 { + compatVersion = gover.Prev(goVersion) + } else { + // Starting at GoStrictVersion, we no longer maintain compatibility with + // versions older than what is listed in the go.mod file. + compatVersion = goVersion + } + } + if gover.Compare(compatVersion, goVersion) > 0 { + // Each version of the Go toolchain knows how to interpret go.mod and + // go.sum files produced by all previous versions, so a compatibility + // version higher than the go.mod version adds nothing. 
+ compatVersion = goVersion + } + if compatPruning := pruningForGoVersion(compatVersion); compatPruning != ld.requirements.pruning { + compatRS := newRequirements(compatPruning, ld.requirements.rootModules, ld.requirements.direct) + ld.checkTidyCompatibility(ctx, compatRS, compatVersion) + + for m := range keepSums(ctx, ld, compatRS, loadedZipSumsOnly) { + keep[m] = true + } + } + + if !ExplicitWriteGoMod { + modfetch.TrimGoSum(keep) + + // commitRequirements below will also call WriteGoSum, but the "keep" map + // we have here could be strictly larger: commitRequirements only commits + // loaded.requirements, but here we may have also loaded (and want to + // preserve checksums for) additional entities from compatRS, which are + // only needed for compatibility with ld.TidyCompatibleVersion. + if err := modfetch.WriteGoSum(ctx, keep, mustHaveCompleteRequirements()); err != nil { + base.Fatal(err) + } + } + } + + // Success! Update go.mod and go.sum (if needed) and return the results. + // We'll skip updating if ExplicitWriteGoMod is true (the caller has opted + // to call WriteGoMod itself) or if ResolveMissingImports is false (the + // command wants to examine the package graph as-is). + loaded = ld + requirements = loaded.requirements + + for _, pkg := range ld.pkgs { + if !pkg.isTest() { + loadedPackages = append(loadedPackages, pkg.path) + } + } + sort.Strings(loadedPackages) + + if !ExplicitWriteGoMod && opts.ResolveMissingImports { + if err := commitRequirements(ctx, WriteOpts{}); err != nil { + base.Fatal(err) + } + } + + return matches, loadedPackages +} + +// matchLocalDirs is like m.MatchDirs, but tries to avoid scanning directories +// outside of the standard library and active modules. 
+func matchLocalDirs(ctx context.Context, modRoots []string, m *search.Match, rs *Requirements) { + if !m.IsLocal() { + panic(fmt.Sprintf("internal error: resolveLocalDirs on non-local pattern %s", m.Pattern())) + } + + if i := strings.Index(m.Pattern(), "..."); i >= 0 { + // The pattern is local, but it is a wildcard. Its packages will + // only resolve to paths if they are inside of the standard + // library, the main module, or some dependency of the main + // module. Verify that before we walk the filesystem: a filesystem + // walk in a directory like /var or /etc can be very expensive! + dir := filepath.Dir(filepath.Clean(m.Pattern()[:i+3])) + absDir := dir + if !filepath.IsAbs(dir) { + absDir = filepath.Join(base.Cwd(), dir) + } + + modRoot := findModuleRoot(absDir) + found := false + for _, mainModuleRoot := range modRoots { + if mainModuleRoot == modRoot { + found = true + break + } + } + if !found && search.InDir(absDir, cfg.GOROOTsrc) == "" && pathInModuleCache(ctx, absDir, rs) == "" { + m.Dirs = []string{} + scope := "main module or its selected dependencies" + if inWorkspaceMode() { + scope = "modules listed in go.work or their selected dependencies" + } + m.AddError(fmt.Errorf("directory prefix %s does not contain %s", base.ShortPath(absDir), scope)) + return + } + } + + m.MatchDirs(modRoots) +} + +// resolveLocalPackage resolves a filesystem path to a package path. +func resolveLocalPackage(ctx context.Context, dir string, rs *Requirements) (string, error) { + var absDir string + if filepath.IsAbs(dir) { + absDir = filepath.Clean(dir) + } else { + absDir = filepath.Join(base.Cwd(), dir) + } + + bp, err := cfg.BuildContext.ImportDir(absDir, 0) + if err != nil && (bp == nil || len(bp.IgnoredGoFiles) == 0) { + // golang.org/issue/32917: We should resolve a relative path to a + // package path only if the relative path actually contains the code + // for that package. 
+ // + // If the named directory does not exist or contains no Go files, + // the package does not exist. + // Other errors may affect package loading, but not resolution. + if _, err := fsys.Stat(absDir); err != nil { + if os.IsNotExist(err) { + // Canonicalize OS-specific errors to errDirectoryNotFound so that error + // messages will be easier for users to search for. + return "", &fs.PathError{Op: "stat", Path: absDir, Err: errDirectoryNotFound} + } + return "", err + } + if _, noGo := err.(*build.NoGoError); noGo { + // A directory that does not contain any Go source files — even ignored + // ones! — is not a Go package, and we can't resolve it to a package + // path because that path could plausibly be provided by some other + // module. + // + // Any other error indicates that the package “exists” (at least in the + // sense that it cannot exist in any other module), but has some other + // problem (such as a syntax error). + return "", err + } + } + + for _, mod := range MainModules.Versions() { + modRoot := MainModules.ModRoot(mod) + if modRoot != "" && absDir == modRoot { + if absDir == cfg.GOROOTsrc { + return "", errPkgIsGorootSrc + } + return MainModules.PathPrefix(mod), nil + } + } + + // Note: The checks for @ here are just to avoid misinterpreting + // the module cache directories (formerly GOPATH/src/mod/foo@v1.5.2/bar). + // It's not strictly necessary but helpful to keep the checks. 
+ var pkgNotFoundErr error + pkgNotFoundLongestPrefix := "" + for _, mainModule := range MainModules.Versions() { + modRoot := MainModules.ModRoot(mainModule) + if modRoot != "" && str.HasFilePathPrefix(absDir, modRoot) && !strings.Contains(absDir[len(modRoot):], "@") { + suffix := filepath.ToSlash(str.TrimFilePathPrefix(absDir, modRoot)) + if pkg, found := strings.CutPrefix(suffix, "vendor/"); found { + if cfg.BuildMod != "vendor" { + return "", fmt.Errorf("without -mod=vendor, directory %s has no package path", absDir) + } + + readVendorList(VendorDir()) + if _, ok := vendorPkgModule[pkg]; !ok { + return "", fmt.Errorf("directory %s is not a package listed in vendor/modules.txt", absDir) + } + return pkg, nil + } + + mainModulePrefix := MainModules.PathPrefix(mainModule) + if mainModulePrefix == "" { + pkg := suffix + if pkg == "builtin" { + // "builtin" is a pseudo-package with a real source file. + // It's not included in "std", so it shouldn't resolve from "." + // within module "std" either. + return "", errPkgIsBuiltin + } + return pkg, nil + } + + pkg := pathpkg.Join(mainModulePrefix, suffix) + if _, ok, err := dirInModule(pkg, mainModulePrefix, modRoot, true); err != nil { + return "", err + } else if !ok { + // This main module could contain the directory but doesn't. Other main + // modules might contain the directory, so wait till we finish the loop + // to see if another main module contains directory. But if not, + // return an error. + if len(mainModulePrefix) > len(pkgNotFoundLongestPrefix) { + pkgNotFoundLongestPrefix = mainModulePrefix + pkgNotFoundErr = &PackageNotInModuleError{MainModules: []module.Version{mainModule}, Pattern: pkg} + } + continue + } + return pkg, nil + } + } + if pkgNotFoundErr != nil { + return "", pkgNotFoundErr + } + + if sub := search.InDir(absDir, cfg.GOROOTsrc); sub != "" && sub != "." 
&& !strings.Contains(sub, "@") { + pkg := filepath.ToSlash(sub) + if pkg == "builtin" { + return "", errPkgIsBuiltin + } + return pkg, nil + } + + pkg := pathInModuleCache(ctx, absDir, rs) + if pkg == "" { + dirstr := fmt.Sprintf("directory %s", base.ShortPath(absDir)) + if dirstr == "directory ." { + dirstr = "current directory" + } + if inWorkspaceMode() { + if mr := findModuleRoot(absDir); mr != "" { + return "", fmt.Errorf("%s is contained in a module that is not one of the workspace modules listed in go.work. You can add the module to the workspace using:\n\tgo work use %s", dirstr, base.ShortPath(mr)) + } + return "", fmt.Errorf("%s outside modules listed in go.work or their selected dependencies", dirstr) + } + return "", fmt.Errorf("%s outside main module or its selected dependencies", dirstr) + } + return pkg, nil +} + +var ( + errDirectoryNotFound = errors.New("directory not found") + errPkgIsGorootSrc = errors.New("GOROOT/src is not an importable package") + errPkgIsBuiltin = errors.New(`"builtin" is a pseudo-package, not an importable package`) +) + +// pathInModuleCache returns the import path of the directory dir, +// if dir is in the module cache copy of a module in our build list. 
+func pathInModuleCache(ctx context.Context, dir string, rs *Requirements) string { + tryMod := func(m module.Version) (string, bool) { + if gover.IsToolchain(m.Path) { + return "", false + } + var root string + var err error + if repl := Replacement(m); repl.Path != "" && repl.Version == "" { + root = repl.Path + if !filepath.IsAbs(root) { + root = filepath.Join(replaceRelativeTo(), root) + } + } else if repl.Path != "" { + root, err = modfetch.DownloadDir(ctx, repl) + } else { + root, err = modfetch.DownloadDir(ctx, m) + } + if err != nil { + return "", false + } + + sub := search.InDir(dir, root) + if sub == "" { + return "", false + } + sub = filepath.ToSlash(sub) + if strings.Contains(sub, "/vendor/") || strings.HasPrefix(sub, "vendor/") || strings.Contains(sub, "@") { + return "", false + } + + return path.Join(m.Path, filepath.ToSlash(sub)), true + } + + if rs.pruning == pruned { + for _, m := range rs.rootModules { + if v, _ := rs.rootSelected(m.Path); v != m.Version { + continue // m is a root, but we have a higher root for the same path. + } + if importPath, ok := tryMod(m); ok { + // checkMultiplePaths ensures that a module can be used for at most one + // requirement, so this must be it. + return importPath + } + } + } + + // None of the roots contained dir, or the graph is unpruned (so we don't want + // to distinguish between roots and transitive dependencies). Either way, + // check the full graph to see if the directory is a non-root dependency. + // + // If the roots are not consistent with the full module graph, the selected + // versions of root modules may differ from what we already checked above. + // Re-check those paths too. + + mg, _ := rs.Graph(ctx) + var importPath string + for _, m := range mg.BuildList() { + var found bool + importPath, found = tryMod(m) + if found { + break + } + } + return importPath +} + +// ImportFromFiles adds modules to the build list as needed +// to satisfy the imports in the named Go source files. 
+// +// Errors in missing dependencies are silenced. +// +// TODO(bcmills): Silencing errors seems off. Take a closer look at this and +// figure out what the error-reporting actually ought to be. +func ImportFromFiles(ctx context.Context, gofiles []string) { + rs := LoadModFile(ctx) + + tags := imports.Tags() + imports, testImports, err := imports.ScanFiles(gofiles, tags) + if err != nil { + base.Fatal(err) + } + + loaded = loadFromRoots(ctx, loaderParams{ + PackageOpts: PackageOpts{ + Tags: tags, + ResolveMissingImports: true, + SilencePackageErrors: true, + }, + requirements: rs, + listRoots: func(*Requirements) (roots []string) { + roots = append(roots, imports...) + roots = append(roots, testImports...) + return roots + }, + }) + requirements = loaded.requirements + + if !ExplicitWriteGoMod { + if err := commitRequirements(ctx, WriteOpts{}); err != nil { + base.Fatal(err) + } + } +} + +// DirImportPath returns the effective import path for dir, +// provided it is within a main module, or else returns ".". +func (mms *MainModuleSet) DirImportPath(ctx context.Context, dir string) (path string, m module.Version) { + if !HasModRoot() { + return ".", module.Version{} + } + LoadModFile(ctx) // Sets targetPrefix. 
+ + if !filepath.IsAbs(dir) { + dir = filepath.Join(base.Cwd(), dir) + } else { + dir = filepath.Clean(dir) + } + + var longestPrefix string + var longestPrefixPath string + var longestPrefixVersion module.Version + for _, v := range mms.Versions() { + modRoot := mms.ModRoot(v) + if dir == modRoot { + return mms.PathPrefix(v), v + } + if str.HasFilePathPrefix(dir, modRoot) { + pathPrefix := MainModules.PathPrefix(v) + if pathPrefix > longestPrefix { + longestPrefix = pathPrefix + longestPrefixVersion = v + suffix := filepath.ToSlash(str.TrimFilePathPrefix(dir, modRoot)) + if strings.HasPrefix(suffix, "vendor/") { + longestPrefixPath = suffix[len("vendor/"):] + continue + } + longestPrefixPath = pathpkg.Join(mms.PathPrefix(v), suffix) + } + } + } + if len(longestPrefix) > 0 { + return longestPrefixPath, longestPrefixVersion + } + + return ".", module.Version{} +} + +// PackageModule returns the module providing the package named by the import path. +func PackageModule(path string) module.Version { + pkg, ok := loaded.pkgCache.Get(path) + if !ok { + return module.Version{} + } + return pkg.mod +} + +// Lookup returns the source directory, import path, and any loading error for +// the package at path as imported from the package in parentDir. +// Lookup requires that one of the Load functions in this package has already +// been called. +func Lookup(parentPath string, parentIsStd bool, path string) (dir, realPath string, err error) { + if path == "" { + panic("Lookup called with empty package path") + } + + if parentIsStd { + path = loaded.stdVendor(parentPath, path) + } + pkg, ok := loaded.pkgCache.Get(path) + if !ok { + // The loader should have found all the relevant paths. + // There are a few exceptions, though: + // - during go list without -test, the p.Resolve calls to process p.TestImports and p.XTestImports + // end up here to canonicalize the import paths. + // - during any load, non-loaded packages like "unsafe" end up here. 
+ // - during any load, build-injected dependencies like "runtime/cgo" end up here. + // - because we ignore appengine/* in the module loader, + // the dependencies of any actual appengine/* library end up here. + dir := findStandardImportPath(path) + if dir != "" { + return dir, path, nil + } + return "", "", errMissing + } + return pkg.dir, pkg.path, pkg.err +} + +// A loader manages the process of loading information about +// the required packages for a particular build, +// checking that the packages are available in the module set, +// and updating the module set if needed. +type loader struct { + loaderParams + + // allClosesOverTests indicates whether the "all" pattern includes + // dependencies of tests outside the main module (as in Go 1.11–1.15). + // (Otherwise — as in Go 1.16+ — the "all" pattern includes only the packages + // transitively *imported by* the packages and tests in the main module.) + allClosesOverTests bool + + // skipImportModFiles indicates whether we may skip loading go.mod files + // for imported packages (as in 'go mod tidy' in Go 1.17–1.20). + skipImportModFiles bool + + work *par.Queue + + // reset on each iteration + roots []*loadPkg + pkgCache *par.Cache[string, *loadPkg] + pkgs []*loadPkg // transitive closure of loaded packages and tests; populated in buildStacks +} + +// loaderParams configure the packages loaded by, and the properties reported +// by, a loader instance. +type loaderParams struct { + PackageOpts + requirements *Requirements + + allPatternIsRoot bool // Is the "all" pattern an additional root? + + listRoots func(rs *Requirements) []string +} + +func (ld *loader) reset() { + select { + case <-ld.work.Idle(): + default: + panic("loader.reset when not idle") + } + + ld.roots = nil + ld.pkgCache = new(par.Cache[string, *loadPkg]) + ld.pkgs = nil +} + +// error reports an error via either os.Stderr or base.Error, +// according to whether ld.AllowErrors is set. 
+func (ld *loader) error(err error) { + if ld.AllowErrors { + fmt.Fprintf(os.Stderr, "go: %v\n", err) + } else if ld.Switcher != nil { + ld.Switcher.Error(err) + } else { + base.Error(err) + } +} + +// switchIfErrors switches toolchains if a switch is needed. +func (ld *loader) switchIfErrors(ctx context.Context) { + if ld.Switcher != nil { + ld.Switcher.Switch(ctx) + } +} + +// exitIfErrors switches toolchains if a switch is needed +// or else exits if any errors have been reported. +func (ld *loader) exitIfErrors(ctx context.Context) { + ld.switchIfErrors(ctx) + base.ExitIfErrors() +} + +// goVersion reports the Go version that should be used for the loader's +// requirements: ld.TidyGoVersion if set, or ld.requirements.GoVersion() +// otherwise. +func (ld *loader) goVersion() string { + if ld.TidyGoVersion != "" { + return ld.TidyGoVersion + } + return ld.requirements.GoVersion() +} + +// A loadPkg records information about a single loaded package. +type loadPkg struct { + // Populated at construction time: + path string // import path + testOf *loadPkg + + // Populated at construction time and updated by (*loader).applyPkgFlags: + flags atomicLoadPkgFlags + + // Populated by (*loader).load: + mod module.Version // module providing package + dir string // directory containing source code + err error // error loading package + imports []*loadPkg // packages imported by this one + testImports []string // test-only imports, saved for use by pkg.test. + inStd bool + altMods []module.Version // modules that could have contained the package but did not + + // Populated by (*loader).pkgTest: + testOnce sync.Once + test *loadPkg + + // Populated by postprocessing in (*loader).buildStacks: + stack *loadPkg // package importing this one in minimal import stack for this pkg +} + +// loadPkgFlags is a set of flags tracking metadata about a package. 
+type loadPkgFlags int8 + +const ( + // pkgInAll indicates that the package is in the "all" package pattern, + // regardless of whether we are loading the "all" package pattern. + // + // When the pkgInAll flag and pkgImportsLoaded flags are both set, the caller + // who set the last of those flags must propagate the pkgInAll marking to all + // of the imports of the marked package. + // + // A test is marked with pkgInAll if that test would promote the packages it + // imports to be in "all" (such as when the test is itself within the main + // module, or when ld.allClosesOverTests is true). + pkgInAll loadPkgFlags = 1 << iota + + // pkgIsRoot indicates that the package matches one of the root package + // patterns requested by the caller. + // + // If LoadTests is set, then when pkgIsRoot and pkgImportsLoaded are both set, + // the caller who set the last of those flags must populate a test for the + // package (in the pkg.test field). + // + // If the "all" pattern is included as a root, then non-test packages in "all" + // are also roots (and must be marked pkgIsRoot). + pkgIsRoot + + // pkgFromRoot indicates that the package is in the transitive closure of + // imports starting at the roots. (Note that every package marked as pkgIsRoot + // is also trivially marked pkgFromRoot.) + pkgFromRoot + + // pkgImportsLoaded indicates that the imports and testImports fields of a + // loadPkg have been populated. + pkgImportsLoaded +) + +// has reports whether all of the flags in cond are set in f. +func (f loadPkgFlags) has(cond loadPkgFlags) bool { + return f&cond == cond +} + +// An atomicLoadPkgFlags stores a loadPkgFlags for which individual flags can be +// added atomically. +type atomicLoadPkgFlags struct { + bits atomic.Int32 +} + +// update sets the given flags in af (in addition to any flags already set). +// +// update returns the previous flag state so that the caller may determine which +// flags were newly-set. 
+func (af *atomicLoadPkgFlags) update(flags loadPkgFlags) (old loadPkgFlags) { + for { + old := af.bits.Load() + new := old | int32(flags) + if new == old || af.bits.CompareAndSwap(old, new) { + return loadPkgFlags(old) + } + } +} + +// has reports whether all of the flags in cond are set in af. +func (af *atomicLoadPkgFlags) has(cond loadPkgFlags) bool { + return loadPkgFlags(af.bits.Load())&cond == cond +} + +// isTest reports whether pkg is a test of another package. +func (pkg *loadPkg) isTest() bool { + return pkg.testOf != nil +} + +// fromExternalModule reports whether pkg was loaded from a module other than +// the main module. +func (pkg *loadPkg) fromExternalModule() bool { + if pkg.mod.Path == "" { + return false // loaded from the standard library, not a module + } + return !MainModules.Contains(pkg.mod.Path) +} + +var errMissing = errors.New("cannot find package") + +// loadFromRoots attempts to load the build graph needed to process a set of +// root packages and their dependencies. +// +// The set of root packages is returned by the params.listRoots function, and +// expanded to the full set of packages by tracing imports (and possibly tests) +// as needed. +func loadFromRoots(ctx context.Context, params loaderParams) *loader { + ld := &loader{ + loaderParams: params, + work: par.NewQueue(runtime.GOMAXPROCS(0)), + } + + if ld.requirements.pruning == unpruned { + // If the module graph does not support pruning, we assume that we will need + // the full module graph in order to load package dependencies. + // + // This might not be strictly necessary, but it matches the historical + // behavior of the 'go' command and keeps the go.mod file more consistent in + // case of erroneous hand-edits — which are less likely to be detected by + // spot-checks in modules that do not maintain the expanded go.mod + // requirements needed for graph pruning. 
+ var err error + ld.requirements, _, err = expandGraph(ctx, ld.requirements) + if err != nil { + ld.error(err) + } + } + ld.exitIfErrors(ctx) + + updateGoVersion := func() { + goVersion := ld.goVersion() + + if ld.requirements.pruning != workspace { + var err error + ld.requirements, err = convertPruning(ctx, ld.requirements, pruningForGoVersion(goVersion)) + if err != nil { + ld.error(err) + ld.exitIfErrors(ctx) + } + } + + // If the module's Go version omits go.sum entries for go.mod files for test + // dependencies of external packages, avoid loading those files in the first + // place. + ld.skipImportModFiles = ld.Tidy && gover.Compare(goVersion, gover.TidyGoModSumVersion) < 0 + + // If the module's go version explicitly predates the change in "all" for + // graph pruning, continue to use the older interpretation. + ld.allClosesOverTests = gover.Compare(goVersion, gover.NarrowAllVersion) < 0 && !ld.UseVendorAll + } + + for { + ld.reset() + updateGoVersion() + + // Load the root packages and their imports. + // Note: the returned roots can change on each iteration, + // since the expansion of package patterns depends on the + // build list we're using. + rootPkgs := ld.listRoots(ld.requirements) + + if ld.requirements.pruning == pruned && cfg.BuildMod == "mod" { + // Before we start loading transitive imports of packages, locate all of + // the root packages and promote their containing modules to root modules + // dependencies. If their go.mod files are tidy (the common case) and the + // set of root packages does not change then we can select the correct + // versions of all transitive imports on the first try and complete + // loading in a single iteration. + changedBuildList := ld.preloadRootModules(ctx, rootPkgs) + if changedBuildList { + // The build list has changed, so the set of root packages may have also + // changed. Start over to pick up the changes. 
(Preloading roots is much + // cheaper than loading the full import graph, so we would rather pay + // for an extra iteration of preloading than potentially end up + // discarding the result of a full iteration of loading.) + continue + } + } + + inRoots := map[*loadPkg]bool{} + for _, path := range rootPkgs { + root := ld.pkg(ctx, path, pkgIsRoot) + if !inRoots[root] { + ld.roots = append(ld.roots, root) + inRoots[root] = true + } + } + + // ld.pkg adds imported packages to the work queue and calls applyPkgFlags, + // which adds tests (and test dependencies) as needed. + // + // When all of the work in the queue has completed, we'll know that the + // transitive closure of dependencies has been loaded. + <-ld.work.Idle() + + ld.buildStacks() + + changed, err := ld.updateRequirements(ctx) + if err != nil { + ld.error(err) + break + } + if changed { + // Don't resolve missing imports until the module graph has stabilized. + // If the roots are still changing, they may turn out to specify a + // requirement on the missing package(s), and we would rather use a + // version specified by a new root than add a new dependency on an + // unrelated version. + continue + } + + if !ld.ResolveMissingImports || (!HasModRoot() && !allowMissingModuleImports) { + // We've loaded as much as we can without resolving missing imports. + break + } + + modAddedBy, err := ld.resolveMissingImports(ctx) + if err != nil { + ld.error(err) + break + } + if len(modAddedBy) == 0 { + // The roots are stable, and we've resolved all of the missing packages + // that we can. + break + } + + toAdd := make([]module.Version, 0, len(modAddedBy)) + for m := range modAddedBy { + toAdd = append(toAdd, m) + } + gover.ModSort(toAdd) // to make errors deterministic + + // We ran updateRequirements before resolving missing imports and it didn't + // make any changes, so we know that the requirement graph is already + // consistent with ld.pkgs: we don't need to pass ld.pkgs to updateRoots + // again. 
(That would waste time looking for changes that we have already + // applied.) + var noPkgs []*loadPkg + // We also know that we're going to call updateRequirements again next + // iteration so we don't need to also update it here. (That would waste time + // computing a "direct" map that we'll have to recompute later anyway.) + direct := ld.requirements.direct + rs, err := updateRoots(ctx, direct, ld.requirements, noPkgs, toAdd, ld.AssumeRootsImported) + if err != nil { + // If an error was found in a newly added module, report the package + // import stack instead of the module requirement stack. Packages + // are more descriptive. + if err, ok := err.(*mvs.BuildListError); ok { + if pkg := modAddedBy[err.Module()]; pkg != nil { + ld.error(fmt.Errorf("%s: %w", pkg.stackText(), err.Err)) + break + } + } + ld.error(err) + break + } + if reflect.DeepEqual(rs.rootModules, ld.requirements.rootModules) { + // Something is deeply wrong. resolveMissingImports gave us a non-empty + // set of modules to add to the graph, but adding those modules had no + // effect — either they were already in the graph, or updateRoots did not + // add them as requested. + panic(fmt.Sprintf("internal error: adding %v to module graph had no effect on root requirements (%v)", toAdd, rs.rootModules)) + } + ld.requirements = rs + } + ld.exitIfErrors(ctx) + + // Tidy the build list, if applicable, before we report errors. + // (The process of tidying may remove errors from irrelevant dependencies.) + if ld.Tidy { + rs, err := tidyRoots(ctx, ld.requirements, ld.pkgs) + if err != nil { + ld.error(err) + } else { + if ld.TidyGoVersion != "" { + // Attempt to switch to the requested Go version. We have been using its + // pruning and semantics all along, but there may have been — and may + // still be — requirements on higher versions in the graph. 
+ tidy := overrideRoots(ctx, rs, []module.Version{{Path: "go", Version: ld.TidyGoVersion}}) + mg, err := tidy.Graph(ctx) + if err != nil { + ld.error(err) + } + if v := mg.Selected("go"); v == ld.TidyGoVersion { + rs = tidy + } else { + conflict := Conflict{ + Path: mg.g.FindPath(func(m module.Version) bool { + return m.Path == "go" && m.Version == v + })[1:], + Constraint: module.Version{Path: "go", Version: ld.TidyGoVersion}, + } + msg := conflict.Summary() + if cfg.BuildV { + msg = conflict.String() + } + ld.error(errors.New(msg)) + } + } + + if ld.requirements.pruning == pruned { + // We continuously add tidy roots to ld.requirements during loading, so + // at this point the tidy roots (other than possibly the "go" version + // edited above) should be a subset of the roots of ld.requirements, + // ensuring that no new dependencies are brought inside the + // graph-pruning horizon. + // If that is not the case, there is a bug in the loading loop above. + for _, m := range rs.rootModules { + if m.Path == "go" && ld.TidyGoVersion != "" { + continue + } + if v, ok := ld.requirements.rootSelected(m.Path); !ok || v != m.Version { + ld.error(fmt.Errorf("internal error: a requirement on %v is needed but was not added during package loading (selected %s)", m, v)) + } + } + } + + ld.requirements = rs + } + + ld.exitIfErrors(ctx) + } + + // Report errors, if any. + for _, pkg := range ld.pkgs { + if pkg.err == nil { + continue + } + + // Add importer information to checksum errors. + if sumErr := (*ImportMissingSumError)(nil); errors.As(pkg.err, &sumErr) { + if importer := pkg.stack; importer != nil { + sumErr.importer = importer.path + sumErr.importerVersion = importer.mod.Version + sumErr.importerIsTest = importer.testOf != nil + } + } + + if stdErr := (*ImportMissingError)(nil); errors.As(pkg.err, &stdErr) && stdErr.isStd { + // Add importer go version information to import errors of standard + // library packages arising from newer releases. 
+ if importer := pkg.stack; importer != nil { + if v, ok := rawGoVersion.Load(importer.mod); ok && gover.Compare(gover.Local(), v.(string)) < 0 { + stdErr.importerGoVersion = v.(string) + } + } + if ld.SilenceMissingStdImports { + continue + } + } + if ld.SilencePackageErrors { + continue + } + if ld.SilenceNoGoErrors && errors.Is(pkg.err, imports.ErrNoGo) { + continue + } + + ld.error(fmt.Errorf("%s: %w", pkg.stackText(), pkg.err)) + } + + ld.checkMultiplePaths() + return ld +} + +// updateRequirements ensures that ld.requirements is consistent with the +// information gained from ld.pkgs. +// +// In particular: +// +// - Modules that provide packages directly imported from the main module are +// marked as direct, and are promoted to explicit roots. If a needed root +// cannot be promoted due to -mod=readonly or -mod=vendor, the importing +// package is marked with an error. +// +// - If ld scanned the "all" pattern independent of build constraints, it is +// guaranteed to have seen every direct import. Module dependencies that did +// not provide any directly-imported package are then marked as indirect. +// +// - Root dependencies are updated to their selected versions. +// +// The "changed" return value reports whether the update changed the selected +// version of any module that either provided a loaded package or may now +// provide a package that was previously unresolved. +func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err error) { + rs := ld.requirements + + // direct contains the set of modules believed to provide packages directly + // imported by the main module. + var direct map[string]bool + + // If we didn't scan all of the imports from the main module, or didn't use + // imports.AnyTags, then we didn't necessarily load every package that + // contributes “direct” imports — so we can't safely mark existing direct + // dependencies in ld.requirements as indirect-only. Propagate them as direct. 
+ loadedDirect := ld.allPatternIsRoot && reflect.DeepEqual(ld.Tags, imports.AnyTags()) + if loadedDirect { + direct = make(map[string]bool) + } else { + // TODO(bcmills): It seems like a shame to allocate and copy a map here when + // it will only rarely actually vary from rs.direct. Measure this cost and + // maybe avoid the copy. + direct = make(map[string]bool, len(rs.direct)) + for mPath := range rs.direct { + direct[mPath] = true + } + } + + var maxTooNew *gover.TooNewError + for _, pkg := range ld.pkgs { + if pkg.err != nil { + if tooNew := (*gover.TooNewError)(nil); errors.As(pkg.err, &tooNew) { + if maxTooNew == nil || gover.Compare(tooNew.GoVersion, maxTooNew.GoVersion) > 0 { + maxTooNew = tooNew + } + } + } + if pkg.mod.Version != "" || !MainModules.Contains(pkg.mod.Path) { + continue + } + + for _, dep := range pkg.imports { + if !dep.fromExternalModule() { + continue + } + + if inWorkspaceMode() { + // In workspace mode / workspace pruning mode, the roots are the main modules + // rather than the main module's direct dependencies. The check below on the selected + // roots does not apply. + if cfg.BuildMod == "vendor" { + // In workspace vendor mode, we don't need to load the requirements of the workspace + // modules' dependencies so the check below doesn't work. But that's okay, because + // checking whether modules are required directly for the purposes of pruning is + // less important in vendor mode: if we were able to load the package, we have + // everything we need to build the package, and dependencies' tests are pruned out + // of the vendor directory anyway. + continue + } + if mg, err := rs.Graph(ctx); err != nil { + return false, err + } else if _, ok := mg.RequiredBy(dep.mod); !ok { + // dep.mod is not an explicit dependency, but needs to be. + // See comment on error returned below. 
+ pkg.err = &DirectImportFromImplicitDependencyError{ + ImporterPath: pkg.path, + ImportedPath: dep.path, + Module: dep.mod, + } + } + continue + } + + if pkg.err == nil && cfg.BuildMod != "mod" { + if v, ok := rs.rootSelected(dep.mod.Path); !ok || v != dep.mod.Version { + // dep.mod is not an explicit dependency, but needs to be. + // Because we are not in "mod" mode, we will not be able to update it. + // Instead, mark the importing package with an error. + // + // TODO(#41688): The resulting error message fails to include the file + // position of the import statement (because that information is not + // tracked by the module loader). Figure out how to plumb the import + // position through. + pkg.err = &DirectImportFromImplicitDependencyError{ + ImporterPath: pkg.path, + ImportedPath: dep.path, + Module: dep.mod, + } + // cfg.BuildMod does not allow us to change dep.mod to be a direct + // dependency, so don't mark it as such. + continue + } + } + + // dep is a package directly imported by a package or test in the main + // module and loaded from some other module (not the standard library). + // Mark its module as a direct dependency. + direct[dep.mod.Path] = true + } + } + if maxTooNew != nil { + return false, maxTooNew + } + + var addRoots []module.Version + if ld.Tidy { + // When we are tidying a module with a pruned dependency graph, we may need + // to add roots to preserve the versions of indirect, test-only dependencies + // that are upgraded above or otherwise missing from the go.mod files of + // direct dependencies. (For example, the direct dependency might be a very + // stable codebase that predates modules and thus lacks a go.mod file, or + // the author of the direct dependency may have forgotten to commit a change + // to the go.mod file, or may have made an erroneous hand-edit that causes + // it to be untidy.) 
+ // + // Promoting an indirect dependency to a root adds the next layer of its + // dependencies to the module graph, which may increase the selected + // versions of other modules from which we have already loaded packages. + // So after we promote an indirect dependency to a root, we need to reload + // packages, which means another iteration of loading. + // + // As an extra wrinkle, the upgrades due to promoting a root can cause + // previously-resolved packages to become unresolved. For example, the + // module providing an unstable package might be upgraded to a version + // that no longer contains that package. If we then resolve the missing + // package, we might add yet another root that upgrades away some other + // dependency. (The tests in mod_tidy_convergence*.txt illustrate some + // particularly worrisome cases.) + // + // To ensure that this process of promoting, adding, and upgrading roots + // eventually terminates, during iteration we only ever add modules to the + // root set — we only remove irrelevant roots at the very end of + // iteration, after we have already added every root that we plan to need + // in the (eventual) tidy root set. + // + // Since we do not remove any roots during iteration, even if they no + // longer provide any imported packages, the selected versions of the + // roots can only increase and the set of roots can only expand. The set + // of extant root paths is finite and the set of versions of each path is + // finite, so the iteration *must* reach a stable fixed-point. + tidy, err := tidyRoots(ctx, rs, ld.pkgs) + if err != nil { + return false, err + } + addRoots = tidy.rootModules + } + + rs, err = updateRoots(ctx, direct, rs, ld.pkgs, addRoots, ld.AssumeRootsImported) + if err != nil { + // We don't actually know what even the root requirements are supposed to be, + // so we can't proceed with loading. 
Return the error to the caller + return false, err + } + + if rs.GoVersion() != ld.requirements.GoVersion() { + // A change in the selected Go version may or may not affect the set of + // loaded packages, but in some cases it can change the meaning of the "all" + // pattern, the level of pruning in the module graph, and even the set of + // packages present in the standard library. If it has changed, it's best to + // reload packages once more to be sure everything is stable. + changed = true + } else if rs != ld.requirements && !reflect.DeepEqual(rs.rootModules, ld.requirements.rootModules) { + // The roots of the module graph have changed in some way (not just the + // "direct" markings). Check whether the changes affected any of the loaded + // packages. + mg, err := rs.Graph(ctx) + if err != nil { + return false, err + } + for _, pkg := range ld.pkgs { + if pkg.fromExternalModule() && mg.Selected(pkg.mod.Path) != pkg.mod.Version { + changed = true + break + } + if pkg.err != nil { + // Promoting a module to a root may resolve an import that was + // previously missing (by pulling in a previously-prune dependency that + // provides it) or ambiguous (by promoting exactly one of the + // alternatives to a root and ignoring the second-level alternatives) or + // otherwise errored out (by upgrading from a version that cannot be + // fetched to one that can be). + // + // Instead of enumerating all of the possible errors, we'll just check + // whether importFromModules returns nil for the package. + // False-positives are ok: if we have a false-positive here, we'll do an + // extra iteration of package loading this time, but we'll still + // converge when the root set stops changing. + // + // In some sense, we can think of this as ‘upgraded the module providing + // pkg.path from "none" to a version higher than "none"’. 
+ if _, _, _, _, err = importFromModules(ctx, pkg.path, rs, nil, ld.skipImportModFiles); err == nil { + changed = true + break + } + } + } + } + + ld.requirements = rs + return changed, nil +} + +// resolveMissingImports returns a set of modules that could be added as +// dependencies in order to resolve missing packages from pkgs. +// +// The newly-resolved packages are added to the addedModuleFor map, and +// resolveMissingImports returns a map from each new module version to +// the first missing package that module would resolve. +func (ld *loader) resolveMissingImports(ctx context.Context) (modAddedBy map[module.Version]*loadPkg, err error) { + type pkgMod struct { + pkg *loadPkg + mod *module.Version + } + var pkgMods []pkgMod + for _, pkg := range ld.pkgs { + if pkg.err == nil { + continue + } + if pkg.isTest() { + // If we are missing a test, we are also missing its non-test version, and + // we should only add the missing import once. + continue + } + if !errors.As(pkg.err, new(*ImportMissingError)) { + // Leave other errors for Import or load.Packages to report. + continue + } + + pkg := pkg + var mod module.Version + ld.work.Add(func() { + var err error + mod, err = queryImport(ctx, pkg.path, ld.requirements) + if err != nil { + var ime *ImportMissingError + if errors.As(err, &ime) { + for curstack := pkg.stack; curstack != nil; curstack = curstack.stack { + if MainModules.Contains(curstack.mod.Path) { + ime.ImportingMainModule = curstack.mod + break + } + } + } + // pkg.err was already non-nil, so we can reasonably attribute the error + // for pkg to either the original error or the one returned by + // queryImport. The existing error indicates only that we couldn't find + // the package, whereas the query error also explains why we didn't fix + // the problem — so we prefer the latter. 
+ pkg.err = err + } + + // err is nil, but we intentionally leave pkg.err non-nil and pkg.mod + // unset: we still haven't satisfied other invariants of a + // successfully-loaded package, such as scanning and loading the imports + // of that package. If we succeed in resolving the new dependency graph, + // the caller can reload pkg and update the error at that point. + // + // Even then, the package might not be loaded from the version we've + // identified here. The module may be upgraded by some other dependency, + // or by a transitive dependency of mod itself, or — less likely — the + // package may be rejected by an AllowPackage hook or rendered ambiguous + // by some other newly-added or newly-upgraded dependency. + }) + + pkgMods = append(pkgMods, pkgMod{pkg: pkg, mod: &mod}) + } + <-ld.work.Idle() + + modAddedBy = map[module.Version]*loadPkg{} + + var ( + maxTooNew *gover.TooNewError + maxTooNewPkg *loadPkg + ) + for _, pm := range pkgMods { + if tooNew := (*gover.TooNewError)(nil); errors.As(pm.pkg.err, &tooNew) { + if maxTooNew == nil || gover.Compare(tooNew.GoVersion, maxTooNew.GoVersion) > 0 { + maxTooNew = tooNew + maxTooNewPkg = pm.pkg + } + } + } + if maxTooNew != nil { + fmt.Fprintf(os.Stderr, "go: toolchain upgrade needed to resolve %s\n", maxTooNewPkg.path) + return nil, maxTooNew + } + + for _, pm := range pkgMods { + pkg, mod := pm.pkg, *pm.mod + if mod.Path == "" { + continue + } + + fmt.Fprintf(os.Stderr, "go: found %s in %s %s\n", pkg.path, mod.Path, mod.Version) + if modAddedBy[mod] == nil { + modAddedBy[mod] = pkg + } + } + + return modAddedBy, nil +} + +// pkg locates the *loadPkg for path, creating and queuing it for loading if +// needed, and updates its state to reflect the given flags. +// +// The imports of the returned *loadPkg will be loaded asynchronously in the +// ld.work queue, and its test (if requested) will also be populated once +// imports have been resolved. 
When ld.work goes idle, all transitive imports of +// the requested package (and its test, if requested) will have been loaded. +func (ld *loader) pkg(ctx context.Context, path string, flags loadPkgFlags) *loadPkg { + if flags.has(pkgImportsLoaded) { + panic("internal error: (*loader).pkg called with pkgImportsLoaded flag set") + } + + pkg := ld.pkgCache.Do(path, func() *loadPkg { + pkg := &loadPkg{ + path: path, + } + ld.applyPkgFlags(ctx, pkg, flags) + + ld.work.Add(func() { ld.load(ctx, pkg) }) + return pkg + }) + + ld.applyPkgFlags(ctx, pkg, flags) + return pkg +} + +// applyPkgFlags updates pkg.flags to set the given flags and propagate the +// (transitive) effects of those flags, possibly loading or enqueueing further +// packages as a result. +func (ld *loader) applyPkgFlags(ctx context.Context, pkg *loadPkg, flags loadPkgFlags) { + if flags == 0 { + return + } + + if flags.has(pkgInAll) && ld.allPatternIsRoot && !pkg.isTest() { + // This package matches a root pattern by virtue of being in "all". + flags |= pkgIsRoot + } + if flags.has(pkgIsRoot) { + flags |= pkgFromRoot + } + + old := pkg.flags.update(flags) + new := old | flags + if new == old || !new.has(pkgImportsLoaded) { + // We either didn't change the state of pkg, or we don't know anything about + // its dependencies yet. Either way, we can't usefully load its test or + // update its dependencies. + return + } + + if !pkg.isTest() { + // Check whether we should add (or update the flags for) a test for pkg. + // ld.pkgTest is idempotent and extra invocations are inexpensive, + // so it's ok if we call it more than is strictly necessary. + wantTest := false + switch { + case ld.allPatternIsRoot && MainModules.Contains(pkg.mod.Path): + // We are loading the "all" pattern, which includes packages imported by + // tests in the main module. This package is in the main module, so we + // need to identify the imports of its test even if LoadTests is not set. 
+ // + // (We will filter out the extra tests explicitly in computePatternAll.) + wantTest = true + + case ld.allPatternIsRoot && ld.allClosesOverTests && new.has(pkgInAll): + // This variant of the "all" pattern includes imports of tests of every + // package that is itself in "all", and pkg is in "all", so its test is + // also in "all" (as above). + wantTest = true + + case ld.LoadTests && new.has(pkgIsRoot): + // LoadTest explicitly requests tests of “the root packages”. + wantTest = true + } + + if wantTest { + var testFlags loadPkgFlags + if MainModules.Contains(pkg.mod.Path) || (ld.allClosesOverTests && new.has(pkgInAll)) { + // Tests of packages in the main module are in "all", in the sense that + // they cause the packages they import to also be in "all". So are tests + // of packages in "all" if "all" closes over test dependencies. + testFlags |= pkgInAll + } + ld.pkgTest(ctx, pkg, testFlags) + } + } + + if new.has(pkgInAll) && !old.has(pkgInAll|pkgImportsLoaded) { + // We have just marked pkg with pkgInAll, or we have just loaded its + // imports, or both. Now is the time to propagate pkgInAll to the imports. + for _, dep := range pkg.imports { + ld.applyPkgFlags(ctx, dep, pkgInAll) + } + } + + if new.has(pkgFromRoot) && !old.has(pkgFromRoot|pkgImportsLoaded) { + for _, dep := range pkg.imports { + ld.applyPkgFlags(ctx, dep, pkgFromRoot) + } + } +} + +// preloadRootModules loads the module requirements needed to identify the +// selected version of each module providing a package in rootPkgs, +// adding new root modules to the module graph if needed. +func (ld *loader) preloadRootModules(ctx context.Context, rootPkgs []string) (changedBuildList bool) { + needc := make(chan map[module.Version]bool, 1) + needc <- map[module.Version]bool{} + for _, path := range rootPkgs { + path := path + ld.work.Add(func() { + // First, try to identify the module containing the package using only roots. 
+ // + // If the main module is tidy and the package is in "all" — or if we're + // lucky — we can identify all of its imports without actually loading the + // full module graph. + m, _, _, _, err := importFromModules(ctx, path, ld.requirements, nil, ld.skipImportModFiles) + if err != nil { + var missing *ImportMissingError + if errors.As(err, &missing) && ld.ResolveMissingImports { + // This package isn't provided by any selected module. + // If we can find it, it will be a new root dependency. + m, err = queryImport(ctx, path, ld.requirements) + } + if err != nil { + // We couldn't identify the root module containing this package. + // Leave it unresolved; we will report it during loading. + return + } + } + if m.Path == "" { + // The package is in std or cmd. We don't need to change the root set. + return + } + + v, ok := ld.requirements.rootSelected(m.Path) + if !ok || v != m.Version { + // We found the requested package in m, but m is not a root, so + // loadModGraph will not load its requirements. We need to promote the + // module to a root to ensure that any other packages this package + // imports are resolved from correct dependency versions. + // + // (This is the “argument invariant” from + // https://golang.org/design/36460-lazy-module-loading.) + need := <-needc + need[m] = true + needc <- need + } + }) + } + <-ld.work.Idle() + + need := <-needc + if len(need) == 0 { + return false // No roots to add. + } + + toAdd := make([]module.Version, 0, len(need)) + for m := range need { + toAdd = append(toAdd, m) + } + gover.ModSort(toAdd) + + rs, err := updateRoots(ctx, ld.requirements.direct, ld.requirements, nil, toAdd, ld.AssumeRootsImported) + if err != nil { + // We are missing some root dependency, and for some reason we can't load + // enough of the module dependency graph to add the missing root. Package + // loading is doomed to fail, so fail quickly. 
+ ld.error(err) + ld.exitIfErrors(ctx) + return false + } + if reflect.DeepEqual(rs.rootModules, ld.requirements.rootModules) { + // Something is deeply wrong. resolveMissingImports gave us a non-empty + // set of modules to add to the graph, but adding those modules had no + // effect — either they were already in the graph, or updateRoots did not + // add them as requested. + panic(fmt.Sprintf("internal error: adding %v to module graph had no effect on root requirements (%v)", toAdd, rs.rootModules)) + } + + ld.requirements = rs + return true +} + +// load loads an individual package. +func (ld *loader) load(ctx context.Context, pkg *loadPkg) { + var mg *ModuleGraph + if ld.requirements.pruning == unpruned { + var err error + mg, err = ld.requirements.Graph(ctx) + if err != nil { + // We already checked the error from Graph in loadFromRoots and/or + // updateRequirements, so we ignored the error on purpose and we should + // keep trying to push past it. + // + // However, because mg may be incomplete (and thus may select inaccurate + // versions), we shouldn't use it to load packages. Instead, we pass a nil + // *ModuleGraph, which will cause mg to first try loading from only the + // main module and root dependencies. + mg = nil + } + } + + var modroot string + pkg.mod, modroot, pkg.dir, pkg.altMods, pkg.err = importFromModules(ctx, pkg.path, ld.requirements, mg, ld.skipImportModFiles) + if pkg.dir == "" { + return + } + if MainModules.Contains(pkg.mod.Path) { + // Go ahead and mark pkg as in "all". This provides the invariant that a + // package that is *only* imported by other packages in "all" is always + // marked as such before loading its imports. 
+ // + // We don't actually rely on that invariant at the moment, but it may + // improve efficiency somewhat and makes the behavior a bit easier to reason + // about (by reducing churn on the flag bits of dependencies), and costs + // essentially nothing (these atomic flag ops are essentially free compared + // to scanning source code for imports). + ld.applyPkgFlags(ctx, pkg, pkgInAll) + } + if ld.AllowPackage != nil { + if err := ld.AllowPackage(ctx, pkg.path, pkg.mod); err != nil { + pkg.err = err + } + } + + pkg.inStd = (search.IsStandardImportPath(pkg.path) && search.InDir(pkg.dir, cfg.GOROOTsrc) != "") + + var imports, testImports []string + + if cfg.BuildContext.Compiler == "gccgo" && pkg.inStd { + // We can't scan standard packages for gccgo. + } else { + var err error + imports, testImports, err = scanDir(modroot, pkg.dir, ld.Tags) + if err != nil { + pkg.err = err + return + } + } + + pkg.imports = make([]*loadPkg, 0, len(imports)) + var importFlags loadPkgFlags + if pkg.flags.has(pkgInAll) { + importFlags = pkgInAll + } + for _, path := range imports { + if pkg.inStd { + // Imports from packages in "std" and "cmd" should resolve using + // GOROOT/src/vendor even when "std" is not the main module. + path = ld.stdVendor(pkg.path, path) + } + pkg.imports = append(pkg.imports, ld.pkg(ctx, path, importFlags)) + } + pkg.testImports = testImports + + ld.applyPkgFlags(ctx, pkg, pkgImportsLoaded) +} + +// pkgTest locates the test of pkg, creating it if needed, and updates its state +// to reflect the given flags. +// +// pkgTest requires that the imports of pkg have already been loaded (flagged +// with pkgImportsLoaded). 
func (ld *loader) pkgTest(ctx context.Context, pkg *loadPkg, testFlags loadPkgFlags) *loadPkg {
	if pkg.isTest() {
		// A test package has no test of its own.
		panic("pkgTest called on a test package")
	}

	// Concurrent callers may race to create the test package. testOnce
	// guarantees exactly one *loadPkg is constructed; createdTest records
	// whether *this* call was the one that constructed it, because only the
	// creator is responsible for resolving the test's imports below.
	createdTest := false
	pkg.testOnce.Do(func() {
		pkg.test = &loadPkg{
			path:   pkg.path,
			testOf: pkg,
			mod:    pkg.mod,
			dir:    pkg.dir,
			err:    pkg.err,
			inStd:  pkg.inStd,
		}
		ld.applyPkgFlags(ctx, pkg.test, testFlags)
		createdTest = true
	})

	test := pkg.test
	if createdTest {
		test.imports = make([]*loadPkg, 0, len(pkg.testImports))
		var importFlags loadPkgFlags
		if test.flags.has(pkgInAll) {
			// Imports of a package in "all" are themselves in "all".
			importFlags = pkgInAll
		}
		for _, path := range pkg.testImports {
			if pkg.inStd {
				path = ld.stdVendor(test.path, path)
			}
			test.imports = append(test.imports, ld.pkg(ctx, path, importFlags))
		}
		// The raw import paths have been resolved to *loadPkg entries;
		// drop the string slice so it can be garbage-collected.
		pkg.testImports = nil
		ld.applyPkgFlags(ctx, test, pkgImportsLoaded)
	} else {
		// Someone else created the test; just merge in the caller's flags.
		ld.applyPkgFlags(ctx, test, testFlags)
	}

	return test
}

// stdVendor returns the canonical import path for the package with the given
// path when imported from the standard-library package at parentPath.
func (ld *loader) stdVendor(parentPath, path string) string {
	if search.IsStandardImportPath(path) {
		// Standard-library imports are never vendored.
		return path
	}

	if str.HasPathPrefix(parentPath, "cmd") {
		// Imports within the 'cmd' tree resolve against GOROOT/src/cmd/vendor
		// unless 'cmd' itself is the main module being developed.
		if !ld.VendorModulesInGOROOTSrc || !MainModules.Contains("cmd") {
			vendorPath := pathpkg.Join("cmd", "vendor", path)

			if _, err := os.Stat(filepath.Join(cfg.GOROOTsrc, filepath.FromSlash(vendorPath))); err == nil {
				return vendorPath
			}
		}
	} else if !ld.VendorModulesInGOROOTSrc || !MainModules.Contains("std") || str.HasPathPrefix(parentPath, "vendor") {
		// If we are outside of the 'std' module, resolve imports from within 'std'
		// to the vendor directory.
		//
		// Do the same for importers beginning with the prefix 'vendor/' even if we
		// are *inside* of the 'std' module: the 'vendor/' packages that resolve
		// globally from GOROOT/src/vendor (and are listed as part of 'go list std')
		// are distinct from the real module dependencies, and cannot import
		// internal packages from the real module.
		//
		// (Note that although the 'vendor/' packages match the 'std' *package*
		// pattern, they are not part of the std *module*, and do not affect
		// 'go mod tidy' and similar module commands when working within std.)
		vendorPath := pathpkg.Join("vendor", path)
		if _, err := os.Stat(filepath.Join(cfg.GOROOTsrc, filepath.FromSlash(vendorPath))); err == nil {
			return vendorPath
		}
	}

	// Not vendored: resolve from modules.
	return path
}

// computePatternAll returns the list of packages matching pattern "all",
// starting with a list of the import paths for the packages in the main module.
// Test packages (testOf != nil) are excluded: "all" contains only the
// non-test packages flagged pkgInAll, sorted for deterministic output.
func (ld *loader) computePatternAll() (all []string) {
	for _, pkg := range ld.pkgs {
		if pkg.flags.has(pkgInAll) && !pkg.isTest() {
			all = append(all, pkg.path)
		}
	}
	sort.Strings(all)
	return all
}

// checkMultiplePaths verifies that a given module path is used as itself
// or as a replacement for another module, but not both at the same time.
//
// (See https://golang.org/issue/26607 and https://golang.org/issue/34650.)
func (ld *loader) checkMultiplePaths() {
	// Prefer the full build list if the module graph has already been loaded;
	// otherwise fall back to checking only the root modules.
	mods := ld.requirements.rootModules
	if cached := ld.requirements.graph.Load(); cached != nil {
		if mg := cached.mg; mg != nil {
			mods = mg.BuildList()
		}
	}

	// firstPath maps each post-replacement module to the first declared path
	// that resolved to it; a second, different declared path is an error.
	firstPath := map[module.Version]string{}
	for _, mod := range mods {
		src := resolveReplacement(mod)
		if prev, ok := firstPath[src]; !ok {
			firstPath[src] = mod.Path
		} else if prev != mod.Path {
			ld.error(fmt.Errorf("%s@%s used for two different module paths (%s and %s)", src.Path, src.Version, prev, mod.Path))
		}
	}
}

// checkTidyCompatibility emits an error if any package would be loaded from a
// different module under rs than under ld.requirements.
func (ld *loader) checkTidyCompatibility(ctx context.Context, rs *Requirements, compatVersion string) {
	goVersion := rs.GoVersion()
	suggestUpgrade := false
	suggestEFlag := false
	// suggestFixes prints advice for resolving the mismatches found below.
	// It reads suggestUpgrade/suggestEFlag, which are set while diagnosing.
	suggestFixes := func() {
		if ld.AllowErrors {
			// The user is explicitly ignoring these errors, so don't bother them with
			// other options.
			return
		}

		// We print directly to os.Stderr because this information is advice about
		// how to fix errors, not actually an error itself.
		// (The actual errors should have been logged already.)

		fmt.Fprintln(os.Stderr)

		goFlag := ""
		if goVersion != MainModules.GoVersion() {
			goFlag = " -go=" + goVersion
		}

		compatFlag := ""
		if compatVersion != gover.Prev(goVersion) {
			compatFlag = " -compat=" + compatVersion
		}
		if suggestUpgrade {
			eDesc := ""
			eFlag := ""
			if suggestEFlag {
				eDesc = ", leaving some packages unresolved"
				eFlag = " -e"
			}
			fmt.Fprintf(os.Stderr, "To upgrade to the versions selected by go %s%s:\n\tgo mod tidy%s -go=%s && go mod tidy%s -go=%s%s\n", compatVersion, eDesc, eFlag, compatVersion, eFlag, goVersion, compatFlag)
		} else if suggestEFlag {
			// If some packages are missing but no package is upgraded, then we
			// shouldn't suggest upgrading to the Go 1.16 versions explicitly — that
			// wouldn't actually fix anything for Go 1.16 users, and *would* break
			// something for Go 1.17 users.
			fmt.Fprintf(os.Stderr, "To proceed despite packages unresolved in go %s:\n\tgo mod tidy -e%s%s\n", compatVersion, goFlag, compatFlag)
		}

		fmt.Fprintf(os.Stderr, "If reproducibility with go %s is not needed:\n\tgo mod tidy%s -compat=%s\n", compatVersion, goFlag, goVersion)

		// TODO(#46141): Populate the linked wiki page.
		fmt.Fprintf(os.Stderr, "For other options, see:\n\thttps://golang.org/doc/modules/pruning\n")
	}

	mg, err := rs.Graph(ctx)
	if err != nil {
		ld.error(fmt.Errorf("error loading go %s module graph: %w", compatVersion, err))
		ld.switchIfErrors(ctx)
		suggestFixes()
		ld.exitIfErrors(ctx)
		return
	}

	// Re-resolve packages in parallel.
	//
	// We re-resolve each package — rather than just checking versions — to ensure
	// that we have fetched module source code (and, importantly, checksums for
	// that source code) for all modules that are necessary to ensure that imports
	// are unambiguous. That also produces clearer diagnostics, since we can say
	// exactly what happened to the package if it became ambiguous or disappeared
	// entirely.
	//
	// We re-resolve the packages in parallel because this process involves disk
	// I/O to check for package sources, and because the process of checking for
	// ambiguous imports may require us to download additional modules that are
	// otherwise pruned out in Go 1.17 — we don't want to block progress on other
	// packages while we wait for a single new download.
	type mismatch struct {
		mod module.Version
		err error
	}
	// mismatchMu is a one-element channel used as a mutex guarding the
	// mismatches map shared by the parallel workers below.
	mismatchMu := make(chan map[*loadPkg]mismatch, 1)
	mismatchMu <- map[*loadPkg]mismatch{}
	for _, pkg := range ld.pkgs {
		if pkg.mod.Path == "" && pkg.err == nil {
			// This package is from the standard library (which does not vary based on
			// the module graph).
			continue
		}

		pkg := pkg // capture per-iteration value for the closure (pre-Go 1.22 semantics)
		ld.work.Add(func() {
			mod, _, _, _, err := importFromModules(ctx, pkg.path, rs, mg, ld.skipImportModFiles)
			if mod != pkg.mod {
				mismatches := <-mismatchMu
				mismatches[pkg] = mismatch{mod: mod, err: err}
				mismatchMu <- mismatches
			}
		})
	}
	<-ld.work.Idle()

	mismatches := <-mismatchMu
	if len(mismatches) == 0 {
		// Since we're running as part of 'go mod tidy', the roots of the module
		// graph should contain only modules that are relevant to some package in
		// the package graph. We checked every package in the package graph and
		// didn't find any mismatches, so that must mean that all of the roots of
		// the module graph are also consistent.
		//
		// If we're wrong, Go 1.16 in -mod=readonly mode will error out with
		// "updates to go.mod needed", which would be very confusing. So instead,
		// we'll double-check that our reasoning above actually holds — if it
		// doesn't, we'll emit an internal error and hopefully the user will report
		// it as a bug.
		for _, m := range ld.requirements.rootModules {
			if v := mg.Selected(m.Path); v != m.Version {
				fmt.Fprintln(os.Stderr)
				base.Fatalf("go: internal error: failed to diagnose selected-version mismatch for module %s: go %s selects %s, but go %s selects %s\n\tPlease report this at https://golang.org/issue.", m.Path, goVersion, m.Version, compatVersion, v)
			}
		}
		return
	}

	// Iterate over the packages (instead of the mismatches map) to emit errors in
	// deterministic order.
	for _, pkg := range ld.pkgs {
		mismatch, ok := mismatches[pkg]
		if !ok {
			continue
		}

		if pkg.isTest() {
			// We already did (or will) report an error for the package itself,
			// so don't report a duplicate (and more verbose) error for its test.
			if _, ok := mismatches[pkg.testOf]; !ok {
				base.Fatalf("go: internal error: mismatch recorded for test %s, but not its non-test package", pkg.path)
			}
			continue
		}

		switch {
		case mismatch.err != nil:
			// pkg resolved successfully, but errors out using the requirements in rs.
			//
			// This could occur because the import is provided by a single root (and
			// is thus unambiguous in a main module with a pruned module graph) and
			// also one or more transitive dependencies (and is ambiguous with an
			// unpruned graph).
			//
			// It could also occur because some transitive dependency upgrades the
			// module that previously provided the package to a version that no
			// longer does, or to a version for which the module source code (but
			// not the go.mod file in isolation) has a checksum error.
			if missing := (*ImportMissingError)(nil); errors.As(mismatch.err, &missing) {
				selected := module.Version{
					Path:    pkg.mod.Path,
					Version: mg.Selected(pkg.mod.Path),
				}
				ld.error(fmt.Errorf("%s loaded from %v,\n\tbut go %s would fail to locate it in %s", pkg.stackText(), pkg.mod, compatVersion, selected))
			} else {
				if ambiguous := (*AmbiguousImportError)(nil); errors.As(mismatch.err, &ambiguous) {
					// TODO: Is this check needed?
				}
				ld.error(fmt.Errorf("%s loaded from %v,\n\tbut go %s would fail to locate it:\n\t%v", pkg.stackText(), pkg.mod, compatVersion, mismatch.err))
			}

			suggestEFlag = true

			// Even if we press ahead with the '-e' flag, the older version will
			// error out in readonly mode if it thinks the go.mod file contains
			// any *explicit* dependency that is not at its selected version,
			// even if that dependency is not relevant to any package being loaded.
			//
			// We check for that condition here. If all of the roots are consistent
			// the '-e' flag suffices, but otherwise we need to suggest an upgrade.
			if !suggestUpgrade {
				for _, m := range ld.requirements.rootModules {
					if v := mg.Selected(m.Path); v != m.Version {
						suggestUpgrade = true
						break
					}
				}
			}

		case pkg.err != nil:
			// pkg had an error in with a pruned module graph (presumably suppressed
			// with the -e flag), but the error went away using an unpruned graph.
			//
			// This is possible, if, say, the import is unresolved in the pruned graph
			// (because the "latest" version of each candidate module either is
			// unavailable or does not contain the package), but is resolved in the
			// unpruned graph due to a newer-than-latest dependency that is normally
			// pruned out.
			//
			// This could also occur if the source code for the module providing the
			// package in the pruned graph has a checksum error, but the unpruned
			// graph upgrades that module to a version with a correct checksum.
			//
			// pkg.err should have already been logged elsewhere — along with a
			// stack trace — so log only the import path and non-error info here.
			suggestUpgrade = true
			ld.error(fmt.Errorf("%s failed to load from any module,\n\tbut go %s would load it from %v", pkg.path, compatVersion, mismatch.mod))

		case pkg.mod != mismatch.mod:
			// The package is loaded successfully by both Go versions, but from a
			// different module in each. This could lead to subtle (and perhaps even
			// unnoticed!) variations in behavior between builds with different
			// toolchains.
			suggestUpgrade = true
			ld.error(fmt.Errorf("%s loaded from %v,\n\tbut go %s would select %v\n", pkg.stackText(), pkg.mod, compatVersion, mismatch.mod.Version))

		default:
			base.Fatalf("go: internal error: mismatch recorded for package %s, but no differences found", pkg.path)
		}
	}

	ld.switchIfErrors(ctx)
	suggestFixes()
	ld.exitIfErrors(ctx)
}

// scanDir is like imports.ScanDir but elides known magic imports from the list,
// so that we do not go looking for packages that don't really exist.
//
// The standard magic import is "C", for cgo.
//
// The only other known magic imports are appengine and appengine/*.
// These are so old that they predate "go get" and did not use URL-like paths.
// Most code today now uses google.golang.org/appengine instead,
// but not all code has been so updated. When we mostly ignore build tags
// during "go vendor", we look into "// +build appengine" files and
// may see these legacy imports. We drop them so that the module
// search does not look for modules to try to satisfy them.
func scanDir(modroot string, dir string, tags map[string]bool) (imports_, testImports []string, err error) {
	// Fast path: consult the module index if this directory is indexed.
	// ErrNotIndexed falls through to a real directory scan; any other index
	// error is returned as-is.
	if ip, mierr := modindex.GetPackage(modroot, dir); mierr == nil {
		imports_, testImports, err = ip.ScanDir(tags)
		goto Happy
	} else if !errors.Is(mierr, modindex.ErrNotIndexed) {
		return nil, nil, mierr
	}

	imports_, testImports, err = imports.ScanDir(dir, tags)
Happy:

	// filter removes the magic imports in place, reusing the backing array.
	filter := func(x []string) []string {
		w := 0
		for _, pkg := range x {
			if pkg != "C" && pkg != "appengine" && !strings.HasPrefix(pkg, "appengine/") &&
				pkg != "appengine_internal" && !strings.HasPrefix(pkg, "appengine_internal/") {
				x[w] = pkg
				w++
			}
		}
		return x[:w]
	}

	return filter(imports_), filter(testImports), err
}

// buildStacks computes minimal import stacks for each package,
// for use in error messages. When it completes, packages that
// are part of the original root set have pkg.stack == nil,
// and other packages have pkg.stack pointing at the next
// package up the import stack in their minimal chain.
// As a side effect, buildStacks also constructs ld.pkgs,
// the list of all packages loaded.
func (ld *loader) buildStacks() {
	if len(ld.pkgs) > 0 {
		// buildStacks must run exactly once, on an empty ld.pkgs.
		panic("buildStacks")
	}
	for _, pkg := range ld.roots {
		pkg.stack = pkg // sentinel to avoid processing in next loop
		ld.pkgs = append(ld.pkgs, pkg)
	}
	// Breadth-first walk: because ld.pkgs is processed in append order,
	// the first stack assigned to a package is a minimal one.
	for i := 0; i < len(ld.pkgs); i++ { // not range: appending to ld.pkgs in loop
		pkg := ld.pkgs[i]
		for _, next := range pkg.imports {
			if next.stack == nil {
				next.stack = pkg
				ld.pkgs = append(ld.pkgs, next)
			}
		}
		if next := pkg.test; next != nil && next.stack == nil {
			next.stack = pkg
			ld.pkgs = append(ld.pkgs, next)
		}
	}
	// Clear the sentinels: roots have no package above them in the stack.
	for _, pkg := range ld.roots {
		pkg.stack = nil
	}
}

// stackText builds the import stack text to use when
// reporting an error in pkg. It has the general form
//
//	root imports
//		other imports
//		other2 tested by
//		other2.test imports
//		pkg
func (pkg *loadPkg) stackText() string {
	// Collect the chain from pkg up to its root, then print it top-down.
	var stack []*loadPkg
	for p := pkg; p != nil; p = p.stack {
		stack = append(stack, p)
	}

	var buf strings.Builder
	for i := len(stack) - 1; i >= 0; i-- {
		p := stack[i]
		fmt.Fprint(&buf, p.path)
		if p.testOf != nil {
			fmt.Fprint(&buf, ".test")
		}
		if i > 0 {
			// The connective depends on whether the next entry down is the
			// test of this package or an ordinary import.
			if stack[i-1].testOf == p {
				fmt.Fprint(&buf, " tested by\n\t")
			} else {
				fmt.Fprint(&buf, " imports\n\t")
			}
		}
	}
	return buf.String()
}

// why returns the text to use in "go mod why" output about the given package.
// It is less ornate than the stackText but contains the same information.
func (pkg *loadPkg) why() string {
	var buf strings.Builder
	var stack []*loadPkg
	for p := pkg; p != nil; p = p.stack {
		stack = append(stack, p)
	}

	// Print root first, one entry per line; tests are shown as "<pkg>.test".
	for i := len(stack) - 1; i >= 0; i-- {
		p := stack[i]
		if p.testOf != nil {
			fmt.Fprintf(&buf, "%s.test\n", p.testOf.path)
		} else {
			fmt.Fprintf(&buf, "%s\n", p.path)
		}
	}
	return buf.String()
}

// Why returns the "go mod why" output stanza for the given package,
// without the leading # comment.
// The package graph must have been loaded already, usually by LoadPackages.
// If there is no reason for the package to be in the current build,
// Why returns an empty string.
func Why(path string) string {
	pkg, ok := loaded.pkgCache.Get(path)
	if !ok {
		return ""
	}
	return pkg.why()
}

// WhyDepth returns the number of steps in the Why listing.
// If there is no reason for the package to be in the current build,
// WhyDepth returns 0.
+func WhyDepth(path string) int { + n := 0 + pkg, _ := loaded.pkgCache.Get(path) + for p := pkg; p != nil; p = p.stack { + n++ + } + return n +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/modfile.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/modfile.go new file mode 100644 index 0000000000000000000000000000000000000000..1d6b28db19ebd6bbf51524a503e42883388c3e53 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/modfile.go @@ -0,0 +1,820 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modload + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "unicode" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/fsys" + "cmd/go/internal/gover" + "cmd/go/internal/lockedfile" + "cmd/go/internal/modfetch" + "cmd/go/internal/par" + "cmd/go/internal/trace" + + "golang.org/x/mod/modfile" + "golang.org/x/mod/module" +) + +// ReadModFile reads and parses the mod file at gomod. ReadModFile properly applies the +// overlay, locks the file while reading, and applies fix, if applicable. +func ReadModFile(gomod string, fix modfile.VersionFixer) (data []byte, f *modfile.File, err error) { + gomod = base.ShortPath(gomod) // use short path in any errors + if gomodActual, ok := fsys.OverlayPath(gomod); ok { + // Don't lock go.mod if it's part of the overlay. + // On Plan 9, locking requires chmod, and we don't want to modify any file + // in the overlay. See #44700. + data, err = os.ReadFile(gomodActual) + } else { + data, err = lockedfile.Read(gomodActual) + } + if err != nil { + return nil, nil, err + } + + f, err = modfile.Parse(gomod, data, fix) + if err != nil { + // Errors returned by modfile.Parse begin with file:line. 
+ return nil, nil, fmt.Errorf("errors parsing %s:\n%w", gomod, err) + } + if f.Go != nil && gover.Compare(f.Go.Version, gover.Local()) > 0 { + toolchain := "" + if f.Toolchain != nil { + toolchain = f.Toolchain.Name + } + return nil, nil, &gover.TooNewError{What: gomod, GoVersion: f.Go.Version, Toolchain: toolchain} + } + if f.Module == nil { + // No module declaration. Must add module path. + return nil, nil, fmt.Errorf("error reading %s: missing module declaration. To specify the module path:\n\tgo mod edit -module=example.com/mod", gomod) + } + + return data, f, err +} + +// A modFileIndex is an index of data corresponding to a modFile +// at a specific point in time. +type modFileIndex struct { + data []byte + dataNeedsFix bool // true if fixVersion applied a change while parsing data + module module.Version + goVersion string // Go version (no "v" or "go" prefix) + toolchain string + require map[module.Version]requireMeta + replace map[module.Version]module.Version + exclude map[module.Version]bool +} + +type requireMeta struct { + indirect bool +} + +// A modPruning indicates whether transitive dependencies of Go 1.17 dependencies +// are pruned out of the module subgraph rooted at a given module. +// (See https://golang.org/ref/mod#graph-pruning.) 
+type modPruning uint8 + +const ( + pruned modPruning = iota // transitive dependencies of modules at go 1.17 and higher are pruned out + unpruned // no transitive dependencies are pruned out + workspace // pruned to the union of modules in the workspace +) + +func (p modPruning) String() string { + switch p { + case pruned: + return "pruned" + case unpruned: + return "unpruned" + case workspace: + return "workspace" + default: + return fmt.Sprintf("%T(%d)", p, p) + } +} + +func pruningForGoVersion(goVersion string) modPruning { + if gover.Compare(goVersion, gover.ExplicitIndirectVersion) < 0 { + // The go.mod file does not duplicate relevant information about transitive + // dependencies, so they cannot be pruned out. + return unpruned + } + return pruned +} + +// CheckAllowed returns an error equivalent to ErrDisallowed if m is excluded by +// the main module's go.mod or retracted by its author. Most version queries use +// this to filter out versions that should not be used. +func CheckAllowed(ctx context.Context, m module.Version) error { + if err := CheckExclusions(ctx, m); err != nil { + return err + } + if err := CheckRetractions(ctx, m); err != nil { + return err + } + return nil +} + +// ErrDisallowed is returned by version predicates passed to Query and similar +// functions to indicate that a version should not be considered. +var ErrDisallowed = errors.New("disallowed module version") + +// CheckExclusions returns an error equivalent to ErrDisallowed if module m is +// excluded by the main module's go.mod file. 
func CheckExclusions(ctx context.Context, m module.Version) error {
	// In workspace mode several main modules may each carry exclusions;
	// any one of them suffices to exclude m.
	for _, mainModule := range MainModules.Versions() {
		if index := MainModules.Index(mainModule); index != nil && index.exclude[m] {
			return module.VersionError(m, errExcluded)
		}
	}
	return nil
}

var errExcluded = &excludedError{}

type excludedError struct{}

func (e *excludedError) Error() string     { return "excluded by go.mod" }
func (e *excludedError) Is(err error) bool { return err == ErrDisallowed }

// CheckRetractions returns an error if module m has been retracted by
// its author.
func CheckRetractions(ctx context.Context, m module.Version) (err error) {
	// On the way out, rewrap any error that is not itself a retraction so
	// that it is attributed to m (the version being checked) rather than to
	// the module version the retractions were loaded from.
	defer func() {
		if retractErr := (*ModuleRetractedError)(nil); err == nil || errors.As(err, &retractErr) {
			return
		}
		// Attribute the error to the version being checked, not the version from
		// which the retractions were to be loaded.
		if mErr := (*module.ModuleError)(nil); errors.As(err, &mErr) {
			err = mErr.Err
		}
		err = &retractionLoadingError{m: m, err: err}
	}()

	if m.Version == "" {
		// Main module, standard library, or file replacement module.
		// Cannot be retracted.
		return nil
	}
	if repl := Replacement(module.Version{Path: m.Path}); repl.Path != "" {
		// All versions of the module were replaced.
		// Don't load retractions, since we'd just load the replacement.
		return nil
	}

	// Find the latest available version of the module, and load its go.mod. If
	// the latest version is replaced, we'll load the replacement.
	//
	// If there's an error loading the go.mod, we'll return it here. These errors
	// should generally be ignored by callers since they happen frequently when
	// we're offline. These errors are not equivalent to ErrDisallowed, so they
	// may be distinguished from retraction errors.
	//
	// We load the raw file here: the go.mod file may have a different module
	// path that we expect if the module or its repository was renamed.
	// We still want to apply retractions to other aliases of the module.
	rm, err := queryLatestVersionIgnoringRetractions(ctx, m.Path)
	if err != nil {
		return err
	}
	summary, err := rawGoModSummary(rm)
	if err != nil && !errors.Is(err, gover.ErrTooNew) {
		return err
	}

	// A version is retracted if it falls inside any [Low, High] retraction
	// interval; collect the rationales of every matching interval.
	var rationale []string
	isRetracted := false
	for _, r := range summary.retract {
		if gover.ModCompare(m.Path, r.Low, m.Version) <= 0 && gover.ModCompare(m.Path, m.Version, r.High) <= 0 {
			isRetracted = true
			if r.Rationale != "" {
				rationale = append(rationale, r.Rationale)
			}
		}
	}
	if isRetracted {
		return module.VersionError(m, &ModuleRetractedError{Rationale: rationale})
	}
	return nil
}

// A ModuleRetractedError indicates that the module version it annotates was
// retracted by its author. It compares equal to ErrDisallowed via errors.Is.
type ModuleRetractedError struct {
	Rationale []string
}

func (e *ModuleRetractedError) Error() string {
	msg := "retracted by module author"
	if len(e.Rationale) > 0 {
		// This is meant to be a short error printed on a terminal, so just
		// print the first rationale.
		msg += ": " + ShortMessage(e.Rationale[0], "retracted by module author")
	}
	return msg
}

func (e *ModuleRetractedError) Is(err error) bool {
	return err == ErrDisallowed
}

// A retractionLoadingError wraps an error encountered while loading the
// retractions for module m (for example, a network failure).
type retractionLoadingError struct {
	m   module.Version
	err error
}

func (e *retractionLoadingError) Error() string {
	return fmt.Sprintf("loading module retractions for %v: %v", e.m, e.err)
}

func (e *retractionLoadingError) Unwrap() error {
	return e.err
}

// ShortMessage returns a string from go.mod (for example, a retraction
// rationale or deprecation message) that is safe to print in a terminal.
//
// If the given string is empty, ShortMessage returns the given default. If the
// given string is too long or contains non-printable characters, ShortMessage
// returns a hard-coded string.
+func ShortMessage(message, emptyDefault string) string { + const maxLen = 500 + if i := strings.Index(message, "\n"); i >= 0 { + message = message[:i] + } + message = strings.TrimSpace(message) + if message == "" { + return emptyDefault + } + if len(message) > maxLen { + return "(message omitted: too long)" + } + for _, r := range message { + if !unicode.IsGraphic(r) && !unicode.IsSpace(r) { + return "(message omitted: contains non-printable characters)" + } + } + // NOTE: the go.mod parser rejects invalid UTF-8, so we don't check that here. + return message +} + +// CheckDeprecation returns a deprecation message from the go.mod file of the +// latest version of the given module. Deprecation messages are comments +// before or on the same line as the module directives that start with +// "Deprecated:" and run until the end of the paragraph. +// +// CheckDeprecation returns an error if the message can't be loaded. +// CheckDeprecation returns "", nil if there is no deprecation message. +func CheckDeprecation(ctx context.Context, m module.Version) (deprecation string, err error) { + defer func() { + if err != nil { + err = fmt.Errorf("loading deprecation for %s: %w", m.Path, err) + } + }() + + if m.Version == "" { + // Main module, standard library, or file replacement module. + // Don't look up deprecation. + return "", nil + } + if repl := Replacement(module.Version{Path: m.Path}); repl.Path != "" { + // All versions of the module were replaced. + // We'll look up deprecation separately for the replacement. 
+ return "", nil + } + + latest, err := queryLatestVersionIgnoringRetractions(ctx, m.Path) + if err != nil { + return "", err + } + summary, err := rawGoModSummary(latest) + if err != nil && !errors.Is(err, gover.ErrTooNew) { + return "", err + } + return summary.deprecated, nil +} + +func replacement(mod module.Version, replace map[module.Version]module.Version) (fromVersion string, to module.Version, ok bool) { + if r, ok := replace[mod]; ok { + return mod.Version, r, true + } + if r, ok := replace[module.Version{Path: mod.Path}]; ok { + return "", r, true + } + return "", module.Version{}, false +} + +// Replacement returns the replacement for mod, if any. If the path in the +// module.Version is relative it's relative to the single main module outside +// workspace mode, or the workspace's directory in workspace mode. +func Replacement(mod module.Version) module.Version { + r, foundModRoot, _ := replacementFrom(mod) + return canonicalizeReplacePath(r, foundModRoot) +} + +// replacementFrom returns the replacement for mod, if any, the modroot of the replacement if it appeared in a go.mod, +// and the source of the replacement. The replacement is relative to the go.work or go.mod file it appears in. +func replacementFrom(mod module.Version) (r module.Version, modroot string, fromFile string) { + foundFrom, found, foundModRoot := "", module.Version{}, "" + if MainModules == nil { + return module.Version{}, "", "" + } else if MainModules.Contains(mod.Path) && mod.Version == "" { + // Don't replace the workspace version of the main module. 
+ return module.Version{}, "", "" + } + if _, r, ok := replacement(mod, MainModules.WorkFileReplaceMap()); ok { + return r, "", workFilePath + } + for _, v := range MainModules.Versions() { + if index := MainModules.Index(v); index != nil { + if from, r, ok := replacement(mod, index.replace); ok { + modRoot := MainModules.ModRoot(v) + if foundModRoot != "" && foundFrom != from && found != r { + base.Errorf("conflicting replacements found for %v in workspace modules defined by %v and %v", + mod, modFilePath(foundModRoot), modFilePath(modRoot)) + return found, foundModRoot, modFilePath(foundModRoot) + } + found, foundModRoot = r, modRoot + } + } + } + return found, foundModRoot, modFilePath(foundModRoot) +} + +func replaceRelativeTo() string { + if workFilePath := WorkFilePath(); workFilePath != "" { + return filepath.Dir(workFilePath) + } + return MainModules.ModRoot(MainModules.mustGetSingleMainModule()) +} + +// canonicalizeReplacePath ensures that relative, on-disk, replaced module paths +// are relative to the workspace directory (in workspace mode) or to the module's +// directory (in module mode, as they already are). +func canonicalizeReplacePath(r module.Version, modRoot string) module.Version { + if filepath.IsAbs(r.Path) || r.Version != "" || modRoot == "" { + return r + } + workFilePath := WorkFilePath() + if workFilePath == "" { + return r + } + abs := filepath.Join(modRoot, r.Path) + if rel, err := filepath.Rel(filepath.Dir(workFilePath), abs); err == nil { + return module.Version{Path: ToDirectoryPath(rel), Version: r.Version} + } + // We couldn't make the version's path relative to the workspace's path, + // so just return the absolute path. It's the best we can do. + return module.Version{Path: ToDirectoryPath(abs), Version: r.Version} +} + +// resolveReplacement returns the module actually used to load the source code +// for m: either m itself, or the replacement for m (iff m is replaced). 
+// It also returns the modroot of the module providing the replacement if +// one was found. +func resolveReplacement(m module.Version) module.Version { + if r := Replacement(m); r.Path != "" { + return r + } + return m +} + +func toReplaceMap(replacements []*modfile.Replace) map[module.Version]module.Version { + replaceMap := make(map[module.Version]module.Version, len(replacements)) + for _, r := range replacements { + if prev, dup := replaceMap[r.Old]; dup && prev != r.New { + base.Fatalf("go: conflicting replacements for %v:\n\t%v\n\t%v", r.Old, prev, r.New) + } + replaceMap[r.Old] = r.New + } + return replaceMap +} + +// indexModFile rebuilds the index of modFile. +// If modFile has been changed since it was first read, +// modFile.Cleanup must be called before indexModFile. +func indexModFile(data []byte, modFile *modfile.File, mod module.Version, needsFix bool) *modFileIndex { + i := new(modFileIndex) + i.data = data + i.dataNeedsFix = needsFix + + i.module = module.Version{} + if modFile.Module != nil { + i.module = modFile.Module.Mod + } + + i.goVersion = "" + if modFile.Go == nil { + rawGoVersion.Store(mod, "") + } else { + i.goVersion = modFile.Go.Version + rawGoVersion.Store(mod, modFile.Go.Version) + } + if modFile.Toolchain != nil { + i.toolchain = modFile.Toolchain.Name + } + + i.require = make(map[module.Version]requireMeta, len(modFile.Require)) + for _, r := range modFile.Require { + i.require[r.Mod] = requireMeta{indirect: r.Indirect} + } + + i.replace = toReplaceMap(modFile.Replace) + + i.exclude = make(map[module.Version]bool, len(modFile.Exclude)) + for _, x := range modFile.Exclude { + i.exclude[x.Mod] = true + } + + return i +} + +// modFileIsDirty reports whether the go.mod file differs meaningfully +// from what was indexed. +// If modFile has been changed (even cosmetically) since it was first read, +// modFile.Cleanup must be called before modFileIsDirty. 
+func (i *modFileIndex) modFileIsDirty(modFile *modfile.File) bool { + if i == nil { + return modFile != nil + } + + if i.dataNeedsFix { + return true + } + + if modFile.Module == nil { + if i.module != (module.Version{}) { + return true + } + } else if modFile.Module.Mod != i.module { + return true + } + + var goV, toolchain string + if modFile.Go != nil { + goV = modFile.Go.Version + } + if modFile.Toolchain != nil { + toolchain = modFile.Toolchain.Name + } + + if goV != i.goVersion || + toolchain != i.toolchain || + len(modFile.Require) != len(i.require) || + len(modFile.Replace) != len(i.replace) || + len(modFile.Exclude) != len(i.exclude) { + return true + } + + for _, r := range modFile.Require { + if meta, ok := i.require[r.Mod]; !ok { + return true + } else if r.Indirect != meta.indirect { + if cfg.BuildMod == "readonly" { + // The module's requirements are consistent; only the "// indirect" + // comments that are wrong. But those are only guaranteed to be accurate + // after a "go mod tidy" — it's a good idea to run those before + // committing a change, but it's certainly not mandatory. + } else { + return true + } + } + } + + for _, r := range modFile.Replace { + if r.New != i.replace[r.Old] { + return true + } + } + + for _, x := range modFile.Exclude { + if !i.exclude[x.Mod] { + return true + } + } + + return false +} + +// rawGoVersion records the Go version parsed from each module's go.mod file. +// +// If a module is replaced, the version of the replacement is keyed by the +// replacement module.Version, not the version being replaced. +var rawGoVersion sync.Map // map[module.Version]string + +// A modFileSummary is a summary of a go.mod file for which we do not need to +// retain complete information — for example, the go.mod file of a dependency +// module. 
+type modFileSummary struct { + module module.Version + goVersion string + toolchain string + pruning modPruning + require []module.Version + retract []retraction + deprecated string +} + +// A retraction consists of a retracted version interval and rationale. +// retraction is like modfile.Retract, but it doesn't point to the syntax tree. +type retraction struct { + modfile.VersionInterval + Rationale string +} + +// goModSummary returns a summary of the go.mod file for module m, +// taking into account any replacements for m, exclusions of its dependencies, +// and/or vendoring. +// +// m must be a version in the module graph, reachable from the Target module. +// In readonly mode, the go.sum file must contain an entry for m's go.mod file +// (or its replacement). goModSummary must not be called for the Target module +// itself, as its requirements may change. Use rawGoModSummary for other +// module versions. +// +// The caller must not modify the returned summary. +func goModSummary(m module.Version) (*modFileSummary, error) { + if m.Version == "" && !inWorkspaceMode() && MainModules.Contains(m.Path) { + panic("internal error: goModSummary called on a main module") + } + if gover.IsToolchain(m.Path) { + return rawGoModSummary(m) + } + + if cfg.BuildMod == "vendor" { + summary := &modFileSummary{ + module: module.Version{Path: m.Path}, + } + + readVendorList(VendorDir()) + if vendorVersion[m.Path] != m.Version { + // This module is not vendored, so packages cannot be loaded from it and + // it cannot be relevant to the build. + return summary, nil + } + + // For every module other than the target, + // return the full list of modules from modules.txt. + // We don't know what versions the vendored module actually relies on, + // so assume that it requires everything. 
+ summary.require = vendorList + return summary, nil + } + + actual := resolveReplacement(m) + if mustHaveSums() && actual.Version != "" { + key := module.Version{Path: actual.Path, Version: actual.Version + "/go.mod"} + if !modfetch.HaveSum(key) { + suggestion := fmt.Sprintf(" for go.mod file; to add it:\n\tgo mod download %s", m.Path) + return nil, module.VersionError(actual, &sumMissingError{suggestion: suggestion}) + } + } + summary, err := rawGoModSummary(actual) + if err != nil { + return nil, err + } + + if actual.Version == "" { + // The actual module is a filesystem-local replacement, for which we have + // unfortunately not enforced any sort of invariants about module lines or + // matching module paths. Anything goes. + // + // TODO(bcmills): Remove this special-case, update tests, and add a + // release note. + } else { + if summary.module.Path == "" { + return nil, module.VersionError(actual, errors.New("parsing go.mod: missing module line")) + } + + // In theory we should only allow mpath to be unequal to m.Path here if the + // version that we fetched lacks an explicit go.mod file: if the go.mod file + // is explicit, then it should match exactly (to ensure that imports of other + // packages within the module are interpreted correctly). Unfortunately, we + // can't determine that information from the module proxy protocol: we'll have + // to leave that validation for when we load actual packages from within the + // module. + if mpath := summary.module.Path; mpath != m.Path && mpath != actual.Path { + return nil, module.VersionError(actual, + fmt.Errorf("parsing go.mod:\n"+ + "\tmodule declares its path as: %s\n"+ + "\t but was required as: %s", mpath, m.Path)) + } + } + + for _, mainModule := range MainModules.Versions() { + if index := MainModules.Index(mainModule); index != nil && len(index.exclude) > 0 { + // Drop any requirements on excluded versions. 
+ // Don't modify the cached summary though, since we might need the raw + // summary separately. + haveExcludedReqs := false + for _, r := range summary.require { + if index.exclude[r] { + haveExcludedReqs = true + break + } + } + if haveExcludedReqs { + s := new(modFileSummary) + *s = *summary + s.require = make([]module.Version, 0, len(summary.require)) + for _, r := range summary.require { + if !index.exclude[r] { + s.require = append(s.require, r) + } + } + summary = s + } + } + } + return summary, nil +} + +// rawGoModSummary returns a new summary of the go.mod file for module m, +// ignoring all replacements that may apply to m and excludes that may apply to +// its dependencies. +// +// rawGoModSummary cannot be used on the main module outside of workspace mode. +// The modFileSummary can still be used for retractions and deprecations +// even if a TooNewError is returned. +func rawGoModSummary(m module.Version) (*modFileSummary, error) { + if gover.IsToolchain(m.Path) { + if m.Path == "go" && gover.Compare(m.Version, gover.GoStrictVersion) >= 0 { + // Declare that go 1.21.3 requires toolchain 1.21.3, + // so that go get knows that downgrading toolchain implies downgrading go + // and similarly upgrading go requires upgrading the toolchain. + return &modFileSummary{module: m, require: []module.Version{{Path: "toolchain", Version: "go" + m.Version}}}, nil + } + return &modFileSummary{module: m}, nil + } + if m.Version == "" && !inWorkspaceMode() && MainModules.Contains(m.Path) { + // Calling rawGoModSummary implies that we are treating m as a module whose + // requirements aren't the roots of the module graph and can't be modified. + // + // If we are not in workspace mode, then the requirements of the main module + // are the roots of the module graph and we expect them to be kept consistent. 
+ panic("internal error: rawGoModSummary called on a main module") + } + if m.Version == "" && inWorkspaceMode() && m.Path == "command-line-arguments" { + // "go work sync" calls LoadModGraph to make sure the module graph is valid. + // If there are no modules in the workspace, we synthesize an empty + // command-line-arguments module, which rawGoModData cannot read a go.mod for. + return &modFileSummary{module: m}, nil + } + return rawGoModSummaryCache.Do(m, func() (*modFileSummary, error) { + summary := new(modFileSummary) + name, data, err := rawGoModData(m) + if err != nil { + return nil, err + } + f, err := modfile.ParseLax(name, data, nil) + if err != nil { + return nil, module.VersionError(m, fmt.Errorf("parsing %s: %v", base.ShortPath(name), err)) + } + if f.Module != nil { + summary.module = f.Module.Mod + summary.deprecated = f.Module.Deprecated + } + if f.Go != nil { + rawGoVersion.LoadOrStore(m, f.Go.Version) + summary.goVersion = f.Go.Version + summary.pruning = pruningForGoVersion(f.Go.Version) + } else { + summary.pruning = unpruned + } + if f.Toolchain != nil { + summary.toolchain = f.Toolchain.Name + } + if len(f.Require) > 0 { + summary.require = make([]module.Version, 0, len(f.Require)+1) + for _, req := range f.Require { + summary.require = append(summary.require, req.Mod) + } + } + + if len(f.Retract) > 0 { + summary.retract = make([]retraction, 0, len(f.Retract)) + for _, ret := range f.Retract { + summary.retract = append(summary.retract, retraction{ + VersionInterval: ret.VersionInterval, + Rationale: ret.Rationale, + }) + } + } + + // This block must be kept at the end of the function because the summary may + // be used for reading retractions or deprecations even if a TooNewError is + // returned. 
+ if summary.goVersion != "" && gover.Compare(summary.goVersion, gover.GoStrictVersion) >= 0 { + summary.require = append(summary.require, module.Version{Path: "go", Version: summary.goVersion}) + if gover.Compare(summary.goVersion, gover.Local()) > 0 { + return summary, &gover.TooNewError{What: "module " + m.String(), GoVersion: summary.goVersion} + } + } + + return summary, nil + }) +} + +var rawGoModSummaryCache par.ErrCache[module.Version, *modFileSummary] + +// rawGoModData returns the content of the go.mod file for module m, ignoring +// all replacements that may apply to m. +// +// rawGoModData cannot be used on the main module outside of workspace mode. +// +// Unlike rawGoModSummary, rawGoModData does not cache its results in memory. +// Use rawGoModSummary instead unless you specifically need these bytes. +func rawGoModData(m module.Version) (name string, data []byte, err error) { + if m.Version == "" { + dir := m.Path + if !filepath.IsAbs(dir) { + if inWorkspaceMode() && MainModules.Contains(m.Path) { + dir = MainModules.ModRoot(m) + } else { + // m is a replacement module with only a file path. + dir = filepath.Join(replaceRelativeTo(), dir) + } + } + name = filepath.Join(dir, "go.mod") + if gomodActual, ok := fsys.OverlayPath(name); ok { + // Don't lock go.mod if it's part of the overlay. + // On Plan 9, locking requires chmod, and we don't want to modify any file + // in the overlay. See #44700. + data, err = os.ReadFile(gomodActual) + } else { + data, err = lockedfile.Read(gomodActual) + } + if err != nil { + return "", nil, module.VersionError(m, fmt.Errorf("reading %s: %v", base.ShortPath(name), err)) + } + } else { + if !gover.ModIsValid(m.Path, m.Version) { + // Disallow the broader queries supported by fetch.Lookup. 
+ base.Fatalf("go: internal error: %s@%s: unexpected invalid semantic version", m.Path, m.Version) + } + name = "go.mod" + data, err = modfetch.GoMod(context.TODO(), m.Path, m.Version) + } + return name, data, err +} + +// queryLatestVersionIgnoringRetractions looks up the latest version of the +// module with the given path without considering retracted or excluded +// versions. +// +// If all versions of the module are replaced, +// queryLatestVersionIgnoringRetractions returns the replacement without making +// a query. +// +// If the queried latest version is replaced, +// queryLatestVersionIgnoringRetractions returns the replacement. +func queryLatestVersionIgnoringRetractions(ctx context.Context, path string) (latest module.Version, err error) { + return latestVersionIgnoringRetractionsCache.Do(path, func() (module.Version, error) { + ctx, span := trace.StartSpan(ctx, "queryLatestVersionIgnoringRetractions "+path) + defer span.Done() + + if repl := Replacement(module.Version{Path: path}); repl.Path != "" { + // All versions of the module were replaced. + // No need to query. + return repl, nil + } + + // Find the latest version of the module. + // Ignore exclusions from the main module's go.mod. + const ignoreSelected = "" + var allowAll AllowedFunc + rev, err := Query(ctx, path, "latest", ignoreSelected, allowAll) + if err != nil { + return module.Version{}, err + } + latest := module.Version{Path: path, Version: rev.Version} + if repl := resolveReplacement(latest); repl.Path != "" { + latest = repl + } + return latest, nil + }) +} + +var latestVersionIgnoringRetractionsCache par.ErrCache[string, module.Version] // path → queryLatestVersionIgnoringRetractions result + +// ToDirectoryPath adds a prefix if necessary so that path in unambiguously +// an absolute path or a relative path starting with a '.' or '..' +// path component. 
+func ToDirectoryPath(path string) string { + if modfile.IsDirectoryPath(path) { + return path + } + // The path is not a relative path or an absolute path, so make it relative + // to the current directory. + return "./" + filepath.ToSlash(filepath.Clean(path)) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/mvs.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/mvs.go new file mode 100644 index 0000000000000000000000000000000000000000..8ae2dbff1e8887364f5afbef7d9ea76008b80196 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/mvs.go @@ -0,0 +1,136 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modload + +import ( + "context" + "errors" + "os" + "sort" + + "cmd/go/internal/gover" + "cmd/go/internal/modfetch" + "cmd/go/internal/modfetch/codehost" + + "golang.org/x/mod/module" +) + +// cmpVersion implements the comparison for versions in the module loader. +// +// It is consistent with gover.ModCompare except that as a special case, +// the version "" is considered higher than all other versions. +// The main module (also known as the target) has no version and must be chosen +// over other versions of the same module in the module dependency graph. +func cmpVersion(p string, v1, v2 string) int { + if v2 == "" { + if v1 == "" { + return 0 + } + return -1 + } + if v1 == "" { + return 1 + } + return gover.ModCompare(p, v1, v2) +} + +// mvsReqs implements mvs.Reqs for module semantic versions, +// with any exclusions or replacements applied internally. +type mvsReqs struct { + roots []module.Version +} + +func (r *mvsReqs) Required(mod module.Version) ([]module.Version, error) { + if mod.Version == "" && MainModules.Contains(mod.Path) { + // Use the build list as it existed when r was constructed, not the current + // global build list. 
+ return r.roots, nil + } + + if mod.Version == "none" { + return nil, nil + } + + summary, err := goModSummary(mod) + if err != nil { + return nil, err + } + return summary.require, nil +} + +// Max returns the maximum of v1 and v2 according to gover.ModCompare. +// +// As a special case, the version "" is considered higher than all other +// versions. The main module (also known as the target) has no version and must +// be chosen over other versions of the same module in the module dependency +// graph. +func (*mvsReqs) Max(p, v1, v2 string) string { + if cmpVersion(p, v1, v2) < 0 { + return v2 + } + return v1 +} + +// Upgrade is a no-op, here to implement mvs.Reqs. +// The upgrade logic for go get -u is in ../modget/get.go. +func (*mvsReqs) Upgrade(m module.Version) (module.Version, error) { + return m, nil +} + +func versions(ctx context.Context, path string, allowed AllowedFunc) (versions []string, origin *codehost.Origin, err error) { + // Note: modfetch.Lookup and repo.Versions are cached, + // so there's no need for us to add extra caching here. + err = modfetch.TryProxies(func(proxy string) error { + repo, err := lookupRepo(ctx, proxy, path) + if err != nil { + return err + } + allVersions, err := repo.Versions(ctx, "") + if err != nil { + return err + } + allowedVersions := make([]string, 0, len(allVersions.List)) + for _, v := range allVersions.List { + if err := allowed(ctx, module.Version{Path: path, Version: v}); err == nil { + allowedVersions = append(allowedVersions, v) + } else if !errors.Is(err, ErrDisallowed) { + return err + } + } + versions = allowedVersions + origin = allVersions.Origin + return nil + }) + return versions, origin, err +} + +// previousVersion returns the tagged version of m.Path immediately prior to +// m.Version, or version "none" if no prior version is tagged. +// +// Since the version of a main module is not found in the version list, +// it has no previous version. 
+func previousVersion(ctx context.Context, m module.Version) (module.Version, error) { + if m.Version == "" && MainModules.Contains(m.Path) { + return module.Version{Path: m.Path, Version: "none"}, nil + } + + list, _, err := versions(ctx, m.Path, CheckAllowed) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return module.Version{Path: m.Path, Version: "none"}, nil + } + return module.Version{}, err + } + i := sort.Search(len(list), func(i int) bool { return gover.ModCompare(m.Path, list[i], m.Version) >= 0 }) + if i > 0 { + return module.Version{Path: m.Path, Version: list[i-1]}, nil + } + return module.Version{Path: m.Path, Version: "none"}, nil +} + +func (*mvsReqs) Previous(m module.Version) (module.Version, error) { + // TODO(golang.org/issue/38714): thread tracing context through MVS. + return previousVersion(context.TODO(), m) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/mvs_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/mvs_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e0a38b98d1e1b2ca7965135a70351758d3e25b2f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/mvs_test.go @@ -0,0 +1,31 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package modload + +import ( + "testing" +) + +func TestReqsMax(t *testing.T) { + type testCase struct { + a, b, want string + } + reqs := new(mvsReqs) + for _, tc := range []testCase{ + {a: "v0.1.0", b: "v0.2.0", want: "v0.2.0"}, + {a: "v0.2.0", b: "v0.1.0", want: "v0.2.0"}, + {a: "", b: "v0.1.0", want: ""}, // "" is Target.Version + {a: "v0.1.0", b: "", want: ""}, + {a: "none", b: "v0.1.0", want: "v0.1.0"}, + {a: "v0.1.0", b: "none", want: "v0.1.0"}, + {a: "none", b: "", want: ""}, + {a: "", b: "none", want: ""}, + } { + max := reqs.Max("", tc.a, tc.b) + if max != tc.want { + t.Errorf("(%T).Max(%q, %q) = %q; want %q", reqs, tc.a, tc.b, max, tc.want) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/query.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/query.go new file mode 100644 index 0000000000000000000000000000000000000000..c4cf55442ba69be0cd10fe777cb086252ba74919 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/query.go @@ -0,0 +1,1343 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package modload + +import ( + "bytes" + "context" + "errors" + "fmt" + "io/fs" + "os" + pathpkg "path" + "slices" + "sort" + "strings" + "sync" + "time" + + "cmd/go/internal/cfg" + "cmd/go/internal/gover" + "cmd/go/internal/imports" + "cmd/go/internal/modfetch" + "cmd/go/internal/modfetch/codehost" + "cmd/go/internal/modinfo" + "cmd/go/internal/search" + "cmd/go/internal/str" + "cmd/go/internal/trace" + "cmd/internal/pkgpattern" + + "golang.org/x/mod/module" + "golang.org/x/mod/semver" +) + +// Query looks up a revision of a given module given a version query string. +// The module must be a complete module path. 
+// The version must take one of the following forms: +// +// - the literal string "latest", denoting the latest available, allowed +// tagged version, with non-prereleases preferred over prereleases. +// If there are no tagged versions in the repo, latest returns the most +// recent commit. +// +// - the literal string "upgrade", equivalent to "latest" except that if +// current is a newer version, current will be returned (see below). +// +// - the literal string "patch", denoting the latest available tagged version +// with the same major and minor number as current (see below). +// +// - v1, denoting the latest available tagged version v1.x.x. +// +// - v1.2, denoting the latest available tagged version v1.2.x. +// +// - v1.2.3, a semantic version string denoting that tagged version. +// +// - v1.2.3, >=v1.2.3, +// denoting the version closest to the target and satisfying the given operator, +// with non-prereleases preferred over prereleases. +// +// - a repository commit identifier or tag, denoting that commit. +// +// current denotes the currently-selected version of the module; it may be +// "none" if no version is currently selected, or "" if the currently-selected +// version is unknown or should not be considered. If query is +// "upgrade" or "patch", current will be returned if it is a newer +// semantic version or a chronologically later pseudo-version than the +// version that would otherwise be chosen. This prevents accidental downgrades +// from newer pre-release or development versions. +// +// The allowed function (which may be nil) is used to filter out unsuitable +// versions (see AllowedFunc documentation for details). If the query refers to +// a specific revision (for example, "master"; see IsRevisionQuery), and the +// revision is disallowed by allowed, Query returns the error. If the query +// does not refer to a specific revision (for example, "latest"), Query +// acts as if versions disallowed by allowed do not exist. 
+// +// If path is the path of the main module and the query is "latest", +// Query returns Target.Version as the version. +// +// Query often returns a non-nil *RevInfo with a non-nil error, +// to provide an info.Origin that can allow the error to be cached. +func Query(ctx context.Context, path, query, current string, allowed AllowedFunc) (*modfetch.RevInfo, error) { + ctx, span := trace.StartSpan(ctx, "modload.Query "+path) + defer span.Done() + + return queryReuse(ctx, path, query, current, allowed, nil) +} + +// queryReuse is like Query but also takes a map of module info that can be reused +// if the validation criteria in Origin are met. +func queryReuse(ctx context.Context, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) { + var info *modfetch.RevInfo + err := modfetch.TryProxies(func(proxy string) (err error) { + info, err = queryProxy(ctx, proxy, path, query, current, allowed, reuse) + return err + }) + return info, err +} + +// checkReuse checks whether a revision of a given module +// for a given module may be reused, according to the information in origin. +func checkReuse(ctx context.Context, m module.Version, old *codehost.Origin) error { + return modfetch.TryProxies(func(proxy string) error { + repo, err := lookupRepo(ctx, proxy, m.Path) + if err != nil { + return err + } + return checkReuseRepo(ctx, repo, m.Path, m.Version, old) + }) +} + +func checkReuseRepo(ctx context.Context, repo versionRepo, path, query string, origin *codehost.Origin) error { + if origin == nil { + return errors.New("nil Origin") + } + + // Ensure that the Origin actually includes enough fields to resolve the query. + // If we got the previous Origin data from a proxy, it may be missing something + // that we would have needed to resolve the query directly from the repo. 
+ switch { + case origin.RepoSum != "": + // A RepoSum is always acceptable, since it incorporates everything + // (and is often associated with an error result). + + case query == module.CanonicalVersion(query): + // This query refers to a specific version, and Go module versions + // are supposed to be cacheable and immutable (confirmed with checksums). + // If the version exists at all, we shouldn't need any extra information + // to identify which commit it resolves to. + // + // It may be associated with a Ref for a semantic-version tag, but if so + // we don't expect that tag to change in the future. We also don't need a + // TagSum: if a tag is removed from some ancestor commit, the version may + // change from valid to invalid, but we're ok with keeping stale versions + // as long as they were valid at some point in the past. + // + // If the version did not successfully resolve, the origin may indicate + // a TagSum and/or RepoSum instead of a Hash, in which case we still need + // to check those to ensure that the error is still applicable. + if origin.Hash == "" && origin.Ref == "" && origin.TagSum == "" { + return errors.New("no Origin information to check") + } + + case IsRevisionQuery(path, query): + // This query may refer to a branch, non-version tag, or commit ID. + // + // If it is a commit ID, we expect to see a Hash in the Origin data. On + // the other hand, if it is not a commit ID, we expect to see either a Ref + // (for a positive result) or a RepoSum (for a negative result), since + // we don't expect refs in general to remain stable over time. + if origin.Hash == "" && origin.Ref == "" { + return fmt.Errorf("query %q requires a Hash or Ref", query) + } + // Once we resolve the query to a particular commit, we will need to + // also identify the most appropriate version to assign to that commit. + // (It may correspond to more than one valid version.) 
+ // + // The most appropriate version depends on the tags associated with + // both the commit itself (if the commit is a tagged version) + // and its ancestors (if we need to produce a pseudo-version for it). + if origin.TagSum == "" { + return fmt.Errorf("query %q requires a TagSum", query) + } + + default: + // The query may be "latest" or a version inequality or prefix. + // Its result depends on the absence of higher tags matching the query, + // not just the state of an individual ref or tag. + if origin.TagSum == "" { + return fmt.Errorf("query %q requires a TagSum", query) + } + } + + return repo.CheckReuse(ctx, origin) +} + +// AllowedFunc is used by Query and other functions to filter out unsuitable +// versions, for example, those listed in exclude directives in the main +// module's go.mod file. +// +// An AllowedFunc returns an error equivalent to ErrDisallowed for an unsuitable +// version. Any other error indicates the function was unable to determine +// whether the version should be allowed, for example, the function was unable +// to fetch or parse a go.mod file containing retractions. Typically, errors +// other than ErrDisallowed may be ignored. 
+type AllowedFunc func(context.Context, module.Version) error + +var errQueryDisabled error = queryDisabledError{} + +type queryDisabledError struct{} + +func (queryDisabledError) Error() string { + if cfg.BuildModReason == "" { + return fmt.Sprintf("cannot query module due to -mod=%s", cfg.BuildMod) + } + return fmt.Sprintf("cannot query module due to -mod=%s\n\t(%s)", cfg.BuildMod, cfg.BuildModReason) +} + +func queryProxy(ctx context.Context, proxy, path, query, current string, allowed AllowedFunc, reuse map[module.Version]*modinfo.ModulePublic) (*modfetch.RevInfo, error) { + ctx, span := trace.StartSpan(ctx, "modload.queryProxy "+path+" "+query) + defer span.Done() + + if current != "" && current != "none" && !gover.ModIsValid(path, current) { + return nil, fmt.Errorf("invalid previous version %v@%v", path, current) + } + if cfg.BuildMod == "vendor" { + return nil, errQueryDisabled + } + if allowed == nil { + allowed = func(context.Context, module.Version) error { return nil } + } + + if MainModules.Contains(path) && (query == "upgrade" || query == "patch") { + m := module.Version{Path: path} + if err := allowed(ctx, m); err != nil { + return nil, fmt.Errorf("internal error: main module version is not allowed: %w", err) + } + return &modfetch.RevInfo{Version: m.Version}, nil + } + + if path == "std" || path == "cmd" { + return nil, fmt.Errorf("can't query specific version (%q) of standard-library module %q", query, path) + } + + repo, err := lookupRepo(ctx, proxy, path) + if err != nil { + return nil, err + } + + if old := reuse[module.Version{Path: path, Version: query}]; old != nil { + if err := checkReuseRepo(ctx, repo, path, query, old.Origin); err == nil { + info := &modfetch.RevInfo{ + Version: old.Version, + Origin: old.Origin, + } + if old.Time != nil { + info.Time = *old.Time + } + return info, nil + } + } + + // Parse query to detect parse errors (and possibly handle query) + // before any network I/O. 
+ qm, err := newQueryMatcher(path, query, current, allowed) + if (err == nil && qm.canStat) || err == errRevQuery { + // Direct lookup of a commit identifier or complete (non-prefix) semantic + // version. + + // If the identifier is not a canonical semver tag — including if it's a + // semver tag with a +metadata suffix — then modfetch.Stat will populate + // info.Version with a suitable pseudo-version. + info, err := repo.Stat(ctx, query) + if err != nil { + queryErr := err + // The full query doesn't correspond to a tag. If it is a semantic version + // with a +metadata suffix, see if there is a tag without that suffix: + // semantic versioning defines them to be equivalent. + canonicalQuery := module.CanonicalVersion(query) + if canonicalQuery != "" && query != canonicalQuery { + info, err = repo.Stat(ctx, canonicalQuery) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return info, err + } + } + if err != nil { + return info, queryErr + } + } + if err := allowed(ctx, module.Version{Path: path, Version: info.Version}); errors.Is(err, ErrDisallowed) { + return nil, err + } + return info, nil + } else if err != nil { + return nil, err + } + + // Load versions and execute query. + versions, err := repo.Versions(ctx, qm.prefix) + if err != nil { + return nil, err + } + origin := versions.Origin + + revWithOrigin := func(rev *modfetch.RevInfo) *modfetch.RevInfo { + if rev == nil { + if origin == nil { + return nil + } + return &modfetch.RevInfo{Origin: origin} + } + + clone := *rev + clone.Origin = origin + return &clone + } + + releases, prereleases, err := qm.filterVersions(ctx, versions.List) + if err != nil { + return revWithOrigin(nil), err + } + + lookup := func(v string) (*modfetch.RevInfo, error) { + rev, err := repo.Stat(ctx, v) + if rev != nil { + // Note that Stat can return a non-nil rev and a non-nil err, + // in order to provide origin information to make the error cacheable. 
+ origin = mergeOrigin(origin, rev.Origin) + } + if err != nil { + return revWithOrigin(nil), err + } + + if (query == "upgrade" || query == "patch") && module.IsPseudoVersion(current) && !rev.Time.IsZero() { + // Don't allow "upgrade" or "patch" to move from a pseudo-version + // to a chronologically older version or pseudo-version. + // + // If the current version is a pseudo-version from an untagged branch, it + // may be semantically lower than the "latest" release or the latest + // pseudo-version on the main branch. A user on such a version is unlikely + // to intend to “upgrade” to a version that already existed at that point + // in time. + // + // We do this only if the current version is a pseudo-version: if the + // version is tagged, the author of the dependency module has given us + // explicit information about their intended precedence of this version + // relative to other versions, and we shouldn't contradict that + // information. (For example, v1.0.1 might be a backport of a fix already + // incorporated into v1.1.0, in which case v1.0.1 would be chronologically + // newer but v1.1.0 is still an “upgrade”; or v1.0.2 might be a revert of + // an unsuccessful fix in v1.0.1, in which case the v1.0.2 commit may be + // older than the v1.0.1 commit despite the tag itself being newer.) 
+ currentTime, err := module.PseudoVersionTime(current) + if err == nil && rev.Time.Before(currentTime) { + if err := allowed(ctx, module.Version{Path: path, Version: current}); errors.Is(err, ErrDisallowed) { + return revWithOrigin(nil), err + } + rev, err = repo.Stat(ctx, current) + if rev != nil { + origin = mergeOrigin(origin, rev.Origin) + } + if err != nil { + return revWithOrigin(nil), err + } + return revWithOrigin(rev), nil + } + } + + return revWithOrigin(rev), nil + } + + if qm.preferLower { + if len(releases) > 0 { + return lookup(releases[0]) + } + if len(prereleases) > 0 { + return lookup(prereleases[0]) + } + } else { + if len(releases) > 0 { + return lookup(releases[len(releases)-1]) + } + if len(prereleases) > 0 { + return lookup(prereleases[len(prereleases)-1]) + } + } + + if qm.mayUseLatest { + latest, err := repo.Latest(ctx) + if latest != nil { + origin = mergeOrigin(origin, latest.Origin) + } + if err == nil { + if qm.allowsVersion(ctx, latest.Version) { + return lookup(latest.Version) + } + } else if !errors.Is(err, fs.ErrNotExist) { + return revWithOrigin(nil), err + } + } + + if (query == "upgrade" || query == "patch") && current != "" && current != "none" { + // "upgrade" and "patch" may stay on the current version if allowed. + if err := allowed(ctx, module.Version{Path: path, Version: current}); errors.Is(err, ErrDisallowed) { + return revWithOrigin(nil), err + } + return lookup(current) + } + + return revWithOrigin(nil), &NoMatchingVersionError{query: query, current: current} +} + +// IsRevisionQuery returns true if vers is a version query that may refer to +// a particular version or revision in a repository like "v1.0.0", "master", +// or "0123abcd". IsRevisionQuery returns false if vers is a query that +// chooses from among available versions like "latest" or ">v1.0.0". 
+func IsRevisionQuery(path, vers string) bool {
+	if vers == "latest" ||
+		vers == "upgrade" ||
+		vers == "patch" ||
+		strings.HasPrefix(vers, "<") ||
+		strings.HasPrefix(vers, ">") ||
+		(gover.ModIsValid(path, vers) && gover.ModIsPrefix(path, vers)) {
+		return false
+	}
+	return true
+}
+
+type queryMatcher struct {
+	path               string
+	prefix             string
+	filter             func(version string) bool
+	allowed            AllowedFunc
+	canStat            bool // if true, the query can be resolved by repo.Stat
+	preferLower        bool // if true, choose the lowest matching version
+	mayUseLatest       bool
+	preferIncompatible bool
+}
+
+var errRevQuery = errors.New("query refers to a non-semver revision")
+
+// newQueryMatcher returns a new queryMatcher that matches the versions
+// specified by the given query on the module with the given path.
+//
+// If the query can only be resolved by statting a non-SemVer revision,
+// newQueryMatcher returns errRevQuery.
+func newQueryMatcher(path string, query, current string, allowed AllowedFunc) (*queryMatcher, error) {
+	badVersion := func(v string) (*queryMatcher, error) {
+		return nil, fmt.Errorf("invalid semantic version %q in range %q", v, query)
+	}
+
+	matchesMajor := func(v string) bool {
+		_, pathMajor, ok := module.SplitPathVersion(path)
+		if !ok {
+			return false
+		}
+		return module.CheckPathMajor(v, pathMajor) == nil
+	}
+
+	qm := &queryMatcher{
+		path:               path,
+		allowed:            allowed,
+		preferIncompatible: strings.HasSuffix(current, "+incompatible"),
+	}
+
+	switch {
+	case query == "latest":
+		qm.mayUseLatest = true
+
+	case query == "upgrade":
+		if current == "" || current == "none" {
+			qm.mayUseLatest = true
+		} else {
+			qm.mayUseLatest = module.IsPseudoVersion(current)
+			qm.filter = func(mv string) bool { return gover.ModCompare(qm.path, mv, current) >= 0 }
+		}
+
+	case query == "patch":
+		if current == "" || current == "none" {
+			return nil, &NoPatchBaseError{path}
+		}
+		// current is known to be a real version here: the empty/"none" cases
+		// returned above, so there is no need to re-check them.
+		qm.mayUseLatest = module.IsPseudoVersion(current)
+		qm.prefix = gover.ModMajorMinor(qm.path, current) + "."
+		qm.filter = func(mv string) bool { return gover.ModCompare(qm.path, mv, current) >= 0 }
+
+	case strings.HasPrefix(query, "<="):
+		v := query[len("<="):]
+		if !gover.ModIsValid(path, v) {
+			return badVersion(v)
+		}
+		if gover.ModIsPrefix(path, v) {
+			// Refuse to say whether <=v1.2 allows v1.2.3 (remember, @v1.2 might mean v1.2.3).
+			return nil, fmt.Errorf("ambiguous semantic version %q in range %q", v, query)
+		}
+		qm.filter = func(mv string) bool { return gover.ModCompare(qm.path, mv, v) <= 0 }
+		if !matchesMajor(v) {
+			qm.preferIncompatible = true
+		}
+
+	case strings.HasPrefix(query, "<"):
+		v := query[len("<"):]
+		if !gover.ModIsValid(path, v) {
+			return badVersion(v)
+		}
+		qm.filter = func(mv string) bool { return gover.ModCompare(qm.path, mv, v) < 0 }
+		if !matchesMajor(v) {
+			qm.preferIncompatible = true
+		}
+
+	case strings.HasPrefix(query, ">="):
+		v := query[len(">="):]
+		if !gover.ModIsValid(path, v) {
+			return badVersion(v)
+		}
+		qm.filter = func(mv string) bool { return gover.ModCompare(qm.path, mv, v) >= 0 }
+		qm.preferLower = true
+		if !matchesMajor(v) {
+			qm.preferIncompatible = true
+		}
+
+	case strings.HasPrefix(query, ">"):
+		v := query[len(">"):]
+		if !gover.ModIsValid(path, v) {
+			return badVersion(v)
+		}
+		if gover.ModIsPrefix(path, v) {
+			// Refuse to say whether >v1.2 allows v1.2.3 (remember, @v1.2 might mean v1.2.3).
+			return nil, fmt.Errorf("ambiguous semantic version %q in range %q", v, query)
+		}
+		qm.filter = func(mv string) bool { return gover.ModCompare(qm.path, mv, v) > 0 }
+		qm.preferLower = true
+		if !matchesMajor(v) {
+			qm.preferIncompatible = true
+		}
+
+	case gover.ModIsValid(path, query):
+		if gover.ModIsPrefix(path, query) {
+			qm.prefix = query + "."
+			// Do not allow the query "v1.2" to match versions lower than "v1.2.0",
+			// such as prereleases for that version.
(https://golang.org/issue/31972) + qm.filter = func(mv string) bool { return gover.ModCompare(qm.path, mv, query) >= 0 } + } else { + qm.canStat = true + qm.filter = func(mv string) bool { return gover.ModCompare(qm.path, mv, query) == 0 } + qm.prefix = semver.Canonical(query) + } + if !matchesMajor(query) { + qm.preferIncompatible = true + } + + default: + return nil, errRevQuery + } + + return qm, nil +} + +// allowsVersion reports whether version v is allowed by the prefix, filter, and +// AllowedFunc of qm. +func (qm *queryMatcher) allowsVersion(ctx context.Context, v string) bool { + if qm.prefix != "" && !strings.HasPrefix(v, qm.prefix) { + if gover.IsToolchain(qm.path) && strings.TrimSuffix(qm.prefix, ".") == v { + // Allow 1.21 to match "1.21." prefix. + } else { + return false + } + } + if qm.filter != nil && !qm.filter(v) { + return false + } + if qm.allowed != nil { + if err := qm.allowed(ctx, module.Version{Path: qm.path, Version: v}); errors.Is(err, ErrDisallowed) { + return false + } + } + return true +} + +// filterVersions classifies versions into releases and pre-releases, filtering +// out: +// 1. versions that do not satisfy the 'allowed' predicate, and +// 2. "+incompatible" versions, if a compatible one satisfies the predicate +// and the incompatible version is not preferred. +// +// If the allowed predicate returns an error not equivalent to ErrDisallowed, +// filterVersions returns that error. +func (qm *queryMatcher) filterVersions(ctx context.Context, versions []string) (releases, prereleases []string, err error) { + needIncompatible := qm.preferIncompatible + + var lastCompatible string + for _, v := range versions { + if !qm.allowsVersion(ctx, v) { + continue + } + + if !needIncompatible { + // We're not yet sure whether we need to include +incompatible versions. 
+ // Keep track of the last compatible version we've seen, and use the + // presence (or absence) of a go.mod file in that version to decide: a + // go.mod file implies that the module author is supporting modules at a + // compatible version (and we should ignore +incompatible versions unless + // requested explicitly), while a lack of go.mod file implies the + // potential for legacy (pre-modules) versioning without semantic import + // paths (and thus *with* +incompatible versions). + // + // This isn't strictly accurate if the latest compatible version has been + // replaced by a local file path, because we do not allow file-path + // replacements without a go.mod file: the user would have needed to add + // one. However, replacing the last compatible version while + // simultaneously expecting to upgrade implicitly to a +incompatible + // version seems like an extreme enough corner case to ignore for now. + + if !strings.HasSuffix(v, "+incompatible") { + lastCompatible = v + } else if lastCompatible != "" { + // If the latest compatible version is allowed and has a go.mod file, + // ignore any version with a higher (+incompatible) major version. (See + // https://golang.org/issue/34165.) Note that we even prefer a + // compatible pre-release over an incompatible release. + ok, err := versionHasGoMod(ctx, module.Version{Path: qm.path, Version: lastCompatible}) + if err != nil { + return nil, nil, err + } + if ok { + // The last compatible version has a go.mod file, so that's the + // highest version we're willing to consider. Don't bother even + // looking at higher versions, because they're all +incompatible from + // here onward. + break + } + + // No acceptable compatible release has a go.mod file, so the versioning + // for the module might not be module-aware, and we should respect + // legacy major-version tags. 
+ needIncompatible = true + } + } + + if gover.ModIsPrerelease(qm.path, v) { + prereleases = append(prereleases, v) + } else { + releases = append(releases, v) + } + } + + return releases, prereleases, nil +} + +type QueryResult struct { + Mod module.Version + Rev *modfetch.RevInfo + Packages []string +} + +// QueryPackages is like QueryPattern, but requires that the pattern match at +// least one package and omits the non-package result (if any). +func QueryPackages(ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) ([]QueryResult, error) { + pkgMods, modOnly, err := QueryPattern(ctx, pattern, query, current, allowed) + + if len(pkgMods) == 0 && err == nil { + replacement := Replacement(modOnly.Mod) + return nil, &PackageNotInModuleError{ + Mod: modOnly.Mod, + Replacement: replacement, + Query: query, + Pattern: pattern, + } + } + + return pkgMods, err +} + +// QueryPattern looks up the module(s) containing at least one package matching +// the given pattern at the given version. The results are sorted by module path +// length in descending order. If any proxy provides a non-empty set of candidate +// modules, no further proxies are tried. +// +// For wildcard patterns, QueryPattern looks in modules with package paths up to +// the first "..." in the pattern. For the pattern "example.com/a/b.../c", +// QueryPattern would consider prefixes of "example.com/a". +// +// If any matching package is in the main module, QueryPattern considers only +// the main module and only the version "latest", without checking for other +// possible modules. +// +// QueryPattern always returns at least one QueryResult (which may be only +// modOnly) or a non-nil error. 
+func QueryPattern(ctx context.Context, pattern, query string, current func(string) string, allowed AllowedFunc) (pkgMods []QueryResult, modOnly *QueryResult, err error) { + ctx, span := trace.StartSpan(ctx, "modload.QueryPattern "+pattern+" "+query) + defer span.Done() + + base := pattern + + firstError := func(m *search.Match) error { + if len(m.Errs) == 0 { + return nil + } + return m.Errs[0] + } + + var match func(mod module.Version, roots []string, isLocal bool) *search.Match + matchPattern := pkgpattern.MatchPattern(pattern) + + if i := strings.Index(pattern, "..."); i >= 0 { + base = pathpkg.Dir(pattern[:i+3]) + if base == "." { + return nil, nil, &WildcardInFirstElementError{Pattern: pattern, Query: query} + } + match = func(mod module.Version, roots []string, isLocal bool) *search.Match { + m := search.NewMatch(pattern) + matchPackages(ctx, m, imports.AnyTags(), omitStd, []module.Version{mod}) + return m + } + } else { + match = func(mod module.Version, roots []string, isLocal bool) *search.Match { + m := search.NewMatch(pattern) + prefix := mod.Path + if MainModules.Contains(mod.Path) { + prefix = MainModules.PathPrefix(module.Version{Path: mod.Path}) + } + for _, root := range roots { + if _, ok, err := dirInModule(pattern, prefix, root, isLocal); err != nil { + m.AddError(err) + } else if ok { + m.Pkgs = []string{pattern} + } + } + return m + } + } + + var mainModuleMatches []module.Version + for _, mainModule := range MainModules.Versions() { + m := match(mainModule, modRoots, true) + if len(m.Pkgs) > 0 { + if query != "upgrade" && query != "patch" { + return nil, nil, &QueryMatchesPackagesInMainModuleError{ + Pattern: pattern, + Query: query, + Packages: m.Pkgs, + } + } + if err := allowed(ctx, mainModule); err != nil { + return nil, nil, fmt.Errorf("internal error: package %s is in the main module (%s), but version is not allowed: %w", pattern, mainModule.Path, err) + } + return []QueryResult{{ + Mod: mainModule, + Rev: &modfetch.RevInfo{Version: 
mainModule.Version}, + Packages: m.Pkgs, + }}, nil, nil + } + if err := firstError(m); err != nil { + return nil, nil, err + } + + var matchesMainModule bool + if matchPattern(mainModule.Path) { + mainModuleMatches = append(mainModuleMatches, mainModule) + matchesMainModule = true + } + + if (query == "upgrade" || query == "patch") && matchesMainModule { + if err := allowed(ctx, mainModule); err == nil { + modOnly = &QueryResult{ + Mod: mainModule, + Rev: &modfetch.RevInfo{Version: mainModule.Version}, + } + } + } + } + + var ( + results []QueryResult + candidateModules = modulePrefixesExcludingTarget(base) + ) + if len(candidateModules) == 0 { + if modOnly != nil { + return nil, modOnly, nil + } else if len(mainModuleMatches) != 0 { + return nil, nil, &QueryMatchesMainModulesError{ + MainModules: mainModuleMatches, + Pattern: pattern, + Query: query, + } + } else { + return nil, nil, &PackageNotInModuleError{ + MainModules: mainModuleMatches, + Query: query, + Pattern: pattern, + } + } + } + + err = modfetch.TryProxies(func(proxy string) error { + queryModule := func(ctx context.Context, path string) (r QueryResult, err error) { + ctx, span := trace.StartSpan(ctx, "modload.QueryPattern.queryModule ["+proxy+"] "+path) + defer span.Done() + + pathCurrent := current(path) + r.Mod.Path = path + r.Rev, err = queryProxy(ctx, proxy, path, query, pathCurrent, allowed, nil) + if err != nil { + return r, err + } + r.Mod.Version = r.Rev.Version + if gover.IsToolchain(r.Mod.Path) { + return r, nil + } + root, isLocal, err := fetch(ctx, r.Mod) + if err != nil { + return r, err + } + m := match(r.Mod, []string{root}, isLocal) + r.Packages = m.Pkgs + if len(r.Packages) == 0 && !matchPattern(path) { + if err := firstError(m); err != nil { + return r, err + } + replacement := Replacement(r.Mod) + return r, &PackageNotInModuleError{ + Mod: r.Mod, + Replacement: replacement, + Query: query, + Pattern: pattern, + } + } + return r, nil + } + + allResults, err := 
queryPrefixModules(ctx, candidateModules, queryModule) + results = allResults[:0] + for _, r := range allResults { + if len(r.Packages) == 0 { + modOnly = &r + } else { + results = append(results, r) + } + } + return err + }) + + if len(mainModuleMatches) > 0 && len(results) == 0 && modOnly == nil && errors.Is(err, fs.ErrNotExist) { + return nil, nil, &QueryMatchesMainModulesError{ + Pattern: pattern, + Query: query, + } + } + return slices.Clip(results), modOnly, err +} + +// modulePrefixesExcludingTarget returns all prefixes of path that may plausibly +// exist as a module, excluding targetPrefix but otherwise including path +// itself, sorted by descending length. Prefixes that are not valid module paths +// but are valid package paths (like "m" or "example.com/.gen") are included, +// since they might be replaced. +func modulePrefixesExcludingTarget(path string) []string { + prefixes := make([]string, 0, strings.Count(path, "/")+1) + + mainModulePrefixes := make(map[string]bool) + for _, m := range MainModules.Versions() { + mainModulePrefixes[m.Path] = true + } + + for { + if !mainModulePrefixes[path] { + if _, _, ok := module.SplitPathVersion(path); ok { + prefixes = append(prefixes, path) + } + } + + j := strings.LastIndexByte(path, '/') + if j < 0 { + break + } + path = path[:j] + } + + return prefixes +} + +func queryPrefixModules(ctx context.Context, candidateModules []string, queryModule func(ctx context.Context, path string) (QueryResult, error)) (found []QueryResult, err error) { + ctx, span := trace.StartSpan(ctx, "modload.queryPrefixModules") + defer span.Done() + + // If the path we're attempting is not in the module cache and we don't have a + // fetch result cached either, we'll end up making a (potentially slow) + // request to the proxy or (often even slower) the origin server. + // To minimize latency, execute all of those requests in parallel. 
+ type result struct { + QueryResult + err error + } + results := make([]result, len(candidateModules)) + var wg sync.WaitGroup + wg.Add(len(candidateModules)) + for i, p := range candidateModules { + ctx := trace.StartGoroutine(ctx) + go func(p string, r *result) { + r.QueryResult, r.err = queryModule(ctx, p) + wg.Done() + }(p, &results[i]) + } + wg.Wait() + + // Classify the results. In case of failure, identify the error that the user + // is most likely to find helpful: the most useful class of error at the + // longest matching path. + var ( + noPackage *PackageNotInModuleError + noVersion *NoMatchingVersionError + noPatchBase *NoPatchBaseError + invalidPath *module.InvalidPathError // see comment in case below + invalidVersion error + notExistErr error + ) + for _, r := range results { + switch rErr := r.err.(type) { + case nil: + found = append(found, r.QueryResult) + case *PackageNotInModuleError: + // Given the option, prefer to attribute “package not in module” + // to modules other than the main one. + if noPackage == nil || MainModules.Contains(noPackage.Mod.Path) { + noPackage = rErr + } + case *NoMatchingVersionError: + if noVersion == nil { + noVersion = rErr + } + case *NoPatchBaseError: + if noPatchBase == nil { + noPatchBase = rErr + } + case *module.InvalidPathError: + // The prefix was not a valid module path, and there was no replacement. + // Prefixes like this may appear in candidateModules, since we handle + // replaced modules that weren't required in the repo lookup process + // (see lookupRepo). + // + // A shorter prefix may be a valid module path and may contain a valid + // import path, so this is a low-priority error. 
+ if invalidPath == nil { + invalidPath = rErr + } + default: + if errors.Is(rErr, fs.ErrNotExist) { + if notExistErr == nil { + notExistErr = rErr + } + } else if iv := (*module.InvalidVersionError)(nil); errors.As(rErr, &iv) { + if invalidVersion == nil { + invalidVersion = rErr + } + } else if err == nil { + if len(found) > 0 || noPackage != nil { + // golang.org/issue/34094: If we have already found a module that + // could potentially contain the target package, ignore unclassified + // errors for modules with shorter paths. + + // golang.org/issue/34383 is a special case of this: if we have + // already found example.com/foo/v2@v2.0.0 with a matching go.mod + // file, ignore the error from example.com/foo@v2.0.0. + } else { + err = r.err + } + } + } + } + + // TODO(#26232): If len(found) == 0 and some of the errors are 4xx HTTP + // codes, have the auth package recheck the failed paths. + // If we obtain new credentials for any of them, re-run the above loop. + + if len(found) == 0 && err == nil { + switch { + case noPackage != nil: + err = noPackage + case noVersion != nil: + err = noVersion + case noPatchBase != nil: + err = noPatchBase + case invalidPath != nil: + err = invalidPath + case invalidVersion != nil: + err = invalidVersion + case notExistErr != nil: + err = notExistErr + default: + panic("queryPrefixModules: no modules found, but no error detected") + } + } + + return found, err +} + +// A NoMatchingVersionError indicates that Query found a module at the requested +// path, but not at any versions satisfying the query string and allow-function. +// +// NOTE: NoMatchingVersionError MUST NOT implement Is(fs.ErrNotExist). +// +// If the module came from a proxy, that proxy had to return a successful status +// code for the versions it knows about, and thus did not have the opportunity +// to return a non-400 status code to suppress fallback. 
+type NoMatchingVersionError struct { + query, current string +} + +func (e *NoMatchingVersionError) Error() string { + currentSuffix := "" + if (e.query == "upgrade" || e.query == "patch") && e.current != "" && e.current != "none" { + currentSuffix = fmt.Sprintf(" (current version is %s)", e.current) + } + return fmt.Sprintf("no matching versions for query %q", e.query) + currentSuffix +} + +// A NoPatchBaseError indicates that Query was called with the query "patch" +// but with a current version of "" or "none". +type NoPatchBaseError struct { + path string +} + +func (e *NoPatchBaseError) Error() string { + return fmt.Sprintf(`can't query version "patch" of module %s: no existing version is required`, e.path) +} + +// A WildcardInFirstElementError indicates that a pattern passed to QueryPattern +// had a wildcard in its first path element, and therefore had no pattern-prefix +// modules to search in. +type WildcardInFirstElementError struct { + Pattern string + Query string +} + +func (e *WildcardInFirstElementError) Error() string { + return fmt.Sprintf("no modules to query for %s@%s because first path element contains a wildcard", e.Pattern, e.Query) +} + +// A PackageNotInModuleError indicates that QueryPattern found a candidate +// module at the requested version, but that module did not contain any packages +// matching the requested pattern. +// +// NOTE: PackageNotInModuleError MUST NOT implement Is(fs.ErrNotExist). +// +// If the module came from a proxy, that proxy had to return a successful status +// code for the versions it knows about, and thus did not have the opportunity +// to return a non-400 status code to suppress fallback. 
+type PackageNotInModuleError struct { + MainModules []module.Version + Mod module.Version + Replacement module.Version + Query string + Pattern string +} + +func (e *PackageNotInModuleError) Error() string { + if len(e.MainModules) > 0 { + prefix := "workspace modules do" + if len(e.MainModules) == 1 { + prefix = fmt.Sprintf("main module (%s) does", e.MainModules[0]) + } + if strings.Contains(e.Pattern, "...") { + return fmt.Sprintf("%s not contain packages matching %s", prefix, e.Pattern) + } + return fmt.Sprintf("%s not contain package %s", prefix, e.Pattern) + } + + found := "" + if r := e.Replacement; r.Path != "" { + replacement := r.Path + if r.Version != "" { + replacement = fmt.Sprintf("%s@%s", r.Path, r.Version) + } + if e.Query == e.Mod.Version { + found = fmt.Sprintf(" (replaced by %s)", replacement) + } else { + found = fmt.Sprintf(" (%s, replaced by %s)", e.Mod.Version, replacement) + } + } else if e.Query != e.Mod.Version { + found = fmt.Sprintf(" (%s)", e.Mod.Version) + } + + if strings.Contains(e.Pattern, "...") { + return fmt.Sprintf("module %s@%s found%s, but does not contain packages matching %s", e.Mod.Path, e.Query, found, e.Pattern) + } + return fmt.Sprintf("module %s@%s found%s, but does not contain package %s", e.Mod.Path, e.Query, found, e.Pattern) +} + +func (e *PackageNotInModuleError) ImportPath() string { + if !strings.Contains(e.Pattern, "...") { + return e.Pattern + } + return "" +} + +// versionHasGoMod returns whether a version has a go.mod file. +// +// versionHasGoMod fetches the go.mod file (possibly a fake) and true if it +// contains anything other than a module directive with the same path. When a +// module does not have a real go.mod file, the go command acts as if it had one +// that only contained a module directive. Normal go.mod files created after +// 1.12 at least have a go directive. +// +// This function is a heuristic, since it's possible to commit a file that would +// pass this test. 
However, we only need a heuristic for determining whether +// +incompatible versions may be "latest", which is what this function is used +// for. +// +// This heuristic is useful for two reasons: first, when using a proxy, +// this lets us fetch from the .mod endpoint which is much faster than the .zip +// endpoint. The .mod file is used anyway, even if the .zip file contains a +// go.mod with different content. Second, if we don't fetch the .zip, then +// we don't need to verify it in go.sum. This makes 'go list -m -u' faster +// and simpler. +func versionHasGoMod(_ context.Context, m module.Version) (bool, error) { + _, data, err := rawGoModData(m) + if err != nil { + return false, err + } + isFake := bytes.Equal(data, modfetch.LegacyGoMod(m.Path)) + return !isFake, nil +} + +// A versionRepo is a subset of modfetch.Repo that can report information about +// available versions, but cannot fetch specific source files. +type versionRepo interface { + ModulePath() string + CheckReuse(context.Context, *codehost.Origin) error + Versions(ctx context.Context, prefix string) (*modfetch.Versions, error) + Stat(ctx context.Context, rev string) (*modfetch.RevInfo, error) + Latest(context.Context) (*modfetch.RevInfo, error) +} + +var _ versionRepo = modfetch.Repo(nil) + +func lookupRepo(ctx context.Context, proxy, path string) (repo versionRepo, err error) { + if path != "go" && path != "toolchain" { + err = module.CheckPath(path) + } + if err == nil { + repo = modfetch.Lookup(ctx, proxy, path) + } else { + repo = emptyRepo{path: path, err: err} + } + + if MainModules == nil { + return repo, err + } else if _, ok := MainModules.HighestReplaced()[path]; ok { + return &replacementRepo{repo: repo}, nil + } + + return repo, err +} + +// An emptyRepo is a versionRepo that contains no versions. 
+type emptyRepo struct { + path string + err error +} + +var _ versionRepo = emptyRepo{} + +func (er emptyRepo) ModulePath() string { return er.path } +func (er emptyRepo) CheckReuse(ctx context.Context, old *codehost.Origin) error { + return fmt.Errorf("empty repo") +} +func (er emptyRepo) Versions(ctx context.Context, prefix string) (*modfetch.Versions, error) { + return &modfetch.Versions{}, nil +} +func (er emptyRepo) Stat(ctx context.Context, rev string) (*modfetch.RevInfo, error) { + return nil, er.err +} +func (er emptyRepo) Latest(ctx context.Context) (*modfetch.RevInfo, error) { return nil, er.err } + +// A replacementRepo augments a versionRepo to include the replacement versions +// (if any) found in the main module's go.mod file. +// +// A replacementRepo suppresses "not found" errors for otherwise-nonexistent +// modules, so a replacementRepo should only be constructed for a module that +// actually has one or more valid replacements. +type replacementRepo struct { + repo versionRepo +} + +var _ versionRepo = (*replacementRepo)(nil) + +func (rr *replacementRepo) ModulePath() string { return rr.repo.ModulePath() } + +func (rr *replacementRepo) CheckReuse(ctx context.Context, old *codehost.Origin) error { + return fmt.Errorf("replacement repo") +} + +// Versions returns the versions from rr.repo augmented with any matching +// replacement versions. 
+func (rr *replacementRepo) Versions(ctx context.Context, prefix string) (*modfetch.Versions, error) {
+	repoVersions, err := rr.repo.Versions(ctx, prefix)
+	if err != nil {
+		if !errors.Is(err, os.ErrNotExist) {
+			return nil, err
+		}
+		// The underlying repo has no versions at all; start from an empty
+		// list and rely solely on replacement versions below.
+		repoVersions = new(modfetch.Versions)
+	}
+
+	versions := repoVersions.List
+	for _, mm := range MainModules.Versions() {
+		if index := MainModules.Index(mm); index != nil && len(index.replace) > 0 {
+			path := rr.ModulePath()
+			for m := range index.replace {
+				if m.Path == path && strings.HasPrefix(m.Version, prefix) && m.Version != "" && !module.IsPseudoVersion(m.Version) {
+					versions = append(versions, m.Version)
+				}
+			}
+		}
+	}
+
+	if len(versions) == len(repoVersions.List) { // no replacement versions added
+		return repoVersions, nil
+	}
+
+	path := rr.ModulePath()
+	sort.Slice(versions, func(i, j int) bool {
+		return gover.ModCompare(path, versions[i], versions[j]) < 0
+	})
+	str.Uniq(&versions)
+	return &modfetch.Versions{List: versions}, nil
+}
+
+func (rr *replacementRepo) Stat(ctx context.Context, rev string) (*modfetch.RevInfo, error) {
+	info, err := rr.repo.Stat(ctx, rev)
+	if err == nil {
+		return info, err
+	}
+	var hasReplacements bool
+	for _, v := range MainModules.Versions() {
+		if index := MainModules.Index(v); index != nil && len(index.replace) > 0 {
+			hasReplacements = true
+		}
+	}
+	if !hasReplacements {
+		return info, err
+	}
+
+	v := module.CanonicalVersion(rev)
+	if v != rev {
+		// The replacements in the go.mod file list only canonical semantic versions,
+		// so a non-canonical version can't possibly have a replacement.
+ return info, err + } + + path := rr.ModulePath() + _, pathMajor, ok := module.SplitPathVersion(path) + if ok && pathMajor == "" { + if err := module.CheckPathMajor(v, pathMajor); err != nil && semver.Build(v) == "" { + v += "+incompatible" + } + } + + if r := Replacement(module.Version{Path: path, Version: v}); r.Path == "" { + return info, err + } + return rr.replacementStat(v) +} + +func (rr *replacementRepo) Latest(ctx context.Context) (*modfetch.RevInfo, error) { + info, err := rr.repo.Latest(ctx) + path := rr.ModulePath() + + if v, ok := MainModules.HighestReplaced()[path]; ok { + if v == "" { + // The only replacement is a wildcard that doesn't specify a version, so + // synthesize a pseudo-version with an appropriate major version and a + // timestamp below any real timestamp. That way, if the main module is + // used from within some other module, the user will be able to upgrade + // the requirement to any real version they choose. + if _, pathMajor, ok := module.SplitPathVersion(path); ok && len(pathMajor) > 0 { + v = module.PseudoVersion(pathMajor[1:], "", time.Time{}, "000000000000") + } else { + v = module.PseudoVersion("v0", "", time.Time{}, "000000000000") + } + } + + if err != nil || gover.ModCompare(path, v, info.Version) > 0 { + return rr.replacementStat(v) + } + } + + return info, err +} + +func (rr *replacementRepo) replacementStat(v string) (*modfetch.RevInfo, error) { + rev := &modfetch.RevInfo{Version: v} + if module.IsPseudoVersion(v) { + rev.Time, _ = module.PseudoVersionTime(v) + rev.Short, _ = module.PseudoVersionRev(v) + } + return rev, nil +} + +// A QueryMatchesMainModulesError indicates that a query requests +// a version of the main module that cannot be satisfied. +// (The main module's version cannot be changed.) 
+type QueryMatchesMainModulesError struct { + MainModules []module.Version + Pattern string + Query string +} + +func (e *QueryMatchesMainModulesError) Error() string { + if MainModules.Contains(e.Pattern) { + return fmt.Sprintf("can't request version %q of the main module (%s)", e.Query, e.Pattern) + } + + plural := "" + mainModulePaths := make([]string, len(e.MainModules)) + for i := range e.MainModules { + mainModulePaths[i] = e.MainModules[i].Path + } + if len(e.MainModules) > 1 { + plural = "s" + } + return fmt.Sprintf("can't request version %q of pattern %q that includes the main module%s (%s)", e.Query, e.Pattern, plural, strings.Join(mainModulePaths, ", ")) +} + +// A QueryUpgradesAllError indicates that a query requests +// an upgrade on the all pattern. +// (The main module's version cannot be changed.) +type QueryUpgradesAllError struct { + MainModules []module.Version + Query string +} + +func (e *QueryUpgradesAllError) Error() string { + var plural string = "" + if len(e.MainModules) != 1 { + plural = "s" + } + + return fmt.Sprintf("can't request version %q of pattern \"all\" that includes the main module%s", e.Query, plural) +} + +// A QueryMatchesPackagesInMainModuleError indicates that a query cannot be +// satisfied because it matches one or more packages found in the main module. 
+type QueryMatchesPackagesInMainModuleError struct { + Pattern string + Query string + Packages []string +} + +func (e *QueryMatchesPackagesInMainModuleError) Error() string { + if len(e.Packages) > 1 { + return fmt.Sprintf("pattern %s matches %d packages in the main module, so can't request version %s", e.Pattern, len(e.Packages), e.Query) + } + + if search.IsMetaPackage(e.Pattern) || strings.Contains(e.Pattern, "...") { + return fmt.Sprintf("pattern %s matches package %s in the main module, so can't request version %s", e.Pattern, e.Packages[0], e.Query) + } + + return fmt.Sprintf("package %s is in the main module, so can't request version %s", e.Packages[0], e.Query) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/query_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/query_test.go new file mode 100644 index 0000000000000000000000000000000000000000..93f8f0d00d1c8d9756d485e97fe87116a56d575d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/modload/query_test.go @@ -0,0 +1,202 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package modload + +import ( + "context" + "flag" + "internal/testenv" + "log" + "os" + "path" + "path/filepath" + "strings" + "testing" + + "cmd/go/internal/cfg" + "cmd/go/internal/vcweb/vcstest" + + "golang.org/x/mod/module" +) + +func TestMain(m *testing.M) { + flag.Parse() + if err := testMain(m); err != nil { + log.Fatal(err) + } +} + +func testMain(m *testing.M) (err error) { + cfg.GOPROXY = "direct" + cfg.ModCacheRW = true + + srv, err := vcstest.NewServer() + if err != nil { + return err + } + defer func() { + if closeErr := srv.Close(); err == nil { + err = closeErr + } + }() + + dir, err := os.MkdirTemp("", "modload-test-") + if err != nil { + return err + } + defer func() { + if rmErr := os.RemoveAll(dir); err == nil { + err = rmErr + } + }() + + os.Setenv("GOPATH", dir) + cfg.BuildContext.GOPATH = dir + cfg.GOMODCACHE = filepath.Join(dir, "pkg/mod") + cfg.SumdbDir = filepath.Join(dir, "pkg/sumdb") + m.Run() + return nil +} + +var ( + queryRepo = "vcs-test.golang.org/git/querytest.git" + queryRepoV2 = queryRepo + "/v2" + queryRepoV3 = queryRepo + "/v3" + + // Empty version list (no semver tags), not actually empty. 
+ emptyRepoPath = "vcs-test.golang.org/git/emptytest.git" +) + +var queryTests = []struct { + path string + query string + current string + allow string + vers string + err string +}{ + {path: queryRepo, query: "v0.0.0", vers: "v0.0.1"}, + {path: queryRepo, query: ">=v0.0.0", vers: "v0.0.0"}, + {path: queryRepo, query: "v0.0.1", vers: "v0.0.1"}, + {path: queryRepo, query: "v0.0.1+foo", vers: "v0.0.1"}, + {path: queryRepo, query: "v0.0.99", err: `vcs-test.golang.org/git/querytest.git@v0.0.99: invalid version: unknown revision v0.0.99`}, + {path: queryRepo, query: "v0", vers: "v0.3.0"}, + {path: queryRepo, query: "v0.1", vers: "v0.1.2"}, + {path: queryRepo, query: "v0.2", err: `no matching versions for query "v0.2"`}, + {path: queryRepo, query: "v0.0", vers: "v0.0.3"}, + {path: queryRepo, query: "v1.9.10-pre2+metadata", vers: "v1.9.10-pre2.0.20190513201126-42abcb6df8ee"}, + {path: queryRepo, query: "ed5ffdaa", vers: "v1.9.10-pre2.0.20191220134614-ed5ffdaa1f5e"}, + + // golang.org/issue/29262: The major version for a module without a suffix + // should be based on the most recent tag (v1 as appropriate, not v0 + // unconditionally). 
+ {path: queryRepo, query: "42abcb6df8ee", vers: "v1.9.10-pre2.0.20190513201126-42abcb6df8ee"}, + + {path: queryRepo, query: "v1.9.10-pre2+wrongmetadata", err: `vcs-test.golang.org/git/querytest.git@v1.9.10-pre2+wrongmetadata: invalid version: unknown revision v1.9.10-pre2+wrongmetadata`}, + {path: queryRepo, query: "v1.9.10-pre2", err: `vcs-test.golang.org/git/querytest.git@v1.9.10-pre2: invalid version: unknown revision v1.9.10-pre2`}, + {path: queryRepo, query: "latest", vers: "v1.9.9"}, + {path: queryRepo, query: "latest", current: "v1.9.10-pre1", vers: "v1.9.9"}, + {path: queryRepo, query: "upgrade", vers: "v1.9.9"}, + {path: queryRepo, query: "upgrade", current: "v1.9.10-pre1", vers: "v1.9.10-pre1"}, + {path: queryRepo, query: "upgrade", current: "v1.9.10-pre2+metadata", vers: "v1.9.10-pre2.0.20190513201126-42abcb6df8ee"}, + {path: queryRepo, query: "upgrade", current: "v0.0.0-20190513201126-42abcb6df8ee", vers: "v0.0.0-20190513201126-42abcb6df8ee"}, + {path: queryRepo, query: "upgrade", allow: "NOMATCH", err: `no matching versions for query "upgrade"`}, + {path: queryRepo, query: "upgrade", current: "v1.9.9", allow: "NOMATCH", err: `vcs-test.golang.org/git/querytest.git@v1.9.9: disallowed module version`}, + {path: queryRepo, query: "upgrade", current: "v1.99.99", err: `vcs-test.golang.org/git/querytest.git@v1.99.99: invalid version: unknown revision v1.99.99`}, + {path: queryRepo, query: "patch", current: "", err: `can't query version "patch" of module vcs-test.golang.org/git/querytest.git: no existing version is required`}, + {path: queryRepo, query: "patch", current: "v0.1.0", vers: "v0.1.2"}, + {path: queryRepo, query: "patch", current: "v1.9.0", vers: "v1.9.9"}, + {path: queryRepo, query: "patch", current: "v1.9.10-pre1", vers: "v1.9.10-pre1"}, + {path: queryRepo, query: "patch", current: "v1.9.10-pre2+metadata", vers: "v1.9.10-pre2.0.20190513201126-42abcb6df8ee"}, + {path: queryRepo, query: "patch", current: "v1.99.99", err: 
`vcs-test.golang.org/git/querytest.git@v1.99.99: invalid version: unknown revision v1.99.99`}, + {path: queryRepo, query: ">v1.9.9", vers: "v1.9.10-pre1"}, + {path: queryRepo, query: ">v1.10.0", err: `no matching versions for query ">v1.10.0"`}, + {path: queryRepo, query: ">=v1.10.0", err: `no matching versions for query ">=v1.10.0"`}, + {path: queryRepo, query: "6cf84eb", vers: "v0.0.2-0.20180704023347-6cf84ebaea54"}, + + // golang.org/issue/27173: A pseudo-version may be based on the highest tag on + // any parent commit, or any existing semantically-lower tag: a given commit + // could have been a pre-release for a backport tag at any point. + {path: queryRepo, query: "3ef0cec634e0", vers: "v0.1.2-0.20180704023347-3ef0cec634e0"}, + {path: queryRepo, query: "v0.1.2-0.20180704023347-3ef0cec634e0", vers: "v0.1.2-0.20180704023347-3ef0cec634e0"}, + {path: queryRepo, query: "v0.1.1-0.20180704023347-3ef0cec634e0", vers: "v0.1.1-0.20180704023347-3ef0cec634e0"}, + {path: queryRepo, query: "v0.0.4-0.20180704023347-3ef0cec634e0", vers: "v0.0.4-0.20180704023347-3ef0cec634e0"}, + + // Invalid tags are tested in cmd/go/testdata/script/mod_pseudo_invalid.txt. 
+ + {path: queryRepo, query: "start", vers: "v0.0.0-20180704023101-5e9e31667ddf"}, + {path: queryRepo, query: "5e9e31667ddf", vers: "v0.0.0-20180704023101-5e9e31667ddf"}, + {path: queryRepo, query: "v0.0.0-20180704023101-5e9e31667ddf", vers: "v0.0.0-20180704023101-5e9e31667ddf"}, + + {path: queryRepo, query: "7a1b6bf", vers: "v0.1.0"}, + + {path: queryRepoV2, query: "v0.0.0", vers: "v2.0.0"}, + {path: queryRepoV2, query: ">=v0.0.0", vers: "v2.0.0"}, + + {path: queryRepoV2, query: "v2", vers: "v2.5.5"}, + {path: queryRepoV2, query: "v2.5", vers: "v2.5.5"}, + {path: queryRepoV2, query: "v2.6", err: `no matching versions for query "v2.6"`}, + {path: queryRepoV2, query: "v2.6.0-pre1", vers: "v2.6.0-pre1"}, + {path: queryRepoV2, query: "latest", vers: "v2.5.5"}, + + // Commit e0cf3de987e6 is actually v1.19.10-pre1, not anything resembling v3, + // and it has a go.mod file with a non-v3 module path. Attempting to query it + // as the v3 module should fail. + {path: queryRepoV3, query: "e0cf3de987e6", err: `vcs-test.golang.org/git/querytest.git/v3@v3.0.0-20180704024501-e0cf3de987e6: invalid version: go.mod has non-.../v3 module path "vcs-test.golang.org/git/querytest.git" (and .../v3/go.mod does not exist) at revision e0cf3de987e6`}, + + // The querytest repo does not have any commits tagged with major version 3, + // and the latest commit in the repo has a go.mod file specifying a non-v3 path. + // That should prevent us from resolving any version for the /v3 path. + {path: queryRepoV3, query: "latest", err: `no matching versions for query "latest"`}, + + {path: emptyRepoPath, query: "latest", vers: "v0.0.0-20180704023549-7bb914627242"}, + {path: emptyRepoPath, query: ">v0.0.0", err: `no matching versions for query ">v0.0.0"`}, + {path: emptyRepoPath, query: "" { + // A wildcard replacement found in the main module's go.mod file. + mod = module.Version{Path: f[1]} + f = f[2:] + } else { + // Not a version or a wildcard replacement. 
+ // We don't know how to interpret this module line, so ignore it. + mod = module.Version{} + continue + } + + if len(f) >= 2 && f[0] == "=>" { + meta := vendorMeta[mod] + if len(f) == 2 { + // File replacement. + meta.Replacement = module.Version{Path: f[1]} + vendorReplaced = append(vendorReplaced, mod) + } else if len(f) == 3 && semver.IsValid(f[2]) { + // Path and version replacement. + meta.Replacement = module.Version{Path: f[1], Version: f[2]} + vendorReplaced = append(vendorReplaced, mod) + } else { + // We don't understand this replacement. Ignore it. + } + vendorMeta[mod] = meta + } + continue + } + + // Not a module line. Must be a package within a module or a metadata + // directive, either of which requires a preceding module line. + if mod.Path == "" { + continue + } + + if annotations, ok := strings.CutPrefix(line, "## "); ok { + // Metadata. Take the union of annotations across multiple lines, if present. + meta := vendorMeta[mod] + for _, entry := range strings.Split(annotations, ";") { + entry = strings.TrimSpace(entry) + if entry == "explicit" { + meta.Explicit = true + } + if goVersion, ok := strings.CutPrefix(entry, "go "); ok { + meta.GoVersion = goVersion + rawGoVersion.Store(mod, meta.GoVersion) + if gover.Compare(goVersion, gover.Local()) > 0 { + base.Fatal(&gover.TooNewError{What: mod.Path + " in " + base.ShortPath(vendorFile), GoVersion: goVersion}) + } + } + // All other tokens are reserved for future use. + } + vendorMeta[mod] = meta + continue + } + + if f := strings.Fields(line); len(f) == 1 && module.CheckImportPath(f[0]) == nil { + // A package within the current module. + vendorPkgModule[f[0]] = mod + + // Since this module provides a package for the build, we know that it + // is in the build list and is the selected version of its path. + // If this information is new, record it. 
+ if v, ok := vendorVersion[mod.Path]; !ok || gover.ModCompare(mod.Path, v, mod.Version) < 0 { + vendorList = append(vendorList, mod) + vendorVersion[mod.Path] = mod.Version + } + } + } + }) +} + +// checkVendorConsistency verifies that the vendor/modules.txt file matches (if +// go 1.14) or at least does not contradict (go 1.13 or earlier) the +// requirements and replacements listed in the main module's go.mod file. +func checkVendorConsistency(indexes []*modFileIndex, modFiles []*modfile.File, modRoots []string) { + // readVendorList only needs the main module to get the directory + // the vendor directory is in. + readVendorList(VendorDir()) + + if len(modFiles) < 1 { + // We should never get here if there are zero modfiles. Either + // we're in single module mode and there's a single module, or + // we're in workspace mode, and we fail earlier reporting that + // "no modules were found in the current workspace". + panic("checkVendorConsistency called with zero modfiles") + } + + pre114 := false + if !inWorkspaceMode() { // workspace mode was added after Go 1.14 + if len(indexes) != 1 { + panic(fmt.Errorf("not in workspace mode but number of indexes is %v, not 1", len(indexes))) + } + index := indexes[0] + if gover.Compare(index.goVersion, "1.14") < 0 { + // Go versions before 1.14 did not include enough information in + // vendor/modules.txt to check for consistency. + // If we know that we're on an earlier version, relax the consistency check. + pre114 = true + } + } + + vendErrors := new(strings.Builder) + vendErrorf := func(mod module.Version, format string, args ...any) { + detail := fmt.Sprintf(format, args...) + if mod.Version == "" { + fmt.Fprintf(vendErrors, "\n\t%s: %s", mod.Path, detail) + } else { + fmt.Fprintf(vendErrors, "\n\t%s@%s: %s", mod.Path, mod.Version, detail) + } + } + + // Iterate over the Require directives in their original (not indexed) order + // so that the errors match the original file. 
+ for _, modFile := range modFiles { + for _, r := range modFile.Require { + if !vendorMeta[r.Mod].Explicit { + if pre114 { + // Before 1.14, modules.txt did not indicate whether modules were listed + // explicitly in the main module's go.mod file. + // However, we can at least detect a version mismatch if packages were + // vendored from a non-matching version. + if vv, ok := vendorVersion[r.Mod.Path]; ok && vv != r.Mod.Version { + vendErrorf(r.Mod, fmt.Sprintf("is explicitly required in go.mod, but vendor/modules.txt indicates %s@%s", r.Mod.Path, vv)) + } + } else { + vendErrorf(r.Mod, "is explicitly required in go.mod, but not marked as explicit in vendor/modules.txt") + } + } + } + } + + describe := func(m module.Version) string { + if m.Version == "" { + return m.Path + } + return m.Path + "@" + m.Version + } + + // We need to verify *all* replacements that occur in modfile: even if they + // don't directly apply to any module in the vendor list, the replacement + // go.mod file can affect the selected versions of other (transitive) + // dependencies + seenrep := make(map[module.Version]bool) + checkReplace := func(replaces []*modfile.Replace) { + for _, r := range replaces { + if seenrep[r.Old] { + continue // Don't print the same error more than once + } + seenrep[r.Old] = true + rNew, modRoot, replacementSource := replacementFrom(r.Old) + rNewCanonical := canonicalizeReplacePath(rNew, modRoot) + vr := vendorMeta[r.Old].Replacement + if vr == (module.Version{}) { + if rNewCanonical == (module.Version{}) { + // r.Old is not actually replaced. It might be a main module. + // Don't return an error. + } else if pre114 && (r.Old.Version == "" || vendorVersion[r.Old.Path] != r.Old.Version) { + // Before 1.14, modules.txt omitted wildcard replacements and + // replacements for modules that did not have any packages to vendor. 
+ } else { + vendErrorf(r.Old, "is replaced in %s, but not marked as replaced in vendor/modules.txt", base.ShortPath(replacementSource)) + } + } else if vr != rNewCanonical { + vendErrorf(r.Old, "is replaced by %s in %s, but marked as replaced by %s in vendor/modules.txt", describe(rNew), base.ShortPath(replacementSource), describe(vr)) + } + } + } + for _, modFile := range modFiles { + checkReplace(modFile.Replace) + } + if MainModules.workFile != nil { + checkReplace(MainModules.workFile.Replace) + } + + for _, mod := range vendorList { + meta := vendorMeta[mod] + if meta.Explicit { + // in workspace mode, check that it's required by at least one of the main modules + var foundRequire bool + for _, index := range indexes { + if _, inGoMod := index.require[mod]; inGoMod { + foundRequire = true + } + } + if !foundRequire { + article := "" + if inWorkspaceMode() { + article = "a " + } + vendErrorf(mod, "is marked as explicit in vendor/modules.txt, but not explicitly required in %vgo.mod", article) + } + + } + } + + for _, mod := range vendorReplaced { + r := Replacement(mod) + replacementSource := "go.mod" + if inWorkspaceMode() { + replacementSource = "the workspace" + } + if r == (module.Version{}) { + vendErrorf(mod, "is marked as replaced in vendor/modules.txt, but not replaced in %s", replacementSource) + continue + } + // If both replacements exist, we've already reported that they're different above. 
+ } + + if vendErrors.Len() > 0 { + subcmd := "mod" + if inWorkspaceMode() { + subcmd = "work" + } + base.Fatalf("go: inconsistent vendoring in %s:%s\n\n\tTo ignore the vendor directory, use -mod=readonly or -mod=mod.\n\tTo sync the vendor directory, run:\n\t\tgo %s vendor", filepath.Dir(VendorDir()), vendErrors, subcmd) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/mvs/errors.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/mvs/errors.go new file mode 100644 index 0000000000000000000000000000000000000000..8db65d656f027691220dcc2842e115e794f508ca --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/mvs/errors.go @@ -0,0 +1,105 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mvs + +import ( + "fmt" + "strings" + + "golang.org/x/mod/module" +) + +// BuildListError decorates an error that occurred gathering requirements +// while constructing a build list. BuildListError prints the chain +// of requirements to the module where the error occurred. +type BuildListError struct { + Err error + stack []buildListErrorElem +} + +type buildListErrorElem struct { + m module.Version + + // nextReason is the reason this module depends on the next module in the + // stack. Typically either "requires", or "updating to". + nextReason string +} + +// NewBuildListError returns a new BuildListError wrapping an error that +// occurred at a module found along the given path of requirements and/or +// upgrades, which must be non-empty. +// +// The isVersionChange function reports whether a path step is due to an +// explicit upgrade or downgrade (as opposed to an existing requirement in a +// go.mod file). A nil isVersionChange function indicates that none of the path +// steps are due to explicit version changes. 
+func NewBuildListError(err error, path []module.Version, isVersionChange func(from, to module.Version) bool) *BuildListError { + stack := make([]buildListErrorElem, 0, len(path)) + for len(path) > 1 { + reason := "requires" + if isVersionChange != nil && isVersionChange(path[0], path[1]) { + reason = "updating to" + } + stack = append(stack, buildListErrorElem{ + m: path[0], + nextReason: reason, + }) + path = path[1:] + } + stack = append(stack, buildListErrorElem{m: path[0]}) + + return &BuildListError{ + Err: err, + stack: stack, + } +} + +// Module returns the module where the error occurred. If the module stack +// is empty, this returns a zero value. +func (e *BuildListError) Module() module.Version { + if len(e.stack) == 0 { + return module.Version{} + } + return e.stack[len(e.stack)-1].m +} + +func (e *BuildListError) Error() string { + b := &strings.Builder{} + stack := e.stack + + // Don't print modules at the beginning of the chain without a + // version. These always seem to be the main module or a + // synthetic module ("target@"). + for len(stack) > 0 && stack[0].m.Version == "" { + stack = stack[1:] + } + + if len(stack) == 0 { + b.WriteString(e.Err.Error()) + } else { + for _, elem := range stack[:len(stack)-1] { + fmt.Fprintf(b, "%s %s\n\t", elem.m, elem.nextReason) + } + // Ensure that the final module path and version are included as part of the + // error message. 
+ m := stack[len(stack)-1].m + if mErr, ok := e.Err.(*module.ModuleError); ok { + actual := module.Version{Path: mErr.Path, Version: mErr.Version} + if v, ok := mErr.Err.(*module.InvalidVersionError); ok { + actual.Version = v.Version + } + if actual == m { + fmt.Fprintf(b, "%v", e.Err) + } else { + fmt.Fprintf(b, "%s (replaced by %s): %v", m, actual, mErr.Err) + } + } else { + fmt.Fprintf(b, "%v", module.VersionError(m, e.Err)) + } + } + return b.String() +} + +func (e *BuildListError) Unwrap() error { return e.Err } diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/mvs/graph.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/mvs/graph.go new file mode 100644 index 0000000000000000000000000000000000000000..56b3c604eb1b9b5a5ecfb842003ab48d8581fd47 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/mvs/graph.go @@ -0,0 +1,226 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mvs + +import ( + "fmt" + "slices" + + "cmd/go/internal/gover" + + "golang.org/x/mod/module" +) + +// Graph implements an incremental version of the MVS algorithm, with the +// requirements pushed by the caller instead of pulled by the MVS traversal. +type Graph struct { + cmp func(p, v1, v2 string) int + roots []module.Version + + required map[module.Version][]module.Version + + isRoot map[module.Version]bool // contains true for roots and false for reachable non-roots + selected map[string]string // path → version +} + +// NewGraph returns an incremental MVS graph containing only a set of root +// dependencies and using the given max function for version strings. +// +// The caller must ensure that the root slice is not modified while the Graph +// may be in use. 
+func NewGraph(cmp func(p, v1, v2 string) int, roots []module.Version) *Graph { + g := &Graph{ + cmp: cmp, + roots: slices.Clip(roots), + required: make(map[module.Version][]module.Version), + isRoot: make(map[module.Version]bool), + selected: make(map[string]string), + } + + for _, m := range roots { + g.isRoot[m] = true + if g.cmp(m.Path, g.Selected(m.Path), m.Version) < 0 { + g.selected[m.Path] = m.Version + } + } + + return g +} + +// Require adds the information that module m requires all modules in reqs. +// The reqs slice must not be modified after it is passed to Require. +// +// m must be reachable by some existing chain of requirements from g's target, +// and Require must not have been called for it already. +// +// If any of the modules in reqs has the same path as g's target, +// the target must have higher precedence than the version in req. +func (g *Graph) Require(m module.Version, reqs []module.Version) { + // To help catch disconnected-graph bugs, enforce that all required versions + // are actually reachable from the roots (and therefore should affect the + // selected versions of the modules they name). + if _, reachable := g.isRoot[m]; !reachable { + panic(fmt.Sprintf("%v is not reachable from any root", m)) + } + + // Truncate reqs to its capacity to avoid aliasing bugs if it is later + // returned from RequiredBy and appended to. + reqs = slices.Clip(reqs) + + if _, dup := g.required[m]; dup { + panic(fmt.Sprintf("requirements of %v have already been set", m)) + } + g.required[m] = reqs + + for _, dep := range reqs { + // Mark dep reachable, regardless of whether it is selected. + if _, ok := g.isRoot[dep]; !ok { + g.isRoot[dep] = false + } + + if g.cmp(dep.Path, g.Selected(dep.Path), dep.Version) < 0 { + g.selected[dep.Path] = dep.Version + } + } +} + +// RequiredBy returns the slice of requirements passed to Require for m, if any, +// with its capacity reduced to its length. 
+// If Require has not been called for m, RequiredBy(m) returns ok=false. +// +// The caller must not modify the returned slice, but may safely append to it +// and may rely on it not to be modified. +func (g *Graph) RequiredBy(m module.Version) (reqs []module.Version, ok bool) { + reqs, ok = g.required[m] + return reqs, ok +} + +// Selected returns the selected version of the given module path. +// +// If no version is selected, Selected returns version "none". +func (g *Graph) Selected(path string) (version string) { + v, ok := g.selected[path] + if !ok { + return "none" + } + return v +} + +// BuildList returns the selected versions of all modules present in the Graph, +// beginning with the selected versions of each module path in the roots of g. +// +// The order of the remaining elements in the list is deterministic +// but arbitrary. +func (g *Graph) BuildList() []module.Version { + seenRoot := make(map[string]bool, len(g.roots)) + + var list []module.Version + for _, r := range g.roots { + if seenRoot[r.Path] { + // Multiple copies of the same root, with the same or different versions, + // are a bit of a degenerate case: we will take the transitive + // requirements of both roots into account, but only the higher one can + // possibly be selected. However — especially given that we need the + // seenRoot map for later anyway — it is simpler to support this + // degenerate case than to forbid it. + continue + } + + if v := g.Selected(r.Path); v != "none" { + list = append(list, module.Version{Path: r.Path, Version: v}) + } + seenRoot[r.Path] = true + } + uniqueRoots := list + + for path, version := range g.selected { + if !seenRoot[path] { + list = append(list, module.Version{Path: path, Version: version}) + } + } + gover.ModSort(list[len(uniqueRoots):]) + + return list +} + +// WalkBreadthFirst invokes f once, in breadth-first order, for each module +// version other than "none" that appears in the graph, regardless of whether +// that version is selected. 
+func (g *Graph) WalkBreadthFirst(f func(m module.Version)) { + var queue []module.Version + enqueued := make(map[module.Version]bool) + for _, m := range g.roots { + if m.Version != "none" { + queue = append(queue, m) + enqueued[m] = true + } + } + + for len(queue) > 0 { + m := queue[0] + queue = queue[1:] + + f(m) + + reqs, _ := g.RequiredBy(m) + for _, r := range reqs { + if !enqueued[r] && r.Version != "none" { + queue = append(queue, r) + enqueued[r] = true + } + } + } +} + +// FindPath reports a shortest requirement path starting at one of the roots of +// the graph and ending at a module version m for which f(m) returns true, or +// nil if no such path exists. +func (g *Graph) FindPath(f func(module.Version) bool) []module.Version { + // firstRequires[a] = b means that in a breadth-first traversal of the + // requirement graph, the module version a was first required by b. + firstRequires := make(map[module.Version]module.Version) + + queue := g.roots + for _, m := range g.roots { + firstRequires[m] = module.Version{} + } + + for len(queue) > 0 { + m := queue[0] + queue = queue[1:] + + if f(m) { + // Construct the path reversed (because we're starting from the far + // endpoint), then reverse it. 
+ path := []module.Version{m} + for { + m = firstRequires[m] + if m.Path == "" { + break + } + path = append(path, m) + } + + i, j := 0, len(path)-1 + for i < j { + path[i], path[j] = path[j], path[i] + i++ + j-- + } + + return path + } + + reqs, _ := g.RequiredBy(m) + for _, r := range reqs { + if _, seen := firstRequires[r]; !seen { + queue = append(queue, r) + firstRequires[r] = m + } + } + } + + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/mvs/mvs.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/mvs/mvs.go new file mode 100644 index 0000000000000000000000000000000000000000..468a9859272ead5c245b00fc2d40f3aa3bb6164a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/mvs/mvs.go @@ -0,0 +1,488 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mvs implements Minimal Version Selection. +// See https://research.swtch.com/vgo-mvs. +package mvs + +import ( + "fmt" + "reflect" + "sort" + "sync" + + "cmd/go/internal/par" + + "golang.org/x/mod/module" +) + +// A Reqs is the requirement graph on which Minimal Version Selection (MVS) operates. +// +// The version strings are opaque except for the special version "none" +// (see the documentation for module.Version). In particular, MVS does not +// assume that the version strings are semantic versions; instead, the Max method +// gives access to the comparison operation. +// +// It must be safe to call methods on a Reqs from multiple goroutines simultaneously. +// Because a Reqs may read the underlying graph from the network on demand, +// the MVS algorithms parallelize the traversal to overlap network delays. +type Reqs interface { + // Required returns the module versions explicitly required by m itself. + // The caller must not modify the returned list. 
+ Required(m module.Version) ([]module.Version, error) + + // Max returns the maximum of v1 and v2 (it returns either v1 or v2) + // in the module with path p. + // + // For all versions v, Max(v, "none") must be v, + // and for the target passed as the first argument to MVS functions, + // Max(target, v) must be target. + // + // Note that v1 < v2 can be written Max(v1, v2) != v1 + // and similarly v1 <= v2 can be written Max(v1, v2) == v2. + Max(p, v1, v2 string) string +} + +// An UpgradeReqs is a Reqs that can also identify available upgrades. +type UpgradeReqs interface { + Reqs + + // Upgrade returns the upgraded version of m, + // for use during an UpgradeAll operation. + // If m should be kept as is, Upgrade returns m. + // If m is not yet used in the build, then m.Version will be "none". + // More typically, m.Version will be the version required + // by some other module in the build. + // + // If no module version is available for the given path, + // Upgrade returns a non-nil error. + // TODO(rsc): Upgrade must be able to return errors, + // but should "no latest version" just return m instead? + Upgrade(m module.Version) (module.Version, error) +} + +// A DowngradeReqs is a Reqs that can also identify available downgrades. +type DowngradeReqs interface { + Reqs + + // Previous returns the version of m.Path immediately prior to m.Version, + // or "none" if no such version is known. + Previous(m module.Version) (module.Version, error) +} + +// BuildList returns the build list for the target module. +// +// target is the root vertex of a module requirement graph. For cmd/go, this is +// typically the main module, but note that this algorithm is not intended to +// be Go-specific: module paths and versions are treated as opaque values. +// +// reqs describes the module requirement graph and provides an opaque method +// for comparing versions. 
+// +// BuildList traverses the graph and returns a list containing the highest +// version for each visited module. The first element of the returned list is +// target itself; reqs.Max requires target.Version to compare higher than all +// other versions, so no other version can be selected. The remaining elements +// of the list are sorted by path. +// +// See https://research.swtch.com/vgo-mvs for details. +func BuildList(targets []module.Version, reqs Reqs) ([]module.Version, error) { + return buildList(targets, reqs, nil) +} + +func buildList(targets []module.Version, reqs Reqs, upgrade func(module.Version) (module.Version, error)) ([]module.Version, error) { + cmp := func(p, v1, v2 string) int { + if reqs.Max(p, v1, v2) != v1 { + return -1 + } + if reqs.Max(p, v2, v1) != v2 { + return 1 + } + return 0 + } + + var ( + mu sync.Mutex + g = NewGraph(cmp, targets) + upgrades = map[module.Version]module.Version{} + errs = map[module.Version]error{} // (non-nil errors only) + ) + + // Explore work graph in parallel in case reqs.Required + // does high-latency network operations. + var work par.Work[module.Version] + for _, target := range targets { + work.Add(target) + } + work.Do(10, func(m module.Version) { + + var required []module.Version + var err error + if m.Version != "none" { + required, err = reqs.Required(m) + } + + u := m + if upgrade != nil { + upgradeTo, upErr := upgrade(m) + if upErr == nil { + u = upgradeTo + } else if err == nil { + err = upErr + } + } + + mu.Lock() + if err != nil { + errs[m] = err + } + if u != m { + upgrades[m] = u + required = append([]module.Version{u}, required...) + } + g.Require(m, required) + mu.Unlock() + + for _, r := range required { + work.Add(r) + } + }) + + // If there was an error, find the shortest path from the target to the + // node where the error occurred so we can report a useful error message. 
+ if len(errs) > 0 { + errPath := g.FindPath(func(m module.Version) bool { + return errs[m] != nil + }) + if len(errPath) == 0 { + panic("internal error: could not reconstruct path to module with error") + } + + err := errs[errPath[len(errPath)-1]] + isUpgrade := func(from, to module.Version) bool { + if u, ok := upgrades[from]; ok { + return u == to + } + return false + } + return nil, NewBuildListError(err, errPath, isUpgrade) + } + + // The final list is the minimum version of each module found in the graph. + list := g.BuildList() + if vs := list[:len(targets)]; !reflect.DeepEqual(vs, targets) { + // target.Version will be "" for modload, the main client of MVS. + // "" denotes the main module, which has no version. However, MVS treats + // version strings as opaque, so "" is not a special value here. + // See golang.org/issue/31491, golang.org/issue/29773. + panic(fmt.Sprintf("mistake: chose versions %+v instead of targets %+v", vs, targets)) + } + return list, nil +} + +// Req returns the minimal requirement list for the target module, +// with the constraint that all module paths listed in base must +// appear in the returned list. +func Req(mainModule module.Version, base []string, reqs Reqs) ([]module.Version, error) { + list, err := BuildList([]module.Version{mainModule}, reqs) + if err != nil { + return nil, err + } + + // Note: Not running in parallel because we assume + // that list came from a previous operation that paged + // in all the requirements, so there's no I/O to overlap now. + + max := map[string]string{} + for _, m := range list { + max[m.Path] = m.Version + } + + // Compute postorder, cache requirements. 
+ var postorder []module.Version + reqCache := map[module.Version][]module.Version{} + reqCache[mainModule] = nil + + var walk func(module.Version) error + walk = func(m module.Version) error { + _, ok := reqCache[m] + if ok { + return nil + } + required, err := reqs.Required(m) + if err != nil { + return err + } + reqCache[m] = required + for _, m1 := range required { + if err := walk(m1); err != nil { + return err + } + } + postorder = append(postorder, m) + return nil + } + for _, m := range list { + if err := walk(m); err != nil { + return nil, err + } + } + + // Walk modules in reverse post-order, only adding those not implied already. + have := map[module.Version]bool{} + walk = func(m module.Version) error { + if have[m] { + return nil + } + have[m] = true + for _, m1 := range reqCache[m] { + walk(m1) + } + return nil + } + // First walk the base modules that must be listed. + var min []module.Version + haveBase := map[string]bool{} + for _, path := range base { + if haveBase[path] { + continue + } + m := module.Version{Path: path, Version: max[path]} + min = append(min, m) + walk(m) + haveBase[path] = true + } + // Now the reverse postorder to bring in anything else. + for i := len(postorder) - 1; i >= 0; i-- { + m := postorder[i] + if max[m.Path] != m.Version { + // Older version. + continue + } + if !have[m] { + min = append(min, m) + walk(m) + } + } + sort.Slice(min, func(i, j int) bool { + return min[i].Path < min[j].Path + }) + return min, nil +} + +// UpgradeAll returns a build list for the target module +// in which every module is upgraded to its latest version. +func UpgradeAll(target module.Version, reqs UpgradeReqs) ([]module.Version, error) { + return buildList([]module.Version{target}, reqs, func(m module.Version) (module.Version, error) { + if m.Path == target.Path { + return target, nil + } + + return reqs.Upgrade(m) + }) +} + +// Upgrade returns a build list for the target module +// in which the given additional modules are upgraded. 
+func Upgrade(target module.Version, reqs UpgradeReqs, upgrade ...module.Version) ([]module.Version, error) { + list, err := reqs.Required(target) + if err != nil { + return nil, err + } + + pathInList := make(map[string]bool, len(list)) + for _, m := range list { + pathInList[m.Path] = true + } + list = append([]module.Version(nil), list...) + + upgradeTo := make(map[string]string, len(upgrade)) + for _, u := range upgrade { + if !pathInList[u.Path] { + list = append(list, module.Version{Path: u.Path, Version: "none"}) + } + if prev, dup := upgradeTo[u.Path]; dup { + upgradeTo[u.Path] = reqs.Max(u.Path, prev, u.Version) + } else { + upgradeTo[u.Path] = u.Version + } + } + + return buildList([]module.Version{target}, &override{target, list, reqs}, func(m module.Version) (module.Version, error) { + if v, ok := upgradeTo[m.Path]; ok { + return module.Version{Path: m.Path, Version: v}, nil + } + return m, nil + }) +} + +// Downgrade returns a build list for the target module +// in which the given additional modules are downgraded, +// potentially overriding the requirements of the target. +// +// The versions to be downgraded may be unreachable from reqs.Latest and +// reqs.Previous, but the methods of reqs must otherwise handle such versions +// correctly. +func Downgrade(target module.Version, reqs DowngradeReqs, downgrade ...module.Version) ([]module.Version, error) { + // Per https://research.swtch.com/vgo-mvs#algorithm_4: + // “To avoid an unnecessary downgrade to E 1.1, we must also add a new + // requirement on E 1.2. We can apply Algorithm R to find the minimal set of + // new requirements to write to go.mod.” + // + // In order to generate those new requirements, we need to identify versions + // for every module in the build list — not just reqs.Required(target). 
+ list, err := BuildList([]module.Version{target}, reqs) + if err != nil { + return nil, err + } + list = list[1:] // remove target + + max := make(map[string]string) + for _, r := range list { + max[r.Path] = r.Version + } + for _, d := range downgrade { + if v, ok := max[d.Path]; !ok || reqs.Max(d.Path, v, d.Version) != d.Version { + max[d.Path] = d.Version + } + } + + var ( + added = make(map[module.Version]bool) + rdeps = make(map[module.Version][]module.Version) + excluded = make(map[module.Version]bool) + ) + var exclude func(module.Version) + exclude = func(m module.Version) { + if excluded[m] { + return + } + excluded[m] = true + for _, p := range rdeps[m] { + exclude(p) + } + } + var add func(module.Version) + add = func(m module.Version) { + if added[m] { + return + } + added[m] = true + if v, ok := max[m.Path]; ok && reqs.Max(m.Path, m.Version, v) != v { + // m would upgrade an existing dependency — it is not a strict downgrade, + // and because it was already present as a dependency, it could affect the + // behavior of other relevant packages. + exclude(m) + return + } + list, err := reqs.Required(m) + if err != nil { + // If we can't load the requirements, we couldn't load the go.mod file. + // There are a number of reasons this can happen, but this usually + // means an older version of the module had a missing or invalid + // go.mod file. For example, if example.com/mod released v2.0.0 before + // migrating to modules (v2.0.0+incompatible), then added a valid go.mod + // in v2.0.1, downgrading from v2.0.1 would cause this error. + // + // TODO(golang.org/issue/31730, golang.org/issue/30134): if the error + // is transient (we couldn't download go.mod), return the error from + // Downgrade. Currently, we can't tell what kind of error it is. 
+			exclude(m)
+			return
+		}
+		for _, r := range list {
+			add(r)
+			if excluded[r] {
+				exclude(m)
+				return
+			}
+			rdeps[r] = append(rdeps[r], m)
+		}
+	}
+
+	downgraded := make([]module.Version, 0, len(list)+1)
+	downgraded = append(downgraded, target)
+List:
+	for _, r := range list {
+		add(r)
+		for excluded[r] {
+			p, err := reqs.Previous(r)
+			if err != nil {
+				// This is likely a transient error reaching the repository,
+				// rather than a permanent error with the retrieved version.
+				//
+				// TODO(golang.org/issue/31730, golang.org/issue/30134):
+				// decide what to do based on the actual error.
+				return nil, err
+			}
+			// If the target version is a pseudo-version, it may not be
+			// included when iterating over prior versions using reqs.Previous.
+			// Insert it into the right place in the iteration.
+			// If v is excluded, p should be returned again by reqs.Previous on the next iteration.
+			if v := max[r.Path]; reqs.Max(r.Path, v, r.Version) != v && reqs.Max(r.Path, p.Version, v) != p.Version {
+				p.Version = v
+			}
+			if p.Version == "none" {
+				continue List
+			}
+			add(p)
+			r = p
+		}
+		downgraded = append(downgraded, r)
+	}
+
+	// The downgrades we computed above only downgrade to versions enumerated by
+	// reqs.Previous. However, reqs.Previous omits some versions — such as
+	// pseudo-versions and retracted versions — that may be selected as transitive
+	// requirements of other modules.
+	//
+	// If one of those requirements pulls the version back up above the version
+	// identified by reqs.Previous, then the transitive dependencies of that
+	// initially-downgraded version should no longer matter — in particular, we
+	// should not add new dependencies on module paths that nothing else in the
+	// updated module graph even requires.
+	//
+	// In order to eliminate those spurious dependencies, we recompute the build
+	// list with the actual versions of the downgraded modules as selected by MVS,
+	// instead of our initial downgrades.
+ // (See the downhiddenartifact and downhiddencross test cases). + actual, err := BuildList([]module.Version{target}, &override{ + target: target, + list: downgraded, + Reqs: reqs, + }) + if err != nil { + return nil, err + } + actualVersion := make(map[string]string, len(actual)) + for _, m := range actual { + actualVersion[m.Path] = m.Version + } + + downgraded = downgraded[:0] + for _, m := range list { + if v, ok := actualVersion[m.Path]; ok { + downgraded = append(downgraded, module.Version{Path: m.Path, Version: v}) + } + } + + return BuildList([]module.Version{target}, &override{ + target: target, + list: downgraded, + Reqs: reqs, + }) +} + +type override struct { + target module.Version + list []module.Version + Reqs +} + +func (r *override) Required(m module.Version) ([]module.Version, error) { + if m == r.target { + return r.list, nil + } + return r.Reqs.Required(m) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/mvs/mvs_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/mvs/mvs_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6e1e71cd5c1e510d1f97c4159fbde194a1538be8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/mvs/mvs_test.go @@ -0,0 +1,635 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mvs + +import ( + "fmt" + "reflect" + "strings" + "testing" + + "golang.org/x/mod/module" +) + +var tests = ` +# Scenario from blog. +name: blog +A: B1 C2 +B1: D3 +C1: D2 +C2: D4 +C3: D5 +C4: G1 +D2: E1 +D3: E2 +D4: E2 F1 +D5: E2 +G1: C4 +A2: B1 C4 D4 +build A: A B1 C2 D4 E2 F1 +upgrade* A: A B1 C4 D5 E2 F1 G1 +upgrade A C4: A B1 C4 D4 E2 F1 G1 +build A2: A2 B1 C4 D4 E2 F1 G1 +downgrade A2 D2: A2 C4 D2 E2 F1 G1 + +name: trim +A: B1 C2 +B1: D3 +C2: B2 +B2: +build A: A B2 C2 D3 + +# Cross-dependency between D and E. 
+# No matter how it arises, should get result of merging all build lists via max, +# which leads to including both D2 and E2. + +name: cross1 +A: B C +B: D1 +C: D2 +D1: E2 +D2: E1 +build A: A B C D2 E2 + +name: cross1V +A: B2 C D2 E1 +B1: +B2: D1 +C: D2 +D1: E2 +D2: E1 +build A: A B2 C D2 E2 + +name: cross1U +A: B1 C +B1: +B2: D1 +C: D2 +D1: E2 +D2: E1 +build A: A B1 C D2 E1 +upgrade A B2: A B2 C D2 E2 + +name: cross1R +A: B C +B: D2 +C: D1 +D1: E2 +D2: E1 +build A: A B C D2 E2 + +name: cross1X +A: B C +B: D1 E2 +C: D2 +D1: E2 +D2: E1 +build A: A B C D2 E2 + +name: cross2 +A: B D2 +B: D1 +D1: E2 +D2: E1 +build A: A B D2 E2 + +name: cross2X +A: B D2 +B: D1 E2 +C: D2 +D1: E2 +D2: E1 +build A: A B D2 E2 + +name: cross3 +A: B D2 E1 +B: D1 +D1: E2 +D2: E1 +build A: A B D2 E2 + +name: cross3X +A: B D2 E1 +B: D1 E2 +D1: E2 +D2: E1 +build A: A B D2 E2 + +# Should not get E2 here, because B has been updated +# not to depend on D1 anymore. +name: cross4 +A1: B1 D2 +A2: B2 D2 +B1: D1 +B2: D2 +D1: E2 +D2: E1 +build A1: A1 B1 D2 E2 +build A2: A2 B2 D2 E1 + +# But the upgrade from A1 preserves the E2 dep explicitly. +upgrade A1 B2: A1 B2 D2 E2 +upgradereq A1 B2: B2 E2 + +name: cross5 +A: D1 +D1: E2 +D2: E1 +build A: A D1 E2 +upgrade* A: A D2 E2 +upgrade A D2: A D2 E2 +upgradereq A D2: D2 E2 + +name: cross6 +A: D2 +D1: E2 +D2: E1 +build A: A D2 E1 +upgrade* A: A D2 E2 +upgrade A E2: A D2 E2 + +name: cross7 +A: B C +B: D1 +C: E1 +D1: E2 +E1: D2 +build A: A B C D2 E2 + +# golang.org/issue/31248: +# Even though we select X2, the requirement on I1 +# via X1 should be preserved. +name: cross8 +M: A1 B1 +A1: X1 +B1: X2 +X1: I1 +X2: +build M: M A1 B1 I1 X2 + +# Upgrade from B1 to B2 should not drop the transitive dep on D. 
+name: drop +A: B1 C1 +B1: D1 +B2: +C2: +D2: +build A: A B1 C1 D1 +upgrade* A: A B2 C2 D2 + +name: simplify +A: B1 C1 +B1: C2 +C1: D1 +C2: +build A: A B1 C2 D1 + +name: up1 +A: B1 C1 +B1: +B2: +B3: +B4: +B5.hidden: +C2: +C3: +build A: A B1 C1 +upgrade* A: A B4 C3 + +name: up2 +A: B5.hidden C1 +B1: +B2: +B3: +B4: +B5.hidden: +C2: +C3: +build A: A B5.hidden C1 +upgrade* A: A B5.hidden C3 + +name: down1 +A: B2 +B1: C1 +B2: C2 +build A: A B2 C2 +downgrade A C1: A B1 C1 + +name: down2 +A: B2 E2 +B1: +B2: C2 F2 +C1: +D1: +C2: D2 E2 +D2: B2 +E2: D2 +E1: +F1: +build A: A B2 C2 D2 E2 F2 +downgrade A F1: A B1 C1 D1 E1 F1 + +# https://research.swtch.com/vgo-mvs#algorithm_4: +# “[D]owngrades are constrained to only downgrade packages, not also upgrade +# them; if an upgrade before downgrade is needed, the user must ask for it +# explicitly.” +# +# Here, downgrading B2 to B1 upgrades C1 to C2, and C2 does not depend on D2. +# However, C2 would be an upgrade — not a downgrade — so B1 must also be +# rejected. +name: downcross1 +A: B2 C1 +B1: C2 +B2: C1 +C1: D2 +C2: +D1: +D2: +build A: A B2 C1 D2 +downgrade A D1: A D1 + +# https://research.swtch.com/vgo-mvs#algorithm_4: +# “Unlike upgrades, downgrades must work by removing requirements, not adding +# them.” +# +# However, downgrading a requirement may introduce a new requirement on a +# previously-unrequired module. If each dependency's requirements are complete +# (“tidy”), that can't change the behavior of any other package whose version is +# not also being downgraded, so we should allow it. +name: downcross2 +A: B2 +B1: C1 +B2: D2 +C1: +D1: +D2: +build A: A B2 D2 +downgrade A D1: A B1 C1 D1 + +name: downcycle +A: A B2 +B2: A +B1: +build A: A B2 +downgrade A B1: A B1 + +# Both B3 and C2 require D2. +# If we downgrade D to D1, then in isolation B3 would downgrade to B1, +# because B2 is hidden — B1 is the next-highest version that is not hidden. +# However, if we downgrade D, we will also downgrade C to C1. 
+# And C1 requires B2.hidden, and B2.hidden also meets our requirements: +# it is compatible with D1 and a strict downgrade from B3. +# +# Since neither the initial nor the final build list includes B1, +# and the nothing in the final downgraded build list requires E at all, +# no dependency on E1 (required by only B1) should be introduced. +# +name: downhiddenartifact +A: B3 C2 +A1: B3 +B1: E1 +B2.hidden: +B3: D2 +C1: B2.hidden +C2: D2 +D1: +D2: +build A1: A1 B3 D2 +downgrade A1 D1: A1 B1 D1 E1 +build A: A B3 C2 D2 +downgrade A D1: A B2.hidden C1 D1 + +# Both B3 and C3 require D2. +# If we downgrade D to D1, then in isolation B3 would downgrade to B1, +# and C3 would downgrade to C1. +# But C1 requires B2.hidden, and B1 requires C2.hidden, so we can't +# downgrade to either of those without pulling the other back up a little. +# +# B2.hidden and C2.hidden are both compatible with D1, so that still +# meets our requirements — but then we're in an odd state in which +# B and C have both been downgraded to hidden versions, without any +# remaining requirements to explain how those hidden versions got there. +# +# TODO(bcmills): Would it be better to force downgrades to land on non-hidden +# versions? +# In this case, that would remove the dependencies on B and C entirely. +# +name: downhiddencross +A: B3 C3 +B1: C2.hidden +B2.hidden: +B3: D2 +C1: B2.hidden +C2.hidden: +C3: D2 +D1: +D2: +build A: A B3 C3 D2 +downgrade A D1: A B2.hidden C2.hidden D1 + +# golang.org/issue/25542. +name: noprev1 +A: B4 C2 +B2.hidden: +C2: +build A: A B4 C2 +downgrade A B2.hidden: A B2.hidden C2 + +name: noprev2 +A: B4 C2 +B2.hidden: +B1: +C2: +build A: A B4 C2 +downgrade A B2.hidden: A B2.hidden C2 + +name: noprev3 +A: B4 C2 +B3: +B2.hidden: +C2: +build A: A B4 C2 +downgrade A B2.hidden: A B2.hidden C2 + +# Cycles involving the target. + +# The target must be the newest version of itself. 
+name: cycle1 +A: B1 +B1: A1 +B2: A2 +B3: A3 +build A: A B1 +upgrade A B2: A B2 +upgrade* A: A B3 + +# golang.org/issue/29773: +# Requirements of older versions of the target +# must be carried over. +name: cycle2 +A: B1 +A1: C1 +A2: D1 +B1: A1 +B2: A2 +C1: A2 +C2: +D2: +build A: A B1 C1 D1 +upgrade* A: A B2 C2 D2 + +# Cycles with multiple possible solutions. +# (golang.org/issue/34086) +name: cycle3 +M: A1 C2 +A1: B1 +B1: C1 +B2: C2 +C1: +C2: B2 +build M: M A1 B2 C2 +req M: A1 B2 +req M A: A1 B2 +req M C: A1 C2 + +# Requirement minimization. + +name: req1 +A: B1 C1 D1 E1 F1 +B1: C1 E1 F1 +req A: B1 D1 +req A C: B1 C1 D1 + +name: req2 +A: G1 H1 +G1: H1 +H1: G1 +req A: G1 +req A G: G1 +req A H: H1 + +name: req3 +M: A1 B1 +A1: X1 +B1: X2 +X1: I1 +X2: +req M: A1 B1 + +name: reqnone +M: Anone B1 D1 E1 +B1: Cnone D1 +E1: Fnone +build M: M B1 D1 E1 +req M: B1 E1 + +name: reqdup +M: A1 B1 +A1: B1 +B1: +req M A A: A1 + +name: reqcross +M: A1 B1 C1 +A1: B1 C1 +B1: C1 +C1: +req M A B: A1 B1 +` + +func Test(t *testing.T) { + var ( + name string + reqs reqsMap + fns []func(*testing.T) + ) + flush := func() { + if name != "" { + t.Run(name, func(t *testing.T) { + for _, fn := range fns { + fn(t) + } + if len(fns) == 0 { + t.Errorf("no functions tested") + } + }) + } + } + m := func(s string) module.Version { + return module.Version{Path: s[:1], Version: s[1:]} + } + ms := func(list []string) []module.Version { + var mlist []module.Version + for _, s := range list { + mlist = append(mlist, m(s)) + } + return mlist + } + checkList := func(t *testing.T, desc string, list []module.Version, err error, val string) { + if err != nil { + t.Fatalf("%s: %v", desc, err) + } + vs := ms(strings.Fields(val)) + if !reflect.DeepEqual(list, vs) { + t.Errorf("%s = %v, want %v", desc, list, vs) + } + } + + for _, line := range strings.Split(tests, "\n") { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "#") || line == "" { + continue + } + i := strings.Index(line, ":") + if i < 0 { 
+			t.Fatalf("missing colon: %q", line)
+		}
+		key := strings.TrimSpace(line[:i])
+		val := strings.TrimSpace(line[i+1:])
+		if key == "" {
+			t.Fatalf("missing key: %q", line)
+		}
+		kf := strings.Fields(key)
+		switch kf[0] {
+		case "name":
+			if len(kf) != 1 {
+				t.Fatalf("name takes no arguments: %q", line)
+			}
+			flush()
+			reqs = make(reqsMap)
+			fns = nil
+			name = val
+			continue
+		case "build":
+			if len(kf) != 2 {
+				t.Fatalf("build takes one argument: %q", line)
+			}
+			fns = append(fns, func(t *testing.T) {
+				list, err := BuildList([]module.Version{m(kf[1])}, reqs)
+				checkList(t, key, list, err, val)
+			})
+			continue
+		case "upgrade*":
+			if len(kf) != 2 {
+				t.Fatalf("upgrade* takes one argument: %q", line)
+			}
+			fns = append(fns, func(t *testing.T) {
+				list, err := UpgradeAll(m(kf[1]), reqs)
+				checkList(t, key, list, err, val)
+			})
+			continue
+		case "upgradereq":
+			if len(kf) < 2 {
+				t.Fatalf("upgrade takes at least one argument: %q", line)
+			}
+			fns = append(fns, func(t *testing.T) {
+				list, err := Upgrade(m(kf[1]), reqs, ms(kf[2:])...)
+				if err == nil {
+					// Copy the reqs map, but substitute the upgraded requirements in
+					// place of the target's original requirements.
+					upReqs := make(reqsMap, len(reqs))
+					for m, r := range reqs {
+						upReqs[m] = r
+					}
+					upReqs[m(kf[1])] = list
+
+					list, err = Req(m(kf[1]), nil, upReqs)
+				}
+				checkList(t, key, list, err, val)
+			})
+			continue
+		case "upgrade":
+			if len(kf) < 2 {
+				t.Fatalf("upgrade takes at least one argument: %q", line)
+			}
+			fns = append(fns, func(t *testing.T) {
+				list, err := Upgrade(m(kf[1]), reqs, ms(kf[2:])...)
+				checkList(t, key, list, err, val)
+			})
+			continue
+		case "downgrade":
+			if len(kf) < 2 {
+				t.Fatalf("downgrade takes at least one argument: %q", line)
+			}
+			fns = append(fns, func(t *testing.T) {
+				// kf[1] is the target module; only kf[2:] name the modules to
+				// downgrade (consistent with the "upgrade" and "upgradereq"
+				// cases above). Passing kf[1:] would feed the target itself
+				// to Downgrade as a module-to-downgrade.
+				list, err := Downgrade(m(kf[1]), reqs, ms(kf[2:])...)
+ checkList(t, key, list, err, val) + }) + continue + case "req": + if len(kf) < 2 { + t.Fatalf("req takes at least one argument: %q", line) + } + fns = append(fns, func(t *testing.T) { + list, err := Req(m(kf[1]), kf[2:], reqs) + checkList(t, key, list, err, val) + }) + continue + } + if len(kf) == 1 && 'A' <= key[0] && key[0] <= 'Z' { + var rs []module.Version + for _, f := range strings.Fields(val) { + r := m(f) + if reqs[r] == nil { + reqs[r] = []module.Version{} + } + rs = append(rs, r) + } + reqs[m(key)] = rs + continue + } + t.Fatalf("bad line: %q", line) + } + flush() +} + +type reqsMap map[module.Version][]module.Version + +func (r reqsMap) Max(_, v1, v2 string) string { + if v1 == "none" || v2 == "" { + return v2 + } + if v2 == "none" || v1 == "" { + return v1 + } + if v1 < v2 { + return v2 + } + return v1 +} + +func (r reqsMap) Upgrade(m module.Version) (module.Version, error) { + u := module.Version{Version: "none"} + for k := range r { + if k.Path == m.Path && r.Max(k.Path, u.Version, k.Version) == k.Version && !strings.HasSuffix(k.Version, ".hidden") { + u = k + } + } + if u.Path == "" { + return module.Version{}, fmt.Errorf("missing module: %v", module.Version{Path: m.Path}) + } + return u, nil +} + +func (r reqsMap) Previous(m module.Version) (module.Version, error) { + var p module.Version + for k := range r { + if k.Path == m.Path && p.Version < k.Version && k.Version < m.Version && !strings.HasSuffix(k.Version, ".hidden") { + p = k + } + } + if p.Path == "" { + return module.Version{Path: m.Path, Version: "none"}, nil + } + return p, nil +} + +func (r reqsMap) Required(m module.Version) ([]module.Version, error) { + rr, ok := r[m] + if !ok { + return nil, fmt.Errorf("missing module: %v", m) + } + return rr, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/par/queue.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/par/queue.go new file mode 100644 index 
0000000000000000000000000000000000000000..180bc75e3430041c77c52466ec021cfb1d6c12b1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/par/queue.go @@ -0,0 +1,88 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package par + +import "fmt" + +// Queue manages a set of work items to be executed in parallel. The number of +// active work items is limited, and excess items are queued sequentially. +type Queue struct { + maxActive int + st chan queueState +} + +type queueState struct { + active int // number of goroutines processing work; always nonzero when len(backlog) > 0 + backlog []func() + idle chan struct{} // if non-nil, closed when active becomes 0 +} + +// NewQueue returns a Queue that executes up to maxActive items in parallel. +// +// maxActive must be positive. +func NewQueue(maxActive int) *Queue { + if maxActive < 1 { + panic(fmt.Sprintf("par.NewQueue called with nonpositive limit (%d)", maxActive)) + } + + q := &Queue{ + maxActive: maxActive, + st: make(chan queueState, 1), + } + q.st <- queueState{} + return q +} + +// Add adds f as a work item in the queue. +// +// Add returns immediately, but the queue will be marked as non-idle until after +// f (and any subsequently-added work) has completed. +func (q *Queue) Add(f func()) { + st := <-q.st + if st.active == q.maxActive { + st.backlog = append(st.backlog, f) + q.st <- st + return + } + if st.active == 0 { + // Mark q as non-idle. + st.idle = nil + } + st.active++ + q.st <- st + + go func() { + for { + f() + + st := <-q.st + if len(st.backlog) == 0 { + if st.active--; st.active == 0 && st.idle != nil { + close(st.idle) + } + q.st <- st + return + } + f, st.backlog = st.backlog[0], st.backlog[1:] + q.st <- st + } + }() +} + +// Idle returns a channel that will be closed when q has no (active or enqueued) +// work outstanding. 
+func (q *Queue) Idle() <-chan struct{} { + st := <-q.st + defer func() { q.st <- st }() + + if st.idle == nil { + st.idle = make(chan struct{}) + if st.active == 0 { + close(st.idle) + } + } + + return st.idle +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/par/queue_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/par/queue_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1331e65f98a81f0c02a8f8cce191338b638a41ec --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/par/queue_test.go @@ -0,0 +1,79 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package par + +import ( + "sync" + "testing" +) + +func TestQueueIdle(t *testing.T) { + q := NewQueue(1) + select { + case <-q.Idle(): + default: + t.Errorf("NewQueue(1) is not initially idle.") + } + + started := make(chan struct{}) + unblock := make(chan struct{}) + q.Add(func() { + close(started) + <-unblock + }) + + <-started + idle := q.Idle() + select { + case <-idle: + t.Errorf("NewQueue(1) is marked idle while processing work.") + default: + } + + close(unblock) + <-idle // Should be closed as soon as the Add callback returns. +} + +func TestQueueBacklog(t *testing.T) { + const ( + maxActive = 2 + totalWork = 3 * maxActive + ) + + q := NewQueue(maxActive) + t.Logf("q = NewQueue(%d)", maxActive) + + var wg sync.WaitGroup + wg.Add(totalWork) + started := make([]chan struct{}, totalWork) + unblock := make(chan struct{}) + for i := range started { + started[i] = make(chan struct{}) + i := i + q.Add(func() { + close(started[i]) + <-unblock + wg.Done() + }) + } + + for i, c := range started { + if i < maxActive { + <-c // Work item i should be started immediately. 
+ } else { + select { + case <-c: + t.Errorf("Work item %d started before previous items finished.", i) + default: + } + } + } + + close(unblock) + for _, c := range started[maxActive:] { + <-c + } + wg.Wait() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/par/work.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/par/work.go new file mode 100644 index 0000000000000000000000000000000000000000..5b6de9425a583f17c67b8c2da7de5b2a3d17dd0d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/par/work.go @@ -0,0 +1,223 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package par implements parallel execution helpers. +package par + +import ( + "errors" + "math/rand" + "sync" + "sync/atomic" +) + +// Work manages a set of work items to be executed in parallel, at most once each. +// The items in the set must all be valid map keys. +type Work[T comparable] struct { + f func(T) // function to run for each item + running int // total number of runners + + mu sync.Mutex + added map[T]bool // items added to set + todo []T // items yet to be run + wait sync.Cond // wait when todo is empty + waiting int // number of runners waiting for todo +} + +func (w *Work[T]) init() { + if w.added == nil { + w.added = make(map[T]bool) + } +} + +// Add adds item to the work set, if it hasn't already been added. +func (w *Work[T]) Add(item T) { + w.mu.Lock() + w.init() + if !w.added[item] { + w.added[item] = true + w.todo = append(w.todo, item) + if w.waiting > 0 { + w.wait.Signal() + } + } + w.mu.Unlock() +} + +// Do runs f in parallel on items from the work set, +// with at most n invocations of f running at a time. +// It returns when everything added to the work set has been processed. 
+// At least one item should have been added to the work set +// before calling Do (or else Do returns immediately), +// but it is allowed for f(item) to add new items to the set. +// Do should only be used once on a given Work. +func (w *Work[T]) Do(n int, f func(item T)) { + if n < 1 { + panic("par.Work.Do: n < 1") + } + if w.running >= 1 { + panic("par.Work.Do: already called Do") + } + + w.running = n + w.f = f + w.wait.L = &w.mu + + for i := 0; i < n-1; i++ { + go w.runner() + } + w.runner() +} + +// runner executes work in w until both nothing is left to do +// and all the runners are waiting for work. +// (Then all the runners return.) +func (w *Work[T]) runner() { + for { + // Wait for something to do. + w.mu.Lock() + for len(w.todo) == 0 { + w.waiting++ + if w.waiting == w.running { + // All done. + w.wait.Broadcast() + w.mu.Unlock() + return + } + w.wait.Wait() + w.waiting-- + } + + // Pick something to do at random, + // to eliminate pathological contention + // in case items added at about the same time + // are most likely to contend. + i := rand.Intn(len(w.todo)) + item := w.todo[i] + w.todo[i] = w.todo[len(w.todo)-1] + w.todo = w.todo[:len(w.todo)-1] + w.mu.Unlock() + + w.f(item) + } +} + +// ErrCache is like Cache except that it also stores +// an error value alongside the cached value V. +type ErrCache[K comparable, V any] struct { + Cache[K, errValue[V]] +} + +type errValue[V any] struct { + v V + err error +} + +func (c *ErrCache[K, V]) Do(key K, f func() (V, error)) (V, error) { + v := c.Cache.Do(key, func() errValue[V] { + v, err := f() + return errValue[V]{v, err} + }) + return v.v, v.err +} + +var ErrCacheEntryNotFound = errors.New("cache entry not found") + +// Get returns the cached result associated with key. +// It returns ErrCacheEntryNotFound if there is no such result. 
+func (c *ErrCache[K, V]) Get(key K) (V, error) { + v, ok := c.Cache.Get(key) + if !ok { + v.err = ErrCacheEntryNotFound + } + return v.v, v.err +} + +// Cache runs an action once per key and caches the result. +type Cache[K comparable, V any] struct { + m sync.Map +} + +type cacheEntry[V any] struct { + done atomic.Bool + mu sync.Mutex + result V +} + +// Do calls the function f if and only if Do is being called for the first time with this key. +// No call to Do with a given key returns until the one call to f returns. +// Do returns the value returned by the one call to f. +func (c *Cache[K, V]) Do(key K, f func() V) V { + entryIface, ok := c.m.Load(key) + if !ok { + entryIface, _ = c.m.LoadOrStore(key, new(cacheEntry[V])) + } + e := entryIface.(*cacheEntry[V]) + if !e.done.Load() { + e.mu.Lock() + if !e.done.Load() { + e.result = f() + e.done.Store(true) + } + e.mu.Unlock() + } + return e.result +} + +// Get returns the cached result associated with key +// and reports whether there is such a result. +// +// If the result for key is being computed, Get does not wait for the computation to finish. +func (c *Cache[K, V]) Get(key K) (V, bool) { + entryIface, ok := c.m.Load(key) + if !ok { + return *new(V), false + } + e := entryIface.(*cacheEntry[V]) + if !e.done.Load() { + return *new(V), false + } + return e.result, true +} + +// Clear removes all entries in the cache. +// +// Concurrent calls to Get may return old values. Concurrent calls to Do +// may return old values or store results in entries that have been deleted. +// +// TODO(jayconrod): Delete this after the package cache clearing functions +// in internal/load have been removed. +func (c *Cache[K, V]) Clear() { + c.m.Range(func(key, value any) bool { + c.m.Delete(key) + return true + }) +} + +// Delete removes an entry from the map. It is safe to call Delete for an +// entry that does not exist. 
Delete will return quickly, even if the result +// for a key is still being computed; the computation will finish, but the +// result won't be accessible through the cache. +// +// TODO(jayconrod): Delete this after the package cache clearing functions +// in internal/load have been removed. +func (c *Cache[K, V]) Delete(key K) { + c.m.Delete(key) +} + +// DeleteIf calls pred for each key in the map. If pred returns true for a key, +// DeleteIf removes the corresponding entry. If the result for a key is +// still being computed, DeleteIf will remove the entry without waiting for +// the computation to finish. The result won't be accessible through the cache. +// +// TODO(jayconrod): Delete this after the package cache clearing functions +// in internal/load have been removed. +func (c *Cache[K, V]) DeleteIf(pred func(key K) bool) { + c.m.Range(func(key, _ any) bool { + if key := key.(K); pred(key) { + c.Delete(key) + } + return true + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/par/work_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/par/work_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9d96ffae50ca558db797bd0b4c2d230e767b3faa --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/par/work_test.go @@ -0,0 +1,76 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package par + +import ( + "sync/atomic" + "testing" + "time" +) + +func TestWork(t *testing.T) { + var w Work[int] + + const N = 10000 + n := int32(0) + w.Add(N) + w.Do(100, func(i int) { + atomic.AddInt32(&n, 1) + if i >= 2 { + w.Add(i - 1) + w.Add(i - 2) + } + w.Add(i >> 1) + w.Add((i >> 1) ^ 1) + }) + if n != N+1 { + t.Fatalf("ran %d items, expected %d", n, N+1) + } +} + +func TestWorkParallel(t *testing.T) { + for tries := 0; tries < 10; tries++ { + var w Work[int] + const N = 100 + for i := 0; i < N; i++ { + w.Add(i) + } + start := time.Now() + var n int32 + w.Do(N, func(x int) { + time.Sleep(1 * time.Millisecond) + atomic.AddInt32(&n, +1) + }) + if n != N { + t.Fatalf("par.Work.Do did not do all the work") + } + if time.Since(start) < N/2*time.Millisecond { + return + } + } + t.Fatalf("par.Work.Do does not seem to be parallel") +} + +func TestCache(t *testing.T) { + var cache Cache[int, int] + + n := 1 + v := cache.Do(1, func() int { n++; return n }) + if v != 2 { + t.Fatalf("cache.Do(1) did not run f") + } + v = cache.Do(1, func() int { n++; return n }) + if v != 2 { + t.Fatalf("cache.Do(1) ran f again!") + } + v = cache.Do(2, func() int { n++; return n }) + if v != 3 { + t.Fatalf("cache.Do(2) did not run f") + } + v = cache.Do(1, func() int { n++; return n }) + if v != 2 { + t.Fatalf("cache.Do(1) did not returned saved value from original cache.Do(1)") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/robustio/robustio.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/robustio/robustio.go new file mode 100644 index 0000000000000000000000000000000000000000..15b33773cf5f5b065a3b1931d3d8ea7f5446f680 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/robustio/robustio.go @@ -0,0 +1,53 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package robustio wraps I/O functions that are prone to failure on Windows, +// transparently retrying errors up to an arbitrary timeout. +// +// Errors are classified heuristically and retries are bounded, so the functions +// in this package do not completely eliminate spurious errors. However, they do +// significantly reduce the rate of failure in practice. +// +// If so, the error will likely wrap one of: +// The functions in this package do not completely eliminate spurious errors, +// but substantially reduce their rate of occurrence in practice. +package robustio + +// Rename is like os.Rename, but on Windows retries errors that may occur if the +// file is concurrently read or overwritten. +// +// (See golang.org/issue/31247 and golang.org/issue/32188.) +func Rename(oldpath, newpath string) error { + return rename(oldpath, newpath) +} + +// ReadFile is like os.ReadFile, but on Windows retries errors that may +// occur if the file is concurrently replaced. +// +// (See golang.org/issue/31247 and golang.org/issue/32188.) +func ReadFile(filename string) ([]byte, error) { + return readFile(filename) +} + +// RemoveAll is like os.RemoveAll, but on Windows retries errors that may occur +// if an executable file in the directory has recently been executed. +// +// (See golang.org/issue/19491.) +func RemoveAll(path string) error { + return removeAll(path) +} + +// IsEphemeralError reports whether err is one of the errors that the functions +// in this package attempt to mitigate. +// +// Errors considered ephemeral include: +// - syscall.ERROR_ACCESS_DENIED +// - syscall.ERROR_FILE_NOT_FOUND +// - internal/syscall/windows.ERROR_SHARING_VIOLATION +// +// This set may be expanded in the future; programs must not rely on the +// non-ephemerality of any given error. 
+func IsEphemeralError(err error) bool { + return isEphemeralError(err) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/robustio/robustio_darwin.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/robustio/robustio_darwin.go new file mode 100644 index 0000000000000000000000000000000000000000..99fd8ebc2fff18afa6764b3e8c2a0fd26caa9241 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/robustio/robustio_darwin.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio + +import ( + "errors" + "syscall" +) + +const errFileNotFound = syscall.ENOENT + +// isEphemeralError returns true if err may be resolved by waiting. +func isEphemeralError(err error) bool { + var errno syscall.Errno + if errors.As(err, &errno) { + return errno == errFileNotFound + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/robustio/robustio_flaky.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/robustio/robustio_flaky.go new file mode 100644 index 0000000000000000000000000000000000000000..c56e36ca62412aae46a98f41f5a2d9ad5191fe8e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/robustio/robustio_flaky.go @@ -0,0 +1,91 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows || darwin + +package robustio + +import ( + "errors" + "math/rand" + "os" + "syscall" + "time" +) + +const arbitraryTimeout = 2000 * time.Millisecond + +// retry retries ephemeral errors from f up to an arbitrary timeout +// to work around filesystem flakiness on Windows and Darwin. 
+func retry(f func() (err error, mayRetry bool)) error { + var ( + bestErr error + lowestErrno syscall.Errno + start time.Time + nextSleep time.Duration = 1 * time.Millisecond + ) + for { + err, mayRetry := f() + if err == nil || !mayRetry { + return err + } + + var errno syscall.Errno + if errors.As(err, &errno) && (lowestErrno == 0 || errno < lowestErrno) { + bestErr = err + lowestErrno = errno + } else if bestErr == nil { + bestErr = err + } + + if start.IsZero() { + start = time.Now() + } else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout { + break + } + time.Sleep(nextSleep) + nextSleep += time.Duration(rand.Int63n(int64(nextSleep))) + } + + return bestErr +} + +// rename is like os.Rename, but retries ephemeral errors. +// +// On Windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with +// MOVEFILE_REPLACE_EXISTING. +// +// Windows also provides a different system call, ReplaceFile, +// that provides similar semantics, but perhaps preserves more metadata. (The +// documentation on the differences between the two is very sparse.) +// +// Empirical error rates with MoveFileEx are lower under modest concurrency, so +// for now we're sticking with what the os package already provides. +func rename(oldpath, newpath string) (err error) { + return retry(func() (err error, mayRetry bool) { + err = os.Rename(oldpath, newpath) + return err, isEphemeralError(err) + }) +} + +// readFile is like os.ReadFile, but retries ephemeral errors. +func readFile(filename string) ([]byte, error) { + var b []byte + err := retry(func() (err error, mayRetry bool) { + b, err = os.ReadFile(filename) + + // Unlike in rename, we do not retry errFileNotFound here: it can occur + // as a spurious error, but the file may also genuinely not exist, so the + // increase in robustness is probably not worth the extra latency. 
+ return err, isEphemeralError(err) && !errors.Is(err, errFileNotFound) + }) + return b, err +} + +func removeAll(path string) error { + return retry(func() (err error, mayRetry bool) { + err = os.RemoveAll(path) + return err, isEphemeralError(err) + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/robustio/robustio_other.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/robustio/robustio_other.go new file mode 100644 index 0000000000000000000000000000000000000000..da9a46e4face362eb5d95872bccc2ecd4a7a5424 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/robustio/robustio_other.go @@ -0,0 +1,27 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows && !darwin + +package robustio + +import ( + "os" +) + +func rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +func readFile(filename string) ([]byte, error) { + return os.ReadFile(filename) +} + +func removeAll(path string) error { + return os.RemoveAll(path) +} + +func isEphemeralError(err error) bool { + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/robustio/robustio_windows.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/robustio/robustio_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..687dcb66f83d15d0be4c7d45f4652a236afb2ac2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/robustio/robustio_windows.go @@ -0,0 +1,27 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio + +import ( + "errors" + "internal/syscall/windows" + "syscall" +) + +const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND + +// isEphemeralError returns true if err may be resolved by waiting. 
+func isEphemeralError(err error) bool { + var errno syscall.Errno + if errors.As(err, &errno) { + switch errno { + case syscall.ERROR_ACCESS_DENIED, + syscall.ERROR_FILE_NOT_FOUND, + windows.ERROR_SHARING_VIOLATION: + return true + } + } + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/run/run.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/run/run.go new file mode 100644 index 0000000000000000000000000000000000000000..a97d975e2251e2aa903fce4e280389e6c0111bb8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/run/run.go @@ -0,0 +1,212 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package run implements the “go run” command. +package run + +import ( + "context" + "go/build" + "path" + "path/filepath" + "strings" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/load" + "cmd/go/internal/modload" + "cmd/go/internal/str" + "cmd/go/internal/work" +) + +var CmdRun = &base.Command{ + UsageLine: "go run [build flags] [-exec xprog] package [arguments...]", + Short: "compile and run Go program", + Long: ` +Run compiles and runs the named main Go package. +Typically the package is specified as a list of .go source files from a single +directory, but it may also be an import path, file system path, or pattern +matching a single known package, as in 'go run .' or 'go run my/cmd'. + +If the package argument has a version suffix (like @latest or @v1.0.0), +"go run" builds the program in module-aware mode, ignoring the go.mod file in +the current directory or any parent directory, if there is one. This is useful +for running programs without affecting the dependencies of the main module. 
+ +If the package argument doesn't have a version suffix, "go run" may run in +module-aware mode or GOPATH mode, depending on the GO111MODULE environment +variable and the presence of a go.mod file. See 'go help modules' for details. +If module-aware mode is enabled, "go run" runs in the context of the main +module. + +By default, 'go run' runs the compiled binary directly: 'a.out arguments...'. +If the -exec flag is given, 'go run' invokes the binary using xprog: + 'xprog a.out arguments...'. +If the -exec flag is not given, GOOS or GOARCH is different from the system +default, and a program named go_$GOOS_$GOARCH_exec can be found +on the current search path, 'go run' invokes the binary using that program, +for example 'go_js_wasm_exec a.out arguments...'. This allows execution of +cross-compiled programs when a simulator or other execution method is +available. + +By default, 'go run' compiles the binary without generating the information +used by debuggers, to reduce build time. To include debugger information in +the binary, use 'go build'. + +The exit status of Run is not the exit status of the compiled binary. + +For more about build flags, see 'go help build'. +For more about specifying packages, see 'go help packages'. + +See also: go build. + `, +} + +func init() { + CmdRun.Run = runRun // break init loop + + work.AddBuildFlags(CmdRun, work.DefaultBuildFlags) + if cfg.Experiment != nil && cfg.Experiment.CoverageRedesign { + work.AddCoverFlags(CmdRun, nil) + } + CmdRun.Flag.Var((*base.StringsFlag)(&work.ExecCmd), "exec", "") +} + +func runRun(ctx context.Context, cmd *base.Command, args []string) { + if shouldUseOutsideModuleMode(args) { + // Set global module flags for 'go run cmd@version'. + // This must be done before modload.Init, but we need to call work.BuildInit + // before loading packages, since it affects package locations, e.g., + // for -race and -msan. 
+ modload.ForceUseModules = true + modload.RootMode = modload.NoRoot + modload.AllowMissingModuleImports() + modload.Init() + } else { + modload.InitWorkfile() + } + + work.BuildInit() + b := work.NewBuilder("") + defer func() { + if err := b.Close(); err != nil { + base.Fatal(err) + } + }() + + i := 0 + for i < len(args) && strings.HasSuffix(args[i], ".go") { + i++ + } + pkgOpts := load.PackageOpts{MainOnly: true} + var p *load.Package + if i > 0 { + files := args[:i] + for _, file := range files { + if strings.HasSuffix(file, "_test.go") { + // GoFilesPackage is going to assign this to TestGoFiles. + // Reject since it won't be part of the build. + base.Fatalf("go: cannot run *_test.go files (%s)", file) + } + } + p = load.GoFilesPackage(ctx, pkgOpts, files) + } else if len(args) > 0 && !strings.HasPrefix(args[0], "-") { + arg := args[0] + var pkgs []*load.Package + if strings.Contains(arg, "@") && !build.IsLocalImport(arg) && !filepath.IsAbs(arg) { + var err error + pkgs, err = load.PackagesAndErrorsOutsideModule(ctx, pkgOpts, args[:1]) + if err != nil { + base.Fatal(err) + } + } else { + pkgs = load.PackagesAndErrors(ctx, pkgOpts, args[:1]) + } + + if len(pkgs) == 0 { + base.Fatalf("go: no packages loaded from %s", arg) + } + if len(pkgs) > 1 { + var names []string + for _, p := range pkgs { + names = append(names, p.ImportPath) + } + base.Fatalf("go: pattern %s matches multiple packages:\n\t%s", arg, strings.Join(names, "\n\t")) + } + p = pkgs[0] + i++ + } else { + base.Fatalf("go: no go files listed") + } + cmdArgs := args[i:] + load.CheckPackageErrors([]*load.Package{p}) + + if cfg.Experiment.CoverageRedesign && cfg.BuildCover { + load.PrepareForCoverageBuild([]*load.Package{p}) + } + + p.Internal.OmitDebug = true + p.Target = "" // must build - not up to date + if p.Internal.CmdlineFiles { + //set executable name if go file is given as cmd-argument + var src string + if len(p.GoFiles) > 0 { + src = p.GoFiles[0] + } else if len(p.CgoFiles) > 0 { + src = 
p.CgoFiles[0] + } else { + // this case could only happen if the provided source uses cgo + // while cgo is disabled. + hint := "" + if !cfg.BuildContext.CgoEnabled { + hint = " (cgo is disabled)" + } + base.Fatalf("go: no suitable source files%s", hint) + } + p.Internal.ExeName = src[:len(src)-len(".go")] + } else { + p.Internal.ExeName = path.Base(p.ImportPath) + } + + a1 := b.LinkAction(work.ModeBuild, work.ModeBuild, p) + a := &work.Action{Mode: "go run", Actor: work.ActorFunc(buildRunProgram), Args: cmdArgs, Deps: []*work.Action{a1}} + b.Do(ctx, a) +} + +// shouldUseOutsideModuleMode returns whether 'go run' will load packages in +// module-aware mode, ignoring the go.mod file in the current directory. It +// returns true if the first argument contains "@", does not begin with "-" +// (resembling a flag) or end with ".go" (a file). The argument must not be a +// local or absolute file path. +// +// These rules are slightly different than other commands. Whether or not +// 'go run' uses this mode, it interprets arguments ending with ".go" as files +// and uses arguments up to the last ".go" argument to comprise the package. +// If there are no ".go" arguments, only the first argument is interpreted +// as a package path, since there can be only one package. +func shouldUseOutsideModuleMode(args []string) bool { + // NOTE: "@" not allowed in import paths, but it is allowed in non-canonical + // versions. + return len(args) > 0 && + !strings.HasSuffix(args[0], ".go") && + !strings.HasPrefix(args[0], "-") && + strings.Contains(args[0], "@") && + !build.IsLocalImport(args[0]) && + !filepath.IsAbs(args[0]) +} + +// buildRunProgram is the action for running a binary that has already +// been compiled. We ignore exit status. 
+func buildRunProgram(b *work.Builder, ctx context.Context, a *work.Action) error { + cmdline := str.StringList(work.FindExecCmd(), a.Deps[0].Target, a.Args) + if cfg.BuildN || cfg.BuildX { + b.Shell(a).ShowCmd("", "%s", strings.Join(cmdline, " ")) + if cfg.BuildN { + return nil + } + } + + base.RunStdin(cmdline) + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/script/cmds.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/script/cmds.go new file mode 100644 index 0000000000000000000000000000000000000000..ecd35ff8b122c19abda0a7e147a585237261d5f1 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/script/cmds.go @@ -0,0 +1,1126 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package script + +import ( + "cmd/go/internal/cfg" + "cmd/go/internal/robustio" + "errors" + "fmt" + "internal/diff" + "io/fs" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +// DefaultCmds returns a set of broadly useful script commands. +// +// Run the 'help' command within a script engine to view a list of the available +// commands. +func DefaultCmds() map[string]Cmd { + return map[string]Cmd{ + "cat": Cat(), + "cd": Cd(), + "chmod": Chmod(), + "cmp": Cmp(), + "cmpenv": Cmpenv(), + "cp": Cp(), + "echo": Echo(), + "env": Env(), + "exec": Exec(func(cmd *exec.Cmd) error { return cmd.Process.Signal(os.Interrupt) }, 100*time.Millisecond), // arbitrary grace period + "exists": Exists(), + "grep": Grep(), + "help": Help(), + "mkdir": Mkdir(), + "mv": Mv(), + "rm": Rm(), + "replace": Replace(), + "sleep": Sleep(), + "stderr": Stderr(), + "stdout": Stdout(), + "stop": Stop(), + "symlink": Symlink(), + "wait": Wait(), + } +} + +// Command returns a new Cmd with a Usage method that returns a copy of the +// given CmdUsage and a Run method calls the given function. 
+func Command(usage CmdUsage, run func(*State, ...string) (WaitFunc, error)) Cmd { + return &funcCmd{ + usage: usage, + run: run, + } +} + +// A funcCmd implements Cmd using a function value. +type funcCmd struct { + usage CmdUsage + run func(*State, ...string) (WaitFunc, error) +} + +func (c *funcCmd) Run(s *State, args ...string) (WaitFunc, error) { + return c.run(s, args...) +} + +func (c *funcCmd) Usage() *CmdUsage { return &c.usage } + +// firstNonFlag returns a slice containing the index of the first argument in +// rawArgs that is not a flag, or nil if all arguments are flags. +func firstNonFlag(rawArgs ...string) []int { + for i, arg := range rawArgs { + if !strings.HasPrefix(arg, "-") { + return []int{i} + } + if arg == "--" { + return []int{i + 1} + } + } + return nil +} + +// Cat writes the concatenated contents of the named file(s) to the script's +// stdout buffer. +func Cat() Cmd { + return Command( + CmdUsage{ + Summary: "concatenate files and print to the script's stdout buffer", + Args: "files...", + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) == 0 { + return nil, ErrUsage + } + + paths := make([]string, 0, len(args)) + for _, arg := range args { + paths = append(paths, s.Path(arg)) + } + + var buf strings.Builder + errc := make(chan error, 1) + go func() { + for _, p := range paths { + b, err := os.ReadFile(p) + buf.Write(b) + if err != nil { + errc <- err + return + } + } + errc <- nil + }() + + wait := func(*State) (stdout, stderr string, err error) { + err = <-errc + return buf.String(), "", err + } + return wait, nil + }) +} + +// Cd changes the current working directory. +func Cd() Cmd { + return Command( + CmdUsage{ + Summary: "change the working directory", + Args: "dir", + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) != 1 { + return nil, ErrUsage + } + return nil, s.Chdir(args[0]) + }) +} + +// Chmod changes the permissions of a file or a directory.. 
+func Chmod() Cmd { + return Command( + CmdUsage{ + Summary: "change file mode bits", + Args: "perm paths...", + Detail: []string{ + "Changes the permissions of the named files or directories to be equal to perm.", + "Only numerical permissions are supported.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) < 2 { + return nil, ErrUsage + } + + perm, err := strconv.ParseUint(args[0], 0, 32) + if err != nil || perm&uint64(fs.ModePerm) != perm { + return nil, fmt.Errorf("invalid mode: %s", args[0]) + } + + for _, arg := range args[1:] { + err := os.Chmod(s.Path(arg), fs.FileMode(perm)) + if err != nil { + return nil, err + } + } + return nil, nil + }) +} + +// Cmp compares the contents of two files, or the contents of either the +// "stdout" or "stderr" buffer and a file, returning a non-nil error if the +// contents differ. +func Cmp() Cmd { + return Command( + CmdUsage{ + Args: "[-q] file1 file2", + Summary: "compare files for differences", + Detail: []string{ + "By convention, file1 is the actual data and file2 is the expected data.", + "The command succeeds if the file contents are identical.", + "File1 can be 'stdout' or 'stderr' to compare the stdout or stderr buffer from the most recent command.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + return nil, doCompare(s, false, args...) + }) +} + +// Cmpenv is like Compare, but also performs environment substitutions +// on the contents of both arguments. 
+func Cmpenv() Cmd { + return Command( + CmdUsage{ + Args: "[-q] file1 file2", + Summary: "compare files for differences, with environment expansion", + Detail: []string{ + "By convention, file1 is the actual data and file2 is the expected data.", + "The command succeeds if the file contents are identical after substituting variables from the script environment.", + "File1 can be 'stdout' or 'stderr' to compare the script's stdout or stderr buffer.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + return nil, doCompare(s, true, args...) + }) +} + +func doCompare(s *State, env bool, args ...string) error { + quiet := false + if len(args) > 0 && args[0] == "-q" { + quiet = true + args = args[1:] + } + if len(args) != 2 { + return ErrUsage + } + + name1, name2 := args[0], args[1] + var text1, text2 string + switch name1 { + case "stdout": + text1 = s.Stdout() + case "stderr": + text1 = s.Stderr() + default: + data, err := os.ReadFile(s.Path(name1)) + if err != nil { + return err + } + text1 = string(data) + } + + data, err := os.ReadFile(s.Path(name2)) + if err != nil { + return err + } + text2 = string(data) + + if env { + text1 = s.ExpandEnv(text1, false) + text2 = s.ExpandEnv(text2, false) + } + + if text1 != text2 { + if !quiet { + diffText := diff.Diff(name1, []byte(text1), name2, []byte(text2)) + s.Logf("%s\n", diffText) + } + return fmt.Errorf("%s and %s differ", name1, name2) + } + return nil +} + +// Cp copies one or more files to a new location. +func Cp() Cmd { + return Command( + CmdUsage{ + Summary: "copy files to a target file or directory", + Args: "src... 
dst", + Detail: []string{ + "src can include 'stdout' or 'stderr' to copy from the script's stdout or stderr buffer.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) < 2 { + return nil, ErrUsage + } + + dst := s.Path(args[len(args)-1]) + info, err := os.Stat(dst) + dstDir := err == nil && info.IsDir() + if len(args) > 2 && !dstDir { + return nil, &fs.PathError{Op: "cp", Path: dst, Err: errors.New("destination is not a directory")} + } + + for _, arg := range args[:len(args)-1] { + var ( + src string + data []byte + mode fs.FileMode + ) + switch arg { + case "stdout": + src = arg + data = []byte(s.Stdout()) + mode = 0666 + case "stderr": + src = arg + data = []byte(s.Stderr()) + mode = 0666 + default: + src = s.Path(arg) + info, err := os.Stat(src) + if err != nil { + return nil, err + } + mode = info.Mode() & 0777 + data, err = os.ReadFile(src) + if err != nil { + return nil, err + } + } + targ := dst + if dstDir { + targ = filepath.Join(dst, filepath.Base(src)) + } + err := os.WriteFile(targ, data, mode) + if err != nil { + return nil, err + } + } + + return nil, nil + }) +} + +// Echo writes its arguments to stdout, followed by a newline. +func Echo() Cmd { + return Command( + CmdUsage{ + Summary: "display a line of text", + Args: "string...", + }, + func(s *State, args ...string) (WaitFunc, error) { + var buf strings.Builder + for i, arg := range args { + if i > 0 { + buf.WriteString(" ") + } + buf.WriteString(arg) + } + buf.WriteString("\n") + out := buf.String() + + // Stuff the result into a callback to satisfy the OutputCommandFunc + // interface, even though it isn't really asynchronous even if run in the + // background. + // + // Nobody should be running 'echo' as a background command, but it's not worth + // defining yet another interface, and also doesn't seem worth shoehorning + // into a SimpleCommand the way we did with Wait. 
+ return func(*State) (stdout, stderr string, err error) { + return out, "", nil + }, nil + }) +} + +// Env sets or logs the values of environment variables. +// +// With no arguments, Env reports all variables in the environment. +// "key=value" arguments set variables, and arguments without "=" +// cause the corresponding value to be printed to the stdout buffer. +func Env() Cmd { + return Command( + CmdUsage{ + Summary: "set or log the values of environment variables", + Args: "[key[=value]...]", + Detail: []string{ + "With no arguments, print the script environment to the log.", + "Otherwise, add the listed key=value pairs to the environment or print the listed keys.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + out := new(strings.Builder) + if len(args) == 0 { + for _, kv := range s.env { + fmt.Fprintf(out, "%s\n", kv) + } + } else { + for _, env := range args { + i := strings.Index(env, "=") + if i < 0 { + // Display value instead of setting it. + fmt.Fprintf(out, "%s=%s\n", env, s.envMap[env]) + continue + } + if err := s.Setenv(env[:i], env[i+1:]); err != nil { + return nil, err + } + } + } + var wait WaitFunc + if out.Len() > 0 || len(args) == 0 { + wait = func(*State) (stdout, stderr string, err error) { + return out.String(), "", nil + } + } + return wait, nil + }) +} + +// Exec runs an arbitrary executable as a subprocess. +// +// When the Script's context is canceled, Exec sends the interrupt signal, then +// waits for up to the given delay for the subprocess to flush output before +// terminating it with os.Kill. 
+func Exec(cancel func(*exec.Cmd) error, waitDelay time.Duration) Cmd { + return Command( + CmdUsage{ + Summary: "run an executable program with arguments", + Args: "program [args...]", + Detail: []string{ + "Note that 'exec' does not terminate the script (unlike Unix shells).", + }, + Async: true, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) < 1 { + return nil, ErrUsage + } + + // Use the script's PATH to look up the command (if it does not contain a separator) + // instead of the test process's PATH (see lookPath). + // Don't use filepath.Clean, since that changes "./foo" to "foo". + name := filepath.FromSlash(args[0]) + path := name + if !strings.Contains(name, string(filepath.Separator)) { + var err error + path, err = lookPath(s, name) + if err != nil { + return nil, err + } + } + + return startCommand(s, name, path, args[1:], cancel, waitDelay) + }) +} + +func startCommand(s *State, name, path string, args []string, cancel func(*exec.Cmd) error, waitDelay time.Duration) (WaitFunc, error) { + var ( + cmd *exec.Cmd + stdoutBuf, stderrBuf strings.Builder + ) + for { + cmd = exec.CommandContext(s.Context(), path, args...) + if cancel == nil { + cmd.Cancel = nil + } else { + cmd.Cancel = func() error { return cancel(cmd) } + } + cmd.WaitDelay = waitDelay + cmd.Args[0] = name + cmd.Dir = s.Getwd() + cmd.Env = s.env + cmd.Stdout = &stdoutBuf + cmd.Stderr = &stderrBuf + err := cmd.Start() + if err == nil { + break + } + if isETXTBSY(err) { + // If the script (or its host process) just wrote the executable we're + // trying to run, a fork+exec in another thread may be holding open the FD + // that we used to write the executable (see https://go.dev/issue/22315). + // Since the descriptor should have CLOEXEC set, the problem should + // resolve as soon as the forked child reaches its exec call. + // Keep retrying until that happens. 
+ } else { + return nil, err + } + } + + wait := func(s *State) (stdout, stderr string, err error) { + err = cmd.Wait() + return stdoutBuf.String(), stderrBuf.String(), err + } + return wait, nil +} + +// lookPath is (roughly) like exec.LookPath, but it uses the script's current +// PATH to find the executable. +func lookPath(s *State, command string) (string, error) { + var strEqual func(string, string) bool + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + // Using GOOS as a proxy for case-insensitive file system. + // TODO(bcmills): Remove this assumption. + strEqual = strings.EqualFold + } else { + strEqual = func(a, b string) bool { return a == b } + } + + var pathExt []string + var searchExt bool + var isExecutable func(os.FileInfo) bool + if runtime.GOOS == "windows" { + // Use the test process's PathExt instead of the script's. + // If PathExt is set in the command's environment, cmd.Start fails with + // "parameter is invalid". Not sure why. + // If the command already has an extension in PathExt (like "cmd.exe") + // don't search for other extensions (not "cmd.bat.exe"). + pathExt = strings.Split(os.Getenv("PathExt"), string(filepath.ListSeparator)) + searchExt = true + cmdExt := filepath.Ext(command) + for _, ext := range pathExt { + if strEqual(cmdExt, ext) { + searchExt = false + break + } + } + isExecutable = func(fi os.FileInfo) bool { + return fi.Mode().IsRegular() + } + } else { + isExecutable = func(fi os.FileInfo) bool { + return fi.Mode().IsRegular() && fi.Mode().Perm()&0111 != 0 + } + } + + pathEnv, _ := s.LookupEnv(pathEnvName()) + for _, dir := range strings.Split(pathEnv, string(filepath.ListSeparator)) { + if dir == "" { + continue + } + + // Determine whether dir needs a trailing path separator. + // Note: we avoid filepath.Join in this function because it cleans the + // result: we want to preserve the exact dir prefix from the environment. 
+ sep := string(filepath.Separator) + if os.IsPathSeparator(dir[len(dir)-1]) { + sep = "" + } + + if searchExt { + ents, err := os.ReadDir(dir) + if err != nil { + continue + } + for _, ent := range ents { + for _, ext := range pathExt { + if !ent.IsDir() && strEqual(ent.Name(), command+ext) { + return dir + sep + ent.Name(), nil + } + } + } + } else { + path := dir + sep + command + if fi, err := os.Stat(path); err == nil && isExecutable(fi) { + return path, nil + } + } + } + return "", &exec.Error{Name: command, Err: exec.ErrNotFound} +} + +// pathEnvName returns the platform-specific variable used by os/exec.LookPath +// to look up executable names (either "PATH" or "path"). +// +// TODO(bcmills): Investigate whether we can instead use PATH uniformly and +// rewrite it to $path when executing subprocesses. +func pathEnvName() string { + switch runtime.GOOS { + case "plan9": + return "path" + default: + return "PATH" + } +} + +// Exists checks that the named file(s) exist. +func Exists() Cmd { + return Command( + CmdUsage{ + Summary: "check that files exist", + Args: "[-readonly] [-exec] file...", + }, + func(s *State, args ...string) (WaitFunc, error) { + var readonly, exec bool + loop: + for len(args) > 0 { + switch args[0] { + case "-readonly": + readonly = true + args = args[1:] + case "-exec": + exec = true + args = args[1:] + default: + break loop + } + } + if len(args) == 0 { + return nil, ErrUsage + } + + for _, file := range args { + file = s.Path(file) + info, err := os.Stat(file) + if err != nil { + return nil, err + } + if readonly && info.Mode()&0222 != 0 { + return nil, fmt.Errorf("%s exists but is writable", file) + } + if exec && runtime.GOOS != "windows" && info.Mode()&0111 == 0 { + return nil, fmt.Errorf("%s exists but is not executable", file) + } + } + + return nil, nil + }) +} + +// Grep checks that file content matches a regexp. +// Like stdout/stderr and unlike Unix grep, it accepts Go regexp syntax. 
+// +// Grep does not modify the State's stdout or stderr buffers. +// (Its output goes to the script log, not stdout.) +func Grep() Cmd { + return Command( + CmdUsage{ + Summary: "find lines in a file that match a pattern", + Args: matchUsage + " file", + Detail: []string{ + "The command succeeds if at least one match (or the exact count, if given) is found.", + "The -q flag suppresses printing of matches.", + }, + RegexpArgs: firstNonFlag, + }, + func(s *State, args ...string) (WaitFunc, error) { + return nil, match(s, args, "", "grep") + }) +} + +const matchUsage = "[-count=N] [-q] 'pattern'" + +// match implements the Grep, Stdout, and Stderr commands. +func match(s *State, args []string, text, name string) error { + n := 0 + if len(args) >= 1 && strings.HasPrefix(args[0], "-count=") { + var err error + n, err = strconv.Atoi(args[0][len("-count="):]) + if err != nil { + return fmt.Errorf("bad -count=: %v", err) + } + if n < 1 { + return fmt.Errorf("bad -count=: must be at least 1") + } + args = args[1:] + } + quiet := false + if len(args) >= 1 && args[0] == "-q" { + quiet = true + args = args[1:] + } + + isGrep := name == "grep" + + wantArgs := 1 + if isGrep { + wantArgs = 2 + } + if len(args) != wantArgs { + return ErrUsage + } + + pattern := `(?m)` + args[0] + re, err := regexp.Compile(pattern) + if err != nil { + return err + } + + if isGrep { + name = args[1] // for error messages + data, err := os.ReadFile(s.Path(args[1])) + if err != nil { + return err + } + text = string(data) + } + + if n > 0 { + count := len(re.FindAllString(text, -1)) + if count != n { + return fmt.Errorf("found %d matches for %#q in %s", count, pattern, name) + } + return nil + } + + if !re.MatchString(text) { + return fmt.Errorf("no match for %#q in %s", pattern, name) + } + + if !quiet { + // Print the lines containing the match. 
+ loc := re.FindStringIndex(text) + for loc[0] > 0 && text[loc[0]-1] != '\n' { + loc[0]-- + } + for loc[1] < len(text) && text[loc[1]] != '\n' { + loc[1]++ + } + lines := strings.TrimSuffix(text[loc[0]:loc[1]], "\n") + s.Logf("matched: %s\n", lines) + } + return nil +} + +// Help writes command documentation to the script log. +func Help() Cmd { + return Command( + CmdUsage{ + Summary: "log help text for commands and conditions", + Args: "[-v] name...", + Detail: []string{ + "To display help for a specific condition, enclose it in brackets: 'help [amd64]'.", + "To display complete documentation when listing all commands, pass the -v flag.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if s.engine == nil { + return nil, errors.New("no engine configured") + } + + verbose := false + if len(args) > 0 { + verbose = true + if args[0] == "-v" { + args = args[1:] + } + } + + var cmds, conds []string + for _, arg := range args { + if strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") { + conds = append(conds, arg[1:len(arg)-1]) + } else { + cmds = append(cmds, arg) + } + } + + out := new(strings.Builder) + + if len(conds) > 0 || (len(args) == 0 && len(s.engine.Conds) > 0) { + if conds == nil { + out.WriteString("conditions:\n\n") + } + s.engine.ListConds(out, s, conds...) + } + + if len(cmds) > 0 || len(args) == 0 { + if len(args) == 0 { + out.WriteString("\ncommands:\n\n") + } + s.engine.ListCmds(out, verbose, cmds...) + } + + wait := func(*State) (stdout, stderr string, err error) { + return out.String(), "", nil + } + return wait, nil + }) +} + +// Mkdir creates a directory and any needed parent directories. 
+func Mkdir() Cmd { + return Command( + CmdUsage{ + Summary: "create directories, if they do not already exist", + Args: "path...", + Detail: []string{ + "Unlike Unix mkdir, parent directories are always created if needed.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) < 1 { + return nil, ErrUsage + } + for _, arg := range args { + if err := os.MkdirAll(s.Path(arg), 0777); err != nil { + return nil, err + } + } + return nil, nil + }) +} + +// Mv renames an existing file or directory to a new path. +func Mv() Cmd { + return Command( + CmdUsage{ + Summary: "rename a file or directory to a new path", + Args: "old new", + Detail: []string{ + "OS-specific restrictions may apply when old and new are in different directories.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) != 2 { + return nil, ErrUsage + } + return nil, os.Rename(s.Path(args[0]), s.Path(args[1])) + }) +} + +// Program returns a new command that runs the named program, found from the +// host process's PATH (not looked up in the script's PATH). 
+func Program(name string, cancel func(*exec.Cmd) error, waitDelay time.Duration) Cmd { + var ( + shortName string + summary string + lookPathOnce sync.Once + path string + pathErr error + ) + if filepath.IsAbs(name) { + lookPathOnce.Do(func() { path = filepath.Clean(name) }) + shortName = strings.TrimSuffix(filepath.Base(path), ".exe") + summary = "run the '" + shortName + "' program provided by the script host" + } else { + shortName = name + summary = "run the '" + shortName + "' program from the script host's PATH" + } + + return Command( + CmdUsage{ + Summary: summary, + Args: "[args...]", + Async: true, + }, + func(s *State, args ...string) (WaitFunc, error) { + lookPathOnce.Do(func() { + path, pathErr = cfg.LookPath(name) + }) + if pathErr != nil { + return nil, pathErr + } + return startCommand(s, shortName, path, args, cancel, waitDelay) + }) +} + +// Replace replaces all occurrences of a string in a file with another string. +func Replace() Cmd { + return Command( + CmdUsage{ + Summary: "replace strings in a file", + Args: "[old new]... file", + Detail: []string{ + "The 'old' and 'new' arguments are unquoted as if in quoted Go strings.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args)%2 != 1 { + return nil, ErrUsage + } + + oldNew := make([]string, 0, len(args)-1) + for _, arg := range args[:len(args)-1] { + s, err := strconv.Unquote(`"` + arg + `"`) + if err != nil { + return nil, err + } + oldNew = append(oldNew, s) + } + + r := strings.NewReplacer(oldNew...) + file := s.Path(args[len(args)-1]) + + data, err := os.ReadFile(file) + if err != nil { + return nil, err + } + replaced := r.Replace(string(data)) + + return nil, os.WriteFile(file, []byte(replaced), 0666) + }) +} + +// Rm removes a file or directory. +// +// If a directory, Rm also recursively removes that directory's +// contents. 
+func Rm() Cmd { + return Command( + CmdUsage{ + Summary: "remove a file or directory", + Args: "path...", + Detail: []string{ + "If the path is a directory, its contents are removed recursively.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) < 1 { + return nil, ErrUsage + } + for _, arg := range args { + if err := removeAll(s.Path(arg)); err != nil { + return nil, err + } + } + return nil, nil + }) +} + +// removeAll removes dir and all files and directories it contains. +// +// Unlike os.RemoveAll, removeAll attempts to make the directories writable if +// needed in order to remove their contents. +func removeAll(dir string) error { + // module cache has 0444 directories; + // make them writable in order to remove content. + filepath.WalkDir(dir, func(path string, info fs.DirEntry, err error) error { + // chmod not only directories, but also things that we couldn't even stat + // due to permission errors: they may also be unreadable directories. + if err != nil || info.IsDir() { + os.Chmod(path, 0777) + } + return nil + }) + return robustio.RemoveAll(dir) +} + +// Sleep sleeps for the given Go duration or until the script's context is +// cancelled, whichever happens first. +func Sleep() Cmd { + return Command( + CmdUsage{ + Summary: "sleep for a specified duration", + Args: "duration", + Detail: []string{ + "The duration must be given as a Go time.Duration string.", + }, + Async: true, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) != 1 { + return nil, ErrUsage + } + + d, err := time.ParseDuration(args[0]) + if err != nil { + return nil, err + } + + timer := time.NewTimer(d) + wait := func(s *State) (stdout, stderr string, err error) { + ctx := s.Context() + select { + case <-ctx.Done(): + timer.Stop() + return "", "", ctx.Err() + case <-timer.C: + return "", "", nil + } + } + return wait, nil + }) +} + +// Stderr searches for a regular expression in the stderr buffer. 
+func Stderr() Cmd { + return Command( + CmdUsage{ + Summary: "find lines in the stderr buffer that match a pattern", + Args: matchUsage, + Detail: []string{ + "The command succeeds if at least one match (or the exact count, if given) is found.", + "The -q flag suppresses printing of matches.", + }, + RegexpArgs: firstNonFlag, + }, + func(s *State, args ...string) (WaitFunc, error) { + return nil, match(s, args, s.Stderr(), "stderr") + }) +} + +// Stdout searches for a regular expression in the stdout buffer. +func Stdout() Cmd { + return Command( + CmdUsage{ + Summary: "find lines in the stdout buffer that match a pattern", + Args: matchUsage, + Detail: []string{ + "The command succeeds if at least one match (or the exact count, if given) is found.", + "The -q flag suppresses printing of matches.", + }, + RegexpArgs: firstNonFlag, + }, + func(s *State, args ...string) (WaitFunc, error) { + return nil, match(s, args, s.Stdout(), "stdout") + }) +} + +// Stop returns a sentinel error that causes script execution to halt +// and s.Execute to return with a nil error. +func Stop() Cmd { + return Command( + CmdUsage{ + Summary: "stop execution of the script", + Args: "[msg]", + Detail: []string{ + "The message is written to the script log, but no error is reported from the script engine.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) > 1 { + return nil, ErrUsage + } + // TODO(bcmills): The argument passed to stop seems redundant with comments. + // Either use it systematically or remove it. + if len(args) == 1 { + return nil, stopError{msg: args[0]} + } + return nil, stopError{} + }) +} + +// stopError is the sentinel error type returned by the Stop command. +type stopError struct { + msg string +} + +func (s stopError) Error() string { + if s.msg == "" { + return "stop" + } + return "stop: " + s.msg +} + +// Symlink creates a symbolic link. 
+func Symlink() Cmd { + return Command( + CmdUsage{ + Summary: "create a symlink", + Args: "path -> target", + Detail: []string{ + "Creates path as a symlink to target.", + "The '->' token (like in 'ls -l' output on Unix) is required.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) != 3 || args[1] != "->" { + return nil, ErrUsage + } + + // Note that the link target args[2] is not interpreted with s.Path: + // it will be interpreted relative to the directory file is in. + return nil, os.Symlink(filepath.FromSlash(args[2]), s.Path(args[0])) + }) +} + +// Wait waits for the completion of background commands. +// +// When Wait returns, the stdout and stderr buffers contain the concatenation of +// the background commands' respective outputs in the order in which those +// commands were started. +func Wait() Cmd { + return Command( + CmdUsage{ + Summary: "wait for completion of background commands", + Args: "", + Detail: []string{ + "Waits for all background commands to complete.", + "The output (and any error) from each command is printed to the log in the order in which the commands were started.", + "After the call to 'wait', the script's stdout and stderr buffers contain the concatenation of the background commands' outputs.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) > 0 { + return nil, ErrUsage + } + + var stdouts, stderrs []string + var errs []*CommandError + for _, bg := range s.background { + stdout, stderr, err := bg.wait(s) + + beforeArgs := "" + if len(bg.args) > 0 { + beforeArgs = " " + } + s.Logf("[background] %s%s%s\n", bg.name, beforeArgs, quoteArgs(bg.args)) + + if stdout != "" { + s.Logf("[stdout]\n%s", stdout) + stdouts = append(stdouts, stdout) + } + if stderr != "" { + s.Logf("[stderr]\n%s", stderr) + stderrs = append(stderrs, stderr) + } + if err != nil { + s.Logf("[%v]\n", err) + } + if cmdErr := checkStatus(bg.command, err); cmdErr != nil { + errs = append(errs, 
cmdErr.(*CommandError)) + } + } + + s.stdout = strings.Join(stdouts, "") + s.stderr = strings.Join(stderrs, "") + s.background = nil + if len(errs) > 0 { + return nil, waitError{errs: errs} + } + return nil, nil + }) +} + +// A waitError wraps one or more errors returned by background commands. +type waitError struct { + errs []*CommandError +} + +func (w waitError) Error() string { + b := new(strings.Builder) + for i, err := range w.errs { + if i != 0 { + b.WriteString("\n") + } + b.WriteString(err.Error()) + } + return b.String() +} + +func (w waitError) Unwrap() error { + if len(w.errs) == 1 { + return w.errs[0] + } + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/script/cmds_other.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/script/cmds_other.go new file mode 100644 index 0000000000000000000000000000000000000000..847b225ae64989e3f61df72131ad2ef0c762f316 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/script/cmds_other.go @@ -0,0 +1,11 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !(unix || windows) + +package script + +func isETXTBSY(err error) bool { + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/script/cmds_posix.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/script/cmds_posix.go new file mode 100644 index 0000000000000000000000000000000000000000..2525f6e7529d8c9dd47602ac8008b221e022a95e --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/script/cmds_posix.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build unix || windows + +package script + +import ( + "errors" + "syscall" +) + +func isETXTBSY(err error) bool { + return errors.Is(err, syscall.ETXTBSY) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/script/conds.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/script/conds.go new file mode 100644 index 0000000000000000000000000000000000000000..d70f274efc2261a50ce96022bcf44a021b0b1a1d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/script/conds.go @@ -0,0 +1,205 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package script + +import ( + "cmd/go/internal/imports" + "fmt" + "os" + "runtime" + "sync" +) + +// DefaultConds returns a set of broadly useful script conditions. +// +// Run the 'help' command within a script engine to view a list of the available +// conditions. +func DefaultConds() map[string]Cond { + conds := make(map[string]Cond) + + conds["GOOS"] = PrefixCondition( + "runtime.GOOS == <suffix>", + func(_ *State, suffix string) (bool, error) { + if suffix == runtime.GOOS { + return true, nil + } + if _, ok := imports.KnownOS[suffix]; !ok { + return false, fmt.Errorf("unrecognized GOOS %q", suffix) + } + return false, nil + }) + + conds["GOARCH"] = PrefixCondition( + "runtime.GOARCH == <suffix>", + func(_ *State, suffix string) (bool, error) { + if suffix == runtime.GOARCH { + return true, nil + } + if _, ok := imports.KnownArch[suffix]; !ok { + return false, fmt.Errorf("unrecognized GOARCH %q", suffix) + } + return false, nil + }) + + conds["compiler"] = PrefixCondition( + "runtime.Compiler == <suffix>", + func(_ *State, suffix string) (bool, error) { + if suffix == runtime.Compiler { + return true, nil + } + switch suffix { + case "gc", "gccgo": + return false, nil + default: + return false, fmt.Errorf("unrecognized compiler %q", suffix) + } + }) + + conds["root"] = BoolCondition("os.Geteuid() == 0", 
os.Geteuid() == 0) + + return conds +} + +// Condition returns a Cond with the given summary and evaluation function. +func Condition(summary string, eval func(*State) (bool, error)) Cond { + return &funcCond{eval: eval, usage: CondUsage{Summary: summary}} +} + +type funcCond struct { + eval func(*State) (bool, error) + usage CondUsage +} + +func (c *funcCond) Usage() *CondUsage { return &c.usage } + +func (c *funcCond) Eval(s *State, suffix string) (bool, error) { + if suffix != "" { + return false, ErrUsage + } + return c.eval(s) +} + +// PrefixCondition returns a Cond with the given summary and evaluation function. +func PrefixCondition(summary string, eval func(*State, string) (bool, error)) Cond { + return &prefixCond{eval: eval, usage: CondUsage{Summary: summary, Prefix: true}} +} + +type prefixCond struct { + eval func(*State, string) (bool, error) + usage CondUsage +} + +func (c *prefixCond) Usage() *CondUsage { return &c.usage } + +func (c *prefixCond) Eval(s *State, suffix string) (bool, error) { + return c.eval(s, suffix) +} + +// BoolCondition returns a Cond with the given truth value and summary. +// The Cond rejects the use of condition suffixes. +func BoolCondition(summary string, v bool) Cond { + return &boolCond{v: v, usage: CondUsage{Summary: summary}} +} + +type boolCond struct { + v bool + usage CondUsage +} + +func (b *boolCond) Usage() *CondUsage { return &b.usage } + +func (b *boolCond) Eval(s *State, suffix string) (bool, error) { + if suffix != "" { + return false, ErrUsage + } + return b.v, nil +} + +// OnceCondition returns a Cond that calls eval the first time the condition is +// evaluated. Future calls reuse the same result. +// +// The eval function is not passed a *State because the condition is cached +// across all execution states and must not vary by state. 
+func OnceCondition(summary string, eval func() (bool, error)) Cond { + return &onceCond{eval: eval, usage: CondUsage{Summary: summary}} +} + +type onceCond struct { + once sync.Once + v bool + err error + eval func() (bool, error) + usage CondUsage +} + +func (l *onceCond) Usage() *CondUsage { return &l.usage } + +func (l *onceCond) Eval(s *State, suffix string) (bool, error) { + if suffix != "" { + return false, ErrUsage + } + l.once.Do(func() { l.v, l.err = l.eval() }) + return l.v, l.err +} + +// CachedCondition is like Condition but only calls eval the first time the +// condition is evaluated for a given suffix. +// Future calls with the same suffix reuse the earlier result. +// +// The eval function is not passed a *State because the condition is cached +// across all execution states and must not vary by state. +func CachedCondition(summary string, eval func(string) (bool, error)) Cond { + return &cachedCond{eval: eval, usage: CondUsage{Summary: summary, Prefix: true}} +} + +type cachedCond struct { + m sync.Map + eval func(string) (bool, error) + usage CondUsage +} + +func (c *cachedCond) Usage() *CondUsage { return &c.usage } + +func (c *cachedCond) Eval(_ *State, suffix string) (bool, error) { + for { + var ready chan struct{} + + v, loaded := c.m.Load(suffix) + if !loaded { + ready = make(chan struct{}) + v, loaded = c.m.LoadOrStore(suffix, (<-chan struct{})(ready)) + + if !loaded { + inPanic := true + defer func() { + if inPanic { + c.m.Delete(suffix) + } + close(ready) + }() + + b, err := c.eval(suffix) + inPanic = false + + if err == nil { + c.m.Store(suffix, b) + return b, nil + } else { + c.m.Store(suffix, err) + return false, err + } + } + } + + switch v := v.(type) { + case bool: + return v, nil + case error: + return false, v + case <-chan struct{}: + <-v + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/script/engine.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/script/engine.go new file mode 100644 index 
0000000000000000000000000000000000000000..ba821712e5ec69bc35ba85183d982b8365529921 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/script/engine.go @@ -0,0 +1,788 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package script implements a small, customizable, platform-agnostic scripting +// language. +// +// Scripts are run by an [Engine] configured with a set of available commands +// and conditions that guard those commands. Each script has an associated +// working directory and environment, along with a buffer containing the stdout +// and stderr output of a prior command, tracked in a [State] that commands can +// inspect and modify. +// +// The default commands configured by [NewEngine] resemble a simplified Unix +// shell. +// +// # Script Language +// +// Each line of a script is parsed into a sequence of space-separated command +// words, with environment variable expansion within each word and # marking an +// end-of-line comment. Additional variables named ':' and '/' are expanded +// within script arguments (expanding to the value of os.PathListSeparator and +// os.PathSeparator respectively) but are not inherited in subprocess +// environments. +// +// Adding single quotes around text keeps spaces in that text from being treated +// as word separators and also disables environment variable expansion. +// Inside a single-quoted block of text, a repeated single quote indicates +// a literal single quote, as in: +// +// 'Don''t communicate by sharing memory.' +// +// A line beginning with # is a comment and conventionally explains what is +// being done or tested at the start of a new section of the script. +// +// Commands are executed one at a time, and errors are checked for each command; +// if any command fails unexpectedly, no subsequent commands in the script are +// executed. The command prefix ! 
indicates that the command on the rest of the +// line (typically go or a matching predicate) must fail instead of succeeding. +// The command prefix ? indicates that the command may or may not succeed, but +// the script should continue regardless. +// +// The command prefix [cond] indicates that the command on the rest of the line +// should only run when the condition is satisfied. +// +// A condition can be negated: [!root] means to run the rest of the line only if +// the user is not root. Multiple conditions may be given for a single command, +// for example, '[linux] [amd64] skip'. The command will run if all conditions +// are satisfied. +package script + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "sort" + "strings" + "time" +) + +// An Engine stores the configuration for executing a set of scripts. +// +// The same Engine may execute multiple scripts concurrently. +type Engine struct { + Cmds map[string]Cmd + Conds map[string]Cond + + // If Quiet is true, Execute deletes log prints from the previous + // section when starting a new section. + Quiet bool +} + +// NewEngine returns an Engine configured with a basic set of commands and conditions. +func NewEngine() *Engine { + return &Engine{ + Cmds: DefaultCmds(), + Conds: DefaultConds(), + } +} + +// A Cmd is a command that is available to a script. +type Cmd interface { + // Run begins running the command. + // + // If the command produces output or can be run in the background, run returns + // a WaitFunc that will be called to obtain the result of the command and + // update the engine's stdout and stderr buffers. + // + // Run itself and the returned WaitFunc may inspect and/or modify the State, + // but the State's methods must not be called concurrently after Run has + // returned. + // + // Run may retain and access the args slice until the WaitFunc has returned. 
+ Run(s *State, args ...string) (WaitFunc, error) + + // Usage returns the usage for the command, which the caller must not modify. + Usage() *CmdUsage +} + +// A WaitFunc is a function called to retrieve the results of a Cmd. +type WaitFunc func(*State) (stdout, stderr string, err error) + +// A CmdUsage describes the usage of a Cmd, independent of its name +// (which can change based on its registration). +type CmdUsage struct { + Summary string // in the style of the Name section of a Unix 'man' page, omitting the name + Args string // a brief synopsis of the command's arguments (only) + Detail []string // zero or more sentences in the style of the Description section of a Unix 'man' page + + // If Async is true, the Cmd is meaningful to run in the background, and its + // Run method must return either a non-nil WaitFunc or a non-nil error. + Async bool + + // RegexpArgs reports which arguments, if any, should be treated as regular + // expressions. It takes as input the raw, unexpanded arguments and returns + // the list of argument indices that will be interpreted as regular + // expressions. + // + // If RegexpArgs is nil, all arguments are assumed not to be regular + // expressions. + RegexpArgs func(rawArgs ...string) []int +} + +// A Cond is a condition deciding whether a command should be run. +type Cond interface { + // Eval reports whether the condition applies to the given State. + // + // If the condition's usage reports that it is a prefix, + // the condition must be used with a suffix. + // Otherwise, the passed-in suffix argument is always the empty string. + Eval(s *State, suffix string) (bool, error) + + // Usage returns the usage for the condition, which the caller must not modify. + Usage() *CondUsage +} + +// A CondUsage describes the usage of a Cond, independent of its name +// (which can change based on its registration). 
+type CondUsage struct { + Summary string // a single-line summary of when the condition is true + + // If Prefix is true, the condition is a prefix and requires a + // colon-separated suffix (like "[GOOS:linux]" for the "GOOS" condition). + // The suffix may be the empty string (like "[prefix:]"). + Prefix bool +} + +// Execute reads and executes script, writing the output to log. +// +// Execute stops and returns an error at the first command that does not succeed. +// The returned error's text begins with "file:line: ". +// +// If the script runs to completion or ends by a 'stop' command, +// Execute returns nil. +// +// Execute does not stop background commands started by the script +// before returning. To stop those, use [State.CloseAndWait] or the +// [Wait] command. +func (e *Engine) Execute(s *State, file string, script *bufio.Reader, log io.Writer) (err error) { + defer func(prev *Engine) { s.engine = prev }(s.engine) + s.engine = e + + var sectionStart time.Time + // endSection flushes the logs for the current section from s.log to log. + // ok indicates whether all commands in the section succeeded. + endSection := func(ok bool) error { + var err error + if sectionStart.IsZero() { + // We didn't write a section header or record a timestamp, so just dump the + // whole log without those. + if s.log.Len() > 0 { + err = s.flushLog(log) + } + } else if s.log.Len() == 0 { + // Adding elapsed time for doing nothing is meaningless, so don't. + _, err = io.WriteString(log, "\n") + } else { + // Insert elapsed time for section at the end of the section's comment. 
+ _, err = fmt.Fprintf(log, " (%.3fs)\n", time.Since(sectionStart).Seconds()) + + if err == nil && (!ok || !e.Quiet) { + err = s.flushLog(log) + } else { + s.log.Reset() + } + } + + sectionStart = time.Time{} + return err + } + + var lineno int + lineErr := func(err error) error { + if errors.As(err, new(*CommandError)) { + return err + } + return fmt.Errorf("%s:%d: %w", file, lineno, err) + } + + // In case of failure or panic, flush any pending logs for the section. + defer func() { + if sErr := endSection(false); sErr != nil && err == nil { + err = lineErr(sErr) + } + }() + + for { + if err := s.ctx.Err(); err != nil { + // This error wasn't produced by any particular command, + // so don't wrap it in a CommandError. + return lineErr(err) + } + + line, err := script.ReadString('\n') + if err == io.EOF { + if line == "" { + break // Reached the end of the script. + } + // If the script doesn't end in a newline, interpret the final line. + } else if err != nil { + return lineErr(err) + } + line = strings.TrimSuffix(line, "\n") + lineno++ + + // The comment character "#" at the start of the line delimits a section of + // the script. + if strings.HasPrefix(line, "#") { + // If there was a previous section, the fact that we are starting a new + // one implies the success of the previous one. + // + // At the start of the script, the state may also contain accumulated logs + // from commands executed on the State outside of the engine in order to + // set it up; flush those logs too. + if err := endSection(true); err != nil { + return lineErr(err) + } + + // Log the section start without a newline so that we can add + // a timestamp for the section when it ends. + _, err = fmt.Fprintf(log, "%s", line) + sectionStart = time.Now() + if err != nil { + return lineErr(err) + } + continue + } + + cmd, err := parse(file, lineno, line) + if cmd == nil && err == nil { + continue // Ignore blank lines. 
+ } + s.Logf("> %s\n", line) + if err != nil { + return lineErr(err) + } + + // Evaluate condition guards. + ok, err := e.conditionsActive(s, cmd.conds) + if err != nil { + return lineErr(err) + } + if !ok { + s.Logf("[condition not met]\n") + continue + } + + impl := e.Cmds[cmd.name] + + // Expand variables in arguments. + var regexpArgs []int + if impl != nil { + usage := impl.Usage() + if usage.RegexpArgs != nil { + // First join rawArgs without expansion to pass to RegexpArgs. + rawArgs := make([]string, 0, len(cmd.rawArgs)) + for _, frags := range cmd.rawArgs { + var b strings.Builder + for _, frag := range frags { + b.WriteString(frag.s) + } + rawArgs = append(rawArgs, b.String()) + } + regexpArgs = usage.RegexpArgs(rawArgs...) + } + } + cmd.args = expandArgs(s, cmd.rawArgs, regexpArgs) + + // Run the command. + err = e.runCommand(s, cmd, impl) + if err != nil { + if stop := (stopError{}); errors.As(err, &stop) { + // Since the 'stop' command halts execution of the entire script, + // log its message separately from the section in which it appears. + err = endSection(true) + s.Logf("%v\n", stop) + if err == nil { + return nil + } + } + return lineErr(err) + } + } + + if err := endSection(true); err != nil { + return lineErr(err) + } + return nil +} + +// A command is a complete command parsed from a script. +type command struct { + file string + line int + want expectedStatus + conds []condition // all must be satisfied + name string // the name of the command; must be non-empty + rawArgs [][]argFragment + args []string // shell-expanded arguments following name + background bool // command should run in background (ends with a trailing &) +} + +// An expectedStatus describes the expected outcome of a command. +// Script execution halts when a command does not match its expected status. +type expectedStatus string + +const ( + success expectedStatus = "" + failure expectedStatus = "!" + successOrFailure expectedStatus = "?" 
+) + +type argFragment struct { + s string + quoted bool // if true, disable variable expansion for this fragment +} + +type condition struct { + want bool + tag string +} + +const argSepChars = " \t\r\n#" + +// parse parses a single line as a list of space-separated arguments. +// subject to environment variable expansion (but not resplitting). +// Single quotes around text disable splitting and expansion. +// To embed a single quote, double it: +// +// 'Don''t communicate by sharing memory.' +func parse(filename string, lineno int, line string) (cmd *command, err error) { + cmd = &command{file: filename, line: lineno} + var ( + rawArg []argFragment // text fragments of current arg so far (need to add line[start:i]) + start = -1 // if >= 0, position where current arg text chunk starts + quoted = false // currently processing quoted text + ) + + flushArg := func() error { + if len(rawArg) == 0 { + return nil // Nothing to flush. + } + defer func() { rawArg = nil }() + + if cmd.name == "" && len(rawArg) == 1 && !rawArg[0].quoted { + arg := rawArg[0].s + + // Command prefix ! means negate the expectations about this command: + // go command should fail, match should not be found, etc. + // Prefix ? means allow either success or failure. + switch want := expectedStatus(arg); want { + case failure, successOrFailure: + if cmd.want != "" { + return errors.New("duplicated '!' or '?' token") + } + cmd.want = want + return nil + } + + // Command prefix [cond] means only run this command if cond is satisfied. 
+ if strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") { + want := true + arg = strings.TrimSpace(arg[1 : len(arg)-1]) + if strings.HasPrefix(arg, "!") { + want = false + arg = strings.TrimSpace(arg[1:]) + } + if arg == "" { + return errors.New("empty condition") + } + cmd.conds = append(cmd.conds, condition{want: want, tag: arg}) + return nil + } + + if arg == "" { + return errors.New("empty command") + } + cmd.name = arg + return nil + } + + cmd.rawArgs = append(cmd.rawArgs, rawArg) + return nil + } + + for i := 0; ; i++ { + if !quoted && (i >= len(line) || strings.ContainsRune(argSepChars, rune(line[i]))) { + // Found arg-separating space. + if start >= 0 { + rawArg = append(rawArg, argFragment{s: line[start:i], quoted: false}) + start = -1 + } + if err := flushArg(); err != nil { + return nil, err + } + if i >= len(line) || line[i] == '#' { + break + } + continue + } + if i >= len(line) { + return nil, errors.New("unterminated quoted argument") + } + if line[i] == '\'' { + if !quoted { + // starting a quoted chunk + if start >= 0 { + rawArg = append(rawArg, argFragment{s: line[start:i], quoted: false}) + } + start = i + 1 + quoted = true + continue + } + // 'foo''bar' means foo'bar, like in rc shell and Pascal. + if i+1 < len(line) && line[i+1] == '\'' { + rawArg = append(rawArg, argFragment{s: line[start:i], quoted: true}) + start = i + 1 + i++ // skip over second ' before next iteration + continue + } + // ending a quoted chunk + rawArg = append(rawArg, argFragment{s: line[start:i], quoted: true}) + start = i + 1 + quoted = false + continue + } + // found character worth saving; make sure we're saving + if start < 0 { + start = i + } + } + + if cmd.name == "" { + if cmd.want != "" || len(cmd.conds) > 0 || len(cmd.rawArgs) > 0 || cmd.background { + // The line contains a command prefix or suffix, but no actual command. + return nil, errors.New("missing command") + } + + // The line is blank, or contains only a comment. 
+ return nil, nil + } + + if n := len(cmd.rawArgs); n > 0 { + last := cmd.rawArgs[n-1] + if len(last) == 1 && !last[0].quoted && last[0].s == "&" { + cmd.background = true + cmd.rawArgs = cmd.rawArgs[:n-1] + } + } + return cmd, nil +} + +// expandArgs expands the shell variables in rawArgs and joins them to form the +// final arguments to pass to a command. +func expandArgs(s *State, rawArgs [][]argFragment, regexpArgs []int) []string { + args := make([]string, 0, len(rawArgs)) + for i, frags := range rawArgs { + isRegexp := false + for _, j := range regexpArgs { + if i == j { + isRegexp = true + break + } + } + + var b strings.Builder + for _, frag := range frags { + if frag.quoted { + b.WriteString(frag.s) + } else { + b.WriteString(s.ExpandEnv(frag.s, isRegexp)) + } + } + args = append(args, b.String()) + } + return args +} + +// quoteArgs returns a string that parse would parse as args when passed to a command. +// +// TODO(bcmills): This function should have a fuzz test. +func quoteArgs(args []string) string { + var b strings.Builder + for i, arg := range args { + if i > 0 { + b.WriteString(" ") + } + if strings.ContainsAny(arg, "'"+argSepChars) { + // Quote the argument to a form that would be parsed as a single argument. 
+ b.WriteString("'") + b.WriteString(strings.ReplaceAll(arg, "'", "''")) + b.WriteString("'") + } else { + b.WriteString(arg) + } + } + return b.String() +} + +func (e *Engine) conditionsActive(s *State, conds []condition) (bool, error) { + for _, cond := range conds { + var impl Cond + prefix, suffix, ok := strings.Cut(cond.tag, ":") + if ok { + impl = e.Conds[prefix] + if impl == nil { + return false, fmt.Errorf("unknown condition prefix %q", prefix) + } + if !impl.Usage().Prefix { + return false, fmt.Errorf("condition %q cannot be used with a suffix", prefix) + } + } else { + impl = e.Conds[cond.tag] + if impl == nil { + return false, fmt.Errorf("unknown condition %q", cond.tag) + } + if impl.Usage().Prefix { + return false, fmt.Errorf("condition %q requires a suffix", cond.tag) + } + } + active, err := impl.Eval(s, suffix) + + if err != nil { + return false, fmt.Errorf("evaluating condition %q: %w", cond.tag, err) + } + if active != cond.want { + return false, nil + } + } + + return true, nil +} + +func (e *Engine) runCommand(s *State, cmd *command, impl Cmd) error { + if impl == nil { + return cmdError(cmd, errors.New("unknown command")) + } + + async := impl.Usage().Async + if cmd.background && !async { + return cmdError(cmd, errors.New("command cannot be run in background")) + } + + wait, runErr := impl.Run(s, cmd.args...) + if wait == nil { + if async && runErr == nil { + return cmdError(cmd, errors.New("internal error: async command returned a nil WaitFunc")) + } + return checkStatus(cmd, runErr) + } + if runErr != nil { + return cmdError(cmd, errors.New("internal error: command returned both an error and a WaitFunc")) + } + + if cmd.background { + s.background = append(s.background, backgroundCmd{ + command: cmd, + wait: wait, + }) + // Clear stdout and stderr, since they no longer correspond to the last + // command executed. 
+ s.stdout = "" + s.stderr = "" + return nil + } + + if wait != nil { + stdout, stderr, waitErr := wait(s) + s.stdout = stdout + s.stderr = stderr + if stdout != "" { + s.Logf("[stdout]\n%s", stdout) + } + if stderr != "" { + s.Logf("[stderr]\n%s", stderr) + } + if cmdErr := checkStatus(cmd, waitErr); cmdErr != nil { + return cmdErr + } + if waitErr != nil { + // waitErr was expected (by cmd.want), so log it instead of returning it. + s.Logf("[%v]\n", waitErr) + } + } + return nil +} + +func checkStatus(cmd *command, err error) error { + if err == nil { + if cmd.want == failure { + return cmdError(cmd, ErrUnexpectedSuccess) + } + return nil + } + + if s := (stopError{}); errors.As(err, &s) { + // This error originated in the Stop command. + // Propagate it as-is. + return cmdError(cmd, err) + } + + if w := (waitError{}); errors.As(err, &w) { + // This error was surfaced from a background process by a call to Wait. + // Add a call frame for Wait itself, but ignore its "want" field. + // (Wait itself cannot fail to wait on commands or else it would leak + // processes and/or goroutines — so a negative assertion for it would be at + // best ambiguous.) + return cmdError(cmd, err) + } + + if cmd.want == success { + return cmdError(cmd, err) + } + + if cmd.want == failure && (errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled)) { + // The command was terminated because the script is no longer interested in + // its output, so we don't know what it would have done had it run to + // completion — for all we know, it could have exited without error if it + // ran just a smidge faster. + return cmdError(cmd, err) + } + + return nil +} + +// ListCmds prints to w a list of the named commands, +// annotating each with its arguments and a short usage summary. +// If verbose is true, ListCmds prints full details for each command. +// +// Each of the name arguments should be a command name. 
+// If no names are passed as arguments, ListCmds lists all the +// commands registered in e. +func (e *Engine) ListCmds(w io.Writer, verbose bool, names ...string) error { + if names == nil { + names = make([]string, 0, len(e.Cmds)) + for name := range e.Cmds { + names = append(names, name) + } + sort.Strings(names) + } + + for _, name := range names { + cmd := e.Cmds[name] + usage := cmd.Usage() + + suffix := "" + if usage.Async { + suffix = " [&]" + } + + _, err := fmt.Fprintf(w, "%s %s%s\n\t%s\n", name, usage.Args, suffix, usage.Summary) + if err != nil { + return err + } + + if verbose { + if _, err := io.WriteString(w, "\n"); err != nil { + return err + } + for _, line := range usage.Detail { + if err := wrapLine(w, line, 60, "\t"); err != nil { + return err + } + } + if _, err := io.WriteString(w, "\n"); err != nil { + return err + } + } + } + + return nil +} + +func wrapLine(w io.Writer, line string, cols int, indent string) error { + line = strings.TrimLeft(line, " ") + for len(line) > cols { + bestSpace := -1 + for i, r := range line { + if r == ' ' { + if i <= cols || bestSpace < 0 { + bestSpace = i + } + if i > cols { + break + } + } + } + if bestSpace < 0 { + break + } + + if _, err := fmt.Fprintf(w, "%s%s\n", indent, line[:bestSpace]); err != nil { + return err + } + line = line[bestSpace+1:] + } + + _, err := fmt.Fprintf(w, "%s%s\n", indent, line) + return err +} + +// ListConds prints to w a list of conditions, one per line, +// annotating each with a description and whether the condition +// is true in the state s (if s is non-nil). +// +// Each of the tag arguments should be a condition string of +// the form "name" or "name:suffix". If no tags are passed as +// arguments, ListConds lists all conditions registered in +// the engine e. 
+func (e *Engine) ListConds(w io.Writer, s *State, tags ...string) error { + if tags == nil { + tags = make([]string, 0, len(e.Conds)) + for name := range e.Conds { + tags = append(tags, name) + } + sort.Strings(tags) + } + + for _, tag := range tags { + if prefix, suffix, ok := strings.Cut(tag, ":"); ok { + cond := e.Conds[prefix] + if cond == nil { + return fmt.Errorf("unknown condition prefix %q", prefix) + } + usage := cond.Usage() + if !usage.Prefix { + return fmt.Errorf("condition %q cannot be used with a suffix", prefix) + } + + activeStr := "" + if s != nil { + if active, _ := cond.Eval(s, suffix); active { + activeStr = " (active)" + } + } + _, err := fmt.Fprintf(w, "[%s]%s\n\t%s\n", tag, activeStr, usage.Summary) + if err != nil { + return err + } + continue + } + + cond := e.Conds[tag] + if cond == nil { + return fmt.Errorf("unknown condition %q", tag) + } + var err error + usage := cond.Usage() + if usage.Prefix { + _, err = fmt.Fprintf(w, "[%s:*]\n\t%s\n", tag, usage.Summary) + } else { + activeStr := "" + if s != nil { + if ok, _ := cond.Eval(s, ""); ok { + activeStr = " (active)" + } + } + _, err = fmt.Fprintf(w, "[%s]%s\n\t%s\n", tag, activeStr, usage.Summary) + } + if err != nil { + return err + } + } + + return nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/script/errors.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/script/errors.go new file mode 100644 index 0000000000000000000000000000000000000000..7f43e728886a537290193703f3479bebcf647ffe --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/script/errors.go @@ -0,0 +1,64 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package script + +import ( + "errors" + "fmt" +) + +// ErrUnexpectedSuccess indicates that a script command that was expected to +// fail (as indicated by a "!" prefix) instead completed successfully. 
+var ErrUnexpectedSuccess = errors.New("unexpected success") + +// A CommandError describes an error resulting from attempting to execute a +// specific command. +type CommandError struct { + File string + Line int + Op string + Args []string + Err error +} + +func cmdError(cmd *command, err error) *CommandError { + return &CommandError{ + File: cmd.file, + Line: cmd.line, + Op: cmd.name, + Args: cmd.args, + Err: err, + } +} + +func (e *CommandError) Error() string { + if len(e.Args) == 0 { + return fmt.Sprintf("%s:%d: %s: %v", e.File, e.Line, e.Op, e.Err) + } + return fmt.Sprintf("%s:%d: %s %s: %v", e.File, e.Line, e.Op, quoteArgs(e.Args), e.Err) +} + +func (e *CommandError) Unwrap() error { return e.Err } + +// A UsageError reports the valid arguments for a command. +// +// It may be returned in response to invalid arguments. +type UsageError struct { + Name string + Command Cmd +} + +func (e *UsageError) Error() string { + usage := e.Command.Usage() + suffix := "" + if usage.Async { + suffix = " [&]" + } + return fmt.Sprintf("usage: %s %s%s", e.Name, usage.Args, suffix) +} + +// ErrUsage may be returned by a Command to indicate that it was called with +// invalid arguments; its Usage method may be called to obtain details. +var ErrUsage = errors.New("invalid usage") diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/script/scripttest/scripttest.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/script/scripttest/scripttest.go new file mode 100644 index 0000000000000000000000000000000000000000..6d7bd7863b9f42b1bebf1906e794c5119d599e34 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/script/scripttest/scripttest.go @@ -0,0 +1,143 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scripttest adapts the script engine for use in tests. 
+package scripttest + +import ( + "bufio" + "cmd/go/internal/cfg" + "cmd/go/internal/script" + "errors" + "io" + "strings" + "testing" +) + +// DefaultCmds returns a set of broadly useful script commands. +// +// This set includes all of the commands in script.DefaultCmds, +// as well as a "skip" command that halts the script and causes the +// testing.TB passed to Run to be skipped. +func DefaultCmds() map[string]script.Cmd { + cmds := script.DefaultCmds() + cmds["skip"] = Skip() + return cmds +} + +// DefaultConds returns a set of broadly useful script conditions. +// +// This set includes all of the conditions in script.DefaultConds, +// as well as: +// +// - Conditions of the form "exec:foo" are active when the executable "foo" is +// found in the test process's PATH, and inactive when the executable is +// not found. +// +// - "short" is active when testing.Short() is true. +// +// - "verbose" is active when testing.Verbose() is true. +func DefaultConds() map[string]script.Cond { + conds := script.DefaultConds() + conds["exec"] = CachedExec() + conds["short"] = script.BoolCondition("testing.Short()", testing.Short()) + conds["verbose"] = script.BoolCondition("testing.Verbose()", testing.Verbose()) + return conds +} + +// Run runs the script from the given filename starting at the given initial state. +// When the script completes, Run closes the state. +func Run(t testing.TB, e *script.Engine, s *script.State, filename string, testScript io.Reader) { + t.Helper() + err := func() (err error) { + log := new(strings.Builder) + log.WriteString("\n") // Start output on a new line for consistent indentation. + + // Defer writing to the test log in case the script engine panics during execution, + // but write the log before we write the final "skip" or "FAIL" line. 
+ t.Helper() + defer func() { + t.Helper() + + if closeErr := s.CloseAndWait(log); err == nil { + err = closeErr + } + + if log.Len() > 0 { + t.Log(strings.TrimSuffix(log.String(), "\n")) + } + }() + + if testing.Verbose() { + // Add the environment to the start of the script log. + wait, err := script.Env().Run(s) + if err != nil { + t.Fatal(err) + } + if wait != nil { + stdout, stderr, err := wait(s) + if err != nil { + t.Fatalf("env: %v\n%s", err, stderr) + } + if len(stdout) > 0 { + s.Logf("%s\n", stdout) + } + } + } + + return e.Execute(s, filename, bufio.NewReader(testScript), log) + }() + + if skip := (skipError{}); errors.As(err, &skip) { + if skip.msg == "" { + t.Skip("SKIP") + } else { + t.Skipf("SKIP: %v", skip.msg) + } + } + if err != nil { + t.Errorf("FAIL: %v", err) + } +} + +// Skip returns a sentinel error that causes Run to mark the test as skipped. +func Skip() script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "skip the current test", + Args: "[msg]", + }, + func(_ *script.State, args ...string) (script.WaitFunc, error) { + if len(args) > 1 { + return nil, script.ErrUsage + } + if len(args) == 0 { + return nil, skipError{""} + } + return nil, skipError{args[0]} + }) +} + +type skipError struct { + msg string +} + +func (s skipError) Error() string { + if s.msg == "" { + return "skip" + } + return s.msg +} + +// CachedExec returns a Condition that reports whether the PATH of the test +// binary itself (not the script's current environment) contains the named +// executable. 
+func CachedExec() script.Cond { + return script.CachedCondition( + " names an executable in the test binary's PATH", + func(name string) (bool, error) { + _, err := cfg.LookPath(name) + return err == nil, nil + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/script/state.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/script/state.go new file mode 100644 index 0000000000000000000000000000000000000000..548f67376b2163f720e2185330de76aaa3ca0b22 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/script/state.go @@ -0,0 +1,236 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package script + +import ( + "bytes" + "context" + "fmt" + "internal/txtar" + "io" + "io/fs" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" +) + +// A State encapsulates the current state of a running script engine, +// including the script environment and any running background commands. +type State struct { + engine *Engine // the engine currently executing the script, if any + + ctx context.Context + cancel context.CancelFunc + file string + log bytes.Buffer + + workdir string // initial working directory + pwd string // current working directory during execution + env []string // environment list (for os/exec) + envMap map[string]string // environment mapping (matches env) + stdout string // standard output from last 'go' command; for 'stdout' command + stderr string // standard error from last 'go' command; for 'stderr' command + + background []backgroundCmd +} + +type backgroundCmd struct { + *command + wait WaitFunc +} + +// NewState returns a new State permanently associated with ctx, with its +// initial working directory in workdir and its initial environment set to +// initialEnv (or os.Environ(), if initialEnv is nil). 
+// +// The new State also contains pseudo-environment-variables for +// ${/} and ${:} (for the platform's path and list separators respectively), +// but does not pass those to subprocesses. +func NewState(ctx context.Context, workdir string, initialEnv []string) (*State, error) { + absWork, err := filepath.Abs(workdir) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithCancel(ctx) + + // Make a fresh copy of the env slice to avoid aliasing bugs if we ever + // start modifying it in place; this also establishes the invariant that + // s.env contains no duplicates. + env := cleanEnv(initialEnv, absWork) + + envMap := make(map[string]string, len(env)) + + // Add entries for ${:} and ${/} to make it easier to write platform-independent + // paths in scripts. + envMap["/"] = string(os.PathSeparator) + envMap[":"] = string(os.PathListSeparator) + + for _, kv := range env { + if k, v, ok := strings.Cut(kv, "="); ok { + envMap[k] = v + } + } + + s := &State{ + ctx: ctx, + cancel: cancel, + workdir: absWork, + pwd: absWork, + env: env, + envMap: envMap, + } + s.Setenv("PWD", absWork) + return s, nil +} + +// CloseAndWait cancels the State's Context and waits for any background commands to +// finish. If any remaining background command ended in an unexpected state, +// Close returns a non-nil error. +func (s *State) CloseAndWait(log io.Writer) error { + s.cancel() + wait, err := Wait().Run(s) + if wait != nil { + panic("script: internal error: Wait unexpectedly returns its own WaitFunc") + } + if flushErr := s.flushLog(log); err == nil { + err = flushErr + } + return err +} + +// Chdir changes the State's working directory to the given path. +func (s *State) Chdir(path string) error { + dir := s.Path(path) + if _, err := os.Stat(dir); err != nil { + return &fs.PathError{Op: "Chdir", Path: dir, Err: err} + } + s.pwd = dir + s.Setenv("PWD", dir) + return nil +} + +// Context returns the Context with which the State was created. 
+func (s *State) Context() context.Context { + return s.ctx +} + +// Environ returns a copy of the current script environment, +// in the form "key=value". +func (s *State) Environ() []string { + return append([]string(nil), s.env...) +} + +// ExpandEnv replaces ${var} or $var in the string according to the values of +// the environment variables in s. References to undefined variables are +// replaced by the empty string. +func (s *State) ExpandEnv(str string, inRegexp bool) string { + return os.Expand(str, func(key string) string { + e := s.envMap[key] + if inRegexp { + // Quote to literal strings: we want paths like C:\work\go1.4 to remain + // paths rather than regular expressions. + e = regexp.QuoteMeta(e) + } + return e + }) +} + +// ExtractFiles extracts the files in ar to the state's current directory, +// expanding any environment variables within each name. +// +// The files must reside within the working directory with which the State was +// originally created. +func (s *State) ExtractFiles(ar *txtar.Archive) error { + wd := s.workdir + + // Add trailing separator to terminate wd. + // This prevents extracting to outside paths which prefix wd, + // e.g. extracting to /home/foobar when wd is /home/foo + if wd == "" { + panic("s.workdir is unexpectedly empty") + } + if !os.IsPathSeparator(wd[len(wd)-1]) { + wd += string(filepath.Separator) + } + + for _, f := range ar.Files { + name := s.Path(s.ExpandEnv(f.Name, false)) + + if !strings.HasPrefix(name, wd) { + return fmt.Errorf("file %#q is outside working directory", f.Name) + } + + if err := os.MkdirAll(filepath.Dir(name), 0777); err != nil { + return err + } + if err := os.WriteFile(name, f.Data, 0666); err != nil { + return err + } + } + + return nil +} + +// Getwd returns the directory in which to run the next script command. +func (s *State) Getwd() string { return s.pwd } + +// Logf writes output to the script's log without updating its stdout or stderr +// buffers. 
(The output log functions as a kind of meta-stderr.) +func (s *State) Logf(format string, args ...any) { + fmt.Fprintf(&s.log, format, args...) +} + +// flushLog writes the contents of the script's log to w and clears the log. +func (s *State) flushLog(w io.Writer) error { + _, err := w.Write(s.log.Bytes()) + s.log.Reset() + return err +} + +// LookupEnv retrieves the value of the environment variable in s named by the key. +func (s *State) LookupEnv(key string) (string, bool) { + v, ok := s.envMap[key] + return v, ok +} + +// Path returns the absolute path in the host operating system for a +// script-based (generally slash-separated and relative) path. +func (s *State) Path(path string) string { + if filepath.IsAbs(path) { + return filepath.Clean(path) + } + return filepath.Join(s.pwd, path) +} + +// Setenv sets the value of the environment variable in s named by the key. +func (s *State) Setenv(key, value string) error { + s.env = cleanEnv(append(s.env, key+"="+value), s.pwd) + s.envMap[key] = value + return nil +} + +// Stdout returns the stdout output of the last command run, +// or the empty string if no command has been run. +func (s *State) Stdout() string { return s.stdout } + +// Stderr returns the stderr output of the last command run, +// or the empty string if no command has been run. +func (s *State) Stderr() string { return s.stderr } + +// cleanEnv returns a copy of env with any duplicates removed in favor of +// later values and any required system variables defined. +// +// If env is nil, cleanEnv copies the environment from os.Environ(). +func cleanEnv(env []string, pwd string) []string { + // There are some funky edge-cases in this logic, especially on Windows (with + // case-insensitive environment variables and variables with keys like "=C:"). + // Rather than duplicating exec.dedupEnv here, cheat and use exec.Cmd directly. 
+ cmd := &exec.Cmd{Env: env} + cmd.Dir = pwd + return cmd.Environ() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/search/search.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/search/search.go new file mode 100644 index 0000000000000000000000000000000000000000..9f216d57568e551b130f7b1a761469fefbb5f0bb --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/search/search.go @@ -0,0 +1,512 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package search + +import ( + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/fsys" + "cmd/go/internal/str" + "cmd/internal/pkgpattern" + "fmt" + "go/build" + "io/fs" + "os" + "path" + "path/filepath" + "strings" +) + +// A Match represents the result of matching a single package pattern. +type Match struct { + pattern string // the pattern itself + Dirs []string // if the pattern is local, directories that potentially contain matching packages + Pkgs []string // matching packages (import paths) + Errs []error // errors matching the patterns to packages, NOT errors loading those packages + + // Errs may be non-empty even if len(Pkgs) > 0, indicating that some matching + // packages could be located but results may be incomplete. + // If len(Pkgs) == 0 && len(Errs) == 0, the pattern is well-formed but did not + // match any packages. +} + +// NewMatch returns a Match describing the given pattern, +// without resolving its packages or errors. +func NewMatch(pattern string) *Match { + return &Match{pattern: pattern} +} + +// Pattern returns the pattern to be matched. +func (m *Match) Pattern() string { return m.pattern } + +// AddError appends a MatchError wrapping err to m.Errs. +func (m *Match) AddError(err error) { + m.Errs = append(m.Errs, &MatchError{Match: m, Err: err}) +} + +// IsLiteral reports whether the pattern is free of wildcards and meta-patterns. 
+// +// A literal pattern must match at most one package. +func (m *Match) IsLiteral() bool { + return !strings.Contains(m.pattern, "...") && !m.IsMeta() +} + +// IsLocal reports whether the pattern must be resolved from a specific root or +// directory, such as a filesystem path or a single module. +func (m *Match) IsLocal() bool { + return build.IsLocalImport(m.pattern) || filepath.IsAbs(m.pattern) +} + +// IsMeta reports whether the pattern is a “meta-package” keyword that represents +// multiple packages, such as "std", "cmd", or "all". +func (m *Match) IsMeta() bool { + return IsMetaPackage(m.pattern) +} + +// IsMetaPackage checks if name is a reserved package name that expands to multiple packages. +func IsMetaPackage(name string) bool { + return name == "std" || name == "cmd" || name == "all" +} + +// A MatchError indicates an error that occurred while attempting to match a +// pattern. +type MatchError struct { + Match *Match + Err error +} + +func (e *MatchError) Error() string { + if e.Match.IsLiteral() { + return fmt.Sprintf("%s: %v", e.Match.Pattern(), e.Err) + } + return fmt.Sprintf("pattern %s: %v", e.Match.Pattern(), e.Err) +} + +func (e *MatchError) Unwrap() error { + return e.Err +} + +// MatchPackages sets m.Pkgs to a non-nil slice containing all the packages that +// can be found under the $GOPATH directories and $GOROOT that match the +// pattern. The pattern must be either "all" (all packages), "std" (standard +// packages), "cmd" (standard commands), or a path including "...". +// +// If any errors may have caused the set of packages to be incomplete, +// MatchPackages appends those errors to m.Errs. 
+func (m *Match) MatchPackages() { + m.Pkgs = []string{} + if m.IsLocal() { + m.AddError(fmt.Errorf("internal error: MatchPackages: %s is not a valid package pattern", m.pattern)) + return + } + + if m.IsLiteral() { + m.Pkgs = []string{m.pattern} + return + } + + match := func(string) bool { return true } + treeCanMatch := func(string) bool { return true } + if !m.IsMeta() { + match = pkgpattern.MatchPattern(m.pattern) + treeCanMatch = pkgpattern.TreeCanMatchPattern(m.pattern) + } + + have := map[string]bool{ + "builtin": true, // ignore pseudo-package that exists only for documentation + } + if !cfg.BuildContext.CgoEnabled { + have["runtime/cgo"] = true // ignore during walk + } + + for _, src := range cfg.BuildContext.SrcDirs() { + if (m.pattern == "std" || m.pattern == "cmd") && src != cfg.GOROOTsrc { + continue + } + + // If the root itself is a symlink to a directory, + // we want to follow it (see https://go.dev/issue/50807). + // Add a trailing separator to force that to happen. + src = str.WithFilePathSeparator(filepath.Clean(src)) + root := src + if m.pattern == "cmd" { + root += "cmd" + string(filepath.Separator) + } + + err := fsys.Walk(root, func(path string, fi fs.FileInfo, err error) error { + if err != nil { + return err // Likely a permission error, which could interfere with matching. + } + if path == src { + return nil // GOROOT/src and GOPATH/src cannot contain packages. + } + + want := true + // Avoid .foo, _foo, and testdata directory trees. + _, elem := filepath.Split(path) + if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { + want = false + } + + name := filepath.ToSlash(path[len(src):]) + if m.pattern == "std" && (!IsStandardImportPath(name) || name == "cmd") { + // The name "std" is only the standard library. + // If the name is cmd, it's the root of the command tree. 
+ want = false + } + if !treeCanMatch(name) { + want = false + } + + if !fi.IsDir() { + if fi.Mode()&fs.ModeSymlink != 0 && want && strings.Contains(m.pattern, "...") { + if target, err := fsys.Stat(path); err == nil && target.IsDir() { + fmt.Fprintf(os.Stderr, "warning: ignoring symlink %s\n", path) + } + } + return nil + } + if !want { + return filepath.SkipDir + } + + if have[name] { + return nil + } + have[name] = true + if !match(name) { + return nil + } + pkg, err := cfg.BuildContext.ImportDir(path, 0) + if err != nil { + if _, noGo := err.(*build.NoGoError); noGo { + // The package does not actually exist, so record neither the package + // nor the error. + return nil + } + // There was an error importing path, but not matching it, + // which is all that Match promises to do. + // Ignore the import error. + } + + // If we are expanding "cmd", skip main + // packages under cmd/vendor. At least as of + // March, 2017, there is one there for the + // vendored pprof tool. + if m.pattern == "cmd" && pkg != nil && strings.HasPrefix(pkg.ImportPath, "cmd/vendor") && pkg.Name == "main" { + return nil + } + + m.Pkgs = append(m.Pkgs, name) + return nil + }) + if err != nil { + m.AddError(err) + } + } +} + +// MatchDirs sets m.Dirs to a non-nil slice containing all directories that +// potentially match a local pattern. The pattern must begin with an absolute +// path, or "./", or "../". On Windows, the pattern may use slash or backslash +// separators or a mix of both. +// +// If any errors may have caused the set of directories to be incomplete, +// MatchDirs appends those errors to m.Errs. +func (m *Match) MatchDirs(modRoots []string) { + m.Dirs = []string{} + if !m.IsLocal() { + m.AddError(fmt.Errorf("internal error: MatchDirs: %s is not a valid filesystem pattern", m.pattern)) + return + } + + if m.IsLiteral() { + m.Dirs = []string{m.pattern} + return + } + + // Clean the path and create a matching predicate. 
+ // filepath.Clean removes "./" prefixes (and ".\" on Windows). We need to + // preserve these, since they are meaningful in MatchPattern and in + // returned import paths. + cleanPattern := filepath.Clean(m.pattern) + isLocal := strings.HasPrefix(m.pattern, "./") || (os.PathSeparator == '\\' && strings.HasPrefix(m.pattern, `.\`)) + prefix := "" + if cleanPattern != "." && isLocal { + prefix = "./" + cleanPattern = "." + string(os.PathSeparator) + cleanPattern + } + slashPattern := filepath.ToSlash(cleanPattern) + match := pkgpattern.MatchPattern(slashPattern) + + // Find directory to begin the scan. + // Could be smarter but this one optimization + // is enough for now, since ... is usually at the + // end of a path. + i := strings.Index(cleanPattern, "...") + dir, _ := filepath.Split(cleanPattern[:i]) + + // pattern begins with ./ or ../. + // path.Clean will discard the ./ but not the ../. + // We need to preserve the ./ for pattern matching + // and in the returned import paths. + + if len(modRoots) > 1 { + abs, err := filepath.Abs(dir) + if err != nil { + m.AddError(err) + return + } + var found bool + for _, modRoot := range modRoots { + if modRoot != "" && str.HasFilePathPrefix(abs, modRoot) { + found = true + } + } + if !found { + plural := "" + if len(modRoots) > 1 { + plural = "s" + } + m.AddError(fmt.Errorf("directory %s is outside module root%s (%s)", abs, plural, strings.Join(modRoots, ", "))) + } + } + + // If dir is actually a symlink to a directory, + // we want to follow it (see https://go.dev/issue/50807). + // Add a trailing separator to force that to happen. + dir = str.WithFilePathSeparator(dir) + err := fsys.Walk(dir, func(path string, fi fs.FileInfo, err error) error { + if err != nil { + return err // Likely a permission error, which could interfere with matching. + } + if !fi.IsDir() { + return nil + } + top := false + if path == dir { + // Walk starts at dir and recurses. 
For the recursive case, + // the path is the result of filepath.Join, which calls filepath.Clean. + // The initial case is not Cleaned, though, so we do this explicitly. + // + // This converts a path like "./io/" to "io". Without this step, running + // "cd $GOROOT/src; go list ./io/..." would incorrectly skip the io + // package, because prepending the prefix "./" to the unclean path would + // result in "././io", and match("././io") returns false. + top = true + path = filepath.Clean(path) + } + + // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". + _, elem := filepath.Split(path) + dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." + if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { + return filepath.SkipDir + } + + if !top && cfg.ModulesEnabled { + // Ignore other modules found in subdirectories. + if fi, err := fsys.Stat(filepath.Join(path, "go.mod")); err == nil && !fi.IsDir() { + return filepath.SkipDir + } + } + + name := prefix + filepath.ToSlash(path) + if !match(name) { + return nil + } + + // We keep the directory if we can import it, or if we can't import it + // due to invalid Go source files. This means that directories containing + // parse errors will be built (and fail) instead of being silently skipped + // as not matching the pattern. Go 1.5 and earlier skipped, but that + // behavior means people miss serious mistakes. + // See golang.org/issue/11407. + if p, err := cfg.BuildContext.ImportDir(path, 0); err != nil && (p == nil || len(p.InvalidGoFiles) == 0) { + if _, noGo := err.(*build.NoGoError); noGo { + // The package does not actually exist, so record neither the package + // nor the error. + return nil + } + // There was an error importing path, but not matching it, + // which is all that Match promises to do. + // Ignore the import error. 
+ } + m.Dirs = append(m.Dirs, name) + return nil + }) + if err != nil { + m.AddError(err) + } +} + +// WarnUnmatched warns about patterns that didn't match any packages. +func WarnUnmatched(matches []*Match) { + for _, m := range matches { + if len(m.Pkgs) == 0 && len(m.Errs) == 0 { + fmt.Fprintf(os.Stderr, "go: warning: %q matched no packages\n", m.pattern) + } + } +} + +// ImportPaths returns the matching paths to use for the given command line. +// It calls ImportPathsQuiet and then WarnUnmatched. +func ImportPaths(patterns, modRoots []string) []*Match { + matches := ImportPathsQuiet(patterns, modRoots) + WarnUnmatched(matches) + return matches +} + +// ImportPathsQuiet is like ImportPaths but does not warn about patterns with no matches. +func ImportPathsQuiet(patterns, modRoots []string) []*Match { + var out []*Match + for _, a := range CleanPatterns(patterns) { + m := NewMatch(a) + if m.IsLocal() { + m.MatchDirs(modRoots) + + // Change the file import path to a regular import path if the package + // is in GOPATH or GOROOT. We don't report errors here; LoadImport + // (or something similar) will report them later. + m.Pkgs = make([]string, len(m.Dirs)) + for i, dir := range m.Dirs { + absDir := dir + if !filepath.IsAbs(dir) { + absDir = filepath.Join(base.Cwd(), dir) + } + if bp, _ := cfg.BuildContext.ImportDir(absDir, build.FindOnly); bp.ImportPath != "" && bp.ImportPath != "." { + m.Pkgs[i] = bp.ImportPath + } else { + m.Pkgs[i] = dir + } + } + } else { + m.MatchPackages() + } + + out = append(out, m) + } + return out +} + +// CleanPatterns returns the patterns to use for the given command line. It +// canonicalizes the patterns but does not evaluate any matches. For patterns +// that are not local or absolute paths, it preserves text after '@' to avoid +// modifying version queries. 
+func CleanPatterns(patterns []string) []string { + if len(patterns) == 0 { + return []string{"."} + } + var out []string + for _, a := range patterns { + var p, v string + if build.IsLocalImport(a) || filepath.IsAbs(a) { + p = a + } else if i := strings.IndexByte(a, '@'); i < 0 { + p = a + } else { + p = a[:i] + v = a[i:] + } + + // Arguments may be either file paths or import paths. + // As a courtesy to Windows developers, rewrite \ to / + // in arguments that look like import paths. + // Don't replace slashes in absolute paths. + if filepath.IsAbs(p) { + p = filepath.Clean(p) + } else { + if filepath.Separator == '\\' { + p = strings.ReplaceAll(p, `\`, `/`) + } + + // Put argument in canonical form, but preserve leading ./. + if strings.HasPrefix(p, "./") { + p = "./" + path.Clean(p) + if p == "./." { + p = "." + } + } else { + p = path.Clean(p) + } + } + + out = append(out, p+v) + } + return out +} + +// IsStandardImportPath reports whether $GOROOT/src/path should be considered +// part of the standard distribution. For historical reasons we allow people to add +// their own code to $GOROOT instead of using $GOPATH, but we assume that +// code will start with a domain name (dot in the first element). +// +// Note that this function is meant to evaluate whether a directory found in GOROOT +// should be treated as part of the standard library. It should not be used to decide +// that a directory found in GOPATH should be rejected: directories in GOPATH +// need not have dots in the first element, and they just take their chances +// with future collisions in the standard library. +func IsStandardImportPath(path string) bool { + i := strings.Index(path, "/") + if i < 0 { + i = len(path) + } + elem := path[:i] + return !strings.Contains(elem, ".") +} + +// IsRelativePath reports whether pattern should be interpreted as a directory +// path relative to the current directory, as opposed to a pattern matching +// import paths. 
+func IsRelativePath(pattern string) bool { + return strings.HasPrefix(pattern, "./") || strings.HasPrefix(pattern, "../") || pattern == "." || pattern == ".." +} + +// InDir checks whether path is in the file tree rooted at dir. +// If so, InDir returns an equivalent path relative to dir. +// If not, InDir returns an empty string. +// InDir makes some effort to succeed even in the presence of symbolic links. +func InDir(path, dir string) string { + // inDirLex reports whether path is lexically in dir, + // without considering symbolic or hard links. + inDirLex := func(path, dir string) (string, bool) { + if dir == "" { + return path, true + } + rel := str.TrimFilePathPrefix(path, dir) + if rel == path { + return "", false + } + if rel == "" { + return ".", true + } + return rel, true + } + + if rel, ok := inDirLex(path, dir); ok { + return rel + } + xpath, err := filepath.EvalSymlinks(path) + if err != nil || xpath == path { + xpath = "" + } else { + if rel, ok := inDirLex(xpath, dir); ok { + return rel + } + } + + xdir, err := filepath.EvalSymlinks(dir) + if err == nil && xdir != dir { + if rel, ok := inDirLex(path, xdir); ok { + return rel + } + if xpath != "" { + if rel, ok := inDirLex(xpath, xdir); ok { + return rel + } + } + } + return "" +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/str/path.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/str/path.go new file mode 100644 index 0000000000000000000000000000000000000000..83a3d0eb75388f57b5bfbdac17b64e0bcdb5747a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/str/path.go @@ -0,0 +1,133 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package str + +import ( + "os" + "path/filepath" + "runtime" + "strings" +) + +// HasPathPrefix reports whether the slash-separated path s +// begins with the elements in prefix. 
+func HasPathPrefix(s, prefix string) bool { + if len(s) == len(prefix) { + return s == prefix + } + if prefix == "" { + return true + } + if len(s) > len(prefix) { + if prefix[len(prefix)-1] == '/' || s[len(prefix)] == '/' { + return s[:len(prefix)] == prefix + } + } + return false +} + +// HasFilePathPrefix reports whether the filesystem path s +// begins with the elements in prefix. +// +// HasFilePathPrefix is case-sensitive (except for volume names) even if the +// filesystem is not, does not apply Unicode normalization even if the +// filesystem does, and assumes that all path separators are canonicalized to +// filepath.Separator (as returned by filepath.Clean). +func HasFilePathPrefix(s, prefix string) bool { + sv := filepath.VolumeName(s) + pv := filepath.VolumeName(prefix) + + // Strip the volume from both paths before canonicalizing sv and pv: + // it's unlikely that strings.ToUpper will change the length of the string, + // but doesn't seem impossible. + s = s[len(sv):] + prefix = prefix[len(pv):] + + // Always treat Windows volume names as case-insensitive, even though + // we don't treat the rest of the path as such. + // + // TODO(bcmills): Why do we care about case only for the volume name? It's + // been this way since https://go.dev/cl/11316, but I don't understand why + // that problem doesn't apply to case differences in the entire path. + if sv != pv { + sv = strings.ToUpper(sv) + pv = strings.ToUpper(pv) + } + + switch { + default: + return false + case sv != pv: + return false + case len(s) == len(prefix): + return s == prefix + case prefix == "": + return true + case len(s) > len(prefix): + if prefix[len(prefix)-1] == filepath.Separator { + return strings.HasPrefix(s, prefix) + } + return s[len(prefix)] == filepath.Separator && s[:len(prefix)] == prefix + } +} + +// TrimFilePathPrefix returns s without the leading path elements in prefix, +// such that joining the string to prefix produces s. 
+// +// If s does not start with prefix (HasFilePathPrefix with the same arguments +// returns false), TrimFilePathPrefix returns s. If s equals prefix, +// TrimFilePathPrefix returns "". +func TrimFilePathPrefix(s, prefix string) string { + if prefix == "" { + // Trimming the empty string from a path should join to produce that path. + // (Trim("/tmp/foo", "") should give "/tmp/foo", not "tmp/foo".) + return s + } + if !HasFilePathPrefix(s, prefix) { + return s + } + + trimmed := s[len(prefix):] + if len(trimmed) > 0 && os.IsPathSeparator(trimmed[0]) { + if runtime.GOOS == "windows" && prefix == filepath.VolumeName(prefix) && len(prefix) == 2 && prefix[1] == ':' { + // Joining a relative path to a bare Windows drive letter produces a path + // relative to the working directory on that drive, but the original path + // was absolute, not relative. Keep the leading path separator so that it + // remains absolute when joined to prefix. + } else { + // Prefix ends in a regular path element, so strip the path separator that + // follows it. + trimmed = trimmed[1:] + } + } + return trimmed +} + +// WithFilePathSeparator returns s with a trailing path separator, or the empty +// string if s is empty. +func WithFilePathSeparator(s string) string { + if s == "" || os.IsPathSeparator(s[len(s)-1]) { + return s + } + return s + string(filepath.Separator) +} + +// QuoteGlob returns s with all Glob metacharacters quoted. +// We don't try to handle backslash here, as that can appear in a +// file path on Windows. 
+func QuoteGlob(s string) string { + if !strings.ContainsAny(s, `*?[]`) { + return s + } + var sb strings.Builder + for _, c := range s { + switch c { + case '*', '?', '[', ']': + sb.WriteByte('\\') + } + sb.WriteRune(c) + } + return sb.String() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/str/str.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/str/str.go new file mode 100644 index 0000000000000000000000000000000000000000..af7c6999722fa589dde9eca6ea39f758407b6971 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/str/str.go @@ -0,0 +1,113 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package str provides string manipulation utilities. +package str + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +// StringList flattens its arguments into a single []string. +// Each argument in args must have type string or []string. +func StringList(args ...any) []string { + var x []string + for _, arg := range args { + switch arg := arg.(type) { + case []string: + x = append(x, arg...) + case string: + x = append(x, arg) + default: + panic("stringList: invalid argument of type " + fmt.Sprintf("%T", arg)) + } + } + return x +} + +// ToFold returns a string with the property that +// +// strings.EqualFold(s, t) iff ToFold(s) == ToFold(t) +// +// This lets us test a large set of strings for fold-equivalent +// duplicates without making a quadratic number of calls +// to EqualFold. Note that strings.ToUpper and strings.ToLower +// do not have the desired property in some corner cases. +func ToFold(s string) string { + // Fast path: all ASCII, no upper case. + // Most paths look like this already. 
+ for i := 0; i < len(s); i++ { + c := s[i] + if c >= utf8.RuneSelf || 'A' <= c && c <= 'Z' { + goto Slow + } + } + return s + +Slow: + var b strings.Builder + for _, r := range s { + // SimpleFold(x) cycles to the next equivalent rune > x + // or wraps around to smaller values. Iterate until it wraps, + // and we've found the minimum value. + for { + r0 := r + r = unicode.SimpleFold(r0) + if r <= r0 { + break + } + } + // Exception to allow fast path above: A-Z => a-z + if 'A' <= r && r <= 'Z' { + r += 'a' - 'A' + } + b.WriteRune(r) + } + return b.String() +} + +// FoldDup reports a pair of strings from the list that are +// equal according to strings.EqualFold. +// It returns "", "" if there are no such strings. +func FoldDup(list []string) (string, string) { + clash := map[string]string{} + for _, s := range list { + fold := ToFold(s) + if t := clash[fold]; t != "" { + if s > t { + s, t = t, s + } + return s, t + } + clash[fold] = s + } + return "", "" +} + +// Contains reports whether x contains s. +func Contains(x []string, s string) bool { + for _, t := range x { + if t == s { + return true + } + } + return false +} + +// Uniq removes consecutive duplicate strings from ss. +func Uniq(ss *[]string) { + if len(*ss) <= 1 { + return + } + uniq := (*ss)[:1] + for _, s := range *ss { + if s != uniq[len(uniq)-1] { + uniq = append(uniq, s) + } + } + *ss = uniq +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/str/str_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/str/str_test.go new file mode 100644 index 0000000000000000000000000000000000000000..7c1987766635c8b291eda6c3344ff1eecb000156 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/str/str_test.go @@ -0,0 +1,185 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package str + +import ( + "os" + "path/filepath" + "runtime" + "strings" + "testing" +) + +var foldDupTests = []struct { + list []string + f1, f2 string +}{ + {StringList("math/rand", "math/big"), "", ""}, + {StringList("math", "strings"), "", ""}, + {StringList("strings"), "", ""}, + {StringList("strings", "strings"), "strings", "strings"}, + {StringList("Rand", "rand", "math", "math/rand", "math/Rand"), "Rand", "rand"}, +} + +func TestFoldDup(t *testing.T) { + for _, tt := range foldDupTests { + f1, f2 := FoldDup(tt.list) + if f1 != tt.f1 || f2 != tt.f2 { + t.Errorf("foldDup(%q) = %q, %q, want %q, %q", tt.list, f1, f2, tt.f1, tt.f2) + } + } +} + +func TestHasPathPrefix(t *testing.T) { + type testCase struct { + s, prefix string + want bool + } + for _, tt := range []testCase{ + {"", "", true}, + {"", "/", false}, + {"foo", "", true}, + {"foo", "/", false}, + {"foo", "foo", true}, + {"foo", "foo/", false}, + {"foo", "/foo", false}, + {"foo/bar", "", true}, + {"foo/bar", "foo", true}, + {"foo/bar", "foo/", true}, + {"foo/bar", "/foo", false}, + {"foo/bar", "foo/bar", true}, + {"foo/bar", "foo/bar/", false}, + {"foo/bar", "/foo/bar", false}, + } { + got := HasPathPrefix(tt.s, tt.prefix) + if got != tt.want { + t.Errorf("HasPathPrefix(%q, %q) = %v; want %v", tt.s, tt.prefix, got, tt.want) + } + } +} + +func TestTrimFilePathPrefixSlash(t *testing.T) { + if os.PathSeparator != '/' { + t.Skipf("test requires slash-separated file paths") + } + + type testCase struct { + s, prefix, want string + } + for _, tt := range []testCase{ + {"/", "", "/"}, + {"/", "/", ""}, + {"/foo", "", "/foo"}, + {"/foo", "/", "foo"}, + {"/foo", "/foo", ""}, + {"/foo/bar", "/foo", "bar"}, + {"/foo/bar", "/foo/", "bar"}, + {"/foo/", "/", "foo/"}, + {"/foo/", "/foo", ""}, + {"/foo/", "/foo/", ""}, + + // if prefix is not s's prefix, return s + {"", "/", ""}, + {"/foo", "/bar", "/foo"}, + {"/foo", "/foo/bar", "/foo"}, + {"foo", "/foo", "foo"}, + {"/foo", "foo", "/foo"}, + {"/foo", "/foo/", 
"/foo"}, + } { + got := TrimFilePathPrefix(tt.s, tt.prefix) + if got == tt.want { + t.Logf("TrimFilePathPrefix(%q, %q) = %q", tt.s, tt.prefix, got) + } else { + t.Errorf("TrimFilePathPrefix(%q, %q) = %q, want %q", tt.s, tt.prefix, got, tt.want) + } + + if HasFilePathPrefix(tt.s, tt.prefix) { + joined := filepath.Join(tt.prefix, got) + if clean := filepath.Clean(tt.s); joined != clean { + t.Errorf("filepath.Join(%q, %q) = %q, want %q", tt.prefix, got, joined, clean) + } + } + } +} + +func TestTrimFilePathPrefixWindows(t *testing.T) { + if runtime.GOOS != "windows" { + t.Skipf("test requires Windows file paths") + } + type testCase struct { + s, prefix, want string + } + for _, tt := range []testCase{ + {`\`, ``, `\`}, + {`\`, `\`, ``}, + {`C:`, `C:`, ``}, + {`C:\`, `C:`, `\`}, + {`C:\`, `C:\`, ``}, + {`C:\foo`, ``, `C:\foo`}, + {`C:\foo`, `C:`, `\foo`}, + {`C:\foo`, `C:\`, `foo`}, + {`C:\foo`, `C:\foo`, ``}, + {`C:\foo\`, `C:\foo`, ``}, + {`C:\foo\bar`, `C:\foo`, `bar`}, + {`C:\foo\bar`, `C:\foo\`, `bar`}, + // if prefix is not s's prefix, return s + {`C:\foo`, `C:\bar`, `C:\foo`}, + {`C:\foo`, `C:\foo\bar`, `C:\foo`}, + {`C:`, `C:\`, `C:`}, + // if volumes are different, return s + {`C:`, ``, `C:`}, + {`C:\`, ``, `C:\`}, + {`C:\foo`, ``, `C:\foo`}, + {`C:\foo`, `\foo`, `C:\foo`}, + {`C:\foo`, `D:\foo`, `C:\foo`}, + + //UNC path + {`\\host\share\foo`, `\\host\share`, `foo`}, + {`\\host\share\foo`, `\\host\share\`, `foo`}, + {`\\host\share\foo`, `\\host\share\foo`, ``}, + {`\\host\share\foo\bar`, `\\host\share\foo`, `bar`}, + {`\\host\share\foo\bar`, `\\host\share\foo\`, `bar`}, + // if prefix is not s's prefix, return s + {`\\host\share\foo`, `\\host\share\bar`, `\\host\share\foo`}, + {`\\host\share\foo`, `\\host\share\foo\bar`, `\\host\share\foo`}, + // if either host or share name is different, return s + {`\\host\share\foo`, ``, `\\host\share\foo`}, + {`\\host\share\foo`, `\foo`, `\\host\share\foo`}, + {`\\host\share\foo`, `\\host\other\`, `\\host\share\foo`}, + 
{`\\host\share\foo`, `\\other\share\`, `\\host\share\foo`}, + {`\\host\share\foo`, `\\host\`, `\\host\share\foo`}, + {`\\host\share\foo`, `\share\`, `\\host\share\foo`}, + + // only volume names are case-insensitive + {`C:\foo`, `c:`, `\foo`}, + {`C:\foo`, `c:\foo`, ``}, + {`c:\foo`, `C:`, `\foo`}, + {`c:\foo`, `C:\foo`, ``}, + {`C:\foo`, `C:\Foo`, `C:\foo`}, + {`\\Host\Share\foo`, `\\host\share`, `foo`}, + {`\\Host\Share\foo`, `\\host\share\foo`, ``}, + {`\\host\share\foo`, `\\Host\Share`, `foo`}, + {`\\host\share\foo`, `\\Host\Share\foo`, ``}, + {`\\Host\Share\foo`, `\\Host\Share\Foo`, `\\Host\Share\foo`}, + } { + got := TrimFilePathPrefix(tt.s, tt.prefix) + if got == tt.want { + t.Logf("TrimFilePathPrefix(%#q, %#q) = %#q", tt.s, tt.prefix, got) + } else { + t.Errorf("TrimFilePathPrefix(%#q, %#q) = %#q, want %#q", tt.s, tt.prefix, got, tt.want) + } + + if HasFilePathPrefix(tt.s, tt.prefix) { + // Although TrimFilePathPrefix is only case-insensitive in the volume name, + // what we care about in testing Join is that absolute paths remain + // absolute and relative paths remaining relative — there is no harm in + // over-normalizing letters in the comparison, so we use EqualFold. + joined := filepath.Join(tt.prefix, got) + if clean := filepath.Clean(tt.s); !strings.EqualFold(joined, clean) { + t.Errorf("filepath.Join(%#q, %#q) = %#q, want %#q", tt.prefix, got, joined, clean) + } + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/test/cover.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/cover.go new file mode 100644 index 0000000000000000000000000000000000000000..f614458dc46a15baead3d1efa7839e0593894eee --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/cover.go @@ -0,0 +1,85 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package test + +import ( + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "fmt" + "io" + "os" + "path/filepath" + "sync" +) + +var coverMerge struct { + f *os.File + sync.Mutex // for f.Write +} + +// initCoverProfile initializes the test coverage profile. +// It must be run before any calls to mergeCoverProfile or closeCoverProfile. +// Using this function clears the profile in case it existed from a previous run, +// or in case it doesn't exist and the test is going to fail to create it (or not run). +func initCoverProfile() { + if testCoverProfile == "" || testC { + return + } + if !filepath.IsAbs(testCoverProfile) { + testCoverProfile = filepath.Join(testOutputDir.getAbs(), testCoverProfile) + } + + // No mutex - caller's responsibility to call with no racing goroutines. + f, err := os.Create(testCoverProfile) + if err != nil { + base.Fatalf("%v", err) + } + _, err = fmt.Fprintf(f, "mode: %s\n", cfg.BuildCoverMode) + if err != nil { + base.Fatalf("%v", err) + } + coverMerge.f = f +} + +// mergeCoverProfile merges file into the profile stored in testCoverProfile. +// It prints any errors it encounters to ew. +func mergeCoverProfile(ew io.Writer, file string) { + if coverMerge.f == nil { + return + } + coverMerge.Lock() + defer coverMerge.Unlock() + + expect := fmt.Sprintf("mode: %s\n", cfg.BuildCoverMode) + buf := make([]byte, len(expect)) + r, err := os.Open(file) + if err != nil { + // Test did not create profile, which is OK. 
+ return + } + defer r.Close() + + n, err := io.ReadFull(r, buf) + if n == 0 { + return + } + if err != nil || string(buf) != expect { + fmt.Fprintf(ew, "error: test wrote malformed coverage profile %s.\n", file) + return + } + _, err = io.Copy(coverMerge.f, r) + if err != nil { + fmt.Fprintf(ew, "error: saving coverage profile: %v\n", err) + } +} + +func closeCoverProfile() { + if coverMerge.f == nil { + return + } + if err := coverMerge.f.Close(); err != nil { + base.Errorf("closing coverage profile: %v", err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/test/flagdefs.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/flagdefs.go new file mode 100644 index 0000000000000000000000000000000000000000..baa0cdf4c64d45f8e1cce4a3f8970d47b3aaf9ab --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/flagdefs.go @@ -0,0 +1,79 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by genflags.go — DO NOT EDIT. + +package test + +// passFlagToTest contains the flags that should be forwarded to +// the test binary with the prefix "test.". 
+var passFlagToTest = map[string]bool{ + "bench": true, + "benchmem": true, + "benchtime": true, + "blockprofile": true, + "blockprofilerate": true, + "count": true, + "coverprofile": true, + "cpu": true, + "cpuprofile": true, + "failfast": true, + "fullpath": true, + "fuzz": true, + "fuzzminimizetime": true, + "fuzztime": true, + "list": true, + "memprofile": true, + "memprofilerate": true, + "mutexprofile": true, + "mutexprofilefraction": true, + "outputdir": true, + "parallel": true, + "run": true, + "short": true, + "shuffle": true, + "skip": true, + "timeout": true, + "trace": true, + "v": true, +} + +var passAnalyzersToVet = map[string]bool{ + "appends": true, + "asmdecl": true, + "assign": true, + "atomic": true, + "bool": true, + "bools": true, + "buildtag": true, + "buildtags": true, + "cgocall": true, + "composites": true, + "copylocks": true, + "defers": true, + "directive": true, + "errorsas": true, + "framepointer": true, + "httpresponse": true, + "ifaceassert": true, + "loopclosure": true, + "lostcancel": true, + "methods": true, + "nilfunc": true, + "printf": true, + "rangeloops": true, + "shift": true, + "sigchanyzer": true, + "slog": true, + "stdmethods": true, + "stringintconv": true, + "structtag": true, + "testinggoroutine": true, + "tests": true, + "timeformat": true, + "unmarshal": true, + "unreachable": true, + "unsafeptr": true, + "unusedresult": true, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/test/flagdefs_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/flagdefs_test.go new file mode 100644 index 0000000000000000000000000000000000000000..5461b2d1a5237831c548a32f7b62dfa1907c64b7 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/flagdefs_test.go @@ -0,0 +1,76 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package test + +import ( + "cmd/go/internal/cfg" + "cmd/go/internal/test/internal/genflags" + "internal/testenv" + "maps" + "os" + "testing" +) + +func TestMain(m *testing.M) { + cfg.SetGOROOT(testenv.GOROOT(nil), false) + os.Exit(m.Run()) +} + +func TestPassFlagToTest(t *testing.T) { + wantNames := genflags.ShortTestFlags() + + missing := map[string]bool{} + for _, name := range wantNames { + if !passFlagToTest[name] { + missing[name] = true + } + } + if len(missing) > 0 { + t.Errorf("passFlagToTest is missing entries: %v", missing) + } + + extra := maps.Clone(passFlagToTest) + for _, name := range wantNames { + delete(extra, name) + } + if len(extra) > 0 { + t.Errorf("passFlagToTest contains extra entries: %v", extra) + } + + if t.Failed() { + t.Logf("To regenerate:\n\tgo generate cmd/go/internal/test") + } +} + +func TestPassAnalyzersToVet(t *testing.T) { + testenv.MustHaveGoBuild(t) // runs 'go tool vet -flags' + + wantNames, err := genflags.VetAnalyzers() + if err != nil { + t.Fatal(err) + } + + missing := map[string]bool{} + for _, name := range wantNames { + if !passAnalyzersToVet[name] { + missing[name] = true + } + } + if len(missing) > 0 { + t.Errorf("passAnalyzersToVet is missing entries: %v", missing) + } + + extra := maps.Clone(passAnalyzersToVet) + for _, name := range wantNames { + delete(extra, name) + } + if len(extra) > 0 { + t.Errorf("passFlagToTest contains extra entries: %v", extra) + } + + if t.Failed() { + t.Logf("To regenerate:\n\tgo generate cmd/go/internal/test") + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/test/genflags.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/genflags.go new file mode 100644 index 0000000000000000000000000000000000000000..bb5ceb647baf4ce508865f313255ff2d12aba71b --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/genflags.go @@ -0,0 +1,84 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +import ( + "bytes" + "log" + "os" + "os/exec" + "text/template" + + "cmd/go/internal/test/internal/genflags" +) + +func main() { + if err := regenerate(); err != nil { + log.Fatal(err) + } +} + +func regenerate() error { + vetAnalyzers, err := genflags.VetAnalyzers() + if err != nil { + return err + } + + t := template.Must(template.New("fileTemplate").Parse(fileTemplate)) + tData := map[string][]string{ + "testFlags": genflags.ShortTestFlags(), + "vetAnalyzers": vetAnalyzers, + } + buf := bytes.NewBuffer(nil) + if err := t.Execute(buf, tData); err != nil { + return err + } + + f, err := os.Create("flagdefs.go") + if err != nil { + return err + } + + cmd := exec.Command("gofmt") + cmd.Stdin = buf + cmd.Stdout = f + cmd.Stderr = os.Stderr + cmdErr := cmd.Run() + + if err := f.Close(); err != nil { + return err + } + if cmdErr != nil { + os.Remove(f.Name()) + return cmdErr + } + + return nil +} + +const fileTemplate = `// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by genflags.go — DO NOT EDIT. + +package test + +// passFlagToTest contains the flags that should be forwarded to +// the test binary with the prefix "test.". 
+var passFlagToTest = map[string]bool { +{{- range .testFlags}} + "{{.}}": true, +{{- end }} +} + +var passAnalyzersToVet = map[string]bool { +{{- range .vetAnalyzers}} + "{{.}}": true, +{{- end }} +} +` diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/test/internal/genflags/testflag.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/internal/genflags/testflag.go new file mode 100644 index 0000000000000000000000000000000000000000..712428d86adc1607f417a4c047be006231cfd4af --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/internal/genflags/testflag.go @@ -0,0 +1,35 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genflags + +import ( + "flag" + "strings" + "testing" +) + +// ShortTestFlags returns the set of "-test." flag shorthand names that end +// users may pass to 'go test'. +func ShortTestFlags() []string { + testing.Init() + + var names []string + flag.VisitAll(func(f *flag.Flag) { + var name string + var found bool + if name, found = strings.CutPrefix(f.Name, "test."); !found { + return + } + + switch name { + case "testlogfile", "paniconexit0", "fuzzcachedir", "fuzzworker", "gocoverdir": + // These flags are only for use by cmd/go. + default: + names = append(names, name) + } + }) + + return names +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/test/internal/genflags/vetflag.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/internal/genflags/vetflag.go new file mode 100644 index 0000000000000000000000000000000000000000..1448811af053e4ab7f2270c6d3e9ee13f9bb2f0f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/internal/genflags/vetflag.go @@ -0,0 +1,68 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package genflags + +import ( + "bytes" + "cmd/go/internal/base" + "encoding/json" + "fmt" + "os/exec" + "regexp" + "sort" +) + +// VetAnalyzers computes analyzers and their aliases supported by vet. +func VetAnalyzers() ([]string, error) { + // get supported vet flag information + tool := base.Tool("vet") + vetcmd := exec.Command(tool, "-flags") + out := new(bytes.Buffer) + vetcmd.Stdout = out + if err := vetcmd.Run(); err != nil { + return nil, fmt.Errorf("go vet: can't execute %s -flags: %v\n", tool, err) + } + var analysisFlags []struct { + Name string + Bool bool + Usage string + } + if err := json.Unmarshal(out.Bytes(), &analysisFlags); err != nil { + return nil, fmt.Errorf("go vet: can't unmarshal JSON from %s -flags: %v", tool, err) + } + + // parse the flags to figure out which ones stand for analyses + analyzerSet := make(map[string]bool) + rEnable := regexp.MustCompile("^enable .+ analysis$") + for _, flag := range analysisFlags { + if rEnable.MatchString(flag.Usage) { + analyzerSet[flag.Name] = true + } + } + + rDeprecated := regexp.MustCompile("^deprecated alias for -(?P(.+))$") + // Returns the original value matched by rDeprecated on input value. + // If there is no match, "" is returned. 
+ originalValue := func(value string) string { + match := rDeprecated.FindStringSubmatch(value) + if len(match) < 2 { + return "" + } + return match[1] + } + // extract deprecated aliases for existing analyses + for _, flag := range analysisFlags { + if o := originalValue(flag.Usage); analyzerSet[o] { + analyzerSet[flag.Name] = true + } + } + + var analyzers []string + for a := range analyzerSet { + analyzers = append(analyzers, a) + } + sort.Strings(analyzers) + return analyzers, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/test/test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/test.go new file mode 100644 index 0000000000000000000000000000000000000000..13818b72ab50ff2a4d54e72b34838a8b861c4d26 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/test.go @@ -0,0 +1,2098 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "bytes" + "context" + "errors" + "fmt" + "internal/coverage" + "internal/platform" + "io" + "io/fs" + "os" + "os/exec" + "path/filepath" + "regexp" + "slices" + "strconv" + "strings" + "sync" + "time" + + "cmd/go/internal/base" + "cmd/go/internal/cache" + "cmd/go/internal/cfg" + "cmd/go/internal/load" + "cmd/go/internal/lockedfile" + "cmd/go/internal/modload" + "cmd/go/internal/search" + "cmd/go/internal/str" + "cmd/go/internal/trace" + "cmd/go/internal/work" + "cmd/internal/test2json" + + "golang.org/x/mod/module" +) + +// Break init loop. +func init() { + CmdTest.Run = runTest +} + +const testUsage = "go test [build/test flags] [packages] [build/test flags & test binary flags]" + +var CmdTest = &base.Command{ + CustomFlags: true, + UsageLine: testUsage, + Short: "test packages", + Long: ` +'Go test' automates testing the packages named by the import paths. 
+It prints a summary of the test results in the format: + + ok archive/tar 0.011s + FAIL archive/zip 0.022s + ok compress/gzip 0.033s + ... + +followed by detailed output for each failed package. + +'Go test' recompiles each package along with any files with names matching +the file pattern "*_test.go". +These additional files can contain test functions, benchmark functions, fuzz +tests and example functions. See 'go help testfunc' for more. +Each listed package causes the execution of a separate test binary. +Files whose names begin with "_" (including "_test.go") or "." are ignored. + +Test files that declare a package with the suffix "_test" will be compiled as a +separate package, and then linked and run with the main test binary. + +The go tool will ignore a directory named "testdata", making it available +to hold ancillary data needed by the tests. + +As part of building a test binary, go test runs go vet on the package +and its test source files to identify significant problems. If go vet +finds any problems, go test reports those and does not run the test +binary. Only a high-confidence subset of the default go vet checks are +used. That subset is: atomic, bool, buildtags, directive, errorsas, +ifaceassert, nilfunc, printf, and stringintconv. You can see +the documentation for these and other vet tests via "go doc cmd/vet". +To disable the running of go vet, use the -vet=off flag. To run all +checks, use the -vet=all flag. + +All test output and summary lines are printed to the go command's +standard output, even if the test printed them to its own standard +error. (The go command's standard error is reserved for printing +errors building the tests.) + +The go command places $GOROOT/bin at the beginning of $PATH +in the test's environment, so that tests that execute +'go' commands use the same 'go' as the parent 'go test' command. 
+ +Go test runs in two different modes: + +The first, called local directory mode, occurs when go test is +invoked with no package arguments (for example, 'go test' or 'go +test -v'). In this mode, go test compiles the package sources and +tests found in the current directory and then runs the resulting +test binary. In this mode, caching (discussed below) is disabled. +After the package test finishes, go test prints a summary line +showing the test status ('ok' or 'FAIL'), package name, and elapsed +time. + +The second, called package list mode, occurs when go test is invoked +with explicit package arguments (for example 'go test math', 'go +test ./...', and even 'go test .'). In this mode, go test compiles +and tests each of the packages listed on the command line. If a +package test passes, go test prints only the final 'ok' summary +line. If a package test fails, go test prints the full test output. +If invoked with the -bench or -v flag, go test prints the full +output even for passing package tests, in order to display the +requested benchmark results or verbose logging. After the package +tests for all of the listed packages finish, and their output is +printed, go test prints a final 'FAIL' status if any package test +has failed. + +In package list mode only, go test caches successful package test +results to avoid unnecessary repeated running of tests. When the +result of a test can be recovered from the cache, go test will +redisplay the previous output instead of running the test binary +again. When this happens, go test prints '(cached)' in place of the +elapsed time in the summary line. + +The rule for a match in the cache is that the run involves the same +test binary and the flags on the command line come entirely from a +restricted set of 'cacheable' test flags, defined as -benchtime, -cpu, +-list, -parallel, -run, -short, -timeout, -failfast, and -v. +If a run of go test has any test or non-test flags outside this set, +the result is not cached. 
To disable test caching, use any test flag +or argument other than the cacheable flags. The idiomatic way to disable +test caching explicitly is to use -count=1. Tests that open files within +the package's source root (usually $GOPATH) or that consult environment +variables only match future runs in which the files and environment +variables are unchanged. A cached test result is treated as executing +in no time at all, so a successful package test result will be cached and +reused regardless of -timeout setting. + +In addition to the build flags, the flags handled by 'go test' itself are: + + -args + Pass the remainder of the command line (everything after -args) + to the test binary, uninterpreted and unchanged. + Because this flag consumes the remainder of the command line, + the package list (if present) must appear before this flag. + + -c + Compile the test binary to pkg.test in the current directory but do not run it + (where pkg is the last element of the package's import path). + The file name or target directory can be changed with the -o flag. + + -exec xprog + Run the test binary using xprog. The behavior is the same as + in 'go run'. See 'go help run' for details. + + -json + Convert test output to JSON suitable for automated processing. + See 'go doc test2json' for the encoding details. + + -o file + Compile the test binary to the named file. + The test still runs (unless -c or -i is specified). + If file ends in a slash or names an existing directory, + the test is written to pkg.test in that directory. + +The test binary also accepts flags that control execution of the test; these +flags are also accessible by 'go test'. See 'go help testflag' for details. + +For more about build flags, see 'go help build'. +For more about specifying packages, see 'go help packages'. + +See also: go build, go vet. 
+`, +} + +var HelpTestflag = &base.Command{ + UsageLine: "testflag", + Short: "testing flags", + Long: ` +The 'go test' command takes both flags that apply to 'go test' itself +and flags that apply to the resulting test binary. + +Several of the flags control profiling and write an execution profile +suitable for "go tool pprof"; run "go tool pprof -h" for more +information. The --alloc_space, --alloc_objects, and --show_bytes +options of pprof control how the information is presented. + +The following flags are recognized by the 'go test' command and +control the execution of any test: + + -bench regexp + Run only those benchmarks matching a regular expression. + By default, no benchmarks are run. + To run all benchmarks, use '-bench .' or '-bench=.'. + The regular expression is split by unbracketed slash (/) + characters into a sequence of regular expressions, and each + part of a benchmark's identifier must match the corresponding + element in the sequence, if any. Possible parents of matches + are run with b.N=1 to identify sub-benchmarks. For example, + given -bench=X/Y, top-level benchmarks matching X are run + with b.N=1 to find any sub-benchmarks matching Y, which are + then run in full. + + -benchtime t + Run enough iterations of each benchmark to take t, specified + as a time.Duration (for example, -benchtime 1h30s). + The default is 1 second (1s). + The special syntax Nx means to run the benchmark N times + (for example, -benchtime 100x). + + -count n + Run each test, benchmark, and fuzz seed n times (default 1). + If -cpu is set, run n times for each GOMAXPROCS value. + Examples are always run once. -count does not apply to + fuzz tests matched by -fuzz. + + -cover + Enable coverage analysis. + Note that because coverage works by annotating the source + code before compilation, compilation and test failures with + coverage enabled may report line numbers that don't correspond + to the original sources. 
+ + -covermode set,count,atomic + Set the mode for coverage analysis for the package[s] + being tested. The default is "set" unless -race is enabled, + in which case it is "atomic". + The values: + set: bool: does this statement run? + count: int: how many times does this statement run? + atomic: int: count, but correct in multithreaded tests; + significantly more expensive. + Sets -cover. + + -coverpkg pattern1,pattern2,pattern3 + Apply coverage analysis in each test to packages matching the patterns. + The default is for each test to analyze only the package being tested. + See 'go help packages' for a description of package patterns. + Sets -cover. + + -cpu 1,2,4 + Specify a list of GOMAXPROCS values for which the tests, benchmarks or + fuzz tests should be executed. The default is the current value + of GOMAXPROCS. -cpu does not apply to fuzz tests matched by -fuzz. + + -failfast + Do not start new tests after the first test failure. + + -fullpath + Show full file names in the error messages. + + -fuzz regexp + Run the fuzz test matching the regular expression. When specified, + the command line argument must match exactly one package within the + main module, and regexp must match exactly one fuzz test within + that package. Fuzzing will occur after tests, benchmarks, seed corpora + of other fuzz tests, and examples have completed. See the Fuzzing + section of the testing package documentation for details. + + -fuzztime t + Run enough iterations of the fuzz target during fuzzing to take t, + specified as a time.Duration (for example, -fuzztime 1h30s). + The default is to run forever. + The special syntax Nx means to run the fuzz target N times + (for example, -fuzztime 1000x). + + -fuzzminimizetime t + Run enough iterations of the fuzz target during each minimization + attempt to take t, as specified as a time.Duration (for example, + -fuzzminimizetime 30s). + The default is 60s. 
+ The special syntax Nx means to run the fuzz target N times + (for example, -fuzzminimizetime 100x). + + -json + Log verbose output and test results in JSON. This presents the + same information as the -v flag in a machine-readable format. + + -list regexp + List tests, benchmarks, fuzz tests, or examples matching the regular + expression. No tests, benchmarks, fuzz tests, or examples will be run. + This will only list top-level tests. No subtest or subbenchmarks will be + shown. + + -parallel n + Allow parallel execution of test functions that call t.Parallel, and + fuzz targets that call t.Parallel when running the seed corpus. + The value of this flag is the maximum number of tests to run + simultaneously. + While fuzzing, the value of this flag is the maximum number of + subprocesses that may call the fuzz function simultaneously, regardless of + whether T.Parallel is called. + By default, -parallel is set to the value of GOMAXPROCS. + Setting -parallel to values higher than GOMAXPROCS may cause degraded + performance due to CPU contention, especially when fuzzing. + Note that -parallel only applies within a single test binary. + The 'go test' command may run tests for different packages + in parallel as well, according to the setting of the -p flag + (see 'go help build'). + + -run regexp + Run only those tests, examples, and fuzz tests matching the regular + expression. For tests, the regular expression is split by unbracketed + slash (/) characters into a sequence of regular expressions, and each + part of a test's identifier must match the corresponding element in + the sequence, if any. Note that possible parents of matches are + run too, so that -run=X/Y matches and runs and reports the result + of all tests matching X, even those without sub-tests matching Y, + because it must run them to look for those sub-tests. + See also -skip. + + -short + Tell long-running tests to shorten their run time. 
+ It is off by default but set during all.bash so that installing + the Go tree can run a sanity check but not spend time running + exhaustive tests. + + -shuffle off,on,N + Randomize the execution order of tests and benchmarks. + It is off by default. If -shuffle is set to on, then it will seed + the randomizer using the system clock. If -shuffle is set to an + integer N, then N will be used as the seed value. In both cases, + the seed will be reported for reproducibility. + + -skip regexp + Run only those tests, examples, fuzz tests, and benchmarks that + do not match the regular expression. Like for -run and -bench, + for tests and benchmarks, the regular expression is split by unbracketed + slash (/) characters into a sequence of regular expressions, and each + part of a test's identifier must match the corresponding element in + the sequence, if any. + + -timeout d + If a test binary runs longer than duration d, panic. + If d is 0, the timeout is disabled. + The default is 10 minutes (10m). + + -v + Verbose output: log all tests as they are run. Also print all + text from Log and Logf calls even if the test succeeds. + + -vet list + Configure the invocation of "go vet" during "go test" + to use the comma-separated list of vet checks. + If list is empty, "go test" runs "go vet" with a curated list of + checks believed to be always worth addressing. + If list is "off", "go test" does not run "go vet" at all. + +The following flags are also recognized by 'go test' and can be used to +profile the tests during execution: + + -benchmem + Print memory allocation statistics for benchmarks. + + -blockprofile block.out + Write a goroutine blocking profile to the specified file + when all tests are complete. + Writes test binary as -c would. + + -blockprofilerate n + Control the detail provided in goroutine blocking profiles by + calling runtime.SetBlockProfileRate with n. + See 'go doc runtime.SetBlockProfileRate'. 
+ The profiler aims to sample, on average, one blocking event every + n nanoseconds the program spends blocked. By default, + if -test.blockprofile is set without this flag, all blocking events + are recorded, equivalent to -test.blockprofilerate=1. + + -coverprofile cover.out + Write a coverage profile to the file after all tests have passed. + Sets -cover. + + -cpuprofile cpu.out + Write a CPU profile to the specified file before exiting. + Writes test binary as -c would. + + -memprofile mem.out + Write an allocation profile to the file after all tests have passed. + Writes test binary as -c would. + + -memprofilerate n + Enable more precise (and expensive) memory allocation profiles by + setting runtime.MemProfileRate. See 'go doc runtime.MemProfileRate'. + To profile all memory allocations, use -test.memprofilerate=1. + + -mutexprofile mutex.out + Write a mutex contention profile to the specified file + when all tests are complete. + Writes test binary as -c would. + + -mutexprofilefraction n + Sample 1 in n stack traces of goroutines holding a + contended mutex. + + -outputdir directory + Place output files from profiling in the specified directory, + by default the directory in which "go test" is running. + + -trace trace.out + Write an execution trace to the specified file before exiting. + +Each of these flags is also recognized with an optional 'test.' prefix, +as in -test.v. When invoking the generated test binary (the result of +'go test -c') directly, however, the prefix is mandatory. + +The 'go test' command rewrites or removes recognized flags, +as appropriate, both before and after the optional package list, +before invoking the test binary. + +For instance, the command + + go test -v -myflag testdata -cpuprofile=prof.out -x + +will compile the test binary and then run it as + + pkg.test -test.v -myflag testdata -test.cpuprofile=prof.out + +(The -x flag is removed because it applies only to the go command's +execution, not to the test itself.) 
+ +The test flags that generate profiles (other than for coverage) also +leave the test binary in pkg.test for use when analyzing the profiles. + +When 'go test' runs a test binary, it does so from within the +corresponding package's source code directory. Depending on the test, +it may be necessary to do the same when invoking a generated test +binary directly. Because that directory may be located within the +module cache, which may be read-only and is verified by checksums, the +test must not write to it or any other directory within the module +unless explicitly requested by the user (such as with the -fuzz flag, +which writes failures to testdata/fuzz). + +The command-line package list, if present, must appear before any +flag not known to the go test command. Continuing the example above, +the package list would have to appear before -myflag, but could appear +on either side of -v. + +When 'go test' runs in package list mode, 'go test' caches successful +package test results to avoid unnecessary repeated running of tests. To +disable test caching, use any test flag or argument other than the +cacheable flags. The idiomatic way to disable test caching explicitly +is to use -count=1. + +To keep an argument for a test binary from being interpreted as a +known flag or a package name, use -args (see 'go help test') which +passes the remainder of the command line through to the test binary +uninterpreted and unaltered. + +For instance, the command + + go test -v -args -x -v + +will compile the test binary and then run it as + + pkg.test -test.v -x -v + +Similarly, + + go test -args math + +will compile the test binary and then run it as + + pkg.test math + +In the first example, the -x and the second -v are passed through to the +test binary unchanged and with no effect on the go command itself. +In the second example, the argument math is passed through to the test +binary, instead of being interpreted as the package list. 
+`, +} + +var HelpTestfunc = &base.Command{ + UsageLine: "testfunc", + Short: "testing functions", + Long: ` +The 'go test' command expects to find test, benchmark, and example functions +in the "*_test.go" files corresponding to the package under test. + +A test function is one named TestXxx (where Xxx does not start with a +lower case letter) and should have the signature, + + func TestXxx(t *testing.T) { ... } + +A benchmark function is one named BenchmarkXxx and should have the signature, + + func BenchmarkXxx(b *testing.B) { ... } + +A fuzz test is one named FuzzXxx and should have the signature, + + func FuzzXxx(f *testing.F) { ... } + +An example function is similar to a test function but, instead of using +*testing.T to report success or failure, prints output to os.Stdout. +If the last comment in the function starts with "Output:" then the output +is compared exactly against the comment (see examples below). If the last +comment begins with "Unordered output:" then the output is compared to the +comment, however the order of the lines is ignored. An example with no such +comment is compiled but not executed. An example with no text after +"Output:" is compiled, executed, and expected to produce no output. + +Godoc displays the body of ExampleXxx to demonstrate the use +of the function, constant, or variable Xxx. An example of a method M with +receiver type T or *T is named ExampleT_M. There may be multiple examples +for a given function, constant, or variable, distinguished by a trailing _xxx, +where xxx is a suffix not beginning with an upper case letter. + +Here is an example of an example: + + func ExamplePrintln() { + Println("The output of\nthis example.") + // Output: The output of + // this example. 
+ } + +Here is another example where the ordering of the output is ignored: + + func ExamplePerm() { + for _, value := range Perm(4) { + fmt.Println(value) + } + + // Unordered output: 4 + // 2 + // 1 + // 3 + // 0 + } + +The entire test file is presented as the example when it contains a single +example function, at least one other function, type, variable, or constant +declaration, and no tests, benchmarks, or fuzz tests. + +See the documentation of the testing package for more information. +`, +} + +var ( + testBench string // -bench flag + testC bool // -c flag + testCoverPkgs []*load.Package // -coverpkg flag + testCoverProfile string // -coverprofile flag + testFuzz string // -fuzz flag + testJSON bool // -json flag + testList string // -list flag + testO string // -o flag + testOutputDir outputdirFlag // -outputdir flag + testShuffle shuffleFlag // -shuffle flag + testTimeout time.Duration // -timeout flag + testV testVFlag // -v flag + testVet = vetFlag{flags: defaultVetFlags} // -vet flag +) + +type testVFlag struct { + on bool // -v is set in some form + json bool // -v=test2json is set, to make output better for test2json +} + +func (*testVFlag) IsBoolFlag() bool { return true } + +func (f *testVFlag) Set(arg string) error { + if v, err := strconv.ParseBool(arg); err == nil { + f.on = v + f.json = false + return nil + } + if arg == "test2json" { + f.on = true + f.json = arg == "test2json" + return nil + } + return fmt.Errorf("invalid flag -test.v=%s", arg) +} + +func (f *testVFlag) String() string { + if f.json { + return "test2json" + } + if f.on { + return "true" + } + return "false" +} + +var ( + testArgs []string + pkgArgs []string + pkgs []*load.Package + + testHelp bool // -help option passed to test via -args + + testKillTimeout = 100 * 365 * 24 * time.Hour // backup alarm; defaults to about a century if no timeout is set + testWaitDelay time.Duration // how long to wait for output to close after a test binary exits; zero means unlimited + 
testCacheExpire time.Time // ignore cached test results before this time + + testBlockProfile, testCPUProfile, testMemProfile, testMutexProfile, testTrace string // profiling flag that limits test to one package + + testODir = false +) + +// testProfile returns the name of an arbitrary single-package profiling flag +// that is set, if any. +func testProfile() string { + switch { + case testBlockProfile != "": + return "-blockprofile" + case testCPUProfile != "": + return "-cpuprofile" + case testMemProfile != "": + return "-memprofile" + case testMutexProfile != "": + return "-mutexprofile" + case testTrace != "": + return "-trace" + default: + return "" + } +} + +// testNeedBinary reports whether the test needs to keep the binary around. +func testNeedBinary() bool { + switch { + case testBlockProfile != "": + return true + case testCPUProfile != "": + return true + case testMemProfile != "": + return true + case testMutexProfile != "": + return true + case testO != "": + return true + default: + return false + } +} + +// testShowPass reports whether the output for a passing test should be shown. +func testShowPass() bool { + return testV.on || testList != "" || testHelp +} + +var defaultVetFlags = []string{ + // TODO(rsc): Decide which tests are enabled by default. + // See golang.org/issue/18085. + // "-asmdecl", + // "-assign", + "-atomic", + "-bool", + "-buildtags", + // "-cgocall", + // "-composites", + // "-copylocks", + "-directive", + "-errorsas", + // "-httpresponse", + "-ifaceassert", + // "-lostcancel", + // "-methods", + "-nilfunc", + "-printf", + // "-rangeloops", + // "-shift", + "-slog", + "-stringintconv", + // "-structtags", + // "-tests", + // "-unreachable", + // "-unsafeptr", + // "-unusedresult", +} + +func runTest(ctx context.Context, cmd *base.Command, args []string) { + pkgArgs, testArgs = testFlags(args) + modload.InitWorkfile() // The test command does custom flag processing; initialize workspaces after that. 
+ + if cfg.DebugTrace != "" { + var close func() error + var err error + ctx, close, err = trace.Start(ctx, cfg.DebugTrace) + if err != nil { + base.Fatalf("failed to start trace: %v", err) + } + defer func() { + if err := close(); err != nil { + base.Fatalf("failed to stop trace: %v", err) + } + }() + } + + ctx, span := trace.StartSpan(ctx, fmt.Sprint("Running ", cmd.Name(), " command")) + defer span.Done() + + work.FindExecCmd() // initialize cached result + + work.BuildInit() + work.VetFlags = testVet.flags + work.VetExplicit = testVet.explicit + + pkgOpts := load.PackageOpts{ModResolveTests: true} + pkgs = load.PackagesAndErrors(ctx, pkgOpts, pkgArgs) + load.CheckPackageErrors(pkgs) + if len(pkgs) == 0 { + base.Fatalf("no packages to test") + } + + if testFuzz != "" { + if !platform.FuzzSupported(cfg.Goos, cfg.Goarch) { + base.Fatalf("-fuzz flag is not supported on %s/%s", cfg.Goos, cfg.Goarch) + } + if len(pkgs) != 1 { + base.Fatalf("cannot use -fuzz flag with multiple packages") + } + if testCoverProfile != "" { + base.Fatalf("cannot use -coverprofile flag with -fuzz flag") + } + if profileFlag := testProfile(); profileFlag != "" { + base.Fatalf("cannot use %s flag with -fuzz flag", profileFlag) + } + + // Reject the '-fuzz' flag if the package is outside the main module. + // Otherwise, if fuzzing identifies a failure it could corrupt checksums in + // the module cache (or permanently alter the behavior of std tests for all + // users) by writing the failing input to the package's testdata directory. + // (See https://golang.org/issue/48495 and test_fuzz_modcache.txt.) 
+ mainMods := modload.MainModules + if m := pkgs[0].Module; m != nil && m.Path != "" { + if !mainMods.Contains(m.Path) { + base.Fatalf("cannot use -fuzz flag on package outside the main module") + } + } else if pkgs[0].Standard && modload.Enabled() { + // Because packages in 'std' and 'cmd' are part of the standard library, + // they are only treated as part of a module in 'go mod' subcommands and + // 'go get'. However, we still don't want to accidentally corrupt their + // testdata during fuzzing, nor do we want to fail with surprising errors + // if GOROOT isn't writable (as is often the case for Go toolchains + // installed through package managers). + // + // If the user is requesting to fuzz a standard-library package, ensure + // that they are in the same module as that package (just like when + // fuzzing any other package). + if strings.HasPrefix(pkgs[0].ImportPath, "cmd/") { + if !mainMods.Contains("cmd") || !mainMods.InGorootSrc(module.Version{Path: "cmd"}) { + base.Fatalf("cannot use -fuzz flag on package outside the main module") + } + } else { + if !mainMods.Contains("std") || !mainMods.InGorootSrc(module.Version{Path: "std"}) { + base.Fatalf("cannot use -fuzz flag on package outside the main module") + } + } + } + } + if testProfile() != "" && len(pkgs) != 1 { + base.Fatalf("cannot use %s flag with multiple packages", testProfile()) + } + + if testO != "" { + if strings.HasSuffix(testO, "/") || strings.HasSuffix(testO, string(os.PathSeparator)) { + testODir = true + } else if fi, err := os.Stat(testO); err == nil && fi.IsDir() { + testODir = true + } + } + + if len(pkgs) > 1 && (testC || testO != "") && !base.IsNull(testO) { + if testO != "" && !testODir { + base.Fatalf("with multiple packages, -o must refer to a directory or %s", os.DevNull) + } + + pkgsForBinary := map[string][]*load.Package{} + + for _, p := range pkgs { + testBinary := testBinaryName(p) + pkgsForBinary[testBinary] = append(pkgsForBinary[testBinary], p) + } + + for testBinary, 
pkgs := range pkgsForBinary { + if len(pkgs) > 1 { + var buf strings.Builder + for _, pkg := range pkgs { + buf.WriteString(pkg.ImportPath) + buf.WriteString("\n") + } + + base.Errorf("cannot write test binary %s for multiple packages:\n%s", testBinary, buf.String()) + } + } + + base.ExitIfErrors() + } + + initCoverProfile() + defer closeCoverProfile() + + // If a test timeout is finite, set our kill timeout + // to that timeout plus one minute. This is a backup alarm in case + // the test wedges with a goroutine spinning and its background + // timer does not get a chance to fire. + // Don't set this if fuzzing, since it should be able to run + // indefinitely. + if testTimeout > 0 && testFuzz == "" { + // The WaitDelay for the test process depends on both the OS I/O and + // scheduling overhead and the amount of I/O generated by the test just + // before it exits. We set the minimum at 5 seconds to account for the OS + // overhead, and scale it up from there proportional to the overall test + // timeout on the assumption that the time to write and read a goroutine + // dump from a timed-out test process scales roughly with the overall + // running time of the test. + // + // This is probably too generous when the timeout is very long, but it seems + // better to hard-code a scale factor than to hard-code a constant delay. + if wd := testTimeout / 10; wd < 5*time.Second { + testWaitDelay = 5 * time.Second + } else { + testWaitDelay = wd + } + + // We expect the test binary to terminate itself (and dump stacks) after + // exactly testTimeout. We give it up to one WaitDelay or one minute, + // whichever is longer, to finish dumping stacks before we send it an + // external signal: if the process has a lot of goroutines, dumping stacks + // after the timeout can take a while. + // + // After the signal is delivered, the test process may have up to one + // additional WaitDelay to finish writing its output streams. 
+ if testWaitDelay < 1*time.Minute { + testKillTimeout = testTimeout + 1*time.Minute + } else { + testKillTimeout = testTimeout + testWaitDelay + } + } + + // Read testcache expiration time, if present. + // (We implement go clean -testcache by writing an expiration date + // instead of searching out and deleting test result cache entries.) + if dir := cache.DefaultDir(); dir != "off" { + if data, _ := lockedfile.Read(filepath.Join(dir, "testexpire.txt")); len(data) > 0 && data[len(data)-1] == '\n' { + if t, err := strconv.ParseInt(string(data[:len(data)-1]), 10, 64); err == nil { + testCacheExpire = time.Unix(0, t) + } + } + } + + b := work.NewBuilder("") + defer func() { + if err := b.Close(); err != nil { + base.Fatal(err) + } + }() + + var builds, runs, prints []*work.Action + var writeCoverMetaAct *work.Action + + if cfg.BuildCoverPkg != nil { + match := make([]func(*load.Package) bool, len(cfg.BuildCoverPkg)) + for i := range cfg.BuildCoverPkg { + match[i] = load.MatchPackage(cfg.BuildCoverPkg[i], base.Cwd()) + } + + // Select for coverage all dependencies matching the -coverpkg + // patterns. + plist := load.TestPackageList(ctx, pkgOpts, pkgs) + testCoverPkgs = load.SelectCoverPackages(plist, match, "test") + if cfg.Experiment.CoverageRedesign && len(testCoverPkgs) > 0 { + // create a new singleton action that will collect up the + // meta-data files from all of the packages mentioned in + // "-coverpkg" and write them to a summary file. This new + // action will depend on all the build actions for the + // test packages, and all the run actions for these + // packages will depend on it. Motivating example: + // supposed we have a top level directory with three + // package subdirs, "a", "b", and "c", and + // from the top level, a user runs "go test -coverpkg=./... ./...". 
+ // This will result in (roughly) the following action graph: + // + // build("a") build("b") build("c") + // | | | + // link("a.test") link("b.test") link("c.test") + // | | | + // run("a.test") run("b.test") run("c.test") + // | | | + // print print print + // + // When -coverpkg= is in effect, we want to + // express the coverage percentage for each package as a + // fraction of *all* the statements that match the + // pattern, hence if "c" doesn't import "a", we need to + // pass as meta-data file for "a" (emitted during the + // package "a" build) to the package "c" run action, so + // that it can be incorporated with "c"'s regular + // metadata. To do this, we add edges from each compile + // action to a "writeCoverMeta" action, then from the + // writeCoverMeta action to each run action. Updated + // graph: + // + // build("a") build("b") build("c") + // | \ / | / | + // | v v | / | + // | writemeta <-|-------------+ | + // | ||| | | + // | ||\ | | + // link("a.test")/\ \ link("b.test") link("c.test") + // | / \ +-|--------------+ | + // | / \ | \ | + // | v v | v | + // run("a.test") run("b.test") run("c.test") + // | | | + // print print print + // + writeCoverMetaAct = &work.Action{ + Mode: "write coverage meta-data file", + Actor: work.ActorFunc(work.WriteCoverMetaFilesFile), + Objdir: b.NewObjdir(), + } + for _, p := range testCoverPkgs { + p.Internal.Cover.GenMeta = true + } + } + } + + // Inform the compiler that it should instrument the binary at + // build-time when fuzzing is enabled. + if testFuzz != "" { + // Don't instrument packages which may affect coverage guidance but are + // unlikely to be useful. Most of these are used by the testing or + // internal/fuzz packages concurrently with fuzzing. 
+ var skipInstrumentation = map[string]bool{ + "context": true, + "internal/fuzz": true, + "reflect": true, + "runtime": true, + "sync": true, + "sync/atomic": true, + "syscall": true, + "testing": true, + "time": true, + } + for _, p := range load.TestPackageList(ctx, pkgOpts, pkgs) { + if !skipInstrumentation[p.ImportPath] { + p.Internal.FuzzInstrument = true + } + } + } + + // Collect all the packages imported by the packages being tested. + allImports := make(map[*load.Package]bool) + for _, p := range pkgs { + if p.Error != nil && p.Error.IsImportCycle { + continue + } + for _, p1 := range p.Internal.Imports { + allImports[p1] = true + } + } + + if cfg.BuildCover { + for _, p := range pkgs { + // sync/atomic import is inserted by the cover tool if + // we're using atomic mode (and not compiling + // sync/atomic package itself). See #18486 and #57445. + // Note that this needs to be done prior to any of the + // builderTest invocations below, due to the fact that + // a given package in the 'pkgs' list may import + // package Q which appears later in the list (if this + // happens we'll wind up building the Q compile action + // before updating its deps to include sync/atomic). + if cfg.BuildCoverMode == "atomic" && p.ImportPath != "sync/atomic" { + load.EnsureImport(p, "sync/atomic") + } + // Tag the package for static meta-data generation if no + // test files (this works only with the new coverage + // design). Do this here (as opposed to in builderTest) so + // as to handle the case where we're testing multiple + // packages and one of the earlier packages imports a + // later package. Note that if -coverpkg is in effect + // p.Internal.Cover.GenMeta will wind up being set for + // all matching packages. + if len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 && + cfg.BuildCoverPkg == nil && + cfg.Experiment.CoverageRedesign { + p.Internal.Cover.GenMeta = true + } + } + } + + // Prepare build + run + print actions for all packages being tested. 
+ for _, p := range pkgs { + buildTest, runTest, printTest, err := builderTest(b, ctx, pkgOpts, p, allImports[p], writeCoverMetaAct) + if err != nil { + str := err.Error() + str = strings.TrimPrefix(str, "\n") + if p.ImportPath != "" { + base.Errorf("# %s\n%s", p.ImportPath, str) + } else { + base.Errorf("%s", str) + } + fmt.Printf("FAIL\t%s [setup failed]\n", p.ImportPath) + continue + } + builds = append(builds, buildTest) + runs = append(runs, runTest) + prints = append(prints, printTest) + } + + // Order runs for coordinating start JSON prints. + ch := make(chan struct{}) + close(ch) + for _, a := range runs { + if r, ok := a.Actor.(*runTestActor); ok { + r.prev = ch + ch = make(chan struct{}) + r.next = ch + } + } + + // Ultimately the goal is to print the output. + root := &work.Action{Mode: "go test", Actor: work.ActorFunc(printExitStatus), Deps: prints} + + // Force the printing of results to happen in order, + // one at a time. + for i, a := range prints { + if i > 0 { + a.Deps = append(a.Deps, prints[i-1]) + } + } + + // Force benchmarks to run in serial. + if !testC && (testBench != "") { + // The first run must wait for all builds. + // Later runs must wait for the previous run's print. + for i, run := range runs { + if i == 0 { + run.Deps = append(run.Deps, builds...) 
+ } else { + run.Deps = append(run.Deps, prints[i-1]) + } + } + } + + b.Do(ctx, root) +} + +var windowsBadWords = []string{ + "install", + "patch", + "setup", + "update", +} + +func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, p *load.Package, imported bool, writeCoverMetaAct *work.Action) (buildAction, runAction, printAction *work.Action, err error) { + if len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 { + if cfg.BuildCover && cfg.Experiment.CoverageRedesign { + if p.Internal.Cover.GenMeta { + p.Internal.Cover.Mode = cfg.BuildCoverMode + } + } + build := b.CompileAction(work.ModeBuild, work.ModeBuild, p) + run := &work.Action{ + Mode: "test run", + Actor: new(runTestActor), + Deps: []*work.Action{build}, + Objdir: b.NewObjdir(), + Package: p, + IgnoreFail: true, // run (prepare output) even if build failed + } + if writeCoverMetaAct != nil { + // There is no real "run" for this package (since there + // are no tests), but if coverage is turned on, we can + // collect coverage data for the code in the package by + // asking cmd/cover for a static meta-data file as part of + // the package build. This static meta-data file is then + // consumed by a pseudo-action (writeCoverMetaAct) that + // adds it to a summary file, then this summary file is + // consumed by the various "run test" actions. Below we + // add a dependence edge between the build action and the + // "write meta files" pseudo-action, and then another dep + // from writeCoverMetaAct to the run action. See the + // comment in runTest() at the definition of + // writeCoverMetaAct for more details. 
+ run.Deps = append(run.Deps, writeCoverMetaAct) + writeCoverMetaAct.Deps = append(writeCoverMetaAct.Deps, build) + } + addTestVet(b, p, run, nil) + print := &work.Action{ + Mode: "test print", + Actor: work.ActorFunc(builderPrintTest), + Deps: []*work.Action{run}, + Package: p, + IgnoreFail: true, // print even if test failed + } + return build, run, print, nil + } + + // Build Package structs describing: + // pmain - pkg.test binary + // ptest - package + test files + // pxtest - package of external test files + var cover *load.TestCover + if cfg.BuildCover { + cover = &load.TestCover{ + Mode: cfg.BuildCoverMode, + Local: cfg.BuildCoverPkg == nil, + Pkgs: testCoverPkgs, + Paths: cfg.BuildCoverPkg, + } + } + pmain, ptest, pxtest, err := load.TestPackagesFor(ctx, pkgOpts, p, cover) + if err != nil { + return nil, nil, nil, err + } + + // If imported is true then this package is imported by some + // package being tested. Make building the test version of the + // package depend on building the non-test version, so that we + // only report build errors once. Issue #44624. + if imported && ptest != p { + buildTest := b.CompileAction(work.ModeBuild, work.ModeBuild, ptest) + buildP := b.CompileAction(work.ModeBuild, work.ModeBuild, p) + buildTest.Deps = append(buildTest.Deps, buildP) + } + + testBinary := testBinaryName(p) + + testDir := b.NewObjdir() + if err := b.BackgroundShell().Mkdir(testDir); err != nil { + return nil, nil, nil, err + } + + pmain.Dir = testDir + pmain.Internal.OmitDebug = !testC && !testNeedBinary() + if pmain.ImportPath == "runtime.test" { + // The runtime package needs a symbolized binary for its tests. + // See runtime/unsafepoint_test.go. + pmain.Internal.OmitDebug = false + } + + if !cfg.BuildN { + // writeTestmain writes _testmain.go, + // using the test description gathered in t. 
+ if err := os.WriteFile(testDir+"_testmain.go", *pmain.Internal.TestmainGo, 0666); err != nil { + return nil, nil, nil, err + } + } + + // Set compile objdir to testDir we've already created, + // so that the default file path stripping applies to _testmain.go. + b.CompileAction(work.ModeBuild, work.ModeBuild, pmain).Objdir = testDir + + a := b.LinkAction(work.ModeBuild, work.ModeBuild, pmain) + a.Target = testDir + testBinary + cfg.ExeSuffix + if cfg.Goos == "windows" { + // There are many reserved words on Windows that, + // if used in the name of an executable, cause Windows + // to try to ask for extra permissions. + // The word list includes setup, install, update, and patch, + // but it does not appear to be defined anywhere. + // We have run into this trying to run the + // go.codereview/patch tests. + // For package names containing those words, use test.test.exe + // instead of pkgname.test.exe. + // Note that this file name is only used in the Go command's + // temporary directory. If the -c or other flags are + // given, the code below will still use pkgname.test.exe. + // There are two user-visible effects of this change. + // First, you can actually run 'go test' in directories that + // have names that Windows thinks are installer-like, + // without getting a dialog box asking for more permissions. + // Second, in the Windows process listing during go test, + // the test shows up as test.test.exe, not pkgname.test.exe. + // That second one is a drawback, but it seems a small + // price to pay for the test running at all. + // If maintaining the list of bad words is too onerous, + // we could just do this always on Windows. + for _, bad := range windowsBadWords { + if strings.Contains(testBinary, bad) { + a.Target = testDir + "test.test" + cfg.ExeSuffix + break + } + } + } + buildAction = a + var installAction, cleanAction *work.Action + if testC || testNeedBinary() { + // -c or profiling flag: create action to copy binary to ./test.out. 
+ target := filepath.Join(base.Cwd(), testBinary+cfg.ExeSuffix) + isNull := false + + if testO != "" { + target = testO + + if testODir { + if filepath.IsAbs(target) { + target = filepath.Join(target, testBinary+cfg.ExeSuffix) + } else { + target = filepath.Join(base.Cwd(), target, testBinary+cfg.ExeSuffix) + } + } else { + if base.IsNull(target) { + isNull = true + } else if !filepath.IsAbs(target) { + target = filepath.Join(base.Cwd(), target) + } + } + } + + if isNull { + runAction = buildAction + } else { + pmain.Target = target + installAction = &work.Action{ + Mode: "test build", + Actor: work.ActorFunc(work.BuildInstallFunc), + Deps: []*work.Action{buildAction}, + Package: pmain, + Target: target, + } + runAction = installAction // make sure runAction != nil even if not running test + } + } + + var vetRunAction *work.Action + if testC { + printAction = &work.Action{Mode: "test print (nop)", Package: p, Deps: []*work.Action{runAction}} // nop + vetRunAction = printAction + } else { + // run test + rta := &runTestActor{ + writeCoverMetaAct: writeCoverMetaAct, + } + runAction = &work.Action{ + Mode: "test run", + Actor: rta, + Deps: []*work.Action{buildAction}, + Package: p, + IgnoreFail: true, // run (prepare output) even if build failed + TryCache: rta.c.tryCache, + } + if writeCoverMetaAct != nil { + // If writeCoverMetaAct != nil, this indicates that our + // "go test -coverpkg" run actions will need to read the + // meta-files summary file written by writeCoverMetaAct, + // so add a dependence edge from writeCoverMetaAct to the + // run action. + runAction.Deps = append(runAction.Deps, writeCoverMetaAct) + if !p.IsTestOnly() { + // Package p is not test only, meaning that the build + // action for p may generate a static meta-data file. + // Add a dependence edge from p to writeCoverMetaAct, + // which needs to know the name of that meta-data + // file. 
+ compileAction := b.CompileAction(work.ModeBuild, work.ModeBuild, p) + writeCoverMetaAct.Deps = append(writeCoverMetaAct.Deps, compileAction) + } + } + runAction.Objdir = testDir + vetRunAction = runAction + cleanAction = &work.Action{ + Mode: "test clean", + Actor: work.ActorFunc(builderCleanTest), + Deps: []*work.Action{runAction}, + Package: p, + IgnoreFail: true, // clean even if test failed + Objdir: testDir, + } + printAction = &work.Action{ + Mode: "test print", + Actor: work.ActorFunc(builderPrintTest), + Deps: []*work.Action{cleanAction}, + Package: p, + IgnoreFail: true, // print even if test failed + } + } + + if len(ptest.GoFiles)+len(ptest.CgoFiles) > 0 { + addTestVet(b, ptest, vetRunAction, installAction) + } + if pxtest != nil { + addTestVet(b, pxtest, vetRunAction, installAction) + } + + if installAction != nil { + if runAction != installAction { + installAction.Deps = append(installAction.Deps, runAction) + } + if cleanAction != nil { + cleanAction.Deps = append(cleanAction.Deps, installAction) + } + } + + return buildAction, runAction, printAction, nil +} + +func addTestVet(b *work.Builder, p *load.Package, runAction, installAction *work.Action) { + if testVet.off { + return + } + + vet := b.VetAction(work.ModeBuild, work.ModeBuild, p) + runAction.Deps = append(runAction.Deps, vet) + // Install will clean the build directory. + // Make sure vet runs first. + // The install ordering in b.VetAction does not apply here + // because we are using a custom installAction (created above). + if installAction != nil { + installAction.Deps = append(installAction.Deps, vet) + } +} + +var noTestsToRun = []byte("\ntesting: warning: no tests to run\n") +var noFuzzTestsToFuzz = []byte("\ntesting: warning: no fuzz tests to fuzz\n") +var tooManyFuzzTestsToFuzz = []byte("\ntesting: warning: -fuzz matches more than one fuzz test, won't fuzz\n") + +// runTestActor is the actor for running a test. 
type runTestActor struct {
	// c caches/looks up the result of this single test run.
	c runCache

	// writeCoverMetaAct points to the pseudo-action for collecting
	// coverage meta-data files for selected -cover test runs. See the
	// comment in runTest at the definition of writeCoverMetaAct for
	// more details.
	writeCoverMetaAct *work.Action

	// sequencing of json start messages, to preserve test order
	prev <-chan struct{} // wait to start until prev is closed
	next chan<- struct{} // close next once the next test can start.
}

// runCache is the cache for running a single test.
type runCache struct {
	disableCache bool // cache should be disabled for this run

	// buf holds the cached test output once a lookup succeeds.
	buf *bytes.Buffer
	// id1 is the cache key derived from the link action ID; id2 is the
	// key derived from the built test binary's content ID. Results are
	// saved under both (see saveOutput) so either lookup path can hit.
	id1 cache.ActionID
	id2 cache.ActionID
}

// stdoutMu and lockedStdout provide a locked standard output
// that guarantees never to interlace writes from multiple
// goroutines, so that we can have multiple JSON streams writing
// to a lockedStdout simultaneously and know that events will
// still be intelligible.
var stdoutMu sync.Mutex

type lockedStdout struct{}

// Write forwards b to os.Stdout while holding stdoutMu, so a single
// Write is never interleaved with writes from other goroutines.
func (lockedStdout) Write(b []byte) (int, error) {
	stdoutMu.Lock()
	defer stdoutMu.Unlock()
	return os.Stdout.Write(b)
}

// Act runs the test binary for a.Package (or prints a placeholder when the
// package has no test files), honoring JSON conversion, test-result caching,
// coverage collection, and the last-ditch kill timeout. It writes the final
// "ok"/"FAIL" line itself and records the run's output in a.TestOutput.
func (r *runTestActor) Act(b *work.Builder, ctx context.Context, a *work.Action) error {
	sh := b.Shell(a)

	// Wait for previous test to get started and print its first json line.
	select {
	case <-r.prev:
	case <-base.Interrupted:
		// We can't wait for the previous test action to complete: we don't start
		// new actions after an interrupt, so if that action wasn't already running
		// it might never happen. Instead, just don't log anything for this action.
		base.SetExitStatus(1)
		return nil
	}

	var stdout io.Writer = os.Stdout
	var err error
	if testJSON {
		json := test2json.NewConverter(lockedStdout{}, a.Package.ImportPath, test2json.Timestamp)
		defer func() {
			// err is captured by reference so the converter reports the
			// final outcome decided later in this function.
			json.Exited(err)
			json.Close()
		}()
		stdout = json
	}

	// Release next test to start (test2json.NewConverter writes the start event).
	close(r.next)

	if a.Failed {
		// We were unable to build the binary.
		a.Failed = false
		fmt.Fprintf(stdout, "FAIL\t%s [build failed]\n", a.Package.ImportPath)
		// Tell the JSON converter that this was a failure, not a passing run.
		err = errors.New("build failed")
		base.SetExitStatus(1)
		return nil
	}

	// coverProfTempFile names the per-action temporary coverage profile
	// fragment, later merged into the final -coverprofile output.
	coverProfTempFile := func(a *work.Action) string {
		if a.Objdir == "" {
			panic("internal error: objdir not set in coverProfTempFile")
		}
		return a.Objdir + "_cover_.out"
	}

	if p := a.Package; len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
		// No test files: there is nothing to run, but under the new
		// coverage design we may still report a statement-coverage
		// percentage from the build-time static meta-data file.
		reportNoTestFiles := true
		if cfg.BuildCover && cfg.Experiment.CoverageRedesign && p.Internal.Cover.GenMeta {
			if err := sh.Mkdir(a.Objdir); err != nil {
				return err
			}
			mf, err := work.BuildActionCoverMetaFile(a)
			if err != nil {
				return err
			} else if mf != "" {
				reportNoTestFiles = false
				// Write out "percent statements covered".
				if err := work.WriteCoveragePercent(b, a, mf, stdout); err != nil {
					return err
				}
				// If -coverprofile is in effect, then generate a
				// coverage profile fragment for this package and
				// merge it with the final -coverprofile output file.
				if coverMerge.f != nil {
					cp := coverProfTempFile(a)
					if err := work.WriteCoverageProfile(b, a, mf, cp, stdout); err != nil {
						return err
					}
					mergeCoverProfile(stdout, cp)
				}
			}
		}
		if reportNoTestFiles {
			fmt.Fprintf(stdout, "? \t%s\t[no test files]\n", p.ImportPath)
		}
		return nil
	}

	var buf bytes.Buffer
	if len(pkgArgs) == 0 || testBench != "" || testFuzz != "" {
		// Stream test output (no buffering) when no package has
		// been given on the command line (implicit current directory)
		// or when benchmarking or fuzzing.
		// No change to stdout.
	} else {
		// If we're only running a single package under test or if parallelism is
		// set to 1, and if we're displaying all output (testShowPass), we can
		// hurry the output along, echoing it as soon as it comes in.
		// We still have to copy to &buf for caching the result. This special
		// case was introduced in Go 1.5 and is intentionally undocumented:
		// the exact details of output buffering are up to the go command and
		// subject to change. It would be nice to remove this special case
		// entirely, but it is surely very helpful to see progress being made
		// when tests are run on slow single-CPU ARM systems.
		//
		// If we're showing JSON output, then display output as soon as
		// possible even when multiple tests are being run: the JSON output
		// events are attributed to specific package tests, so interlacing them
		// is OK.
		if testShowPass() && (len(pkgs) == 1 || cfg.BuildP == 1) || testJSON {
			// Write both to stdout and buf, for possible saving
			// to cache, and for looking for the "no tests to run" message.
			stdout = io.MultiWriter(stdout, &buf)
		} else {
			stdout = &buf
		}
	}

	if r.c.buf == nil {
		// We did not find a cached result using the link step action ID,
		// so we ran the link step. Try again now with the link output
		// content ID. The attempt using the action ID makes sure that
		// if the link inputs don't change, we reuse the cached test
		// result without even rerunning the linker. The attempt using
		// the link output (test binary) content ID makes sure that if
		// we have different link inputs but the same final binary,
		// we still reuse the cached test result.
		// c.saveOutput will store the result under both IDs.
		r.c.tryCacheWithID(b, a, a.Deps[0].BuildContentID())
	}
	if r.c.buf != nil {
		// Cache hit: replay the saved output and skip running the binary.
		if stdout != &buf {
			stdout.Write(r.c.buf.Bytes())
			r.c.buf.Reset()
		}
		a.TestOutput = r.c.buf
		return nil
	}

	execCmd := work.FindExecCmd()
	testlogArg := []string{}
	if !r.c.disableCache && len(execCmd) == 0 {
		// Ask the test binary to log its inputs (env vars, files) so the
		// result can be cached; see computeTestInputsID/saveOutput.
		testlogArg = []string{"-test.testlogfile=" + a.Objdir + "testlog.txt"}
	}
	panicArg := "-test.paniconexit0"
	fuzzArg := []string{}
	if testFuzz != "" {
		fuzzCacheDir := filepath.Join(cache.Default().FuzzDir(), a.Package.ImportPath)
		fuzzArg = []string{"-test.fuzzcachedir=" + fuzzCacheDir}
	}
	coverdirArg := []string{}
	addToEnv := ""
	if cfg.BuildCover {
		gcd := filepath.Join(a.Objdir, "gocoverdir")
		if err := sh.Mkdir(gcd); err != nil {
			// If we can't create a temp dir, terminate immediately
			// with an error as opposed to returning an error to the
			// caller; failed MkDir most likely indicates that we're
			// out of disk space or there is some other systemic error
			// that will make forward progress unlikely.
			base.Fatalf("failed to create temporary dir: %v", err)
		}
		coverdirArg = append(coverdirArg, "-test.gocoverdir="+gcd)
		if r.writeCoverMetaAct != nil {
			// Copy the meta-files file over into the test's coverdir
			// directory so that the coverage runtime support will be
			// able to find it.
			src := r.writeCoverMetaAct.Objdir + coverage.MetaFilesFileName
			dst := filepath.Join(gcd, coverage.MetaFilesFileName)
			if err := sh.CopyFile(dst, src, 0666, false); err != nil {
				return err
			}
		}
		// Even though we are passing the -test.gocoverdir option to
		// the test binary, also set GOCOVERDIR as well. This is
		// intended to help with tests that run "go build" to build
		// fresh copies of tools to test as part of the testing.
		addToEnv = "GOCOVERDIR=" + gcd
	}
	args := str.StringList(execCmd, a.Deps[0].BuiltTarget(), testlogArg, panicArg, fuzzArg, coverdirArg, testArgs)

	if testCoverProfile != "" {
		// Write coverage to temporary profile, for merging later.
		for i, arg := range args {
			if strings.HasPrefix(arg, "-test.coverprofile=") {
				args[i] = "-test.coverprofile=" + coverProfTempFile(a)
			}
		}
	}

	if cfg.BuildN || cfg.BuildX {
		sh.ShowCmd("", "%s", strings.Join(args, " "))
		if cfg.BuildN {
			return nil
		}
	}

	// Normally, the test will terminate itself when the timeout expires,
	// but add a last-ditch deadline to detect and stop wedged binaries.
	ctx, cancel := context.WithTimeout(ctx, testKillTimeout)
	defer cancel()

	// Now we're ready to actually run the command.
	//
	// If the -o flag is set, or if at some point we change cmd/go to start
	// copying test executables into the build cache, we may run into spurious
	// ETXTBSY errors on Unix platforms (see https://go.dev/issue/22315).
	//
	// Since we know what causes those, and we know that they should resolve
	// quickly (the ETXTBSY error will resolve as soon as the subprocess
	// holding the descriptor open reaches its 'exec' call), we retry them
	// in a loop.

	var (
		cmd            *exec.Cmd
		t0             time.Time
		cancelKilled   = false
		cancelSignaled = false
	)
	for {
		cmd = exec.CommandContext(ctx, args[0], args[1:]...)
		cmd.Dir = a.Package.Dir

		env := slices.Clip(cfg.OrigEnv)
		env = base.AppendPATH(env)
		env = base.AppendPWD(env, cmd.Dir)
		cmd.Env = env
		if addToEnv != "" {
			cmd.Env = append(cmd.Env, addToEnv)
		}

		cmd.Stdout = stdout
		cmd.Stderr = stdout

		// Cancel runs when the kill-timeout context expires; it records
		// whether we signaled or killed so the failure message below can
		// say which happened.
		cmd.Cancel = func() error {
			if base.SignalTrace == nil {
				err := cmd.Process.Kill()
				if err == nil {
					cancelKilled = true
				}
				return err
			}

			// Send a quit signal in the hope that the program will print
			// a stack trace and exit.
			err := cmd.Process.Signal(base.SignalTrace)
			if err == nil {
				cancelSignaled = true
			}
			return err
		}
		cmd.WaitDelay = testWaitDelay

		base.StartSigHandlers()
		t0 = time.Now()
		err = cmd.Run()

		if !isETXTBSY(err) {
			// We didn't hit the race in #22315, so there is no reason to retry the
			// command.
			break
		}
	}

	out := buf.Bytes()
	a.TestOutput = &buf
	t := fmt.Sprintf("%.3fs", time.Since(t0).Seconds())

	mergeCoverProfile(cmd.Stdout, a.Objdir+"_cover_.out")

	if err == nil {
		norun := ""
		if !testShowPass() && !testJSON {
			buf.Reset()
		}
		// The warning banners are matched both at the very start of the
		// output (without their leading newline) and anywhere inside it.
		if bytes.HasPrefix(out, noTestsToRun[1:]) || bytes.Contains(out, noTestsToRun) {
			norun = " [no tests to run]"
		}
		if bytes.HasPrefix(out, noFuzzTestsToFuzz[1:]) || bytes.Contains(out, noFuzzTestsToFuzz) {
			norun = " [no fuzz tests to fuzz]"
		}
		if bytes.HasPrefix(out, tooManyFuzzTestsToFuzz[1:]) || bytes.Contains(out, tooManyFuzzTestsToFuzz) {
			norun = "[-fuzz matches more than one fuzz test, won't fuzz]"
		}
		if len(out) > 0 && !bytes.HasSuffix(out, []byte("\n")) {
			// Ensure that the output ends with a newline before the "ok"
			// line we're about to print (https://golang.org/issue/49317).
			cmd.Stdout.Write([]byte("\n"))
		}
		fmt.Fprintf(cmd.Stdout, "ok \t%s\t%s%s%s\n", a.Package.ImportPath, t, coveragePercentage(out), norun)
		r.c.saveOutput(a)
	} else {
		base.SetExitStatus(1)
		if cancelSignaled {
			fmt.Fprintf(cmd.Stdout, "*** Test killed with %v: ran too long (%v).\n", base.SignalTrace, testKillTimeout)
		} else if cancelKilled {
			fmt.Fprintf(cmd.Stdout, "*** Test killed: ran too long (%v).\n", testKillTimeout)
		} else if errors.Is(err, exec.ErrWaitDelay) {
			fmt.Fprintf(cmd.Stdout, "*** Test I/O incomplete %v after exiting.\n", cmd.WaitDelay)
		}
		var ee *exec.ExitError
		if len(out) == 0 || !errors.As(err, &ee) || !ee.Exited() {
			// If there was no test output, print the exit status so that the reason
			// for failure is clear.
			fmt.Fprintf(cmd.Stdout, "%s\n", err)
		} else if !bytes.HasSuffix(out, []byte("\n")) {
			// Otherwise, ensure that the output ends with a newline before the FAIL
			// line we're about to print (https://golang.org/issue/49317).
			cmd.Stdout.Write([]byte("\n"))
		}

		// NOTE(golang.org/issue/37555): test2json reports that a test passes
		// unless "FAIL" is printed at the beginning of a line. The test may not
		// actually print that if it panics, exits, or terminates abnormally,
		// so we print it here. We can't always check whether it was printed
		// because some tests need stdout to be a terminal (golang.org/issue/34791),
		// not a pipe.
		// TODO(golang.org/issue/29062): tests that exit with status 0 without
		// printing a final result should fail.
		prefix := ""
		if testJSON || testV.json {
			prefix = "\x16"
		}
		fmt.Fprintf(cmd.Stdout, "%sFAIL\t%s\t%s\n", prefix, a.Package.ImportPath, t)
	}

	if cmd.Stdout != &buf {
		buf.Reset() // cmd.Stdout was going to os.Stdout already
	}
	return nil
}

// tryCache is called just before the link attempt,
// to see if the test result is cached and therefore the link is unneeded.
// It reports whether the result can be satisfied from cache.
func (c *runCache) tryCache(b *work.Builder, a *work.Action) bool {
	return c.tryCacheWithID(b, a, a.Deps[0].BuildActionID())
}

// tryCacheWithID attempts the two-level test-cache lookup (described in the
// long comment below) for the given binary/link ID. On success it fills c.buf
// with the cached output (run time rewritten to "(cached)") and returns true.
// On a non-cacheable configuration it sets c.disableCache and returns false.
func (c *runCache) tryCacheWithID(b *work.Builder, a *work.Action, id string) bool {
	if len(pkgArgs) == 0 {
		// Caching does not apply to "go test",
		// only to "go test foo" (including "go test .").
		if cache.DebugTest {
			fmt.Fprintf(os.Stderr, "testcache: caching disabled in local directory mode\n")
		}
		c.disableCache = true
		return false
	}

	if a.Package.Root == "" {
		// Caching does not apply to tests outside of any module, GOPATH, or GOROOT.
		if cache.DebugTest {
			fmt.Fprintf(os.Stderr, "testcache: caching disabled for package outside of module root, GOPATH, or GOROOT: %s\n", a.Package.ImportPath)
		}
		c.disableCache = true
		return false
	}

	var cacheArgs []string
	for _, arg := range testArgs {
		i := strings.Index(arg, "=")
		if i < 0 || !strings.HasPrefix(arg, "-test.") {
			// Any argument that is not a -test.x=y flag makes the run uncacheable.
			if cache.DebugTest {
				fmt.Fprintf(os.Stderr, "testcache: caching disabled for test argument: %s\n", arg)
			}
			c.disableCache = true
			return false
		}
		switch arg[:i] {
		case "-test.benchtime",
			"-test.cpu",
			"-test.list",
			"-test.parallel",
			"-test.run",
			"-test.short",
			"-test.timeout",
			"-test.failfast",
			"-test.v":
			// These are cacheable.
			// Note that this list is documented above,
			// so if you add to this list, update the docs too.
			cacheArgs = append(cacheArgs, arg)

		default:
			// nothing else is cacheable
			if cache.DebugTest {
				fmt.Fprintf(os.Stderr, "testcache: caching disabled for test argument: %s\n", arg)
			}
			c.disableCache = true
			return false
		}
	}

	// The test cache result fetch is a two-level lookup.
	//
	// First, we use the content hash of the test binary
	// and its command-line arguments to find the
	// list of environment variables and files consulted
	// the last time the test was run with those arguments.
	// (To avoid unnecessary links, we store this entry
	// under two hashes: id1 uses the linker inputs as a
	// proxy for the test binary, and id2 uses the actual
	// test binary. If the linker inputs are unchanged,
	// this way we avoid the link step, even though we
	// do not cache link outputs.)
	//
	// Second, we compute a hash of the values of the
	// environment variables and the content of the files
	// listed in the log from the previous run.
	// Then we look up test output using a combination of
	// the hash from the first part (testID) and the hash of the
	// test inputs (testInputsID).
	//
	// In order to store a new test result, we must redo the
	// testInputsID computation using the log from the run
	// we want to cache, and then we store that new log and
	// the new outputs.

	h := cache.NewHash("testResult")
	fmt.Fprintf(h, "test binary %s args %q execcmd %q", id, cacheArgs, work.ExecCmd)
	testID := h.Sum()
	// Record the ID so saveOutput can store results under both the
	// link-action-ID key (first call) and the content-ID key (second call).
	if c.id1 == (cache.ActionID{}) {
		c.id1 = testID
	} else {
		c.id2 = testID
	}
	if cache.DebugTest {
		fmt.Fprintf(os.Stderr, "testcache: %s: test ID %x => %x\n", a.Package.ImportPath, id, testID)
	}

	// Load list of referenced environment variables and files
	// from last run of testID, and compute hash of that content.
	data, entry, err := cache.GetBytes(cache.Default(), testID)
	if !bytes.HasPrefix(data, testlogMagic) || data[len(data)-1] != '\n' {
		if cache.DebugTest {
			if err != nil {
				fmt.Fprintf(os.Stderr, "testcache: %s: input list not found: %v\n", a.Package.ImportPath, err)
			} else {
				fmt.Fprintf(os.Stderr, "testcache: %s: input list malformed\n", a.Package.ImportPath)
			}
		}
		return false
	}
	testInputsID, err := computeTestInputsID(a, data)
	if err != nil {
		return false
	}
	if cache.DebugTest {
		fmt.Fprintf(os.Stderr, "testcache: %s: test ID %x => input ID %x => %x\n", a.Package.ImportPath, testID, testInputsID, testAndInputKey(testID, testInputsID))
	}

	// Parse cached result in preparation for changing run time to "(cached)".
	// If we can't parse the cached result, don't use it.
	data, entry, err = cache.GetBytes(cache.Default(), testAndInputKey(testID, testInputsID))
	if len(data) == 0 || data[len(data)-1] != '\n' {
		if cache.DebugTest {
			if err != nil {
				fmt.Fprintf(os.Stderr, "testcache: %s: test output not found: %v\n", a.Package.ImportPath, err)
			} else {
				fmt.Fprintf(os.Stderr, "testcache: %s: test output malformed\n", a.Package.ImportPath)
			}
		}
		return false
	}
	if entry.Time.Before(testCacheExpire) {
		// Entry predates the last "go clean -testcache".
		if cache.DebugTest {
			fmt.Fprintf(os.Stderr, "testcache: %s: test output expired due to go clean -testcache\n", a.Package.ImportPath)
		}
		return false
	}
	// The final line of the cached output must be the "ok" summary line;
	// locate it and the tab that follows the import path.
	i := bytes.LastIndexByte(data[:len(data)-1], '\n') + 1
	if !bytes.HasPrefix(data[i:], []byte("ok \t")) {
		if cache.DebugTest {
			fmt.Fprintf(os.Stderr, "testcache: %s: test output malformed\n", a.Package.ImportPath)
		}
		return false
	}
	j := bytes.IndexByte(data[i+len("ok \t"):], '\t')
	if j < 0 {
		if cache.DebugTest {
			fmt.Fprintf(os.Stderr, "testcache: %s: test output malformed\n", a.Package.ImportPath)
		}
		return false
	}
	j += i + len("ok \t") + 1

	// Committed to printing.
	// Splice "(cached)" in place of the recorded elapsed time
	// (digits, '.', and the trailing 's').
	c.buf = new(bytes.Buffer)
	c.buf.Write(data[:j])
	c.buf.WriteString("(cached)")
	for j < len(data) && ('0' <= data[j] && data[j] <= '9' || data[j] == '.' || data[j] == 's') {
		j++
	}
	c.buf.Write(data[j:])
	return true
}

var errBadTestInputs = errors.New("error parsing test inputs")
var testlogMagic = []byte("# test log\n") // known to testing/internal/testdeps/deps.go

// computeTestInputsID computes the "test inputs ID"
// (see comment in tryCacheWithID above) for the
// test log.
+func computeTestInputsID(a *work.Action, testlog []byte) (cache.ActionID, error) { + testlog = bytes.TrimPrefix(testlog, testlogMagic) + h := cache.NewHash("testInputs") + pwd := a.Package.Dir + for _, line := range bytes.Split(testlog, []byte("\n")) { + if len(line) == 0 { + continue + } + s := string(line) + op, name, found := strings.Cut(s, " ") + if !found { + if cache.DebugTest { + fmt.Fprintf(os.Stderr, "testcache: %s: input list malformed (%q)\n", a.Package.ImportPath, line) + } + return cache.ActionID{}, errBadTestInputs + } + switch op { + default: + if cache.DebugTest { + fmt.Fprintf(os.Stderr, "testcache: %s: input list malformed (%q)\n", a.Package.ImportPath, line) + } + return cache.ActionID{}, errBadTestInputs + case "getenv": + fmt.Fprintf(h, "env %s %x\n", name, hashGetenv(name)) + case "chdir": + pwd = name // always absolute + fmt.Fprintf(h, "chdir %s %x\n", name, hashStat(name)) + case "stat": + if !filepath.IsAbs(name) { + name = filepath.Join(pwd, name) + } + if a.Package.Root == "" || search.InDir(name, a.Package.Root) == "" { + // Do not recheck files outside the module, GOPATH, or GOROOT root. + break + } + fmt.Fprintf(h, "stat %s %x\n", name, hashStat(name)) + case "open": + if !filepath.IsAbs(name) { + name = filepath.Join(pwd, name) + } + if a.Package.Root == "" || search.InDir(name, a.Package.Root) == "" { + // Do not recheck files outside the module, GOPATH, or GOROOT root. 
+ break + } + fh, err := hashOpen(name) + if err != nil { + if cache.DebugTest { + fmt.Fprintf(os.Stderr, "testcache: %s: input file %s: %s\n", a.Package.ImportPath, name, err) + } + return cache.ActionID{}, err + } + fmt.Fprintf(h, "open %s %x\n", name, fh) + } + } + sum := h.Sum() + return sum, nil +} + +func hashGetenv(name string) cache.ActionID { + h := cache.NewHash("getenv") + v, ok := os.LookupEnv(name) + if !ok { + h.Write([]byte{0}) + } else { + h.Write([]byte{1}) + h.Write([]byte(v)) + } + return h.Sum() +} + +const modTimeCutoff = 2 * time.Second + +var errFileTooNew = errors.New("file used as input is too new") + +func hashOpen(name string) (cache.ActionID, error) { + h := cache.NewHash("open") + info, err := os.Stat(name) + if err != nil { + fmt.Fprintf(h, "err %v\n", err) + return h.Sum(), nil + } + hashWriteStat(h, info) + if info.IsDir() { + files, err := os.ReadDir(name) + if err != nil { + fmt.Fprintf(h, "err %v\n", err) + } + for _, f := range files { + fmt.Fprintf(h, "file %s ", f.Name()) + finfo, err := f.Info() + if err != nil { + fmt.Fprintf(h, "err %v\n", err) + } else { + hashWriteStat(h, finfo) + } + } + } else if info.Mode().IsRegular() { + // Because files might be very large, do not attempt + // to hash the entirety of their content. Instead assume + // the mtime and size recorded in hashWriteStat above + // are good enough. + // + // To avoid problems for very recent files where a new + // write might not change the mtime due to file system + // mtime precision, reject caching if a file was read that + // is less than modTimeCutoff old. 
+ if time.Since(info.ModTime()) < modTimeCutoff { + return cache.ActionID{}, errFileTooNew + } + } + return h.Sum(), nil +} + +func hashStat(name string) cache.ActionID { + h := cache.NewHash("stat") + if info, err := os.Stat(name); err != nil { + fmt.Fprintf(h, "err %v\n", err) + } else { + hashWriteStat(h, info) + } + if info, err := os.Lstat(name); err != nil { + fmt.Fprintf(h, "err %v\n", err) + } else { + hashWriteStat(h, info) + } + return h.Sum() +} + +func hashWriteStat(h io.Writer, info fs.FileInfo) { + fmt.Fprintf(h, "stat %d %x %v %v\n", info.Size(), uint64(info.Mode()), info.ModTime(), info.IsDir()) +} + +// testAndInputKey returns the actual cache key for the pair (testID, testInputsID). +func testAndInputKey(testID, testInputsID cache.ActionID) cache.ActionID { + return cache.Subkey(testID, fmt.Sprintf("inputs:%x", testInputsID)) +} + +func (c *runCache) saveOutput(a *work.Action) { + if c.id1 == (cache.ActionID{}) && c.id2 == (cache.ActionID{}) { + return + } + + // See comment about two-level lookup in tryCacheWithID above. 
+ testlog, err := os.ReadFile(a.Objdir + "testlog.txt") + if err != nil || !bytes.HasPrefix(testlog, testlogMagic) || testlog[len(testlog)-1] != '\n' { + if cache.DebugTest { + if err != nil { + fmt.Fprintf(os.Stderr, "testcache: %s: reading testlog: %v\n", a.Package.ImportPath, err) + } else { + fmt.Fprintf(os.Stderr, "testcache: %s: reading testlog: malformed\n", a.Package.ImportPath) + } + } + return + } + testInputsID, err := computeTestInputsID(a, testlog) + if err != nil { + return + } + if c.id1 != (cache.ActionID{}) { + if cache.DebugTest { + fmt.Fprintf(os.Stderr, "testcache: %s: save test ID %x => input ID %x => %x\n", a.Package.ImportPath, c.id1, testInputsID, testAndInputKey(c.id1, testInputsID)) + } + cache.PutNoVerify(cache.Default(), c.id1, bytes.NewReader(testlog)) + cache.PutNoVerify(cache.Default(), testAndInputKey(c.id1, testInputsID), bytes.NewReader(a.TestOutput.Bytes())) + } + if c.id2 != (cache.ActionID{}) { + if cache.DebugTest { + fmt.Fprintf(os.Stderr, "testcache: %s: save test ID %x => input ID %x => %x\n", a.Package.ImportPath, c.id2, testInputsID, testAndInputKey(c.id2, testInputsID)) + } + cache.PutNoVerify(cache.Default(), c.id2, bytes.NewReader(testlog)) + cache.PutNoVerify(cache.Default(), testAndInputKey(c.id2, testInputsID), bytes.NewReader(a.TestOutput.Bytes())) + } +} + +// coveragePercentage returns the coverage results (if enabled) for the +// test. It uncovers the data by scanning the output from the test run. +func coveragePercentage(out []byte) string { + if !cfg.BuildCover { + return "" + } + // The string looks like + // test coverage for encoding/binary: 79.9% of statements + // Extract the piece from the percentage to the end of the line. + re := regexp.MustCompile(`coverage: (.*)\n`) + matches := re.FindSubmatch(out) + if matches == nil { + // Probably running "go test -cover" not "go test -cover fmt". + // The coverage output will appear in the output directly. 
+ return "" + } + return fmt.Sprintf("\tcoverage: %s", matches[1]) +} + +// builderCleanTest is the action for cleaning up after a test. +func builderCleanTest(b *work.Builder, ctx context.Context, a *work.Action) error { + if cfg.BuildWork { + return nil + } + b.Shell(a).RemoveAll(a.Objdir) + return nil +} + +// builderPrintTest is the action for printing a test result. +func builderPrintTest(b *work.Builder, ctx context.Context, a *work.Action) error { + clean := a.Deps[0] + run := clean.Deps[0] + if run.TestOutput != nil { + os.Stdout.Write(run.TestOutput.Bytes()) + run.TestOutput = nil + } + return nil +} + +// printExitStatus is the action for printing the final exit status. +// If we are running multiple test targets, print a final "FAIL" +// in case a failure in an early package has already scrolled +// off of the user's terminal. +// (See https://golang.org/issue/30507#issuecomment-470593235.) +// +// In JSON mode, we need to maintain valid JSON output and +// we assume that the test output is being parsed by a tool +// anyway, so the failure will not be missed and would be +// awkward to try to wedge into the JSON stream. +// +// In fuzz mode, we only allow a single package for now +// (see CL 350156 and https://golang.org/issue/46312), +// so there is no possibility of scrolling off and no need +// to print the final status. +func printExitStatus(b *work.Builder, ctx context.Context, a *work.Action) error { + if !testJSON && testFuzz == "" && len(pkgArgs) != 0 { + if base.GetExitStatus() != 0 { + fmt.Println("FAIL") + return nil + } + } + return nil +} + +// testBinaryName can be used to create name for test binary executable. +// Use last element of import path, not package name. +// They differ when package name is "main". +// But if the import path is "command-line-arguments", +// like it is during 'go run', use the package name. 
+func testBinaryName(p *load.Package) string { + var elem string + if p.ImportPath == "command-line-arguments" { + elem = p.Name + } else { + elem = p.DefaultExecName() + } + + return elem + ".test" +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/test/test_nonunix.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/test_nonunix.go new file mode 100644 index 0000000000000000000000000000000000000000..df8448730d6dbd8ab8718396055485fb511d45cf --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/test_nonunix.go @@ -0,0 +1,12 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !unix + +package test + +func isETXTBSY(err error) bool { + // syscall.ETXTBSY is only meaningful on Unix platforms. + return false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/test/test_unix.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/test_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..f50ef98703e2e853757e2868aef7c25b5c12a12d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/test_unix.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package test + +import ( + "errors" + "syscall" +) + +func isETXTBSY(err error) bool { + return errors.Is(err, syscall.ETXTBSY) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/test/testflag.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/testflag.go new file mode 100644 index 0000000000000000000000000000000000000000..425378889d9af06da941763ab25c90ca4f81142c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/test/testflag.go @@ -0,0 +1,416 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package test + +import ( + "cmd/go/internal/base" + "cmd/go/internal/cmdflag" + "cmd/go/internal/work" + "errors" + "flag" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "time" +) + +//go:generate go run ./genflags.go + +// The flag handling part of go test is large and distracting. +// We can't use (*flag.FlagSet).Parse because some of the flags from +// our command line are for us, and some are for the test binary, and +// some are for both. + +func init() { + work.AddBuildFlags(CmdTest, work.OmitVFlag) + + cf := CmdTest.Flag + cf.BoolVar(&testC, "c", false, "") + cf.StringVar(&testO, "o", "", "") + work.AddCoverFlags(CmdTest, &testCoverProfile) + cf.Var((*base.StringsFlag)(&work.ExecCmd), "exec", "") + cf.BoolVar(&testJSON, "json", false, "") + cf.Var(&testVet, "vet", "") + + // Register flags to be forwarded to the test binary. We retain variables for + // some of them so that cmd/go knows what to do with the test output, or knows + // to build the test in a way that supports the use of the flag. 
+ + cf.StringVar(&testBench, "bench", "", "") + cf.Bool("benchmem", false, "") + cf.String("benchtime", "", "") + cf.StringVar(&testBlockProfile, "blockprofile", "", "") + cf.String("blockprofilerate", "", "") + cf.Int("count", 0, "") + cf.String("cpu", "", "") + cf.StringVar(&testCPUProfile, "cpuprofile", "", "") + cf.Bool("failfast", false, "") + cf.StringVar(&testFuzz, "fuzz", "", "") + cf.Bool("fullpath", false, "") + cf.StringVar(&testList, "list", "", "") + cf.StringVar(&testMemProfile, "memprofile", "", "") + cf.String("memprofilerate", "", "") + cf.StringVar(&testMutexProfile, "mutexprofile", "", "") + cf.String("mutexprofilefraction", "", "") + cf.Var(&testOutputDir, "outputdir", "") + cf.Int("parallel", 0, "") + cf.String("run", "", "") + cf.Bool("short", false, "") + cf.String("skip", "", "") + cf.DurationVar(&testTimeout, "timeout", 10*time.Minute, "") // known to cmd/dist + cf.String("fuzztime", "", "") + cf.String("fuzzminimizetime", "", "") + cf.StringVar(&testTrace, "trace", "", "") + cf.Var(&testV, "v", "") + cf.Var(&testShuffle, "shuffle", "") + + for name, ok := range passFlagToTest { + if ok { + cf.Var(cf.Lookup(name).Value, "test."+name, "") + } + } +} + +// outputdirFlag implements the -outputdir flag. +// It interprets an empty value as the working directory of the 'go' command. +type outputdirFlag struct { + abs string +} + +func (f *outputdirFlag) String() string { + return f.abs +} + +func (f *outputdirFlag) Set(value string) (err error) { + if value == "" { + f.abs = "" + } else { + f.abs, err = filepath.Abs(value) + } + return err +} + +func (f *outputdirFlag) getAbs() string { + if f.abs == "" { + return base.Cwd() + } + return f.abs +} + +// vetFlag implements the special parsing logic for the -vet flag: +// a comma-separated list, with distinguished values "all" and +// "off", plus a boolean tracking whether it was set explicitly. 
+// +// "all" is encoded as vetFlag{true, false, nil}, since it will +// pass no flags to the vet binary, and by default, it runs all +// analyzers. +type vetFlag struct { + explicit bool + off bool + flags []string // passed to vet when invoked automatically during 'go test' +} + +func (f *vetFlag) String() string { + switch { + case !f.off && !f.explicit && len(f.flags) == 0: + return "all" + case f.off: + return "off" + } + + var buf strings.Builder + for i, f := range f.flags { + if i > 0 { + buf.WriteByte(',') + } + buf.WriteString(f) + } + return buf.String() +} + +func (f *vetFlag) Set(value string) error { + switch { + case value == "": + *f = vetFlag{flags: defaultVetFlags} + return nil + case strings.Contains(value, "="): + return fmt.Errorf("-vet argument cannot contain equal signs") + case strings.Contains(value, " "): + return fmt.Errorf("-vet argument is comma-separated list, cannot contain spaces") + } + + *f = vetFlag{explicit: true} + var single string + for _, arg := range strings.Split(value, ",") { + switch arg { + case "": + return fmt.Errorf("-vet argument contains empty list element") + case "all": + single = arg + *f = vetFlag{explicit: true} + continue + case "off": + single = arg + *f = vetFlag{ + explicit: true, + off: true, + } + continue + default: + if _, ok := passAnalyzersToVet[arg]; !ok { + return fmt.Errorf("-vet argument must be a supported analyzer or a distinguished value; found %s", arg) + } + f.flags = append(f.flags, "-"+arg) + } + } + if len(f.flags) > 1 && single != "" { + return fmt.Errorf("-vet does not accept %q in a list with other analyzers", single) + } + if len(f.flags) > 1 && single != "" { + return fmt.Errorf("-vet does not accept %q in a list with other analyzers", single) + } + return nil +} + +type shuffleFlag struct { + on bool + seed *int64 +} + +func (f *shuffleFlag) String() string { + if !f.on { + return "off" + } + if f.seed == nil { + return "on" + } + return fmt.Sprintf("%d", *f.seed) +} + +func (f 
*shuffleFlag) Set(value string) error { + if value == "off" { + *f = shuffleFlag{on: false} + return nil + } + + if value == "on" { + *f = shuffleFlag{on: true} + return nil + } + + seed, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return fmt.Errorf(`-shuffle argument must be "on", "off", or an int64: %v`, err) + } + + *f = shuffleFlag{on: true, seed: &seed} + return nil +} + +// testFlags processes the command line, grabbing -x and -c, rewriting known flags +// to have "test" before them, and reading the command line for the test binary. +// Unfortunately for us, we need to do our own flag processing because go test +// grabs some flags but otherwise its command line is just a holding place for +// pkg.test's arguments. +// We allow known flags both before and after the package name list, +// to allow both +// +// go test fmt -custom-flag-for-fmt-test +// go test -x math +func testFlags(args []string) (packageNames, passToTest []string) { + base.SetFromGOFLAGS(&CmdTest.Flag) + addFromGOFLAGS := map[string]bool{} + CmdTest.Flag.Visit(func(f *flag.Flag) { + if short := strings.TrimPrefix(f.Name, "test."); passFlagToTest[short] { + addFromGOFLAGS[f.Name] = true + } + }) + + // firstUnknownFlag helps us report an error when flags not known to 'go + // test' are used along with -i or -c. + firstUnknownFlag := "" + + explicitArgs := make([]string, 0, len(args)) + inPkgList := false + afterFlagWithoutValue := false + for len(args) > 0 { + f, remainingArgs, err := cmdflag.ParseOne(&CmdTest.Flag, args) + + wasAfterFlagWithoutValue := afterFlagWithoutValue + afterFlagWithoutValue = false // provisionally + + if errors.Is(err, flag.ErrHelp) { + exitWithUsage() + } + + if errors.Is(err, cmdflag.ErrFlagTerminator) { + // 'go list' allows package arguments to be named either before or after + // the terminator, but 'go test' has historically allowed them only + // before. 
Preserve that behavior and treat all remaining arguments — + // including the terminator itself! — as arguments to the test. + explicitArgs = append(explicitArgs, args...) + break + } + + if nf := (cmdflag.NonFlagError{}); errors.As(err, &nf) { + if !inPkgList && packageNames != nil { + // We already saw the package list previously, and this argument is not + // a flag, so it — and everything after it — must be either a value for + // a preceding flag or a literal argument to the test binary. + if wasAfterFlagWithoutValue { + // This argument could syntactically be a flag value, so + // optimistically assume that it is and keep looking for go command + // flags after it. + // + // (If we're wrong, we'll at least be consistent with historical + // behavior; see https://golang.org/issue/40763.) + explicitArgs = append(explicitArgs, nf.RawArg) + args = remainingArgs + continue + } else { + // This argument syntactically cannot be a flag value, so it must be a + // positional argument, and so must everything after it. + explicitArgs = append(explicitArgs, args...) + break + } + } + + inPkgList = true + packageNames = append(packageNames, nf.RawArg) + args = remainingArgs // Consume the package name. + continue + } + + if inPkgList { + // This argument is syntactically a flag, so if we were in the package + // list we're not anymore. + inPkgList = false + } + + if nd := (cmdflag.FlagNotDefinedError{}); errors.As(err, &nd) { + // This is a flag we do not know. We must assume that any args we see + // after this might be flag arguments, not package names, so make + // packageNames non-nil to indicate that the package list is complete. + // + // (Actually, we only strictly need to assume that if the flag is not of + // the form -x=value, but making this more precise would be a breaking + // change in the command line API.) 
+ if packageNames == nil { + packageNames = []string{} + } + + if nd.RawArg == "-args" || nd.RawArg == "--args" { + // -args or --args signals that everything that follows + // should be passed to the test. + explicitArgs = append(explicitArgs, remainingArgs...) + break + } + + if firstUnknownFlag == "" { + firstUnknownFlag = nd.RawArg + } + + explicitArgs = append(explicitArgs, nd.RawArg) + args = remainingArgs + if !nd.HasValue { + afterFlagWithoutValue = true + } + continue + } + + if err != nil { + fmt.Fprintln(os.Stderr, err) + exitWithUsage() + } + + if short := strings.TrimPrefix(f.Name, "test."); passFlagToTest[short] { + explicitArgs = append(explicitArgs, fmt.Sprintf("-test.%s=%v", short, f.Value)) + + // This flag has been overridden explicitly, so don't forward its implicit + // value from GOFLAGS. + delete(addFromGOFLAGS, short) + delete(addFromGOFLAGS, "test."+short) + } + + args = remainingArgs + } + if firstUnknownFlag != "" && testC { + fmt.Fprintf(os.Stderr, "go: unknown flag %s cannot be used with -c\n", firstUnknownFlag) + exitWithUsage() + } + + var injectedFlags []string + if testJSON { + // If converting to JSON, we need the full output in order to pipe it to test2json. + // The -test.v=test2json flag is like -test.v=true but causes the test to add + // extra ^V characters before testing output lines and other framing, + // which helps test2json do a better job creating the JSON events. + injectedFlags = append(injectedFlags, "-test.v=test2json") + delete(addFromGOFLAGS, "v") + delete(addFromGOFLAGS, "test.v") + } + + // Inject flags from GOFLAGS before the explicit command-line arguments. + // (They must appear before the flag terminator or first non-flag argument.) + // Also determine whether flags with awkward defaults have already been set. 
+ var timeoutSet, outputDirSet bool + CmdTest.Flag.Visit(func(f *flag.Flag) { + short := strings.TrimPrefix(f.Name, "test.") + if addFromGOFLAGS[f.Name] { + injectedFlags = append(injectedFlags, fmt.Sprintf("-test.%s=%v", short, f.Value)) + } + switch short { + case "timeout": + timeoutSet = true + case "outputdir": + outputDirSet = true + } + }) + + // 'go test' has a default timeout, but the test binary itself does not. + // If the timeout wasn't set (and forwarded) explicitly, add the default + // timeout to the command line. + if testTimeout > 0 && !timeoutSet { + injectedFlags = append(injectedFlags, fmt.Sprintf("-test.timeout=%v", testTimeout)) + } + + // Similarly, the test binary defaults -test.outputdir to its own working + // directory, but 'go test' defaults it to the working directory of the 'go' + // command. Set it explicitly if it is needed due to some other flag that + // requests output. + if testProfile() != "" && !outputDirSet { + injectedFlags = append(injectedFlags, "-test.outputdir="+testOutputDir.getAbs()) + } + + // If the user is explicitly passing -help or -h, show output + // of the test binary so that the help output is displayed + // even though the test will exit with success. + // This loop is imperfect: it will do the wrong thing for a case + // like -args -test.outputdir -help. Such cases are probably rare, + // and getting this wrong doesn't do too much harm. +helpLoop: + for _, arg := range explicitArgs { + switch arg { + case "--": + break helpLoop + case "-h", "-help", "--help": + testHelp = true + break helpLoop + } + } + + // Forward any unparsed arguments (following --args) to the test binary. + return packageNames, append(injectedFlags, explicitArgs...) 
+} + +func exitWithUsage() { + fmt.Fprintf(os.Stderr, "usage: %s\n", CmdTest.UsageLine) + fmt.Fprintf(os.Stderr, "Run 'go help %s' and 'go help %s' for details.\n", CmdTest.LongName(), HelpTestflag.LongName()) + + base.SetExitStatus(2) + base.Exit() +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/tool/tool.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/tool/tool.go new file mode 100644 index 0000000000000000000000000000000000000000..ebe189bb819a37dc28e5ebe1da4365d87b369abe --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/tool/tool.go @@ -0,0 +1,224 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tool implements the “go tool” command. +package tool + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "go/build" + "internal/platform" + "os" + "os/exec" + "os/signal" + "sort" + "strings" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" +) + +var CmdTool = &base.Command{ + Run: runTool, + UsageLine: "go tool [-n] command [args...]", + Short: "run specified go tool", + Long: ` +Tool runs the go tool command identified by the arguments. +With no arguments it prints the list of known tools. + +The -n flag causes tool to print the command that would be +executed but not execute it. + +For more about each tool command, see 'go doc cmd/'. +`, +} + +var toolN bool + +// Return whether tool can be expected in the gccgo tool directory. +// Other binaries could be in the same directory so don't +// show those with the 'go tool' command. 
+func isGccgoTool(tool string) bool { + switch tool { + case "cgo", "fix", "cover", "godoc", "vet": + return true + } + return false +} + +func init() { + base.AddChdirFlag(&CmdTool.Flag) + CmdTool.Flag.BoolVar(&toolN, "n", false, "") +} + +func runTool(ctx context.Context, cmd *base.Command, args []string) { + if len(args) == 0 { + listTools() + return + } + toolName := args[0] + // The tool name must be lower-case letters, numbers or underscores. + for _, c := range toolName { + switch { + case 'a' <= c && c <= 'z', '0' <= c && c <= '9', c == '_': + default: + fmt.Fprintf(os.Stderr, "go: bad tool name %q\n", toolName) + base.SetExitStatus(2) + return + } + } + + toolPath, err := base.ToolPath(toolName) + if err != nil { + if toolName == "dist" && len(args) > 1 && args[1] == "list" { + // cmd/distpack removes the 'dist' tool from the toolchain to save space, + // since it is normally only used for building the toolchain in the first + // place. However, 'go tool dist list' is useful for listing all supported + // platforms. + // + // If the dist tool does not exist, impersonate this command. + if impersonateDistList(args[2:]) { + return + } + } + + // Emit the usual error for the missing tool. + _ = base.Tool(toolName) + } + + if toolN { + cmd := toolPath + if len(args) > 1 { + cmd += " " + strings.Join(args[1:], " ") + } + fmt.Printf("%s\n", cmd) + return + } + args[0] = toolPath // in case the tool wants to re-exec itself, e.g. cmd/dist + toolCmd := &exec.Cmd{ + Path: toolPath, + Args: args, + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + } + err = toolCmd.Start() + if err == nil { + c := make(chan os.Signal, 100) + signal.Notify(c) + go func() { + for sig := range c { + toolCmd.Process.Signal(sig) + } + }() + err = toolCmd.Wait() + signal.Stop(c) + close(c) + } + if err != nil { + // Only print about the exit status if the command + // didn't even run (not an ExitError) or it didn't exit cleanly + // or we're printing command lines too (-x mode). 
+ // Assume if command exited cleanly (even with non-zero status) + // it printed any messages it wanted to print. + if e, ok := err.(*exec.ExitError); !ok || !e.Exited() || cfg.BuildX { + fmt.Fprintf(os.Stderr, "go tool %s: %s\n", toolName, err) + } + base.SetExitStatus(1) + return + } +} + +// listTools prints a list of the available tools in the tools directory. +func listTools() { + f, err := os.Open(build.ToolDir) + if err != nil { + fmt.Fprintf(os.Stderr, "go: no tool directory: %s\n", err) + base.SetExitStatus(2) + return + } + defer f.Close() + names, err := f.Readdirnames(-1) + if err != nil { + fmt.Fprintf(os.Stderr, "go: can't read tool directory: %s\n", err) + base.SetExitStatus(2) + return + } + + sort.Strings(names) + for _, name := range names { + // Unify presentation by going to lower case. + // If it's windows, don't show the .exe suffix. + name = strings.TrimSuffix(strings.ToLower(name), cfg.ToolExeSuffix()) + + // The tool directory used by gccgo will have other binaries + // in addition to go tools. Only display go tools here. + if cfg.BuildToolchainName == "gccgo" && !isGccgoTool(name) { + continue + } + fmt.Println(name) + } +} + +func impersonateDistList(args []string) (handled bool) { + fs := flag.NewFlagSet("go tool dist list", flag.ContinueOnError) + jsonFlag := fs.Bool("json", false, "produce JSON output") + brokenFlag := fs.Bool("broken", false, "include broken ports") + + // The usage for 'go tool dist' claims that + // “All commands take -v flags to emit extra information”, + // but list -v appears not to have any effect. + _ = fs.Bool("v", false, "emit extra information") + + if err := fs.Parse(args); err != nil || len(fs.Args()) > 0 { + // Unrecognized flag or argument. + // Force fallback to the real 'go tool dist'. 
+ return false + } + + if !*jsonFlag { + for _, p := range platform.List { + if !*brokenFlag && platform.Broken(p.GOOS, p.GOARCH) { + continue + } + fmt.Println(p) + } + return true + } + + type jsonResult struct { + GOOS string + GOARCH string + CgoSupported bool + FirstClass bool + Broken bool `json:",omitempty"` + } + + var results []jsonResult + for _, p := range platform.List { + broken := platform.Broken(p.GOOS, p.GOARCH) + if broken && !*brokenFlag { + continue + } + if *jsonFlag { + results = append(results, jsonResult{ + GOOS: p.GOOS, + GOARCH: p.GOARCH, + CgoSupported: platform.CgoSupported(p.GOOS, p.GOARCH), + FirstClass: platform.FirstClass(p.GOOS, p.GOARCH), + Broken: broken, + }) + } + } + out, err := json.MarshalIndent(results, "", "\t") + if err != nil { + return false + } + + os.Stdout.Write(out) + return true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/exec.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/exec.go new file mode 100644 index 0000000000000000000000000000000000000000..820fe93e87c377e770b7fe1573de50a74f12dc3d --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/exec.go @@ -0,0 +1,55 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !js && !wasip1 + +package toolchain + +import ( + "cmd/go/internal/base" + "internal/godebug" + "os" + "os/exec" + "runtime" + "syscall" +) + +// execGoToolchain execs the Go toolchain with the given name (gotoolchain), +// GOROOT directory, and go command executable. +// The GOROOT directory is empty if we are invoking a command named +// gotoolchain found in $PATH. 
+func execGoToolchain(gotoolchain, dir, exe string) { + os.Setenv(targetEnv, gotoolchain) + if dir == "" { + os.Unsetenv("GOROOT") + } else { + os.Setenv("GOROOT", dir) + } + + // On Windows, there is no syscall.Exec, so the best we can do + // is run a subprocess and exit with the same status. + // Doing the same on Unix would be a problem because it wouldn't + // propagate signals and such, but there are no signals on Windows. + // We also use the exec case when GODEBUG=gotoolchainexec=0, + // to allow testing this code even when not on Windows. + if godebug.New("#gotoolchainexec").Value() == "0" || runtime.GOOS == "windows" { + cmd := exec.Command(exe, os.Args[1:]...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() + if err != nil { + if e, ok := err.(*exec.ExitError); ok && e.ProcessState != nil { + if e.ProcessState.Exited() { + os.Exit(e.ProcessState.ExitCode()) + } + base.Fatalf("exec %s: %s", gotoolchain, e.ProcessState) + } + base.Fatalf("exec %s: %s", exe, err) + } + os.Exit(0) + } + err := syscall.Exec(exe, os.Args, os.Environ()) + base.Fatalf("exec %s: %v", gotoolchain, err) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/exec_stub.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/exec_stub.go new file mode 100644 index 0000000000000000000000000000000000000000..e2123790a7e945923ad3db7cebdef5b6b2262277 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/exec_stub.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build js || wasip1 + +package toolchain + +import "cmd/go/internal/base" + +func execGoToolchain(gotoolchain, dir, exe string) { + base.Fatalf("execGoToolchain unsupported") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/path_none.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/path_none.go new file mode 100644 index 0000000000000000000000000000000000000000..8fdf71a6e6ad9b23303e2b2eaa80138324cc6876 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/path_none.go @@ -0,0 +1,21 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !unix && !plan9 && !windows + +package toolchain + +import "io/fs" + +// pathDirs returns the directories in the system search path. +func pathDirs() []string { + return nil +} + +// pathVersion returns the Go version implemented by the file +// described by de and info in directory dir. +// The analysis only uses the name itself; it does not run the program. +func pathVersion(dir string, de fs.DirEntry, info fs.FileInfo) (string, bool) { + return "", false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/path_plan9.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/path_plan9.go new file mode 100644 index 0000000000000000000000000000000000000000..3f836a07b1038364c6b9249963d7d4828e51cc19 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/path_plan9.go @@ -0,0 +1,29 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package toolchain + +import ( + "io/fs" + "os" + "path/filepath" + + "cmd/go/internal/gover" +) + +// pathDirs returns the directories in the system search path. 
+func pathDirs() []string { + return filepath.SplitList(os.Getenv("path")) +} + +// pathVersion returns the Go version implemented by the file +// described by de and info in directory dir. +// The analysis only uses the name itself; it does not run the program. +func pathVersion(dir string, de fs.DirEntry, info fs.FileInfo) (string, bool) { + v := gover.FromToolchain(de.Name()) + if v == "" || info.Mode()&0111 == 0 { + return "", false + } + return v, true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/path_unix.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/path_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..519c53ec30b818b17252a607003d855957e82c0a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/path_unix.go @@ -0,0 +1,46 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package toolchain + +import ( + "internal/syscall/unix" + "io/fs" + "os" + "path/filepath" + "syscall" + + "cmd/go/internal/gover" +) + +// pathDirs returns the directories in the system search path. +func pathDirs() []string { + return filepath.SplitList(os.Getenv("PATH")) +} + +// pathVersion returns the Go version implemented by the file +// described by de and info in directory dir. +// The analysis only uses the name itself; it does not run the program. +func pathVersion(dir string, de fs.DirEntry, info fs.FileInfo) (string, bool) { + v := gover.FromToolchain(de.Name()) + if v == "" { + return "", false + } + + // Mimicking exec.findExecutable here. + // ENOSYS means Eaccess is not available or not implemented. + // EPERM can be returned by Linux containers employing seccomp. + // In both cases, fall back to checking the permission bits. 
+ err := unix.Eaccess(filepath.Join(dir, de.Name()), unix.X_OK) + if (err == syscall.ENOSYS || err == syscall.EPERM) && info.Mode()&0111 != 0 { + err = nil + } + if err != nil { + return "", false + } + + return v, true +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/path_windows.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/path_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..086c591e0589dfef2f8a60d59fc7e03691302331 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/path_windows.go @@ -0,0 +1,78 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package toolchain + +import ( + "io/fs" + "os" + "path/filepath" + "strings" + "sync" + + "cmd/go/internal/gover" +) + +// pathExts is a cached PATHEXT list. +var pathExts struct { + once sync.Once + list []string +} + +func initPathExts() { + var exts []string + x := os.Getenv(`PATHEXT`) + if x != "" { + for _, e := range strings.Split(strings.ToLower(x), `;`) { + if e == "" { + continue + } + if e[0] != '.' { + e = "." + e + } + exts = append(exts, e) + } + } else { + exts = []string{".com", ".exe", ".bat", ".cmd"} + } + pathExts.list = exts +} + +// pathDirs returns the directories in the system search path. +func pathDirs() []string { + return filepath.SplitList(os.Getenv("PATH")) +} + +// pathVersion returns the Go version implemented by the file +// described by de and info in directory dir. +// The analysis only uses the name itself; it does not run the program. 
+func pathVersion(dir string, de fs.DirEntry, info fs.FileInfo) (string, bool) { + pathExts.once.Do(initPathExts) + name, _, ok := cutExt(de.Name(), pathExts.list) + if !ok { + return "", false + } + v := gover.FromToolchain(name) + if v == "" { + return "", false + } + return v, true +} + +// cutExt looks for any of the known extensions at the end of file. +// If one is found, cutExt returns the file name with the extension trimmed, +// the extension itself, and true to signal that an extension was found. +// Otherwise cutExt returns file, "", false. +func cutExt(file string, exts []string) (name, ext string, found bool) { + i := strings.LastIndex(file, ".") + if i < 0 { + return file, "", false + } + for _, x := range exts { + if strings.EqualFold(file[i:], x) { + return file[:i], file[i:], true + } + } + return file, "", false +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/select.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/select.go new file mode 100644 index 0000000000000000000000000000000000000000..14a8d3c21d2e71a35e1f2d31b3a05de49a9385df --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/select.go @@ -0,0 +1,661 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package toolchain implements dynamic switching of Go toolchains. +package toolchain + +import ( + "context" + "errors" + "flag" + "fmt" + "go/build" + "io/fs" + "log" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/gover" + "cmd/go/internal/modfetch" + "cmd/go/internal/modload" + "cmd/go/internal/run" + "cmd/go/internal/work" + + "golang.org/x/mod/module" +) + +const ( + // We download golang.org/toolchain version v0.0.1-.-. 
+ // If the 0.0.1 indicates anything at all, it's the version of the toolchain packaging: + // if for some reason we needed to change the way toolchains are packaged into + // module zip files in a future version of Go, we could switch to v0.0.2 and then + // older versions expecting the old format could use v0.0.1 and newer versions + // would use v0.0.2. Of course, then we'd also have to publish two of each + // module zip file. It's not likely we'll ever need to change this. + gotoolchainModule = "golang.org/toolchain" + gotoolchainVersion = "v0.0.1" + + // targetEnv is a special environment variable set to the expected + // toolchain version during the toolchain switch by the parent + // process and cleared in the child process. When set, that indicates + // to the child to confirm that it provides the expected toolchain version. + targetEnv = "GOTOOLCHAIN_INTERNAL_SWITCH_VERSION" + + // countEnv is a special environment variable + // that is incremented during each toolchain switch, to detect loops. + // It is cleared before invoking programs in 'go run', 'go test', 'go generate', and 'go tool' + // by invoking them in an environment filtered with FilterEnv, + // so user programs should not see this in their environment. + countEnv = "GOTOOLCHAIN_INTERNAL_SWITCH_COUNT" + + // maxSwitch is the maximum toolchain switching depth. + // Most uses should never see more than three. + // (Perhaps one for the initial GOTOOLCHAIN dispatch, + // a second for go get doing an upgrade, and a third if + // for some reason the chosen upgrade version is too small + // by a little.) + // When the count reaches maxSwitch - 10, we start logging + // the switched versions for debugging before crashing with + // a fatal error upon reaching maxSwitch. + // That should be enough to see the repetition. + maxSwitch = 100 +) + +// FilterEnv returns a copy of env with internal GOTOOLCHAIN environment +// variables filtered out. 
+func FilterEnv(env []string) []string { + // Note: Don't need to filter out targetEnv because Switch does that. + var out []string + for _, e := range env { + if strings.HasPrefix(e, countEnv+"=") { + continue + } + out = append(out, e) + } + return out +} + +// Select invokes a different Go toolchain if directed by +// the GOTOOLCHAIN environment variable or the user's configuration +// or go.mod file. +// It must be called early in startup. +// See https://go.dev/doc/toolchain#select. +func Select() { + log.SetPrefix("go: ") + defer log.SetPrefix("") + + if !modload.WillBeEnabled() { + return + } + + // As a special case, let "go env GOTOOLCHAIN" and "go env -w GOTOOLCHAIN=..." + // be handled by the local toolchain, since an older toolchain may not understand it. + // This provides an easy way out of "go env -w GOTOOLCHAIN=go1.19" and makes + // sure that "go env GOTOOLCHAIN" always prints the local go command's interpretation of it. + // We look for these specific command lines in order to avoid mishandling + // + // GOTOOLCHAIN=go1.999 go env -newflag GOTOOLCHAIN + // + // where -newflag is a flag known to Go 1.999 but not known to us. + if (len(os.Args) == 3 && os.Args[1] == "env" && os.Args[2] == "GOTOOLCHAIN") || + (len(os.Args) == 4 && os.Args[1] == "env" && os.Args[2] == "-w" && strings.HasPrefix(os.Args[3], "GOTOOLCHAIN=")) { + return + } + + // Interpret GOTOOLCHAIN to select the Go toolchain to run. + gotoolchain := cfg.Getenv("GOTOOLCHAIN") + gover.Startup.GOTOOLCHAIN = gotoolchain + if gotoolchain == "" { + // cfg.Getenv should fall back to $GOROOT/go.env, + // so this should not happen, unless a packager + // has deleted the GOTOOLCHAIN line from go.env. + // It can also happen if GOROOT is missing or broken, + // in which case best to let the go command keep running + // and diagnose the problem. + return + } + + // Note: minToolchain is what https://go.dev/doc/toolchain#select calls the default toolchain. 
+ minToolchain := gover.LocalToolchain() + minVers := gover.Local() + var mode string + if gotoolchain == "auto" { + mode = "auto" + } else if gotoolchain == "path" { + mode = "path" + } else { + min, suffix, plus := strings.Cut(gotoolchain, "+") // go1.2.3+auto + if min != "local" { + v := gover.FromToolchain(min) + if v == "" { + if plus { + base.Fatalf("invalid GOTOOLCHAIN %q: invalid minimum toolchain %q", gotoolchain, min) + } + base.Fatalf("invalid GOTOOLCHAIN %q", gotoolchain) + } + minToolchain = min + minVers = v + } + if plus && suffix != "auto" && suffix != "path" { + base.Fatalf("invalid GOTOOLCHAIN %q: only version suffixes are +auto and +path", gotoolchain) + } + mode = suffix + } + + gotoolchain = minToolchain + if (mode == "auto" || mode == "path") && !goInstallVersion() { + // Read go.mod to find new minimum and suggested toolchain. + file, goVers, toolchain := modGoToolchain() + gover.Startup.AutoFile = file + if toolchain == "default" { + // "default" means always use the default toolchain, + // which is already set, so nothing to do here. + // Note that if we have Go 1.21 installed originally, + // GOTOOLCHAIN=go1.30.0+auto or GOTOOLCHAIN=go1.30.0, + // and the go.mod says "toolchain default", we use Go 1.30, not Go 1.21. + // That is, default overrides the "auto" part of the calculation + // but not the minimum that the user has set. + // Of course, if the go.mod also says "go 1.35", using Go 1.30 + // will provoke an error about the toolchain being too old. + // That's what people who use toolchain default want: + // only ever use the toolchain configured by the user + // (including its environment and go env -w file). + gover.Startup.AutoToolchain = toolchain + } else { + if toolchain != "" { + // Accept toolchain only if it is > our min. + // (If it is equal, then min satisfies it anyway: that can matter if min + // has a suffix like "go1.21.1-foo" and toolchain is "go1.21.1".) 
+ toolVers := gover.FromToolchain(toolchain) + if toolVers == "" || (!strings.HasPrefix(toolchain, "go") && !strings.Contains(toolchain, "-go")) { + base.Fatalf("invalid toolchain %q in %s", toolchain, base.ShortPath(file)) + } + if gover.Compare(toolVers, minVers) > 0 { + gotoolchain = toolchain + minVers = toolVers + gover.Startup.AutoToolchain = toolchain + } + } + if gover.Compare(goVers, minVers) > 0 { + gotoolchain = "go" + goVers + // Starting with Go 1.21, the first released version has a .0 patch version suffix. + // Don't try to download a language version (sans patch component), such as go1.22. + // Instead, use the first toolchain of that language version, such as 1.22.0. + // See golang.org/issue/62278. + if gover.IsLang(goVers) && gover.Compare(goVers, "1.21") >= 0 { + gotoolchain += ".0" + } + gover.Startup.AutoGoVersion = goVers + gover.Startup.AutoToolchain = "" // in case we are overriding it for being too old + } + } + } + + // If we are invoked as a target toolchain, confirm that + // we provide the expected version and then run. + // This check is delayed until after the handling of auto and path + // so that we have initialized gover.Startup for use in error messages. + if target := os.Getenv(targetEnv); target != "" && TestVersionSwitch != "loop" { + if gover.LocalToolchain() != target { + base.Fatalf("toolchain %v invoked to provide %v", gover.LocalToolchain(), target) + } + os.Unsetenv(targetEnv) + + // Note: It is tempting to check that if gotoolchain != "local" + // then target == gotoolchain here, as a sanity check that + // the child has made the same version determination as the parent. + // This turns out not always to be the case. Specifically, if we are + // running Go 1.21 with GOTOOLCHAIN=go1.22+auto, which invokes + // Go 1.22, then 'go get go@1.23.0' or 'go get needs_go_1_23' + // will invoke Go 1.23, but as the Go 1.23 child the reason for that + // will not be apparent here: it will look like we should be using Go 1.22. 
+ // We rely on the targetEnv being set to know not to downgrade. + // A longer term problem with the sanity check is that the exact details + // may change over time: there may be other reasons that a future Go + // version might invoke an older one, and the older one won't know why. + // Best to just accept that we were invoked to provide a specific toolchain + // (which we just checked) and leave it at that. + return + } + + if gotoolchain == "local" || gotoolchain == gover.LocalToolchain() { + // Let the current binary handle the command. + return + } + + // Minimal sanity check of GOTOOLCHAIN setting before search. + // We want to allow things like go1.20.3 but also gccgo-go1.20.3. + // We want to disallow mistakes / bad ideas like GOTOOLCHAIN=bash, + // since we will find that in the path lookup. + if !strings.HasPrefix(gotoolchain, "go1") && !strings.Contains(gotoolchain, "-go1") { + base.Fatalf("invalid GOTOOLCHAIN %q", gotoolchain) + } + + Exec(gotoolchain) +} + +// TestVersionSwitch is set in the test go binary to the value in $TESTGO_VERSION_SWITCH. +// Valid settings are: +// +// "switch" - simulate version switches by reinvoking the test go binary with a different TESTGO_VERSION. +// "mismatch" - like "switch" but forget to set TESTGO_VERSION, so it looks like we invoked a mismatched toolchain +// "loop" - like "mismatch" but forget the target check, causing a toolchain switching loop +var TestVersionSwitch string + +// Exec invokes the specified Go toolchain or else prints an error and exits the process. +// If $GOTOOLCHAIN is set to path or min+path, Exec only considers the PATH +// as a source of Go toolchains. Otherwise Exec tries the PATH but then downloads +// a toolchain if necessary. 
+func Exec(gotoolchain string) { + log.SetPrefix("go: ") + + writeBits = sysWriteBits() + + count, _ := strconv.Atoi(os.Getenv(countEnv)) + if count >= maxSwitch-10 { + fmt.Fprintf(os.Stderr, "go: switching from go%v to %v [depth %d]\n", gover.Local(), gotoolchain, count) + } + if count >= maxSwitch { + base.Fatalf("too many toolchain switches") + } + os.Setenv(countEnv, fmt.Sprint(count+1)) + + env := cfg.Getenv("GOTOOLCHAIN") + pathOnly := env == "path" || strings.HasSuffix(env, "+path") + + // For testing, if TESTGO_VERSION is already in use + // (only happens in the cmd/go test binary) + // and TESTGO_VERSION_SWITCH=switch is set, + // "switch" toolchains by changing TESTGO_VERSION + // and reinvoking the current binary. + // The special cases =loop and =mismatch skip the + // setting of TESTGO_VERSION so that it looks like we + // accidentally invoked the wrong toolchain, + // to test detection of that failure mode. + switch TestVersionSwitch { + case "switch": + os.Setenv("TESTGO_VERSION", gotoolchain) + fallthrough + case "loop", "mismatch": + exe, err := os.Executable() + if err != nil { + base.Fatalf("%v", err) + } + execGoToolchain(gotoolchain, os.Getenv("GOROOT"), exe) + } + + // Look in PATH for the toolchain before we download one. + // This allows custom toolchains as well as reuse of toolchains + // already installed using go install golang.org/dl/go1.2.3@latest. + if exe, err := cfg.LookPath(gotoolchain); err == nil { + execGoToolchain(gotoolchain, "", exe) + } + + // GOTOOLCHAIN=auto looks in PATH and then falls back to download. + // GOTOOLCHAIN=path only looks in PATH. + if pathOnly { + base.Fatalf("cannot find %q in PATH", gotoolchain) + } + + // Set up modules without an explicit go.mod, to download distribution. + modload.Reset() + modload.ForceUseModules = true + modload.RootMode = modload.NoRoot + modload.Init() + + // Download and unpack toolchain module into module cache. 
+ // Note that multiple go commands might be doing this at the same time, + // and that's OK: the module cache handles that case correctly. + m := module.Version{ + Path: gotoolchainModule, + Version: gotoolchainVersion + "-" + gotoolchain + "." + runtime.GOOS + "-" + runtime.GOARCH, + } + dir, err := modfetch.Download(context.Background(), m) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + toolVers := gover.FromToolchain(gotoolchain) + if gover.IsLang(toolVers) && gover.Compare(toolVers, "1.21") >= 0 { + base.Fatalf("invalid toolchain: %s is a language version but not a toolchain version (%s.x)", gotoolchain, gotoolchain) + } + base.Fatalf("download %s for %s/%s: toolchain not available", gotoolchain, runtime.GOOS, runtime.GOARCH) + } + base.Fatalf("download %s: %v", gotoolchain, err) + } + + // On first use after download, set the execute bits on the commands + // so that we can run them. Note that multiple go commands might be + // doing this at the same time, but if so no harm done. + if runtime.GOOS != "windows" { + info, err := os.Stat(filepath.Join(dir, "bin/go")) + if err != nil { + base.Fatalf("download %s: %v", gotoolchain, err) + } + if info.Mode()&0111 == 0 { + // allowExec sets the exec permission bits on all files found in dir. + allowExec := func(dir string) { + err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + info, err := os.Stat(path) + if err != nil { + return err + } + if err := os.Chmod(path, info.Mode()&0777|0111); err != nil { + return err + } + } + return nil + }) + if err != nil { + base.Fatalf("download %s: %v", gotoolchain, err) + } + } + + // Set the bits in pkg/tool before bin/go. 
+ // If we are racing with another go command and do bin/go first, + // then the check of bin/go above might succeed, the other go command + // would skip its own mode-setting, and then the go command might + // try to run a tool before we get to setting the bits on pkg/tool. + // Setting pkg/tool before bin/go avoids that ordering problem. + // The only other tool the go command invokes is gofmt, + // so we set that one explicitly before handling bin (which will include bin/go). + allowExec(filepath.Join(dir, "pkg/tool")) + allowExec(filepath.Join(dir, "bin/gofmt")) + allowExec(filepath.Join(dir, "bin")) + } + } + + srcUGoMod := filepath.Join(dir, "src/_go.mod") + srcGoMod := filepath.Join(dir, "src/go.mod") + if size(srcGoMod) != size(srcUGoMod) { + err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if path == srcUGoMod { + // Leave for last, in case we are racing with another go command. + return nil + } + if pdir, name := filepath.Split(path); name == "_go.mod" { + if err := raceSafeCopy(path, pdir+"go.mod"); err != nil { + return err + } + } + return nil + }) + // Handle src/go.mod; this is the signal to other racing go commands + // that everything is okay and they can skip this step. + if err == nil { + err = raceSafeCopy(srcUGoMod, srcGoMod) + } + if err != nil { + base.Fatalf("download %s: %v", gotoolchain, err) + } + } + + // Reinvoke the go command. + execGoToolchain(gotoolchain, dir, filepath.Join(dir, "bin/go")) +} + +func size(path string) int64 { + info, err := os.Stat(path) + if err != nil { + return -1 + } + return info.Size() +} + +var writeBits fs.FileMode + +// raceSafeCopy copies the file old to the file new, being careful to ensure +// that if multiple go commands call raceSafeCopy(old, new) at the same time, +// they don't interfere with each other: both will succeed and return and +// later observe the correct content in new. 
Like in the build cache, we arrange +// this by opening new without truncation and then writing the content. +// Both go commands can do this simultaneously and will write the same thing +// (old never changes content). +func raceSafeCopy(old, new string) error { + oldInfo, err := os.Stat(old) + if err != nil { + return err + } + newInfo, err := os.Stat(new) + if err == nil && newInfo.Size() == oldInfo.Size() { + return nil + } + data, err := os.ReadFile(old) + if err != nil { + return err + } + // The module cache has unwritable directories by default. + // Restore the user write bit in the directory so we can create + // the new go.mod file. We clear it again at the end on a + // best-effort basis (ignoring failures). + dir := filepath.Dir(old) + info, err := os.Stat(dir) + if err != nil { + return err + } + if err := os.Chmod(dir, info.Mode()|writeBits); err != nil { + return err + } + defer os.Chmod(dir, info.Mode()) + // Note: create the file writable, so that a racing go command + // doesn't get an error before we store the actual data. + f, err := os.OpenFile(new, os.O_CREATE|os.O_WRONLY, writeBits&^0o111) + if err != nil { + // If OpenFile failed because a racing go command completed our work + // (and then OpenFile failed because the directory or file is now read-only), + // count that as a success. + if size(old) == size(new) { + return nil + } + return err + } + defer os.Chmod(new, oldInfo.Mode()) + if _, err := f.Write(data); err != nil { + f.Close() + return err + } + return f.Close() +} + +// modGoToolchain finds the enclosing go.work or go.mod file +// and returns the go version and toolchain lines from the file. +// The toolchain line overrides the version line +func modGoToolchain() (file, goVers, toolchain string) { + wd := base.UncachedCwd() + file = modload.FindGoWork(wd) + // $GOWORK can be set to a file that does not yet exist, if we are running 'go work init'. 
+ // Do not try to load the file in that case + if _, err := os.Stat(file); err != nil { + file = "" + } + if file == "" { + file = modload.FindGoMod(wd) + } + if file == "" { + return "", "", "" + } + + data, err := os.ReadFile(file) + if err != nil { + base.Fatalf("%v", err) + } + return file, gover.GoModLookup(data, "go"), gover.GoModLookup(data, "toolchain") +} + +// goInstallVersion reports whether the command line is go install m@v or go run m@v. +// If so, Select must not read the go.mod or go.work file in "auto" or "path" mode. +func goInstallVersion() bool { + // Note: We assume there are no flags between 'go' and 'install' or 'run'. + // During testing there are some debugging flags that are accepted + // in that position, but in production go binaries there are not. + if len(os.Args) < 3 { + return false + } + + var cmdFlags *flag.FlagSet + switch os.Args[1] { + default: + // Command doesn't support a pkg@version as the main module. + return false + case "install": + cmdFlags = &work.CmdInstall.Flag + case "run": + cmdFlags = &run.CmdRun.Flag + } + + // The modcachrw flag is unique, in that it affects how we fetch the + // requested module to even figure out what toolchain it needs. + // We need to actually set it before we check the toolchain version. + // (See https://go.dev/issue/64282.) + modcacherwFlag := cmdFlags.Lookup("modcacherw") + if modcacherwFlag == nil { + base.Fatalf("internal error: modcacherw flag not registered for command") + } + modcacherwVal, ok := modcacherwFlag.Value.(interface { + IsBoolFlag() bool + flag.Value + }) + if !ok || !modcacherwVal.IsBoolFlag() { + base.Fatalf("internal error: modcacherw is not a boolean flag") + } + + // Make a best effort to parse the command's args to find the pkg@version + // argument and the -modcacherw flag. 
+ var ( + pkgArg string + modcacherwSeen bool + ) + for args := os.Args[2:]; len(args) > 0; { + a := args[0] + args = args[1:] + if a == "--" { + if len(args) == 0 { + return false + } + pkgArg = args[0] + break + } + + a, ok := strings.CutPrefix(a, "-") + if !ok { + // Not a flag argument. Must be a package. + pkgArg = a + break + } + a = strings.TrimPrefix(a, "-") // Treat --flag as -flag. + + name, val, hasEq := strings.Cut(a, "=") + + if name == "modcacherw" { + if !hasEq { + val = "true" + } + if err := modcacherwVal.Set(val); err != nil { + return false + } + modcacherwSeen = true + continue + } + + if hasEq { + // Already has a value; don't bother parsing it. + continue + } + + f := run.CmdRun.Flag.Lookup(a) + if f == nil { + // We don't know whether this flag is a boolean. + if os.Args[1] == "run" { + // We don't know where to find the pkg@version argument. + // For run, the pkg@version can be anywhere on the command line, + // because it is preceded by run flags and followed by arguments to the + // program being run. Since we don't know whether this flag takes + // an argument, we can't reliably identify the end of the run flags. + // Just give up and let the user clarify using the "=" form. + return false + } + + // We would like to let 'go install -newflag pkg@version' work even + // across a toolchain switch. To make that work, assume by default that + // the pkg@version is the last argument and skip the remaining args unless + // we spot a plausible "-modcacherw" flag. + for len(args) > 0 { + a := args[0] + name, _, _ := strings.Cut(a, "=") + if name == "-modcacherw" || name == "--modcacherw" { + break + } + if len(args) == 1 && !strings.HasPrefix(a, "-") { + pkgArg = a + } + args = args[1:] + } + continue + } + + if bf, ok := f.Value.(interface{ IsBoolFlag() bool }); !ok || !bf.IsBoolFlag() { + // The next arg is the value for this flag. Skip it. 
+ args = args[1:] + continue + } + } + + if !strings.Contains(pkgArg, "@") || build.IsLocalImport(pkgArg) || filepath.IsAbs(pkgArg) { + return false + } + path, version, _ := strings.Cut(pkgArg, "@") + if path == "" || version == "" || gover.IsToolchain(path) { + return false + } + + if !modcacherwSeen && base.InGOFLAGS("-modcacherw") { + fs := flag.NewFlagSet("goInstallVersion", flag.ExitOnError) + fs.Var(modcacherwVal, "modcacherw", modcacherwFlag.Usage) + base.SetFromGOFLAGS(fs) + } + + // It would be correct to simply return true here, bypassing use + // of the current go.mod or go.work, and let "go run" or "go install" + // do the rest, including a toolchain switch. + // Our goal instead is, since we have gone to the trouble of handling + // unknown flags to some degree, to run the switch now, so that + // these commands can switch to a newer toolchain directed by the + // go.mod which may actually understand the flag. + // This was brought up during the go.dev/issue/57001 proposal discussion + // and may end up being common in self-contained "go install" or "go run" + // command lines if we add new flags in the future. + + // Set up modules without an explicit go.mod, to download go.mod. + modload.ForceUseModules = true + modload.RootMode = modload.NoRoot + modload.Init() + defer modload.Reset() + + // See internal/load.PackagesAndErrorsOutsideModule + ctx := context.Background() + allowed := modload.CheckAllowed + if modload.IsRevisionQuery(path, version) { + // Don't check for retractions if a specific revision is requested. + allowed = nil + } + noneSelected := func(path string) (version string) { return "none" } + _, err := modload.QueryPackages(ctx, path, version, noneSelected, allowed) + if errors.Is(err, gover.ErrTooNew) { + // Run early switch, same one go install or go run would eventually do, + // if it understood all the command-line flags. 
+ SwitchOrFatal(ctx, err) + } + + return true // pkg@version found +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/switch.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/switch.go new file mode 100644 index 0000000000000000000000000000000000000000..2c6a2b8f4363aa973f02261274ce75c99195fbca --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/switch.go @@ -0,0 +1,231 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package toolchain + +import ( + "context" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/gover" + "cmd/go/internal/modfetch" +) + +// A Switcher collects errors to be reported and then decides +// between reporting the errors or switching to a new toolchain +// to resolve them. +// +// The client calls [Switcher.Error] repeatedly with errors encountered +// and then calls [Switcher.Switch]. If the errors included any +// *gover.TooNewErrors (potentially wrapped) and switching is +// permitted by GOTOOLCHAIN, Switch switches to a new toolchain. +// Otherwise Switch prints all the errors using base.Error. +// +// See https://go.dev/doc/toolchain#switch. +type Switcher struct { + TooNew *gover.TooNewError // max go requirement observed + Errors []error // errors collected so far +} + +// Error reports the error to the Switcher, +// which saves it for processing during Switch. +func (s *Switcher) Error(err error) { + s.Errors = append(s.Errors, err) + s.addTooNew(err) +} + +// addTooNew adds any TooNew errors that can be found in err. 
+func (s *Switcher) addTooNew(err error) { + switch err := err.(type) { + case interface{ Unwrap() []error }: + for _, e := range err.Unwrap() { + s.addTooNew(e) + } + + case interface{ Unwrap() error }: + s.addTooNew(err.Unwrap()) + + case *gover.TooNewError: + if s.TooNew == nil || + gover.Compare(err.GoVersion, s.TooNew.GoVersion) > 0 || + gover.Compare(err.GoVersion, s.TooNew.GoVersion) == 0 && err.What < s.TooNew.What { + s.TooNew = err + } + } +} + +// NeedSwitch reports whether Switch would attempt to switch toolchains. +func (s *Switcher) NeedSwitch() bool { + return s.TooNew != nil && (HasAuto() || HasPath()) +} + +// Switch decides whether to switch to a newer toolchain +// to resolve any of the saved errors. +// It switches if toolchain switches are permitted and there is at least one TooNewError. +// +// If Switch decides not to switch toolchains, it prints the errors using base.Error and returns. +// +// If Switch decides to switch toolchains but cannot identify a toolchain to use. +// it prints the errors along with one more about not being able to find the toolchain +// and returns. +// +// Otherwise, Switch prints an informational message giving a reason for the +// switch and the toolchain being invoked and then switches toolchains. +// This operation never returns. +func (s *Switcher) Switch(ctx context.Context) { + if !s.NeedSwitch() { + for _, err := range s.Errors { + base.Error(err) + } + return + } + + // Switch to newer Go toolchain if necessary and possible. 
+ tv, err := NewerToolchain(ctx, s.TooNew.GoVersion) + if err != nil { + for _, err := range s.Errors { + base.Error(err) + } + base.Error(fmt.Errorf("switching to go >= %v: %w", s.TooNew.GoVersion, err)) + return + } + + fmt.Fprintf(os.Stderr, "go: %v requires go >= %v; switching to %v\n", s.TooNew.What, s.TooNew.GoVersion, tv) + Exec(tv) + panic("unreachable") +} + +// SwitchOrFatal attempts a toolchain switch based on the information in err +// and otherwise falls back to base.Fatal(err). +func SwitchOrFatal(ctx context.Context, err error) { + var s Switcher + s.Error(err) + s.Switch(ctx) + base.Exit() +} + +// NewerToolchain returns the name of the toolchain to use when we need +// to switch to a newer toolchain that must support at least the given Go version. +// See https://go.dev/doc/toolchain#switch. +// +// If the latest major release is 1.N.0, we use the latest patch release of 1.(N-1) if that's >= version. +// Otherwise we use the latest 1.N if that's allowed. +// Otherwise we use the latest release. +func NewerToolchain(ctx context.Context, version string) (string, error) { + fetch := autoToolchains + if !HasAuto() { + fetch = pathToolchains + } + list, err := fetch(ctx) + if err != nil { + return "", err + } + return newerToolchain(version, list) +} + +// autoToolchains returns the list of toolchain versions available to GOTOOLCHAIN=auto or =min+auto mode. +func autoToolchains(ctx context.Context) ([]string, error) { + var versions *modfetch.Versions + err := modfetch.TryProxies(func(proxy string) error { + v, err := modfetch.Lookup(ctx, proxy, "go").Versions(ctx, "") + if err != nil { + return err + } + versions = v + return nil + }) + if err != nil { + return nil, err + } + return versions.List, nil +} + +// pathToolchains returns the list of toolchain versions available to GOTOOLCHAIN=path or =min+path mode. 
+func pathToolchains(ctx context.Context) ([]string, error) { + have := make(map[string]bool) + var list []string + for _, dir := range pathDirs() { + if dir == "" || !filepath.IsAbs(dir) { + // Refuse to use local directories in $PATH (hard-coding exec.ErrDot). + continue + } + entries, err := os.ReadDir(dir) + if err != nil { + continue + } + for _, de := range entries { + if de.IsDir() || !strings.HasPrefix(de.Name(), "go1.") { + continue + } + info, err := de.Info() + if err != nil { + continue + } + v, ok := pathVersion(dir, de, info) + if !ok || !strings.HasPrefix(v, "1.") || have[v] { + continue + } + have[v] = true + list = append(list, v) + } + } + sort.Slice(list, func(i, j int) bool { + return gover.Compare(list[i], list[j]) < 0 + }) + return list, nil +} + +// newerToolchain implements NewerToolchain where the list of choices is known. +// It is separated out for easier testing of this logic. +func newerToolchain(need string, list []string) (string, error) { + // Consider each release in the list, from newest to oldest, + // considering only entries >= need and then only entries + // that are the latest in their language family + // (the latest 1.40, the latest 1.39, and so on). + // We prefer the latest patch release before the most recent release family, + // so if the latest release is 1.40.1 we'll take the latest 1.39.X. + // Failing that, we prefer the latest patch release before the most recent + // prerelease family, so if the latest release is 1.40rc1 is out but 1.39 is okay, + // we'll still take 1.39.X. + // Failing that we'll take the latest release. + latest := "" + for i := len(list) - 1; i >= 0; i-- { + v := list[i] + if gover.Compare(v, need) < 0 { + break + } + if gover.Lang(latest) == gover.Lang(v) { + continue + } + newer := latest + latest = v + if newer != "" && !gover.IsPrerelease(newer) { + // latest is the last patch release of Go 1.X, and we saw a non-prerelease of Go 1.(X+1), + // so latest is the one we want. 
+ break + } + } + if latest == "" { + return "", fmt.Errorf("no releases found for go >= %v", need) + } + return "go" + latest, nil +} + +// HasAuto reports whether the GOTOOLCHAIN setting allows "auto" upgrades. +func HasAuto() bool { + env := cfg.Getenv("GOTOOLCHAIN") + return env == "auto" || strings.HasSuffix(env, "+auto") +} + +// HasPath reports whether the GOTOOLCHAIN setting allows "path" upgrades. +func HasPath() bool { + env := cfg.Getenv("GOTOOLCHAIN") + return env == "path" || strings.HasSuffix(env, "+path") +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/toolchain_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/toolchain_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e8ed5664e11b9242c55ff3fe2d162d281522c9bd --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/toolchain_test.go @@ -0,0 +1,66 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package toolchain + +import ( + "strings" + "testing" +) + +func TestNewerToolchain(t *testing.T) { + for _, tt := range newerToolchainTests { + out, err := newerToolchain(tt.need, tt.list) + if (err != nil) != (out == "") { + t.Errorf("newerToolchain(%v, %v) = %v, %v, want error", tt.need, tt.list, out, err) + continue + } + if out != tt.out { + t.Errorf("newerToolchain(%v, %v) = %v, %v want %v, nil", tt.need, tt.list, out, err, tt.out) + } + } +} + +var f = strings.Fields + +var relRC = []string{"1.39.0", "1.39.1", "1.39.2", "1.40.0", "1.40.1", "1.40.2", "1.41rc1"} +var rel2 = []string{"1.39.0", "1.39.1", "1.39.2", "1.40.0", "1.40.1", "1.40.2"} +var rel0 = []string{"1.39.0", "1.39.1", "1.39.2", "1.40.0"} +var newerToolchainTests = []struct { + need string + list []string + out string +}{ + {"1.30", rel0, "go1.39.2"}, + {"1.30", rel2, "go1.39.2"}, + {"1.30", relRC, "go1.39.2"}, + {"1.38", rel0, "go1.39.2"}, + {"1.38", rel2, "go1.39.2"}, + {"1.38", relRC, "go1.39.2"}, + {"1.38.1", rel0, "go1.39.2"}, + {"1.38.1", rel2, "go1.39.2"}, + {"1.38.1", relRC, "go1.39.2"}, + {"1.39", rel0, "go1.39.2"}, + {"1.39", rel2, "go1.39.2"}, + {"1.39", relRC, "go1.39.2"}, + {"1.39.2", rel0, "go1.39.2"}, + {"1.39.2", rel2, "go1.39.2"}, + {"1.39.2", relRC, "go1.39.2"}, + {"1.39.3", rel0, "go1.40.0"}, + {"1.39.3", rel2, "go1.40.2"}, + {"1.39.3", relRC, "go1.40.2"}, + {"1.40", rel0, "go1.40.0"}, + {"1.40", rel2, "go1.40.2"}, + {"1.40", relRC, "go1.40.2"}, + {"1.40.1", rel0, ""}, + {"1.40.1", rel2, "go1.40.2"}, + {"1.40.1", relRC, "go1.40.2"}, + {"1.41", rel0, ""}, + {"1.41", rel2, ""}, + {"1.41", relRC, "go1.41rc1"}, + {"1.41.0", rel0, ""}, + {"1.41.0", rel2, ""}, + {"1.41.0", relRC, ""}, + {"1.40", nil, ""}, +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/umask_none.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/umask_none.go new file mode 100644 index 0000000000000000000000000000000000000000..b092fe8b7dd51bfd1f5d231f7bef90d94002ae13 --- 
/dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/umask_none.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !(darwin || freebsd || linux || netbsd || openbsd) + +package toolchain + +import "io/fs" + +func sysWriteBits() fs.FileMode { + return 0700 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/umask_unix.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/umask_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..cbe4307311f806a64f5c02d8cd7c5ce5c69b986f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/toolchain/umask_unix.go @@ -0,0 +1,28 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || freebsd || linux || netbsd || openbsd + +package toolchain + +import ( + "io/fs" + "syscall" +) + +// sysWriteBits determines which bits to OR into the mode to make a directory writable. +// It must be called when there are no other file system operations happening. +func sysWriteBits() fs.FileMode { + // Read current umask. There's no way to read it without also setting it, + // so set it conservatively and then restore the original one. 
+ m := syscall.Umask(0o777) + syscall.Umask(m) // restore bits + if m&0o22 == 0o22 { // group and world are unwritable by default + return 0o700 + } + if m&0o2 == 0o2 { // group is writable by default, but not world + return 0o770 + } + return 0o777 // everything is writable by default +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/trace/trace.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/trace/trace.go new file mode 100644 index 0000000000000000000000000000000000000000..f96aa40002e76f0769e5d4869c72a0bc1d05346f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/trace/trace.go @@ -0,0 +1,206 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "context" + "encoding/json" + "errors" + "internal/trace/traceviewer/format" + "os" + "strings" + "sync/atomic" + "time" +) + +// Constants used in event fields. +// See https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU +// for more details. +const ( + phaseDurationBegin = "B" + phaseDurationEnd = "E" + phaseFlowStart = "s" + phaseFlowEnd = "f" + + bindEnclosingSlice = "e" +) + +var traceStarted atomic.Bool + +func getTraceContext(ctx context.Context) (traceContext, bool) { + if !traceStarted.Load() { + return traceContext{}, false + } + v := ctx.Value(traceKey{}) + if v == nil { + return traceContext{}, false + } + return v.(traceContext), true +} + +// StartSpan starts a trace event with the given name. The Span ends when its Done method is called. 
+func StartSpan(ctx context.Context, name string) (context.Context, *Span) { + tc, ok := getTraceContext(ctx) + if !ok { + return ctx, nil + } + childSpan := &Span{t: tc.t, name: name, tid: tc.tid, start: time.Now()} + tc.t.writeEvent(&format.Event{ + Name: childSpan.name, + Time: float64(childSpan.start.UnixNano()) / float64(time.Microsecond), + TID: childSpan.tid, + Phase: phaseDurationBegin, + }) + ctx = context.WithValue(ctx, traceKey{}, traceContext{tc.t, tc.tid}) + return ctx, childSpan +} + +// StartGoroutine associates the context with a new Thread ID. The Chrome trace viewer associates each +// trace event with a thread, and doesn't expect events with the same thread id to happen at the +// same time. +func StartGoroutine(ctx context.Context) context.Context { + tc, ok := getTraceContext(ctx) + if !ok { + return ctx + } + return context.WithValue(ctx, traceKey{}, traceContext{tc.t, tc.t.getNextTID()}) +} + +// Flow marks a flow indicating that the 'to' span depends on the 'from' span. +// Flow should be called while the 'to' span is in progress. +func Flow(ctx context.Context, from *Span, to *Span) { + tc, ok := getTraceContext(ctx) + if !ok || from == nil || to == nil { + return + } + + id := tc.t.getNextFlowID() + tc.t.writeEvent(&format.Event{ + Name: from.name + " -> " + to.name, + Category: "flow", + ID: id, + Time: float64(from.end.UnixNano()) / float64(time.Microsecond), + Phase: phaseFlowStart, + TID: from.tid, + }) + tc.t.writeEvent(&format.Event{ + Name: from.name + " -> " + to.name, + Category: "flow", // TODO(matloob): Add Category to Flow? 
+ ID: id, + Time: float64(to.start.UnixNano()) / float64(time.Microsecond), + Phase: phaseFlowEnd, + TID: to.tid, + BindPoint: bindEnclosingSlice, + }) +} + +type Span struct { + t *tracer + + name string + tid uint64 + start time.Time + end time.Time +} + +func (s *Span) Done() { + if s == nil { + return + } + s.end = time.Now() + s.t.writeEvent(&format.Event{ + Name: s.name, + Time: float64(s.end.UnixNano()) / float64(time.Microsecond), + TID: s.tid, + Phase: phaseDurationEnd, + }) +} + +type tracer struct { + file chan traceFile // 1-buffered + + nextTID atomic.Uint64 + nextFlowID atomic.Uint64 +} + +func (t *tracer) writeEvent(ev *format.Event) error { + f := <-t.file + defer func() { t.file <- f }() + var err error + if f.entries == 0 { + _, err = f.sb.WriteString("[\n") + } else { + _, err = f.sb.WriteString(",") + } + f.entries++ + if err != nil { + return nil + } + + if err := f.enc.Encode(ev); err != nil { + return err + } + + // Write event string to output file. + _, err = f.f.WriteString(f.sb.String()) + f.sb.Reset() + return err +} + +func (t *tracer) Close() error { + f := <-t.file + defer func() { t.file <- f }() + + _, firstErr := f.f.WriteString("]") + if err := f.f.Close(); firstErr == nil { + firstErr = err + } + return firstErr +} + +func (t *tracer) getNextTID() uint64 { + return t.nextTID.Add(1) +} + +func (t *tracer) getNextFlowID() uint64 { + return t.nextFlowID.Add(1) +} + +// traceKey is the context key for tracing information. It is unexported to prevent collisions with context keys defined in +// other packages. +type traceKey struct{} + +type traceContext struct { + t *tracer + tid uint64 +} + +// Start starts a trace which writes to the given file. 
+func Start(ctx context.Context, file string) (context.Context, func() error, error) { + traceStarted.Store(true) + if file == "" { + return nil, nil, errors.New("no trace file supplied") + } + f, err := os.Create(file) + if err != nil { + return nil, nil, err + } + t := &tracer{file: make(chan traceFile, 1)} + sb := new(strings.Builder) + t.file <- traceFile{ + f: f, + sb: sb, + enc: json.NewEncoder(sb), + } + ctx = context.WithValue(ctx, traceKey{}, traceContext{t: t}) + return ctx, t.Close, nil +} + +type traceFile struct { + f *os.File + sb *strings.Builder + enc *json.Encoder + entries int64 +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vcs/discovery.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcs/discovery.go new file mode 100644 index 0000000000000000000000000000000000000000..327b44cb9afa8fcee402fbb6385b0dd822abf3de --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcs/discovery.go @@ -0,0 +1,97 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vcs + +import ( + "encoding/xml" + "fmt" + "io" + "strings" +) + +// charsetReader returns a reader that converts from the given charset to UTF-8. +// Currently it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful +// error which is printed by go get, so the user can find why the package +// wasn't downloaded if the encoding is not supported. Note that, in +// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters +// greater than 0x7f are not rejected). +func charsetReader(charset string, input io.Reader) (io.Reader, error) { + switch strings.ToLower(charset) { + case "utf-8", "ascii": + return input, nil + default: + return nil, fmt.Errorf("can't decode XML document using charset %q", charset) + } +} + +// parseMetaGoImports returns meta imports from the HTML in r. 
+// Parsing ends at the end of the section or the beginning of the . +func parseMetaGoImports(r io.Reader, mod ModuleMode) ([]metaImport, error) { + d := xml.NewDecoder(r) + d.CharsetReader = charsetReader + d.Strict = false + var imports []metaImport + for { + t, err := d.RawToken() + if err != nil { + if err != io.EOF && len(imports) == 0 { + return nil, err + } + break + } + if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") { + break + } + if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") { + break + } + e, ok := t.(xml.StartElement) + if !ok || !strings.EqualFold(e.Name.Local, "meta") { + continue + } + if attrValue(e.Attr, "name") != "go-import" { + continue + } + if f := strings.Fields(attrValue(e.Attr, "content")); len(f) == 3 { + imports = append(imports, metaImport{ + Prefix: f[0], + VCS: f[1], + RepoRoot: f[2], + }) + } + } + + // Extract mod entries if we are paying attention to them. + var list []metaImport + var have map[string]bool + if mod == PreferMod { + have = make(map[string]bool) + for _, m := range imports { + if m.VCS == "mod" { + have[m.Prefix] = true + list = append(list, m) + } + } + } + + // Append non-mod entries, ignoring those superseded by a mod entry. + for _, m := range imports { + if m.VCS != "mod" && !have[m.Prefix] { + list = append(list, m) + } + } + return list, nil +} + +// attrValue returns the attribute value for the case-insensitive key +// `name', or the empty string if nothing is found. 
+func attrValue(attrs []xml.Attr, name string) string { + for _, a := range attrs { + if strings.EqualFold(a.Name.Local, name) { + return a.Value + } + } + return "" +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vcs/discovery_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcs/discovery_test.go new file mode 100644 index 0000000000000000000000000000000000000000..eb99fdf64c141a91b702996cc730db5942714e98 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcs/discovery_test.go @@ -0,0 +1,110 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vcs + +import ( + "reflect" + "strings" + "testing" +) + +var parseMetaGoImportsTests = []struct { + in string + mod ModuleMode + out []metaImport +}{ + { + ``, + IgnoreMod, + []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}}, + }, + { + ` + `, + IgnoreMod, + []metaImport{ + {"foo/bar", "git", "https://github.com/rsc/foo/bar"}, + {"baz/quux", "git", "http://github.com/rsc/baz/quux"}, + }, + }, + { + ` + `, + IgnoreMod, + []metaImport{ + {"foo/bar", "git", "https://github.com/rsc/foo/bar"}, + }, + }, + { + ` + `, + IgnoreMod, + []metaImport{ + {"foo/bar", "git", "https://github.com/rsc/foo/bar"}, + }, + }, + { + ` + `, + PreferMod, + []metaImport{ + {"foo/bar", "mod", "http://github.com/rsc/baz/quux"}, + }, + }, + { + ` + + `, + IgnoreMod, + []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}}, + }, + { + ` + `, + IgnoreMod, + []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}}, + }, + { + ``, + IgnoreMod, + []metaImport{{"foo/bar", "git", "https://github.com/rsc/foo/bar"}}, + }, + { + // XML doesn't like
. + `Page Not Found
DRAFT
`, + IgnoreMod, + []metaImport{{"chitin.io/chitin", "git", "https://github.com/chitin-io/chitin"}}, + }, + { + ` + + `, + IgnoreMod, + []metaImport{{"myitcv.io", "git", "https://github.com/myitcv/x"}}, + }, + { + ` + + `, + PreferMod, + []metaImport{ + {"myitcv.io/blah2", "mod", "https://raw.githubusercontent.com/myitcv/pubx/master"}, + {"myitcv.io", "git", "https://github.com/myitcv/x"}, + }, + }, +} + +func TestParseMetaGoImports(t *testing.T) { + for i, tt := range parseMetaGoImportsTests { + out, err := parseMetaGoImports(strings.NewReader(tt.in), tt.mod) + if err != nil { + t.Errorf("test#%d: %v", i, err) + continue + } + if !reflect.DeepEqual(out, tt.out) { + t.Errorf("test#%d:\n\thave %q\n\twant %q", i, out, tt.out) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vcs/vcs.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcs/vcs.go new file mode 100644 index 0000000000000000000000000000000000000000..8550f2a560e4eb3001f80a88d8b05beee3c0eaf5 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcs/vcs.go @@ -0,0 +1,1655 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vcs + +import ( + "bytes" + "errors" + "fmt" + "internal/lazyregexp" + "internal/singleflight" + "io/fs" + "log" + urlpkg "net/url" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "cmd/go/internal/base" + "cmd/go/internal/cfg" + "cmd/go/internal/search" + "cmd/go/internal/str" + "cmd/go/internal/web" + + "golang.org/x/mod/module" +) + +// A Cmd describes how to use a version control system +// like Mercurial, Git, or Subversion. 
+type Cmd struct { + Name string + Cmd string // name of binary to invoke command + RootNames []rootName // filename and mode indicating the root of a checkout directory + + CreateCmd []string // commands to download a fresh copy of a repository + DownloadCmd []string // commands to download updates into an existing repository + + TagCmd []tagCmd // commands to list tags + TagLookupCmd []tagCmd // commands to lookup tags before running tagSyncCmd + TagSyncCmd []string // commands to sync to specific tag + TagSyncDefault []string // commands to sync to default tag + + Scheme []string + PingCmd string + + RemoteRepo func(v *Cmd, rootDir string) (remoteRepo string, err error) + ResolveRepo func(v *Cmd, rootDir, remoteRepo string) (realRepo string, err error) + Status func(v *Cmd, rootDir string) (Status, error) +} + +// Status is the current state of a local repository. +type Status struct { + Revision string // Optional. + CommitTime time.Time // Optional. + Uncommitted bool // Required. +} + +var ( + // VCSTestRepoURL is the URL of the HTTP server that serves the repos for + // vcs-test.golang.org. + // + // In tests, this is set to the URL of an httptest.Server hosting a + // cmd/go/internal/vcweb.Server. + VCSTestRepoURL string + + // VCSTestHosts is the set of hosts supported by the vcs-test server. + VCSTestHosts []string + + // VCSTestIsLocalHost reports whether the given URL refers to a local + // (loopback) host, such as "localhost" or "127.0.0.1:8080". + VCSTestIsLocalHost func(*urlpkg.URL) bool +) + +var defaultSecureScheme = map[string]bool{ + "https": true, + "git+ssh": true, + "bzr+ssh": true, + "svn+ssh": true, + "ssh": true, +} + +func (v *Cmd) IsSecure(repo string) bool { + u, err := urlpkg.Parse(repo) + if err != nil { + // If repo is not a URL, it's not secure. + return false + } + if VCSTestRepoURL != "" && web.IsLocalHost(u) { + // If the vcstest server is in use, it may redirect to other local ports for + // other protocols (such as svn). 
Assume that all loopback addresses are + // secure during testing. + return true + } + return v.isSecureScheme(u.Scheme) +} + +func (v *Cmd) isSecureScheme(scheme string) bool { + switch v.Cmd { + case "git": + // GIT_ALLOW_PROTOCOL is an environment variable defined by Git. It is a + // colon-separated list of schemes that are allowed to be used with git + // fetch/clone. Any scheme not mentioned will be considered insecure. + if allow := os.Getenv("GIT_ALLOW_PROTOCOL"); allow != "" { + for _, s := range strings.Split(allow, ":") { + if s == scheme { + return true + } + } + return false + } + } + return defaultSecureScheme[scheme] +} + +// A tagCmd describes a command to list available tags +// that can be passed to tagSyncCmd. +type tagCmd struct { + cmd string // command to list tags + pattern string // regexp to extract tags from list +} + +// vcsList lists the known version control systems +var vcsList = []*Cmd{ + vcsHg, + vcsGit, + vcsSvn, + vcsBzr, + vcsFossil, +} + +// vcsMod is a stub for the "mod" scheme. It's returned by +// repoRootForImportPathDynamic, but is otherwise not treated as a VCS command. +var vcsMod = &Cmd{Name: "mod"} + +// vcsByCmd returns the version control system for the given +// command name (hg, git, svn, bzr). +func vcsByCmd(cmd string) *Cmd { + for _, vcs := range vcsList { + if vcs.Cmd == cmd { + return vcs + } + } + return nil +} + +// vcsHg describes how to use Mercurial. +var vcsHg = &Cmd{ + Name: "Mercurial", + Cmd: "hg", + RootNames: []rootName{ + {filename: ".hg", isDir: true}, + }, + + CreateCmd: []string{"clone -U -- {repo} {dir}"}, + DownloadCmd: []string{"pull"}, + + // We allow both tag and branch names as 'tags' + // for selecting a version. This lets people have + // a go.release.r60 branch and a go1 branch + // and make changes in both, without constantly + // editing .hgtags. 
+ TagCmd: []tagCmd{ + {"tags", `^(\S+)`}, + {"branches", `^(\S+)`}, + }, + TagSyncCmd: []string{"update -r {tag}"}, + TagSyncDefault: []string{"update default"}, + + Scheme: []string{"https", "http", "ssh"}, + PingCmd: "identify -- {scheme}://{repo}", + RemoteRepo: hgRemoteRepo, + Status: hgStatus, +} + +func hgRemoteRepo(vcsHg *Cmd, rootDir string) (remoteRepo string, err error) { + out, err := vcsHg.runOutput(rootDir, "paths default") + if err != nil { + return "", err + } + return strings.TrimSpace(string(out)), nil +} + +func hgStatus(vcsHg *Cmd, rootDir string) (Status, error) { + // Output changeset ID and seconds since epoch. + out, err := vcsHg.runOutputVerboseOnly(rootDir, `log -l1 -T {node}:{date|hgdate}`) + if err != nil { + return Status{}, err + } + + // Successful execution without output indicates an empty repo (no commits). + var rev string + var commitTime time.Time + if len(out) > 0 { + // Strip trailing timezone offset. + if i := bytes.IndexByte(out, ' '); i > 0 { + out = out[:i] + } + rev, commitTime, err = parseRevTime(out) + if err != nil { + return Status{}, err + } + } + + // Also look for untracked files. + out, err = vcsHg.runOutputVerboseOnly(rootDir, "status") + if err != nil { + return Status{}, err + } + uncommitted := len(out) > 0 + + return Status{ + Revision: rev, + CommitTime: commitTime, + Uncommitted: uncommitted, + }, nil +} + +// parseRevTime parses commit details in "revision:seconds" format. +func parseRevTime(out []byte) (string, time.Time, error) { + buf := string(bytes.TrimSpace(out)) + + i := strings.IndexByte(buf, ':') + if i < 1 { + return "", time.Time{}, errors.New("unrecognized VCS tool output") + } + rev := buf[:i] + + secs, err := strconv.ParseInt(string(buf[i+1:]), 10, 64) + if err != nil { + return "", time.Time{}, fmt.Errorf("unrecognized VCS tool output: %v", err) + } + + return rev, time.Unix(secs, 0), nil +} + +// vcsGit describes how to use Git. 
+var vcsGit = &Cmd{ + Name: "Git", + Cmd: "git", + RootNames: []rootName{ + {filename: ".git", isDir: true}, + }, + + CreateCmd: []string{"clone -- {repo} {dir}", "-go-internal-cd {dir} submodule update --init --recursive"}, + DownloadCmd: []string{"pull --ff-only", "submodule update --init --recursive"}, + + TagCmd: []tagCmd{ + // tags/xxx matches a git tag named xxx + // origin/xxx matches a git branch named xxx on the default remote repository + {"show-ref", `(?:tags|origin)/(\S+)$`}, + }, + TagLookupCmd: []tagCmd{ + {"show-ref tags/{tag} origin/{tag}", `((?:tags|origin)/\S+)$`}, + }, + TagSyncCmd: []string{"checkout {tag}", "submodule update --init --recursive"}, + // both createCmd and downloadCmd update the working dir. + // No need to do more here. We used to 'checkout master' + // but that doesn't work if the default branch is not named master. + // DO NOT add 'checkout master' here. + // See golang.org/issue/9032. + TagSyncDefault: []string{"submodule update --init --recursive"}, + + Scheme: []string{"git", "https", "http", "git+ssh", "ssh"}, + + // Leave out the '--' separator in the ls-remote command: git 2.7.4 does not + // support such a separator for that command, and this use should be safe + // without it because the {scheme} value comes from the predefined list above. + // See golang.org/issue/33836. + PingCmd: "ls-remote {scheme}://{repo}", + + RemoteRepo: gitRemoteRepo, + Status: gitStatus, +} + +// scpSyntaxRe matches the SCP-like addresses used by Git to access +// repositories by SSH. 
+var scpSyntaxRe = lazyregexp.New(`^(\w+)@([\w.-]+):(.*)$`) + +func gitRemoteRepo(vcsGit *Cmd, rootDir string) (remoteRepo string, err error) { + const cmd = "config remote.origin.url" + outb, err := vcsGit.run1(rootDir, cmd, nil, false) + if err != nil { + // if it doesn't output any message, it means the config argument is correct, + // but the config value itself doesn't exist + if outb != nil && len(outb) == 0 { + return "", errors.New("remote origin not found") + } + return "", err + } + out := strings.TrimSpace(string(outb)) + + var repoURL *urlpkg.URL + if m := scpSyntaxRe.FindStringSubmatch(out); m != nil { + // Match SCP-like syntax and convert it to a URL. + // Eg, "git@github.com:user/repo" becomes + // "ssh://git@github.com/user/repo". + repoURL = &urlpkg.URL{ + Scheme: "ssh", + User: urlpkg.User(m[1]), + Host: m[2], + Path: m[3], + } + } else { + repoURL, err = urlpkg.Parse(out) + if err != nil { + return "", err + } + } + + // Iterate over insecure schemes too, because this function simply + // reports the state of the repo. If we can't see insecure schemes then + // we can't report the actual repo URL. + for _, s := range vcsGit.Scheme { + if repoURL.Scheme == s { + return repoURL.String(), nil + } + } + return "", errors.New("unable to parse output of git " + cmd) +} + +func gitStatus(vcsGit *Cmd, rootDir string) (Status, error) { + out, err := vcsGit.runOutputVerboseOnly(rootDir, "status --porcelain") + if err != nil { + return Status{}, err + } + uncommitted := len(out) > 0 + + // "git status" works for empty repositories, but "git show" does not. + // Assume there are no commits in the repo when "git show" fails with + // uncommitted files and skip tagging revision / committime. 
+ var rev string + var commitTime time.Time + out, err = vcsGit.runOutputVerboseOnly(rootDir, "-c log.showsignature=false show -s --format=%H:%ct") + if err != nil && !uncommitted { + return Status{}, err + } else if err == nil { + rev, commitTime, err = parseRevTime(out) + if err != nil { + return Status{}, err + } + } + + return Status{ + Revision: rev, + CommitTime: commitTime, + Uncommitted: uncommitted, + }, nil +} + +// vcsBzr describes how to use Bazaar. +var vcsBzr = &Cmd{ + Name: "Bazaar", + Cmd: "bzr", + RootNames: []rootName{ + {filename: ".bzr", isDir: true}, + }, + + CreateCmd: []string{"branch -- {repo} {dir}"}, + + // Without --overwrite bzr will not pull tags that changed. + // Replace by --overwrite-tags after http://pad.lv/681792 goes in. + DownloadCmd: []string{"pull --overwrite"}, + + TagCmd: []tagCmd{{"tags", `^(\S+)`}}, + TagSyncCmd: []string{"update -r {tag}"}, + TagSyncDefault: []string{"update -r revno:-1"}, + + Scheme: []string{"https", "http", "bzr", "bzr+ssh"}, + PingCmd: "info -- {scheme}://{repo}", + RemoteRepo: bzrRemoteRepo, + ResolveRepo: bzrResolveRepo, + Status: bzrStatus, +} + +func bzrRemoteRepo(vcsBzr *Cmd, rootDir string) (remoteRepo string, err error) { + outb, err := vcsBzr.runOutput(rootDir, "config parent_location") + if err != nil { + return "", err + } + return strings.TrimSpace(string(outb)), nil +} + +func bzrResolveRepo(vcsBzr *Cmd, rootDir, remoteRepo string) (realRepo string, err error) { + outb, err := vcsBzr.runOutput(rootDir, "info "+remoteRepo) + if err != nil { + return "", err + } + out := string(outb) + + // Expect: + // ... + // (branch root|repository branch): + // ... 
+ + found := false + for _, prefix := range []string{"\n branch root: ", "\n repository branch: "} { + i := strings.Index(out, prefix) + if i >= 0 { + out = out[i+len(prefix):] + found = true + break + } + } + if !found { + return "", fmt.Errorf("unable to parse output of bzr info") + } + + i := strings.Index(out, "\n") + if i < 0 { + return "", fmt.Errorf("unable to parse output of bzr info") + } + out = out[:i] + return strings.TrimSpace(out), nil +} + +func bzrStatus(vcsBzr *Cmd, rootDir string) (Status, error) { + outb, err := vcsBzr.runOutputVerboseOnly(rootDir, "version-info") + if err != nil { + return Status{}, err + } + out := string(outb) + + // Expect (non-empty repositories only): + // + // revision-id: gopher@gopher.net-20211021072330-qshok76wfypw9lpm + // date: 2021-09-21 12:00:00 +1000 + // ... + var rev string + var commitTime time.Time + + for _, line := range strings.Split(out, "\n") { + i := strings.IndexByte(line, ':') + if i < 0 { + continue + } + key := line[:i] + value := strings.TrimSpace(line[i+1:]) + + switch key { + case "revision-id": + rev = value + case "date": + var err error + commitTime, err = time.Parse("2006-01-02 15:04:05 -0700", value) + if err != nil { + return Status{}, errors.New("unable to parse output of bzr version-info") + } + } + } + + outb, err = vcsBzr.runOutputVerboseOnly(rootDir, "status") + if err != nil { + return Status{}, err + } + + // Skip warning when working directory is set to an older revision. + if bytes.HasPrefix(outb, []byte("working tree is out of date")) { + i := bytes.IndexByte(outb, '\n') + if i < 0 { + i = len(outb) + } + outb = outb[:i] + } + uncommitted := len(outb) > 0 + + return Status{ + Revision: rev, + CommitTime: commitTime, + Uncommitted: uncommitted, + }, nil +} + +// vcsSvn describes how to use Subversion. 
+var vcsSvn = &Cmd{ + Name: "Subversion", + Cmd: "svn", + RootNames: []rootName{ + {filename: ".svn", isDir: true}, + }, + + CreateCmd: []string{"checkout -- {repo} {dir}"}, + DownloadCmd: []string{"update"}, + + // There is no tag command in subversion. + // The branch information is all in the path names. + + Scheme: []string{"https", "http", "svn", "svn+ssh"}, + PingCmd: "info -- {scheme}://{repo}", + RemoteRepo: svnRemoteRepo, +} + +func svnRemoteRepo(vcsSvn *Cmd, rootDir string) (remoteRepo string, err error) { + outb, err := vcsSvn.runOutput(rootDir, "info") + if err != nil { + return "", err + } + out := string(outb) + + // Expect: + // + // ... + // URL: + // ... + // + // Note that we're not using the Repository Root line, + // because svn allows checking out subtrees. + // The URL will be the URL of the subtree (what we used with 'svn co') + // while the Repository Root may be a much higher parent. + i := strings.Index(out, "\nURL: ") + if i < 0 { + return "", fmt.Errorf("unable to parse output of svn info") + } + out = out[i+len("\nURL: "):] + i = strings.Index(out, "\n") + if i < 0 { + return "", fmt.Errorf("unable to parse output of svn info") + } + out = out[:i] + return strings.TrimSpace(out), nil +} + +// fossilRepoName is the name go get associates with a fossil repository. In the +// real world the file can be named anything. 
+const fossilRepoName = ".fossil" + +// vcsFossil describes how to use Fossil (fossil-scm.org) +var vcsFossil = &Cmd{ + Name: "Fossil", + Cmd: "fossil", + RootNames: []rootName{ + {filename: ".fslckout", isDir: false}, + {filename: "_FOSSIL_", isDir: false}, + }, + + CreateCmd: []string{"-go-internal-mkdir {dir} clone -- {repo} " + filepath.Join("{dir}", fossilRepoName), "-go-internal-cd {dir} open .fossil"}, + DownloadCmd: []string{"up"}, + + TagCmd: []tagCmd{{"tag ls", `(.*)`}}, + TagSyncCmd: []string{"up tag:{tag}"}, + TagSyncDefault: []string{"up trunk"}, + + Scheme: []string{"https", "http"}, + RemoteRepo: fossilRemoteRepo, + Status: fossilStatus, +} + +func fossilRemoteRepo(vcsFossil *Cmd, rootDir string) (remoteRepo string, err error) { + out, err := vcsFossil.runOutput(rootDir, "remote-url") + if err != nil { + return "", err + } + return strings.TrimSpace(string(out)), nil +} + +var errFossilInfo = errors.New("unable to parse output of fossil info") + +func fossilStatus(vcsFossil *Cmd, rootDir string) (Status, error) { + outb, err := vcsFossil.runOutputVerboseOnly(rootDir, "info") + if err != nil { + return Status{}, err + } + out := string(outb) + + // Expect: + // ... + // checkout: 91ed71f22c77be0c3e250920f47bfd4e1f9024d2 2021-09-21 12:00:00 UTC + // ... + + // Extract revision and commit time. + // Ensure line ends with UTC (known timezone offset). 
+ const prefix = "\ncheckout:" + const suffix = " UTC" + i := strings.Index(out, prefix) + if i < 0 { + return Status{}, errFossilInfo + } + checkout := out[i+len(prefix):] + i = strings.Index(checkout, suffix) + if i < 0 { + return Status{}, errFossilInfo + } + checkout = strings.TrimSpace(checkout[:i]) + + i = strings.IndexByte(checkout, ' ') + if i < 0 { + return Status{}, errFossilInfo + } + rev := checkout[:i] + + commitTime, err := time.ParseInLocation(time.DateTime, checkout[i+1:], time.UTC) + if err != nil { + return Status{}, fmt.Errorf("%v: %v", errFossilInfo, err) + } + + // Also look for untracked changes. + outb, err = vcsFossil.runOutputVerboseOnly(rootDir, "changes --differ") + if err != nil { + return Status{}, err + } + uncommitted := len(outb) > 0 + + return Status{ + Revision: rev, + CommitTime: commitTime, + Uncommitted: uncommitted, + }, nil +} + +func (v *Cmd) String() string { + return v.Name +} + +// run runs the command line cmd in the given directory. +// keyval is a list of key, value pairs. run expands +// instances of {key} in cmd into value, but only after +// splitting cmd into individual arguments. +// If an error occurs, run prints the command line and the +// command's combined stdout+stderr to standard error. +// Otherwise run discards the command's output. +func (v *Cmd) run(dir string, cmd string, keyval ...string) error { + _, err := v.run1(dir, cmd, keyval, true) + return err +} + +// runVerboseOnly is like run but only generates error output to standard error in verbose mode. +func (v *Cmd) runVerboseOnly(dir string, cmd string, keyval ...string) error { + _, err := v.run1(dir, cmd, keyval, false) + return err +} + +// runOutput is like run but returns the output of the command. +func (v *Cmd) runOutput(dir string, cmd string, keyval ...string) ([]byte, error) { + return v.run1(dir, cmd, keyval, true) +} + +// runOutputVerboseOnly is like runOutput but only generates error output to +// standard error in verbose mode. 
+func (v *Cmd) runOutputVerboseOnly(dir string, cmd string, keyval ...string) ([]byte, error) { + return v.run1(dir, cmd, keyval, false) +} + +// run1 is the generalized implementation of run and runOutput. +func (v *Cmd) run1(dir string, cmdline string, keyval []string, verbose bool) ([]byte, error) { + m := make(map[string]string) + for i := 0; i < len(keyval); i += 2 { + m[keyval[i]] = keyval[i+1] + } + args := strings.Fields(cmdline) + for i, arg := range args { + args[i] = expand(m, arg) + } + + if len(args) >= 2 && args[0] == "-go-internal-mkdir" { + var err error + if filepath.IsAbs(args[1]) { + err = os.Mkdir(args[1], fs.ModePerm) + } else { + err = os.Mkdir(filepath.Join(dir, args[1]), fs.ModePerm) + } + if err != nil { + return nil, err + } + args = args[2:] + } + + if len(args) >= 2 && args[0] == "-go-internal-cd" { + if filepath.IsAbs(args[1]) { + dir = args[1] + } else { + dir = filepath.Join(dir, args[1]) + } + args = args[2:] + } + + _, err := cfg.LookPath(v.Cmd) + if err != nil { + fmt.Fprintf(os.Stderr, + "go: missing %s command. See https://golang.org/s/gogetcmd\n", + v.Name) + return nil, err + } + + cmd := exec.Command(v.Cmd, args...) + cmd.Dir = dir + if cfg.BuildX { + fmt.Fprintf(os.Stderr, "cd %s\n", dir) + fmt.Fprintf(os.Stderr, "%s %s\n", v.Cmd, strings.Join(args, " ")) + } + out, err := cmd.Output() + if err != nil { + if verbose || cfg.BuildV { + fmt.Fprintf(os.Stderr, "# cd %s; %s %s\n", dir, v.Cmd, strings.Join(args, " ")) + if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { + os.Stderr.Write(ee.Stderr) + } else { + fmt.Fprintln(os.Stderr, err.Error()) + } + } + } + return out, err +} + +// Ping pings to determine scheme to use. +func (v *Cmd) Ping(scheme, repo string) error { + // Run the ping command in an arbitrary working directory, + // but don't let the current working directory pollute the results. 
+ // In module mode, we expect GOMODCACHE to exist and be a safe place for + // commands; in GOPATH mode, we expect that to be true of GOPATH/src. + dir := cfg.GOMODCACHE + if !cfg.ModulesEnabled { + dir = filepath.Join(cfg.BuildContext.GOPATH, "src") + } + os.MkdirAll(dir, 0777) // Ignore errors — if unsuccessful, the command will likely fail. + + release, err := base.AcquireNet() + if err != nil { + return err + } + defer release() + + return v.runVerboseOnly(dir, v.PingCmd, "scheme", scheme, "repo", repo) +} + +// Create creates a new copy of repo in dir. +// The parent of dir must exist; dir must not. +func (v *Cmd) Create(dir, repo string) error { + release, err := base.AcquireNet() + if err != nil { + return err + } + defer release() + + for _, cmd := range v.CreateCmd { + if err := v.run(filepath.Dir(dir), cmd, "dir", dir, "repo", repo); err != nil { + return err + } + } + return nil +} + +// Download downloads any new changes for the repo in dir. +func (v *Cmd) Download(dir string) error { + release, err := base.AcquireNet() + if err != nil { + return err + } + defer release() + + for _, cmd := range v.DownloadCmd { + if err := v.run(dir, cmd); err != nil { + return err + } + } + return nil +} + +// Tags returns the list of available tags for the repo in dir. +func (v *Cmd) Tags(dir string) ([]string, error) { + var tags []string + for _, tc := range v.TagCmd { + out, err := v.runOutput(dir, tc.cmd) + if err != nil { + return nil, err + } + re := regexp.MustCompile(`(?m-s)` + tc.pattern) + for _, m := range re.FindAllStringSubmatch(string(out), -1) { + tags = append(tags, m[1]) + } + } + return tags, nil +} + +// TagSync syncs the repo in dir to the named tag, +// which either is a tag returned by tags or is v.tagDefault. 
+func (v *Cmd) TagSync(dir, tag string) error { + if v.TagSyncCmd == nil { + return nil + } + if tag != "" { + for _, tc := range v.TagLookupCmd { + out, err := v.runOutput(dir, tc.cmd, "tag", tag) + if err != nil { + return err + } + re := regexp.MustCompile(`(?m-s)` + tc.pattern) + m := re.FindStringSubmatch(string(out)) + if len(m) > 1 { + tag = m[1] + break + } + } + } + + release, err := base.AcquireNet() + if err != nil { + return err + } + defer release() + + if tag == "" && v.TagSyncDefault != nil { + for _, cmd := range v.TagSyncDefault { + if err := v.run(dir, cmd); err != nil { + return err + } + } + return nil + } + + for _, cmd := range v.TagSyncCmd { + if err := v.run(dir, cmd, "tag", tag); err != nil { + return err + } + } + return nil +} + +// A vcsPath describes how to convert an import path into a +// version control system and repository name. +type vcsPath struct { + pathPrefix string // prefix this description applies to + regexp *lazyregexp.Regexp // compiled pattern for import path + repo string // repository to use (expand with match of re) + vcs string // version control system to use (expand with match of re) + check func(match map[string]string) error // additional checks + schemelessRepo bool // if true, the repo pattern lacks a scheme +} + +// FromDir inspects dir and its parents to determine the +// version control system and code repository to use. +// If no repository is found, FromDir returns an error +// equivalent to os.ErrNotExist. +func FromDir(dir, srcRoot string, allowNesting bool) (repoDir string, vcsCmd *Cmd, err error) { + // Clean and double-check that dir is in (a subdirectory of) srcRoot. 
+ dir = filepath.Clean(dir) + if srcRoot != "" { + srcRoot = filepath.Clean(srcRoot) + if len(dir) <= len(srcRoot) || dir[len(srcRoot)] != filepath.Separator { + return "", nil, fmt.Errorf("directory %q is outside source root %q", dir, srcRoot) + } + } + + origDir := dir + for len(dir) > len(srcRoot) { + for _, vcs := range vcsList { + if isVCSRoot(dir, vcs.RootNames) { + // Record first VCS we find. + // If allowNesting is false (as it is in GOPATH), keep looking for + // repositories in parent directories and report an error if one is + // found to mitigate VCS injection attacks. + if vcsCmd == nil { + vcsCmd = vcs + repoDir = dir + if allowNesting { + return repoDir, vcsCmd, nil + } + continue + } + // Otherwise, we have one VCS inside a different VCS. + return "", nil, fmt.Errorf("directory %q uses %s, but parent %q uses %s", + repoDir, vcsCmd.Cmd, dir, vcs.Cmd) + } + } + + // Move to parent. + ndir := filepath.Dir(dir) + if len(ndir) >= len(dir) { + break + } + dir = ndir + } + if vcsCmd == nil { + return "", nil, &vcsNotFoundError{dir: origDir} + } + return repoDir, vcsCmd, nil +} + +// isVCSRoot identifies a VCS root by checking whether the directory contains +// any of the listed root names. +func isVCSRoot(dir string, rootNames []rootName) bool { + for _, root := range rootNames { + fi, err := os.Stat(filepath.Join(dir, root.filename)) + if err == nil && fi.IsDir() == root.isDir { + return true + } + } + + return false +} + +type rootName struct { + filename string + isDir bool +} + +type vcsNotFoundError struct { + dir string +} + +func (e *vcsNotFoundError) Error() string { + return fmt.Sprintf("directory %q is not using a known version control system", e.dir) +} + +func (e *vcsNotFoundError) Is(err error) bool { + return err == os.ErrNotExist +} + +// A govcsRule is a single GOVCS rule like private:hg|svn. +type govcsRule struct { + pattern string + allowed []string +} + +// A govcsConfig is a full GOVCS configuration. 
+type govcsConfig []govcsRule + +func parseGOVCS(s string) (govcsConfig, error) { + s = strings.TrimSpace(s) + if s == "" { + return nil, nil + } + var cfg govcsConfig + have := make(map[string]string) + for _, item := range strings.Split(s, ",") { + item = strings.TrimSpace(item) + if item == "" { + return nil, fmt.Errorf("empty entry in GOVCS") + } + pattern, list, found := strings.Cut(item, ":") + if !found { + return nil, fmt.Errorf("malformed entry in GOVCS (missing colon): %q", item) + } + pattern, list = strings.TrimSpace(pattern), strings.TrimSpace(list) + if pattern == "" { + return nil, fmt.Errorf("empty pattern in GOVCS: %q", item) + } + if list == "" { + return nil, fmt.Errorf("empty VCS list in GOVCS: %q", item) + } + if search.IsRelativePath(pattern) { + return nil, fmt.Errorf("relative pattern not allowed in GOVCS: %q", pattern) + } + if old := have[pattern]; old != "" { + return nil, fmt.Errorf("unreachable pattern in GOVCS: %q after %q", item, old) + } + have[pattern] = item + allowed := strings.Split(list, "|") + for i, a := range allowed { + a = strings.TrimSpace(a) + if a == "" { + return nil, fmt.Errorf("empty VCS name in GOVCS: %q", item) + } + allowed[i] = a + } + cfg = append(cfg, govcsRule{pattern, allowed}) + } + return cfg, nil +} + +func (c *govcsConfig) allow(path string, private bool, vcs string) bool { + for _, rule := range *c { + match := false + switch rule.pattern { + case "private": + match = private + case "public": + match = !private + default: + // Note: rule.pattern is known to be comma-free, + // so MatchPrefixPatterns is only matching a single pattern for us. + match = module.MatchPrefixPatterns(rule.pattern, path) + } + if !match { + continue + } + for _, allow := range rule.allowed { + if allow == vcs || allow == "all" { + return true + } + } + return false + } + + // By default, nothing is allowed. 
+ return false +} + +var ( + govcs govcsConfig + govcsErr error + govcsOnce sync.Once +) + +// defaultGOVCS is the default setting for GOVCS. +// Setting GOVCS adds entries ahead of these but does not remove them. +// (They are appended to the parsed GOVCS setting.) +// +// The rationale behind allowing only Git and Mercurial is that +// these two systems have had the most attention to issues +// of being run as clients of untrusted servers. In contrast, +// Bazaar, Fossil, and Subversion have primarily been used +// in trusted, authenticated environments and are not as well +// scrutinized as attack surfaces. +// +// See golang.org/issue/41730 for details. +var defaultGOVCS = govcsConfig{ + {"private", []string{"all"}}, + {"public", []string{"git", "hg"}}, +} + +// checkGOVCS checks whether the policy defined by the environment variable +// GOVCS allows the given vcs command to be used with the given repository +// root path. Note that root may not be a real package or module path; it's +// the same as the root path in the go-import meta tag. +func checkGOVCS(vcs *Cmd, root string) error { + if vcs == vcsMod { + // Direct module (proxy protocol) fetches don't + // involve an external version control system + // and are always allowed. + return nil + } + + govcsOnce.Do(func() { + govcs, govcsErr = parseGOVCS(os.Getenv("GOVCS")) + govcs = append(govcs, defaultGOVCS...) + }) + if govcsErr != nil { + return govcsErr + } + + private := module.MatchPrefixPatterns(cfg.GOPRIVATE, root) + if !govcs.allow(root, private, vcs.Cmd) { + what := "public" + if private { + what = "private" + } + return fmt.Errorf("GOVCS disallows using %s for %s %s; see 'go help vcs'", vcs.Cmd, what, root) + } + + return nil +} + +// RepoRoot describes the repository root for a tree of source code. 
+type RepoRoot struct { + Repo string // repository URL, including scheme + Root string // import path corresponding to root of repo + IsCustom bool // defined by served tags (as opposed to hard-coded pattern) + VCS *Cmd +} + +func httpPrefix(s string) string { + for _, prefix := range [...]string{"http:", "https:"} { + if strings.HasPrefix(s, prefix) { + return prefix + } + } + return "" +} + +// ModuleMode specifies whether to prefer modules when looking up code sources. +type ModuleMode int + +const ( + IgnoreMod ModuleMode = iota + PreferMod +) + +// RepoRootForImportPath analyzes importPath to determine the +// version control system, and code repository to use. +func RepoRootForImportPath(importPath string, mod ModuleMode, security web.SecurityMode) (*RepoRoot, error) { + rr, err := repoRootFromVCSPaths(importPath, security, vcsPaths) + if err == errUnknownSite { + rr, err = repoRootForImportDynamic(importPath, mod, security) + if err != nil { + err = importErrorf(importPath, "unrecognized import path %q: %v", importPath, err) + } + } + if err != nil { + rr1, err1 := repoRootFromVCSPaths(importPath, security, vcsPathsAfterDynamic) + if err1 == nil { + rr = rr1 + err = nil + } + } + + // Should have been taken care of above, but make sure. + if err == nil && strings.Contains(importPath, "...") && strings.Contains(rr.Root, "...") { + // Do not allow wildcards in the repo root. + rr = nil + err = importErrorf(importPath, "cannot expand ... in %q", importPath) + } + return rr, err +} + +var errUnknownSite = errors.New("dynamic lookup required to find mapping") + +// repoRootFromVCSPaths attempts to map importPath to a repoRoot +// using the mappings defined in vcsPaths. 
+func repoRootFromVCSPaths(importPath string, security web.SecurityMode, vcsPaths []*vcsPath) (*RepoRoot, error) { + if str.HasPathPrefix(importPath, "example.net") { + // TODO(rsc): This should not be necessary, but it's required to keep + // tests like ../../testdata/script/mod_get_extra.txt from using the network. + // That script has everything it needs in the replacement set, but it is still + // doing network calls. + return nil, fmt.Errorf("no modules on example.net") + } + if importPath == "rsc.io" { + // This special case allows tests like ../../testdata/script/govcs.txt + // to avoid making any network calls. The module lookup for a path + // like rsc.io/nonexist.svn/foo needs to not make a network call for + // a lookup on rsc.io. + return nil, fmt.Errorf("rsc.io is not a module") + } + // A common error is to use https://packagepath because that's what + // hg and git require. Diagnose this helpfully. + if prefix := httpPrefix(importPath); prefix != "" { + // The importPath has been cleaned, so has only one slash. The pattern + // ignores the slashes; the error message puts them back on the RHS at least. + return nil, fmt.Errorf("%q not allowed in import path", prefix+"//") + } + for _, srv := range vcsPaths { + if !str.HasPathPrefix(importPath, srv.pathPrefix) { + continue + } + m := srv.regexp.FindStringSubmatch(importPath) + if m == nil { + if srv.pathPrefix != "" { + return nil, importErrorf(importPath, "invalid %s import path %q", srv.pathPrefix, importPath) + } + continue + } + + // Build map of named subexpression matches for expand. 
+ match := map[string]string{ + "prefix": srv.pathPrefix + "/", + "import": importPath, + } + for i, name := range srv.regexp.SubexpNames() { + if name != "" && match[name] == "" { + match[name] = m[i] + } + } + if srv.vcs != "" { + match["vcs"] = expand(match, srv.vcs) + } + if srv.repo != "" { + match["repo"] = expand(match, srv.repo) + } + if srv.check != nil { + if err := srv.check(match); err != nil { + return nil, err + } + } + vcs := vcsByCmd(match["vcs"]) + if vcs == nil { + return nil, fmt.Errorf("unknown version control system %q", match["vcs"]) + } + if err := checkGOVCS(vcs, match["root"]); err != nil { + return nil, err + } + var repoURL string + if !srv.schemelessRepo { + repoURL = match["repo"] + } else { + repo := match["repo"] + var ok bool + repoURL, ok = interceptVCSTest(repo, vcs, security) + if !ok { + scheme, err := func() (string, error) { + for _, s := range vcs.Scheme { + if security == web.SecureOnly && !vcs.isSecureScheme(s) { + continue + } + + // If we know how to ping URL schemes for this VCS, + // check that this repo works. + // Otherwise, default to the first scheme + // that meets the requested security level. + if vcs.PingCmd == "" { + return s, nil + } + if err := vcs.Ping(s, repo); err == nil { + return s, nil + } + } + securityFrag := "" + if security == web.SecureOnly { + securityFrag = "secure " + } + return "", fmt.Errorf("no %sprotocol found for repository", securityFrag) + }() + if err != nil { + return nil, err + } + repoURL = scheme + "://" + repo + } + } + rr := &RepoRoot{ + Repo: repoURL, + Root: match["root"], + VCS: vcs, + } + return rr, nil + } + return nil, errUnknownSite +} + +func interceptVCSTest(repo string, vcs *Cmd, security web.SecurityMode) (repoURL string, ok bool) { + if VCSTestRepoURL == "" { + return "", false + } + if vcs == vcsMod { + // Since the "mod" protocol is implemented internally, + // requests will be intercepted at a lower level (in cmd/go/internal/web). 
+ return "", false + } + + if scheme, path, ok := strings.Cut(repo, "://"); ok { + if security == web.SecureOnly && !vcs.isSecureScheme(scheme) { + return "", false // Let the caller reject the original URL. + } + repo = path // Remove leading URL scheme if present. + } + for _, host := range VCSTestHosts { + if !str.HasPathPrefix(repo, host) { + continue + } + + httpURL := VCSTestRepoURL + strings.TrimPrefix(repo, host) + + if vcs == vcsSvn { + // Ping the vcweb HTTP server to tell it to initialize the SVN repository + // and get the SVN server URL. + u, err := urlpkg.Parse(httpURL + "?vcwebsvn=1") + if err != nil { + panic(fmt.Sprintf("invalid vcs-test repo URL: %v", err)) + } + svnURL, err := web.GetBytes(u) + svnURL = bytes.TrimSpace(svnURL) + if err == nil && len(svnURL) > 0 { + return string(svnURL) + strings.TrimPrefix(repo, host), true + } + + // vcs-test doesn't have a svn handler for the given path, + // so resolve the repo to HTTPS instead. + } + + return httpURL, true + } + return "", false +} + +// urlForImportPath returns a partially-populated URL for the given Go import path. +// +// The URL leaves the Scheme field blank so that web.Get will try any scheme +// allowed by the selected security mode. +func urlForImportPath(importPath string) (*urlpkg.URL, error) { + slash := strings.Index(importPath, "/") + if slash < 0 { + slash = len(importPath) + } + host, path := importPath[:slash], importPath[slash:] + if !strings.Contains(host, ".") { + return nil, errors.New("import path does not begin with hostname") + } + if len(path) == 0 { + path = "/" + } + return &urlpkg.URL{Host: host, Path: path, RawQuery: "go-get=1"}, nil +} + +// repoRootForImportDynamic finds a *RepoRoot for a custom domain that's not +// statically known by repoRootFromVCSPaths. +// +// This handles custom import paths like "name.tld/pkg/foo" or just "name.tld". 
+func repoRootForImportDynamic(importPath string, mod ModuleMode, security web.SecurityMode) (*RepoRoot, error) { + url, err := urlForImportPath(importPath) + if err != nil { + return nil, err + } + resp, err := web.Get(security, url) + if err != nil { + msg := "https fetch: %v" + if security == web.Insecure { + msg = "http/" + msg + } + return nil, fmt.Errorf(msg, err) + } + body := resp.Body + defer body.Close() + imports, err := parseMetaGoImports(body, mod) + if len(imports) == 0 { + if respErr := resp.Err(); respErr != nil { + // If the server's status was not OK, prefer to report that instead of + // an XML parse error. + return nil, respErr + } + } + if err != nil { + return nil, fmt.Errorf("parsing %s: %v", importPath, err) + } + // Find the matched meta import. + mmi, err := matchGoImport(imports, importPath) + if err != nil { + if _, ok := err.(ImportMismatchError); !ok { + return nil, fmt.Errorf("parse %s: %v", url, err) + } + return nil, fmt.Errorf("parse %s: no go-import meta tags (%s)", resp.URL, err) + } + if cfg.BuildV { + log.Printf("get %q: found meta tag %#v at %s", importPath, mmi, url) + } + // If the import was "uni.edu/bob/project", which said the + // prefix was "uni.edu" and the RepoRoot was "evilroot.com", + // make sure we don't trust Bob and check out evilroot.com to + // "uni.edu" yet (possibly overwriting/preempting another + // non-evil student). Instead, first verify the root and see + // if it matches Bob's claim. 
+ if mmi.Prefix != importPath { + if cfg.BuildV { + log.Printf("get %q: verifying non-authoritative meta tag", importPath) + } + var imports []metaImport + url, imports, err = metaImportsForPrefix(mmi.Prefix, mod, security) + if err != nil { + return nil, err + } + metaImport2, err := matchGoImport(imports, importPath) + if err != nil || mmi != metaImport2 { + return nil, fmt.Errorf("%s and %s disagree about go-import for %s", resp.URL, url, mmi.Prefix) + } + } + + if err := validateRepoRoot(mmi.RepoRoot); err != nil { + return nil, fmt.Errorf("%s: invalid repo root %q: %v", resp.URL, mmi.RepoRoot, err) + } + var vcs *Cmd + if mmi.VCS == "mod" { + vcs = vcsMod + } else { + vcs = vcsByCmd(mmi.VCS) + if vcs == nil { + return nil, fmt.Errorf("%s: unknown vcs %q", resp.URL, mmi.VCS) + } + } + + if err := checkGOVCS(vcs, mmi.Prefix); err != nil { + return nil, err + } + + repoURL, ok := interceptVCSTest(mmi.RepoRoot, vcs, security) + if !ok { + repoURL = mmi.RepoRoot + } + rr := &RepoRoot{ + Repo: repoURL, + Root: mmi.Prefix, + IsCustom: true, + VCS: vcs, + } + return rr, nil +} + +// validateRepoRoot returns an error if repoRoot does not seem to be +// a valid URL with scheme. +func validateRepoRoot(repoRoot string) error { + url, err := urlpkg.Parse(repoRoot) + if err != nil { + return err + } + if url.Scheme == "" { + return errors.New("no scheme") + } + if url.Scheme == "file" { + return errors.New("file scheme disallowed") + } + return nil +} + +var fetchGroup singleflight.Group +var ( + fetchCacheMu sync.Mutex + fetchCache = map[string]fetchResult{} // key is metaImportsForPrefix's importPrefix +) + +// metaImportsForPrefix takes a package's root import path as declared in a tag +// and returns its HTML discovery URL and the parsed metaImport lines +// found on the page. +// +// The importPath is of the form "golang.org/x/tools". +// It is an error if no imports are found. +// url will still be valid if err != nil. 
+// The returned url will be of the form "https://golang.org/x/tools?go-get=1" +func metaImportsForPrefix(importPrefix string, mod ModuleMode, security web.SecurityMode) (*urlpkg.URL, []metaImport, error) { + setCache := func(res fetchResult) (fetchResult, error) { + fetchCacheMu.Lock() + defer fetchCacheMu.Unlock() + fetchCache[importPrefix] = res + return res, nil + } + + resi, _, _ := fetchGroup.Do(importPrefix, func() (resi any, err error) { + fetchCacheMu.Lock() + if res, ok := fetchCache[importPrefix]; ok { + fetchCacheMu.Unlock() + return res, nil + } + fetchCacheMu.Unlock() + + url, err := urlForImportPath(importPrefix) + if err != nil { + return setCache(fetchResult{err: err}) + } + resp, err := web.Get(security, url) + if err != nil { + return setCache(fetchResult{url: url, err: fmt.Errorf("fetching %s: %v", importPrefix, err)}) + } + body := resp.Body + defer body.Close() + imports, err := parseMetaGoImports(body, mod) + if len(imports) == 0 { + if respErr := resp.Err(); respErr != nil { + // If the server's status was not OK, prefer to report that instead of + // an XML parse error. + return setCache(fetchResult{url: url, err: respErr}) + } + } + if err != nil { + return setCache(fetchResult{url: url, err: fmt.Errorf("parsing %s: %v", resp.URL, err)}) + } + if len(imports) == 0 { + err = fmt.Errorf("fetching %s: no go-import meta tag found in %s", importPrefix, resp.URL) + } + return setCache(fetchResult{url: url, imports: imports, err: err}) + }) + res := resi.(fetchResult) + return res.url, res.imports, res.err +} + +type fetchResult struct { + url *urlpkg.URL + imports []metaImport + err error +} + +// metaImport represents the parsed tags from HTML files. +type metaImport struct { + Prefix, VCS, RepoRoot string +} + +// An ImportMismatchError is returned where metaImport/s are present +// but none match our import path. 
+type ImportMismatchError struct { + importPath string + mismatches []string // the meta imports that were discarded for not matching our importPath +} + +func (m ImportMismatchError) Error() string { + formattedStrings := make([]string, len(m.mismatches)) + for i, pre := range m.mismatches { + formattedStrings[i] = fmt.Sprintf("meta tag %s did not match import path %s", pre, m.importPath) + } + return strings.Join(formattedStrings, ", ") +} + +// matchGoImport returns the metaImport from imports matching importPath. +// An error is returned if there are multiple matches. +// An ImportMismatchError is returned if none match. +func matchGoImport(imports []metaImport, importPath string) (metaImport, error) { + match := -1 + + errImportMismatch := ImportMismatchError{importPath: importPath} + for i, im := range imports { + if !str.HasPathPrefix(importPath, im.Prefix) { + errImportMismatch.mismatches = append(errImportMismatch.mismatches, im.Prefix) + continue + } + + if match >= 0 { + if imports[match].VCS == "mod" && im.VCS != "mod" { + // All the mod entries precede all the non-mod entries. + // We have a mod entry and don't care about the rest, + // matching or not. + break + } + return metaImport{}, fmt.Errorf("multiple meta tags match import path %q", importPath) + } + match = i + } + + if match == -1 { + return metaImport{}, errImportMismatch + } + return imports[match], nil +} + +// expand rewrites s to replace {k} with match[k] for each key k in match. +func expand(match map[string]string, s string) string { + // We want to replace each match exactly once, and the result of expansion + // must not depend on the iteration order through the map. + // A strings.Replacer has exactly the properties we're looking for. 
+ oldNew := make([]string, 0, 2*len(match)) + for k, v := range match { + oldNew = append(oldNew, "{"+k+"}", v) + } + return strings.NewReplacer(oldNew...).Replace(s) +} + +// vcsPaths defines the meaning of import paths referring to +// commonly-used VCS hosting sites (github.com/user/dir) +// and import paths referring to a fully-qualified importPath +// containing a VCS type (foo.com/repo.git/dir) +var vcsPaths = []*vcsPath{ + // GitHub + { + pathPrefix: "github.com", + regexp: lazyregexp.New(`^(?Pgithub\.com/[\w.\-]+/[\w.\-]+)(/[\w.\-]+)*$`), + vcs: "git", + repo: "https://{root}", + check: noVCSSuffix, + }, + + // Bitbucket + { + pathPrefix: "bitbucket.org", + regexp: lazyregexp.New(`^(?Pbitbucket\.org/(?P[\w.\-]+/[\w.\-]+))(/[\w.\-]+)*$`), + vcs: "git", + repo: "https://{root}", + check: noVCSSuffix, + }, + + // IBM DevOps Services (JazzHub) + { + pathPrefix: "hub.jazz.net/git", + regexp: lazyregexp.New(`^(?Phub\.jazz\.net/git/[a-z0-9]+/[\w.\-]+)(/[\w.\-]+)*$`), + vcs: "git", + repo: "https://{root}", + check: noVCSSuffix, + }, + + // Git at Apache + { + pathPrefix: "git.apache.org", + regexp: lazyregexp.New(`^(?Pgit\.apache\.org/[a-z0-9_.\-]+\.git)(/[\w.\-]+)*$`), + vcs: "git", + repo: "https://{root}", + }, + + // Git at OpenStack + { + pathPrefix: "git.openstack.org", + regexp: lazyregexp.New(`^(?Pgit\.openstack\.org/[\w.\-]+/[\w.\-]+)(\.git)?(/[\w.\-]+)*$`), + vcs: "git", + repo: "https://{root}", + }, + + // chiselapp.com for fossil + { + pathPrefix: "chiselapp.com", + regexp: lazyregexp.New(`^(?Pchiselapp\.com/user/[A-Za-z0-9]+/repository/[\w.\-]+)$`), + vcs: "fossil", + repo: "https://{root}", + }, + + // General syntax for any server. + // Must be last. + { + regexp: lazyregexp.New(`(?P(?P([a-z0-9.\-]+\.)+[a-z0-9.\-]+(:[0-9]+)?(/~?[\w.\-]+)+?)\.(?Pbzr|fossil|git|hg|svn))(/~?[\w.\-]+)*$`), + schemelessRepo: true, + }, +} + +// vcsPathsAfterDynamic gives additional vcsPaths entries +// to try after the dynamic HTML check. 
+// This gives those sites a chance to introduce tags +// as part of a graceful transition away from the hard-coded logic. +var vcsPathsAfterDynamic = []*vcsPath{ + // Launchpad. See golang.org/issue/11436. + { + pathPrefix: "launchpad.net", + regexp: lazyregexp.New(`^(?Plaunchpad\.net/((?P[\w.\-]+)(?P/[\w.\-]+)?|~[\w.\-]+/(\+junk|[\w.\-]+)/[\w.\-]+))(/[\w.\-]+)*$`), + vcs: "bzr", + repo: "https://{root}", + check: launchpadVCS, + }, +} + +// noVCSSuffix checks that the repository name does not +// end in .foo for any version control system foo. +// The usual culprit is ".git". +func noVCSSuffix(match map[string]string) error { + repo := match["repo"] + for _, vcs := range vcsList { + if strings.HasSuffix(repo, "."+vcs.Cmd) { + return fmt.Errorf("invalid version control suffix in %s path", match["prefix"]) + } + } + return nil +} + +// launchpadVCS solves the ambiguity for "lp.net/project/foo". In this case, +// "foo" could be a series name registered in Launchpad with its own branch, +// and it could also be the name of a directory within the main project +// branch one level up. +func launchpadVCS(match map[string]string) error { + if match["project"] == "" || match["series"] == "" { + return nil + } + url := &urlpkg.URL{ + Scheme: "https", + Host: "code.launchpad.net", + Path: expand(match, "/{project}{series}/.bzr/branch-format"), + } + _, err := web.GetBytes(url) + if err != nil { + match["root"] = expand(match, "launchpad.net/{project}") + match["repo"] = expand(match, "https://{root}") + } + return nil +} + +// importError is a copy of load.importError, made to avoid a dependency cycle +// on cmd/go/internal/load. It just needs to satisfy load.ImportPathError. 
+type importError struct { + importPath string + err error +} + +func importErrorf(path, format string, args ...any) error { + err := &importError{importPath: path, err: fmt.Errorf(format, args...)} + if errStr := err.Error(); !strings.Contains(errStr, path) { + panic(fmt.Sprintf("path %q not in error %q", path, errStr)) + } + return err +} + +func (e *importError) Error() string { + return e.err.Error() +} + +func (e *importError) Unwrap() error { + // Don't return e.err directly, since we're only wrapping an error if %w + // was passed to ImportErrorf. + return errors.Unwrap(e.err) +} + +func (e *importError) ImportPath() string { + return e.importPath +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vcs/vcs_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcs/vcs_test.go new file mode 100644 index 0000000000000000000000000000000000000000..2ce85ea210967fd4966fe497817d04e5eb2f5e26 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcs/vcs_test.go @@ -0,0 +1,581 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vcs + +import ( + "errors" + "fmt" + "internal/testenv" + "os" + "path/filepath" + "strings" + "testing" + + "cmd/go/internal/web" +) + +func init() { + // GOVCS defaults to public:git|hg,private:all, + // which breaks many tests here - they can't use non-git, non-hg VCS at all! + // Change to fully permissive. + // The tests of the GOVCS setting itself are in ../../testdata/script/govcs.txt. + os.Setenv("GOVCS", "*:all") +} + +// Test that RepoRootForImportPath determines the correct RepoRoot for a given importPath. +// TODO(cmang): Add tests for SVN and BZR. 
+func TestRepoRootForImportPath(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + + tests := []struct { + path string + want *RepoRoot + }{ + { + "github.com/golang/groupcache", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://github.com/golang/groupcache", + }, + }, + // Unicode letters in directories are not valid. + { + "github.com/user/unicode/испытание", + nil, + }, + // IBM DevOps Services tests + { + "hub.jazz.net/git/user1/pkgname", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://hub.jazz.net/git/user1/pkgname", + }, + }, + { + "hub.jazz.net/git/user1/pkgname/submodule/submodule/submodule", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://hub.jazz.net/git/user1/pkgname", + }, + }, + { + "hub.jazz.net", + nil, + }, + { + "hubajazz.net", + nil, + }, + { + "hub2.jazz.net", + nil, + }, + { + "hub.jazz.net/someotherprefix", + nil, + }, + { + "hub.jazz.net/someotherprefix/user1/pkgname", + nil, + }, + // Spaces are not valid in user names or package names + { + "hub.jazz.net/git/User 1/pkgname", + nil, + }, + { + "hub.jazz.net/git/user1/pkg name", + nil, + }, + // Dots are not valid in user names + { + "hub.jazz.net/git/user.1/pkgname", + nil, + }, + { + "hub.jazz.net/git/user/pkg.name", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://hub.jazz.net/git/user/pkg.name", + }, + }, + // User names cannot have uppercase letters + { + "hub.jazz.net/git/USER/pkgname", + nil, + }, + // OpenStack tests + { + "git.openstack.org/openstack/swift", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://git.openstack.org/openstack/swift", + }, + }, + // Trailing .git is less preferred but included for + // compatibility purposes while the same source needs to + // be compilable on both old and new go + { + "git.openstack.org/openstack/swift.git", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://git.openstack.org/openstack/swift.git", + }, + }, + { + "git.openstack.org/openstack/swift/go/hummingbird", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://git.openstack.org/openstack/swift", + }, + }, + { + 
"git.openstack.org", + nil, + }, + { + "git.openstack.org/openstack", + nil, + }, + // Spaces are not valid in package name + { + "git.apache.org/package name/path/to/lib", + nil, + }, + // Should have ".git" suffix + { + "git.apache.org/package-name/path/to/lib", + nil, + }, + { + "gitbapache.org", + nil, + }, + { + "git.apache.org/package-name.git", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://git.apache.org/package-name.git", + }, + }, + { + "git.apache.org/package-name_2.x.git/path/to/lib", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://git.apache.org/package-name_2.x.git", + }, + }, + { + "chiselapp.com/user/kyle/repository/fossilgg", + &RepoRoot{ + VCS: vcsFossil, + Repo: "https://chiselapp.com/user/kyle/repository/fossilgg", + }, + }, + { + // must have a user/$name/repository/$repo path + "chiselapp.com/kyle/repository/fossilgg", + nil, + }, + { + "chiselapp.com/user/kyle/fossilgg", + nil, + }, + { + "bitbucket.org/workspace/pkgname", + &RepoRoot{ + VCS: vcsGit, + Repo: "https://bitbucket.org/workspace/pkgname", + }, + }, + } + + for _, test := range tests { + got, err := RepoRootForImportPath(test.path, IgnoreMod, web.SecureOnly) + want := test.want + + if want == nil { + if err == nil { + t.Errorf("RepoRootForImportPath(%q): Error expected but not received", test.path) + } + continue + } + if err != nil { + t.Errorf("RepoRootForImportPath(%q): %v", test.path, err) + continue + } + if got.VCS.Name != want.VCS.Name || got.Repo != want.Repo { + t.Errorf("RepoRootForImportPath(%q) = VCS(%s) Repo(%s), want VCS(%s) Repo(%s)", test.path, got.VCS, got.Repo, want.VCS, want.Repo) + } + } +} + +// Test that vcs.FromDir correctly inspects a given directory and returns the +// right VCS and repo directory. 
+func TestFromDir(t *testing.T) { + tempDir := t.TempDir() + + for _, vcs := range vcsList { + for r, root := range vcs.RootNames { + vcsName := fmt.Sprint(vcs.Name, r) + dir := filepath.Join(tempDir, "example.com", vcsName, root.filename) + if root.isDir { + err := os.MkdirAll(dir, 0755) + if err != nil { + t.Fatal(err) + } + } else { + err := os.MkdirAll(filepath.Dir(dir), 0755) + if err != nil { + t.Fatal(err) + } + f, err := os.Create(dir) + if err != nil { + t.Fatal(err) + } + f.Close() + } + + wantRepoDir := filepath.Dir(dir) + gotRepoDir, gotVCS, err := FromDir(dir, tempDir, false) + if err != nil { + t.Errorf("FromDir(%q, %q): %v", dir, tempDir, err) + continue + } + if gotRepoDir != wantRepoDir || gotVCS.Name != vcs.Name { + t.Errorf("FromDir(%q, %q) = RepoDir(%s), VCS(%s); want RepoDir(%s), VCS(%s)", dir, tempDir, gotRepoDir, gotVCS.Name, wantRepoDir, vcs.Name) + } + } + } +} + +func TestIsSecure(t *testing.T) { + tests := []struct { + vcs *Cmd + url string + secure bool + }{ + {vcsGit, "http://example.com/foo.git", false}, + {vcsGit, "https://example.com/foo.git", true}, + {vcsBzr, "http://example.com/foo.bzr", false}, + {vcsBzr, "https://example.com/foo.bzr", true}, + {vcsSvn, "http://example.com/svn", false}, + {vcsSvn, "https://example.com/svn", true}, + {vcsHg, "http://example.com/foo.hg", false}, + {vcsHg, "https://example.com/foo.hg", true}, + {vcsGit, "ssh://user@example.com/foo.git", true}, + {vcsGit, "user@server:path/to/repo.git", false}, + {vcsGit, "user@server:", false}, + {vcsGit, "server:repo.git", false}, + {vcsGit, "server:path/to/repo.git", false}, + {vcsGit, "example.com:path/to/repo.git", false}, + {vcsGit, "path/that/contains/a:colon/repo.git", false}, + {vcsHg, "ssh://user@example.com/path/to/repo.hg", true}, + {vcsFossil, "http://example.com/foo", false}, + {vcsFossil, "https://example.com/foo", true}, + } + + for _, test := range tests { + secure := test.vcs.IsSecure(test.url) + if secure != test.secure { + t.Errorf("%s 
isSecure(%q) = %t; want %t", test.vcs, test.url, secure, test.secure) + } + } +} + +func TestIsSecureGitAllowProtocol(t *testing.T) { + tests := []struct { + vcs *Cmd + url string + secure bool + }{ + // Same as TestIsSecure to verify same behavior. + {vcsGit, "http://example.com/foo.git", false}, + {vcsGit, "https://example.com/foo.git", true}, + {vcsBzr, "http://example.com/foo.bzr", false}, + {vcsBzr, "https://example.com/foo.bzr", true}, + {vcsSvn, "http://example.com/svn", false}, + {vcsSvn, "https://example.com/svn", true}, + {vcsHg, "http://example.com/foo.hg", false}, + {vcsHg, "https://example.com/foo.hg", true}, + {vcsGit, "user@server:path/to/repo.git", false}, + {vcsGit, "user@server:", false}, + {vcsGit, "server:repo.git", false}, + {vcsGit, "server:path/to/repo.git", false}, + {vcsGit, "example.com:path/to/repo.git", false}, + {vcsGit, "path/that/contains/a:colon/repo.git", false}, + {vcsHg, "ssh://user@example.com/path/to/repo.hg", true}, + // New behavior. + {vcsGit, "ssh://user@example.com/foo.git", false}, + {vcsGit, "foo://example.com/bar.git", true}, + {vcsHg, "foo://example.com/bar.hg", false}, + {vcsSvn, "foo://example.com/svn", false}, + {vcsBzr, "foo://example.com/bar.bzr", false}, + } + + defer os.Unsetenv("GIT_ALLOW_PROTOCOL") + os.Setenv("GIT_ALLOW_PROTOCOL", "https:foo") + for _, test := range tests { + secure := test.vcs.IsSecure(test.url) + if secure != test.secure { + t.Errorf("%s isSecure(%q) = %t; want %t", test.vcs, test.url, secure, test.secure) + } + } +} + +func TestMatchGoImport(t *testing.T) { + tests := []struct { + imports []metaImport + path string + mi metaImport + err error + }{ + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "example.com/user/foo", + mi: metaImport{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", VCS: "git", 
RepoRoot: "https://example.com/repo/target"}, + }, + path: "example.com/user/foo/", + mi: metaImport{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + {Prefix: "example.com/user/fooa", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "example.com/user/foo", + mi: metaImport{Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + {Prefix: "example.com/user/fooa", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "example.com/user/fooa", + mi: metaImport{Prefix: "example.com/user/fooa", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "example.com/user/foo/bar", + err: errors.New("should not be allowed to create nested repo"), + }, + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "example.com/user/foo/bar/baz", + err: errors.New("should not be allowed to create nested repo"), + }, + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "example.com/user/foo/bar/baz/qux", + err: errors.New("should not be allowed to create nested repo"), + }, + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", 
VCS: "git", RepoRoot: "https://example.com/repo/target"}, + {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "example.com/user/foo/bar/baz/", + err: errors.New("should not be allowed to create nested repo"), + }, + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + {Prefix: "example.com/user/foo/bar", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "example.com", + err: errors.New("pathologically short path"), + }, + { + imports: []metaImport{ + {Prefix: "example.com/user/foo", VCS: "git", RepoRoot: "https://example.com/repo/target"}, + }, + path: "different.example.com/user/foo", + err: errors.New("meta tags do not match import path"), + }, + { + imports: []metaImport{ + {Prefix: "myitcv.io/blah2", VCS: "mod", RepoRoot: "https://raw.githubusercontent.com/myitcv/pubx/master"}, + {Prefix: "myitcv.io", VCS: "git", RepoRoot: "https://github.com/myitcv/x"}, + }, + path: "myitcv.io/blah2/foo", + mi: metaImport{Prefix: "myitcv.io/blah2", VCS: "mod", RepoRoot: "https://raw.githubusercontent.com/myitcv/pubx/master"}, + }, + { + imports: []metaImport{ + {Prefix: "myitcv.io/blah2", VCS: "mod", RepoRoot: "https://raw.githubusercontent.com/myitcv/pubx/master"}, + {Prefix: "myitcv.io", VCS: "git", RepoRoot: "https://github.com/myitcv/x"}, + }, + path: "myitcv.io/other", + mi: metaImport{Prefix: "myitcv.io", VCS: "git", RepoRoot: "https://github.com/myitcv/x"}, + }, + } + + for _, test := range tests { + mi, err := matchGoImport(test.imports, test.path) + if mi != test.mi { + t.Errorf("unexpected metaImport; got %v, want %v", mi, test.mi) + } + + got := err + want := test.err + if (got == nil) != (want == nil) { + t.Errorf("unexpected error; got %v, want %v", got, want) + } + } +} + +func TestValidateRepoRoot(t *testing.T) { + tests := []struct { + root string + ok bool + }{ + { + root: "", + ok: false, + }, + { + root: 
"http://", + ok: true, + }, + { + root: "git+ssh://", + ok: true, + }, + { + root: "http#://", + ok: false, + }, + { + root: "-config", + ok: false, + }, + { + root: "-config://", + ok: false, + }, + } + + for _, test := range tests { + err := validateRepoRoot(test.root) + ok := err == nil + if ok != test.ok { + want := "error" + if test.ok { + want = "nil" + } + t.Errorf("validateRepoRoot(%q) = %q, want %s", test.root, err, want) + } + } +} + +var govcsTests = []struct { + govcs string + path string + vcs string + ok bool +}{ + {"private:all", "is-public.com/foo", "zzz", false}, + {"private:all", "is-private.com/foo", "zzz", true}, + {"public:all", "is-public.com/foo", "zzz", true}, + {"public:all", "is-private.com/foo", "zzz", false}, + {"public:all,private:none", "is-public.com/foo", "zzz", true}, + {"public:all,private:none", "is-private.com/foo", "zzz", false}, + {"*:all", "is-public.com/foo", "zzz", true}, + {"golang.org:git", "golang.org/x/text", "zzz", false}, + {"golang.org:git", "golang.org/x/text", "git", true}, + {"golang.org:zzz", "golang.org/x/text", "zzz", true}, + {"golang.org:zzz", "golang.org/x/text", "git", false}, + {"golang.org:zzz", "golang.org/x/text", "zzz", true}, + {"golang.org:zzz", "golang.org/x/text", "git", false}, + {"golang.org:git|hg", "golang.org/x/text", "hg", true}, + {"golang.org:git|hg", "golang.org/x/text", "git", true}, + {"golang.org:git|hg", "golang.org/x/text", "zzz", false}, + {"golang.org:all", "golang.org/x/text", "hg", true}, + {"golang.org:all", "golang.org/x/text", "git", true}, + {"golang.org:all", "golang.org/x/text", "zzz", true}, + {"other.xyz/p:none,golang.org/x:git", "other.xyz/p/x", "git", false}, + {"other.xyz/p:none,golang.org/x:git", "unexpected.com", "git", false}, + {"other.xyz/p:none,golang.org/x:git", "golang.org/x/text", "zzz", false}, + {"other.xyz/p:none,golang.org/x:git", "golang.org/x/text", "git", true}, + {"other.xyz/p:none,golang.org/x:zzz", "golang.org/x/text", "zzz", true}, + 
{"other.xyz/p:none,golang.org/x:zzz", "golang.org/x/text", "git", false}, + {"other.xyz/p:none,golang.org/x:git|hg", "golang.org/x/text", "hg", true}, + {"other.xyz/p:none,golang.org/x:git|hg", "golang.org/x/text", "git", true}, + {"other.xyz/p:none,golang.org/x:git|hg", "golang.org/x/text", "zzz", false}, + {"other.xyz/p:none,golang.org/x:all", "golang.org/x/text", "hg", true}, + {"other.xyz/p:none,golang.org/x:all", "golang.org/x/text", "git", true}, + {"other.xyz/p:none,golang.org/x:all", "golang.org/x/text", "zzz", true}, + {"other.xyz/p:none,golang.org/x:git", "golang.org/y/text", "zzz", false}, + {"other.xyz/p:none,golang.org/x:git", "golang.org/y/text", "git", false}, + {"other.xyz/p:none,golang.org/x:zzz", "golang.org/y/text", "zzz", false}, + {"other.xyz/p:none,golang.org/x:zzz", "golang.org/y/text", "git", false}, + {"other.xyz/p:none,golang.org/x:git|hg", "golang.org/y/text", "hg", false}, + {"other.xyz/p:none,golang.org/x:git|hg", "golang.org/y/text", "git", false}, + {"other.xyz/p:none,golang.org/x:git|hg", "golang.org/y/text", "zzz", false}, + {"other.xyz/p:none,golang.org/x:all", "golang.org/y/text", "hg", false}, + {"other.xyz/p:none,golang.org/x:all", "golang.org/y/text", "git", false}, + {"other.xyz/p:none,golang.org/x:all", "golang.org/y/text", "zzz", false}, +} + +func TestGOVCS(t *testing.T) { + for _, tt := range govcsTests { + cfg, err := parseGOVCS(tt.govcs) + if err != nil { + t.Errorf("parseGOVCS(%q): %v", tt.govcs, err) + continue + } + private := strings.HasPrefix(tt.path, "is-private") + ok := cfg.allow(tt.path, private, tt.vcs) + if ok != tt.ok { + t.Errorf("parseGOVCS(%q).allow(%q, %v, %q) = %v, want %v", + tt.govcs, tt.path, private, tt.vcs, ok, tt.ok) + } + } +} + +var govcsErrors = []struct { + s string + err string +}{ + {`,`, `empty entry in GOVCS`}, + {`,x`, `empty entry in GOVCS`}, + {`x,`, `malformed entry in GOVCS (missing colon): "x"`}, + {`x:y,`, `empty entry in GOVCS`}, + {`x`, `malformed entry in GOVCS (missing colon): 
"x"`}, + {`x:`, `empty VCS list in GOVCS: "x:"`}, + {`x:|`, `empty VCS name in GOVCS: "x:|"`}, + {`x:y|`, `empty VCS name in GOVCS: "x:y|"`}, + {`x:|y`, `empty VCS name in GOVCS: "x:|y"`}, + {`x:y,z:`, `empty VCS list in GOVCS: "z:"`}, + {`x:y,z:|`, `empty VCS name in GOVCS: "z:|"`}, + {`x:y,z:|w`, `empty VCS name in GOVCS: "z:|w"`}, + {`x:y,z:w|`, `empty VCS name in GOVCS: "z:w|"`}, + {`x:y,z:w||v`, `empty VCS name in GOVCS: "z:w||v"`}, + {`x:y,x:z`, `unreachable pattern in GOVCS: "x:z" after "x:y"`}, +} + +func TestGOVCSErrors(t *testing.T) { + for _, tt := range govcsErrors { + _, err := parseGOVCS(tt.s) + if err == nil || !strings.Contains(err.Error(), tt.err) { + t.Errorf("parseGOVCS(%s): err=%v, want %v", tt.s, err, tt.err) + } + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/auth.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/auth.go new file mode 100644 index 0000000000000000000000000000000000000000..383bf759ffcdc1ad44d22b15d695bdff519916d0 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/auth.go @@ -0,0 +1,108 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vcweb + +import ( + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "os" + "path" + "strings" +) + +// authHandler serves requests only if the Basic Auth data sent with the request +// matches the contents of a ".access" file in the requested directory. +// +// For each request, the handler looks for a file named ".access" and parses it +// as a JSON-serialized accessToken. If the credentials from the request match +// the accessToken, the file is served normally; otherwise, it is rejected with +// the StatusCode and Message provided by the token. +type authHandler struct{} + +type accessToken struct { + Username, Password string + StatusCode int // defaults to 401. 
+ Message string +} + +func (h *authHandler) Available() bool { return true } + +func (h *authHandler) Handler(dir string, env []string, logger *log.Logger) (http.Handler, error) { + fs := http.Dir(dir) + + handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + urlPath := req.URL.Path + if urlPath != "" && strings.HasPrefix(path.Base(urlPath), ".") { + http.Error(w, "filename contains leading dot", http.StatusBadRequest) + return + } + + f, err := fs.Open(urlPath) + if err != nil { + if os.IsNotExist(err) { + http.NotFound(w, req) + } else { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + return + } + + accessDir := urlPath + if fi, err := f.Stat(); err == nil && !fi.IsDir() { + accessDir = path.Dir(urlPath) + } + f.Close() + + var accessFile http.File + for { + var err error + accessFile, err = fs.Open(path.Join(accessDir, ".access")) + if err == nil { + break + } + + if !os.IsNotExist(err) { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if accessDir == "." 
{ + http.Error(w, "failed to locate access file", http.StatusInternalServerError) + return + } + accessDir = path.Dir(accessDir) + } + + data, err := io.ReadAll(accessFile) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + var token accessToken + if err := json.Unmarshal(data, &token); err != nil { + logger.Print(err) + http.Error(w, "malformed access file", http.StatusInternalServerError) + return + } + if username, password, ok := req.BasicAuth(); !ok || username != token.Username || password != token.Password { + code := token.StatusCode + if code == 0 { + code = http.StatusUnauthorized + } + if code == http.StatusUnauthorized { + w.Header().Add("WWW-Authenticate", fmt.Sprintf("basic realm=%s", accessDir)) + } + http.Error(w, token.Message, code) + return + } + + http.FileServer(fs).ServeHTTP(w, req) + }) + + return handler, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/bzr.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/bzr.go new file mode 100644 index 0000000000000000000000000000000000000000..a915fb2b93347c2bfb9fe4d73209bc59c0233d8c --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/bzr.go @@ -0,0 +1,18 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package vcweb + +import ( + "log" + "net/http" +) + +type bzrHandler struct{} + +func (*bzrHandler) Available() bool { return true } + +func (*bzrHandler) Handler(dir string, env []string, logger *log.Logger) (http.Handler, error) { + return http.FileServer(http.Dir(dir)), nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/dir.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/dir.go new file mode 100644 index 0000000000000000000000000000000000000000..2f122f414bb12d04109fd2c400237e7a1a3aabc4 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/dir.go @@ -0,0 +1,19 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vcweb + +import ( + "log" + "net/http" +) + +// dirHandler is a vcsHandler that serves the raw contents of a directory. +type dirHandler struct{} + +func (*dirHandler) Available() bool { return true } + +func (*dirHandler) Handler(dir string, env []string, logger *log.Logger) (http.Handler, error) { + return http.FileServer(http.Dir(dir)), nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/fossil.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/fossil.go new file mode 100644 index 0000000000000000000000000000000000000000..cc24f2f1b0dc2f54a72737f58eb2cc5e45cf31b2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/fossil.go @@ -0,0 +1,61 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package vcweb + +import ( + "fmt" + "log" + "net/http" + "net/http/cgi" + "os" + "os/exec" + "path/filepath" + "sync" +) + +type fossilHandler struct { + once sync.Once + fossilPath string + fossilPathErr error +} + +func (h *fossilHandler) Available() bool { + h.once.Do(func() { + h.fossilPath, h.fossilPathErr = exec.LookPath("fossil") + }) + return h.fossilPathErr == nil +} + +func (h *fossilHandler) Handler(dir string, env []string, logger *log.Logger) (http.Handler, error) { + if !h.Available() { + return nil, ServerNotInstalledError{name: "fossil"} + } + + name := filepath.Base(dir) + db := filepath.Join(dir, name+".fossil") + + cgiPath := db + ".cgi" + cgiScript := fmt.Sprintf("#!%s\nrepository: %s\n", h.fossilPath, db) + if err := os.WriteFile(cgiPath, []byte(cgiScript), 0755); err != nil { + return nil, err + } + + handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if _, err := os.Stat(db); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + ch := &cgi.Handler{ + Env: env, + Logger: logger, + Path: h.fossilPath, + Args: []string{cgiPath}, + Dir: dir, + } + ch.ServeHTTP(w, req) + }) + + return handler, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/git.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/git.go new file mode 100644 index 0000000000000000000000000000000000000000..d1e0563bede03378562ca3105c5472b97107ef2f --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/git.go @@ -0,0 +1,71 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package vcweb + +import ( + "log" + "net/http" + "net/http/cgi" + "os/exec" + "runtime" + "slices" + "sync" +) + +type gitHandler struct { + once sync.Once + gitPath string + gitPathErr error +} + +func (h *gitHandler) Available() bool { + if runtime.GOOS == "plan9" { + // The Git command is usually not the real Git on Plan 9. + // See https://golang.org/issues/29640. + return false + } + h.once.Do(func() { + h.gitPath, h.gitPathErr = exec.LookPath("git") + }) + return h.gitPathErr == nil +} + +func (h *gitHandler) Handler(dir string, env []string, logger *log.Logger) (http.Handler, error) { + if !h.Available() { + return nil, ServerNotInstalledError{name: "git"} + } + + baseEnv := append(slices.Clip(env), + "GIT_PROJECT_ROOT="+dir, + "GIT_HTTP_EXPORT_ALL=1", + ) + + handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + // The Git client sends the requested Git protocol version as a + // "Git-Protocol" HTTP request header, which the CGI host then converts + // to an environment variable (HTTP_GIT_PROTOCOL). + // + // However, versions of Git older that 2.34.0 don't recognize the + // HTTP_GIT_PROTOCOL variable, and instead need that value to be set in the + // GIT_PROTOCOL variable. We do so here so that vcweb can work reliably + // with older Git releases. (As of the time of writing, the Go project's + // builders were on Git version 2.30.2.) 
+ env := slices.Clip(baseEnv) + if p := req.Header.Get("Git-Protocol"); p != "" { + env = append(env, "GIT_PROTOCOL="+p) + } + + h := &cgi.Handler{ + Path: h.gitPath, + Logger: logger, + Args: []string{"http-backend"}, + Dir: dir, + Env: env, + } + h.ServeHTTP(w, req) + }) + + return handler, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/hg.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/hg.go new file mode 100644 index 0000000000000000000000000000000000000000..4571277c9f1a5a4bfd9235ed3582363dda02347a --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/hg.go @@ -0,0 +1,123 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vcweb + +import ( + "bufio" + "context" + "errors" + "io" + "log" + "net/http" + "net/http/httputil" + "net/url" + "os" + "os/exec" + "slices" + "strings" + "sync" + "time" +) + +type hgHandler struct { + once sync.Once + hgPath string + hgPathErr error +} + +func (h *hgHandler) Available() bool { + h.once.Do(func() { + h.hgPath, h.hgPathErr = exec.LookPath("hg") + }) + return h.hgPathErr == nil +} + +func (h *hgHandler) Handler(dir string, env []string, logger *log.Logger) (http.Handler, error) { + if !h.Available() { + return nil, ServerNotInstalledError{name: "hg"} + } + + handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + // Mercurial has a CGI server implementation (called hgweb). In theory we + // could use that — however, assuming that hgweb is even installed, the + // configuration for hgweb varies by Python version (2 vs 3), and we would + // rather not go rooting around trying to find the right Python version to + // run. 
+ // + // Instead, we'll take a somewhat more roundabout approach: we assume that + // if "hg" works at all then "hg serve" works too, and we'll execute that as + // a subprocess, using a reverse proxy to forward the request and response. + + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + + cmd := exec.CommandContext(ctx, h.hgPath, "serve", "--port", "0", "--address", "localhost", "--accesslog", os.DevNull, "--name", "vcweb", "--print-url") + cmd.Dir = dir + cmd.Env = append(slices.Clip(env), "PWD="+dir) + + cmd.Cancel = func() error { + err := cmd.Process.Signal(os.Interrupt) + if err != nil && !errors.Is(err, os.ErrProcessDone) { + err = cmd.Process.Kill() + } + return err + } + // This WaitDelay is arbitrary. After 'hg serve' prints its URL, any further + // I/O is only for debugging. (The actual output goes through the HTTP URL, + // not the standard I/O streams.) + cmd.WaitDelay = 10 * time.Second + + stderr := new(strings.Builder) + cmd.Stderr = stderr + + stdout, err := cmd.StdoutPipe() + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + if err := cmd.Start(); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + var wg sync.WaitGroup + defer func() { + cancel() + err := cmd.Wait() + if out := strings.TrimSuffix(stderr.String(), "interrupted!\n"); out != "" { + logger.Printf("%v: %v\n%s", cmd, err, out) + } else { + logger.Printf("%v", cmd) + } + wg.Wait() + }() + + r := bufio.NewReader(stdout) + line, err := r.ReadString('\n') + if err != nil { + return + } + // We have read what should be the server URL. 'hg serve' shouldn't need to + // write anything else to stdout, but it's not a big deal if it does anyway. + // Keep the stdout pipe open so that 'hg serve' won't get a SIGPIPE, but + // actively discard its output so that it won't hang on a blocking write. 
+ wg.Add(1) + go func() { + io.Copy(io.Discard, r) + wg.Done() + }() + + u, err := url.Parse(strings.TrimSpace(line)) + if err != nil { + logger.Printf("%v: %v", cmd, err) + http.Error(w, err.Error(), http.StatusBadGateway) + return + } + logger.Printf("proxying hg request to %s", u) + httputil.NewSingleHostReverseProxy(u).ServeHTTP(w, req) + }) + + return handler, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/insecure.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/insecure.go new file mode 100644 index 0000000000000000000000000000000000000000..1d6af25e79aee3529c2a1afe8fc8861778afd469 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/insecure.go @@ -0,0 +1,42 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vcweb + +import ( + "log" + "net/http" +) + +// insecureHandler redirects requests to the same host and path but using the +// "http" scheme instead of "https". +type insecureHandler struct{} + +func (h *insecureHandler) Available() bool { return true } + +func (h *insecureHandler) Handler(dir string, env []string, logger *log.Logger) (http.Handler, error) { + // The insecure-redirect handler implementation doesn't depend or dir or env. + // + // The only effect of the directory is to determine which prefix the caller + // will strip from the request before passing it on to this handler. + return h, nil +} + +func (h *insecureHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if req.Host == "" && req.URL.Host == "" { + http.Error(w, "no Host provided in request", http.StatusBadRequest) + return + } + + // Note that if the handler is wrapped with http.StripPrefix, the prefix + // will remain stripped in the redirected URL, preventing redirect loops + // if the scheme is already "http". 
+ + u := *req.URL + u.Scheme = "http" + u.User = nil + u.Host = req.Host + + http.Redirect(w, req, u.String(), http.StatusFound) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/script.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/script.go new file mode 100644 index 0000000000000000000000000000000000000000..c35b46f735115f337ea5aaff563c1770b88303a2 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/script.go @@ -0,0 +1,345 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vcweb + +import ( + "bufio" + "bytes" + "cmd/go/internal/script" + "context" + "errors" + "fmt" + "internal/txtar" + "io" + "log" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" + + "golang.org/x/mod/module" + "golang.org/x/mod/zip" +) + +// newScriptEngine returns a script engine augmented with commands for +// reproducing version-control repositories by replaying commits. +func newScriptEngine() *script.Engine { + conds := script.DefaultConds() + + interrupt := func(cmd *exec.Cmd) error { return cmd.Process.Signal(os.Interrupt) } + gracePeriod := 30 * time.Second // arbitrary + + cmds := script.DefaultCmds() + cmds["at"] = scriptAt() + cmds["bzr"] = script.Program("bzr", interrupt, gracePeriod) + cmds["fossil"] = script.Program("fossil", interrupt, gracePeriod) + cmds["git"] = script.Program("git", interrupt, gracePeriod) + cmds["hg"] = script.Program("hg", interrupt, gracePeriod) + cmds["handle"] = scriptHandle() + cmds["modzip"] = scriptModzip() + cmds["svnadmin"] = script.Program("svnadmin", interrupt, gracePeriod) + cmds["svn"] = script.Program("svn", interrupt, gracePeriod) + cmds["unquote"] = scriptUnquote() + + return &script.Engine{ + Cmds: cmds, + Conds: conds, + } +} + +// loadScript interprets the given script content using the vcweb script engine. 
+// loadScript always returns either a non-nil handler or a non-nil error. +// +// The script content must be a txtar archive with a comment containing a script +// with exactly one "handle" command and zero or more VCS commands to prepare +// the repository to be served. +func (s *Server) loadScript(ctx context.Context, logger *log.Logger, scriptPath string, scriptContent []byte, workDir string) (http.Handler, error) { + ar := txtar.Parse(scriptContent) + + if err := os.MkdirAll(workDir, 0755); err != nil { + return nil, err + } + + st, err := s.newState(ctx, workDir) + if err != nil { + return nil, err + } + if err := st.ExtractFiles(ar); err != nil { + return nil, err + } + + scriptName := filepath.Base(scriptPath) + scriptLog := new(strings.Builder) + err = s.engine.Execute(st, scriptName, bufio.NewReader(bytes.NewReader(ar.Comment)), scriptLog) + closeErr := st.CloseAndWait(scriptLog) + logger.Printf("%s:", scriptName) + io.WriteString(logger.Writer(), scriptLog.String()) + io.WriteString(logger.Writer(), "\n") + if err != nil { + return nil, err + } + if closeErr != nil { + return nil, err + } + + sc, err := getScriptCtx(st) + if err != nil { + return nil, err + } + if sc.handler == nil { + return nil, errors.New("script completed without setting handler") + } + return sc.handler, nil +} + +// newState returns a new script.State for executing scripts in workDir. +func (s *Server) newState(ctx context.Context, workDir string) (*script.State, error) { + ctx = &scriptCtx{ + Context: ctx, + server: s, + } + + st, err := script.NewState(ctx, workDir, s.env) + if err != nil { + return nil, err + } + return st, nil +} + +// scriptEnviron returns a new environment that attempts to provide predictable +// behavior for the supported version-control tools. 
+func scriptEnviron(homeDir string) []string { + env := []string{ + "USER=gopher", + homeEnvName() + "=" + homeDir, + "GIT_CONFIG_NOSYSTEM=1", + "HGRCPATH=" + filepath.Join(homeDir, ".hgrc"), + "HGENCODING=utf-8", + } + // Preserve additional environment variables that may be needed by VCS tools. + for _, k := range []string{ + pathEnvName(), + tempEnvName(), + "SYSTEMROOT", // must be preserved on Windows to find DLLs; golang.org/issue/25210 + "WINDIR", // must be preserved on Windows to be able to run PowerShell command; golang.org/issue/30711 + "ComSpec", // must be preserved on Windows to be able to run Batch files; golang.org/issue/56555 + "DYLD_LIBRARY_PATH", // must be preserved on macOS systems to find shared libraries + "LD_LIBRARY_PATH", // must be preserved on Unix systems to find shared libraries + "LIBRARY_PATH", // allow override of non-standard static library paths + "PYTHONPATH", // may be needed by hg to find imported modules + } { + if v, ok := os.LookupEnv(k); ok { + env = append(env, k+"="+v) + } + } + + if os.Getenv("GO_BUILDER_NAME") != "" || os.Getenv("GIT_TRACE_CURL") == "1" { + // To help diagnose https://go.dev/issue/52545, + // enable tracing for Git HTTPS requests. + env = append(env, + "GIT_TRACE_CURL=1", + "GIT_TRACE_CURL_NO_DATA=1", + "GIT_REDACT_COOKIES=o,SSO,GSSO_Uberproxy") + } + + return env +} + +// homeEnvName returns the environment variable used by os.UserHomeDir +// to locate the user's home directory. +func homeEnvName() string { + switch runtime.GOOS { + case "windows": + return "USERPROFILE" + case "plan9": + return "home" + default: + return "HOME" + } +} + +// tempEnvName returns the environment variable used by os.TempDir +// to locate the default directory for temporary files. 
+func tempEnvName() string { + switch runtime.GOOS { + case "windows": + return "TMP" + case "plan9": + return "TMPDIR" // actually plan 9 doesn't have one at all but this is fine + default: + return "TMPDIR" + } +} + +// pathEnvName returns the environment variable used by exec.LookPath to +// identify directories to search for executables. +func pathEnvName() string { + switch runtime.GOOS { + case "plan9": + return "path" + default: + return "PATH" + } +} + +// A scriptCtx is a context.Context that stores additional state for script +// commands. +type scriptCtx struct { + context.Context + server *Server + commitTime time.Time + handlerName string + handler http.Handler +} + +// scriptCtxKey is the key associating the *scriptCtx in a script's Context.. +type scriptCtxKey struct{} + +func (sc *scriptCtx) Value(key any) any { + if key == (scriptCtxKey{}) { + return sc + } + return sc.Context.Value(key) +} + +func getScriptCtx(st *script.State) (*scriptCtx, error) { + sc, ok := st.Context().Value(scriptCtxKey{}).(*scriptCtx) + if !ok { + return nil, errors.New("scriptCtx not found in State.Context") + } + return sc, nil +} + +func scriptAt() script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "set the current commit time for all version control systems", + Args: "time", + Detail: []string{ + "The argument must be an absolute timestamp in RFC3339 format.", + }, + }, + func(st *script.State, args ...string) (script.WaitFunc, error) { + if len(args) != 1 { + return nil, script.ErrUsage + } + + sc, err := getScriptCtx(st) + if err != nil { + return nil, err + } + + sc.commitTime, err = time.ParseInLocation(time.RFC3339, args[0], time.UTC) + if err == nil { + st.Setenv("GIT_COMMITTER_DATE", args[0]) + st.Setenv("GIT_AUTHOR_DATE", args[0]) + } + return nil, err + }) +} + +func scriptHandle() script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "set the HTTP handler that will serve the script's output", + Args: "handler [dir]", + Detail: 
[]string{ + "The handler will be passed the script's current working directory and environment as arguments.", + "Valid handlers include 'dir' (for general http.Dir serving), 'bzr', 'fossil', 'git', and 'hg'", + }, + }, + func(st *script.State, args ...string) (script.WaitFunc, error) { + if len(args) == 0 || len(args) > 2 { + return nil, script.ErrUsage + } + + sc, err := getScriptCtx(st) + if err != nil { + return nil, err + } + + if sc.handler != nil { + return nil, fmt.Errorf("server handler already set to %s", sc.handlerName) + } + + name := args[0] + h, ok := sc.server.vcsHandlers[name] + if !ok { + return nil, fmt.Errorf("unrecognized VCS %q", name) + } + sc.handlerName = name + if !h.Available() { + return nil, ServerNotInstalledError{name} + } + + dir := st.Getwd() + if len(args) >= 2 { + dir = st.Path(args[1]) + } + sc.handler, err = h.Handler(dir, st.Environ(), sc.server.logger) + return nil, err + }) +} + +func scriptModzip() script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "create a Go module zip file from a directory", + Args: "zipfile path@version dir", + }, + func(st *script.State, args ...string) (wait script.WaitFunc, err error) { + if len(args) != 3 { + return nil, script.ErrUsage + } + zipPath := st.Path(args[0]) + mPath, version, ok := strings.Cut(args[1], "@") + if !ok { + return nil, script.ErrUsage + } + dir := st.Path(args[2]) + + if err := os.MkdirAll(filepath.Dir(zipPath), 0755); err != nil { + return nil, err + } + f, err := os.Create(zipPath) + if err != nil { + return nil, err + } + defer func() { + if closeErr := f.Close(); err == nil { + err = closeErr + } + }() + + return nil, zip.CreateFromDir(f, module.Version{Path: mPath, Version: version}, dir) + }) +} + +func scriptUnquote() script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "unquote the argument as a Go string", + Args: "string", + }, + func(st *script.State, args ...string) (script.WaitFunc, error) { + if len(args) != 1 { + return nil, 
script.ErrUsage + } + + s, err := strconv.Unquote(`"` + args[0] + `"`) + if err != nil { + return nil, err + } + + wait := func(*script.State) (stdout, stderr string, err error) { + return s, "", nil + } + return wait, nil + }) +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/svn.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/svn.go new file mode 100644 index 0000000000000000000000000000000000000000..60222f1d0acb21a46663dacae3057ab7fd7ba855 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/svn.go @@ -0,0 +1,199 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vcweb + +import ( + "io" + "log" + "net" + "net/http" + "os/exec" + "strings" + "sync" +) + +// An svnHandler serves requests for Subversion repos. +// +// Unlike the other vcweb handlers, svnHandler does not serve the Subversion +// protocol directly over the HTTP connection. Instead, it opens a separate port +// that serves the (non-HTTP) 'svn' protocol. The test binary can retrieve the +// URL for that port by sending an HTTP request with the query parameter +// "vcwebsvn=1". +// +// We take this approach because the 'svn' protocol is implemented by a +// lightweight 'svnserve' binary that is usually packaged along with the 'svn' +// client binary, whereas only known implementation of the Subversion HTTP +// protocol is the mod_dav_svn apache2 module. Apache2 has a lot of dependencies +// and also seems to rely on global configuration via well-known file paths, so +// implementing a hermetic test using apache2 would require the test to run in a +// complicated container environment, which wouldn't be nearly as +// straightforward for Go contributors to set up and test against on their local +// machine. 
+type svnHandler struct { + svnRoot string // a directory containing all svn repos to be served + logger *log.Logger + + pathOnce sync.Once + svnservePath string // the path to the 'svnserve' executable + svnserveErr error + + listenOnce sync.Once + s chan *svnState // 1-buffered +} + +// An svnState describes the state of a port serving the 'svn://' protocol. +type svnState struct { + listener net.Listener + listenErr error + conns map[net.Conn]struct{} + closing bool + done chan struct{} +} + +func (h *svnHandler) Available() bool { + h.pathOnce.Do(func() { + h.svnservePath, h.svnserveErr = exec.LookPath("svnserve") + }) + return h.svnserveErr == nil +} + +// Handler returns an http.Handler that checks for the "vcwebsvn" query +// parameter and then serves the 'svn://' URL for the repository at the +// requested path. +// The HTTP client is expected to read that URL and pass it to the 'svn' client. +func (h *svnHandler) Handler(dir string, env []string, logger *log.Logger) (http.Handler, error) { + if !h.Available() { + return nil, ServerNotInstalledError{name: "svn"} + } + + // Go ahead and start the listener now, so that if it fails (for example, due + // to port exhaustion) we can return an error from the Handler method instead + // of serving an error for each individual HTTP request. 
+ h.listenOnce.Do(func() { + h.s = make(chan *svnState, 1) + l, err := net.Listen("tcp", "localhost:0") + done := make(chan struct{}) + + h.s <- &svnState{ + listener: l, + listenErr: err, + conns: map[net.Conn]struct{}{}, + done: done, + } + if err != nil { + close(done) + return + } + + h.logger.Printf("serving svn on svn://%v", l.Addr()) + + go func() { + for { + c, err := l.Accept() + + s := <-h.s + if err != nil { + s.listenErr = err + if len(s.conns) == 0 { + close(s.done) + } + h.s <- s + return + } + if s.closing { + c.Close() + } else { + s.conns[c] = struct{}{} + go h.serve(c) + } + h.s <- s + } + }() + }) + + s := <-h.s + addr := "" + if s.listener != nil { + addr = s.listener.Addr().String() + } + err := s.listenErr + h.s <- s + if err != nil { + return nil, err + } + + handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if req.FormValue("vcwebsvn") != "" { + w.Header().Add("Content-Type", "text/plain; charset=UTF-8") + io.WriteString(w, "svn://"+addr+"\n") + return + } + http.NotFound(w, req) + }) + + return handler, nil +} + +// serve serves a single 'svn://' connection on c. +func (h *svnHandler) serve(c net.Conn) { + defer func() { + c.Close() + + s := <-h.s + delete(s.conns, c) + if len(s.conns) == 0 && s.listenErr != nil { + close(s.done) + } + h.s <- s + }() + + // The "--inetd" flag causes svnserve to speak the 'svn' protocol over its + // stdin and stdout streams as if invoked by the Unix "inetd" service. + // We aren't using inetd, but we are implementing essentially the same + // approach: using a host process to listen for connections and spawn + // subprocesses to serve them. 
+ cmd := exec.Command(h.svnservePath, "--read-only", "--root="+h.svnRoot, "--inetd") + cmd.Stdin = c + cmd.Stdout = c + stderr := new(strings.Builder) + cmd.Stderr = stderr + err := cmd.Run() + + var errFrag any = "ok" + if err != nil { + errFrag = err + } + stderrFrag := "" + if stderr.Len() > 0 { + stderrFrag = "\n" + stderr.String() + } + h.logger.Printf("%v: %s%s", cmd, errFrag, stderrFrag) +} + +// Close stops accepting new svn:// connections and terminates the existing +// ones, then waits for the 'svnserve' subprocesses to complete. +func (h *svnHandler) Close() error { + h.listenOnce.Do(func() {}) + if h.s == nil { + return nil + } + + var err error + s := <-h.s + s.closing = true + if s.listener == nil { + err = s.listenErr + } else { + err = s.listener.Close() + } + for c := range s.conns { + c.Close() + } + done := s.done + h.s <- s + + <-done + return err +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/vcstest/vcstest.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/vcstest/vcstest.go new file mode 100644 index 0000000000000000000000000000000000000000..d460259105ccc1a21fc76c1c2e60d473f9de8936 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/vcstest/vcstest.go @@ -0,0 +1,169 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package vcstest serves the repository scripts in cmd/go/testdata/vcstest +// using the [vcweb] script engine. 
+package vcstest + +import ( + "cmd/go/internal/vcs" + "cmd/go/internal/vcweb" + "cmd/go/internal/web" + "crypto/tls" + "crypto/x509" + "encoding/pem" + "fmt" + "internal/testenv" + "io" + "log" + "net/http" + "net/http/httptest" + "net/url" + "os" + "path/filepath" + "testing" +) + +var Hosts = []string{ + "vcs-test.golang.org", +} + +type Server struct { + vcweb *vcweb.Server + workDir string + HTTP *httptest.Server + HTTPS *httptest.Server +} + +// NewServer returns a new test-local vcweb server that serves VCS requests +// for modules with paths that begin with "vcs-test.golang.org" using the +// scripts in cmd/go/testdata/vcstest. +func NewServer() (srv *Server, err error) { + if vcs.VCSTestRepoURL != "" { + panic("vcs URL hooks already set") + } + + scriptDir := filepath.Join(testenv.GOROOT(nil), "src/cmd/go/testdata/vcstest") + + workDir, err := os.MkdirTemp("", "vcstest") + if err != nil { + return nil, err + } + defer func() { + if err != nil { + os.RemoveAll(workDir) + } + }() + + logger := log.Default() + if !testing.Verbose() { + logger = log.New(io.Discard, "", log.LstdFlags) + } + handler, err := vcweb.NewServer(scriptDir, workDir, logger) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + handler.Close() + } + }() + + srvHTTP := httptest.NewServer(handler) + httpURL, err := url.Parse(srvHTTP.URL) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + srvHTTP.Close() + } + }() + + srvHTTPS := httptest.NewTLSServer(handler) + httpsURL, err := url.Parse(srvHTTPS.URL) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + srvHTTPS.Close() + } + }() + + srv = &Server{ + vcweb: handler, + workDir: workDir, + HTTP: srvHTTP, + HTTPS: srvHTTPS, + } + vcs.VCSTestRepoURL = srv.HTTP.URL + vcs.VCSTestHosts = Hosts + + var interceptors []web.Interceptor + for _, host := range Hosts { + interceptors = append(interceptors, + web.Interceptor{Scheme: "http", FromHost: host, ToHost: httpURL.Host, 
Client: srv.HTTP.Client()}, + web.Interceptor{Scheme: "https", FromHost: host, ToHost: httpsURL.Host, Client: srv.HTTPS.Client()}) + } + web.EnableTestHooks(interceptors) + + fmt.Fprintln(os.Stderr, "vcs-test.golang.org rerouted to "+srv.HTTP.URL) + fmt.Fprintln(os.Stderr, "https://vcs-test.golang.org rerouted to "+srv.HTTPS.URL) + + return srv, nil +} + +func (srv *Server) Close() error { + if vcs.VCSTestRepoURL != srv.HTTP.URL { + panic("vcs URL hooks modified before Close") + } + vcs.VCSTestRepoURL = "" + vcs.VCSTestHosts = nil + web.DisableTestHooks() + + srv.HTTP.Close() + srv.HTTPS.Close() + err := srv.vcweb.Close() + if rmErr := os.RemoveAll(srv.workDir); err == nil { + err = rmErr + } + return err +} + +func (srv *Server) WriteCertificateFile() (string, error) { + b := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: srv.HTTPS.Certificate().Raw, + }) + + filename := filepath.Join(srv.workDir, "cert.pem") + if err := os.WriteFile(filename, b, 0644); err != nil { + return "", err + } + return filename, nil +} + +// TLSClient returns an http.Client that can talk to the httptest.Server +// whose certificate is written to the given file path. 
+func TLSClient(certFile string) (*http.Client, error) { + client := &http.Client{ + Transport: http.DefaultTransport.(*http.Transport).Clone(), + } + + pemBytes, err := os.ReadFile(certFile) + if err != nil { + return nil, err + } + + certpool := x509.NewCertPool() + if !certpool.AppendCertsFromPEM(pemBytes) { + return nil, fmt.Errorf("no certificates found in %s", certFile) + } + client.Transport.(*http.Transport).TLSClientConfig = &tls.Config{ + RootCAs: certpool, + } + + return client, nil +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4a6d60039ed0b233b4dbcb995830c78c8dea85ac --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/vcstest/vcstest_test.go @@ -0,0 +1,170 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vcstest_test + +import ( + "cmd/go/internal/vcweb" + "errors" + "flag" + "fmt" + "io" + "io/fs" + "log" + "net" + "net/http" + "net/http/httptest" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" +) + +var ( + dir = flag.String("dir", "../../../testdata/vcstest", "directory containing scripts to serve") + host = flag.String("host", "localhost", "hostname on which to serve HTTP") + port = flag.Int("port", -1, "port on which to serve HTTP; if nonnegative, skips running tests") +) + +func TestMain(m *testing.M) { + flag.Parse() + + if *port >= 0 { + err := serveStandalone(*host, *port) + if err != nil { + log.Fatal(err) + } + os.Exit(0) + } + + m.Run() +} + +// serveStandalone serves the vcweb testdata in a standalone HTTP server. 
+func serveStandalone(host string, port int) (err error) { + scriptDir, err := filepath.Abs(*dir) + if err != nil { + return err + } + work, err := os.MkdirTemp("", "vcweb") + if err != nil { + return err + } + defer func() { + if rmErr := os.RemoveAll(work); err == nil { + err = rmErr + } + }() + + log.Printf("running scripts in %s", work) + + v, err := vcweb.NewServer(scriptDir, work, log.Default()) + if err != nil { + return err + } + + l, err := net.Listen("tcp", fmt.Sprintf("%s:%d", host, port)) + if err != nil { + return err + } + log.Printf("serving on http://%s:%d/", host, l.Addr().(*net.TCPAddr).Port) + + return http.Serve(l, v) +} + +// TestScripts verifies that the VCS setup scripts in cmd/go/testdata/vcstest +// run successfully. +func TestScripts(t *testing.T) { + scriptDir, err := filepath.Abs(*dir) + if err != nil { + t.Fatal(err) + } + s, err := vcweb.NewServer(scriptDir, t.TempDir(), log.Default()) + if err != nil { + t.Fatal(err) + } + srv := httptest.NewServer(s) + + // To check for data races in the handler, run the root handler to produce an + // overview of the script status at an arbitrary point during the test. + // (We ignore the output because the expected failure mode is a friendly stack + // dump from the race detector.) + t.Run("overview", func(t *testing.T) { + t.Parallel() + + time.Sleep(1 * time.Millisecond) // Give the other handlers time to race. + + resp, err := http.Get(srv.URL) + if err == nil { + io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } else { + t.Error(err) + } + }) + + t.Cleanup(func() { + // The subtests spawned by WalkDir run in parallel. When they complete, this + // Cleanup callback will run. At that point we fetch the root URL (which + // contains a status page), both to test that the root handler runs without + // crashing and to display a nice summary of the server's view of the test + // coverage. 
+ resp, err := http.Get(srv.URL) + if err == nil { + var body []byte + body, err = io.ReadAll(resp.Body) + if err == nil && testing.Verbose() { + t.Logf("GET %s:\n%s", srv.URL, body) + } + resp.Body.Close() + } + if err != nil { + t.Error(err) + } + + srv.Close() + }) + + err = filepath.WalkDir(scriptDir, func(path string, d fs.DirEntry, err error) error { + if err != nil || d.IsDir() { + return err + } + + rel, err := filepath.Rel(scriptDir, path) + if err != nil { + return err + } + if rel == "README" { + return nil + } + + t.Run(filepath.ToSlash(rel), func(t *testing.T) { + t.Parallel() + + buf := new(strings.Builder) + logger := log.New(buf, "", log.LstdFlags) + // Load the script but don't try to serve the results: + // different VCS tools have different handler protocols, + // and the tests that actually use these repos will ensure + // that they are served correctly as a side effect anyway. + err := s.HandleScript(rel, logger, func(http.Handler) {}) + if buf.Len() > 0 { + t.Log(buf) + } + if err != nil { + if notInstalled := (vcweb.ServerNotInstalledError{}); errors.As(err, ¬Installed) || errors.Is(err, exec.ErrNotFound) { + t.Skip(err) + } + t.Error(err) + } + }) + return nil + }) + + if err != nil { + t.Error(err) + } +} diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/vcweb.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/vcweb.go new file mode 100644 index 0000000000000000000000000000000000000000..f748b343585661b3776b246802075401c0011ef8 --- /dev/null +++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/vcweb.go @@ -0,0 +1,425 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package vcweb serves version control repos for testing the go command. +// +// It is loosely derived from golang.org/x/build/vcs-test/vcweb, +// which ran as a service hosted at vcs-test.golang.org. 
+// +// When a repository URL is first requested, the vcweb [Server] dynamically +// regenerates the repository using a script interpreted by a [script.Engine]. +// The script produces the server's contents for a corresponding root URL and +// all subdirectories of that URL, which are then cached: subsequent requests +// for any URL generated by the script will serve the script's previous output +// until the script is modified. +// +// The script engine includes all of the engine's default commands and +// conditions, as well as commands for each supported VCS binary (bzr, fossil, +// git, hg, and svn), a "handle" command that informs the script which protocol +// or handler to use to serve the request, and utilities "at" (which sets +// environment variables for Git timestamps) and "unquote" (which unquotes its +// argument as if it were a Go string literal). +// +// The server's "/" endpoint provides a summary of the available scripts, +// and "/help" provides documentation for the script environment. +// +// To run a standalone server based on the vcweb engine, use: +// +// go test cmd/go/internal/vcweb/vcstest -v --port=0 +package vcweb + +import ( + "bufio" + "cmd/go/internal/script" + "context" + "crypto/sha256" + "errors" + "fmt" + "io" + "io/fs" + "log" + "net/http" + "os" + "os/exec" + "path" + "path/filepath" + "runtime/debug" + "strings" + "sync" + "text/tabwriter" + "time" +) + +// A Server serves cached, dynamically-generated version control repositories. +type Server struct { + env []string + logger *log.Logger + + scriptDir string + workDir string + homeDir string // $workdir/home + engine *script.Engine + + scriptCache sync.Map // script path → *scriptResult + + vcsHandlers map[string]vcsHandler +} + +// A vcsHandler serves repositories over HTTP for a known version-control tool. 
+type vcsHandler interface { + Available() bool + Handler(dir string, env []string, logger *log.Logger) (http.Handler, error) +} + +// A scriptResult describes the cached result of executing a vcweb script. +type scriptResult struct { + mu sync.RWMutex + + hash [sha256.Size]byte // hash of the script file, for cache invalidation + hashTime time.Time // timestamp at which the script was run, for diagnostics + + handler http.Handler // HTTP handler configured by the script + err error // error from executing the script, if any +} + +// NewServer returns a Server that generates and serves repositories in workDir +// using the scripts found in scriptDir and its subdirectories. +// +// A request for the path /foo/bar/baz will be handled by the first script along +// that path that exists: $scriptDir/foo.txt, $scriptDir/foo/bar.txt, or +// $scriptDir/foo/bar/baz.txt. +func NewServer(scriptDir, workDir string, logger *log.Logger) (*Server, error) { + if scriptDir == "" { + panic("vcweb.NewServer: scriptDir is required") + } + var err error + scriptDir, err = filepath.Abs(scriptDir) + if err != nil { + return nil, err + } + + if workDir == "" { + workDir, err = os.MkdirTemp("", "vcweb-*") + if err != nil { + return nil, err + } + logger.Printf("vcweb work directory: %s", workDir) + } else { + workDir, err = filepath.Abs(workDir) + if err != nil { + return nil, err + } + } + + homeDir := filepath.Join(workDir, "home") + if err := os.MkdirAll(homeDir, 0755); err != nil { + return nil, err + } + + env := scriptEnviron(homeDir) + + s := &Server{ + env: env, + logger: logger, + scriptDir: scriptDir, + workDir: workDir, + homeDir: homeDir, + engine: newScriptEngine(), + vcsHandlers: map[string]vcsHandler{ + "auth": new(authHandler), + "dir": new(dirHandler), + "bzr": new(bzrHandler), + "fossil": new(fossilHandler), + "git": new(gitHandler), + "hg": new(hgHandler), + "insecure": new(insecureHandler), + "svn": &svnHandler{svnRoot: workDir, logger: logger}, + }, + } + + if err := 
os.WriteFile(filepath.Join(s.homeDir, ".gitconfig"), []byte(gitConfig), 0644); err != nil { + return nil, err + } + gitConfigDir := filepath.Join(s.homeDir, ".config", "git") + if err := os.MkdirAll(gitConfigDir, 0755); err != nil { + return nil, err + } + if err := os.WriteFile(filepath.Join(gitConfigDir, "ignore"), []byte(""), 0644); err != nil { + return nil, err + } + + if err := os.WriteFile(filepath.Join(s.homeDir, ".hgrc"), []byte(hgrc), 0644); err != nil { + return nil, err + } + + return s, nil +} + +func (s *Server) Close() error { + var firstErr error + for _, h := range s.vcsHandlers { + if c, ok := h.(io.Closer); ok { + if closeErr := c.Close(); firstErr == nil { + firstErr = closeErr + } + } + } + return firstErr +} + +// gitConfig contains a ~/.gitconfg file that attempts to provide +// deterministic, platform-agnostic behavior for the 'git' command. +var gitConfig = ` +[user] + name = Go Gopher + email = gopher@golang.org +[init] + defaultBranch = main +[core] + eol = lf +[gui] + encoding = utf-8 +`[1:] + +// hgrc contains a ~/.hgrc file that attempts to provide +// deterministic, platform-agnostic behavior for the 'hg' command. +var hgrc = ` +[ui] +username=Go Gopher +[phases] +new-commit=public +[extensions] +convert= +`[1:] + +// ServeHTTP implements [http.Handler] for version-control repositories. +func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { + s.logger.Printf("serving %s", req.URL) + + defer func() { + if v := recover(); v != nil { + debug.PrintStack() + s.logger.Fatal(v) + } + }() + + urlPath := req.URL.Path + if !strings.HasPrefix(urlPath, "/") { + urlPath = "/" + urlPath + } + clean := path.Clean(urlPath)[1:] + if clean == "" { + s.overview(w, req) + return + } + if clean == "help" { + s.help(w, req) + return + } + + // Locate the script that generates the requested path. + // We follow directories all the way to the end, then look for a ".txt" file + // matching the first component that doesn't exist. 
That guarantees + // uniqueness: if a path exists as a directory, then it cannot exist as a + // ".txt" script (because the search would ignore that file). + scriptPath := "." + for _, part := range strings.Split(clean, "/") { + scriptPath = filepath.Join(scriptPath, part) + dir := filepath.Join(s.scriptDir, scriptPath) + if _, err := os.Stat(dir); err != nil { + if !os.IsNotExist(err) { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + // scriptPath does not exist as a directory, so it either is the script + // location or the script doesn't exist. + break + } + } + scriptPath += ".txt" + + err := s.HandleScript(scriptPath, s.logger, func(handler http.Handler) { + handler.ServeHTTP(w, req) + }) + if err != nil { + s.logger.Print(err) + if notFound := (ScriptNotFoundError{}); errors.As(err, ¬Found) { + http.NotFound(w, req) + } else if notInstalled := (ServerNotInstalledError{}); errors.As(err, ¬Installed) || errors.Is(err, exec.ErrNotFound) { + http.Error(w, err.Error(), http.StatusNotImplemented) + } else { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + } +} + +// A ScriptNotFoundError indicates that the requested script file does not exist. +// (It typically wraps a "stat" error for the script file.) +type ScriptNotFoundError struct{ err error } + +func (e ScriptNotFoundError) Error() string { return e.err.Error() } +func (e ScriptNotFoundError) Unwrap() error { return e.err } + +// A ServerNotInstalledError indicates that the server binary required for the +// indicated VCS does not exist. +type ServerNotInstalledError struct{ name string } + +func (v ServerNotInstalledError) Error() string { + return fmt.Sprintf("server for %#q VCS is not installed", v.name) +} + +// HandleScript ensures that the script at scriptRelPath has been evaluated +// with its current contents. 
+// +// If the script completed successfully, HandleScript invokes f on the handler +// with the script's result still read-locked, and waits for it to return. (That +// ensures that cache invalidation does not race with an in-flight handler.) +// +// Otherwise, HandleScript returns the (cached) error from executing the script. +func (s *Server) HandleScript(scriptRelPath string, logger *log.Logger, f func(http.Handler)) error { + ri, ok := s.scriptCache.Load(scriptRelPath) + if !ok { + ri, _ = s.scriptCache.LoadOrStore(scriptRelPath, new(scriptResult)) + } + r := ri.(*scriptResult) + + relDir := strings.TrimSuffix(scriptRelPath, filepath.Ext(scriptRelPath)) + workDir := filepath.Join(s.workDir, relDir) + prefix := path.Join("/", filepath.ToSlash(relDir)) + + r.mu.RLock() + defer r.mu.RUnlock() + for { + // For efficiency, we cache the script's output (in the work directory) + // across invocations. However, to allow for rapid iteration, we hash the + // script's contents and regenerate its output if the contents change. + // + // That way, one can use 'go run main.go' in this directory to stand up a + // server and see the output of the test script in order to fine-tune it. + content, err := os.ReadFile(filepath.Join(s.scriptDir, scriptRelPath)) + if err != nil { + if !os.IsNotExist(err) { + return err + } + return ScriptNotFoundError{err} + } + + hash := sha256.Sum256(content) + if prevHash := r.hash; prevHash != hash { + // The script's hash has changed, so regenerate its output. + func() { + r.mu.RUnlock() + r.mu.Lock() + defer func() { + r.mu.Unlock() + r.mu.RLock() + }() + if r.hash != prevHash { + // The cached result changed while we were waiting on the lock. + // It may have been updated to our hash or something even newer, + // so don't overwrite it. 
+ return + } + + r.hash = hash + r.hashTime = time.Now() + r.handler, r.err = nil, nil + + if err := os.RemoveAll(workDir); err != nil { + r.err = err + return + } + + // Note: we use context.Background here instead of req.Context() so that we + // don't cache a spurious error (and lose work) if the request is canceled + // while the script is still running. + scriptHandler, err := s.loadScript(context.Background(), logger, scriptRelPath, content, workDir) + if err != nil { + r.err = err + return + } + r.handler = http.StripPrefix(prefix, scriptHandler) + }() + } + + if r.hash != hash { + continue // Raced with an update from another handler; try again. + } + + if r.err != nil { + return r.err + } + f(r.handler) + return nil + } +} + +// overview serves an HTML summary of the status of the scripts in the server's +// script directory. +func (s *Server) overview(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "\n") + fmt.Fprintf(w, "vcweb\n
\n")
+	fmt.Fprintf(w, "vcweb\n\n")
+	fmt.Fprintf(w, "This server serves various version control repos for testing the go command.\n\n")
+	fmt.Fprintf(w, "For an overview of the script language, see /help.\n\n")
+
+	fmt.Fprintf(w, "cache\n")
+
+	tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
+	err := filepath.WalkDir(s.scriptDir, func(path string, d fs.DirEntry, err error) error {
+		if err != nil {
+			return err
+		}
+		if filepath.Ext(path) != ".txt" {
+			return nil
+		}
+
+		rel, err := filepath.Rel(s.scriptDir, path)
+		if err != nil {
+			return err
+		}
+		hashTime := "(not loaded)"
+		status := ""
+		if ri, ok := s.scriptCache.Load(rel); ok {
+			r := ri.(*scriptResult)
+			r.mu.RLock()
+			defer r.mu.RUnlock()
+
+			if !r.hashTime.IsZero() {
+				hashTime = r.hashTime.Format(time.RFC3339)
+			}
+			if r.err == nil {
+				status = "ok"
+			} else {
+				status = r.err.Error()
+			}
+		}
+		fmt.Fprintf(tw, "%s\t%s\t%s\n", rel, hashTime, status)
+		return nil
+	})
+	tw.Flush()
+
+	if err != nil {
+		fmt.Fprintln(w, err)
+	}
+}
+
// help serves a plain-text summary of the server's supported script language.
//
// The summary is produced by running the script engine's "help" command
// against a fresh script state rooted at the server's work directory, and
// writing whatever that command logs back to the client.
func (s *Server) help(w http.ResponseWriter, req *http.Request) {
	st, err := s.newState(req.Context(), s.workDir)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Execute the single command "help" and capture its log output.
	scriptLog := new(strings.Builder)
	err = s.engine.Execute(st, "help", bufio.NewReader(strings.NewReader("help")), scriptLog)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "text/plain; charset=UTF-8")
	io.WriteString(w, scriptLog.String())
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/vcweb_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/vcweb_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..20b213786feefb065d2c3a6e8f6121c75a4056a1
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vcweb/vcweb_test.go
@@ -0,0 +1,63 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vcweb_test
+
+import (
+	"cmd/go/internal/vcweb"
+	"io"
+	"log"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"testing"
+)
+
+func TestHelp(t *testing.T) {
+	s, err := vcweb.NewServer(os.DevNull, t.TempDir(), log.Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+	srv := httptest.NewServer(s)
+	defer srv.Close()
+
+	resp, err := http.Get(srv.URL + "/help")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != 200 {
+		t.Fatal(resp.Status)
+	}
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Logf("%s", body)
+}
+
+func TestOverview(t *testing.T) {
+	s, err := vcweb.NewServer(os.DevNull, t.TempDir(), log.Default())
+	if err != nil {
+		t.Fatal(err)
+	}
+	srv := httptest.NewServer(s)
+	defer srv.Close()
+
+	resp, err := http.Get(srv.URL)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != 200 {
+		t.Fatal(resp.Status)
+	}
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Logf("%s", body)
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/version/version.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/version/version.go
new file mode 100644
index 0000000000000000000000000000000000000000..4a0132a3fe92f40675e6ea76c81a14041742272a
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/version/version.go
@@ -0,0 +1,173 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package version implements the “go version” command.
+package version
+
+import (
+	"context"
+	"debug/buildinfo"
+	"errors"
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"cmd/go/internal/base"
+	"cmd/go/internal/gover"
+)
+
// CmdVersion describes and implements the "go version" command.
var CmdVersion = &base.Command{
	UsageLine: "go version [-m] [-v] [file ...]",
	Short:     "print Go version",
	Long: `Version prints the build information for Go binary files.

Go version reports the Go version used to build each of the named files.

If no files are named on the command line, go version prints its own
version information.

If a directory is named, go version walks that directory, recursively,
looking for recognized Go binaries and reporting their versions.
By default, go version does not report unrecognized files found
during a directory scan. The -v flag causes it to report unrecognized files.

The -m flag causes go version to print each file's embedded
module version information, when available. In the output, the module
information consists of multiple lines following the version line, each
indented by a leading tab character.

See also: go doc runtime/debug.BuildInfo.
`,
}

func init() {
	base.AddChdirFlag(&CmdVersion.Flag)
	CmdVersion.Run = runVersion // break init cycle
}

var (
	versionM = CmdVersion.Flag.Bool("m", false, "") // -m: print embedded module version information
	versionV = CmdVersion.Flag.Bool("v", false, "") // -v: report unrecognized files during directory scans
)
+
// runVersion implements "go version": with no arguments it prints this
// toolchain's own version; with arguments it reports the Go version used to
// build each named file, walking directories recursively.
func runVersion(ctx context.Context, cmd *base.Command, args []string) {
	if len(args) == 0 {
		// If any of this command's flags were passed explicitly, error
		// out, because they only make sense with arguments.
		//
		// Don't error if the flags came from GOFLAGS, since that can be
		// a reasonable use case. For example, imagine GOFLAGS=-v to
		// turn "verbose mode" on for all Go commands, which should not
		// break "go version".
		var argOnlyFlag string
		if !base.InGOFLAGS("-m") && *versionM {
			argOnlyFlag = "-m"
		} else if !base.InGOFLAGS("-v") && *versionV {
			argOnlyFlag = "-v"
		}
		if argOnlyFlag != "" {
			fmt.Fprintf(os.Stderr, "go: 'go version' only accepts %s flag with arguments\n", argOnlyFlag)
			base.SetExitStatus(2)
			return
		}
		v := runtime.Version()
		// Tests may override the reported version via gover.TestVersion.
		if gover.TestVersion != "" {
			v = gover.TestVersion + " (TESTGO_VERSION)"
		}
		fmt.Printf("go version %s %s/%s\n", v, runtime.GOOS, runtime.GOARCH)
		return
	}

	// With arguments: scan each named file or directory.
	for _, arg := range args {
		info, err := os.Stat(arg)
		if err != nil {
			fmt.Fprintf(os.Stderr, "%v\n", err)
			base.SetExitStatus(1)
			continue
		}
		if info.IsDir() {
			scanDir(arg)
		} else {
			// mustPrint: errors for files named explicitly are always reported.
			scanFile(arg, info, true)
		}
	}
}
+
+// scanDir scans a directory for binary to run scanFile on.
+func scanDir(dir string) {
+	filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
+		if d.Type().IsRegular() || d.Type()&fs.ModeSymlink != 0 {
+			info, err := d.Info()
+			if err != nil {
+				if *versionV {
+					fmt.Fprintf(os.Stderr, "%s: %v\n", path, err)
+				}
+				return nil
+			}
+			scanFile(path, info, *versionV)
+		}
+		return nil
+	})
+}
+
+// isGoBinaryCandidate reports whether the file is a candidate to be a Go binary.
+func isGoBinaryCandidate(file string, info fs.FileInfo) bool {
+	if info.Mode().IsRegular() && info.Mode()&0111 != 0 {
+		return true
+	}
+	name := strings.ToLower(file)
+	switch filepath.Ext(name) {
+	case ".so", ".exe", ".dll":
+		return true
+	default:
+		return strings.Contains(name, ".so.")
+	}
+}
+
+// scanFile scans file to try to report the Go and module versions.
+// If mustPrint is true, scanFile will report any error reading file.
+// Otherwise (mustPrint is false, because scanFile is being called
+// by scanDir) scanFile prints nothing for non-Go binaries.
+func scanFile(file string, info fs.FileInfo, mustPrint bool) {
+	if info.Mode()&fs.ModeSymlink != 0 {
+		// Accept file symlinks only.
+		i, err := os.Stat(file)
+		if err != nil || !i.Mode().IsRegular() {
+			if mustPrint {
+				fmt.Fprintf(os.Stderr, "%s: symlink\n", file)
+			}
+			return
+		}
+		info = i
+	}
+
+	bi, err := buildinfo.ReadFile(file)
+	if err != nil {
+		if mustPrint {
+			if pathErr := (*os.PathError)(nil); errors.As(err, &pathErr) && filepath.Clean(pathErr.Path) == filepath.Clean(file) {
+				fmt.Fprintf(os.Stderr, "%v\n", file)
+			} else {
+
+				// Skip errors for non-Go binaries.
+				// buildinfo.ReadFile errors are not fine-grained enough
+				// to know if the file is a Go binary or not,
+				// so try to infer it from the file mode and extension.
+				if isGoBinaryCandidate(file, info) {
+					fmt.Fprintf(os.Stderr, "%s: %v\n", file, err)
+				}
+			}
+		}
+		return
+	}
+
+	fmt.Printf("%s: %s\n", file, bi.GoVersion)
+	bi.GoVersion = "" // suppress printing go version again
+	mod := bi.String()
+	if *versionM && len(mod) > 0 {
+		fmt.Printf("\t%s\n", strings.ReplaceAll(mod[:len(mod)-1], "\n", "\n\t"))
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vet/vet.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vet/vet.go
new file mode 100644
index 0000000000000000000000000000000000000000..2d420971200bbcd4a5301bc985cb89c217a8be85
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vet/vet.go
@@ -0,0 +1,120 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package vet implements the “go vet” command.
+package vet
+
+import (
+	"context"
+	"fmt"
+	"path/filepath"
+
+	"cmd/go/internal/base"
+	"cmd/go/internal/cfg"
+	"cmd/go/internal/load"
+	"cmd/go/internal/modload"
+	"cmd/go/internal/trace"
+	"cmd/go/internal/work"
+)
+
// Break init loop: CmdVet references runVet, which references CmdVet.
func init() {
	CmdVet.Run = runVet
}

// CmdVet describes and implements the "go vet" command.
var CmdVet = &base.Command{
	CustomFlags: true,
	UsageLine:   "go vet [build flags] [-vettool prog] [vet flags] [packages]",
	Short:       "report likely mistakes in packages",
	Long: `
Vet runs the Go vet command on the packages named by the import paths.

For more about vet and its flags, see 'go doc cmd/vet'.
For more about specifying packages, see 'go help packages'.
For a list of checkers and their flags, see 'go tool vet help'.
For details of a specific checker such as 'printf', see 'go tool vet help printf'.

The -vettool=prog flag selects a different analysis tool with alternative
or additional checks.
For example, the 'shadow' analyzer can be built and run using these commands:

  go install golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow@latest
  go vet -vettool=$(which shadow)

The build flags supported by go vet are those that control package resolution
and execution, such as -C, -n, -x, -v, -tags, and -toolexec.
For more about these flags, see 'go help build'.

See also: go fmt, go fix.
	`,
}
+
+func runVet(ctx context.Context, cmd *base.Command, args []string) {
+	vetFlags, pkgArgs := vetFlags(args)
+	modload.InitWorkfile() // The vet command does custom flag processing; initialize workspaces after that.
+
+	if cfg.DebugTrace != "" {
+		var close func() error
+		var err error
+		ctx, close, err = trace.Start(ctx, cfg.DebugTrace)
+		if err != nil {
+			base.Fatalf("failed to start trace: %v", err)
+		}
+		defer func() {
+			if err := close(); err != nil {
+				base.Fatalf("failed to stop trace: %v", err)
+			}
+		}()
+	}
+
+	ctx, span := trace.StartSpan(ctx, fmt.Sprint("Running ", cmd.Name(), " command"))
+	defer span.Done()
+
+	work.BuildInit()
+	work.VetFlags = vetFlags
+	if len(vetFlags) > 0 {
+		work.VetExplicit = true
+	}
+	if vetTool != "" {
+		var err error
+		work.VetTool, err = filepath.Abs(vetTool)
+		if err != nil {
+			base.Fatalf("%v", err)
+		}
+	}
+
+	pkgOpts := load.PackageOpts{ModResolveTests: true}
+	pkgs := load.PackagesAndErrors(ctx, pkgOpts, pkgArgs)
+	load.CheckPackageErrors(pkgs)
+	if len(pkgs) == 0 {
+		base.Fatalf("no packages to vet")
+	}
+
+	b := work.NewBuilder("")
+	defer func() {
+		if err := b.Close(); err != nil {
+			base.Fatal(err)
+		}
+	}()
+
+	root := &work.Action{Mode: "go vet"}
+	for _, p := range pkgs {
+		_, ptest, pxtest, err := load.TestPackagesFor(ctx, pkgOpts, p, nil)
+		if err != nil {
+			base.Errorf("%v", err)
+			continue
+		}
+		if len(ptest.GoFiles) == 0 && len(ptest.CgoFiles) == 0 && pxtest == nil {
+			base.Errorf("go: can't vet %s: no Go files in %s", p.ImportPath, p.Dir)
+			continue
+		}
+		if len(ptest.GoFiles) > 0 || len(ptest.CgoFiles) > 0 {
+			root.Deps = append(root.Deps, b.VetAction(work.ModeBuild, work.ModeBuild, ptest))
+		}
+		if pxtest != nil {
+			root.Deps = append(root.Deps, b.VetAction(work.ModeBuild, work.ModeBuild, pxtest))
+		}
+	}
+	b.Do(ctx, root)
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/vet/vetflag.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/vet/vetflag.go
new file mode 100644
index 0000000000000000000000000000000000000000..eb7af6508d00be6debf34945e1188c9babd63ead
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/vet/vetflag.go
@@ -0,0 +1,191 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vet
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"flag"
+	"fmt"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"cmd/go/internal/base"
+	"cmd/go/internal/cmdflag"
+	"cmd/go/internal/work"
+)
+
+// go vet flag processing
+//
+// We query the flags of the tool specified by -vettool and accept any
+// of those flags plus any flag valid for 'go build'. The tool must
+// support -flags, which prints a description of its flags in JSON to
+// stdout.
+
// vetTool specifies the vet command to run.
// Any tool that supports the (still unpublished) vet
// command-line protocol may be supplied; see
// golang.org/x/tools/go/analysis/unitchecker for one
// implementation. It is also used by tests.
//
// The default behavior (vetTool=="") runs 'go tool vet'.
var vetTool string // -vettool

func init() {
	// Register the shared build flags and -vettool; the vet tool's own
	// flags are added later by vetFlags, after querying the tool.
	work.AddBuildFlags(CmdVet, work.DefaultBuildFlags)
	CmdVet.Flag.StringVar(&vetTool, "vettool", "", "")
}
+
+func parseVettoolFlag(args []string) {
+	// Extract -vettool by ad hoc flag processing:
+	// its value is needed even before we can declare
+	// the flags available during main flag processing.
+	for i, arg := range args {
+		if arg == "-vettool" || arg == "--vettool" {
+			if i+1 >= len(args) {
+				log.Fatalf("%s requires a filename", arg)
+			}
+			vetTool = args[i+1]
+			return
+		} else if strings.HasPrefix(arg, "-vettool=") ||
+			strings.HasPrefix(arg, "--vettool=") {
+			vetTool = arg[strings.IndexByte(arg, '=')+1:]
+			return
+		}
+	}
+}
+
+// vetFlags processes the command line, splitting it at the first non-flag
+// into the list of flags and list of packages.
+func vetFlags(args []string) (passToVet, packageNames []string) {
+	parseVettoolFlag(args)
+
+	// Query the vet command for its flags.
+	var tool string
+	if vetTool == "" {
+		tool = base.Tool("vet")
+	} else {
+		var err error
+		tool, err = filepath.Abs(vetTool)
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+	out := new(bytes.Buffer)
+	vetcmd := exec.Command(tool, "-flags")
+	vetcmd.Stdout = out
+	if err := vetcmd.Run(); err != nil {
+		fmt.Fprintf(os.Stderr, "go: can't execute %s -flags: %v\n", tool, err)
+		base.SetExitStatus(2)
+		base.Exit()
+	}
+	var analysisFlags []struct {
+		Name  string
+		Bool  bool
+		Usage string
+	}
+	if err := json.Unmarshal(out.Bytes(), &analysisFlags); err != nil {
+		fmt.Fprintf(os.Stderr, "go: can't unmarshal JSON from %s -flags: %v", tool, err)
+		base.SetExitStatus(2)
+		base.Exit()
+	}
+
+	// Add vet's flags to CmdVet.Flag.
+	//
+	// Some flags, in particular -tags and -v, are known to vet but
+	// also defined as build flags. This works fine, so we omit duplicates here.
+	// However some, like -x, are known to the build but not to vet.
+	isVetFlag := make(map[string]bool, len(analysisFlags))
+	cf := CmdVet.Flag
+	for _, f := range analysisFlags {
+		isVetFlag[f.Name] = true
+		if cf.Lookup(f.Name) == nil {
+			if f.Bool {
+				cf.Bool(f.Name, false, "")
+			} else {
+				cf.String(f.Name, "", "")
+			}
+		}
+	}
+
+	// Record the set of vet tool flags set by GOFLAGS. We want to pass them to
+	// the vet tool, but only if they aren't overridden by an explicit argument.
+	base.SetFromGOFLAGS(&CmdVet.Flag)
+	addFromGOFLAGS := map[string]bool{}
+	CmdVet.Flag.Visit(func(f *flag.Flag) {
+		if isVetFlag[f.Name] {
+			addFromGOFLAGS[f.Name] = true
+		}
+	})
+
+	explicitFlags := make([]string, 0, len(args))
+	for len(args) > 0 {
+		f, remainingArgs, err := cmdflag.ParseOne(&CmdVet.Flag, args)
+
+		if errors.Is(err, flag.ErrHelp) {
+			exitWithUsage()
+		}
+
+		if errors.Is(err, cmdflag.ErrFlagTerminator) {
+			// All remaining args must be package names, but the flag terminator is
+			// not included.
+			packageNames = remainingArgs
+			break
+		}
+
+		if nf := (cmdflag.NonFlagError{}); errors.As(err, &nf) {
+			// Everything from here on out — including the argument we just consumed —
+			// must be a package name.
+			packageNames = args
+			break
+		}
+
+		if err != nil {
+			fmt.Fprintln(os.Stderr, err)
+			exitWithUsage()
+		}
+
+		if isVetFlag[f.Name] {
+			// Forward the raw arguments rather than cleaned equivalents, just in
+			// case the vet tool parses them idiosyncratically.
+			explicitFlags = append(explicitFlags, args[:len(args)-len(remainingArgs)]...)
+
+			// This flag has been overridden explicitly, so don't forward its implicit
+			// value from GOFLAGS.
+			delete(addFromGOFLAGS, f.Name)
+		}
+
+		args = remainingArgs
+	}
+
+	// Prepend arguments from GOFLAGS before other arguments.
+	CmdVet.Flag.Visit(func(f *flag.Flag) {
+		if addFromGOFLAGS[f.Name] {
+			passToVet = append(passToVet, fmt.Sprintf("-%s=%s", f.Name, f.Value))
+		}
+	})
+	passToVet = append(passToVet, explicitFlags...)
+	return passToVet, packageNames
+}
+
+func exitWithUsage() {
+	fmt.Fprintf(os.Stderr, "usage: %s\n", CmdVet.UsageLine)
+	fmt.Fprintf(os.Stderr, "Run 'go help %s' for details.\n", CmdVet.LongName())
+
+	// This part is additional to what (*Command).Usage does:
+	cmd := "go tool vet"
+	if vetTool != "" {
+		cmd = vetTool
+	}
+	fmt.Fprintf(os.Stderr, "Run '%s help' for a full list of flags and analyzers.\n", cmd)
+	fmt.Fprintf(os.Stderr, "Run '%s -help' for an overview.\n", cmd)
+
+	base.SetExitStatus(2)
+	base.Exit()
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/web/api.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/api.go
new file mode 100644
index 0000000000000000000000000000000000000000..7a6e0c310c9a7286441bb8f863de5d907b442a42
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/api.go
@@ -0,0 +1,246 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package web defines minimal helper routines for accessing HTTP/HTTPS
+// resources without requiring external dependencies on the net package.
+//
+// If the cmd_go_bootstrap build tag is present, web avoids the use of the net
+// package and returns errors for all network operations.
+package web
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/fs"
+	"net/url"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
// SecurityMode specifies whether a function should make network
// calls using insecure transports (eg, plain text HTTP).
// The zero value is "secure".
type SecurityMode int

const (
	SecureOnly      SecurityMode = iota // Reject plain HTTP; validate HTTPS.
	DefaultSecurity                     // Allow plain HTTP if explicit; validate HTTPS.
	Insecure                            // Allow plain HTTP if not explicitly HTTPS; skip HTTPS validation.
)

// An HTTPError describes an HTTP error response (non-200 result).
type HTTPError struct {
	URL        string // redacted
	Status     string // e.g. "404 Not Found"
	StatusCode int    // e.g. 404
	Err        error  // underlying error, if known
	Detail     string // limited to maxErrorDetailLines and maxErrorDetailBytes
}

// Limits on how much of an error response body is retained for display.
const (
	maxErrorDetailLines = 8
	maxErrorDetailBytes = maxErrorDetailLines * 81
)
+
+func (e *HTTPError) Error() string {
+	if e.Detail != "" {
+		detailSep := " "
+		if strings.ContainsRune(e.Detail, '\n') {
+			detailSep = "\n\t"
+		}
+		return fmt.Sprintf("reading %s: %v\n\tserver response:%s%s", e.URL, e.Status, detailSep, e.Detail)
+	}
+
+	if eErr := e.Err; eErr != nil {
+		if pErr, ok := e.Err.(*fs.PathError); ok {
+			if u, err := url.Parse(e.URL); err == nil {
+				if fp, err := urlToFilePath(u); err == nil && pErr.Path == fp {
+					// Remove the redundant copy of the path.
+					eErr = pErr.Err
+				}
+			}
+		}
+		return fmt.Sprintf("reading %s: %v", e.URL, eErr)
+	}
+
+	return fmt.Sprintf("reading %s: %v", e.URL, e.Status)
+}
+
+func (e *HTTPError) Is(target error) bool {
+	return target == fs.ErrNotExist && (e.StatusCode == 404 || e.StatusCode == 410)
+}
+
+func (e *HTTPError) Unwrap() error {
+	return e.Err
+}
+
// GetBytes returns the body of the requested resource, or an error if the
// response status was not http.StatusOK.
//
// GetBytes is a convenience wrapper around Get and Response.Err.
func GetBytes(u *url.URL) ([]byte, error) {
	resp, err := Get(DefaultSecurity, u)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// Convert a non-200 status into an *HTTPError.
	if err := resp.Err(); err != nil {
		return nil, err
	}
	b, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("reading %s: %v", u.Redacted(), err)
	}
	return b, nil
}
+
// A Response represents the result of fetching a URL (HTTP, HTTPS, or file).
type Response struct {
	URL        string // redacted
	Status     string
	StatusCode int
	Header     map[string][]string
	Body       io.ReadCloser // Either the original body or &errorDetail.

	fileErr     error             // underlying error for file:// fetches, if any; surfaced via Err
	errorDetail errorDetailBuffer // retains a prefix of Body for error messages
}

// Err returns an *HTTPError corresponding to the response r.
// If the response r has StatusCode 200 or 0 (unset), Err returns nil.
// Otherwise, Err may read from r.Body in order to extract relevant error detail.
func (r *Response) Err() error {
	if r.StatusCode == 200 || r.StatusCode == 0 {
		return nil
	}

	return &HTTPError{
		URL:        r.URL,
		Status:     r.Status,
		StatusCode: r.StatusCode,
		Err:        r.fileErr,
		Detail:     r.formatErrorDetail(),
	}
}
+
// formatErrorDetail converts r.errorDetail (a prefix of the output of r.Body)
// into a short, tab-indented summary.
//
// It returns "" when detail collection was not enabled for this response,
// when the captured prefix is not valid UTF-8, or when it contains
// non-printable characters.
func (r *Response) formatErrorDetail() string {
	if r.Body != &r.errorDetail {
		return "" // Error detail collection not enabled.
	}

	// Ensure that r.errorDetail has been populated.
	_, _ = io.Copy(io.Discard, r.Body)

	s := r.errorDetail.buf.String()
	if !utf8.ValidString(s) {
		return "" // Don't try to recover non-UTF-8 error messages.
	}
	for _, r := range s {
		if !unicode.IsGraphic(r) && !unicode.IsSpace(r) {
			return "" // Don't let the server do any funny business with the user's terminal.
		}
	}

	// Join lines with "\n\t" so the detail is indented under the error
	// message; truncate at the first blank line, at maxErrorDetailLines,
	// or at maxErrorDetailBytes, whichever comes first.
	var detail strings.Builder
	for i, line := range strings.Split(s, "\n") {
		if strings.TrimSpace(line) == "" {
			break // Stop at the first blank line.
		}
		if i > 0 {
			detail.WriteString("\n\t")
		}
		if i >= maxErrorDetailLines {
			detail.WriteString("[Truncated: too many lines.]")
			break
		}
		if detail.Len()+len(line) > maxErrorDetailBytes {
			detail.WriteString("[Truncated: too long.]")
			break
		}
		detail.WriteString(line)
	}

	return detail.String()
}
+
// Get returns the body of the HTTP or HTTPS resource specified at the given URL.
//
// If the URL does not include an explicit scheme, Get first tries "https".
// If the server does not respond under that scheme and the security mode is
// Insecure, Get then tries "http".
// The URL included in the response indicates which scheme was actually used,
// and it is a redacted URL suitable for use in error messages.
//
// For the "https" scheme only, credentials are attached using the
// cmd/go/internal/auth package. If the URL itself includes a username and
// password, it will not be attempted under the "http" scheme unless the
// security mode is Insecure.
//
// Get returns a non-nil error only if the request did not receive a response
// under any applicable scheme. (A non-2xx response does not cause an error.)
func Get(security SecurityMode, u *url.URL) (*Response, error) {
	// The implementation is build-configuration dependent (see bootstrap.go
	// and http.go).
	return get(security, u)
}

// OpenBrowser attempts to open the requested URL in a web browser.
func OpenBrowser(url string) (opened bool) {
	return openBrowser(url)
}
+
+// Join returns the result of adding the slash-separated
+// path elements to the end of u's path.
+func Join(u *url.URL, path string) *url.URL {
+	j := *u
+	if path == "" {
+		return &j
+	}
+	j.Path = strings.TrimSuffix(u.Path, "/") + "/" + strings.TrimPrefix(path, "/")
+	j.RawPath = strings.TrimSuffix(u.RawPath, "/") + "/" + strings.TrimPrefix(path, "/")
+	return &j
+}
+
// An errorDetailBuffer is an io.ReadCloser that copies up to
// maxErrorDetailLines into a buffer for later inspection.
type errorDetailBuffer struct {
	r        io.ReadCloser
	buf      strings.Builder // captured prefix of r's content
	bufLines int             // count of newline-terminated lines written to buf
}

// Close closes the underlying reader.
func (b *errorDetailBuffer) Close() error {
	return b.r.Close()
}
+
// Read reads from the underlying reader, transparently capturing a prefix of
// the stream into b.buf for later use in error messages.
func (b *errorDetailBuffer) Read(p []byte) (n int, err error) {
	n, err = b.r.Read(p)

	// Copy the first maxErrorDetailLines+1 lines into b.buf,
	// discarding any further lines.
	//
	// Note that the read may begin or end in the middle of a UTF-8 character,
	// so don't try to do anything fancy with characters that encode to larger
	// than one byte.
	//
	// NOTE(review): the line accounting here is approximate — SplitAfterN's
	// final chunk may contain several newlines but only bumps bufLines once,
	// and when bufLines == maxErrorDetailLines the SplitAfterN count is 0
	// (nil result), so nothing further is captured. Presumably acceptable
	// since the buffer is only used for best-effort error detail — confirm
	// before tightening.
	if b.bufLines <= maxErrorDetailLines {
		for _, line := range bytes.SplitAfterN(p[:n], []byte("\n"), maxErrorDetailLines-b.bufLines) {
			b.buf.Write(line)
			if len(line) > 0 && line[len(line)-1] == '\n' {
				b.bufLines++
				if b.bufLines > maxErrorDetailLines {
					break
				}
			}
		}
	}

	return n, err
}
+
// IsLocalHost reports whether the given URL refers to a local
// (loopback) host, such as "localhost" or "127.0.0.1:8080".
// The implementation is build-configuration dependent (see bootstrap.go
// and http.go).
func IsLocalHost(u *url.URL) bool {
	return isLocalHost(u)
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/web/bootstrap.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/bootstrap.go
new file mode 100644
index 0000000000000000000000000000000000000000..6312169ef00534a0dfb6a6cb9a835dd08292e2fb
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/bootstrap.go
@@ -0,0 +1,25 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cmd_go_bootstrap
+
+// This code is compiled only into the bootstrap 'go' binary.
+// These stubs avoid importing packages with large dependency
+// trees that potentially require C linking,
+// like the use of "net/http" in vcs.go.
+
+package web
+
+import (
+	"errors"
+	urlpkg "net/url"
+)
+
// get is the bootstrap stub for network fetches: the bootstrap 'go' binary
// is built without net/http, so every fetch fails.
func get(security SecurityMode, url *urlpkg.URL) (*Response, error) {
	return nil, errors.New("no http in bootstrap go command")
}

// openBrowser always reports failure in the bootstrap binary.
func openBrowser(url string) bool { return false }

// isLocalHost conservatively reports false in the bootstrap binary.
func isLocalHost(u *urlpkg.URL) bool { return false }
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/web/file_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/file_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..3734df5c4e9bc0382f28ed87e2257527e17fc64f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/file_test.go
@@ -0,0 +1,60 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package web
+
+import (
+	"errors"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"testing"
+)
+
// TestGetFileURL verifies that GetBytes can read a local file through a
// file:// URL and returns its exact contents.
func TestGetFileURL(t *testing.T) {
	const content = "Hello, file!\n"

	f, err := os.CreateTemp("", "web-TestGetFileURL")
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(f.Name())

	if _, err := f.WriteString(content); err != nil {
		t.Error(err)
	}
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}

	// Convert the OS path to a file:// URL and fetch it back.
	u, err := urlFromFilePath(f.Name())
	if err != nil {
		t.Fatal(err)
	}

	b, err := GetBytes(u)
	if err != nil {
		t.Fatalf("GetBytes(%v) = _, %v", u, err)
	}
	if string(b) != content {
		t.Fatalf("after writing %q to %s, GetBytes(%v) read %q", content, f.Name(), u, b)
	}
}
+
// TestGetNonexistentFile verifies that fetching a file:// URL for a missing
// file yields an error satisfying errors.Is(err, fs.ErrNotExist).
func TestGetNonexistentFile(t *testing.T) {
	path, err := filepath.Abs("nonexistent")
	if err != nil {
		t.Fatal(err)
	}

	u, err := urlFromFilePath(path)
	if err != nil {
		t.Fatal(err)
	}

	b, err := GetBytes(u)
	if !errors.Is(err, fs.ErrNotExist) {
		t.Fatalf("GetBytes(%v) = %q, %v; want _, fs.ErrNotExist", u, b, err)
	}
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/web/http.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/http.go
new file mode 100644
index 0000000000000000000000000000000000000000..bd5f82856ae6b4df8a3ef560ebc83ecb20c47ea6
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/http.go
@@ -0,0 +1,399 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cmd_go_bootstrap
+
+// This code is compiled into the real 'go' binary, but it is not
+// compiled into the binary that is built during all.bash, so as
+// to avoid needing to build net (and thus use cgo) during the
+// bootstrap process.
+
+package web
+
+import (
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"mime"
+	"net"
+	"net/http"
+	urlpkg "net/url"
+	"os"
+	"strings"
+	"time"
+
+	"cmd/go/internal/auth"
+	"cmd/go/internal/base"
+	"cmd/go/internal/cfg"
+	"cmd/internal/browser"
+)
+
// impatientInsecureHTTPClient is used with GOINSECURE,
// when we're connecting to https servers that might not be there
// or might be using self-signed certificates.
//
// It gives up after 5 seconds and skips TLS certificate validation.
var impatientInsecureHTTPClient = &http.Client{
	CheckRedirect: checkRedirect,
	Timeout:       5 * time.Second,
	Transport: &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: true,
		},
	},
}
+
+var securityPreservingDefaultClient = securityPreservingHTTPClient(http.DefaultClient)
+
+// securityPreservingHTTPClient returns a client that is like the original
+// but rejects redirects to plain-HTTP URLs if the original URL was secure.
+func securityPreservingHTTPClient(original *http.Client) *http.Client {
+	c := new(http.Client)
+	*c = *original
+	c.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+		if len(via) > 0 && via[0].URL.Scheme == "https" && req.URL.Scheme != "https" {
+			lastHop := via[len(via)-1].URL
+			return fmt.Errorf("redirected from secure URL %s to insecure URL %s", lastHop, req.URL)
+		}
+		return checkRedirect(req, via)
+	}
+	return c
+}
+
// checkRedirect is the shared redirect policy: it caps redirect chains at
// ten hops and applies the test-hook host interception to each hop.
func checkRedirect(req *http.Request, via []*http.Request) error {
	// Go's http.DefaultClient allows 10 redirects before returning an error.
	// Mimic that behavior here.
	if len(via) >= 10 {
		return errors.New("stopped after 10 redirects")
	}

	interceptRequest(req)
	return nil
}
+
// An Interceptor redirects requests for one host to another, for use by
// tests (see EnableTestHooks).
type Interceptor struct {
	Scheme   string       // URL scheme to match; interceptURL also matches URLs with no scheme
	FromHost string       // host to intercept (required)
	ToHost   string       // host that intercepted requests are dialed to (required)
	Client   *http.Client // optional client used for intercepted requests
}
+
// EnableTestHooks installs the given interceptors for use by tests.
// It returns an error if test hooks are already enabled, and panics if any
// interceptor is missing a FromHost or ToHost.
func EnableTestHooks(interceptors []Interceptor) error {
	if enableTestHooks {
		return errors.New("web: test hooks already enabled")
	}

	// Validate before installing anything.
	for _, t := range interceptors {
		if t.FromHost == "" {
			panic("EnableTestHooks: missing FromHost")
		}
		if t.ToHost == "" {
			panic("EnableTestHooks: missing ToHost")
		}
	}

	testInterceptors = interceptors
	enableTestHooks = true
	return nil
}
+
// DisableTestHooks removes the interceptors installed by EnableTestHooks.
// It panics if test hooks are not currently enabled.
func DisableTestHooks() {
	if !enableTestHooks {
		panic("web: test hooks not enabled")
	}
	enableTestHooks = false
	testInterceptors = nil
}

// Test-hook state guarded by the enable/disable pair above.
var (
	enableTestHooks  = false
	testInterceptors []Interceptor
)
+
// interceptURL returns the interceptor matching u's host (and scheme, if u
// has one), or false if test hooks are disabled or nothing matches.
// The returned pointer aliases an entry of testInterceptors.
func interceptURL(u *urlpkg.URL) (*Interceptor, bool) {
	if !enableTestHooks {
		return nil, false
	}
	for i, t := range testInterceptors {
		if u.Host == t.FromHost && (u.Scheme == "" || u.Scheme == t.Scheme) {
			return &testInterceptors[i], true
		}
	}
	return nil, false
}
+
// interceptRequest rewrites req in place when an interceptor matches:
// the original host is preserved in req.Host (for the Host header) while
// the dial target req.URL.Host is replaced with the interceptor's ToHost.
func interceptRequest(req *http.Request) {
	if t, ok := interceptURL(req.URL); ok {
		req.Host = req.URL.Host
		req.URL.Host = t.ToHost
	}
}
+
+// get fetches url subject to the given security mode. file: URLs are read
+// from the local file system. For an empty or "https" scheme it attempts
+// HTTPS first and falls back to plain HTTP only when security is Insecure;
+// a plain "http" scheme is refused entirely under SecureOnly.
+func get(security SecurityMode, url *urlpkg.URL) (*Response, error) {
+	start := time.Now()
+
+	if url.Scheme == "file" {
+		return getFile(url)
+	}
+
+	// Test hooks can simulate proxy failures, unknown hosts, and forbid
+	// unexpected network use (TESTGONETWORK=panic).
+	if enableTestHooks {
+		switch url.Host {
+		case "proxy.golang.org":
+			if os.Getenv("TESTGOPROXY404") == "1" {
+				res := &Response{
+					URL:        url.Redacted(),
+					Status:     "404 testing",
+					StatusCode: 404,
+					Header:     make(map[string][]string),
+					Body:       http.NoBody,
+				}
+				if cfg.BuildX {
+					fmt.Fprintf(os.Stderr, "# get %s: %v (%.3fs)\n", url.Redacted(), res.Status, time.Since(start).Seconds())
+				}
+				return res, nil
+			}
+
+		case "localhost.localdev":
+			return nil, fmt.Errorf("no such host localhost.localdev")
+
+		default:
+			if os.Getenv("TESTGONETWORK") == "panic" {
+				if _, ok := interceptURL(url); !ok {
+					host := url.Host
+					if h, _, err := net.SplitHostPort(url.Host); err == nil && h != "" {
+						host = h
+					}
+					// Only loopback/unspecified addresses are allowed; anything
+					// else is an unexpected real network access in tests.
+					addr := net.ParseIP(host)
+					if addr == nil || (!addr.IsLoopback() && !addr.IsUnspecified()) {
+						panic("use of network: " + url.String())
+					}
+				}
+			}
+		}
+	}
+
+	// fetch performs a single GET of the given URL, applying credentials,
+	// test interception, and the network-token accounting in base.AcquireNet.
+	fetch := func(url *urlpkg.URL) (*http.Response, error) {
+		// Note: The -v build flag does not mean "print logging information",
+		// despite its historical misuse for this in GOPATH-based go get.
+		// We print extra logging in -x mode instead, which traces what
+		// commands are executed.
+		if cfg.BuildX {
+			fmt.Fprintf(os.Stderr, "# get %s\n", url.Redacted())
+		}
+
+		req, err := http.NewRequest("GET", url.String(), nil)
+		if err != nil {
+			return nil, err
+		}
+		if url.Scheme == "https" {
+			auth.AddCredentials(req)
+		}
+		t, intercepted := interceptURL(req.URL)
+		if intercepted {
+			req.Host = req.URL.Host
+			req.URL.Host = t.ToHost
+		}
+
+		release, err := base.AcquireNet()
+		if err != nil {
+			return nil, err
+		}
+
+		var res *http.Response
+		if security == Insecure && url.Scheme == "https" { // fail earlier
+			res, err = impatientInsecureHTTPClient.Do(req)
+		} else {
+			if intercepted && t.Client != nil {
+				client := securityPreservingHTTPClient(t.Client)
+				res, err = client.Do(req)
+			} else {
+				res, err = securityPreservingDefaultClient.Do(req)
+			}
+		}
+
+		if err != nil {
+			// Per the docs for [net/http.Client.Do], “On error, any Response can be
+			// ignored. A non-nil Response with a non-nil error only occurs when
+			// CheckRedirect fails, and even then the returned Response.Body is
+			// already closed.”
+			release()
+			return nil, err
+		}
+
+		// “If the returned error is nil, the Response will contain a non-nil Body
+		// which the user is expected to close.”
+		body := res.Body
+		res.Body = hookCloser{
+			ReadCloser: body,
+			afterClose: release,
+		}
+		return res, err
+	}
+
+	var (
+		fetched *urlpkg.URL
+		res     *http.Response
+		err     error
+	)
+	if url.Scheme == "" || url.Scheme == "https" {
+		secure := new(urlpkg.URL)
+		*secure = *url
+		secure.Scheme = "https"
+
+		res, err = fetch(secure)
+		if err == nil {
+			fetched = secure
+		} else {
+			if cfg.BuildX {
+				fmt.Fprintf(os.Stderr, "# get %s: %v\n", secure.Redacted(), err)
+			}
+			if security != Insecure || url.Scheme == "https" {
+				// HTTPS failed, and we can't fall back to plain HTTP.
+				// Report the error from the HTTPS attempt.
+				return nil, err
+			}
+		}
+	}
+
+	if res == nil {
+		switch url.Scheme {
+		case "http":
+			if security == SecureOnly {
+				if cfg.BuildX {
+					fmt.Fprintf(os.Stderr, "# get %s: insecure\n", url.Redacted())
+				}
+				return nil, fmt.Errorf("insecure URL: %s", url.Redacted())
+			}
+		case "":
+			if security != Insecure {
+				panic("should have returned after HTTPS failure")
+			}
+		default:
+			if cfg.BuildX {
+				fmt.Fprintf(os.Stderr, "# get %s: unsupported\n", url.Redacted())
+			}
+			return nil, fmt.Errorf("unsupported scheme: %s", url.Redacted())
+		}
+
+		insecure := new(urlpkg.URL)
+		*insecure = *url
+		insecure.Scheme = "http"
+		if insecure.User != nil && security != Insecure {
+			if cfg.BuildX {
+				fmt.Fprintf(os.Stderr, "# get %s: insecure credentials\n", insecure.Redacted())
+			}
+			return nil, fmt.Errorf("refusing to pass credentials to insecure URL: %s", insecure.Redacted())
+		}
+
+		res, err = fetch(insecure)
+		if err == nil {
+			fetched = insecure
+		} else {
+			if cfg.BuildX {
+				fmt.Fprintf(os.Stderr, "# get %s: %v\n", insecure.Redacted(), err)
+			}
+			// HTTP failed, and we already tried HTTPS if applicable.
+			// Report the error from the HTTP attempt.
+			return nil, err
+		}
+	}
+
+	// Note: accepting a non-200 OK here, so people can serve a
+	// meta import in their http 404 page.
+	if cfg.BuildX {
+		fmt.Fprintf(os.Stderr, "# get %s: %v (%.3fs)\n", fetched.Redacted(), res.Status, time.Since(start).Seconds())
+	}
+
+	r := &Response{
+		URL:        fetched.Redacted(),
+		Status:     res.Status,
+		StatusCode: res.StatusCode,
+		Header:     map[string][]string(res.Header),
+		Body:       res.Body,
+	}
+
+	if res.StatusCode != http.StatusOK {
+		contentType := res.Header.Get("Content-Type")
+		if mediaType, params, _ := mime.ParseMediaType(contentType); mediaType == "text/plain" {
+			switch charset := strings.ToLower(params["charset"]); charset {
+			case "us-ascii", "utf-8", "":
+				// Body claims to be plain text in UTF-8 or a subset thereof.
+				// Try to extract a useful error message from it.
+				r.errorDetail.r = res.Body
+				r.Body = &r.errorDetail
+			}
+		}
+	}
+
+	return r, nil
+}
+
+// getFile serves a file: URL from the local file system. "Not exist" and
+// "permission denied" errors are translated into HTTP-style 404 and 403
+// responses (with the underlying error preserved in fileErr) instead of
+// being returned as errors, so callers can treat files and HTTP uniformly.
+func getFile(u *urlpkg.URL) (*Response, error) {
+	path, err := urlToFilePath(u)
+	if err != nil {
+		return nil, err
+	}
+	f, err := os.Open(path)
+
+	if os.IsNotExist(err) {
+		return &Response{
+			URL:        u.Redacted(),
+			Status:     http.StatusText(http.StatusNotFound),
+			StatusCode: http.StatusNotFound,
+			Body:       http.NoBody,
+			fileErr:    err,
+		}, nil
+	}
+
+	if os.IsPermission(err) {
+		return &Response{
+			URL:        u.Redacted(),
+			Status:     http.StatusText(http.StatusForbidden),
+			StatusCode: http.StatusForbidden,
+			Body:       http.NoBody,
+			fileErr:    err,
+		}, nil
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	// The caller is responsible for closing the file via Response.Body.
+	return &Response{
+		URL:        u.Redacted(),
+		Status:     http.StatusText(http.StatusOK),
+		StatusCode: http.StatusOK,
+		Body:       f,
+	}, nil
+}
+
+// openBrowser asks the system browser to open url, reporting whether it succeeded.
+func openBrowser(url string) bool { return browser.Open(url) }
+
+// isLocalHost reports whether u refers to the local machine: either the
+// literal host "localhost" or a loopback IP address (any port).
+func isLocalHost(u *urlpkg.URL) bool {
+	// VCSTestRepoURL itself is secure, and it may redirect requests to other
+	// ports (such as a port serving the "svn" protocol) which should also be
+	// considered secure.
+	host, _, err := net.SplitHostPort(u.Host)
+	if err != nil {
+		// u.Host has no port; use it as-is.
+		host = u.Host
+	}
+	if host == "localhost" {
+		return true
+	}
+	if ip := net.ParseIP(host); ip != nil && ip.IsLoopback() {
+		return true
+	}
+	return false
+}
+
+// hookCloser wraps an io.ReadCloser and invokes afterClose once the
+// underlying Close has completed (used above to release the network
+// token acquired for the request).
+type hookCloser struct {
+	io.ReadCloser
+	afterClose func()
+}
+
+// Close closes the wrapped ReadCloser, runs the afterClose hook, and
+// returns the error (if any) from the underlying Close.
+func (c hookCloser) Close() error {
+	err := c.ReadCloser.Close()
+	c.afterClose()
+	return err
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/web/url.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/url.go
new file mode 100644
index 0000000000000000000000000000000000000000..146c51f0aecfca50ab5f2fdfd26087e27146864c
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/url.go
@@ -0,0 +1,95 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package web
+
+import (
+	"errors"
+	"net/url"
+	"path/filepath"
+	"strings"
+)
+
+// TODO(golang.org/issue/32456): If accepted, move these functions into the
+// net/url package.
+
+// errNotAbsolute is returned when a file URL or file path does not
+// denote an absolute path.
+var errNotAbsolute = errors.New("path is not absolute")
+
+// urlToFilePath converts a file: URL to an absolute file path for the
+// current platform. It rejects non-file URLs, URLs with no path,
+// non-local hosts (see convertFileURLPath), and relative paths.
+func urlToFilePath(u *url.URL) (string, error) {
+	if u.Scheme != "file" {
+		return "", errors.New("non-file URL")
+	}
+
+	checkAbs := func(path string) (string, error) {
+		if !filepath.IsAbs(path) {
+			return "", errNotAbsolute
+		}
+		return path, nil
+	}
+
+	if u.Path == "" {
+		// A URL of the form file:rel-or-abs-path (no authority) keeps its
+		// path in Opaque rather than Path.
+		if u.Host != "" || u.Opaque == "" {
+			return "", errors.New("file URL missing path")
+		}
+		return checkAbs(filepath.FromSlash(u.Opaque))
+	}
+
+	path, err := convertFileURLPath(u.Host, u.Path)
+	if err != nil {
+		return path, err
+	}
+	return checkAbs(path)
+}
+
+// urlFromFilePath converts an absolute file path to a file: URL. On
+// Windows, a UNC path (\\host\share\...) is encoded with the host as the
+// URL authority, and a drive-letter path becomes file:///C:/..., per the
+// Microsoft file-URI recommendations referenced below.
+func urlFromFilePath(path string) (*url.URL, error) {
+	if !filepath.IsAbs(path) {
+		return nil, errNotAbsolute
+	}
+
+	// If path has a Windows volume name, convert the volume to a host and prefix
+	// per https://blogs.msdn.microsoft.com/ie/2006/12/06/file-uris-in-windows/.
+	if vol := filepath.VolumeName(path); vol != "" {
+		if strings.HasPrefix(vol, `\\`) {
+			path = filepath.ToSlash(path[2:])
+			i := strings.IndexByte(path, '/')
+
+			if i < 0 {
+				// A degenerate case.
+				// \\host.example.com (without a share name)
+				// becomes
+				// file://host.example.com/
+				return &url.URL{
+					Scheme: "file",
+					Host:   path,
+					Path:   "/",
+				}, nil
+			}
+
+			// \\host.example.com\Share\path\to\file
+			// becomes
+			// file://host.example.com/Share/path/to/file
+			return &url.URL{
+				Scheme: "file",
+				Host:   path[:i],
+				Path:   filepath.ToSlash(path[i:]),
+			}, nil
+		}
+
+		// C:\path\to\file
+		// becomes
+		// file:///C:/path/to/file
+		return &url.URL{
+			Scheme: "file",
+			Path:   "/" + filepath.ToSlash(path),
+		}, nil
+	}
+
+	// /path/to/file
+	// becomes
+	// file:///path/to/file
+	return &url.URL{
+		Scheme: "file",
+		Path:   filepath.ToSlash(path),
+	}, nil
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/web/url_other.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/url_other.go
new file mode 100644
index 0000000000000000000000000000000000000000..84bbd72820fcab516624f09755265dcd97f0f81d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/url_other.go
@@ -0,0 +1,21 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows
+
+package web
+
+import (
+	"errors"
+	"path/filepath"
+)
+
+// convertFileURLPath converts a file-URL host and slash-separated path to
+// a platform file path. On non-Windows systems only an empty host or
+// "localhost" is accepted (RFC 8089 reserves both for the local machine).
+func convertFileURLPath(host, path string) (string, error) {
+	switch host {
+	case "", "localhost":
+	default:
+		return "", errors.New("file URL specifies non-local host")
+	}
+	return filepath.FromSlash(path), nil
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/web/url_other_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/url_other_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..5c197de800dc1f31e73ca132da21218f7c8df148
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/url_other_test.go
@@ -0,0 +1,36 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows
+
+package web
+
+// urlTests lists file-URL <-> file-path conversion cases for non-Windows
+// platforms, shared by TestURLToFilePath and TestURLFromFilePath.
+var urlTests = []struct {
+	url          string
+	filePath     string
+	canonicalURL string // If empty, assume equal to url.
+	wantErr      string
+}{
+	// Examples from RFC 8089:
+	{
+		url:      `file:///path/to/file`,
+		filePath: `/path/to/file`,
+	},
+	{
+		url:          `file:/path/to/file`,
+		filePath:     `/path/to/file`,
+		canonicalURL: `file:///path/to/file`,
+	},
+	{
+		url:          `file://localhost/path/to/file`,
+		filePath:     `/path/to/file`,
+		canonicalURL: `file:///path/to/file`,
+	},
+
+	// We reject non-local files.
+	{
+		url:     `file://host.example.com/path/to/file`,
+		wantErr: "file URL specifies non-local host",
+	},
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/web/url_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/url_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..8f462f53259b5c50ce27e828312ee41dbe32d4b2
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/url_test.go
@@ -0,0 +1,77 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package web
+
+import (
+	"net/url"
+	"testing"
+)
+
+// TestURLToFilePath checks that urlToFilePath converts each test URL to
+// the expected platform file path, or fails with the expected error.
+func TestURLToFilePath(t *testing.T) {
+	for _, tc := range urlTests {
+		if tc.url == "" {
+			continue
+		}
+		tc := tc
+
+		t.Run(tc.url, func(t *testing.T) {
+			u, err := url.Parse(tc.url)
+			if err != nil {
+				t.Fatalf("url.Parse(%q): %v", tc.url, err)
+			}
+
+			path, err := urlToFilePath(u)
+			if err != nil {
+				if err.Error() == tc.wantErr {
+					return
+				}
+				if tc.wantErr == "" {
+					t.Fatalf("urlToFilePath(%v): %v; want <nil>", u, err)
+				} else {
+					t.Fatalf("urlToFilePath(%v): %v; want %s", u, err, tc.wantErr)
+				}
+			}
+
+			if path != tc.filePath || tc.wantErr != "" {
+				t.Fatalf("urlToFilePath(%v) = %q, <nil>; want %q, %s", u, path, tc.filePath, tc.wantErr)
+			}
+		})
+	}
+}
+
+// TestURLFromFilePath checks that urlFromFilePath converts each test file
+// path to the expected (canonical) file URL, or fails with the expected error.
+func TestURLFromFilePath(t *testing.T) {
+	for _, tc := range urlTests {
+		if tc.filePath == "" {
+			continue
+		}
+		tc := tc
+
+		t.Run(tc.filePath, func(t *testing.T) {
+			u, err := urlFromFilePath(tc.filePath)
+			if err != nil {
+				if err.Error() == tc.wantErr {
+					return
+				}
+				if tc.wantErr == "" {
+					t.Fatalf("urlFromFilePath(%v): %v; want <nil>", tc.filePath, err)
+				} else {
+					t.Fatalf("urlFromFilePath(%v): %v; want %s", tc.filePath, err, tc.wantErr)
+				}
+			}
+
+			if tc.wantErr != "" {
+				t.Fatalf("urlFromFilePath(%v) = <nil>; want error: %s", tc.filePath, tc.wantErr)
+			}
+
+			wantURL := tc.url
+			if tc.canonicalURL != "" {
+				wantURL = tc.canonicalURL
+			}
+			if u.String() != wantURL {
+				t.Errorf("urlFromFilePath(%v) = %v; want %s", tc.filePath, u, wantURL)
+			}
+		})
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/web/url_windows.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/url_windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..2a65ec83f60e8e40ef2a19d75ef871e7a6856b52
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/url_windows.go
@@ -0,0 +1,43 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package web
+
+import (
+	"errors"
+	"path/filepath"
+	"strings"
+)
+
+// convertFileURLPath converts a file-URL host and slash-separated path to
+// a Windows file path: a non-local host becomes a UNC \\host prefix, and
+// an empty or "localhost" host requires a /-prefixed drive-letter path.
+func convertFileURLPath(host, path string) (string, error) {
+	if len(path) == 0 || path[0] != '/' {
+		return "", errNotAbsolute
+	}
+
+	path = filepath.FromSlash(path)
+
+	// We interpret Windows file URLs per the description in
+	// https://blogs.msdn.microsoft.com/ie/2006/12/06/file-uris-in-windows/.
+
+	// The host part of a file URL (if any) is the UNC volume name,
+	// but RFC 8089 reserves the authority "localhost" for the local machine.
+	if host != "" && host != "localhost" {
+		// A common "legacy" format omits the leading slash before a drive letter,
+		// encoding the drive letter as the host instead of part of the path.
+		// (See https://blogs.msdn.microsoft.com/freeassociations/2005/05/19/the-bizarre-and-unhappy-story-of-file-urls/.)
+		// We do not support that format, but we should at least emit a more
+		// helpful error message for it.
+		if filepath.VolumeName(host) != "" {
+			return "", errors.New("file URL encodes volume in host field: too few slashes?")
+		}
+		return `\\` + host + path, nil
+	}
+
+	// If host is empty, path must contain an initial slash followed by a
+	// drive letter and path. Remove the slash and verify that the path is valid.
+	if vol := filepath.VolumeName(path[1:]); vol == "" || strings.HasPrefix(vol, `\\`) {
+		return "", errors.New("file URL missing drive letter")
+	}
+	return path[1:], nil
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/web/url_windows_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/url_windows_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..06386a038988ca9a3bc34dcf5b7b072e1bdaf9ca
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/web/url_windows_test.go
@@ -0,0 +1,94 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package web
+
+// urlTests lists file-URL <-> file-path conversion cases for Windows,
+// shared by TestURLToFilePath and TestURLFromFilePath. Cases with an
+// empty url or filePath are skipped by the corresponding direction.
+var urlTests = []struct {
+	url          string
+	filePath     string
+	canonicalURL string // If empty, assume equal to url.
+	wantErr      string
+}{
+	// Examples from https://blogs.msdn.microsoft.com/ie/2006/12/06/file-uris-in-windows/:
+
+	{
+		url:      `file://laptop/My%20Documents/FileSchemeURIs.doc`,
+		filePath: `\\laptop\My Documents\FileSchemeURIs.doc`,
+	},
+	{
+		url:      `file:///C:/Documents%20and%20Settings/davris/FileSchemeURIs.doc`,
+		filePath: `C:\Documents and Settings\davris\FileSchemeURIs.doc`,
+	},
+	{
+		url:      `file:///D:/Program%20Files/Viewer/startup.htm`,
+		filePath: `D:\Program Files\Viewer\startup.htm`,
+	},
+	{
+		url:          `file:///C:/Program%20Files/Music/Web%20Sys/main.html?REQUEST=RADIO`,
+		filePath:     `C:\Program Files\Music\Web Sys\main.html`,
+		canonicalURL: `file:///C:/Program%20Files/Music/Web%20Sys/main.html`,
+	},
+	{
+		url:      `file://applib/products/a-b/abc_9/4148.920a/media/start.swf`,
+		filePath: `\\applib\products\a-b\abc_9\4148.920a\media\start.swf`,
+	},
+	{
+		url:     `file:////applib/products/a%2Db/abc%5F9/4148.920a/media/start.swf`,
+		wantErr: "file URL missing drive letter",
+	},
+	{
+		url:     `C:\Program Files\Music\Web Sys\main.html?REQUEST=RADIO`,
+		wantErr: "non-file URL",
+	},
+
+	// The example "file://D:\Program Files\Viewer\startup.htm" errors out in
+	// url.Parse, so we substitute a slash-based path for testing instead.
+	{
+		url:     `file://D:/Program Files/Viewer/startup.htm`,
+		wantErr: "file URL encodes volume in host field: too few slashes?",
+	},
+
+	// The blog post discourages the use of non-ASCII characters because they
+	// depend on the user's current codepage. However, when we are working with Go
+	// strings we assume UTF-8 encoding, and our url package refuses to encode
+	// URLs to non-ASCII strings.
+	{
+		url:          `file:///C:/exampleㄓ.txt`,
+		filePath:     `C:\exampleㄓ.txt`,
+		canonicalURL: `file:///C:/example%E3%84%93.txt`,
+	},
+	{
+		url:      `file:///C:/example%E3%84%93.txt`,
+		filePath: `C:\exampleㄓ.txt`,
+	},
+
+	// Examples from RFC 8089:
+
+	// We allow the drive-letter variation from section E.2, because it is
+	// simpler to support than not to. However, we do not generate the shorter
+	// form in the reverse direction.
+	{
+		url:          `file:c:/path/to/file`,
+		filePath:     `c:\path\to\file`,
+		canonicalURL: `file:///c:/path/to/file`,
+	},
+
+	// We encode the UNC share name as the authority following section E.3.1,
+	// because that is what the Microsoft blog post explicitly recommends.
+	{
+		url:      `file://host.example.com/Share/path/to/file.txt`,
+		filePath: `\\host.example.com\Share\path\to\file.txt`,
+	},
+
+	// We decline the four- and five-slash variations from section E.3.2.
+	// The paths in these URLs would change meaning under path.Clean.
+	{
+		url:     `file:////host.example.com/path/to/file`,
+		wantErr: "file URL missing drive letter",
+	},
+	{
+		url:     `file://///host.example.com/path/to/file`,
+		wantErr: "file URL missing drive letter",
+	},
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/work/action.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/action.go
new file mode 100644
index 0000000000000000000000000000000000000000..a59072e591d3549277abad2fc4e73b4e87f71aac
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/action.go
@@ -0,0 +1,945 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Action graph creation (planning).
+
+package work
+
+import (
+	"bufio"
+	"bytes"
+	"cmd/internal/cov/covcmd"
+	"container/heap"
+	"context"
+	"debug/elf"
+	"encoding/json"
+	"fmt"
+	"internal/platform"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+
+	"cmd/go/internal/base"
+	"cmd/go/internal/cache"
+	"cmd/go/internal/cfg"
+	"cmd/go/internal/load"
+	"cmd/go/internal/robustio"
+	"cmd/go/internal/str"
+	"cmd/go/internal/trace"
+	"cmd/internal/buildid"
+)
+
+// A Builder holds global state about a build.
+// It does not hold per-package state, because we
+// build packages in parallel, and the builder is shared.
+type Builder struct {
+	WorkDir            string                    // the temporary work directory (ends in filepath.Separator)
+	actionCache        map[cacheKey]*Action      // a cache of already-constructed actions
+	flagCache          map[[2]string]bool        // a cache of supported compiler flags
+	gccCompilerIDCache map[string]cache.ActionID // cache for gccCompilerID
+
+	IsCmdList           bool // running as part of go list; set p.Stale and additional fields below
+	NeedError           bool // list needs p.Error
+	NeedExport          bool // list needs p.Export
+	NeedCompiledGoFiles bool // list needs p.CompiledGoFiles
+	AllowErrors         bool // errors don't immediately exit the program
+
+	objdirSeq int // counter for NewObjdir
+	pkgSeq    int
+
+	backgroundSh *Shell // Shell that per-Action Shells are derived from
+
+	// Action-graph execution state.
+	exec      sync.Mutex
+	readySema chan bool
+	ready     actionQueue
+
+	// id guards the two caches below.
+	id           sync.Mutex
+	toolIDCache  map[string]string // tool name -> tool ID
+	buildIDCache map[string]string // file name -> build ID
+}
+
+// NOTE: Much of Action would not need to be exported if not for test.
+// Maybe test functionality should move into this package too?
+
+// An Actor runs an action.
+type Actor interface {
+	Act(*Builder, context.Context, *Action) error
+}
+
+// An ActorFunc is an Actor that calls the function.
+// It adapts a plain function to the Actor interface, analogous to http.HandlerFunc.
+type ActorFunc func(*Builder, context.Context, *Action) error
+
+// Act implements Actor by invoking f itself.
+func (f ActorFunc) Act(b *Builder, ctx context.Context, a *Action) error {
+	return f(b, ctx, a)
+}
+
+// An Action represents a single action in the action graph.
+type Action struct {
+	Mode       string        // description of action operation
+	Package    *load.Package // the package this action works on
+	Deps       []*Action     // actions that must happen before this one
+	Actor      Actor         // the action itself (nil = no-op)
+	IgnoreFail bool          // whether to run f even if dependencies fail
+	TestOutput *bytes.Buffer // test output buffer
+	Args       []string      // additional args for runProgram
+
+	triggers []*Action // inverse of deps
+
+	buggyInstall bool // is this a buggy install (see -linkshared)?
+
+	TryCache func(*Builder, *Action) bool // callback for cache bypass
+
+	// Generated files, directories.
+	Objdir   string         // directory for intermediate objects
+	Target   string         // goal of the action: the created package or executable
+	built    string         // the actual created package or executable
+	actionID cache.ActionID // cache ID of action input
+	buildID  string         // build ID of action output
+
+	VetxOnly  bool       // Mode=="vet": only being called to supply info about dependencies
+	needVet   bool       // Mode=="build": need to fill in vet config
+	needBuild bool       // Mode=="build": need to do actual build (can be false if needVet is true)
+	vetCfg    *vetConfig // vet config
+	output    []byte     // output redirect buffer (nil means use b.Print)
+
+	sh *Shell // lazily created per-Action shell; see Builder.Shell
+
+	// Execution state.
+	pending      int               // number of deps yet to complete
+	priority     int               // relative execution priority
+	Failed       bool              // whether the action failed
+	json         *actionJSON       // action graph information
+	nonGoOverlay map[string]string // map from non-.go source files to copied files in objdir. Nil if no overlay is used.
+	traceSpan    *trace.Span
+}
+
+// BuildActionID returns the action ID section of a's build ID.
+func (a *Action) BuildActionID() string { return actionID(a.buildID) }
+
+// BuildContentID returns the content ID section of a's build ID.
+func (a *Action) BuildContentID() string { return contentID(a.buildID) }
+
+// BuildID returns a's build ID.
+func (a *Action) BuildID() string { return a.buildID }
+
+// BuiltTarget returns the actual file that was built. This differs
+// from Target when the result was cached.
+func (a *Action) BuiltTarget() string { return a.built }
+
+// An actionQueue is a priority queue of actions,
+// ordered by ascending priority (smallest first).
+type actionQueue []*Action
+
+// Implement heap.Interface
+func (q *actionQueue) Len() int           { return len(*q) }
+func (q *actionQueue) Swap(i, j int)      { (*q)[i], (*q)[j] = (*q)[j], (*q)[i] }
+func (q *actionQueue) Less(i, j int) bool { return (*q)[i].priority < (*q)[j].priority }
+func (q *actionQueue) Push(x any)         { *q = append(*q, x.(*Action)) }
+func (q *actionQueue) Pop() any {
+	n := len(*q) - 1
+	x := (*q)[n]
+	*q = (*q)[:n]
+	return x
+}
+
+// push adds a to the queue, recording the time it became ready
+// when action-graph JSON debugging is enabled.
+func (q *actionQueue) push(a *Action) {
+	if a.json != nil {
+		a.json.TimeReady = time.Now()
+	}
+	heap.Push(q, a)
+}
+
+// pop removes and returns the lowest-priority action.
+func (q *actionQueue) pop() *Action {
+	return heap.Pop(q).(*Action)
+}
+
+// actionJSON is the JSON serialization of an Action,
+// produced by actionGraphJSON for debugging.
+type actionJSON struct {
+	ID         int
+	Mode       string
+	Package    string
+	Deps       []int     `json:",omitempty"`
+	IgnoreFail bool      `json:",omitempty"`
+	Args       []string  `json:",omitempty"`
+	Link       bool      `json:",omitempty"`
+	Objdir     string    `json:",omitempty"`
+	Target     string    `json:",omitempty"`
+	Priority   int       `json:",omitempty"`
+	Failed     bool      `json:",omitempty"`
+	Built      string    `json:",omitempty"`
+	VetxOnly   bool      `json:",omitempty"`
+	NeedVet    bool      `json:",omitempty"`
+	NeedBuild  bool      `json:",omitempty"`
+	ActionID   string    `json:",omitempty"`
+	BuildID    string    `json:",omitempty"`
+	TimeReady  time.Time `json:",omitempty"`
+	TimeStart  time.Time `json:",omitempty"`
+	TimeDone   time.Time `json:",omitempty"`
+
+	Cmd     []string      // `json:",omitempty"`
+	CmdReal time.Duration `json:",omitempty"`
+	CmdUser time.Duration `json:",omitempty"`
+	CmdSys  time.Duration `json:",omitempty"`
+}
+
+// cacheKey is the key for the action cache.
+type cacheKey struct {
+	mode string
+	p    *load.Package
+}
+
+// actionGraphJSON returns the JSON form of the action graph reachable
+// from a, assigning each action a stable integer ID in breadth-first
+// discovery order. On a marshaling failure it prints a diagnostic to
+// standard error and returns "".
+func actionGraphJSON(a *Action) string {
+	var workq []*Action
+	var inWorkq = make(map[*Action]int)
+
+	// add appends an action to the work queue once, remembering its ID.
+	add := func(a *Action) {
+		if _, ok := inWorkq[a]; ok {
+			return
+		}
+		inWorkq[a] = len(workq)
+		workq = append(workq, a)
+	}
+	add(a)
+
+	// Breadth-first walk: workq grows as dependencies are discovered.
+	for i := 0; i < len(workq); i++ {
+		for _, dep := range workq[i].Deps {
+			add(dep)
+		}
+	}
+
+	var list []*actionJSON
+	for id, a := range workq {
+		if a.json == nil {
+			a.json = &actionJSON{
+				Mode:       a.Mode,
+				ID:         id,
+				IgnoreFail: a.IgnoreFail,
+				Args:       a.Args,
+				Objdir:     a.Objdir,
+				Target:     a.Target,
+				Failed:     a.Failed,
+				Priority:   a.priority,
+				Built:      a.built,
+				VetxOnly:   a.VetxOnly,
+				NeedBuild:  a.needBuild,
+				NeedVet:    a.needVet,
+			}
+			if a.Package != nil {
+				// TODO(rsc): Make this a unique key for a.Package somehow.
+				a.json.Package = a.Package.ImportPath
+			}
+			for _, a1 := range a.Deps {
+				a.json.Deps = append(a.json.Deps, inWorkq[a1])
+			}
+		}
+		list = append(list, a.json)
+	}
+
+	js, err := json.MarshalIndent(list, "", "\t")
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "go: writing debug action graph: %v\n", err)
+		return ""
+	}
+	return string(js)
+}
+
+// BuildMode specifies the build mode:
+// are we just building things or also installing the results?
+type BuildMode int
+
+const (
+	ModeBuild        BuildMode = iota // build only; do not install
+	ModeInstall                       // build and install
+	ModeBuggyInstall                  // install with -linkshared quirks (see Action.buggyInstall)
+
+	// ModeVetOnly may be OR'ed into a mode to request vet-only processing.
+	ModeVetOnly = 1 << 8
+)
+
+// NewBuilder returns a new Builder ready for use.
+//
+// If workDir is the empty string, NewBuilder creates a WorkDir if needed
+// and arranges for it to be removed in case of an unclean exit.
+// The caller must Close the builder explicitly to clean up the WorkDir
+// before a clean exit.
+func NewBuilder(workDir string) *Builder {
+	b := new(Builder)
+
+	b.actionCache = make(map[cacheKey]*Action)
+	b.toolIDCache = make(map[string]string)
+	b.buildIDCache = make(map[string]string)
+
+	if workDir != "" {
+		b.WorkDir = workDir
+	} else if cfg.BuildN {
+		// With -n we never run commands, so a placeholder name suffices.
+		b.WorkDir = "$WORK"
+	} else {
+		if !buildInitStarted {
+			panic("internal error: NewBuilder called before BuildInit")
+		}
+		tmp, err := os.MkdirTemp(cfg.Getenv("GOTMPDIR"), "go-build")
+		if err != nil {
+			base.Fatalf("go: creating work dir: %v", err)
+		}
+		if !filepath.IsAbs(tmp) {
+			// GOTMPDIR may be relative; normalize so later chdirs can't break us.
+			abs, err := filepath.Abs(tmp)
+			if err != nil {
+				os.RemoveAll(tmp)
+				base.Fatalf("go: creating work dir: %v", err)
+			}
+			tmp = abs
+		}
+		b.WorkDir = tmp
+		// Register for cleanup by Close/closeBuilders.
+		builderWorkDirs.Store(b, b.WorkDir)
+		if cfg.BuildX || cfg.BuildWork {
+			fmt.Fprintf(os.Stderr, "WORK=%s\n", b.WorkDir)
+		}
+	}
+
+	b.backgroundSh = NewShell(b.WorkDir, nil)
+
+	if err := CheckGOOSARCHPair(cfg.Goos, cfg.Goarch); err != nil {
+		fmt.Fprintf(os.Stderr, "go: %v\n", err)
+		base.SetExitStatus(2)
+		base.Exit()
+	}
+
+	for _, tag := range cfg.BuildContext.BuildTags {
+		if strings.Contains(tag, ",") {
+			fmt.Fprintf(os.Stderr, "go: -tags space-separated list contains comma\n")
+			base.SetExitStatus(2)
+			base.Exit()
+		}
+	}
+
+	return b
+}
+
+// builderWorkDirs records the WorkDir of every live Builder so that
+// closeBuilders can detect leaks at exit.
+var builderWorkDirs sync.Map // *Builder → WorkDir
+
+// Close removes b's work directory (unless -work asked to keep it) and
+// unregisters b. Calling Close on an already-closed or workDir-supplied
+// Builder is a no-op.
+func (b *Builder) Close() error {
+	wd, ok := builderWorkDirs.Load(b)
+	if !ok {
+		return nil
+	}
+	defer builderWorkDirs.Delete(b)
+
+	if b.WorkDir != wd.(string) {
+		base.Errorf("go: internal error: Builder WorkDir unexpectedly changed from %s to %s", wd, b.WorkDir)
+	}
+
+	if !cfg.BuildWork {
+		if err := robustio.RemoveAll(b.WorkDir); err != nil {
+			return err
+		}
+	}
+	b.WorkDir = ""
+	return nil
+}
+
+// closeBuilders closes any Builders that were not explicitly closed.
+// A leaked Builder on an otherwise-successful exit is an internal error
+// and forces a nonzero exit status.
+func closeBuilders() {
+	leakedBuilders := 0
+	builderWorkDirs.Range(func(bi, _ any) bool {
+		leakedBuilders++
+		if err := bi.(*Builder).Close(); err != nil {
+			base.Error(err)
+		}
+		return true
+	})
+
+	if leakedBuilders > 0 && base.GetExitStatus() == 0 {
+		fmt.Fprintf(os.Stderr, "go: internal error: Builder leaked on successful exit\n")
+		base.SetExitStatus(1)
+	}
+}
+
+// CheckGOOSARCHPair returns an error if goos/goarch is not a supported
+// target for the configured compiler in the default build mode.
+func CheckGOOSARCHPair(goos, goarch string) error {
+	if !platform.BuildModeSupported(cfg.BuildContext.Compiler, "default", goos, goarch) {
+		return fmt.Errorf("unsupported GOOS/GOARCH pair %s/%s", goos, goarch)
+	}
+	return nil
+}
+
+// NewObjdir returns the name of a fresh object directory under b.WorkDir.
+// It is up to the caller to call b.Mkdir on the result at an appropriate time.
+// The result ends in a slash, so that file names in that directory
+// can be constructed with direct string addition.
+//
+// NewObjdir must be called only from a single goroutine at a time,
+// so it is safe to call during action graph construction, but it must not
+// be called during action graph execution.
+func (b *Builder) NewObjdir() string {
+	// objdirSeq is unguarded; see the single-goroutine requirement above.
+	b.objdirSeq++
+	return str.WithFilePathSeparator(filepath.Join(b.WorkDir, fmt.Sprintf("b%03d", b.objdirSeq)))
+}
+
+// readpkglist returns the list of packages that were built into the shared library
+// at shlibpath. For the native toolchain this list is stored, newline separated, in
+// an ELF note with name "Go\x00\x00" and type 1. For GCCGO it is extracted from the
+// .go_export section. Any failure to read the list is fatal.
+func readpkglist(shlibpath string) (pkgs []*load.Package) {
+	var stk load.ImportStack
+	if cfg.BuildToolchainName == "gccgo" {
+		f, err := elf.Open(shlibpath)
+		if err != nil {
+			base.Fatal(fmt.Errorf("failed to open shared library: %v", err))
+		}
+		sect := f.Section(".go_export")
+		if sect == nil {
+			base.Fatal(fmt.Errorf("%s: missing .go_export section", shlibpath))
+		}
+		data, err := sect.Data()
+		if err != nil {
+			base.Fatal(fmt.Errorf("%s: failed to read .go_export section: %v", shlibpath, err))
+		}
+		// Each package appears on a line of the form "pkgpath <path>;".
+		pkgpath := []byte("pkgpath ")
+		for _, line := range bytes.Split(data, []byte{'\n'}) {
+			if path, found := bytes.CutPrefix(line, pkgpath); found {
+				path = bytes.TrimSuffix(path, []byte{';'})
+				pkgs = append(pkgs, load.LoadPackageWithFlags(string(path), base.Cwd(), &stk, nil, 0))
+			}
+		}
+	} else {
+		pkglistbytes, err := buildid.ReadELFNote(shlibpath, "Go\x00\x00", 1)
+		if err != nil {
+			base.Fatalf("readELFNote failed: %v", err)
+		}
+		// The note payload is a newline-separated list of import paths.
+		scanner := bufio.NewScanner(bytes.NewBuffer(pkglistbytes))
+		for scanner.Scan() {
+			t := scanner.Text()
+			pkgs = append(pkgs, load.LoadPackageWithFlags(t, base.Cwd(), &stk, nil, 0))
+		}
+	}
+	return
+}
+
+// cacheAction looks up {mode, p} in the cache and returns the resulting action.
+// If the cache has no such action, f() is recorded and returned.
+// TODO(rsc): Change the second key from *load.Package to interface{},
+// to make the caching in linkShared less awkward?
+func (b *Builder) cacheAction(mode string, p *load.Package, f func() *Action) *Action {
+	key := cacheKey{mode, p}
+	act := b.actionCache[key]
+	if act == nil {
+		act = f()
+		b.actionCache[key] = act
+	}
+	return act
+}
+
+// AutoAction returns the "right" action for go build or go install of p:
+// a link action for a main package, otherwise a compile action.
+func (b *Builder) AutoAction(mode, depMode BuildMode, p *load.Package) *Action {
+	if p.Name != "main" {
+		return b.CompileAction(mode, depMode, p)
+	}
+	return b.LinkAction(mode, depMode, p)
+}
+
+// buildActor implements the Actor interface for package build
+// actions. For most package builds this simply means invoking the
+// *Builder.build method; in the case of "go test -cover" for
+// a package with no test files, we store some additional state
+// information in the build actor to help with reporting.
+type buildActor struct {
+	// name of static meta-data file fragment emitted by the cover
+	// tool as part of the package build action, for selected
+	// "go test -cover" runs.
+	covMetaFileName string
+}
+
+// newBuildActor returns a new buildActor object, setting up the
+// covMetaFileName field if 'genCoverMeta' flag is set.
+func newBuildActor(p *load.Package, genCoverMeta bool) *buildActor {
+	if !genCoverMeta {
+		return &buildActor{}
+	}
+	return &buildActor{
+		covMetaFileName: covcmd.MetaFileForPackage(p.ImportPath),
+	}
+}
+
+// Act implements the Actor interface for package build actions by
+// delegating to (*Builder).build.
+func (ba *buildActor) Act(b *Builder, ctx context.Context, a *Action) error {
+	return b.build(ctx, a)
+}
+
+// CompileAction returns the action for compiling and possibly installing
+// (according to mode) the given package. The resulting action is only
+// for building packages (archives), never for linking executables.
+// depMode is the action (build or install) to use when building dependencies.
+// To turn package main into an executable, call b.Link instead.
+func (b *Builder) CompileAction(mode, depMode BuildMode, p *load.Package) *Action {
+	// Remember whether the caller needs the package only for vet; the
+	// vet-only bit is stripped so it does not affect the cached action.
+	vetOnly := mode&ModeVetOnly != 0
+	mode &^= ModeVetOnly
+
+	if mode != ModeBuild && p.Target == "" {
+		// No permanent target.
+		mode = ModeBuild
+	}
+	if mode != ModeBuild && p.Name == "main" {
+		// We never install the .a file for a main package.
+		mode = ModeBuild
+	}
+
+	// Construct package build action.
+	a := b.cacheAction("build", p, func() *Action {
+		a := &Action{
+			Mode:    "build",
+			Package: p,
+			Actor:   newBuildActor(p, p.Internal.Cover.GenMeta),
+			Objdir:  b.NewObjdir(),
+		}
+
+		// Depend on the builds of all direct imports, unless the
+		// package is broken by an import cycle.
+		if p.Error == nil || !p.Error.IsImportCycle {
+			for _, p1 := range p.Internal.Imports {
+				a.Deps = append(a.Deps, b.CompileAction(depMode, depMode, p1))
+			}
+		}
+
+		if p.Standard {
+			switch p.ImportPath {
+			case "builtin", "unsafe":
+				// Fake packages - nothing to build.
+				a.Mode = "built-in package"
+				a.Actor = nil
+				return a
+			}
+
+			// gccgo standard library is "fake" too.
+			if cfg.BuildToolchainName == "gccgo" {
+				// the target name is needed for cgo.
+				a.Mode = "gccgo stdlib"
+				a.Target = p.Target
+				a.Actor = nil
+				return a
+			}
+		}
+
+		return a
+	})
+
+	// Find the build action; the cache entry may have been replaced
+	// by the install action during (*Builder).installAction.
+	buildAction := a
+	switch buildAction.Mode {
+	case "build", "built-in package", "gccgo stdlib":
+		// ok
+	case "build-install":
+		buildAction = a.Deps[0]
+	default:
+		panic("lost build action: " + buildAction.Mode)
+	}
+	// A vet-only request must not force a build nothing else needs.
+	buildAction.needBuild = buildAction.needBuild || !vetOnly
+
+	// Construct install action.
+	if mode == ModeInstall || mode == ModeBuggyInstall {
+		a = b.installAction(a, mode)
+	}
+
+	return a
+}
+
+// VetAction returns the action for running go vet on package p.
+// It depends on the action for compiling p.
+// If the caller may be causing p to be installed, it is up to the caller
+// to make sure that the install depends on (runs after) vet.
+func (b *Builder) VetAction(mode, depMode BuildMode, p *load.Package) *Action {
+	act := b.vetAction(mode, depMode, p)
+	// Clear the vetx-only marker that vetAction sets on every vet action.
+	act.VetxOnly = false
+	return act
+}
+
+// vetAction returns (creating and caching if necessary) the vet action
+// for p. The returned action has VetxOnly set; VetAction clears it.
+func (b *Builder) vetAction(mode, depMode BuildMode, p *load.Package) *Action {
+	// Construct vet action.
+	a := b.cacheAction("vet", p, func() *Action {
+		a1 := b.CompileAction(mode|ModeVetOnly, depMode, p)
+
+		// vet expects to be able to import "fmt".
+		var stk load.ImportStack
+		stk.Push("vet")
+		p1, err := load.LoadImportWithFlags("fmt", p.Dir, p, &stk, nil, 0)
+		if err != nil {
+			base.Fatalf("unexpected error loading fmt package from package %s: %v", p.ImportPath, err)
+		}
+		stk.Pop()
+		aFmt := b.CompileAction(ModeBuild, depMode, p1)
+
+		var deps []*Action
+		if a1.buggyInstall {
+			// (*Builder).vet expects deps[0] to be the package
+			// and deps[1] to be "fmt". If we see buggyInstall
+			// here then a1 is an install of a shared library,
+			// and the real package is a1.Deps[0].
+			deps = []*Action{a1.Deps[0], aFmt, a1}
+		} else {
+			deps = []*Action{a1, aFmt}
+		}
+		// Vet of p also depends on vet of everything p imports.
+		for _, p1 := range p.Internal.Imports {
+			deps = append(deps, b.vetAction(mode, depMode, p1))
+		}
+
+		a := &Action{
+			Mode:       "vet",
+			Package:    p,
+			Deps:       deps,
+			Objdir:     a1.Objdir,
+			VetxOnly:   true,
+			IgnoreFail: true, // it's OK if vet of dependencies "fails" (reports problems)
+		}
+		if a1.Actor == nil {
+			// Built-in packages like unsafe.
+			return a
+		}
+		deps[0].needVet = true
+		a.Actor = ActorFunc((*Builder).vet)
+		return a
+	})
+	return a
+}
+
+// LinkAction returns the action for linking p into an executable
+// and possibly installing the result (according to mode).
+// depMode is the action (build or install) to use when compiling dependencies.
+func (b *Builder) LinkAction(mode, depMode BuildMode, p *load.Package) *Action {
+	// Construct link action.
+	a := b.cacheAction("link", p, func() *Action {
+		a := &Action{
+			Mode:    "link",
+			Package: p,
+		}
+
+		a1 := b.CompileAction(ModeBuild, depMode, p)
+		a.Actor = ActorFunc((*Builder).link)
+		a.Deps = []*Action{a1}
+		// The link reuses the objdir of the main package's compile.
+		a.Objdir = a1.Objdir
+
+		// An executable file. (This is the name of a temporary file.)
+		// Because we run the temporary file in 'go run' and 'go test',
+		// the name will show up in ps listings. If the caller has specified
+		// a name, use that instead of a.out. The binary is generated
+		// in an otherwise empty subdirectory named exe to avoid
+		// naming conflicts. The only possible conflict is if we were
+		// to create a top-level package named exe.
+		name := "a.out"
+		if p.Internal.ExeName != "" {
+			name = p.Internal.ExeName
+		} else if (cfg.Goos == "darwin" || cfg.Goos == "windows") && cfg.BuildBuildmode == "c-shared" && p.Target != "" {
+			// On OS X, the linker output name gets recorded in the
+			// shared library's LC_ID_DYLIB load command.
+			// The code invoking the linker knows to pass only the final
+			// path element. Arrange that the path element matches what
+			// we'll install it as; otherwise the library is only loadable as "a.out".
+			// On Windows, DLL file name is recorded in PE file
+			// export section, so do like on OS X.
+			_, name = filepath.Split(p.Target)
+		}
+		a.Target = a.Objdir + filepath.Join("exe", name) + cfg.ExeSuffix
+		a.built = a.Target
+		b.addTransitiveLinkDeps(a, a1, "")
+
+		// Sequence the build of the main package (a1) strictly after the build
+		// of all other dependencies that go into the link. It is likely to be after
+		// them anyway, but just make sure. This is required by the build ID-based
+		// shortcut in (*Builder).useCache(a1), which will call b.linkActionID(a).
+		// In order for that linkActionID call to compute the right action ID, all the
+		// dependencies of a (except a1) must have completed building and have
+		// recorded their build IDs.
+		a1.Deps = append(a1.Deps, &Action{Mode: "nop", Deps: a.Deps[1:]})
+		return a
+	})
+
+	if mode == ModeInstall || mode == ModeBuggyInstall {
+		a = b.installAction(a, mode)
+	}
+
+	return a
+}
+
+// installAction returns the action for installing the result of a1.
+// Note that a1 itself is overwritten in place to become the install
+// action, so existing pointers to a1 automatically pick up the install.
+func (b *Builder) installAction(a1 *Action, mode BuildMode) *Action {
+	// Because we overwrite the build action with the install action below,
+	// a1 may already be an install action fetched from the "build" cache key,
+	// and the caller just doesn't realize.
+	if strings.HasSuffix(a1.Mode, "-install") {
+		if a1.buggyInstall && mode == ModeInstall {
+			//  Congratulations! The buggy install is now a proper install.
+			a1.buggyInstall = false
+		}
+		return a1
+	}
+
+	// If there's no actual action to build a1,
+	// there's nothing to install either.
+	// This happens if a1 corresponds to reusing an already-built object.
+	if a1.Actor == nil {
+		return a1
+	}
+
+	p := a1.Package
+	return b.cacheAction(a1.Mode+"-install", p, func() *Action {
+		// The install deletes the temporary build result,
+		// so we need all other actions, both past and future,
+		// that attempt to depend on the build to depend instead
+		// on the install.
+
+		// Make a private copy of a1 (the build action),
+		// no longer accessible to any other rules.
+		buildAction := new(Action)
+		*buildAction = *a1
+
+		// Overwrite a1 with the install action.
+		// This takes care of updating past actions that
+		// point at a1 for the build action; now they will
+		// point at a1 and get the install action.
+		// We also leave a1 in the action cache as the result
+		// for "build", so that actions not yet created that
+		// try to depend on the build will instead depend
+		// on the install.
+		*a1 = Action{
+			Mode:    buildAction.Mode + "-install",
+			Actor:   ActorFunc(BuildInstallFunc),
+			Package: p,
+			Objdir:  buildAction.Objdir,
+			Deps:    []*Action{buildAction},
+			Target:  p.Target,
+			built:   p.Target,
+
+			buggyInstall: mode == ModeBuggyInstall,
+		}
+
+		b.addInstallHeaderAction(a1)
+		return a1
+	})
+}
+
+// addTransitiveLinkDeps adds to the link action a all packages
+// that are transitive dependencies of a1.Deps.
+// That is, if a is a link of package main, a1 is the compile of package main
+// and a1.Deps is the actions for building packages directly imported by
+// package main (what the compiler needs). The linker needs all packages
+// transitively imported by the whole program; addTransitiveLinkDeps
+// makes sure those are present in a.Deps.
+// If shlib is non-empty, then a corresponds to the build and installation of shlib,
+// so any rebuild of shlib should not be added as a dependency.
+func (b *Builder) addTransitiveLinkDeps(a, a1 *Action, shlib string) {
+	// Expand Deps to include all built packages, for the linker.
+	// Use breadth-first search to find rebuilt-for-test packages
+	// before the standard ones.
+	// TODO(rsc): Eliminate the standard ones from the action graph,
+	// which will require doing a little bit more rebuilding.
+	workq := []*Action{a1}
+	// haveDep records, by import path, packages already added to a.Deps.
+	haveDep := map[string]bool{}
+	if a1.Package != nil {
+		haveDep[a1.Package.ImportPath] = true
+	}
+	for i := 0; i < len(workq); i++ {
+		a1 := workq[i]
+		for _, a2 := range a1.Deps {
+			// TODO(rsc): Find a better discriminator than the Mode strings, once the dust settles.
+			if a2.Package == nil || (a2.Mode != "build-install" && a2.Mode != "build") || haveDep[a2.Package.ImportPath] {
+				continue
+			}
+			haveDep[a2.Package.ImportPath] = true
+			a.Deps = append(a.Deps, a2)
+			if a2.Mode == "build-install" {
+				a2 = a2.Deps[0] // walk children of "build" action
+			}
+			workq = append(workq, a2)
+		}
+	}
+
+	// If this is go build -linkshared, then the link depends on the shared libraries
+	// in addition to the packages themselves. (The compile steps do not.)
+	if cfg.BuildLinkshared {
+		// Seeding with shlib keeps the library being built from
+		// depending on a rebuild of itself.
+		haveShlib := map[string]bool{shlib: true}
+		for _, a1 := range a.Deps {
+			p1 := a1.Package
+			if p1 == nil || p1.Shlib == "" || haveShlib[filepath.Base(p1.Shlib)] {
+				continue
+			}
+			haveShlib[filepath.Base(p1.Shlib)] = true
+			// TODO(rsc): The use of ModeInstall here is suspect, but if we only do ModeBuild,
+			// we'll end up building an overall library or executable that depends at runtime
+			// on other libraries that are out-of-date, which is clearly not good either.
+			// We call it ModeBuggyInstall to make clear that this is not right.
+			a.Deps = append(a.Deps, b.linkSharedAction(ModeBuggyInstall, ModeBuggyInstall, p1.Shlib, nil))
+		}
+	}
+}
+
+// addInstallHeaderAction adds an install header action to a, if needed.
+// The action a should be an install action as generated by either
+// b.CompileAction or b.LinkAction with mode=ModeInstall,
+// and so a.Deps[0] is the corresponding build action.
+func (b *Builder) addInstallHeaderAction(a *Action) {
+	// Install header for cgo in c-archive and c-shared modes.
+	p := a.Package
+	if !p.UsesCgo() {
+		return
+	}
+	if cfg.BuildBuildmode != "c-archive" && cfg.BuildBuildmode != "c-shared" {
+		return
+	}
+	hdrTarget := strings.TrimSuffix(a.Target, filepath.Ext(a.Target)) + ".h"
+	if cfg.BuildContext.Compiler == "gccgo" && cfg.BuildO == "" {
+		// For the header file, remove the "lib" added by go/build,
+		// so we generate pkg.h rather than libpkg.h.
+		dir, file := filepath.Split(hdrTarget)
+		hdrTarget = filepath.Join(dir, strings.TrimPrefix(file, "lib"))
+	}
+	a.Deps = append(a.Deps, &Action{
+		Mode:    "install header",
+		Package: a.Package,
+		Deps:    []*Action{a.Deps[0]},
+		Actor:   ActorFunc((*Builder).installHeader),
+		Objdir:  a.Deps[0].Objdir,
+		Target:  hdrTarget,
+	})
+}
+
+// buildmodeShared takes the "go build" action a1 into the building of a shared library of a1.Deps.
+// That is, the input a1 represents "go build pkgs" and the result represents "go build -buildmode=shared pkgs".
+func (b *Builder) buildmodeShared(mode, depMode BuildMode, args []string, pkgs []*load.Package, a1 *Action) *Action {
+	shlibName, err := libname(args, pkgs)
+	if err != nil {
+		base.Fatalf("%v", err)
+	}
+	return b.linkSharedAction(mode, depMode, shlibName, a1)
+}
+
+// linkSharedAction takes a grouping action a1 corresponding to a list of built packages
+// and returns an action that links them together into a shared library with the name shlib.
+// If a1 is nil, shlib should be an absolute path to an existing shared library,
+// and then linkSharedAction reads that library to find out the package list.
+func (b *Builder) linkSharedAction(mode, depMode BuildMode, shlib string, a1 *Action) *Action {
+	fullShlib := shlib
+	shlib = filepath.Base(shlib)
+	a := b.cacheAction("build-shlib "+shlib, nil, func() *Action {
+		if a1 == nil {
+			// Recover the package list from the existing library on disk.
+			// TODO(rsc): Need to find some other place to store config,
+			// not in pkg directory. See golang.org/issue/22196.
+			pkgs := readpkglist(fullShlib)
+			a1 = &Action{
+				Mode: "shlib packages",
+			}
+			for _, p := range pkgs {
+				a1.Deps = append(a1.Deps, b.CompileAction(mode, depMode, p))
+			}
+		}
+
+		// Fake package to hold ldflags.
+		// As usual shared libraries are a kludgy, abstraction-violating special case:
+		// we let them use the flags specified for the command-line arguments.
+		p := &load.Package{}
+		p.Internal.CmdlinePkg = true
+		p.Internal.Ldflags = load.BuildLdflags.For(p)
+		p.Internal.Gccgoflags = load.BuildGccgoflags.For(p)
+
+		// Add implicit dependencies to pkgs list.
+		// Currently buildmode=shared forces external linking mode, and
+		// external linking mode forces an import of runtime/cgo (and
+		// math on arm). So if it was not passed on the command line and
+		// it is not present in another shared library, add it here.
+		// TODO(rsc): Maybe this should only happen if "runtime" is in the original package set.
+		// TODO(rsc): This should probably be changed to use load.LinkerDeps(p).
+		// TODO(rsc): We don't add standard library imports for gccgo
+		// because they are all always linked in anyhow.
+		// Maybe load.LinkerDeps should be used and updated.
+		a := &Action{
+			Mode:    "go build -buildmode=shared",
+			Package: p,
+			Objdir:  b.NewObjdir(),
+			Actor:   ActorFunc((*Builder).linkShared),
+			Deps:    []*Action{a1},
+		}
+		a.Target = filepath.Join(a.Objdir, shlib)
+		if cfg.BuildToolchainName != "gccgo" {
+			// add arranges for pkg to be among a1's dependencies
+			// unless some other shared library already provides it
+			// (or force is set).
+			add := func(a1 *Action, pkg string, force bool) {
+				for _, a2 := range a1.Deps {
+					if a2.Package != nil && a2.Package.ImportPath == pkg {
+						return
+					}
+				}
+				var stk load.ImportStack
+				p := load.LoadPackageWithFlags(pkg, base.Cwd(), &stk, nil, 0)
+				if p.Error != nil {
+					base.Fatalf("load %s: %v", pkg, p.Error)
+				}
+				// Assume that if pkg (runtime/cgo or math)
+				// is already accounted for in a different shared library,
+				// then that shared library also contains runtime,
+				// so that anything we do will depend on that library,
+				// so we don't need to include pkg in our shared library.
+				if force || p.Shlib == "" || filepath.Base(p.Shlib) == pkg {
+					a1.Deps = append(a1.Deps, b.CompileAction(depMode, depMode, p))
+				}
+			}
+			add(a1, "runtime/cgo", false)
+			if cfg.Goarch == "arm" {
+				add(a1, "math", false)
+			}
+
+			// The linker step still needs all the usual linker deps.
+			// (For example, the linker always opens runtime.a.)
+			ldDeps, err := load.LinkerDeps(nil)
+			if err != nil {
+				base.Error(err)
+			}
+			for _, dep := range ldDeps {
+				add(a, dep, true)
+			}
+		}
+		b.addTransitiveLinkDeps(a, a1, shlib)
+		return a
+	})
+
+	// Install result.
+	if (mode == ModeInstall || mode == ModeBuggyInstall) && a.Actor != nil {
+		buildAction := a
+
+		a = b.cacheAction("install-shlib "+shlib, nil, func() *Action {
+			// Determine the eventual install target.
+			// The install target is root/pkg/shlib, where root is the source root
+			// in which all the packages lie.
+			// TODO(rsc): Perhaps this cross-root check should apply to the full
+			// transitive package dependency list, not just the ones named
+			// on the command line?
+			pkgDir := a1.Deps[0].Package.Internal.Build.PkgTargetRoot
+			for _, a2 := range a1.Deps {
+				if dir := a2.Package.Internal.Build.PkgTargetRoot; dir != pkgDir {
+					base.Fatalf("installing shared library: cannot use packages %s and %s from different roots %s and %s",
+						a1.Deps[0].Package.ImportPath,
+						a2.Package.ImportPath,
+						pkgDir,
+						dir)
+				}
+			}
+			// TODO(rsc): Find out and explain here why gccgo is different.
+			if cfg.BuildToolchainName == "gccgo" {
+				pkgDir = filepath.Join(pkgDir, "shlibs")
+			}
+			target := filepath.Join(pkgDir, shlib)
+
+			a := &Action{
+				Mode:   "go install -buildmode=shared",
+				Objdir: buildAction.Objdir,
+				Actor:  ActorFunc(BuildInstallFunc),
+				Deps:   []*Action{buildAction},
+				Target: target,
+			}
+			// Also install a .shlibname file for every package in the
+			// library that has an installed package directory.
+			for _, a2 := range buildAction.Deps[0].Deps {
+				p := a2.Package
+				pkgTargetRoot := p.Internal.Build.PkgTargetRoot
+				if pkgTargetRoot == "" {
+					continue
+				}
+				a.Deps = append(a.Deps, &Action{
+					Mode:    "shlibname",
+					Package: p,
+					Actor:   ActorFunc((*Builder).installShlibname),
+					Target:  filepath.Join(pkgTargetRoot, p.ImportPath+".shlibname"),
+					Deps:    []*Action{a.Deps[0]},
+				})
+			}
+			return a
+		})
+	}
+
+	return a
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/work/build.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/build.go
new file mode 100644
index 0000000000000000000000000000000000000000..408edb51193d35e015e2cc6bc2f09de93ce748d7
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/build.go
@@ -0,0 +1,961 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+	"context"
+	"errors"
+	"flag"
+	"fmt"
+	"go/build"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+
+	"cmd/go/internal/base"
+	"cmd/go/internal/cfg"
+	"cmd/go/internal/fsys"
+	"cmd/go/internal/load"
+	"cmd/go/internal/modload"
+	"cmd/go/internal/search"
+	"cmd/go/internal/trace"
+)
+
+// CmdBuild is the "go build" command; its Run func is assigned in init
+// to break an initialization cycle.
+var CmdBuild = &base.Command{
+	UsageLine: "go build [-o output] [build flags] [packages]",
+	Short:     "compile packages and dependencies",
+	Long: `
+Build compiles the packages named by the import paths,
+along with their dependencies, but it does not install the results.
+
+If the arguments to build are a list of .go files from a single directory,
+build treats them as a list of source files specifying a single package.
+
+When compiling packages, build ignores files that end in '_test.go'.
+
+When compiling a single main package, build writes the resulting
+executable to an output file named after the last non-major-version
+component of the package import path. The '.exe' suffix is added
+when writing a Windows executable.
+So 'go build example/sam' writes 'sam' or 'sam.exe'.
+'go build example.com/foo/v2' writes 'foo' or 'foo.exe', not 'v2.exe'.
+
+When compiling a package from a list of .go files, the executable
+is named after the first source file.
+'go build ed.go rx.go' writes 'ed' or 'ed.exe'.
+
+When compiling multiple packages or a single non-main package,
+build compiles the packages but discards the resulting object,
+serving only as a check that the packages can be built.
+
+The -o flag forces build to write the resulting executable or object
+to the named output file or directory, instead of the default behavior described
+in the last two paragraphs. If the named output is an existing directory or
+ends with a slash or backslash, then any resulting executables
+will be written to that directory.
+
+The build flags are shared by the build, clean, get, install, list, run,
+and test commands:
+
+	-C dir
+		Change to dir before running the command.
+		Any files named on the command line are interpreted after
+		changing directories.
+		If used, this flag must be the first one in the command line.
+	-a
+		force rebuilding of packages that are already up-to-date.
+	-n
+		print the commands but do not run them.
+	-p n
+		the number of programs, such as build commands or
+		test binaries, that can be run in parallel.
+		The default is GOMAXPROCS, normally the number of CPUs available.
+	-race
+		enable data race detection.
+		Supported only on linux/amd64, freebsd/amd64, darwin/amd64, darwin/arm64, windows/amd64,
+		linux/ppc64le and linux/arm64 (only for 48-bit VMA).
+	-msan
+		enable interoperation with memory sanitizer.
+		Supported only on linux/amd64, linux/arm64, linux/loong64, freebsd/amd64
+		and only with Clang/LLVM as the host C compiler.
+		PIE build mode will be used on all platforms except linux/amd64.
+	-asan
+		enable interoperation with address sanitizer.
+		Supported only on linux/arm64, linux/amd64, linux/loong64.
+		Supported on linux/amd64 or linux/arm64 and only with GCC 7 and higher
+		or Clang/LLVM 9 and higher.
+		And supported on linux/loong64 only with Clang/LLVM 16 and higher.
+	-cover
+		enable code coverage instrumentation.
+	-covermode set,count,atomic
+		set the mode for coverage analysis.
+		The default is "set" unless -race is enabled,
+		in which case it is "atomic".
+		The values:
+		set: bool: does this statement run?
+		count: int: how many times does this statement run?
+		atomic: int: count, but correct in multithreaded tests;
+			significantly more expensive.
+		Sets -cover.
+	-coverpkg pattern1,pattern2,pattern3
+		For a build that targets package 'main' (e.g. building a Go
+		executable), apply coverage analysis to each package matching
+		the patterns. The default is to apply coverage analysis to
+		packages in the main Go module. See 'go help packages' for a
+		description of package patterns.  Sets -cover.
+	-v
+		print the names of packages as they are compiled.
+	-work
+		print the name of the temporary work directory and
+		do not delete it when exiting.
+	-x
+		print the commands.
+	-asmflags '[pattern=]arg list'
+		arguments to pass on each go tool asm invocation.
+	-buildmode mode
+		build mode to use. See 'go help buildmode' for more.
+	-buildvcs
+		Whether to stamp binaries with version control information
+		("true", "false", or "auto"). By default ("auto"), version control
+		information is stamped into a binary if the main package, the main module
+		containing it, and the current directory are all in the same repository.
+		Use -buildvcs=false to always omit version control information, or
+		-buildvcs=true to error out if version control information is available but
+		cannot be included due to a missing tool or ambiguous directory structure.
+	-compiler name
+		name of compiler to use, as in runtime.Compiler (gccgo or gc).
+	-gccgoflags '[pattern=]arg list'
+		arguments to pass on each gccgo compiler/linker invocation.
+	-gcflags '[pattern=]arg list'
+		arguments to pass on each go tool compile invocation.
+	-installsuffix suffix
+		a suffix to use in the name of the package installation directory,
+		in order to keep output separate from default builds.
+		If using the -race flag, the install suffix is automatically set to race
+		or, if set explicitly, has _race appended to it. Likewise for the -msan
+		and -asan flags. Using a -buildmode option that requires non-default compile
+		flags has a similar effect.
+	-ldflags '[pattern=]arg list'
+		arguments to pass on each go tool link invocation.
+	-linkshared
+		build code that will be linked against shared libraries previously
+		created with -buildmode=shared.
+	-mod mode
+		module download mode to use: readonly, vendor, or mod.
+		By default, if a vendor directory is present and the go version in go.mod
+		is 1.14 or higher, the go command acts as if -mod=vendor were set.
+		Otherwise, the go command acts as if -mod=readonly were set.
+		See https://golang.org/ref/mod#build-commands for details.
+	-modcacherw
+		leave newly-created directories in the module cache read-write
+		instead of making them read-only.
+	-modfile file
+		in module aware mode, read (and possibly write) an alternate go.mod
+		file instead of the one in the module root directory. A file named
+		"go.mod" must still be present in order to determine the module root
+		directory, but it is not accessed. When -modfile is specified, an
+		alternate go.sum file is also used: its path is derived from the
+		-modfile flag by trimming the ".mod" extension and appending ".sum".
+	-overlay file
+		read a JSON config file that provides an overlay for build operations.
+		The file is a JSON struct with a single field, named 'Replace', that
+		maps each disk file path (a string) to its backing file path, so that
+		a build will run as if the disk file path exists with the contents
+		given by the backing file paths, or as if the disk file path does not
+		exist if its backing file path is empty. Support for the -overlay flag
+		has some limitations: importantly, cgo files included from outside the
+		include path must be in the same directory as the Go package they are
+		included from, and overlays will not appear when binaries and tests are
+		run through go run and go test respectively.
+	-pgo file
+		specify the file path of a profile for profile-guided optimization (PGO).
+		When the special name "auto" is specified, for each main package in the
+		build, the go command selects a file named "default.pgo" in the package's
+		directory if that file exists, and applies it to the (transitive)
+		dependencies of the main package (other packages are not affected).
+		Special name "off" turns off PGO. The default is "auto".
+	-pkgdir dir
+		install and load all packages from dir instead of the usual locations.
+		For example, when building with a non-standard configuration,
+		use -pkgdir to keep generated packages in a separate location.
+	-tags tag,list
+		a comma-separated list of additional build tags to consider satisfied
+		during the build. For more information about build tags, see
+		'go help buildconstraint'. (Earlier versions of Go used a
+		space-separated list, and that form is deprecated but still recognized.)
+	-trimpath
+		remove all file system paths from the resulting executable.
+		Instead of absolute file system paths, the recorded file names
+		will begin either a module path@version (when using modules),
+		or a plain import path (when using the standard library, or GOPATH).
+	-toolexec 'cmd args'
+		a program to use to invoke toolchain programs like vet and asm.
+		For example, instead of running asm, the go command will run
+		'cmd args /path/to/asm '.
+		The TOOLEXEC_IMPORTPATH environment variable will be set,
+		matching 'go list -f {{.ImportPath}}' for the package being built.
+
+The -asmflags, -gccgoflags, -gcflags, and -ldflags flags accept a
+space-separated list of arguments to pass to an underlying tool
+during the build. To embed spaces in an element in the list, surround
+it with either single or double quotes. The argument list may be
+preceded by a package pattern and an equal sign, which restricts
+the use of that argument list to the building of packages matching
+that pattern (see 'go help packages' for a description of package
+patterns). Without a pattern, the argument list applies only to the
+packages named on the command line. The flags may be repeated
+with different patterns in order to specify different arguments for
+different sets of packages. If a package matches patterns given in
+multiple flags, the latest match on the command line wins.
+For example, 'go build -gcflags=-S fmt' prints the disassembly
+only for package fmt, while 'go build -gcflags=all=-S fmt'
+prints the disassembly for fmt and all its dependencies.
+
+For more about specifying packages, see 'go help packages'.
+For more about where packages and binaries are installed,
+run 'go help gopath'.
+For more about calling between Go and C/C++, run 'go help c'.
+
+Note: Build adheres to certain conventions such as those described
+by 'go help gopath'. Not all projects can follow these conventions,
+however. Installations that have their own conventions or that use
+a separate software build system may choose to use lower-level
+invocations such as 'go tool compile' and 'go tool link' to avoid
+some of the overheads and design decisions of the build tool.
+
+See also: go install, go get, go clean.
+	`,
+}
+
+// NOTE(review): the name suggests this gates concurrent backend compilation
+// in the gc toolchain by default; the use site is outside this excerpt — confirm there.
+const concurrentGCBackendCompilationEnabledByDefault = true
+
+// init wires up the build and install commands. Run is assigned here
+// rather than in the Command literals to break an initialization cycle.
+func init() {
+	// break init cycle
+	CmdBuild.Run = runBuild
+	CmdInstall.Run = runInstall
+
+	CmdBuild.Flag.StringVar(&cfg.BuildO, "o", "", "output file or directory")
+
+	AddBuildFlags(CmdBuild, DefaultBuildFlags)
+	AddBuildFlags(CmdInstall, DefaultBuildFlags)
+	// Cover flags are registered only when the coverage redesign
+	// experiment is enabled.
+	if cfg.Experiment != nil && cfg.Experiment.CoverageRedesign {
+		AddCoverFlags(CmdBuild, nil)
+		AddCoverFlags(CmdInstall, nil)
+	}
+}
+
+// Note that flags consulted by other parts of the code
+// (for example, buildV) are in cmd/go/internal/cfg.
+
+var (
+	forcedAsmflags   []string // internally-forced flags for cmd/asm
+	forcedGcflags    []string // internally-forced flags for cmd/compile
+	forcedLdflags    []string // internally-forced flags for cmd/link
+	forcedGccgoflags []string // internally-forced flags for gccgo
+)
+
+// BuildToolchain is the toolchain in use; buildCompiler.Set replaces
+// the noToolchain placeholder based on the selected compiler.
+var BuildToolchain toolchain = noToolchain{}
+
+// ldBuildmode is set elsewhere in this package; presumably the effective
+// buildmode passed to the linker — verify at its use sites.
+var ldBuildmode string
+
+// buildCompiler implements flag.Var.
+// It implements Set by updating both
+// BuildToolchain and buildContext.Compiler.
+// It is registered as the -compiler flag in AddBuildFlags.
+type buildCompiler struct{}
+
+func (c buildCompiler) Set(value string) error {
+	switch value {
+	case "gc":
+		BuildToolchain = gcToolchain{}
+	case "gccgo":
+		BuildToolchain = gccgoToolchain{}
+	default:
+		return fmt.Errorf("unknown compiler %q", value)
+	}
+	cfg.BuildToolchainName = value
+	cfg.BuildToolchainCompiler = BuildToolchain.compiler
+	cfg.BuildToolchainLinker = BuildToolchain.linker
+	cfg.BuildContext.Compiler = value
+	return nil
+}
+
+func (c buildCompiler) String() string {
+	return cfg.BuildContext.Compiler
+}
+
+func init() {
+	switch build.Default.Compiler {
+	case "gc", "gccgo":
+		buildCompiler{}.Set(build.Default.Compiler)
+	}
+}
+
+type BuildFlagMask int
+
+const (
+	DefaultBuildFlags BuildFlagMask = 0
+	OmitModFlag       BuildFlagMask = 1 << iota
+	OmitModCommonFlags
+	OmitVFlag
+)
+
+// AddBuildFlags adds the flags common to the build, clean, get,
+// install, list, run, and test commands.
+func AddBuildFlags(cmd *base.Command, mask BuildFlagMask) {
+	base.AddBuildFlagsNX(&cmd.Flag)
+	base.AddChdirFlag(&cmd.Flag)
+	cmd.Flag.BoolVar(&cfg.BuildA, "a", false, "")
+	cmd.Flag.IntVar(&cfg.BuildP, "p", cfg.BuildP, "")
+	if mask&OmitVFlag == 0 {
+		cmd.Flag.BoolVar(&cfg.BuildV, "v", false, "")
+	}
+
+	cmd.Flag.Var(&load.BuildAsmflags, "asmflags", "")
+	cmd.Flag.Var(buildCompiler{}, "compiler", "")
+	cmd.Flag.StringVar(&cfg.BuildBuildmode, "buildmode", "default", "")
+	cmd.Flag.Var(&load.BuildGcflags, "gcflags", "")
+	cmd.Flag.Var(&load.BuildGccgoflags, "gccgoflags", "")
+	if mask&OmitModFlag == 0 {
+		base.AddModFlag(&cmd.Flag)
+	}
+	if mask&OmitModCommonFlags == 0 {
+		base.AddModCommonFlags(&cmd.Flag)
+	} else {
+		// Add the overlay flag even when we don't add the rest of the mod common flags.
+		// This only affects 'go get' in GOPATH mode, but add the flag anyway for
+		// consistency.
+		cmd.Flag.StringVar(&fsys.OverlayFile, "overlay", "", "")
+	}
+	cmd.Flag.StringVar(&cfg.BuildContext.InstallSuffix, "installsuffix", "", "")
+	cmd.Flag.Var(&load.BuildLdflags, "ldflags", "")
+	cmd.Flag.BoolVar(&cfg.BuildLinkshared, "linkshared", false, "")
+	cmd.Flag.StringVar(&cfg.BuildPGO, "pgo", "auto", "")
+	cmd.Flag.StringVar(&cfg.BuildPkgdir, "pkgdir", "", "")
+	cmd.Flag.BoolVar(&cfg.BuildRace, "race", false, "")
+	cmd.Flag.BoolVar(&cfg.BuildMSan, "msan", false, "")
+	cmd.Flag.BoolVar(&cfg.BuildASan, "asan", false, "")
+	cmd.Flag.Var((*tagsFlag)(&cfg.BuildContext.BuildTags), "tags", "")
+	cmd.Flag.Var((*base.StringsFlag)(&cfg.BuildToolexec), "toolexec", "")
+	cmd.Flag.BoolVar(&cfg.BuildTrimpath, "trimpath", false, "")
+	cmd.Flag.BoolVar(&cfg.BuildWork, "work", false, "")
+	cmd.Flag.Var((*buildvcsFlag)(&cfg.BuildBuildvcs), "buildvcs", "")
+
+	// Undocumented, unstable debugging flags.
+	cmd.Flag.StringVar(&cfg.DebugActiongraph, "debug-actiongraph", "", "")
+	cmd.Flag.StringVar(&cfg.DebugTrace, "debug-trace", "", "")
+	cmd.Flag.StringVar(&cfg.DebugRuntimeTrace, "debug-runtime-trace", "", "")
+}
+
+// AddCoverFlags adds coverage-related flags to "cmd". If the
+// CoverageRedesign experiment is enabled, we add -cover{mode,pkg} to
+// the build command and only -coverprofile to the test command. If
+// the CoverageRedesign experiment is disabled, -cover* flags are
+// added only to the test command.
+func AddCoverFlags(cmd *base.Command, coverProfileFlag *string) {
+	addCover := false
+	if cfg.Experiment != nil && cfg.Experiment.CoverageRedesign {
+		// New coverage enabled: both build and test commands get
+		// coverage flags.
+		addCover = true
+	} else {
+		// New coverage disabled: only test command gets cover flags.
+		addCover = coverProfileFlag != nil
+	}
+	if addCover {
+		cmd.Flag.BoolVar(&cfg.BuildCover, "cover", false, "")
+		cmd.Flag.Var(coverFlag{(*coverModeFlag)(&cfg.BuildCoverMode)}, "covermode", "")
+		cmd.Flag.Var(coverFlag{commaListFlag{&cfg.BuildCoverPkg}}, "coverpkg", "")
+	}
+	if coverProfileFlag != nil {
+		cmd.Flag.Var(coverFlag{V: stringFlag{coverProfileFlag}}, "coverprofile", "")
+	}
+}
+
+// tagsFlag is the implementation of the -tags flag.
+type tagsFlag []string
+
+func (v *tagsFlag) Set(s string) error {
+	// For compatibility with Go 1.12 and earlier, allow "-tags='a b c'" or even just "-tags='a'".
+	if strings.Contains(s, " ") || strings.Contains(s, "'") {
+		return (*base.StringsFlag)(v).Set(s)
+	}
+
+	// Split on commas, ignore empty strings.
+	*v = []string{}
+	for _, s := range strings.Split(s, ",") {
+		if s != "" {
+			*v = append(*v, s)
+		}
+	}
+	return nil
+}
+
+func (v *tagsFlag) String() string {
+	return ""
+}
+
+// buildvcsFlag is the implementation of the -buildvcs flag.
+type buildvcsFlag string
+
+func (f *buildvcsFlag) IsBoolFlag() bool { return true } // allow -buildvcs (without arguments)
+
+func (f *buildvcsFlag) Set(s string) error {
+	// https://go.dev/issue/51748: allow "-buildvcs=auto",
+	// in addition to the usual "true" and "false".
+	if s == "" || s == "auto" {
+		*f = "auto"
+		return nil
+	}
+
+	b, err := strconv.ParseBool(s)
+	if err != nil {
+		return errors.New("value is neither 'auto' nor a valid bool")
+	}
+	*f = (buildvcsFlag)(strconv.FormatBool(b)) // convert to canonical "true" or "false"
+	return nil
+}
+
+func (f *buildvcsFlag) String() string { return string(*f) }
+
+// fileExtSplit expects a filename and returns the name
+// and ext (without the dot). If the file has no
+// extension, ext will be empty.
+func fileExtSplit(file string) (name, ext string) {
+	dotExt := filepath.Ext(file)
+	name = file[:len(file)-len(dotExt)]
+	if dotExt != "" {
+		ext = dotExt[1:]
+	}
+	return
+}
+
+func pkgsMain(pkgs []*load.Package) (res []*load.Package) {
+	for _, p := range pkgs {
+		if p.Name == "main" {
+			res = append(res, p)
+		}
+	}
+	return res
+}
+
+func pkgsNotMain(pkgs []*load.Package) (res []*load.Package) {
+	for _, p := range pkgs {
+		if p.Name != "main" {
+			res = append(res, p)
+		}
+	}
+	return res
+}
+
+func oneMainPkg(pkgs []*load.Package) []*load.Package {
+	if len(pkgs) != 1 || pkgs[0].Name != "main" {
+		base.Fatalf("-buildmode=%s requires exactly one main package", cfg.BuildBuildmode)
+	}
+	return pkgs
+}
+
+var pkgsFilter = func(pkgs []*load.Package) []*load.Package { return pkgs }
+
+func runBuild(ctx context.Context, cmd *base.Command, args []string) {
+	modload.InitWorkfile()
+	BuildInit()
+	b := NewBuilder("")
+	defer func() {
+		if err := b.Close(); err != nil {
+			base.Fatal(err)
+		}
+	}()
+
+	pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{AutoVCS: true}, args)
+	load.CheckPackageErrors(pkgs)
+
+	explicitO := len(cfg.BuildO) > 0
+
+	if len(pkgs) == 1 && pkgs[0].Name == "main" && cfg.BuildO == "" {
+		cfg.BuildO = pkgs[0].DefaultExecName()
+		cfg.BuildO += cfg.ExeSuffix
+	}
+
+	// sanity check some often mis-used options
+	switch cfg.BuildContext.Compiler {
+	case "gccgo":
+		if load.BuildGcflags.Present() {
+			fmt.Println("go build: when using gccgo toolchain, please pass compiler flags using -gccgoflags, not -gcflags")
+		}
+		if load.BuildLdflags.Present() {
+			fmt.Println("go build: when using gccgo toolchain, please pass linker flags using -gccgoflags, not -ldflags")
+		}
+	case "gc":
+		if load.BuildGccgoflags.Present() {
+			fmt.Println("go build: when using gc toolchain, please pass compile flags using -gcflags, and linker flags using -ldflags")
+		}
+	}
+
+	depMode := ModeBuild
+
+	pkgs = omitTestOnly(pkgsFilter(pkgs))
+
+	// Special case -o /dev/null by not writing at all.
+	if base.IsNull(cfg.BuildO) {
+		cfg.BuildO = ""
+	}
+
+	if cfg.Experiment.CoverageRedesign && cfg.BuildCover {
+		load.PrepareForCoverageBuild(pkgs)
+	}
+
+	if cfg.BuildO != "" {
+		// If the -o name exists and is a directory or
+		// ends with a slash or backslash, then
+		// write all main packages to that directory.
+		// Otherwise require only a single package be built.
+		if fi, err := os.Stat(cfg.BuildO); (err == nil && fi.IsDir()) ||
+			strings.HasSuffix(cfg.BuildO, "/") ||
+			strings.HasSuffix(cfg.BuildO, string(os.PathSeparator)) {
+			if !explicitO {
+				base.Fatalf("go: build output %q already exists and is a directory", cfg.BuildO)
+			}
+			a := &Action{Mode: "go build"}
+			for _, p := range pkgs {
+				if p.Name != "main" {
+					continue
+				}
+
+				p.Target = filepath.Join(cfg.BuildO, p.DefaultExecName())
+				p.Target += cfg.ExeSuffix
+				p.Stale = true
+				p.StaleReason = "build -o flag in use"
+				a.Deps = append(a.Deps, b.AutoAction(ModeInstall, depMode, p))
+			}
+			if len(a.Deps) == 0 {
+				base.Fatalf("go: no main packages to build")
+			}
+			b.Do(ctx, a)
+			return
+		}
+		if len(pkgs) > 1 {
+			base.Fatalf("go: cannot write multiple packages to non-directory %s", cfg.BuildO)
+		} else if len(pkgs) == 0 {
+			base.Fatalf("no packages to build")
+		}
+		p := pkgs[0]
+		p.Target = cfg.BuildO
+		p.Stale = true // must build - not up to date
+		p.StaleReason = "build -o flag in use"
+		a := b.AutoAction(ModeInstall, depMode, p)
+		b.Do(ctx, a)
+		return
+	}
+
+	a := &Action{Mode: "go build"}
+	for _, p := range pkgs {
+		a.Deps = append(a.Deps, b.AutoAction(ModeBuild, depMode, p))
+	}
+	if cfg.BuildBuildmode == "shared" {
+		a = b.buildmodeShared(ModeBuild, depMode, args, pkgs, a)
+	}
+	b.Do(ctx, a)
+}
+
+var CmdInstall = &base.Command{
+	UsageLine: "go install [build flags] [packages]",
+	Short:     "compile and install packages and dependencies",
+	Long: `
+Install compiles and installs the packages named by the import paths.
+
+Executables are installed in the directory named by the GOBIN environment
+variable, which defaults to $GOPATH/bin or $HOME/go/bin if the GOPATH
+environment variable is not set. Executables in $GOROOT
+are installed in $GOROOT/bin or $GOTOOLDIR instead of $GOBIN.
+
+If the arguments have version suffixes (like @latest or @v1.0.0), "go install"
+builds packages in module-aware mode, ignoring the go.mod file in the current
+directory or any parent directory, if there is one. This is useful for
+installing executables without affecting the dependencies of the main module.
+To eliminate ambiguity about which module versions are used in the build, the
+arguments must satisfy the following constraints:
+
+- Arguments must be package paths or package patterns (with "..." wildcards).
+They must not be standard packages (like fmt), meta-patterns (std, cmd,
+all), or relative or absolute file paths.
+
+- All arguments must have the same version suffix. Different queries are not
+allowed, even if they refer to the same version.
+
+- All arguments must refer to packages in the same module at the same version.
+
+- Package path arguments must refer to main packages. Pattern arguments
+will only match main packages.
+
+- No module is considered the "main" module. If the module containing
+packages named on the command line has a go.mod file, it must not contain
+directives (replace and exclude) that would cause it to be interpreted
+differently than if it were the main module. The module must not require
+a higher version of itself.
+
+- Vendor directories are not used in any module. (Vendor directories are not
+included in the module zip files downloaded by 'go install'.)
+
+If the arguments don't have version suffixes, "go install" may run in
+module-aware mode or GOPATH mode, depending on the GO111MODULE environment
+variable and the presence of a go.mod file. See 'go help modules' for details.
+If module-aware mode is enabled, "go install" runs in the context of the main
+module.
+
+When module-aware mode is disabled, non-main packages are installed in the
+directory $GOPATH/pkg/$GOOS_$GOARCH. When module-aware mode is enabled,
+non-main packages are built and cached but not installed.
+
+Before Go 1.20, the standard library was installed to
+$GOROOT/pkg/$GOOS_$GOARCH.
+Starting in Go 1.20, the standard library is built and cached but not installed.
+Setting GODEBUG=installgoroot=all restores the use of
+$GOROOT/pkg/$GOOS_$GOARCH.
+
+For more about build flags, see 'go help build'.
+
+For more about specifying packages, see 'go help packages'.
+
+See also: go build, go get, go clean.
+	`,
+}
+
+// libname returns the filename to use for the shared library when using
+// -buildmode=shared. The rules we use are:
+// Use arguments for special 'meta' packages:
+//
+//	std --> libstd.so
+//	std cmd --> libstd,cmd.so
+//
+// A single non-meta argument with trailing "/..." is special cased:
+//
+//	foo/... --> libfoo.so
+//	(A relative path like "./..."  expands the "." first)
+//
+// Use import paths for other cases, changing '/' to '-':
+//
+//	somelib --> libsubdir-somelib.so
+//	./ or ../ --> libsubdir-somelib.so
+//	gopkg.in/tomb.v2 -> libgopkg.in-tomb.v2.so
+//	a/... b/... ---> liba/c,b/d.so - all matching import paths
+//
+// Name parts are joined with ','.
+func libname(args []string, pkgs []*load.Package) (string, error) {
+	var libname string
+	appendName := func(arg string) {
+		if libname == "" {
+			libname = arg
+		} else {
+			libname += "," + arg
+		}
+	}
+	var haveNonMeta bool
+	for _, arg := range args {
+		if search.IsMetaPackage(arg) {
+			appendName(arg)
+		} else {
+			haveNonMeta = true
+		}
+	}
+	if len(libname) == 0 { // non-meta packages only. use import paths
+		if len(args) == 1 && strings.HasSuffix(args[0], "/...") {
+			// Special case of "foo/..." as mentioned above.
+			arg := strings.TrimSuffix(args[0], "/...")
+			if build.IsLocalImport(arg) {
+				cwd, _ := os.Getwd()
+				bp, _ := cfg.BuildContext.ImportDir(filepath.Join(cwd, arg), build.FindOnly)
+				if bp.ImportPath != "" && bp.ImportPath != "." {
+					arg = bp.ImportPath
+				}
+			}
+			appendName(strings.ReplaceAll(arg, "/", "-"))
+		} else {
+			for _, pkg := range pkgs {
+				appendName(strings.ReplaceAll(pkg.ImportPath, "/", "-"))
+			}
+		}
+	} else if haveNonMeta { // have both meta package and a non-meta one
+		return "", errors.New("mixing of meta and non-meta packages is not allowed")
+	}
+	// TODO(mwhudson): Needs to change for platforms that use different naming
+	// conventions...
+	return "lib" + libname + ".so", nil
+}
+
+func runInstall(ctx context.Context, cmd *base.Command, args []string) {
+	for _, arg := range args {
+		if strings.Contains(arg, "@") && !build.IsLocalImport(arg) && !filepath.IsAbs(arg) {
+			installOutsideModule(ctx, args)
+			return
+		}
+	}
+
+	modload.InitWorkfile()
+	BuildInit()
+	pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{AutoVCS: true}, args)
+	if cfg.ModulesEnabled && !modload.HasModRoot() {
+		haveErrors := false
+		allMissingErrors := true
+		for _, pkg := range pkgs {
+			if pkg.Error == nil {
+				continue
+			}
+			haveErrors = true
+			if missingErr := (*modload.ImportMissingError)(nil); !errors.As(pkg.Error, &missingErr) {
+				allMissingErrors = false
+				break
+			}
+		}
+		if haveErrors && allMissingErrors {
+			latestArgs := make([]string, len(args))
+			for i := range args {
+				latestArgs[i] = args[i] + "@latest"
+			}
+			hint := strings.Join(latestArgs, " ")
+			base.Fatalf("go: 'go install' requires a version when current directory is not in a module\n\tTry 'go install %s' to install the latest version", hint)
+		}
+	}
+	load.CheckPackageErrors(pkgs)
+
+	if cfg.Experiment.CoverageRedesign && cfg.BuildCover {
+		load.PrepareForCoverageBuild(pkgs)
+	}
+
+	InstallPackages(ctx, args, pkgs)
+}
+
+// omitTestOnly returns pkgs with test-only packages removed.
+func omitTestOnly(pkgs []*load.Package) []*load.Package {
+	var list []*load.Package
+	for _, p := range pkgs {
+		if len(p.GoFiles)+len(p.CgoFiles) == 0 && !p.Internal.CmdlinePkgLiteral {
+			// Package has no source files,
+			// perhaps due to build tags or perhaps due to only having *_test.go files.
+			// Also, it is only being processed as the result of a wildcard match
+			// like ./..., not because it was listed as a literal path on the command line.
+			// Ignore it.
+			continue
+		}
+		list = append(list, p)
+	}
+	return list
+}
+
+func InstallPackages(ctx context.Context, patterns []string, pkgs []*load.Package) {
+	ctx, span := trace.StartSpan(ctx, "InstallPackages "+strings.Join(patterns, " "))
+	defer span.Done()
+
+	if cfg.GOBIN != "" && !filepath.IsAbs(cfg.GOBIN) {
+		base.Fatalf("cannot install, GOBIN must be an absolute path")
+	}
+
+	pkgs = omitTestOnly(pkgsFilter(pkgs))
+	for _, p := range pkgs {
+		if p.Target == "" {
+			switch {
+			case p.Name != "main" && p.Internal.Local && p.ConflictDir == "":
+				// Non-executables outside GOPATH need not have a target:
+				// we can use the cache to hold the built package archive for use in future builds.
+				// The ones inside GOPATH should have a target (in GOPATH/pkg)
+				// or else something is wrong and worth reporting (like a ConflictDir).
+			case p.Name != "main" && p.Module != nil:
+				// Non-executables have no target (except the cache) when building with modules.
+			case p.Name != "main" && p.Standard && p.Internal.Build.PkgObj == "":
+				// Most packages in std do not need an installed .a, because they can be
+				// rebuilt and used directly from the build cache.
+				// A few targets (notably those using cgo) still do need to be installed
+				// in case the user's environment lacks a C compiler.
+			case p.Internal.GobinSubdir:
+				base.Errorf("go: cannot install cross-compiled binaries when GOBIN is set")
+			case p.Internal.CmdlineFiles:
+				base.Errorf("go: no install location for .go files listed on command line (GOBIN not set)")
+			case p.ConflictDir != "":
+				base.Errorf("go: no install location for %s: hidden by %s", p.Dir, p.ConflictDir)
+			default:
+				base.Errorf("go: no install location for directory %s outside GOPATH\n"+
+					"\tFor more details see: 'go help gopath'", p.Dir)
+			}
+		}
+	}
+	base.ExitIfErrors()
+
+	b := NewBuilder("")
+	defer func() {
+		if err := b.Close(); err != nil {
+			base.Fatal(err)
+		}
+	}()
+
+	depMode := ModeBuild
+	a := &Action{Mode: "go install"}
+	var tools []*Action
+	for _, p := range pkgs {
+		// If p is a tool, delay the installation until the end of the build.
+		// This avoids installing assemblers/compilers that are being executed
+		// by other steps in the build.
+		a1 := b.AutoAction(ModeInstall, depMode, p)
+		if load.InstallTargetDir(p) == load.ToTool {
+			a.Deps = append(a.Deps, a1.Deps...)
+			a1.Deps = append(a1.Deps, a)
+			tools = append(tools, a1)
+			continue
+		}
+		a.Deps = append(a.Deps, a1)
+	}
+	if len(tools) > 0 {
+		a = &Action{
+			Mode: "go install (tools)",
+			Deps: tools,
+		}
+	}
+
+	if cfg.BuildBuildmode == "shared" {
+		// Note: If buildmode=shared then only non-main packages
+		// are present in the pkgs list, so all the special case code about
+		// tools above did not apply, and a is just a simple Action
+		// with a list of Deps, one per package named in pkgs,
+		// the same as in runBuild.
+		a = b.buildmodeShared(ModeInstall, ModeInstall, patterns, pkgs, a)
+	}
+
+	b.Do(ctx, a)
+	base.ExitIfErrors()
+
+	// Success. If this command is 'go install' with no arguments
+	// and the current directory (the implicit argument) is a command,
+	// remove any leftover command binary from a previous 'go build'.
+	// The binary is installed; it's not needed here anymore.
+	// And worse it might be a stale copy, which you don't want to find
+	// instead of the installed one if $PATH contains dot.
+	// One way to view this behavior is that it is as if 'go install' first
+	// runs 'go build' and the moves the generated file to the install dir.
+	// See issue 9645.
+	if len(patterns) == 0 && len(pkgs) == 1 && pkgs[0].Name == "main" {
+		// Compute file 'go build' would have created.
+		// If it exists and is an executable file, remove it.
+		targ := pkgs[0].DefaultExecName()
+		targ += cfg.ExeSuffix
+		if filepath.Join(pkgs[0].Dir, targ) != pkgs[0].Target { // maybe $GOBIN is the current directory
+			fi, err := os.Stat(targ)
+			if err == nil {
+				m := fi.Mode()
+				if m.IsRegular() {
+					if m&0111 != 0 || cfg.Goos == "windows" { // windows never sets executable bit
+						os.Remove(targ)
+					}
+				}
+			}
+		}
+	}
+}
+
+// installOutsideModule implements 'go install pkg@version'. It builds and
+// installs one or more main packages in module mode while ignoring any go.mod
+// in the current directory or parent directories.
+//
+// See golang.org/issue/40276 for details and rationale.
+func installOutsideModule(ctx context.Context, args []string) {
+	modload.ForceUseModules = true
+	modload.RootMode = modload.NoRoot
+	modload.AllowMissingModuleImports()
+	modload.Init()
+	BuildInit()
+
+	// Load packages. Ignore non-main packages.
+	// Print a warning if an argument contains "..." and matches no main packages.
+	// PackagesAndErrors already prints warnings for patterns that don't match any
+	// packages, so be careful not to double print.
+	// TODO(golang.org/issue/40276): don't report errors loading non-main packages
+	// matched by a pattern.
+	pkgOpts := load.PackageOpts{MainOnly: true}
+	pkgs, err := load.PackagesAndErrorsOutsideModule(ctx, pkgOpts, args)
+	if err != nil {
+		base.Fatal(err)
+	}
+	load.CheckPackageErrors(pkgs)
+	patterns := make([]string, len(args))
+	for i, arg := range args {
+		patterns[i] = arg[:strings.Index(arg, "@")]
+	}
+
+	// Build and install the packages.
+	InstallPackages(ctx, patterns, pkgs)
+}
+
+// ExecCmd is the command to use to run user binaries.
+// Normally it is empty, meaning run the binaries directly.
+// If cross-compiling and running on a remote system or
+// simulator, it is typically go_GOOS_GOARCH_exec, with
+// the target GOOS and GOARCH substituted.
+// The -exec flag overrides these defaults.
+var ExecCmd []string
+
+// FindExecCmd derives the value of ExecCmd to use.
+// It returns that value and leaves ExecCmd set for direct use.
+func FindExecCmd() []string {
+	if ExecCmd != nil {
+		return ExecCmd
+	}
+	ExecCmd = []string{} // avoid work the second time
+	if cfg.Goos == runtime.GOOS && cfg.Goarch == runtime.GOARCH {
+		return ExecCmd
+	}
+	path, err := cfg.LookPath(fmt.Sprintf("go_%s_%s_exec", cfg.Goos, cfg.Goarch))
+	if err == nil {
+		ExecCmd = []string{path}
+	}
+	return ExecCmd
+}
+
+// A coverFlag is a flag.Value that also implies -cover.
+type coverFlag struct{ V flag.Value }
+
+func (f coverFlag) String() string { return f.V.String() }
+
+func (f coverFlag) Set(value string) error {
+	if err := f.V.Set(value); err != nil {
+		return err
+	}
+	cfg.BuildCover = true
+	return nil
+}
+
+type coverModeFlag string
+
+func (f *coverModeFlag) String() string { return string(*f) }
+func (f *coverModeFlag) Set(value string) error {
+	switch value {
+	case "", "set", "count", "atomic":
+		*f = coverModeFlag(value)
+		cfg.BuildCoverMode = value
+		return nil
+	default:
+		return errors.New(`valid modes are "set", "count", or "atomic"`)
+	}
+}
+
+// A commaListFlag is a flag.Value representing a comma-separated list.
+type commaListFlag struct{ Vals *[]string }
+
+func (f commaListFlag) String() string { return strings.Join(*f.Vals, ",") }
+
+func (f commaListFlag) Set(value string) error {
+	if value == "" {
+		*f.Vals = nil
+	} else {
+		*f.Vals = strings.Split(value, ",")
+	}
+	return nil
+}
+
+// A stringFlag is a flag.Value representing a single string.
+type stringFlag struct{ val *string }
+
+func (f stringFlag) String() string { return *f.val }
+func (f stringFlag) Set(value string) error {
+	*f.val = value
+	return nil
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/work/build_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/build_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..f3059f219ca686dff1ba1e4f0077fd8514e3b41f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/build_test.go
@@ -0,0 +1,281 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+	"fmt"
+	"internal/testenv"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"strings"
+	"testing"
+
+	"cmd/go/internal/base"
+	"cmd/go/internal/cfg"
+	"cmd/go/internal/load"
+)
+
+func TestRemoveDevNull(t *testing.T) {
+	fi, err := os.Lstat(os.DevNull)
+	if err != nil {
+		t.Skip(err)
+	}
+	if fi.Mode().IsRegular() {
+		t.Errorf("Lstat(%s).Mode().IsRegular() = true; expected false", os.DevNull)
+	}
+	mayberemovefile(os.DevNull)
+	_, err = os.Lstat(os.DevNull)
+	if err != nil {
+		t.Errorf("mayberemovefile(%s) did remove it; oops", os.DevNull)
+	}
+}
+
+func TestSplitPkgConfigOutput(t *testing.T) {
+	for _, test := range []struct {
+		in   []byte
+		want []string
+	}{
+		{[]byte(`-r:foo -L/usr/white\ space/lib -lfoo\ bar -lbar\ baz`), []string{"-r:foo", "-L/usr/white space/lib", "-lfoo bar", "-lbar baz"}},
+		{[]byte(`-lextra\ fun\ arg\\`), []string{`-lextra fun arg\`}},
+		{[]byte("\textra     whitespace\r\n"), []string{"extra", "whitespace\r"}},
+		{[]byte("     \r\n      "), []string{"\r"}},
+		{[]byte(`"-r:foo" "-L/usr/white space/lib" "-lfoo bar" "-lbar baz"`), []string{"-r:foo", "-L/usr/white space/lib", "-lfoo bar", "-lbar baz"}},
+		{[]byte(`"-lextra fun arg\\"`), []string{`-lextra fun arg\`}},
+		{[]byte(`"     \r\n\      "`), []string{`     \r\n\      `}},
+		{[]byte(`""`), []string{""}},
+		{[]byte(``), nil},
+		{[]byte(`"\\"`), []string{`\`}},
+		{[]byte(`"\x"`), []string{`\x`}},
+		{[]byte(`"\\x"`), []string{`\x`}},
+		{[]byte(`'\\'`), []string{`\\`}},
+		{[]byte(`'\x'`), []string{`\x`}},
+		{[]byte(`"\\x"`), []string{`\x`}},
+		{[]byte("\\\n"), nil},
+		{[]byte(`-fPIC -I/test/include/foo -DQUOTED='"/test/share/doc"'`), []string{"-fPIC", "-I/test/include/foo", `-DQUOTED="/test/share/doc"`}},
+		{[]byte(`-fPIC -I/test/include/foo -DQUOTED="/test/share/doc"`), []string{"-fPIC", "-I/test/include/foo", "-DQUOTED=/test/share/doc"}},
+		{[]byte(`-fPIC -I/test/include/foo -DQUOTED=\"/test/share/doc\"`), []string{"-fPIC", "-I/test/include/foo", `-DQUOTED="/test/share/doc"`}},
+		{[]byte(`-fPIC -I/test/include/foo -DQUOTED='/test/share/doc'`), []string{"-fPIC", "-I/test/include/foo", "-DQUOTED=/test/share/doc"}},
+		{[]byte(`-DQUOTED='/te\st/share/d\oc'`), []string{`-DQUOTED=/te\st/share/d\oc`}},
+		{[]byte(`-Dhello=10 -Dworld=+32 -DDEFINED_FROM_PKG_CONFIG=hello\ world`), []string{"-Dhello=10", "-Dworld=+32", "-DDEFINED_FROM_PKG_CONFIG=hello world"}},
+		{[]byte(`"broken\"" \\\a "a"`), []string{"broken\"", "\\a", "a"}},
+	} {
+		got, err := splitPkgConfigOutput(test.in)
+		if err != nil {
+			t.Errorf("splitPkgConfigOutput on %#q failed with error %v", test.in, err)
+			continue
+		}
+		if !reflect.DeepEqual(got, test.want) {
+			t.Errorf("splitPkgConfigOutput(%#q) = %#q; want %#q", test.in, got, test.want)
+		}
+	}
+
+	for _, test := range []struct {
+		in   []byte
+		want []string
+	}{
+		// broken quotation
+		{[]byte(`"     \r\n      `), nil},
+		{[]byte(`"-r:foo" "-L/usr/white space/lib "-lfoo bar" "-lbar baz"`), nil},
+		{[]byte(`"-lextra fun arg\\`), nil},
+		// broken char escaping
+		{[]byte(`broken flag\`), nil},
+		{[]byte(`extra broken flag \`), nil},
+		{[]byte(`\`), nil},
+		{[]byte(`"broken\"" "extra" \`), nil},
+	} {
+		got, err := splitPkgConfigOutput(test.in)
+		if err == nil {
+			t.Errorf("splitPkgConfigOutput(%v) = %v; haven't failed with error as expected.", test.in, got)
+		}
+		if !reflect.DeepEqual(got, test.want) {
+			t.Errorf("splitPkgConfigOutput(%v) = %v; want %v", test.in, got, test.want)
+		}
+	}
+
+}
+
+func TestSharedLibName(t *testing.T) {
+	// TODO(avdva) - make these values platform-specific
+	prefix := "lib"
+	suffix := ".so"
+	testData := []struct {
+		args      []string
+		pkgs      []*load.Package
+		expected  string
+		expectErr bool
+		rootedAt  string
+	}{
+		{
+			args:     []string{"std"},
+			pkgs:     []*load.Package{},
+			expected: "std",
+		},
+		{
+			args:     []string{"std", "cmd"},
+			pkgs:     []*load.Package{},
+			expected: "std,cmd",
+		},
+		{
+			args:     []string{},
+			pkgs:     []*load.Package{pkgImportPath("gopkg.in/somelib")},
+			expected: "gopkg.in-somelib",
+		},
+		{
+			args:     []string{"./..."},
+			pkgs:     []*load.Package{pkgImportPath("somelib")},
+			expected: "somelib",
+			rootedAt: "somelib",
+		},
+		{
+			args:     []string{"../somelib", "../somelib"},
+			pkgs:     []*load.Package{pkgImportPath("somelib")},
+			expected: "somelib",
+		},
+		{
+			args:     []string{"../lib1", "../lib2"},
+			pkgs:     []*load.Package{pkgImportPath("gopkg.in/lib1"), pkgImportPath("gopkg.in/lib2")},
+			expected: "gopkg.in-lib1,gopkg.in-lib2",
+		},
+		{
+			args: []string{"./..."},
+			pkgs: []*load.Package{
+				pkgImportPath("gopkg.in/dir/lib1"),
+				pkgImportPath("gopkg.in/lib2"),
+				pkgImportPath("gopkg.in/lib3"),
+			},
+			expected: "gopkg.in",
+			rootedAt: "gopkg.in",
+		},
+		{
+			args:      []string{"std", "../lib2"},
+			pkgs:      []*load.Package{},
+			expectErr: true,
+		},
+		{
+			args:      []string{"all", "./"},
+			pkgs:      []*load.Package{},
+			expectErr: true,
+		},
+		{
+			args:      []string{"cmd", "fmt"},
+			pkgs:      []*load.Package{},
+			expectErr: true,
+		},
+	}
+	for _, data := range testData {
+		func() {
+			if data.rootedAt != "" {
+				tmpGopath, err := os.MkdirTemp("", "gopath")
+				if err != nil {
+					t.Fatal(err)
+				}
+				cwd := base.Cwd()
+				oldGopath := cfg.BuildContext.GOPATH
+				defer func() {
+					cfg.BuildContext.GOPATH = oldGopath
+					os.Chdir(cwd)
+					err := os.RemoveAll(tmpGopath)
+					if err != nil {
+						t.Error(err)
+					}
+				}()
+				root := filepath.Join(tmpGopath, "src", data.rootedAt)
+				err = os.MkdirAll(root, 0755)
+				if err != nil {
+					t.Fatal(err)
+				}
+				cfg.BuildContext.GOPATH = tmpGopath
+				os.Chdir(root)
+			}
+			computed, err := libname(data.args, data.pkgs)
+			if err != nil {
+				if !data.expectErr {
+					t.Errorf("libname returned an error %q, expected a name", err.Error())
+				}
+			} else if data.expectErr {
+				t.Errorf("libname returned %q, expected an error", computed)
+			} else {
+				expected := prefix + data.expected + suffix
+				if expected != computed {
+					t.Errorf("libname returned %q, expected %q", computed, expected)
+				}
+			}
+		}()
+	}
+}
+
+func pkgImportPath(pkgpath string) *load.Package {
+	return &load.Package{
+		PackagePublic: load.PackagePublic{
+			ImportPath: pkgpath,
+		},
+	}
+}
+
+// When installing packages, the installed package directory should
+// respect the SetGID bit and group name of the destination
+// directory.
+// See https://golang.org/issue/18878.
+func TestRespectSetgidDir(t *testing.T) {
+	// Check that `cp` is called instead of `mv` by looking at the output
+	// of `(*Shell).ShowCmd` afterwards as a sanity check.
+	cfg.BuildX = true
+	var cmdBuf strings.Builder
+	sh := NewShell("", func(a ...any) (int, error) {
+		return cmdBuf.WriteString(fmt.Sprint(a...))
+	})
+
+	setgiddir, err := os.MkdirTemp("", "SetGroupID")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(setgiddir)
+
+	// BSD mkdir(2) inherits the parent directory group, and other platforms
+	// can inherit the parent directory group via setgid. The test setup (chmod
+	// setgid) will fail if the process does not have the group permission to
+	// the new temporary directory.
+	err = os.Chown(setgiddir, os.Getuid(), os.Getgid())
+	if err != nil {
+		if testenv.SyscallIsNotSupported(err) {
+			t.Skip("skipping: chown is not supported on " + runtime.GOOS)
+		}
+		t.Fatal(err)
+	}
+
+	// Change setgiddir's permissions to include the SetGID bit.
+	if err := os.Chmod(setgiddir, 0755|fs.ModeSetgid); err != nil {
+		if testenv.SyscallIsNotSupported(err) {
+			t.Skip("skipping: chmod is not supported on " + runtime.GOOS)
+		}
+		t.Fatal(err)
+	}
+	if fi, err := os.Stat(setgiddir); err != nil {
+		t.Fatal(err)
+	} else if fi.Mode()&fs.ModeSetgid == 0 {
+		t.Skip("skipping: Chmod ignored ModeSetgid on " + runtime.GOOS)
+	}
+
+	pkgfile, err := os.CreateTemp("", "pkgfile")
+	if err != nil {
+		t.Fatalf("os.CreateTemp(\"\", \"pkgfile\"): %v", err)
+	}
+	defer os.Remove(pkgfile.Name())
+	defer pkgfile.Close()
+
+	dirGIDFile := filepath.Join(setgiddir, "setgid")
+	if err := sh.moveOrCopyFile(dirGIDFile, pkgfile.Name(), 0666, true); err != nil {
+		t.Fatalf("moveOrCopyFile: %v", err)
+	}
+
+	got := strings.TrimSpace(cmdBuf.String())
+	want := sh.fmtCmd("", "cp %s %s", pkgfile.Name(), dirGIDFile)
+	if got != want {
+		t.Fatalf("moveOrCopyFile(%q, %q): want %q, got %q", dirGIDFile, pkgfile.Name(), want, got)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/work/buildid.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/buildid.go
new file mode 100644
index 0000000000000000000000000000000000000000..bf923d0d5e7d3274a52f83dc5b02fcf52aed8971
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/buildid.go
@@ -0,0 +1,715 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"os/exec"
+	"strings"
+
+	"cmd/go/internal/base"
+	"cmd/go/internal/cache"
+	"cmd/go/internal/cfg"
+	"cmd/go/internal/fsys"
+	"cmd/go/internal/str"
+	"cmd/internal/buildid"
+	"cmd/internal/quoted"
+)
+
+// Build IDs
+//
+// Go packages and binaries are stamped with build IDs that record both
+// the action ID, which is a hash of the inputs to the action that produced
+// the packages or binary, and the content ID, which is a hash of the action
+// output, namely the archive or binary itself. The hash is the same one
+// used by the build artifact cache (see cmd/go/internal/cache), but
+// truncated when stored in packages and binaries, as the full length is not
+// needed and is a bit unwieldy. The precise form is
+//
+//	actionID/[.../]contentID
+//
+// where the actionID and contentID are prepared by buildid.HashToString below,
+// and are found by looking for the first or last slash.
+// Usually the buildID is simply actionID/contentID, but see below for an
+// exception.
+//
+// The build ID serves two primary purposes.
+//
+// 1. The action ID half allows installed packages and binaries to serve as
+// one-element cache entries. If we intend to build math.a with a given
+// set of inputs summarized in the action ID, and the installed math.a already
+// has that action ID, we can reuse the installed math.a instead of rebuilding it.
+//
+// 2. The content ID half allows the easy preparation of action IDs for steps
+// that consume a particular package or binary. The content hash of every
+// input file for a given action must be included in the action ID hash.
+// Storing the content ID in the build ID lets us read it from the file with
+// minimal I/O, instead of reading and hashing the entire file.
+// This is especially effective since packages and binaries are typically
+// the largest inputs to an action.
+//
+// Separating action ID from content ID is important for reproducible builds.
+// The compiler is compiled with itself. If an output were represented by its
+// own action ID (instead of content ID) when computing the action ID of
+// the next step in the build process, then the compiler could never have its
+// own input action ID as its output action ID (short of a miraculous hash collision).
+// Instead we use the content IDs to compute the next action ID, and because
+// the content IDs converge, so too do the action IDs and therefore the
+// build IDs and the overall compiler binary. See cmd/dist's cmdbootstrap
+// for the actual convergence sequence.
+//
+// The “one-element cache” purpose is a bit more complex for installed
+// binaries. For a binary, like cmd/gofmt, there are two steps: compile
+// cmd/gofmt/*.go into main.a, and then link main.a into the gofmt binary.
+// We do not install gofmt's main.a, only the gofmt binary. Being able to
+// decide that the gofmt binary is up-to-date means computing the action ID
+// for the final link of the gofmt binary and comparing it against the
+// already-installed gofmt binary. But computing the action ID for the link
+// means knowing the content ID of main.a, which we did not keep.
+// To sidestep this problem, each binary actually stores an expanded build ID:
+//
+//	actionID(binary)/actionID(main.a)/contentID(main.a)/contentID(binary)
+//
+// (Note that this can be viewed equivalently as:
+//
+//	actionID(binary)/buildID(main.a)/contentID(binary)
+//
+// Storing the buildID(main.a) in the middle lets the computations that care
+// about the prefix or suffix halves ignore the middle and preserves the
+// original build ID as a contiguous string.)
+//
+// During the build, when it's time to build main.a, the gofmt binary has the
+// information needed to decide whether the eventual link would produce
+// the same binary: if the action ID for main.a's inputs matches and then
+// the action ID for the link step matches when assuming the given main.a
+// content ID, then the binary as a whole is up-to-date and need not be rebuilt.
+//
+// This is all a bit complex and may be simplified once we can rely on the
+// main cache, but at least at the start we will be using the content-based
+// staleness determination without a cache beyond the usual installed
+// package and binary locations.
+
// buildIDSeparator separates the action ID and content ID halves of a build ID.
const buildIDSeparator = "/"

// actionID returns the action ID half of a build ID:
// everything before the first separator, or the whole
// string if no separator is present.
func actionID(buildID string) string {
	if before, _, ok := strings.Cut(buildID, buildIDSeparator); ok {
		return before
	}
	return buildID
}

// contentID returns the content ID half of a build ID:
// everything after the last separator, or the whole
// string if no separator is present.
func contentID(buildID string) string {
	if i := strings.LastIndex(buildID, buildIDSeparator); i >= 0 {
		return buildID[i+1:]
	}
	return buildID
}
+
+// toolID returns the unique ID to use for the current copy of the
+// named tool (asm, compile, cover, link).
+//
+// It is important that if the tool changes (for example a compiler bug is fixed
+// and the compiler reinstalled), toolID returns a different string, so that old
+// package archives look stale and are rebuilt (with the fixed compiler).
+// This suggests using a content hash of the tool binary, as stored in the build ID.
+//
+// Unfortunately, we can't just open the tool binary, because the tool might be
+// invoked via a wrapper program specified by -toolexec and we don't know
+// what the wrapper program does. In particular, we want "-toolexec toolstash"
+// to continue working: it does no good if "-toolexec toolstash" is executing a
+// stashed copy of the compiler but the go command is acting as if it will run
+// the standard copy of the compiler. The solution is to ask the tool binary to tell
+// us its own build ID using the "-V=full" flag now supported by all tools.
+// Then we know we're getting the build ID of the compiler that will actually run
+// during the build. (How does the compiler binary know its own content hash?
+// We store it there using updateBuildID after the standard link step.)
+//
+// A final twist is that we'd prefer to have reproducible builds for release toolchains.
+// It should be possible to cross-compile for Windows from either Linux or Mac
+// or Windows itself and produce the same binaries, bit for bit. If the tool ID,
+// which influences the action ID half of the build ID, is based on the content ID,
+// then the Linux compiler binary and Mac compiler binary will have different tool IDs
+// and therefore produce executables with different action IDs.
+// To avoid this problem, for releases we use the release version string instead
+// of the compiler binary's content hash. This assumes that all compilers built
+// on all different systems are semantically equivalent, which is of course only true
+// modulo bugs. (Producing the exact same executables also requires that the different
+// build setups agree on details like $GOROOT and file name paths, but at least the
+// tool IDs do not make it impossible.)
func (b *Builder) toolID(name string) string {
	// Fast path: b.id guards toolIDCache against concurrent access.
	b.id.Lock()
	id := b.toolIDCache[name]
	b.id.Unlock()

	if id != "" {
		return id
	}

	path := base.Tool(name)
	desc := "go tool " + name

	// Special case: undocumented -vettool overrides usual vet,
	// for testing vet or supplying an alternative analysis tool.
	if name == "vet" && VetTool != "" {
		path = VetTool
		desc = VetTool
	}

	// Run the tool (through -toolexec, if any) and ask it for its own
	// build ID; see the function comment above for why we cannot just
	// read the binary ourselves.
	cmdline := str.StringList(cfg.BuildToolexec, path, "-V=full")
	cmd := exec.Command(cmdline[0], cmdline[1:]...)
	var stdout, stderr strings.Builder
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		if stderr.Len() > 0 {
			os.Stderr.WriteString(stderr.String())
		}
		base.Fatalf("go: error obtaining buildID for %s: %v", desc, err)
	}

	// Expected output is "<name> version <version> ...", with a trailing
	// "buildID=..." field when <version> is "devel".
	// Note: && binds tighter than ||, so the name check is skipped when a
	// custom -vettool is in use (its reported name may differ from "vet").
	line := stdout.String()
	f := strings.Fields(line)
	if len(f) < 3 || f[0] != name && path != VetTool || f[1] != "version" || f[2] == "devel" && !strings.HasPrefix(f[len(f)-1], "buildID=") {
		base.Fatalf("go: parsing buildID from %s -V=full: unexpected output:\n\t%s", desc, line)
	}
	if f[2] == "devel" {
		// On the development branch, use the content ID part of the build ID.
		id = contentID(f[len(f)-1])
	} else {
		// For a release, the output is like: "compile version go1.9.1 X:framepointer".
		// Use the whole line.
		id = strings.TrimSpace(line)
	}

	// Record the result for subsequent lookups.
	b.id.Lock()
	b.toolIDCache[name] = id
	b.id.Unlock()

	return id
}
+
+// gccToolID returns the unique ID to use for a tool that is invoked
+// by the GCC driver. This is used particularly for gccgo, but this can also
+// be used for gcc, g++, gfortran, etc.; those tools all use the GCC
+// driver under different names. The approach used here should also
+// work for sufficiently new versions of clang. Unlike toolID, the
+// name argument is the program to run. The language argument is the
+// type of input file as passed to the GCC driver's -x option.
+//
+// For these tools we have no -V=full option to dump the build ID,
+// but we can run the tool with -v -### to reliably get the compiler proper
+// and hash that. That will work in the presence of -toolexec.
+//
+// In order to get reproducible builds for released compilers, we
+// detect a released compiler by the absence of "experimental" in the
+// --version output, and in that case we just use the version string.
+//
+// gccToolID also returns the underlying executable for the compiler.
+// The caller assumes that stat of the exe can be used, combined with the id,
+// to detect changes in the underlying compiler. The returned exe can be empty,
+// which means to rely only on the id.
func (b *Builder) gccToolID(name, language string) (id, exe string, err error) {
	// Fast path: consult the cache under the b.id lock. The executable is
	// cached alongside the ID under a derived ".exe" key.
	key := name + "." + language
	b.id.Lock()
	id = b.toolIDCache[key]
	exe = b.toolIDCache[key+".exe"]
	b.id.Unlock()

	if id != "" {
		return id, exe, nil
	}

	// Invoke the driver with -### to see the subcommands and the
	// version strings. Use -x to set the language. Pretend to
	// compile an empty file on standard input.
	cmdline := str.StringList(cfg.BuildToolexec, name, "-###", "-x", language, "-c", "-")
	cmd := exec.Command(cmdline[0], cmdline[1:]...)
	// Force untranslated output so that we see the string "version".
	cmd.Env = append(os.Environ(), "LC_ALL=C")
	out, err := cmd.CombinedOutput()
	if err != nil {
		return "", "", fmt.Errorf("%s: %v; output: %q", name, err, out)
	}

	// Scan the output for a line containing the word "version" followed by
	// something that looks like a version number.
	version := ""
	lines := strings.Split(string(out), "\n")
	for _, line := range lines {
		fields := strings.Fields(line)
		for i, field := range fields {
			if strings.HasSuffix(field, ":") {
				// Avoid parsing fields of lines like "Configured with: …", which may
				// contain arbitrary substrings.
				break
			}
			if field == "version" && i < len(fields)-1 {
				// Check that the next field is plausibly a version number.
				// We require only that it begins with an ASCII digit,
				// since we don't know what version numbering schemes a given
				// C compiler may use. (Clang and GCC mostly seem to follow the scheme X.Y.Z,
				// but in https://go.dev/issue/64619 we saw "8.3 [DragonFly]", and who knows
				// what other C compilers like "zig cc" might report?)
				next := fields[i+1]
				if len(next) > 0 && next[0] >= '0' && next[0] <= '9' {
					version = line
					break
				}
			}
		}
		if version != "" {
			break
		}
	}
	if version == "" {
		return "", "", fmt.Errorf("%s: can not find version number in %q", name, out)
	}

	if !strings.Contains(version, "experimental") {
		// This is a release. Use this line as the tool ID.
		id = version
	} else {
		// This is a development version. The first line with
		// a leading space is the compiler proper.
		compiler := ""
		for _, line := range lines {
			if strings.HasPrefix(line, " ") && !strings.HasPrefix(line, " (in-process)") {
				compiler = line
				break
			}
		}
		if compiler == "" {
			return "", "", fmt.Errorf("%s: can not find compilation command in %q", name, out)
		}

		fields, _ := quoted.Split(compiler)
		if len(fields) == 0 {
			return "", "", fmt.Errorf("%s: compilation command confusion %q", name, out)
		}
		exe = fields[0]
		// Resolve a bare command name through PATH so the build ID lookup
		// and the caller's stat both see a concrete file.
		if !strings.ContainsAny(exe, `/\`) {
			if lp, err := cfg.LookPath(exe); err == nil {
				exe = lp
			}
		}
		id, err = buildid.ReadFile(exe)
		if err != nil {
			return "", "", err
		}

		// If we can't find a build ID, use a hash.
		if id == "" {
			id = b.fileHash(exe)
		}
	}

	// Record both the ID and the resolved executable for subsequent lookups.
	b.id.Lock()
	b.toolIDCache[key] = id
	b.toolIDCache[key+".exe"] = exe
	b.id.Unlock()

	return id, exe, nil
}
+
+// Check if assembler used by gccgo is GNU as.
+func assemblerIsGas() bool {
+	cmd := exec.Command(BuildToolchain.compiler(), "-print-prog-name=as")
+	assembler, err := cmd.Output()
+	if err == nil {
+		cmd := exec.Command(strings.TrimSpace(string(assembler)), "--version")
+		out, err := cmd.Output()
+		return err == nil && strings.Contains(string(out), "GNU")
+	} else {
+		return false
+	}
+}
+
+// gccgoBuildIDFile creates an assembler file that records the
+// action's build ID in an SHF_EXCLUDE section for ELF files or
+// in a CSECT in XCOFF files.
func (b *Builder) gccgoBuildIDFile(a *Action) (string, error) {
	sfile := a.Objdir + "_buildid.s"

	// Emit the section directive appropriate for the target: a CSECT on
	// AIX, an "e" (exclude) ELF section flag under GNU as, or the Solaris
	// assembler's #exclude syntax otherwise.
	var buf bytes.Buffer
	if cfg.Goos == "aix" {
		fmt.Fprintf(&buf, "\t.csect .go.buildid[XO]\n")
	} else if (cfg.Goos != "solaris" && cfg.Goos != "illumos") || assemblerIsGas() {
		fmt.Fprintf(&buf, "\t"+`.section .go.buildid,"e"`+"\n")
	} else if cfg.Goarch == "sparc" || cfg.Goarch == "sparc64" {
		fmt.Fprintf(&buf, "\t"+`.section ".go.buildid",#exclude`+"\n")
	} else { // cfg.Goarch == "386" || cfg.Goarch == "amd64"
		fmt.Fprintf(&buf, "\t"+`.section .go.buildid,#exclude`+"\n")
	}
	// Emit the build ID bytes as .byte directives, eight per line.
	fmt.Fprintf(&buf, "\t.byte ")
	for i := 0; i < len(a.buildID); i++ {
		if i > 0 {
			if i%8 == 0 {
				fmt.Fprintf(&buf, "\n\t.byte ")
			} else {
				fmt.Fprintf(&buf, ",")
			}
		}
		fmt.Fprintf(&buf, "%#02x", a.buildID[i])
	}
	fmt.Fprintf(&buf, "\n")
	// On ELF targets, also mark the stack sections non-executable so the
	// linker does not warn about (or force) an executable stack.
	if cfg.Goos != "solaris" && cfg.Goos != "illumos" && cfg.Goos != "aix" {
		secType := "@progbits"
		if cfg.Goarch == "arm" {
			secType = "%progbits"
		}
		fmt.Fprintf(&buf, "\t"+`.section .note.GNU-stack,"",%s`+"\n", secType)
		fmt.Fprintf(&buf, "\t"+`.section .note.GNU-split-stack,"",%s`+"\n", secType)
	}

	if err := b.Shell(a).writeFile(sfile, buf.Bytes()); err != nil {
		return "", err
	}

	return sfile, nil
}
+
+// buildID returns the build ID found in the given file.
+// If no build ID is found, buildID returns the content hash of the file.
+func (b *Builder) buildID(file string) string {
+	b.id.Lock()
+	id := b.buildIDCache[file]
+	b.id.Unlock()
+
+	if id != "" {
+		return id
+	}
+
+	id, err := buildid.ReadFile(file)
+	if err != nil {
+		id = b.fileHash(file)
+	}
+
+	b.id.Lock()
+	b.buildIDCache[file] = id
+	b.id.Unlock()
+
+	return id
+}
+
+// fileHash returns the content hash of the named file.
+func (b *Builder) fileHash(file string) string {
+	file, _ = fsys.OverlayPath(file)
+	sum, err := cache.FileHash(file)
+	if err != nil {
+		return ""
+	}
+	return buildid.HashToString(sum)
+}
+
+// useCache tries to satisfy the action a, which has action ID actionHash,
+// by using a cached result from an earlier build. At the moment, the only
+// cached result is the installed package or binary at target.
+// If useCache decides that the cache can be used, it sets a.buildID
+// and a.built for use by parent actions and then returns true.
+// Otherwise it sets a.buildID to a temporary build ID for use in the build
+// and returns false. When useCache returns false the expectation is that
+// the caller will build the target and then call updateBuildID to finish the
+// build ID computation.
+// When useCache returns false, it may have initiated buffering of output
+// during a's work. The caller should defer b.flushOutput(a), to make sure
+// that flushOutput is eventually called regardless of whether the action
+// succeeds. The flushOutput call must happen after updateBuildID.
func (b *Builder) useCache(a *Action, actionHash cache.ActionID, target string, printOutput bool) bool {
	// The second half of the build ID here is a placeholder for the content hash.
	// It's important that the overall buildID be unlikely verging on impossible
	// to appear in the output by chance, but that should be taken care of by
	// the actionID half; if it also appeared in the input that would be like an
	// engineered 120-bit partial SHA256 collision.
	a.actionID = actionHash
	actionID := buildid.HashToString(actionHash)
	if a.json != nil {
		a.json.ActionID = actionID
	}
	contentID := actionID // temporary placeholder, likely unique
	a.buildID = actionID + buildIDSeparator + contentID

	// Executable binaries also record the main build ID in the middle.
	// See "Build IDs" comment above.
	if a.Mode == "link" {
		mainpkg := a.Deps[0]
		a.buildID = actionID + buildIDSeparator + mainpkg.buildID + buildIDSeparator + contentID
	}

	// If user requested -a, we force a rebuild, so don't use the cache.
	if cfg.BuildA {
		if p := a.Package; p != nil && !p.Stale {
			p.Stale = true
			p.StaleReason = "build -a flag in use"
		}
		// Begin saving output for later writing to cache.
		a.output = []byte{}
		return false
	}

	// From here on we consult the installed target (if any) and the
	// build artifact cache for a reusable result.
	c := cache.Default()

	if target != "" {
		buildID, _ := buildid.ReadFile(target)
		if strings.HasPrefix(buildID, actionID+buildIDSeparator) {
			a.buildID = buildID
			if a.json != nil {
				a.json.BuildID = a.buildID
			}
			a.built = target
			// Poison a.Target to catch uses later in the build.
			a.Target = "DO NOT USE - " + a.Mode
			return true
		}
		// Special case for building a main package: if the only thing we
		// want the package for is to link a binary, and the binary is
		// already up-to-date, then to avoid a rebuild, report the package
		// as up-to-date as well. See "Build IDs" comment above.
		// TODO(rsc): Rewrite this code to use a TryCache func on the link action.
		if !b.NeedExport && a.Mode == "build" && len(a.triggers) == 1 && a.triggers[0].Mode == "link" {
			// A linked binary stores the four-part expanded build ID
			// actionID(binary)/actionID(main.a)/contentID(main.a)/contentID(binary).
			if id := strings.Split(buildID, buildIDSeparator); len(id) == 4 && id[1] == actionID {
				// Temporarily assume a.buildID is the package build ID
				// stored in the installed binary, and see if that makes
				// the upcoming link action ID a match. If so, report that
				// we built the package, safe in the knowledge that the
				// link step will not ask us for the actual package file.
				// Note that (*Builder).LinkAction arranged that all of
				// a.triggers[0]'s dependencies other than a are also
				// dependencies of a, so that we can be sure that,
				// other than a.buildID, b.linkActionID is only accessing
				// build IDs of completed actions.
				oldBuildID := a.buildID
				a.buildID = id[1] + buildIDSeparator + id[2]
				linkID := buildid.HashToString(b.linkActionID(a.triggers[0]))
				if id[0] == linkID {
					// Best effort attempt to display output from the compile and link steps.
					// If it doesn't work, it doesn't work: reusing the cached binary is more
					// important than reprinting diagnostic information.
					if printOutput {
						showStdout(b, c, a, "stdout")      // compile output
						showStdout(b, c, a, "link-stdout") // link output
					}

					// Poison a.Target to catch uses later in the build.
					a.Target = "DO NOT USE - main build pseudo-cache Target"
					a.built = "DO NOT USE - main build pseudo-cache built"
					if a.json != nil {
						a.json.BuildID = a.buildID
					}
					return true
				}
				// Otherwise restore old build ID for main build.
				a.buildID = oldBuildID
			}
		}
	}

	// Special case for linking a test binary: if the only thing we
	// want the binary for is to run the test, and the test result is cached,
	// then to avoid the link step, report the link as up-to-date.
	// We avoid the nested build ID problem in the previous special case
	// by recording the test results in the cache under the action ID half.
	if len(a.triggers) == 1 && a.triggers[0].TryCache != nil && a.triggers[0].TryCache(b, a.triggers[0]) {
		// Best effort attempt to display output from the compile and link steps.
		// If it doesn't work, it doesn't work: reusing the test result is more
		// important than reprinting diagnostic information.
		if printOutput {
			showStdout(b, c, a.Deps[0], "stdout")      // compile output
			showStdout(b, c, a.Deps[0], "link-stdout") // link output
		}

		// Poison a.Target to catch uses later in the build.
		a.Target = "DO NOT USE -  pseudo-cache Target"
		a.built = "DO NOT USE - pseudo-cache built"
		return true
	}

	// Check to see if the action output is cached.
	if file, _, err := cache.GetFile(c, actionHash); err == nil {
		if buildID, err := buildid.ReadFile(file); err == nil {
			if printOutput {
				showStdout(b, c, a, "stdout")
			}
			a.built = file
			a.Target = "DO NOT USE - using cache"
			a.buildID = buildID
			if a.json != nil {
				a.json.BuildID = a.buildID
			}
			if p := a.Package; p != nil && target != "" {
				p.Stale = true
				// Clearer than explaining that something else is stale.
				p.StaleReason = "not installed but available in build cache"
			}
			return true
		}
	}

	// If we've reached this point, we can't use the cache for the action.
	if p := a.Package; p != nil && !p.Stale {
		p.Stale = true
		p.StaleReason = "build ID mismatch"
		if b.IsCmdList {
			// Since we may end up printing StaleReason, include more detail.
			for _, p1 := range p.Internal.Imports {
				if p1.Stale && p1.StaleReason != "" {
					if strings.HasPrefix(p1.StaleReason, "stale dependency: ") {
						p.StaleReason = p1.StaleReason
						break
					}
					if strings.HasPrefix(p.StaleReason, "build ID mismatch") {
						p.StaleReason = "stale dependency: " + p1.ImportPath
					}
				}
			}
		}
	}

	// Begin saving output for later writing to cache.
	a.output = []byte{}
	return false
}
+
+func showStdout(b *Builder, c cache.Cache, a *Action, key string) error {
+	actionID := a.actionID
+
+	stdout, stdoutEntry, err := cache.GetBytes(c, cache.Subkey(actionID, key))
+	if err != nil {
+		return err
+	}
+
+	if len(stdout) > 0 {
+		sh := b.Shell(a)
+		if cfg.BuildX || cfg.BuildN {
+			sh.ShowCmd("", "%s  # internal", joinUnambiguously(str.StringList("cat", c.OutputFile(stdoutEntry.OutputID))))
+		}
+		if !cfg.BuildN {
+			sh.Print(string(stdout))
+		}
+	}
+	return nil
+}
+
+// flushOutput flushes the output being queued in a.
+func (b *Builder) flushOutput(a *Action) {
+	b.Shell(a).Print(string(a.output))
+	a.output = nil
+}
+
+// updateBuildID updates the build ID in the target written by action a.
+// It requires that useCache was called for action a and returned false,
+// and that the build was then carried out and given the temporary
+// a.buildID to record as the build ID in the resulting package or binary.
+// updateBuildID computes the final content ID and updates the build IDs
+// in the binary.
+//
+// Keep in sync with src/cmd/buildid/buildid.go
func (b *Builder) updateBuildID(a *Action, target string, rewrite bool) error {
	sh := b.Shell(a)

	// Under -x, show the rewrite as an equivalent "go tool buildid -w"
	// command; under -n, show it and do nothing else.
	if cfg.BuildX || cfg.BuildN {
		if rewrite {
			sh.ShowCmd("", "%s # internal", joinUnambiguously(str.StringList(base.Tool("buildid"), "-w", target)))
		}
		if cfg.BuildN {
			return nil
		}
	}

	c := cache.Default()

	// Cache output from compile/link, even if we don't do the rest.
	switch a.Mode {
	case "build":
		cache.PutBytes(c, cache.Subkey(a.actionID, "stdout"), a.output)
	case "link":
		// Even though we don't cache the binary, cache the linker text output.
		// We might notice that an installed binary is up-to-date but still
		// want to pretend to have run the linker.
		// Store it under the main package's action ID
		// to make it easier to find when that's all we have.
		for _, a1 := range a.Deps {
			if p1 := a1.Package; p1 != nil && p1.Name == "main" {
				cache.PutBytes(c, cache.Subkey(a1.actionID, "link-stdout"), a.output)
				break
			}
		}
	}

	// Find occurrences of old ID and compute new content-based ID.
	r, err := os.Open(target)
	if err != nil {
		return err
	}
	matches, hash, err := buildid.FindAndHash(r, a.buildID, 0)
	r.Close()
	if err != nil {
		return err
	}
	// Replace only the final (content ID) component; any leading
	// components of the build ID are preserved.
	newID := a.buildID[:strings.LastIndex(a.buildID, buildIDSeparator)] + buildIDSeparator + buildid.HashToString(hash)
	// The rewrite below patches the file in place, so the new ID must
	// occupy exactly as many bytes as the placeholder it replaces.
	if len(newID) != len(a.buildID) {
		return fmt.Errorf("internal error: build ID length mismatch %q vs %q", a.buildID, newID)
	}

	// Replace with new content-based ID.
	a.buildID = newID
	if a.json != nil {
		a.json.BuildID = a.buildID
	}
	if len(matches) == 0 {
		// Assume the user specified -buildid= to override what we were going to choose.
		return nil
	}

	if rewrite {
		w, err := os.OpenFile(target, os.O_RDWR, 0)
		if err != nil {
			return err
		}
		err = buildid.Rewrite(w, matches, newID)
		if err != nil {
			w.Close()
			return err
		}
		if err := w.Close(); err != nil {
			return err
		}
	}

	// Cache package builds, but not binaries (link steps).
	// The expectation is that binaries are not reused
	// nearly as often as individual packages, and they're
	// much larger, so the cache-footprint-to-utility ratio
	// is much lower for binaries.
	// Not caching the link step also makes sure that repeated "go run" at least
	// always rerun the linker, so that they don't get too fast.
	// (We don't want people thinking go is a scripting language.)
	// Note also that if we start caching binaries, then we will
	// copy the binaries out of the cache to run them, and then
	// that will mean the go process is itself writing a binary
	// and then executing it, so we will need to defend against
	// ETXTBSY problems as discussed in exec.go and golang.org/issue/22220.
	if a.Mode == "build" {
		r, err := os.Open(target)
		if err == nil {
			if a.output == nil {
				panic("internal error: a.output not set")
			}
			outputID, _, err := c.Put(a.actionID, r)
			r.Close()
			if err == nil && cfg.BuildX {
				sh.ShowCmd("", "%s # internal", joinUnambiguously(str.StringList("cp", target, c.OutputFile(outputID))))
			}
			if b.NeedExport {
				if err != nil {
					return err
				}
				a.Package.Export = c.OutputFile(outputID)
				a.Package.BuildID = a.buildID
			}
		}
	}

	return nil
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/work/cover.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/cover.go
new file mode 100644
index 0000000000000000000000000000000000000000..c0acc61987ef986e54e5b99cf167fe5e4f325fb5
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/cover.go
@@ -0,0 +1,150 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Action graph execution methods related to coverage.
+
+package work
+
+import (
+	"cmd/go/internal/base"
+	"cmd/go/internal/cfg"
+	"cmd/go/internal/str"
+	"cmd/internal/cov/covcmd"
+	"context"
+	"encoding/json"
+	"fmt"
+	"internal/coverage"
+	"io"
+	"os"
+	"path/filepath"
+)
+
+// CovData invokes "go tool covdata" with the specified arguments
+// as part of the execution of action 'a'.
+func (b *Builder) CovData(a *Action, cmdargs ...any) ([]byte, error) {
+	cmdline := str.StringList(cmdargs...)
+	args := append([]string{}, cfg.BuildToolexec...)
+	args = append(args, base.Tool("covdata"))
+	args = append(args, cmdline...)
+	return b.Shell(a).runOut(a.Objdir, nil, args)
+}
+
+// BuildActionCoverMetaFile locates and returns the path of the
+// meta-data file written by the "go tool cover" step as part of the
+// build action for the "go test -cover" run action 'runAct'. Note
+// that if the package has no functions the meta-data file will exist
+// but will be empty; in this case the return is an empty string.
+func BuildActionCoverMetaFile(runAct *Action) (string, error) {
+	p := runAct.Package
+	for i := range runAct.Deps {
+		pred := runAct.Deps[i]
+		if pred.Mode != "build" || pred.Package == nil {
+			continue
+		}
+		if pred.Package.ImportPath == p.ImportPath {
+			metaFile := pred.Objdir + covcmd.MetaFileForPackage(p.ImportPath)
+			f, err := os.Open(metaFile)
+			if err != nil {
+				return "", err
+			}
+			defer f.Close()
+			fi, err2 := f.Stat()
+			if err2 != nil {
+				return "", err2
+			}
+			if fi.Size() == 0 {
+				return "", nil
+			}
+			return metaFile, nil
+		}
+	}
+	return "", fmt.Errorf("internal error: unable to locate build action for package %q run action", p.ImportPath)
+}
+
+// WriteCoveragePercent writes out to the writer 'w' a "percent
+// statements covered" for the package whose test-run action is
+// 'runAct', based on the meta-data file 'mf'. This helper is used in
+// cases where a user runs "go test -cover" on a package that has
+// functions but no tests; in the normal case (package has tests)
+// the percentage is written by the test binary when it runs.
+func WriteCoveragePercent(b *Builder, runAct *Action, mf string, w io.Writer) error {
+	dir := filepath.Dir(mf)
+	output, cerr := b.CovData(runAct, "percent", "-i", dir)
+	if cerr != nil {
+		return b.Shell(runAct).reportCmd("", "", output, cerr)
+	}
+	_, werr := w.Write(output)
+	return werr
+}
+
+// WriteCoverageProfile writes out a coverage profile fragment for the
+// package whose test-run action is 'runAct'; content is written to
+// the file 'outf' based on the coverage meta-data info found in
+// 'mf'. This helper is used in cases where a user runs "go test
+// -cover" on a package that has functions but no tests.
+func WriteCoverageProfile(b *Builder, runAct *Action, mf, outf string, w io.Writer) error {
+	dir := filepath.Dir(mf)
+	output, err := b.CovData(runAct, "textfmt", "-i", dir, "-o", outf)
+	if err != nil {
+		return b.Shell(runAct).reportCmd("", "", output, err)
+	}
+	_, werr := w.Write(output)
+	return werr
+}
+
+// WriteCoverMetaFilesFile writes out a summary file ("meta-files
+// file") as part of the action function for the "writeCoverMeta"
+// pseudo action employed during "go test -coverpkg" runs where there
+// are multiple tests and multiple packages covered. It builds up a
+// table mapping package import path to meta-data file fragment and
+// writes it out to a file where it can be read by the various test
+// run actions. Note that this function has to be called A) after the
+// build actions are complete for all packages being tested, and B)
+// before any of the "run test" actions for those packages happen.
+// This requirement is enforced by adding making this action ("a")
+// dependent on all test package build actions, and making all test
+// run actions dependent on this action.
+func WriteCoverMetaFilesFile(b *Builder, ctx context.Context, a *Action) error {
+	sh := b.Shell(a)
+
+	// Build the metafilecollection object.
+	var collection coverage.MetaFileCollection
+	for i := range a.Deps {
+		dep := a.Deps[i]
+		if dep.Mode != "build" {
+			panic("unexpected mode " + dep.Mode)
+		}
+		metaFilesFile := dep.Objdir + covcmd.MetaFileForPackage(dep.Package.ImportPath)
+		// Check to make sure the meta-data file fragment exists
+		//  and has content (may be empty if package has no functions).
+		if fi, err := os.Stat(metaFilesFile); err != nil {
+			continue
+		} else if fi.Size() == 0 {
+			continue
+		}
+		collection.ImportPaths = append(collection.ImportPaths, dep.Package.ImportPath)
+		collection.MetaFileFragments = append(collection.MetaFileFragments, metaFilesFile)
+	}
+
+	// Serialize it.
+	data, err := json.Marshal(collection)
+	if err != nil {
+		return fmt.Errorf("marshal MetaFileCollection: %v", err)
+	}
+	data = append(data, '\n') // makes -x output more readable
+
+	// Create the directory for this action's objdir and
+	// then write out the serialized collection
+	// to a file in the directory.
+	if err := sh.Mkdir(a.Objdir); err != nil {
+		return err
+	}
+	mfpath := a.Objdir + coverage.MetaFilesFileName
+	if err := sh.writeFile(mfpath, data); err != nil {
+		return fmt.Errorf("writing metafiles file: %v", err)
+	}
+
+	// We're done.
+	return nil
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/work/exec.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/exec.go
new file mode 100644
index 0000000000000000000000000000000000000000..e05471b06cd1cb44545673ad321db2e594db9f02
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/exec.go
@@ -0,0 +1,3472 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Action graph execution.
+
+package work
+
+import (
+	"bytes"
+	"cmd/internal/cov/covcmd"
+	"context"
+	"crypto/sha256"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"go/token"
+	"internal/lazyregexp"
+	"io"
+	"io/fs"
+	"log"
+	"math/rand"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"slices"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"cmd/go/internal/base"
+	"cmd/go/internal/cache"
+	"cmd/go/internal/cfg"
+	"cmd/go/internal/fsys"
+	"cmd/go/internal/gover"
+	"cmd/go/internal/load"
+	"cmd/go/internal/modload"
+	"cmd/go/internal/str"
+	"cmd/go/internal/trace"
+	"cmd/internal/buildid"
+	"cmd/internal/quoted"
+	"cmd/internal/sys"
+)
+
// defaultCFlags is the default set of C compiler flags: optimize (-O2)
// and emit debug information (-g).
const defaultCFlags = "-O2 -g"
+
+// actionList returns the list of actions in the dag rooted at root
+// as visited in a depth-first post-order traversal.
+func actionList(root *Action) []*Action {
+	seen := map[*Action]bool{}
+	all := []*Action{}
+	var walk func(*Action)
+	walk = func(a *Action) {
+		if seen[a] {
+			return
+		}
+		seen[a] = true
+		for _, a1 := range a.Deps {
+			walk(a1)
+		}
+		all = append(all, a)
+	}
+	walk(root)
+	return all
+}
+
// Do runs the action graph rooted at root.
// Up to cfg.BuildP actions execute in parallel (just one under -n);
// a failed action marks all of its dependents as failed, and every
// update to shared scheduling state is serialized through b.exec.
func (b *Builder) Do(ctx context.Context, root *Action) {
	ctx, span := trace.StartSpan(ctx, "exec.Builder.Do ("+root.Mode+" "+root.Target+")")
	defer span.Done()

	if !b.IsCmdList {
		// If we're doing real work, take time at the end to trim the cache.
		c := cache.Default()
		defer func() {
			if err := c.Close(); err != nil {
				base.Fatalf("go: failed to trim cache: %v", err)
			}
		}()
	}

	// Build list of all actions, assigning depth-first post-order priority.
	// The original implementation here was a true queue
	// (using a channel) but it had the effect of getting
	// distracted by low-level leaf actions to the detriment
	// of completing higher-level actions. The order of
	// work does not matter much to overall execution time,
	// but when running "go test std" it is nice to see each test
	// results as soon as possible. The priorities assigned
	// ensure that, all else being equal, the execution prefers
	// to do what it would have done first in a simple depth-first
	// dependency order traversal.
	all := actionList(root)
	for i, a := range all {
		a.priority = i
	}

	// Write action graph, without timing information, in case we fail and exit early.
	writeActionGraph := func() {
		if file := cfg.DebugActiongraph; file != "" {
			if strings.HasSuffix(file, ".go") {
				// Do not overwrite Go source code in:
				//	go build -debug-actiongraph x.go
				base.Fatalf("go: refusing to write action graph to %v\n", file)
			}
			js := actionGraphJSON(root)
			if err := os.WriteFile(file, []byte(js), 0666); err != nil {
				fmt.Fprintf(os.Stderr, "go: writing action graph: %v\n", err)
				base.SetExitStatus(1)
			}
		}
	}
	writeActionGraph()

	// readySema is buffered to hold one token per action, so the sends
	// below (made while holding b.exec) never block.
	b.readySema = make(chan bool, len(all))

	// Initialize per-action execution state.
	for _, a := range all {
		for _, a1 := range a.Deps {
			a1.triggers = append(a1.triggers, a)
		}
		a.pending = len(a.Deps)
		if a.pending == 0 {
			b.ready.push(a)
			b.readySema <- true
		}
	}

	// Handle runs a single action and takes care of triggering
	// any actions that are runnable as a result.
	handle := func(ctx context.Context, a *Action) {
		if a.json != nil {
			a.json.TimeStart = time.Now()
		}
		var err error
		if a.Actor != nil && (!a.Failed || a.IgnoreFail) {
			// TODO(matloob): Better action descriptions
			desc := "Executing action "
			if a.Package != nil {
				desc += "(" + a.Mode + " " + a.Package.Desc() + ")"
			}
			ctx, span := trace.StartSpan(ctx, desc)
			a.traceSpan = span
			for _, d := range a.Deps {
				trace.Flow(ctx, d.traceSpan, a.traceSpan)
			}
			err = a.Actor.Act(b, ctx, a)
			span.Done()
		}
		if a.json != nil {
			a.json.TimeDone = time.Now()
		}

		// The actions run in parallel but all the updates to the
		// shared work state are serialized through b.exec.
		b.exec.Lock()
		defer b.exec.Unlock()

		if err != nil {
			if b.AllowErrors && a.Package != nil {
				if a.Package.Error == nil {
					a.Package.Error = &load.PackageError{Err: err}
					a.Package.Incomplete = true
				}
			} else {
				var ipe load.ImportPathError
				if a.Package != nil && (!errors.As(err, &ipe) || ipe.ImportPath() != a.Package.ImportPath) {
					err = fmt.Errorf("%s: %v", a.Package.ImportPath, err)
				}
				base.Errorf("%s", err)
			}
			a.Failed = true
		}

		// Propagate failure to dependents and mark any action whose
		// last dependency just finished as ready to run.
		for _, a0 := range a.triggers {
			if a.Failed {
				a0.Failed = true
			}
			if a0.pending--; a0.pending == 0 {
				b.ready.push(a0)
				b.readySema <- true
			}
		}

		// Closing readySema releases the worker goroutines below.
		if a == root {
			close(b.readySema)
		}
	}

	var wg sync.WaitGroup

	// Kick off goroutines according to parallelism.
	// If we are using the -n flag (just printing commands)
	// drop the parallelism to 1, both to make the output
	// deterministic and because there is no real work anyway.
	par := cfg.BuildP
	if cfg.BuildN {
		par = 1
	}
	for i := 0; i < par; i++ {
		wg.Add(1)
		go func() {
			ctx := trace.StartGoroutine(ctx)
			defer wg.Done()
			for {
				select {
				case _, ok := <-b.readySema:
					if !ok {
						return
					}
					// Receiving a value from b.readySema entitles
					// us to take from the ready queue.
					b.exec.Lock()
					a := b.ready.pop()
					b.exec.Unlock()
					handle(ctx, a)
				case <-base.Interrupted:
					base.SetExitStatus(1)
					return
				}
			}
		}()
	}

	wg.Wait()

	// Write action graph again, this time with timing information.
	writeActionGraph()
}
+
// buildActionID computes the action ID for a build action.
// The hash folds in everything visible here that can affect the
// compiled output: toolchain and tool IDs, compiler/assembler flags,
// relevant environment, the content hash of every input file, and the
// content IDs of all dependency packages.
func (b *Builder) buildActionID(a *Action) cache.ActionID {
	p := a.Package
	h := cache.NewHash("build " + p.ImportPath)

	// Configuration independent of compiler toolchain.
	// Note: buildmode has already been accounted for in buildGcflags
	// and should not be inserted explicitly. Most buildmodes use the
	// same compiler settings and can reuse each other's results.
	// If not, the reason is already recorded in buildGcflags.
	fmt.Fprintf(h, "compile\n")

	// Include information about the origin of the package that
	// may be embedded in the debug info for the object file.
	if cfg.BuildTrimpath {
		// When -trimpath is used with a package built from the module cache,
		// its debug information refers to the module path and version
		// instead of the directory.
		if p.Module != nil {
			fmt.Fprintf(h, "module %s@%s\n", p.Module.Path, p.Module.Version)
		}
	} else if p.Goroot {
		// The Go compiler always hides the exact value of $GOROOT
		// when building things in GOROOT.
		//
		// The C compiler does not, but for packages in GOROOT we rewrite the path
		// as though -trimpath were set, so that we don't invalidate the build cache
		// (and especially any precompiled C archive files) when changing
		// GOROOT_FINAL. (See https://go.dev/issue/50183.)
		//
		// b.WorkDir is always either trimmed or rewritten to
		// the literal string "/tmp/go-build".
	} else if !strings.HasPrefix(p.Dir, b.WorkDir) {
		// -trimpath is not set and no other rewrite rules apply,
		// so the object file may refer to the absolute directory
		// containing the package.
		fmt.Fprintf(h, "dir %s\n", p.Dir)
	}

	if p.Module != nil {
		fmt.Fprintf(h, "go %s\n", p.Module.GoVersion)
	}
	fmt.Fprintf(h, "goos %s goarch %s\n", cfg.Goos, cfg.Goarch)
	fmt.Fprintf(h, "import %q\n", p.ImportPath)
	fmt.Fprintf(h, "omitdebug %v standard %v local %v prefix %q\n", p.Internal.OmitDebug, p.Standard, p.Internal.Local, p.Internal.LocalPrefix)
	if cfg.BuildTrimpath {
		fmt.Fprintln(h, "trimpath")
	}
	if p.Internal.ForceLibrary {
		fmt.Fprintf(h, "forcelibrary\n")
	}
	if len(p.CgoFiles)+len(p.SwigFiles)+len(p.SwigCXXFiles) > 0 {
		fmt.Fprintf(h, "cgo %q\n", b.toolID("cgo"))
		cppflags, cflags, cxxflags, fflags, ldflags, _ := b.CFlags(p)

		ccExe := b.ccExe()
		fmt.Fprintf(h, "CC=%q %q %q %q\n", ccExe, cppflags, cflags, ldflags)
		// Include the C compiler tool ID so that if the C
		// compiler changes we rebuild the package.
		if ccID, _, err := b.gccToolID(ccExe[0], "c"); err == nil {
			fmt.Fprintf(h, "CC ID=%q\n", ccID)
		}
		if len(p.CXXFiles)+len(p.SwigCXXFiles) > 0 {
			cxxExe := b.cxxExe()
			fmt.Fprintf(h, "CXX=%q %q\n", cxxExe, cxxflags)
			if cxxID, _, err := b.gccToolID(cxxExe[0], "c++"); err == nil {
				fmt.Fprintf(h, "CXX ID=%q\n", cxxID)
			}
		}
		if len(p.FFiles) > 0 {
			fcExe := b.fcExe()
			fmt.Fprintf(h, "FC=%q %q\n", fcExe, fflags)
			if fcID, _, err := b.gccToolID(fcExe[0], "f95"); err == nil {
				fmt.Fprintf(h, "FC ID=%q\n", fcID)
			}
		}
		// TODO(rsc): Should we include the SWIG version?
	}
	if p.Internal.Cover.Mode != "" {
		fmt.Fprintf(h, "cover %q %q\n", p.Internal.Cover.Mode, b.toolID("cover"))
	}
	if p.Internal.FuzzInstrument {
		if fuzzFlags := fuzzInstrumentFlags(); fuzzFlags != nil {
			fmt.Fprintf(h, "fuzz %q\n", fuzzFlags)
		}
	}
	if p.Internal.BuildInfo != nil {
		fmt.Fprintf(h, "modinfo %q\n", p.Internal.BuildInfo.String())
	}

	// Configuration specific to compiler toolchain.
	switch cfg.BuildToolchainName {
	default:
		base.Fatalf("buildActionID: unknown build toolchain %q", cfg.BuildToolchainName)
	case "gc":
		fmt.Fprintf(h, "compile %s %q %q\n", b.toolID("compile"), forcedGcflags, p.Internal.Gcflags)
		if len(p.SFiles) > 0 {
			fmt.Fprintf(h, "asm %q %q %q\n", b.toolID("asm"), forcedAsmflags, p.Internal.Asmflags)
		}

		// GOARM, GOMIPS, etc.
		key, val := cfg.GetArchEnv()
		fmt.Fprintf(h, "%s=%s\n", key, val)

		if cfg.CleanGOEXPERIMENT != "" {
			fmt.Fprintf(h, "GOEXPERIMENT=%q\n", cfg.CleanGOEXPERIMENT)
		}

		// TODO(rsc): Convince compiler team not to add more magic environment variables,
		// or perhaps restrict the environment variables passed to subprocesses.
		// Because these are clumsy, undocumented special-case hacks
		// for debugging the compiler, they are not settable using 'go env -w',
		// and so here we use os.Getenv, not cfg.Getenv.
		magic := []string{
			"GOCLOBBERDEADHASH",
			"GOSSAFUNC",
			"GOSSADIR",
			"GOCOMPILEDEBUG",
		}
		for _, env := range magic {
			if x := os.Getenv(env); x != "" {
				fmt.Fprintf(h, "magic %s=%s\n", env, x)
			}
		}

	case "gccgo":
		id, _, err := b.gccToolID(BuildToolchain.compiler(), "go")
		if err != nil {
			base.Fatalf("%v", err)
		}
		fmt.Fprintf(h, "compile %s %q %q\n", id, forcedGccgoflags, p.Internal.Gccgoflags)
		fmt.Fprintf(h, "pkgpath %s\n", gccgoPkgpath(p))
		fmt.Fprintf(h, "ar %q\n", BuildToolchain.(gccgoToolchain).ar())
		if len(p.SFiles) > 0 {
			id, _, _ = b.gccToolID(BuildToolchain.compiler(), "assembler-with-cpp")
			// Ignore error; different assembler versions
			// are unlikely to make any difference anyhow.
			fmt.Fprintf(h, "asm %q\n", id)
		}
	}

	// Input files.
	inputFiles := str.StringList(
		p.GoFiles,
		p.CgoFiles,
		p.CFiles,
		p.CXXFiles,
		p.FFiles,
		p.MFiles,
		p.HFiles,
		p.SFiles,
		p.SysoFiles,
		p.SwigFiles,
		p.SwigCXXFiles,
		p.EmbedFiles,
	)
	for _, file := range inputFiles {
		fmt.Fprintf(h, "file %s %s\n", file, b.fileHash(filepath.Join(p.Dir, file)))
	}
	if p.Internal.PGOProfile != "" {
		fmt.Fprintf(h, "pgofile %s\n", b.fileHash(p.Internal.PGOProfile))
	}
	// Fold in the content IDs of all dependency packages so that a
	// change in any imported package invalidates this action too.
	for _, a1 := range a.Deps {
		p1 := a1.Package
		if p1 != nil {
			fmt.Fprintf(h, "import %s %s\n", p1.ImportPath, contentID(a1.buildID))
		}
	}

	return h.Sum()
}
+
+// needCgoHdr reports whether the actions triggered by this one
+// expect to be able to access the cgo-generated header file.
+func (b *Builder) needCgoHdr(a *Action) bool {
+	// If this build triggers a header install, run cgo to get the header.
+	if !b.IsCmdList && (a.Package.UsesCgo() || a.Package.UsesSwig()) && (cfg.BuildBuildmode == "c-archive" || cfg.BuildBuildmode == "c-shared") {
+		for _, t1 := range a.triggers {
+			if t1.Mode == "install header" {
+				return true
+			}
+		}
+		for _, t1 := range a.triggers {
+			for _, t2 := range t1.triggers {
+				if t2.Mode == "install header" {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+// allowedVersion reports whether the version v is an allowed version of go
+// (one that we can compile).
+// v is known to be of the form "1.23".
+func allowedVersion(v string) bool {
+	// Special case: no requirement.
+	if v == "" {
+		return true
+	}
+	return gover.Compare(gover.Local(), v) >= 0
+}
+
// Bit flags recording which outputs of a build action are still needed.
// (*Builder).build below clears bits as each output is produced or
// satisfied from the build cache.
const (
	needBuild uint32 = 1 << iota // the compiled package archive itself
	needCgoHdr                   // the cgo-generated header (_cgo_install.h)
	needVet                      // a vet config for this package
	needCompiledGoFiles          // Package.CompiledGoFiles must be populated
	needCovMetaFile              // the coverage meta-data file fragment
	needStale                    // NOTE(review): no use visible in this chunk — confirm at call sites
)
+
// build is the action for building a single package.
// Note that any new influence on this logic must be reported in b.buildActionID above as well.
//
// The "need" bit mask tracks which outputs are still required; bits are
// cleared as each output is produced or loaded from the build cache, and
// the function returns early once nothing is left to do.
func (b *Builder) build(ctx context.Context, a *Action) (err error) {
	p := a.Package
	sh := b.Shell(a)

	// bit returns x when b is true and 0 otherwise; used to assemble
	// the "need" mask below.
	bit := func(x uint32, b bool) uint32 {
		if b {
			return x
		}
		return 0
	}

	cachedBuild := false
	needCovMeta := p.Internal.Cover.GenMeta
	need := bit(needBuild, !b.IsCmdList && a.needBuild || b.NeedExport) |
		bit(needCgoHdr, b.needCgoHdr(a)) |
		bit(needVet, a.needVet) |
		bit(needCovMetaFile, needCovMeta) |
		bit(needCompiledGoFiles, b.NeedCompiledGoFiles)

	if !p.BinaryOnly {
		if b.useCache(a, b.buildActionID(a), p.Target, need&needBuild != 0) {
			// We found the main output in the cache.
			// If we don't need any other outputs, we can stop.
			// Otherwise, we need to write files to a.Objdir (needVet, needCgoHdr).
			// Remember that we might have them in cache
			// and check again after we create a.Objdir.
			cachedBuild = true
			a.output = []byte{} // start saving output in case we miss any cache results
			need &^= needBuild
			if b.NeedExport {
				p.Export = a.built
				p.BuildID = a.buildID
			}
			if need&needCompiledGoFiles != 0 {
				if err := b.loadCachedCompiledGoFiles(a); err == nil {
					need &^= needCompiledGoFiles
				}
			}
		}

		// Source files might be cached, even if the full action is not
		// (e.g., go list -compiled -find).
		if !cachedBuild && need&needCompiledGoFiles != 0 {
			if err := b.loadCachedCompiledGoFiles(a); err == nil {
				need &^= needCompiledGoFiles
			}
		}

		if need == 0 {
			return nil
		}
		defer b.flushOutput(a)
	}

	defer func() {
		if err != nil && b.IsCmdList && b.NeedError && p.Error == nil {
			p.Error = &load.PackageError{Err: err}
		}
	}()
	if cfg.BuildN {
		// In -n mode, print a banner between packages.
		// The banner is five lines so that when changes to
		// different sections of the bootstrap script have to
		// be merged, the banners give patch something
		// to use to find its context.
		sh.Print("\n#\n# " + p.ImportPath + "\n#\n\n")
	}

	if cfg.BuildV {
		sh.Print(p.ImportPath + "\n")
	}

	if p.Error != nil {
		// Don't try to build anything for packages with errors. There may be a
		// problem with the inputs that makes the package unsafe to build.
		return p.Error
	}

	if p.BinaryOnly {
		p.Stale = true
		p.StaleReason = "binary-only packages are no longer supported"
		if b.IsCmdList {
			return nil
		}
		return errors.New("binary-only packages are no longer supported")
	}

	if p.Module != nil && !allowedVersion(p.Module.GoVersion) {
		return errors.New("module requires Go " + p.Module.GoVersion + " or later")
	}

	if err := b.checkDirectives(a); err != nil {
		return err
	}

	if err := sh.Mkdir(a.Objdir); err != nil {
		return err
	}
	objdir := a.Objdir

	// Load cached cgo header, but only if we're skipping the main build (cachedBuild==true).
	if cachedBuild && need&needCgoHdr != 0 {
		if err := b.loadCachedCgoHdr(a); err == nil {
			need &^= needCgoHdr
		}
	}

	// Load cached coverage meta-data file fragment, but only if we're
	// skipping the main build (cachedBuild==true).
	if cachedBuild && need&needCovMetaFile != 0 {
		bact := a.Actor.(*buildActor)
		if err := b.loadCachedObjdirFile(a, cache.Default(), bact.covMetaFileName); err == nil {
			need &^= needCovMetaFile
		}
	}

	// Load cached vet config, but only if that's all we have left
	// (need == needVet, not testing just the one bit).
	// If we are going to do a full build anyway,
	// we're going to regenerate the files below anyway.
	if need == needVet {
		if err := b.loadCachedVet(a); err == nil {
			need &^= needVet
		}
	}
	if need == 0 {
		return nil
	}

	if err := AllowInstall(a); err != nil {
		return err
	}

	// make target directory
	dir, _ := filepath.Split(a.Target)
	if dir != "" {
		if err := sh.Mkdir(dir); err != nil {
			return err
		}
	}

	gofiles := str.StringList(p.GoFiles)
	cgofiles := str.StringList(p.CgoFiles)
	cfiles := str.StringList(p.CFiles)
	sfiles := str.StringList(p.SFiles)
	cxxfiles := str.StringList(p.CXXFiles)
	var objects, cgoObjects, pcCFLAGS, pcLDFLAGS []string

	if p.UsesCgo() || p.UsesSwig() {
		if pcCFLAGS, pcLDFLAGS, err = b.getPkgConfigFlags(a); err != nil {
			return
		}
	}

	// Compute overlays for .c/.cc/.h/etc. and if there are any overlays
	// put correct contents of all those files in the objdir, to ensure
	// the correct headers are included. nonGoOverlay is the overlay that
	// points from nongo files to the copied files in objdir.
	nonGoFileLists := [][]string{p.CFiles, p.SFiles, p.CXXFiles, p.HFiles, p.FFiles}
OverlayLoop:
	for _, fs := range nonGoFileLists {
		for _, f := range fs {
			if _, ok := fsys.OverlayPath(mkAbs(p.Dir, f)); ok {
				a.nonGoOverlay = make(map[string]string)
				break OverlayLoop
			}
		}
	}
	if a.nonGoOverlay != nil {
		for _, fs := range nonGoFileLists {
			for i := range fs {
				from := mkAbs(p.Dir, fs[i])
				opath, _ := fsys.OverlayPath(from)
				dst := objdir + filepath.Base(fs[i])
				if err := sh.CopyFile(dst, opath, 0666, false); err != nil {
					return err
				}
				a.nonGoOverlay[from] = dst
			}
		}
	}

	// If we're doing coverage, preprocess the .go files and put them in the work directory
	if p.Internal.Cover.Mode != "" {
		outfiles := []string{}
		infiles := []string{}
		for i, file := range str.StringList(gofiles, cgofiles) {
			if base.IsTestFile(file) {
				continue // Not covering this file.
			}

			var sourceFile string
			var coverFile string
			var key string
			if base, found := strings.CutSuffix(file, ".cgo1.go"); found {
				// cgo files have absolute paths
				base = filepath.Base(base)
				sourceFile = file
				coverFile = objdir + base + ".cgo1.go"
				key = base + ".go"
			} else {
				sourceFile = filepath.Join(p.Dir, file)
				coverFile = objdir + file
				key = file
			}
			coverFile = strings.TrimSuffix(coverFile, ".go") + ".cover.go"
			if cfg.Experiment.CoverageRedesign {
				infiles = append(infiles, sourceFile)
				outfiles = append(outfiles, coverFile)
			} else {
				cover := p.Internal.CoverVars[key]
				if cover == nil {
					continue // Not covering this file.
				}
				if err := b.cover(a, coverFile, sourceFile, cover.Var); err != nil {
					return err
				}
			}
			// Replace the original file with its instrumented counterpart
			// in whichever list (gofiles or cgofiles) it came from.
			if i < len(gofiles) {
				gofiles[i] = coverFile
			} else {
				cgofiles[i-len(gofiles)] = coverFile
			}
		}

		if cfg.Experiment.CoverageRedesign {
			if len(infiles) != 0 {
				// Coverage instrumentation creates new top level
				// variables in the target package for things like
				// meta-data containers, counter vars, etc. To avoid
				// collisions with user variables, suffix the var name
				// with 12 hex digits from the SHA-256 hash of the
				// import path. Choice of 12 digits is historical/arbitrary,
				// we just need enough of the hash to avoid accidents,
				// as opposed to precluding determined attempts by
				// users to break things.
				sum := sha256.Sum256([]byte(a.Package.ImportPath))
				coverVar := fmt.Sprintf("goCover_%x_", sum[:6])
				mode := a.Package.Internal.Cover.Mode
				if mode == "" {
					panic("covermode should be set at this point")
				}
				if newoutfiles, err := b.cover2(a, infiles, outfiles, coverVar, mode); err != nil {
					return err
				} else {
					outfiles = newoutfiles
					gofiles = append([]string{newoutfiles[0]}, gofiles...)
				}
			} else {
				// If there are no input files passed to cmd/cover,
				// then we don't want to pass -covercfg when building
				// the package with the compiler, so set covermode to
				// the empty string so as to signal that we need to do
				// that.
				p.Internal.Cover.Mode = ""
			}
			if ba, ok := a.Actor.(*buildActor); ok && ba.covMetaFileName != "" {
				b.cacheObjdirFile(a, cache.Default(), ba.covMetaFileName)
			}
		}
	}

	// Run SWIG on each .swig and .swigcxx file.
	// Each run will generate two files, a .go file and a .c or .cxx file.
	// The .go file will use import "C" and is to be processed by cgo.
	// For -cover test or build runs, this needs to happen after the cover
	// tool is run; we don't want to instrument swig-generated Go files,
	// see issue #64661.
	if p.UsesSwig() {
		outGo, outC, outCXX, err := b.swig(a, objdir, pcCFLAGS)
		if err != nil {
			return err
		}
		cgofiles = append(cgofiles, outGo...)
		cfiles = append(cfiles, outC...)
		cxxfiles = append(cxxfiles, outCXX...)
	}

	// Run cgo.
	if p.UsesCgo() || p.UsesSwig() {
		// In a package using cgo, cgo compiles the C, C++ and assembly files with gcc.
		// There is one exception: runtime/cgo's job is to bridge the
		// cgo and non-cgo worlds, so it necessarily has files in both.
		// In that case gcc only gets the gcc_* files.
		var gccfiles []string
		gccfiles = append(gccfiles, cfiles...)
		cfiles = nil
		if p.Standard && p.ImportPath == "runtime/cgo" {
			filter := func(files, nongcc, gcc []string) ([]string, []string) {
				for _, f := range files {
					if strings.HasPrefix(f, "gcc_") {
						gcc = append(gcc, f)
					} else {
						nongcc = append(nongcc, f)
					}
				}
				return nongcc, gcc
			}
			sfiles, gccfiles = filter(sfiles, sfiles[:0], gccfiles)
		} else {
			// Reject Go assembly in cgo packages: assembly must be
			// compiled by gcc here, and Go assembly directives
			// (TEXT/DATA/GLOBL) indicate the wrong kind of source.
			for _, sfile := range sfiles {
				data, err := os.ReadFile(filepath.Join(p.Dir, sfile))
				if err == nil {
					if bytes.HasPrefix(data, []byte("TEXT")) || bytes.Contains(data, []byte("\nTEXT")) ||
						bytes.HasPrefix(data, []byte("DATA")) || bytes.Contains(data, []byte("\nDATA")) ||
						bytes.HasPrefix(data, []byte("GLOBL")) || bytes.Contains(data, []byte("\nGLOBL")) {
						return fmt.Errorf("package using cgo has Go assembly file %s", sfile)
					}
				}
			}
			gccfiles = append(gccfiles, sfiles...)
			sfiles = nil
		}

		outGo, outObj, err := b.cgo(a, base.Tool("cgo"), objdir, pcCFLAGS, pcLDFLAGS, mkAbsFiles(p.Dir, cgofiles), gccfiles, cxxfiles, p.MFiles, p.FFiles)

		// The files in cxxfiles have now been handled by b.cgo.
		cxxfiles = nil

		if err != nil {
			return err
		}
		if cfg.BuildToolchainName == "gccgo" {
			cgoObjects = append(cgoObjects, a.Objdir+"_cgo_flags")
		}
		cgoObjects = append(cgoObjects, outObj...)
		gofiles = append(gofiles, outGo...)

		switch cfg.BuildBuildmode {
		case "c-archive", "c-shared":
			b.cacheCgoHdr(a)
		}
	}

	var srcfiles []string // .go and non-.go
	srcfiles = append(srcfiles, gofiles...)
	srcfiles = append(srcfiles, sfiles...)
	srcfiles = append(srcfiles, cfiles...)
	srcfiles = append(srcfiles, cxxfiles...)
	b.cacheSrcFiles(a, srcfiles)

	// Running cgo generated the cgo header.
	need &^= needCgoHdr

	// Sanity check only, since Package.load already checked as well.
	if len(gofiles) == 0 {
		return &load.NoGoError{Package: p}
	}

	// Prepare Go vet config if needed.
	if need&needVet != 0 {
		buildVetConfig(a, srcfiles)
		need &^= needVet
	}
	if need&needCompiledGoFiles != 0 {
		if err := b.loadCachedCompiledGoFiles(a); err != nil {
			return fmt.Errorf("loading compiled Go files from cache: %w", err)
		}
		need &^= needCompiledGoFiles
	}
	if need == 0 {
		// Nothing left to do.
		return nil
	}

	// Collect symbol ABI requirements from assembly.
	symabis, err := BuildToolchain.symabis(b, a, sfiles)
	if err != nil {
		return err
	}

	// Prepare Go import config.
	// We start it off with a comment so it can't be empty, so icfg.Bytes() below is never nil.
	// It should never be empty anyway, but there have been bugs in the past that resulted
	// in empty configs, which then unfortunately turn into "no config passed to compiler",
	// and the compiler falls back to looking in pkg itself, which mostly works,
	// except when it doesn't.
	var icfg bytes.Buffer
	fmt.Fprintf(&icfg, "# import config\n")
	for i, raw := range p.Internal.RawImports {
		final := p.Imports[i]
		if final != raw {
			fmt.Fprintf(&icfg, "importmap %s=%s\n", raw, final)
		}
	}
	for _, a1 := range a.Deps {
		p1 := a1.Package
		if p1 == nil || p1.ImportPath == "" || a1.built == "" {
			continue
		}
		fmt.Fprintf(&icfg, "packagefile %s=%s\n", p1.ImportPath, a1.built)
	}

	// Prepare Go embed config if needed.
	// Unlike the import config, it's okay for the embed config to be empty.
	var embedcfg []byte
	if len(p.Internal.Embed) > 0 {
		var embed struct {
			Patterns map[string][]string
			Files    map[string]string
		}
		embed.Patterns = p.Internal.Embed
		embed.Files = make(map[string]string)
		for _, file := range p.EmbedFiles {
			embed.Files[file] = filepath.Join(p.Dir, file)
		}
		js, err := json.MarshalIndent(&embed, "", "\t")
		if err != nil {
			return fmt.Errorf("marshal embedcfg: %v", err)
		}
		embedcfg = js
	}

	if p.Internal.BuildInfo != nil && cfg.ModulesEnabled {
		prog := modload.ModInfoProg(p.Internal.BuildInfo.String(), cfg.BuildToolchainName == "gccgo")
		if len(prog) > 0 {
			if err := sh.writeFile(objdir+"_gomod_.go", prog); err != nil {
				return err
			}
			gofiles = append(gofiles, objdir+"_gomod_.go")
		}
	}

	// Compile Go.
	objpkg := objdir + "_pkg_.a"
	ofile, out, err := BuildToolchain.gc(b, a, objpkg, icfg.Bytes(), embedcfg, symabis, len(sfiles) > 0, gofiles)
	if err := sh.reportCmd("", "", out, err); err != nil {
		return err
	}
	if ofile != objpkg {
		objects = append(objects, ofile)
	}

	// Copy .h files named for goos or goarch or goos_goarch
	// to names using GOOS and GOARCH.
	// For example, defs_linux_amd64.h becomes defs_GOOS_GOARCH.h.
	_goos_goarch := "_" + cfg.Goos + "_" + cfg.Goarch
	_goos := "_" + cfg.Goos
	_goarch := "_" + cfg.Goarch
	for _, file := range p.HFiles {
		name, ext := fileExtSplit(file)
		switch {
		case strings.HasSuffix(name, _goos_goarch):
			targ := file[:len(name)-len(_goos_goarch)] + "_GOOS_GOARCH." + ext
			if err := sh.CopyFile(objdir+targ, filepath.Join(p.Dir, file), 0666, true); err != nil {
				return err
			}
		case strings.HasSuffix(name, _goarch):
			targ := file[:len(name)-len(_goarch)] + "_GOARCH." + ext
			if err := sh.CopyFile(objdir+targ, filepath.Join(p.Dir, file), 0666, true); err != nil {
				return err
			}
		case strings.HasSuffix(name, _goos):
			targ := file[:len(name)-len(_goos)] + "_GOOS." + ext
			if err := sh.CopyFile(objdir+targ, filepath.Join(p.Dir, file), 0666, true); err != nil {
				return err
			}
		}
	}

	for _, file := range cfiles {
		out := file[:len(file)-len(".c")] + ".o"
		if err := BuildToolchain.cc(b, a, objdir+out, file); err != nil {
			return err
		}
		objects = append(objects, out)
	}

	// Assemble .s files.
	if len(sfiles) > 0 {
		ofiles, err := BuildToolchain.asm(b, a, sfiles)
		if err != nil {
			return err
		}
		objects = append(objects, ofiles...)
	}

	// For gccgo on ELF systems, we write the build ID as an assembler file.
	// This lets us set the SHF_EXCLUDE flag.
	// This is read by readGccgoArchive in cmd/internal/buildid/buildid.go.
	if a.buildID != "" && cfg.BuildToolchainName == "gccgo" {
		switch cfg.Goos {
		case "aix", "android", "dragonfly", "freebsd", "illumos", "linux", "netbsd", "openbsd", "solaris":
			asmfile, err := b.gccgoBuildIDFile(a)
			if err != nil {
				return err
			}
			ofiles, err := BuildToolchain.asm(b, a, []string{asmfile})
			if err != nil {
				return err
			}
			objects = append(objects, ofiles...)
		}
	}

	// NOTE(rsc): On Windows, it is critically important that the
	// gcc-compiled objects (cgoObjects) be listed after the ordinary
	// objects in the archive. I do not know why this is.
	// https://golang.org/issue/2601
	objects = append(objects, cgoObjects...)

	// Add system object files.
	for _, syso := range p.SysoFiles {
		objects = append(objects, filepath.Join(p.Dir, syso))
	}

	// Pack into archive in objdir directory.
	// If the Go compiler wrote an archive, we only need to add the
	// object files for non-Go sources to the archive.
	// If the Go compiler wrote an archive and the package is entirely
	// Go sources, there is no pack to execute at all.
	if len(objects) > 0 {
		if err := BuildToolchain.pack(b, a, objpkg, objects); err != nil {
			return err
		}
	}

	if err := b.updateBuildID(a, objpkg, true); err != nil {
		return err
	}

	a.built = objpkg
	return nil
}
+
+func (b *Builder) checkDirectives(a *Action) error {
+	var msg *bytes.Buffer
+	p := a.Package
+	var seen map[string]token.Position
+	for _, d := range p.Internal.Build.Directives {
+		if strings.HasPrefix(d.Text, "//go:debug") {
+			key, _, err := load.ParseGoDebug(d.Text)
+			if err != nil && err != load.ErrNotGoDebug {
+				if msg == nil {
+					msg = new(bytes.Buffer)
+				}
+				fmt.Fprintf(msg, "%s: invalid //go:debug: %v\n", d.Pos, err)
+				continue
+			}
+			if pos, ok := seen[key]; ok {
+				fmt.Fprintf(msg, "%s: repeated //go:debug for %v\n\t%s: previous //go:debug\n", d.Pos, key, pos)
+				continue
+			}
+			if seen == nil {
+				seen = make(map[string]token.Position)
+			}
+			seen[key] = d.Pos
+		}
+	}
+	if msg != nil {
+		// We pass a non-nil error to reportCmd to trigger the failure reporting
+		// path, but the content of the error doesn't matter because msg is
+		// non-empty.
+		err := errors.New("invalid directive")
+		return b.Shell(a).reportCmd("", "", msg.Bytes(), err)
+	}
+	return nil
+}
+
+func (b *Builder) cacheObjdirFile(a *Action, c cache.Cache, name string) error {
+	f, err := os.Open(a.Objdir + name)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	_, _, err = c.Put(cache.Subkey(a.actionID, name), f)
+	return err
+}
+
+func (b *Builder) findCachedObjdirFile(a *Action, c cache.Cache, name string) (string, error) {
+	file, _, err := cache.GetFile(c, cache.Subkey(a.actionID, name))
+	if err != nil {
+		return "", fmt.Errorf("loading cached file %s: %w", name, err)
+	}
+	return file, nil
+}
+
+func (b *Builder) loadCachedObjdirFile(a *Action, c cache.Cache, name string) error {
+	cached, err := b.findCachedObjdirFile(a, c, name)
+	if err != nil {
+		return err
+	}
+	return b.Shell(a).CopyFile(a.Objdir+name, cached, 0666, true)
+}
+
+func (b *Builder) cacheCgoHdr(a *Action) {
+	c := cache.Default()
+	b.cacheObjdirFile(a, c, "_cgo_install.h")
+}
+
+func (b *Builder) loadCachedCgoHdr(a *Action) error {
+	c := cache.Default()
+	return b.loadCachedObjdirFile(a, c, "_cgo_install.h")
+}
+
// cacheSrcFiles stores the list of source files used for a in the
// build cache under a "srcfiles" subkey, along with a cached copy of
// every generated (Objdir-resident) file on the list. Files outside
// Objdir are recorded with a "./" prefix and not copied, since they can
// be reread from their original location (see loadCachedVet and
// loadCachedCompiledGoFiles, which parse this exact format).
// Caching is best effort; errors are not reported.
func (b *Builder) cacheSrcFiles(a *Action, srcfiles []string) {
	c := cache.Default()
	var buf bytes.Buffer
	for _, file := range srcfiles {
		if !strings.HasPrefix(file, a.Objdir) {
			// not generated
			buf.WriteString("./")
			buf.WriteString(file)
			buf.WriteString("\n")
			continue
		}
		name := file[len(a.Objdir):]
		buf.WriteString(name)
		buf.WriteString("\n")
		if err := b.cacheObjdirFile(a, c, name); err != nil {
			// Couldn't cache a generated file: give up on the whole
			// list rather than record an incomplete one.
			return
		}
	}
	cache.PutBytes(c, cache.Subkey(a.actionID, "srcfiles"), buf.Bytes())
}
+
// loadCachedVet restores the "srcfiles" list written by cacheSrcFiles,
// copies any generated files back into a's object directory, and
// rebuilds the vet configuration (a.vetCfg) from the resulting list.
func (b *Builder) loadCachedVet(a *Action) error {
	c := cache.Default()
	list, _, err := cache.GetBytes(c, cache.Subkey(a.actionID, "srcfiles"))
	if err != nil {
		return fmt.Errorf("reading srcfiles list: %w", err)
	}
	var srcfiles []string
	for _, name := range strings.Split(string(list), "\n") {
		if name == "" { // end of list
			continue
		}
		if strings.HasPrefix(name, "./") {
			// Not generated: the file still lives at its original
			// location; strip the marker prefix added by cacheSrcFiles.
			srcfiles = append(srcfiles, name[2:])
			continue
		}
		// Generated file: restore it from the cache into Objdir.
		if err := b.loadCachedObjdirFile(a, c, name); err != nil {
			return err
		}
		srcfiles = append(srcfiles, a.Objdir+name)
	}
	buildVetConfig(a, srcfiles)
	return nil
}
+
// loadCachedCompiledGoFiles restores the "srcfiles" list written by
// cacheSrcFiles and records the Go files from it in
// a.Package.CompiledGoFiles. Non-Go entries are skipped. Generated Go
// files are resolved to their cached copies directly (no copy back
// into the object directory is needed, only the paths).
func (b *Builder) loadCachedCompiledGoFiles(a *Action) error {
	c := cache.Default()
	list, _, err := cache.GetBytes(c, cache.Subkey(a.actionID, "srcfiles"))
	if err != nil {
		return fmt.Errorf("reading srcfiles list: %w", err)
	}
	var gofiles []string
	for _, name := range strings.Split(string(list), "\n") {
		if name == "" { // end of list
			continue
		} else if !strings.HasSuffix(name, ".go") {
			continue
		}
		if strings.HasPrefix(name, "./") {
			// Not generated: keep the original (relative) path.
			gofiles = append(gofiles, name[len("./"):])
			continue
		}
		file, err := b.findCachedObjdirFile(a, c, name)
		if err != nil {
			return fmt.Errorf("finding %s: %w", name, err)
		}
		gofiles = append(gofiles, file)
	}
	a.Package.CompiledGoFiles = gofiles
	return nil
}
+
// vetConfig is the configuration passed to vet describing a single package.
// It is marshaled to JSON and written to the vet action's vet.cfg file
// (see the vet method below).
type vetConfig struct {
	ID           string   // package ID (example: "fmt [fmt.test]")
	Compiler     string   // compiler name (gc, gccgo)
	Dir          string   // directory containing package
	ImportPath   string   // canonical import path ("package path")
	GoFiles      []string // absolute paths to package source files
	NonGoFiles   []string // absolute paths to package non-Go files
	IgnoredFiles []string // absolute paths to ignored source files

	ImportMap   map[string]string // map import path in source code to package path
	PackageFile map[string]string // map package path to .a file with export data
	Standard    map[string]bool   // map package path to whether it's in the standard library
	PackageVetx map[string]string // map package path to vetx data from earlier vet run
	VetxOnly    bool              // only compute vetx data; don't report detected problems
	VetxOutput  string            // write vetx data to this output file
	GoVersion   string            // Go version for package

	SucceedOnTypecheckFailure bool // awful hack; see #18395 and below
}
+
// buildVetConfig constructs the vetConfig for a's package from the
// given source file list and stores it in a.vetCfg, where the vet
// action (Builder.vet) later finds it.
func buildVetConfig(a *Action, srcfiles []string) {
	// Classify files based on .go extension.
	// srcfiles does not include raw cgo files.
	var gofiles, nongofiles []string
	for _, name := range srcfiles {
		if strings.HasSuffix(name, ".go") {
			gofiles = append(gofiles, name)
		} else {
			nongofiles = append(nongofiles, name)
		}
	}

	ignored := str.StringList(a.Package.IgnoredGoFiles, a.Package.IgnoredOtherFiles)

	// Pass list of absolute paths to vet,
	// so that vet's error messages will use absolute paths,
	// so that we can reformat them relative to the directory
	// in which the go command is invoked.
	vcfg := &vetConfig{
		ID:           a.Package.ImportPath,
		Compiler:     cfg.BuildToolchainName,
		Dir:          a.Package.Dir,
		GoFiles:      mkAbsFiles(a.Package.Dir, gofiles),
		NonGoFiles:   mkAbsFiles(a.Package.Dir, nongofiles),
		IgnoredFiles: mkAbsFiles(a.Package.Dir, ignored),
		ImportPath:   a.Package.ImportPath,
		ImportMap:    make(map[string]string),
		PackageFile:  make(map[string]string),
		Standard:     make(map[string]bool),
	}
	if a.Package.Module != nil {
		v := a.Package.Module.GoVersion
		if v == "" {
			v = gover.DefaultGoModVersion
		}
		vcfg.GoVersion = "go" + v
	}
	a.vetCfg = vcfg
	// Map each import path as written in the source (RawImports) to the
	// resolved package path it loads (Imports); the slices are parallel.
	for i, raw := range a.Package.Internal.RawImports {
		final := a.Package.Imports[i]
		vcfg.ImportMap[raw] = final
	}

	// Compute the list of mapped imports in the vet config
	// so that we can add any missing mappings below.
	vcfgMapped := make(map[string]bool)
	for _, p := range vcfg.ImportMap {
		vcfgMapped[p] = true
	}

	for _, a1 := range a.Deps {
		p1 := a1.Package
		if p1 == nil || p1.ImportPath == "" {
			continue
		}
		// Add import mapping if needed
		// (for imports like "runtime/cgo" that appear only in generated code).
		if !vcfgMapped[p1.ImportPath] {
			vcfg.ImportMap[p1.ImportPath] = p1.ImportPath
		}
		if a1.built != "" {
			vcfg.PackageFile[p1.ImportPath] = a1.built
		}
		if p1.Standard {
			vcfg.Standard[p1.ImportPath] = true
		}
	}
}
+
// VetTool is the path to an alternate vet tool binary.
// The caller is expected to set it (if needed) before executing any vet actions.
var VetTool string

// VetFlags are the default flags to pass to vet.
// The caller is expected to set them before executing any vet actions.
var VetFlags []string

// VetExplicit records whether the vet flags were set explicitly on the command line.
// When false, Builder.vet may substitute its own flags for packages in GOROOT.
var VetExplicit bool
+
// vet runs the vet checker on the package built by a.Deps[0], using
// the vetConfig prepared by buildVetConfig. Vet "export data" (vetx)
// produced by the run is saved in the build cache for use by later
// vet actions that depend on this package.
func (b *Builder) vet(ctx context.Context, a *Action) error {
	// a.Deps[0] is the build of the package being vetted.
	// a.Deps[1] is the build of the "fmt" package.

	a.Failed = false // vet of dependency may have failed but we can still succeed

	if a.Deps[0].Failed {
		// The build of the package has failed. Skip vet check.
		// Vet could return export data for non-typecheck errors,
		// but we ignore it because the package cannot be compiled.
		return nil
	}

	vcfg := a.Deps[0].vetCfg
	if vcfg == nil {
		// Vet config should only be missing if the build failed.
		return fmt.Errorf("vet config not found")
	}

	sh := b.Shell(a)

	vcfg.VetxOnly = a.VetxOnly
	vcfg.VetxOutput = a.Objdir + "vet.out"
	vcfg.PackageVetx = make(map[string]string)

	// Cache key for this vet run: the vet tool ID, the effective flags,
	// the vetted package's action ID, and the vetx data of dependencies.
	h := cache.NewHash("vet " + a.Package.ImportPath)
	fmt.Fprintf(h, "vet %q\n", b.toolID("vet"))

	vetFlags := VetFlags

	// In GOROOT, we enable all the vet tests during 'go test',
	// not just the high-confidence subset. This gets us extra
	// checking for the standard library (at some compliance cost)
	// and helps us gain experience about how well the checks
	// work, to help decide which should be turned on by default.
	// The command-line still wins.
	//
	// Note that this flag change applies even when running vet as
	// a dependency of vetting a package outside std.
	// (Otherwise we'd have to introduce a whole separate
	// space of "vet fmt as a dependency of a std top-level vet"
	// versus "vet fmt as a dependency of a non-std top-level vet".)
	// This is OK as long as the packages that are farther down the
	// dependency tree turn on *more* analysis, as here.
	// (The unsafeptr check does not write any facts for use by
	// later vet runs, nor does unreachable.)
	if a.Package.Goroot && !VetExplicit && VetTool == "" {
		// Turn off -unsafeptr checks.
		// There's too much unsafe.Pointer code
		// that vet doesn't like in low-level packages
		// like runtime, sync, and reflect.
		// Note that $GOROOT/src/buildall.bash
		// does the same
		// and should be updated if these flags are
		// changed here.
		vetFlags = []string{"-unsafeptr=false"}

		// Also turn off -unreachable checks during go test.
		// During testing it is very common to make changes
		// like hard-coded forced returns or panics that make
		// code unreachable. It's unreasonable to insist on files
		// not having any unreachable code during "go test".
		// (buildall.bash still has -unreachable enabled
		// for the overall whole-tree scan.)
		if cfg.CmdName == "test" {
			vetFlags = append(vetFlags, "-unreachable=false")
		}
	}

	// Note: We could decide that vet should compute export data for
	// all analyses, in which case we don't need to include the flags here.
	// But that would mean that if an analysis causes problems like
	// unexpected crashes there would be no way to turn it off.
	// It seems better to let the flags disable export analysis too.
	fmt.Fprintf(h, "vetflags %q\n", vetFlags)

	fmt.Fprintf(h, "pkg %q\n", a.Deps[0].actionID)
	for _, a1 := range a.Deps {
		if a1.Mode == "vet" && a1.built != "" {
			fmt.Fprintf(h, "vetout %q %s\n", a1.Package.ImportPath, b.fileHash(a1.built))
			vcfg.PackageVetx[a1.Package.ImportPath] = a1.built
		}
	}
	key := cache.ActionID(h.Sum())

	// Reuse cached vetx output when we only need export data
	// (no diagnostics to report) and -a was not given.
	if vcfg.VetxOnly && !cfg.BuildA {
		c := cache.Default()
		if file, _, err := cache.GetFile(c, key); err == nil {
			a.built = file
			return nil
		}
	}

	js, err := json.MarshalIndent(vcfg, "", "\t")
	if err != nil {
		return fmt.Errorf("internal error marshaling vet config: %v", err)
	}
	js = append(js, '\n')
	if err := sh.writeFile(a.Objdir+"vet.cfg", js); err != nil {
		return err
	}

	// TODO(rsc): Why do we pass $GCCGO to go vet?
	env := b.cCompilerEnv()
	if cfg.BuildToolchainName == "gccgo" {
		env = append(env, "GCCGO="+BuildToolchain.compiler())
	}

	p := a.Package
	tool := VetTool
	if tool == "" {
		tool = base.Tool("vet")
	}
	runErr := sh.run(p.Dir, p.ImportPath, env, cfg.BuildToolexec, tool, vetFlags, a.Objdir+"vet.cfg")

	// If vet wrote export data, save it for input to future vets.
	if f, err := os.Open(vcfg.VetxOutput); err == nil {
		a.built = vcfg.VetxOutput
		cache.Default().Put(key, f)
		f.Close()
	}

	return runErr
}
+
// linkActionID computes the action ID for a link action.
// Every input that can influence the linker's output must be
// folded into the hash here.
func (b *Builder) linkActionID(a *Action) cache.ActionID {
	p := a.Package
	h := cache.NewHash("link " + p.ImportPath)

	// Toolchain-independent configuration.
	fmt.Fprintf(h, "link\n")
	fmt.Fprintf(h, "buildmode %s goos %s goarch %s\n", cfg.BuildBuildmode, cfg.Goos, cfg.Goarch)
	fmt.Fprintf(h, "import %q\n", p.ImportPath)
	fmt.Fprintf(h, "omitdebug %v standard %v local %v prefix %q\n", p.Internal.OmitDebug, p.Standard, p.Internal.Local, p.Internal.LocalPrefix)
	if cfg.BuildTrimpath {
		fmt.Fprintln(h, "trimpath")
	}

	// Toolchain-dependent configuration, shared with b.linkSharedActionID.
	b.printLinkerConfig(h, p)

	// Input files.
	for _, a1 := range a.Deps {
		p1 := a1.Package
		if p1 != nil {
			if a1.built != "" || a1.buildID != "" {
				// Hash the content ID of each dependency's object,
				// so any change to an input invalidates the link.
				buildID := a1.buildID
				if buildID == "" {
					buildID = b.buildID(a1.built)
				}
				fmt.Fprintf(h, "packagefile %s=%s\n", p1.ImportPath, contentID(buildID))
			}
			// Because we put package main's full action ID into the binary's build ID,
			// we must also put the full action ID into the binary's action ID hash.
			if p1.Name == "main" {
				fmt.Fprintf(h, "packagemain %s\n", a1.buildID)
			}
			if p1.Shlib != "" {
				fmt.Fprintf(h, "packageshlib %s=%s\n", p1.ImportPath, contentID(b.buildID(p1.Shlib)))
			}
		}
	}

	return h.Sum()
}
+
// printLinkerConfig prints the linker config into the hash h,
// as part of the computation of a linker-related action ID.
// It must include every toolchain-dependent setting that can
// affect the linker's output. p may be nil (see linkSharedActionID).
func (b *Builder) printLinkerConfig(h io.Writer, p *load.Package) {
	switch cfg.BuildToolchainName {
	default:
		base.Fatalf("linkActionID: unknown toolchain %q", cfg.BuildToolchainName)

	case "gc":
		fmt.Fprintf(h, "link %s %q %s\n", b.toolID("link"), forcedLdflags, ldBuildmode)
		if p != nil {
			fmt.Fprintf(h, "linkflags %q\n", p.Internal.Ldflags)
		}

		// GOARM, GOMIPS, etc.
		key, val := cfg.GetArchEnv()
		fmt.Fprintf(h, "%s=%s\n", key, val)

		if cfg.CleanGOEXPERIMENT != "" {
			fmt.Fprintf(h, "GOEXPERIMENT=%q\n", cfg.CleanGOEXPERIMENT)
		}

		// The linker writes source file paths that say GOROOT_FINAL, but
		// only if -trimpath is not specified (see ld() in gc.go).
		gorootFinal := cfg.GOROOT_FINAL
		if cfg.BuildTrimpath {
			gorootFinal = trimPathGoRootFinal
		}
		fmt.Fprintf(h, "GOROOT=%s\n", gorootFinal)

		// GO_EXTLINK_ENABLED controls whether the external linker is used.
		fmt.Fprintf(h, "GO_EXTLINK_ENABLED=%s\n", cfg.Getenv("GO_EXTLINK_ENABLED"))

		// TODO(rsc): Do cgo settings and flags need to be included?
		// Or external linker settings and flags?

	case "gccgo":
		id, _, err := b.gccToolID(BuildToolchain.linker(), "go")
		if err != nil {
			base.Fatalf("%v", err)
		}
		fmt.Fprintf(h, "link %s %s\n", id, ldBuildmode)
		// TODO(iant): Should probably include cgo flags here.
	}
}
+
// link is the action for linking a single command.
// Note that any new influence on this logic must be reported in b.linkActionID above as well.
func (b *Builder) link(ctx context.Context, a *Action) (err error) {
	// Reuse a previously linked binary if the action ID matches.
	if b.useCache(a, b.linkActionID(a), a.Package.Target, !b.IsCmdList) || b.IsCmdList {
		return nil
	}
	defer b.flushOutput(a)

	sh := b.Shell(a)
	if err := sh.Mkdir(a.Objdir); err != nil {
		return err
	}

	// Describe where each dependency's object file lives, for the linker.
	importcfg := a.Objdir + "importcfg.link"
	if err := b.writeLinkImportcfg(a, importcfg); err != nil {
		return err
	}

	if err := AllowInstall(a); err != nil {
		return err
	}

	// make target directory
	dir, _ := filepath.Split(a.Target)
	if dir != "" {
		if err := sh.Mkdir(dir); err != nil {
			return err
		}
	}

	if err := BuildToolchain.ld(b, a, a.Target, importcfg, a.Deps[0].built); err != nil {
		return err
	}

	// Update the binary with the final build ID.
	// But if OmitDebug is set, don't rewrite the binary, because we set OmitDebug
	// on binaries that we are going to run and then delete.
	// There's no point in doing work on such a binary.
	// Worse, opening the binary for write here makes it
	// essentially impossible to safely fork+exec due to a fundamental
	// incompatibility between ETXTBSY and threads on modern Unix systems.
	// See golang.org/issue/22220.
	// We still call updateBuildID to update a.buildID, which is important
	// for test result caching, but passing rewrite=false (final arg)
	// means we don't actually rewrite the binary, nor store the
	// result into the cache. That's probably a net win:
	// less cache space wasted on large binaries we are not likely to
	// need again. (On the other hand it does make repeated go test slower.)
	// It also makes repeated go run slower, which is a win in itself:
	// we don't want people to treat go run like a scripting environment.
	if err := b.updateBuildID(a, a.Target, !a.Package.Internal.OmitDebug); err != nil {
		return err
	}

	a.built = a.Target
	return nil
}
+
+func (b *Builder) writeLinkImportcfg(a *Action, file string) error {
+	// Prepare Go import cfg.
+	var icfg bytes.Buffer
+	for _, a1 := range a.Deps {
+		p1 := a1.Package
+		if p1 == nil {
+			continue
+		}
+		fmt.Fprintf(&icfg, "packagefile %s=%s\n", p1.ImportPath, a1.built)
+		if p1.Shlib != "" {
+			fmt.Fprintf(&icfg, "packageshlib %s=%s\n", p1.ImportPath, p1.Shlib)
+		}
+	}
+	info := ""
+	if a.Package.Internal.BuildInfo != nil {
+		info = a.Package.Internal.BuildInfo.String()
+	}
+	fmt.Fprintf(&icfg, "modinfo %q\n", modload.ModInfoData(info))
+	return b.Shell(a).writeFile(file, icfg.Bytes())
+}
+
// PkgconfigCmd returns the pkg-config binary name to invoke:
// the PKG_CONFIG environment setting when present (via envList),
// falling back to defaultPkgConfig.
// defaultPkgConfig is defined in zdefaultcc.go, written by cmd/dist.
func (b *Builder) PkgconfigCmd() string {
	return envList("PKG_CONFIG", cfg.DefaultPkgConfig)[0]
}
+
// splitPkgConfigOutput parses the pkg-config output into a slice of flags.
// This implements the shell quoting semantics described in
// https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02,
// except that it does not support parameter or arithmetic expansion or command
// substitution and hard-codes the <blank> delimiters instead of reading them
// from LC_LOCALE.
func splitPkgConfigOutput(out []byte) ([]string, error) {
	if len(out) == 0 {
		return nil, nil
	}
	var flags []string
	flag := make([]byte, 0, len(out))
	didQuote := false // was the current flag parsed from a quoted string?
	escaped := false  // did we just read `\` in a non-single-quoted context?
	quote := byte(0)  // what is the quote character around the current string?

	for _, c := range out {
		if escaped {
			if quote == '"' {
				// “The <backslash> shall retain its special meaning as an escape
				// character … only when followed by one of the following characters
				// when considered special:”
				switch c {
				case '$', '`', '"', '\\', '\n':
					// Handle the escaped character normally.
				default:
					// Not an escape character after all.
					flag = append(flag, '\\', c)
					escaped = false
					continue
				}
			}

			if c == '\n' {
				// “If a <newline> follows the <backslash>, the shell shall interpret
				// this as line continuation.”
			} else {
				flag = append(flag, c)
			}
			escaped = false
			continue
		}

		if quote != 0 && c == quote {
			quote = 0
			continue
		}
		switch quote {
		case '\'':
			// “preserve the literal value of each character”
			flag = append(flag, c)
			continue
		case '"':
			// “preserve the literal value of all characters within the double-quotes,
			// with the exception of …”
			switch c {
			case '`', '$', '\\':
			default:
				flag = append(flag, c)
				continue
			}
		}

		// “The application shall quote the following characters if they are to
		// represent themselves:”
		switch c {
		case '|', '&', ';', '<', '>', '(', ')', '$', '`':
			return nil, fmt.Errorf("unexpected shell character %q in pkgconf output", c)

		case '\\':
			// “A <backslash> that is not quoted shall preserve the literal value of
			// the following character, with the exception of a <newline>.”
			escaped = true
			continue

		case '"', '\'':
			quote = c
			didQuote = true
			continue

		case ' ', '\t', '\n':
			// Unquoted <blank> or <newline>: ends the current flag, if any.
			if len(flag) > 0 || didQuote {
				flags = append(flags, string(flag))
			}
			flag, didQuote = flag[:0], false
			continue
		}

		flag = append(flag, c)
	}

	// Prefer to report a missing quote instead of a missing escape. If the string
	// is something like `"foo\`, it's ambiguous as to whether the trailing
	// backslash is really an escape at all.
	if quote != 0 {
		return nil, errors.New("unterminated quoted string in pkgconf output")
	}
	if escaped {
		return nil, errors.New("broken character escaping in pkgconf output")
	}

	if len(flag) > 0 || didQuote {
		flags = append(flags, string(flag))
	}
	return flags, nil
}
+
// getPkgConfigFlags calls pkg-config, if the package's CgoPkgConfig
// directives require it, and returns the cflags/ldflags needed to
// build a's package.
func (b *Builder) getPkgConfigFlags(a *Action) (cflags, ldflags []string, err error) {
	p := a.Package
	sh := b.Shell(a)
	if pcargs := p.CgoPkgConfig; len(pcargs) > 0 {
		// pkg-config permits arguments to appear anywhere in
		// the command line. Move them all to the front, before --.
		var pcflags []string
		var pkgs []string
		for _, pcarg := range pcargs {
			if pcarg == "--" {
				// We're going to add our own "--" argument.
			} else if strings.HasPrefix(pcarg, "--") {
				pcflags = append(pcflags, pcarg)
			} else {
				pkgs = append(pkgs, pcarg)
			}
		}
		// Refuse package names that could be misparsed as options.
		for _, pkg := range pkgs {
			if !load.SafeArg(pkg) {
				return nil, nil, fmt.Errorf("invalid pkg-config package name: %s", pkg)
			}
		}
		var out []byte
		out, err = sh.runOut(p.Dir, nil, b.PkgconfigCmd(), "--cflags", pcflags, "--", pkgs)
		if err != nil {
			desc := b.PkgconfigCmd() + " --cflags " + strings.Join(pcflags, " ") + " -- " + strings.Join(pkgs, " ")
			return nil, nil, sh.reportCmd(desc, "", out, err)
		}
		if len(out) > 0 {
			cflags, err = splitPkgConfigOutput(bytes.TrimSpace(out))
			if err != nil {
				return nil, nil, err
			}
			if err := checkCompilerFlags("CFLAGS", "pkg-config --cflags", cflags); err != nil {
				return nil, nil, err
			}
		}
		out, err = sh.runOut(p.Dir, nil, b.PkgconfigCmd(), "--libs", pcflags, "--", pkgs)
		if err != nil {
			desc := b.PkgconfigCmd() + " --libs " + strings.Join(pcflags, " ") + " -- " + strings.Join(pkgs, " ")
			return nil, nil, sh.reportCmd(desc, "", out, err)
		}
		if len(out) > 0 {
			// We need to handle path with spaces so that C:/Program\ Files can pass
			// checkLinkerFlags. Use splitPkgConfigOutput here just like we treat cflags.
			ldflags, err = splitPkgConfigOutput(bytes.TrimSpace(out))
			if err != nil {
				return nil, nil, err
			}
			if err := checkLinkerFlags("LDFLAGS", "pkg-config --libs", ldflags); err != nil {
				return nil, nil, err
			}
		}
	}

	return
}
+
+func (b *Builder) installShlibname(ctx context.Context, a *Action) error {
+	if err := AllowInstall(a); err != nil {
+		return err
+	}
+
+	sh := b.Shell(a)
+	a1 := a.Deps[0]
+	if !cfg.BuildN {
+		if err := sh.Mkdir(filepath.Dir(a.Target)); err != nil {
+			return err
+		}
+	}
+	return sh.writeFile(a.Target, []byte(filepath.Base(a1.Target)+"\n"))
+}
+
// linkSharedActionID computes the action ID for a linkShared action,
// hashing the linker configuration and the build IDs of all inputs.
func (b *Builder) linkSharedActionID(a *Action) cache.ActionID {
	h := cache.NewHash("linkShared")

	// Toolchain-independent configuration.
	fmt.Fprintf(h, "linkShared\n")
	fmt.Fprintf(h, "goos %s goarch %s\n", cfg.Goos, cfg.Goarch)

	// Toolchain-dependent configuration, shared with b.linkActionID.
	b.printLinkerConfig(h, nil)

	// Input files.
	for _, a1 := range a.Deps {
		p1 := a1.Package
		if a1.built == "" {
			continue
		}
		if p1 != nil {
			fmt.Fprintf(h, "packagefile %s=%s\n", p1.ImportPath, contentID(b.buildID(a1.built)))
			if p1.Shlib != "" {
				fmt.Fprintf(h, "packageshlib %s=%s\n", p1.ImportPath, contentID(b.buildID(p1.Shlib)))
			}
		}
	}
	// Files named on command line are special.
	for _, a1 := range a.Deps[0].Deps {
		p1 := a1.Package
		fmt.Fprintf(h, "top %s=%s\n", p1.ImportPath, contentID(b.buildID(a1.built)))
	}

	return h.Sum()
}
+
// linkShared is the action for linking a shared library from the
// packages in a.Deps. The result is reused when the linkShared
// action ID matches a cached build.
func (b *Builder) linkShared(ctx context.Context, a *Action) (err error) {
	if b.useCache(a, b.linkSharedActionID(a), a.Target, !b.IsCmdList) || b.IsCmdList {
		return nil
	}
	defer b.flushOutput(a)

	if err := AllowInstall(a); err != nil {
		return err
	}

	if err := b.Shell(a).Mkdir(a.Objdir); err != nil {
		return err
	}

	// Describe the dependency object files for the linker.
	importcfg := a.Objdir + "importcfg.link"
	if err := b.writeLinkImportcfg(a, importcfg); err != nil {
		return err
	}

	// TODO(rsc): There is a missing updateBuildID here,
	// but we have to decide where to store the build ID in these files.
	a.built = a.Target
	return BuildToolchain.ldShared(b, a, a.Deps[0].Deps, a.Target, importcfg, a.Deps)
}
+
// BuildInstallFunc is the action for installing a single package or executable.
// a.Deps[0] is the build (or link) action whose output is being installed.
func BuildInstallFunc(b *Builder, ctx context.Context, a *Action) (err error) {
	defer func() {
		if err != nil {
			// a.Package == nil is possible for the go install -buildmode=shared
			// action that installs libmangledname.so, which corresponds to
			// a list of packages, not just one.
			sep, path := "", ""
			if a.Package != nil {
				sep, path = " ", a.Package.ImportPath
			}
			err = fmt.Errorf("go %s%s%s: %v", cfg.CmdName, sep, path, err)
		}
	}()
	sh := b.Shell(a)

	a1 := a.Deps[0]
	a.buildID = a1.buildID
	if a.json != nil {
		a.json.BuildID = a.buildID
	}

	// If we are using the eventual install target as an up-to-date
	// cached copy of the thing we built, then there's no need to
	// copy it into itself (and that would probably fail anyway).
	// In this case a1.built == a.Target because a1.built == p.Target,
	// so the built target is not in the a1.Objdir tree that b.cleanup(a1) removes.
	if a1.built == a.Target {
		a.built = a.Target
		if !a.buggyInstall {
			b.cleanup(a1)
		}
		// Whether we're smart enough to avoid a complete rebuild
		// depends on exactly what the staleness and rebuild algorithms
		// are, as well as potentially the state of the Go build cache.
		// We don't really want users to be able to infer (or worse start depending on)
		// those details from whether the modification time changes during
		// "go install", so do a best-effort update of the file times to make it
		// look like we rewrote a.Target even if we did not. Updating the mtime
		// may also help other mtime-based systems that depend on our
		// previous mtime updates that happened more often.
		// This is still not perfect - we ignore the error result, and if the file was
		// unwritable for some reason then pretending to have written it is also
		// confusing - but it's probably better than not doing the mtime update.
		//
		// But don't do that for the special case where building an executable
		// with -linkshared implicitly installs all its dependent libraries.
		// We want to hide that awful detail as much as possible, so don't
		// advertise it by touching the mtimes (usually the libraries are up
		// to date).
		if !a.buggyInstall && !b.IsCmdList {
			if cfg.BuildN {
				sh.ShowCmd("", "touch %s", a.Target)
			} else if err := AllowInstall(a); err == nil {
				now := time.Now()
				os.Chtimes(a.Target, now, now)
			}
		}
		return nil
	}

	// If we're building for go list -export,
	// never install anything; just keep the cache reference.
	if b.IsCmdList {
		a.built = a1.built
		return nil
	}
	if err := AllowInstall(a); err != nil {
		return err
	}

	if err := sh.Mkdir(a.Objdir); err != nil {
		return err
	}

	// Linked executables are installed 0777 (executable); archives,
	// shared libraries, and plugins keep the default 0666.
	perm := fs.FileMode(0666)
	if a1.Mode == "link" {
		switch cfg.BuildBuildmode {
		case "c-archive", "c-shared", "plugin":
		default:
			perm = 0777
		}
	}

	// make target directory
	dir, _ := filepath.Split(a.Target)
	if dir != "" {
		if err := sh.Mkdir(dir); err != nil {
			return err
		}
	}

	if !a.buggyInstall {
		defer b.cleanup(a1)
	}

	return sh.moveOrCopyFile(a.Target, a1.built, perm, false)
}
+
// AllowInstall returns a non-nil error if this invocation of the go command is
// NOT allowed to install a.Target; callers abort the install on a non-nil
// error. The default implementation always allows installation.
//
// The build of cmd/go running under its own test is forbidden from installing
// to its original GOROOT. The var is exported so it can be set by TestMain.
var AllowInstall = func(*Action) error { return nil }
+
+// cleanup removes a's object dir to keep the amount of
+// on-disk garbage down in a large build. On an operating system
+// with aggressive buffering, cleaning incrementally like
+// this keeps the intermediate objects from hitting the disk.
+func (b *Builder) cleanup(a *Action) {
+	if !cfg.BuildWork {
+		b.Shell(a).RemoveAll(a.Objdir)
+	}
+}
+
+// Install the cgo export header file, if there is one.
+func (b *Builder) installHeader(ctx context.Context, a *Action) error {
+	sh := b.Shell(a)
+
+	src := a.Objdir + "_cgo_install.h"
+	if _, err := os.Stat(src); os.IsNotExist(err) {
+		// If the file does not exist, there are no exported
+		// functions, and we do not install anything.
+		// TODO(rsc): Once we know that caching is rebuilding
+		// at the right times (not missing rebuilds), here we should
+		// probably delete the installed header, if any.
+		if cfg.BuildX {
+			sh.ShowCmd("", "# %s not created", src)
+		}
+		return nil
+	}
+
+	if err := AllowInstall(a); err != nil {
+		return err
+	}
+
+	dir, _ := filepath.Split(a.Target)
+	if dir != "" {
+		if err := sh.Mkdir(dir); err != nil {
+			return err
+		}
+	}
+
+	return sh.moveOrCopyFile(a.Target, src, 0666, true)
+}
+
+// cover runs, in effect,
+//
+//	go tool cover -mode=b.coverMode -var="varName" -o dst.go src.go
+func (b *Builder) cover(a *Action, dst, src string, varName string) error {
+	return b.Shell(a).run(a.Objdir, "", nil,
+		cfg.BuildToolexec,
+		base.Tool("cover"),
+		"-mode", a.Package.Internal.Cover.Mode,
+		"-var", varName,
+		"-o", dst,
+		src)
+}
+
// cover2 runs, in effect,
//
//	go tool cover -pkgcfg=<config> -mode=<mode> -var="varName" -outfilelist=<file> <infiles...>
//
// Return value is an updated output files list; in addition to the
// regular outputs (instrumented source files) the cover tool also
// writes a separate file (appearing first in the list of outputs)
// that will contain coverage counters and meta-data.
func (b *Builder) cover2(a *Action, infiles, outfiles []string, varName string, mode string) ([]string, error) {
	pkgcfg := a.Objdir + "pkgcfg.txt"
	covoutputs := a.Objdir + "coveroutfiles.txt"
	// The tool also emits a generated covervars.go next to the
	// instrumented files; prepend it to the outputs list.
	odir := filepath.Dir(outfiles[0])
	cv := filepath.Join(odir, "covervars.go")
	outfiles = append([]string{cv}, outfiles...)
	if err := b.writeCoverPkgInputs(a, pkgcfg, covoutputs, outfiles); err != nil {
		return nil, err
	}
	args := []string{base.Tool("cover"),
		"-pkgcfg", pkgcfg,
		"-mode", mode,
		"-var", varName,
		"-outfilelist", covoutputs,
	}
	args = append(args, infiles...)
	if err := b.Shell(a).run(a.Objdir, "", nil,
		cfg.BuildToolexec, args); err != nil {
		return nil, err
	}
	return outfiles, nil
}
+
// writeCoverPkgInputs writes the two metadata files consumed by
// "go tool cover" in -pkgcfg mode: pconfigfile holds the JSON-encoded
// package configuration, and covoutputsfile lists the output file
// paths, one per line.
func (b *Builder) writeCoverPkgInputs(a *Action, pconfigfile string, covoutputsfile string, outfiles []string) error {
	sh := b.Shell(a)
	p := a.Package
	p.Internal.Cover.Cfg = a.Objdir + "coveragecfg"
	pcfg := covcmd.CoverPkgConfig{
		PkgPath: p.ImportPath,
		PkgName: p.Name,
		// Note: coverage granularity is currently hard-wired to
		// 'perblock'; there isn't a way using "go build -cover" or "go
		// test -cover" to select it. This may change in the future
		// depending on user demand.
		Granularity: "perblock",
		OutConfig:   p.Internal.Cover.Cfg,
		Local:       p.Internal.Local,
	}
	if ba, ok := a.Actor.(*buildActor); ok && ba.covMetaFileName != "" {
		pcfg.EmitMetaFile = a.Objdir + ba.covMetaFileName
	}
	if a.Package.Module != nil {
		pcfg.ModulePath = a.Package.Module.Path
	}
	data, err := json.Marshal(pcfg)
	if err != nil {
		return err
	}
	data = append(data, '\n')
	if err := sh.writeFile(pconfigfile, data); err != nil {
		return err
	}
	// One output path per line.
	var sb strings.Builder
	for i := range outfiles {
		fmt.Fprintf(&sb, "%s\n", outfiles[i])
	}
	return sh.writeFile(covoutputsfile, []byte(sb.String()))
}
+
// objectMagic lists the magic-number prefixes of the object and
// executable file formats recognized by isObject.
var objectMagic = [][]byte{
	{'!', '<', 'a', 'r', 'c', 'h', '>', '\n'}, // Package archive
	{'<', 'b', 'i', 'g', 'a', 'f', '>', '\n'}, // Package AIX big archive
	{'\x7F', 'E', 'L', 'F'},                   // ELF
	{0xFE, 0xED, 0xFA, 0xCE},                  // Mach-O big-endian 32-bit
	{0xFE, 0xED, 0xFA, 0xCF},                  // Mach-O big-endian 64-bit
	{0xCE, 0xFA, 0xED, 0xFE},                  // Mach-O little-endian 32-bit
	{0xCF, 0xFA, 0xED, 0xFE},                  // Mach-O little-endian 64-bit
	{0x4d, 0x5a, 0x90, 0x00, 0x03, 0x00},      // PE (Windows) as generated by 6l/8l and gcc
	{0x4d, 0x5a, 0x78, 0x00, 0x01, 0x00},      // PE (Windows) as generated by llvm for dll
	{0x00, 0x00, 0x01, 0xEB},                  // Plan 9 i386
	{0x00, 0x00, 0x8a, 0x97},                  // Plan 9 amd64
	{0x00, 0x00, 0x06, 0x47},                  // Plan 9 arm
	{0x00, 0x61, 0x73, 0x6D},                  // WASM
	{0x01, 0xDF},                              // XCOFF 32bit
	{0x01, 0xF7},                              // XCOFF 64bit
}
+
+func isObject(s string) bool {
+	f, err := os.Open(s)
+	if err != nil {
+		return false
+	}
+	defer f.Close()
+	buf := make([]byte, 64)
+	io.ReadFull(f, buf)
+	for _, magic := range objectMagic {
+		if bytes.HasPrefix(buf, magic) {
+			return true
+		}
+	}
+	return false
+}
+
// cCompilerEnv returns environment variables to set when running the
// C compiler. This is needed to disable escape codes in clang error
// messages that confuse tools like cgo.
func (b *Builder) cCompilerEnv() []string {
	// TERM=dumb suppresses terminal escape sequences in compiler output.
	return []string{"TERM=dumb"}
}
+
// mkAbs returns the path f interpreted relative to dir.
// We always pass absolute paths of source files so that
// the error messages will include the full path to a file
// in need of attention. Inputs that are already absolute, or that
// use the $WORK pseudo-directory of -n mode, are returned unchanged.
func mkAbs(dir, f string) string {
	// Leave absolute paths alone.
	// Also, during -n mode we use the pseudo-directory $WORK
	// instead of creating an actual work directory that won't be used.
	// Leave paths beginning with $WORK alone too.
	switch {
	case filepath.IsAbs(f), strings.HasPrefix(f, "$WORK"):
		return f
	default:
		return filepath.Join(dir, f)
	}
}
+
// A toolchain abstracts the compile, assemble, pack, and link steps
// so the build can drive either the gc or gccgo tool sets.
type toolchain interface {
	// gc runs the compiler in a specific directory on a set of files
	// and returns the name of the generated output file.
	gc(b *Builder, a *Action, archive string, importcfg, embedcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, out []byte, err error)
	// cc runs the toolchain's C compiler in a directory on a C file
	// to produce an output file.
	cc(b *Builder, a *Action, ofile, cfile string) error
	// asm runs the assembler in a specific directory on specific files
	// and returns a list of named output files.
	asm(b *Builder, a *Action, sfiles []string) ([]string, error)
	// symabis scans the symbol ABIs from sfiles and returns the
	// path to the output symbol ABIs file, or "" if none.
	symabis(b *Builder, a *Action, sfiles []string) (string, error)
	// pack runs the archive packer in a specific directory to create
	// an archive from a set of object files.
	// typically it is run in the object directory.
	pack(b *Builder, a *Action, afile string, ofiles []string) error
	// ld runs the linker to create an executable starting at mainpkg.
	ld(b *Builder, root *Action, targetPath, importcfg, mainpkg string) error
	// ldShared runs the linker to create a shared library containing the pkgs built by toplevelactions
	ldShared(b *Builder, root *Action, toplevelactions []*Action, targetPath, importcfg string, allactions []*Action) error

	compiler() string
	linker() string
}
+
// noToolchain is the toolchain implementation installed when the
// configured compiler is not recognized; every method aborts the
// build via noCompiler.
type noToolchain struct{}

// noCompiler aborts the build, reporting the unknown compiler named by
// cfg.BuildContext.Compiler. The error return exists only so callers
// can write "return noCompiler()"; log.Fatalf does not return.
func noCompiler() error {
	log.Fatalf("unknown compiler %q", cfg.BuildContext.Compiler)
	return nil
}

// compiler aborts via noCompiler.
func (noToolchain) compiler() string {
	noCompiler()
	return ""
}

// linker aborts via noCompiler.
func (noToolchain) linker() string {
	noCompiler()
	return ""
}

// gc aborts via noCompiler.
func (noToolchain) gc(b *Builder, a *Action, archive string, importcfg, embedcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, out []byte, err error) {
	return "", nil, noCompiler()
}

// asm aborts via noCompiler.
func (noToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) {
	return nil, noCompiler()
}

// symabis aborts via noCompiler.
func (noToolchain) symabis(b *Builder, a *Action, sfiles []string) (string, error) {
	return "", noCompiler()
}

// pack aborts via noCompiler.
func (noToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error {
	return noCompiler()
}

// ld aborts via noCompiler.
func (noToolchain) ld(b *Builder, root *Action, targetPath, importcfg, mainpkg string) error {
	return noCompiler()
}

// ldShared aborts via noCompiler.
func (noToolchain) ldShared(b *Builder, root *Action, toplevelactions []*Action, targetPath, importcfg string, allactions []*Action) error {
	return noCompiler()
}

// cc aborts via noCompiler.
func (noToolchain) cc(b *Builder, a *Action, ofile, cfile string) error {
	return noCompiler()
}
+
+// gcc runs the gcc C compiler to create an object from a single C file.
+func (b *Builder) gcc(a *Action, workdir, out string, flags []string, cfile string) error {
+	p := a.Package
+	return b.ccompile(a, out, flags, cfile, b.GccCmd(p.Dir, workdir))
+}
+
+// gxx runs the g++ C++ compiler to create an object from a single C++ file.
+func (b *Builder) gxx(a *Action, workdir, out string, flags []string, cxxfile string) error {
+	p := a.Package
+	return b.ccompile(a, out, flags, cxxfile, b.GxxCmd(p.Dir, workdir))
+}
+
+// gfortran runs the gfortran Fortran compiler to create an object from a single Fortran file.
+func (b *Builder) gfortran(a *Action, workdir, out string, flags []string, ffile string) error {
+	p := a.Package
+	return b.ccompile(a, out, flags, ffile, b.gfortranCmd(p.Dir, workdir))
+}
+
// ccompile runs the given C or C++ compiler and creates an object from a single source file.
//
// outfile and file are resolved relative to the package directory via
// mkAbs; compiler is a full command-line prefix (e.g. from GccCmd or
// GxxCmd).
func (b *Builder) ccompile(a *Action, outfile string, flags []string, file string, compiler []string) error {
	p := a.Package
	sh := b.Shell(a)
	file = mkAbs(p.Dir, file)
	outfile = mkAbs(p.Dir, outfile)

	// Elide source directory paths if -trimpath or GOROOT_FINAL is set.
	// This is needed for source files (e.g., a .c file in a package directory).
	// TODO(golang.org/issue/36072): cgo also generates files with #line
	// directives pointing to the source directory. It should not generate those
	// when -trimpath is enabled.
	if b.gccSupportsFlag(compiler, "-fdebug-prefix-map=a=b") {
		if cfg.BuildTrimpath || p.Goroot {
			prefixMapFlag := "-fdebug-prefix-map"
			if b.gccSupportsFlag(compiler, "-ffile-prefix-map=a=b") {
				prefixMapFlag = "-ffile-prefix-map"
			}
			// Keep in sync with Action.trimpath.
			// The trimmed paths are a little different, but we need to trim in mostly the
			// same situations.
			var from, toPath string
			if m := p.Module; m == nil {
				if p.Root == "" { // command-line-arguments in GOPATH mode, maybe?
					from = p.Dir
					toPath = p.ImportPath
				} else if p.Goroot {
					from = p.Root
					toPath = "GOROOT"
				} else {
					from = p.Root
					toPath = "GOPATH"
				}
			} else if m.Dir == "" {
				// The module is in the vendor directory. Replace the entire vendor
				// directory path, because the module's Dir is not filled in.
				from = modload.VendorDir()
				toPath = "vendor"
			} else {
				from = m.Dir
				toPath = m.Path
				if m.Version != "" {
					toPath += "@" + m.Version
				}
			}
			// -fdebug-prefix-map (or -ffile-prefix-map) requires an absolute "to"
			// path (or it joins the path with the working directory). Pick something
			// that makes sense for the target platform.
			var to string
			if cfg.BuildContext.GOOS == "windows" {
				to = filepath.Join(`\\_\_`, toPath)
			} else {
				to = filepath.Join("/_", toPath)
			}
			// slices.Clip caps the capacity so this append copies rather
			// than writing into the caller's backing array.
			flags = append(slices.Clip(flags), prefixMapFlag+"="+from+"="+to)
		}
	}

	// Tell gcc to not insert truly random numbers into the build process
	// this ensures LTO won't create random numbers for symbols.
	if b.gccSupportsFlag(compiler, "-frandom-seed=1") {
		flags = append(flags, "-frandom-seed="+buildid.HashToString(a.actionID))
	}

	// Compile the overlay replacement for the file, if one is registered.
	overlayPath := file
	if p, ok := a.nonGoOverlay[overlayPath]; ok {
		overlayPath = p
	}
	output, err := sh.runOut(filepath.Dir(overlayPath), b.cCompilerEnv(), compiler, flags, "-o", outfile, "-c", filepath.Base(overlayPath))

	// On FreeBSD 11, when we pass -g to clang 3.8 it
	// invokes its internal assembler with -dwarf-version=2.
	// When it sees .section .note.GNU-stack, it warns
	// "DWARF2 only supports one section per compilation unit".
	// This warning makes no sense, since the section is empty,
	// but it confuses people.
	// We work around the problem by detecting the warning
	// and dropping -g and trying again.
	if bytes.Contains(output, []byte("DWARF2 only supports one section per compilation unit")) {
		newFlags := make([]string, 0, len(flags))
		for _, f := range flags {
			if !strings.HasPrefix(f, "-g") {
				newFlags = append(newFlags, f)
			}
		}
		if len(newFlags) < len(flags) {
			return b.ccompile(a, outfile, newFlags, file, compiler)
		}
	}

	// On the Go project's builders (GO_BUILDER_NAME set), promote any
	// unexpected compiler warning output to an error so regressions
	// are noticed.
	if len(output) > 0 && err == nil && os.Getenv("GO_BUILDER_NAME") != "" {
		output = append(output, "C compiler warning promoted to error on Go builders\n"...)
		err = errors.New("warning promoted to error")
	}

	return sh.reportCmd("", "", output, err)
}
+
// gccld runs the gcc linker to create an executable from a set of object files.
// Any error output is only displayed for BuildN or BuildX.
func (b *Builder) gccld(a *Action, objdir, outfile string, flags []string, objs []string) error {
	p := a.Package
	sh := b.Shell(a)
	// Link with the C++ command when any C++ sources (including
	// SWIG-generated ones) are part of the package.
	var cmd []string
	if len(p.CXXFiles) > 0 || len(p.SwigCXXFiles) > 0 {
		cmd = b.GxxCmd(p.Dir, objdir)
	} else {
		cmd = b.GccCmd(p.Dir, objdir)
	}

	cmdargs := []any{cmd, "-o", outfile, objs, flags}
	out, err := sh.runOut(base.Cwd(), b.cCompilerEnv(), cmdargs...)

	if len(out) > 0 {
		// Filter out useless linker warnings caused by bugs outside Go.
		// See also cmd/link/internal/ld's hostlink method.
		var save [][]byte
		var skipLines int
		for _, line := range bytes.SplitAfter(out, []byte("\n")) {
			// golang.org/issue/26073 - Apple Xcode bug
			if bytes.Contains(line, []byte("ld: warning: text-based stub file")) {
				continue
			}

			if skipLines > 0 {
				skipLines--
				continue
			}

			// Remove duplicate main symbol with runtime/cgo on AIX.
			// With runtime/cgo, two main are available:
			// One is generated by cgo tool with {return 0;}.
			// The other one is the main calling runtime.rt0_go
			// in runtime/cgo.
			// The second can't be used by cgo programs because
			// runtime.rt0_go is unknown to them.
			// Therefore, we let ld remove this main version
			// and used the cgo generated one.
			if p.ImportPath == "runtime/cgo" && bytes.Contains(line, []byte("ld: 0711-224 WARNING: Duplicate symbol: .main")) {
				skipLines = 1
				continue
			}

			save = append(save, line)
		}
		out = bytes.Join(save, nil)
	}
	// Note that failure is an expected outcome here, so we report output only
	// in debug mode and don't report the error.
	if cfg.BuildN || cfg.BuildX {
		sh.reportCmd("", "", out, nil)
	}
	return err
}
+
+// GccCmd returns a gcc command line prefix
+// defaultCC is defined in zdefaultcc.go, written by cmd/dist.
+func (b *Builder) GccCmd(incdir, workdir string) []string {
+	return b.compilerCmd(b.ccExe(), incdir, workdir)
+}
+
+// GxxCmd returns a g++ command line prefix
+// defaultCXX is defined in zdefaultcc.go, written by cmd/dist.
+func (b *Builder) GxxCmd(incdir, workdir string) []string {
+	return b.compilerCmd(b.cxxExe(), incdir, workdir)
+}
+
+// gfortranCmd returns a gfortran command line prefix.
+func (b *Builder) gfortranCmd(incdir, workdir string) []string {
+	return b.compilerCmd(b.fcExe(), incdir, workdir)
+}
+
+// ccExe returns the CC compiler setting without all the extra flags we add implicitly.
+func (b *Builder) ccExe() []string {
+	return envList("CC", cfg.DefaultCC(cfg.Goos, cfg.Goarch))
+}
+
+// cxxExe returns the CXX compiler setting without all the extra flags we add implicitly.
+func (b *Builder) cxxExe() []string {
+	return envList("CXX", cfg.DefaultCXX(cfg.Goos, cfg.Goarch))
+}
+
+// fcExe returns the FC compiler setting without all the extra flags we add implicitly.
+func (b *Builder) fcExe() []string {
+	return envList("FC", "gfortran")
+}
+
// compilerCmd returns a command line prefix for the given environment
// variable and using the default command when the variable is empty.
// incdir is passed as an include directory; workdir (defaulting to
// b.WorkDir) is scrubbed from debug info via the prefix-map flags below.
func (b *Builder) compilerCmd(compiler []string, incdir, workdir string) []string {
	a := append(compiler, "-I", incdir)

	// Definitely want -fPIC but on Windows gcc complains
	// "-fPIC ignored for target (all code is position independent)"
	if cfg.Goos != "windows" {
		a = append(a, "-fPIC")
	}
	a = append(a, b.gccArchArgs()...)
	// gcc-4.5 and beyond require explicit "-pthread" flag
	// for multithreading with pthread library.
	if cfg.BuildContext.CgoEnabled {
		switch cfg.Goos {
		case "windows":
			a = append(a, "-mthreads")
		default:
			a = append(a, "-pthread")
		}
	}

	if cfg.Goos == "aix" {
		// mcmodel=large must always be enabled to allow large TOC.
		a = append(a, "-mcmodel=large")
	}

	// disable ASCII art in clang errors, if possible
	if b.gccSupportsFlag(compiler, "-fno-caret-diagnostics") {
		a = append(a, "-fno-caret-diagnostics")
	}
	// clang is too smart about command-line arguments
	if b.gccSupportsFlag(compiler, "-Qunused-arguments") {
		a = append(a, "-Qunused-arguments")
	}

	// zig cc passes --gc-sections to the underlying linker, which then causes
	// undefined symbol errors when compiling with cgo but without C code.
	// https://github.com/golang/go/issues/52690
	if b.gccSupportsFlag(compiler, "-Wl,--no-gc-sections") {
		a = append(a, "-Wl,--no-gc-sections")
	}

	// disable word wrapping in error messages
	a = append(a, "-fmessage-length=0")

	// Tell gcc not to include the work directory in object files.
	if b.gccSupportsFlag(compiler, "-fdebug-prefix-map=a=b") {
		if workdir == "" {
			workdir = b.WorkDir
		}
		workdir = strings.TrimSuffix(workdir, string(filepath.Separator))
		// Prefer -ffile-prefix-map when available: it also rewrites
		// paths embedded via __FILE__ and in coverage data.
		if b.gccSupportsFlag(compiler, "-ffile-prefix-map=a=b") {
			a = append(a, "-ffile-prefix-map="+workdir+"=/tmp/go-build")
		} else {
			a = append(a, "-fdebug-prefix-map="+workdir+"=/tmp/go-build")
		}
	}

	// Tell gcc not to include flags in object files, which defeats the
	// point of -fdebug-prefix-map above.
	if b.gccSupportsFlag(compiler, "-gno-record-gcc-switches") {
		a = append(a, "-gno-record-gcc-switches")
	}

	// On OS X, some of the compilers behave as if -fno-common
	// is always set, and the Mach-O linker in 6l/8l assumes this.
	// See https://golang.org/issue/3253.
	if cfg.Goos == "darwin" || cfg.Goos == "ios" {
		a = append(a, "-fno-common")
	}

	return a
}
+
+// gccNoPie returns the flag to use to request non-PIE. On systems
+// with PIE (position independent executables) enabled by default,
+// -no-pie must be passed when doing a partial link with -Wl,-r.
+// But -no-pie is not supported by all compilers, and clang spells it -nopie.
+func (b *Builder) gccNoPie(linker []string) string {
+	if b.gccSupportsFlag(linker, "-no-pie") {
+		return "-no-pie"
+	}
+	if b.gccSupportsFlag(linker, "-nopie") {
+		return "-nopie"
+	}
+	return ""
+}
+
// gccSupportsFlag checks to see if the compiler supports a flag.
func (b *Builder) gccSupportsFlag(compiler []string, flag string) bool {
	// We use the background shell for operations here because, while this is
	// triggered by some Action, it's not really about that Action, and often we
	// just get the results from the global cache.
	sh := b.BackgroundShell()

	// Memoization key for b.flagCache: the compiler executable plus the
	// probed flag.
	key := [2]string{compiler[0], flag}

	// We used to write an empty C file, but that gets complicated with go
	// build -n. We tried using a file that does not exist, but that fails on
	// systems with GCC version 4.2.1; that is the last GPLv2 version of GCC,
	// so some systems have frozen on it. Now we pass an empty file on stdin,
	// which should work at least for GCC and clang.
	//
	// If the argument is "-Wl,", then it is testing the linker. In that case,
	// skip "-c". If it's not "-Wl,", then we are testing the compiler and can
	// omit the linking step with "-c".
	//
	// Using the same CFLAGS/LDFLAGS here and for building the program.

	// On the iOS builder the command
	//   $CC -Wl,--no-gc-sections -x c - -o /dev/null < /dev/null
	// is failing with:
	//   Unable to remove existing file: Invalid argument
	tmp := os.DevNull
	if runtime.GOOS == "windows" || runtime.GOOS == "ios" {
		f, err := os.CreateTemp(b.WorkDir, "")
		if err != nil {
			return false
		}
		f.Close()
		tmp = f.Name()
		defer os.Remove(tmp)
	}

	cmdArgs := str.StringList(compiler, flag)
	if strings.HasPrefix(flag, "-Wl,") /* linker flag */ {
		ldflags, err := buildFlags("LDFLAGS", defaultCFlags, nil, checkLinkerFlags)
		if err != nil {
			return false
		}
		cmdArgs = append(cmdArgs, ldflags...)
	} else { /* compiler flag, add "-c" */
		cflags, err := buildFlags("CFLAGS", defaultCFlags, nil, checkCompilerFlags)
		if err != nil {
			return false
		}
		cmdArgs = append(cmdArgs, cflags...)
		cmdArgs = append(cmdArgs, "-c")
	}

	cmdArgs = append(cmdArgs, "-x", "c", "-", "-o", tmp)

	if cfg.BuildN {
		sh.ShowCmd(b.WorkDir, "%s || true", joinUnambiguously(cmdArgs))
		return false
	}

	// gccCompilerID acquires b.exec, so do before acquiring lock.
	compilerID, cacheOK := b.gccCompilerID(compiler[0])

	b.exec.Lock()
	defer b.exec.Unlock()
	// Note: this "b" shadows the Builder receiver; it is the cached bool.
	if b, ok := b.flagCache[key]; ok {
		return b
	}
	if b.flagCache == nil {
		b.flagCache = make(map[[2]string]bool)
	}

	// Look in build cache.
	var flagID cache.ActionID
	if cacheOK {
		flagID = cache.Subkey(compilerID, "gccSupportsFlag "+flag)
		if data, _, err := cache.GetBytes(cache.Default(), flagID); err == nil {
			supported := string(data) == "true"
			b.flagCache[key] = supported
			return supported
		}
	}

	if cfg.BuildX {
		sh.ShowCmd(b.WorkDir, "%s || true", joinUnambiguously(cmdArgs))
	}
	cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
	cmd.Dir = b.WorkDir
	cmd.Env = append(cmd.Environ(), "LC_ALL=C")
	// The run error is deliberately ignored: support is judged from the
	// diagnostic text below, not from the exit status.
	out, _ := cmd.CombinedOutput()
	// GCC says "unrecognized command line option".
	// clang says "unknown argument".
	// tcc says "unsupported"
	// AIX says "not recognized"
	// Older versions of GCC say "unrecognised debug output level".
	// For -fsplit-stack GCC says "'-fsplit-stack' is not supported".
	supported := !bytes.Contains(out, []byte("unrecognized")) &&
		!bytes.Contains(out, []byte("unknown")) &&
		!bytes.Contains(out, []byte("unrecognised")) &&
		!bytes.Contains(out, []byte("is not supported")) &&
		!bytes.Contains(out, []byte("not recognized")) &&
		!bytes.Contains(out, []byte("unsupported"))

	if cacheOK {
		s := "false"
		if supported {
			s = "true"
		}
		cache.PutBytes(cache.Default(), flagID, []byte(s))
	}

	b.flagCache[key] = supported
	return supported
}
+
+// statString returns a string form of an os.FileInfo, for serializing and comparison.
+func statString(info os.FileInfo) string {
+	return fmt.Sprintf("stat %d %x %v %v\n", info.Size(), uint64(info.Mode()), info.ModTime(), info.IsDir())
+}
+
+// gccCompilerID returns a build cache key for the current gcc,
+// as identified by running 'compiler'.
+// The caller can use subkeys of the key.
+// Other parts of cmd/go can use the id as a hash
+// of the installed compiler version.
+func (b *Builder) gccCompilerID(compiler string) (id cache.ActionID, ok bool) {
+	// We use the background shell for operations here because, while this is
+	// triggered by some Action, it's not really about that Action, and often we
+	// just get the results from the global cache.
+	sh := b.BackgroundShell()
+
+	if cfg.BuildN {
+		sh.ShowCmd(b.WorkDir, "%s || true", joinUnambiguously([]string{compiler, "--version"}))
+		return cache.ActionID{}, false
+	}
+
+	b.exec.Lock()
+	defer b.exec.Unlock()
+
+	if id, ok := b.gccCompilerIDCache[compiler]; ok {
+		return id, ok
+	}
+
+	// We hash the compiler's full path to get a cache entry key.
+	// That cache entry holds a validation description,
+	// which is of the form:
+	//
+	//	filename \x00 statinfo \x00
+	//	...
+	//	compiler id
+	//
+	// If os.Stat of each filename matches statinfo,
+	// then the entry is still valid, and we can use the
+	// compiler id without any further expense.
+	//
+	// Otherwise, we compute a new validation description
+	// and compiler id (below).
+	exe, err := cfg.LookPath(compiler)
+	if err != nil {
+		return cache.ActionID{}, false
+	}
+
+	h := cache.NewHash("gccCompilerID")
+	fmt.Fprintf(h, "gccCompilerID %q", exe)
+	key := h.Sum()
+	data, _, err := cache.GetBytes(cache.Default(), key)
+	if err == nil && len(data) > len(id) {
+		stats := strings.Split(string(data[:len(data)-len(id)]), "\x00")
+		if len(stats)%2 != 0 {
+			goto Miss
+		}
+		for i := 0; i+2 <= len(stats); i++ {
+			info, err := os.Stat(stats[i])
+			if err != nil || statString(info) != stats[i+1] {
+				goto Miss
+			}
+		}
+		copy(id[:], data[len(data)-len(id):])
+		return id, true
+	Miss:
+	}
+
+	// Validation failed. Compute a new description (in buf) and compiler ID (in h).
+	// For now, there are only at most two filenames in the stat information.
+	// The first one is the compiler executable we invoke.
+	// The second is the underlying compiler as reported by -v -###
+	// (see b.gccToolID implementation in buildid.go).
+	toolID, exe2, err := b.gccToolID(compiler, "c")
+	if err != nil {
+		return cache.ActionID{}, false
+	}
+
+	exes := []string{exe, exe2}
+	str.Uniq(&exes)
+	fmt.Fprintf(h, "gccCompilerID %q %q\n", exes, toolID)
+	id = h.Sum()
+
+	var buf bytes.Buffer
+	for _, exe := range exes {
+		if exe == "" {
+			continue
+		}
+		info, err := os.Stat(exe)
+		if err != nil {
+			return cache.ActionID{}, false
+		}
+		buf.WriteString(exe)
+		buf.WriteString("\x00")
+		buf.WriteString(statString(info))
+		buf.WriteString("\x00")
+	}
+	buf.Write(id[:])
+
+	cache.PutBytes(cache.Default(), key, buf.Bytes())
+	if b.gccCompilerIDCache == nil {
+		b.gccCompilerIDCache = make(map[string]cache.ActionID)
+	}
+	b.gccCompilerIDCache[compiler] = id
+	return id, true
+}
+
+// gccArchArgs returns arguments to pass to gcc based on the architecture.
+func (b *Builder) gccArchArgs() []string {
+	switch cfg.Goarch {
+	case "386":
+		return []string{"-m32"}
+	case "amd64":
+		if cfg.Goos == "darwin" {
+			return []string{"-arch", "x86_64", "-m64"}
+		}
+		return []string{"-m64"}
+	case "arm64":
+		if cfg.Goos == "darwin" {
+			return []string{"-arch", "arm64"}
+		}
+	case "arm":
+		return []string{"-marm"} // not thumb
+	case "s390x":
+		return []string{"-m64", "-march=z196"}
+	case "mips64", "mips64le":
+		args := []string{"-mabi=64"}
+		if cfg.GOMIPS64 == "hardfloat" {
+			return append(args, "-mhard-float")
+		} else if cfg.GOMIPS64 == "softfloat" {
+			return append(args, "-msoft-float")
+		}
+	case "mips", "mipsle":
+		args := []string{"-mabi=32", "-march=mips32"}
+		if cfg.GOMIPS == "hardfloat" {
+			return append(args, "-mhard-float", "-mfp32", "-mno-odd-spreg")
+		} else if cfg.GOMIPS == "softfloat" {
+			return append(args, "-msoft-float")
+		}
+	case "loong64":
+		return []string{"-mabi=lp64d"}
+	case "ppc64":
+		if cfg.Goos == "aix" {
+			return []string{"-maix64"}
+		}
+	}
+	return nil
+}
+
+// envList returns the value of the given environment variable broken
+// into fields, using the default value when the variable is empty.
+//
+// The environment variable must be quoted correctly for
+// quoted.Split. This should be done before building
+// anything, for example, in BuildInit.
+func envList(key, def string) []string {
+	v := cfg.Getenv(key)
+	if v == "" {
+		v = def
+	}
+	args, err := quoted.Split(v)
+	if err != nil {
+		panic(fmt.Sprintf("could not parse environment variable %s with value %q: %v", key, v, err))
+	}
+	return args
+}
+
+// CFlags returns the flags to use when invoking the C, C++ or Fortran compilers, or cgo.
+func (b *Builder) CFlags(p *load.Package) (cppflags, cflags, cxxflags, fflags, ldflags []string, err error) {
+	if cppflags, err = buildFlags("CPPFLAGS", "", p.CgoCPPFLAGS, checkCompilerFlags); err != nil {
+		return
+	}
+	if cflags, err = buildFlags("CFLAGS", defaultCFlags, p.CgoCFLAGS, checkCompilerFlags); err != nil {
+		return
+	}
+	if cxxflags, err = buildFlags("CXXFLAGS", defaultCFlags, p.CgoCXXFLAGS, checkCompilerFlags); err != nil {
+		return
+	}
+	if fflags, err = buildFlags("FFLAGS", defaultCFlags, p.CgoFFLAGS, checkCompilerFlags); err != nil {
+		return
+	}
+	if ldflags, err = buildFlags("LDFLAGS", defaultCFlags, p.CgoLDFLAGS, checkLinkerFlags); err != nil {
+		return
+	}
+
+	return
+}
+
+func buildFlags(name, defaults string, fromPackage []string, check func(string, string, []string) error) ([]string, error) {
+	if err := check(name, "#cgo "+name, fromPackage); err != nil {
+		return nil, err
+	}
+	return str.StringList(envList("CGO_"+name, defaults), fromPackage), nil
+}
+
+var cgoRe = lazyregexp.New(`[/\\:]`)
+
// cgo runs the cgo tool on the package's cgo files, compiles the
// generated C sources together with the accompanying C/C++/Objective-C/
// Fortran sources, and returns the generated Go files (outGo) and the
// object files (outObj) to include in the package.
func (b *Builder) cgo(a *Action, cgoExe, objdir string, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, gxxfiles, mfiles, ffiles []string) (outGo, outObj []string, err error) {
	p := a.Package
	sh := b.Shell(a)

	cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, cgoFFLAGS, cgoLDFLAGS, err := b.CFlags(p)
	if err != nil {
		return nil, nil, err
	}

	cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...)
	cgoLDFLAGS = append(cgoLDFLAGS, pcLDFLAGS...)
	// If we are compiling Objective-C code, then we need to link against libobjc
	if len(mfiles) > 0 {
		cgoLDFLAGS = append(cgoLDFLAGS, "-lobjc")
	}

	// Likewise for Fortran, except there are many Fortran compilers.
	// Support gfortran out of the box and let others pass the correct link options
	// via CGO_LDFLAGS
	if len(ffiles) > 0 {
		fc := cfg.Getenv("FC")
		if fc == "" {
			fc = "gfortran"
		}
		if strings.Contains(fc, "gfortran") {
			cgoLDFLAGS = append(cgoLDFLAGS, "-lgfortran")
		}
	}

	// Scrutinize CFLAGS and related for flags that might cause
	// problems if we are using internal linking (for example, use of
	// plugins, LTO, etc) by calling a helper routine that builds on
	// the existing CGO flags allow-lists. If we see anything
	// suspicious, emit a special token file "preferlinkext" (known to
	// the linker) in the object file to signal the that it should not
	// try to link internally and should revert to external linking.
	// The token we pass is a suggestion, not a mandate; if a user is
	// explicitly asking for a specific linkmode via the "-linkmode"
	// flag, the token will be ignored. NB: in theory we could ditch
	// the token approach and just pass a flag to the linker when we
	// eventually invoke it, and the linker flag could then be
	// documented (although coming up with a simple explanation of the
	// flag might be challenging). For more context see issues #58619,
	// #58620, and #58848.
	flagSources := []string{"CGO_CFLAGS", "CGO_CXXFLAGS", "CGO_FFLAGS"}
	flagLists := [][]string{cgoCFLAGS, cgoCXXFLAGS, cgoFFLAGS}
	if flagsNotCompatibleWithInternalLinking(flagSources, flagLists) {
		tokenFile := objdir + "preferlinkext"
		if err := sh.writeFile(tokenFile, nil); err != nil {
			return nil, nil, err
		}
		outObj = append(outObj, tokenFile)
	}

	// Prepend sanitizer flags so they take effect for all compiles and links.
	if cfg.BuildMSan {
		cgoCFLAGS = append([]string{"-fsanitize=memory"}, cgoCFLAGS...)
		cgoLDFLAGS = append([]string{"-fsanitize=memory"}, cgoLDFLAGS...)
	}
	if cfg.BuildASan {
		cgoCFLAGS = append([]string{"-fsanitize=address"}, cgoCFLAGS...)
		cgoLDFLAGS = append([]string{"-fsanitize=address"}, cgoLDFLAGS...)
	}

	// Allows including _cgo_export.h, as well as the user's .h files,
	// from .[ch] files in the package.
	cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", objdir)

	// cgo
	// TODO: CGO_FLAGS?
	gofiles := []string{objdir + "_cgo_gotypes.go"}
	cfiles := []string{"_cgo_export.c"}
	for _, fn := range cgofiles {
		f := strings.TrimSuffix(filepath.Base(fn), ".go")
		gofiles = append(gofiles, objdir+f+".cgo1.go")
		cfiles = append(cfiles, f+".cgo2.c")
	}

	// TODO: make cgo not depend on $GOARCH?

	cgoflags := []string{}
	if p.Standard && p.ImportPath == "runtime/cgo" {
		cgoflags = append(cgoflags, "-import_runtime_cgo=false")
	}
	if p.Standard && (p.ImportPath == "runtime/race" || p.ImportPath == "runtime/msan" || p.ImportPath == "runtime/cgo" || p.ImportPath == "runtime/asan") {
		cgoflags = append(cgoflags, "-import_syscall=false")
	}

	// Update $CGO_LDFLAGS with p.CgoLDFLAGS.
	// These flags are recorded in the generated _cgo_gotypes.go file
	// using //go:cgo_ldflag directives, the compiler records them in the
	// object file for the package, and then the Go linker passes them
	// along to the host linker. At this point in the code, cgoLDFLAGS
	// consists of the original $CGO_LDFLAGS (unchecked) and all the
	// flags put together from source code (checked).
	cgoenv := b.cCompilerEnv()
	if len(cgoLDFLAGS) > 0 {
		flags := make([]string, len(cgoLDFLAGS))
		for i, f := range cgoLDFLAGS {
			flags[i] = strconv.Quote(f)
		}
		cgoenv = append(cgoenv, "CGO_LDFLAGS="+strings.Join(flags, " "))
	}

	if cfg.BuildToolchainName == "gccgo" {
		if b.gccSupportsFlag([]string{BuildToolchain.compiler()}, "-fsplit-stack") {
			cgoCFLAGS = append(cgoCFLAGS, "-fsplit-stack")
		}
		cgoflags = append(cgoflags, "-gccgo")
		if pkgpath := gccgoPkgpath(p); pkgpath != "" {
			cgoflags = append(cgoflags, "-gccgopkgpath="+pkgpath)
		}
		if !BuildToolchain.(gccgoToolchain).supportsCgoIncomplete(b, a) {
			cgoflags = append(cgoflags, "-gccgo_define_cgoincomplete")
		}
	}

	switch cfg.BuildBuildmode {
	case "c-archive", "c-shared":
		// Tell cgo that if there are any exported functions
		// it should generate a header file that C code can
		// #include.
		cgoflags = append(cgoflags, "-exportheader="+objdir+"_cgo_install.h")
	}

	// Rewrite overlaid paths in cgo files.
	// cgo adds //line and #line pragmas in generated files with these paths.
	var trimpath []string
	for i := range cgofiles {
		path := mkAbs(p.Dir, cgofiles[i])
		if opath, ok := fsys.OverlayPath(path); ok {
			cgofiles[i] = opath
			trimpath = append(trimpath, opath+"=>"+path)
		}
	}
	if len(trimpath) > 0 {
		cgoflags = append(cgoflags, "-trimpath", strings.Join(trimpath, ";"))
	}

	// Run the cgo tool itself to generate the Go and C sources.
	if err := sh.run(p.Dir, p.ImportPath, cgoenv, cfg.BuildToolexec, cgoExe, "-objdir", objdir, "-importpath", p.ImportPath, cgoflags, "--", cgoCPPFLAGS, cgoCFLAGS, cgofiles); err != nil {
		return nil, nil, err
	}
	outGo = append(outGo, gofiles...)

	// Use sequential object file names to keep them distinct
	// and short enough to fit in the .a header file name slots.
	// We no longer collect them all into _all.o, and we'd like
	// tools to see both the .o suffix and unique names, so
	// we need to make them short enough not to be truncated
	// in the final archive.
	oseq := 0
	nextOfile := func() string {
		oseq++
		return objdir + fmt.Sprintf("_x%03d.o", oseq)
	}

	// gcc
	cflags := str.StringList(cgoCPPFLAGS, cgoCFLAGS)
	for _, cfile := range cfiles {
		ofile := nextOfile()
		if err := b.gcc(a, a.Objdir, ofile, cflags, objdir+cfile); err != nil {
			return nil, nil, err
		}
		outObj = append(outObj, ofile)
	}

	for _, file := range gccfiles {
		ofile := nextOfile()
		if err := b.gcc(a, a.Objdir, ofile, cflags, file); err != nil {
			return nil, nil, err
		}
		outObj = append(outObj, ofile)
	}

	cxxflags := str.StringList(cgoCPPFLAGS, cgoCXXFLAGS)
	for _, file := range gxxfiles {
		ofile := nextOfile()
		if err := b.gxx(a, a.Objdir, ofile, cxxflags, file); err != nil {
			return nil, nil, err
		}
		outObj = append(outObj, ofile)
	}

	// Objective-C files are compiled with the C compiler and C flags.
	for _, file := range mfiles {
		ofile := nextOfile()
		if err := b.gcc(a, a.Objdir, ofile, cflags, file); err != nil {
			return nil, nil, err
		}
		outObj = append(outObj, ofile)
	}

	fflags := str.StringList(cgoCPPFLAGS, cgoFFLAGS)
	for _, file := range ffiles {
		ofile := nextOfile()
		if err := b.gfortran(a, a.Objdir, ofile, fflags, file); err != nil {
			return nil, nil, err
		}
		outObj = append(outObj, ofile)
	}

	switch cfg.BuildToolchainName {
	case "gc":
		importGo := objdir + "_cgo_import.go"
		dynOutGo, dynOutObj, err := b.dynimport(a, objdir, importGo, cgoExe, cflags, cgoLDFLAGS, outObj)
		if err != nil {
			return nil, nil, err
		}
		if dynOutGo != "" {
			outGo = append(outGo, dynOutGo)
		}
		if dynOutObj != "" {
			outObj = append(outObj, dynOutObj)
		}

	case "gccgo":
		defunC := objdir + "_cgo_defun.c"
		defunObj := objdir + "_cgo_defun.o"
		if err := BuildToolchain.cc(b, a, defunObj, defunC); err != nil {
			return nil, nil, err
		}
		outObj = append(outObj, defunObj)

	default:
		noCompiler()
	}

	// Double check the //go:cgo_ldflag comments in the generated files.
	// The compiler only permits such comments in files whose base name
	// starts with "_cgo_". Make sure that the comments in those files
	// are safe. This is a backstop against people somehow smuggling
	// such a comment into a file generated by cgo.
	if cfg.BuildToolchainName == "gc" && !cfg.BuildN {
		var flags []string
		for _, f := range outGo {
			if !strings.HasPrefix(filepath.Base(f), "_cgo_") {
				continue
			}

			src, err := os.ReadFile(f)
			if err != nil {
				return nil, nil, err
			}

			const cgoLdflag = "//go:cgo_ldflag"
			idx := bytes.Index(src, []byte(cgoLdflag))
			for idx >= 0 {
				// We are looking at //go:cgo_ldflag.
				// Find start of line.
				start := bytes.LastIndex(src[:idx], []byte("\n"))
				if start == -1 {
					start = 0
				}

				// Find end of line.
				end := bytes.Index(src[idx:], []byte("\n"))
				if end == -1 {
					end = len(src)
				} else {
					end += idx
				}

				// Check for first line comment in line.
				// We don't worry about /* */ comments,
				// which normally won't appear in files
				// generated by cgo.
				commentStart := bytes.Index(src[start:], []byte("//"))
				commentStart += start
				// If that line comment is //go:cgo_ldflag,
				// it's a match.
				if bytes.HasPrefix(src[commentStart:], []byte(cgoLdflag)) {
					// Pull out the flag, and unquote it.
					// This is what the compiler does.
					flag := string(src[idx+len(cgoLdflag) : end])
					flag = strings.TrimSpace(flag)
					flag = strings.Trim(flag, `"`)
					flags = append(flags, flag)
				}
				src = src[end:]
				idx = bytes.Index(src, []byte(cgoLdflag))
			}
		}

		// We expect to find the contents of cgoLDFLAGS in flags.
		// NOTE(review): flags[i+j] can index past len(flags) when a
		// partial match begins near the end of flags; in practice the
		// full cgoLDFLAGS run is present, but a bounds check here
		// would be safer — confirm against callers.
		if len(cgoLDFLAGS) > 0 {
		outer:
			for i := range flags {
				for j, f := range cgoLDFLAGS {
					if f != flags[i+j] {
						continue outer
					}
				}
				flags = append(flags[:i], flags[i+len(cgoLDFLAGS):]...)
				break
			}
		}

		if err := checkLinkerFlags("LDFLAGS", "go:cgo_ldflag", flags); err != nil {
			return nil, nil, err
		}
	}

	return outGo, outObj, nil
}
+
+// flagsNotCompatibleWithInternalLinking scans the list of cgo
+// compiler flags (C/C++/Fortran) looking for flags that might cause
+// problems if the build in question uses internal linking. The
+// primary culprits are use of plugins or use of LTO, but we err on
+// the side of caution, supporting only those flags that are on the
+// allow-list for safe flags from security perspective. Return is TRUE
+// if a sensitive flag is found, FALSE otherwise.
+func flagsNotCompatibleWithInternalLinking(sourceList []string, flagListList [][]string) bool {
+	for i := range sourceList {
+		sn := sourceList[i]
+		fll := flagListList[i]
+		if err := checkCompilerFlagsForInternalLink(sn, sn, fll); err != nil {
+			return true
+		}
+	}
+	return false
+}
+
// dynimport creates a Go source file named importGo containing
// //go:cgo_import_dynamic directives for each symbol or library
// dynamically imported by the object files outObj.
// dynOutGo, if not empty, is a new Go file to build as part of the package.
// dynOutObj, if not empty, is a new file to add to the generated archive.
func (b *Builder) dynimport(a *Action, objdir, importGo, cgoExe string, cflags, cgoLDFLAGS, outObj []string) (dynOutGo, dynOutObj string, err error) {
	p := a.Package
	sh := b.Shell(a)

	// Compile the cgo-generated C main stub so the objects below can be
	// linked into a complete (dynamic) executable for inspection.
	cfile := objdir + "_cgo_main.c"
	ofile := objdir + "_cgo_main.o"
	if err := b.gcc(a, objdir, ofile, cflags, cfile); err != nil {
		return "", "", err
	}

	// Gather .syso files from this package and all (transitive) dependencies.
	var syso []string
	seen := make(map[*Action]bool)
	var gatherSyso func(*Action)
	gatherSyso = func(a1 *Action) {
		if seen[a1] {
			return
		}
		seen[a1] = true
		if p1 := a1.Package; p1 != nil {
			syso = append(syso, mkAbsFiles(p1.Dir, p1.SysoFiles)...)
		}
		for _, a2 := range a1.Deps {
			gatherSyso(a2)
		}
	}
	gatherSyso(a)
	// Sort and deduplicate for a deterministic link command line.
	sort.Strings(syso)
	str.Uniq(&syso)
	linkobj := str.StringList(ofile, outObj, syso)
	dynobj := objdir + "_cgo_.o"

	ldflags := cgoLDFLAGS
	if (cfg.Goarch == "arm" && cfg.Goos == "linux") || cfg.Goos == "android" {
		if !str.Contains(ldflags, "-no-pie") {
			// we need to use -pie for Linux/ARM to get accurate imported sym (added in https://golang.org/cl/5989058)
			// this seems to be outdated, but we don't want to break existing builds depending on this (Issue 45940)
			ldflags = append(ldflags, "-pie")
		}
		if str.Contains(ldflags, "-pie") && str.Contains(ldflags, "-static") {
			// -static -pie doesn't make sense, and causes link errors.
			// Issue 26197.
			n := make([]string, 0, len(ldflags)-1)
			for _, flag := range ldflags {
				if flag != "-static" {
					n = append(n, flag)
				}
			}
			ldflags = n
		}
	}
	if err := b.gccld(a, objdir, dynobj, ldflags, linkobj); err != nil {
		// We only need this information for internal linking.
		// If this link fails, mark the object as requiring
		// external linking. This link can fail for things like
		// syso files that have unexpected dependencies.
		// cmd/link explicitly looks for the name "dynimportfail".
		// See issue #52863.
		fail := objdir + "dynimportfail"
		if err := sh.writeFile(fail, nil); err != nil {
			return "", "", err
		}
		return "", fail, nil
	}

	// cgo -dynimport
	var cgoflags []string
	if p.Standard && p.ImportPath == "runtime/cgo" {
		cgoflags = []string{"-dynlinker"} // record path to dynamic linker
	}
	err = sh.run(base.Cwd(), p.ImportPath, b.cCompilerEnv(), cfg.BuildToolexec, cgoExe, "-dynpackage", p.Name, "-dynimport", dynobj, "-dynout", importGo, cgoflags)
	if err != nil {
		return "", "", err
	}
	return importGo, "", nil
}
+
+// Run SWIG on all SWIG input files.
+// TODO: Don't build a shared library, once SWIG emits the necessary
+// pragmas for external linking.
+func (b *Builder) swig(a *Action, objdir string, pcCFLAGS []string) (outGo, outC, outCXX []string, err error) {
+	p := a.Package
+
+	if err := b.swigVersionCheck(); err != nil {
+		return nil, nil, nil, err
+	}
+
+	intgosize, err := b.swigIntSize(objdir)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	for _, f := range p.SwigFiles {
+		goFile, cFile, err := b.swigOne(a, f, objdir, pcCFLAGS, false, intgosize)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		if goFile != "" {
+			outGo = append(outGo, goFile)
+		}
+		if cFile != "" {
+			outC = append(outC, cFile)
+		}
+	}
+	for _, f := range p.SwigCXXFiles {
+		goFile, cxxFile, err := b.swigOne(a, f, objdir, pcCFLAGS, true, intgosize)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		if goFile != "" {
+			outGo = append(outGo, goFile)
+		}
+		if cxxFile != "" {
+			outCXX = append(outCXX, cxxFile)
+		}
+	}
+	return outGo, outC, outCXX, nil
+}
+
// Make sure SWIG is new enough.
var (
	swigCheckOnce sync.Once // guards the one-time SWIG version probe
	swigCheck     error     // cached result of swigDoVersionCheck
)
+
// swigDoVersionCheck runs `swig -version` and rejects versions older
// than 3.0.6. If the version number cannot be found or parsed, the
// check passes, on the assumption that a newer SWIG may format its
// version differently.
func (b *Builder) swigDoVersionCheck() error {
	sh := b.BackgroundShell()
	out, err := sh.runOut(".", nil, "swig", "-version")
	if err != nil {
		return err
	}
	// Captures major, then optional ".minor" and ".patch" groups
	// (the latter two include the leading dot).
	re := regexp.MustCompile(`[vV]ersion +(\d+)([.]\d+)?([.]\d+)?`)
	matches := re.FindSubmatch(out)
	if matches == nil {
		// Can't find version number; hope for the best.
		return nil
	}

	major, err := strconv.Atoi(string(matches[1]))
	if err != nil {
		// Can't find version number; hope for the best.
		return nil
	}
	const errmsg = "must have SWIG version >= 3.0.6"
	if major < 3 {
		return errors.New(errmsg)
	}
	if major > 3 {
		// 4.0 or later
		return nil
	}

	// We have SWIG version 3.x.
	if len(matches[2]) > 0 {
		// matches[2] includes the leading '.', hence the [1:].
		minor, err := strconv.Atoi(string(matches[2][1:]))
		if err != nil {
			return nil
		}
		if minor > 0 {
			// 3.1 or later
			return nil
		}
	}

	// We have SWIG version 3.0.x.
	if len(matches[3]) > 0 {
		patch, err := strconv.Atoi(string(matches[3][1:]))
		if err != nil {
			return nil
		}
		if patch < 6 {
			// Before 3.0.6.
			return errors.New(errmsg)
		}
	}

	return nil
}
+
// swigVersionCheck runs the SWIG version probe at most once per
// process and returns the cached result on subsequent calls.
func (b *Builder) swigVersionCheck() error {
	swigCheckOnce.Do(func() {
		swigCheck = b.swigDoVersionCheck()
	})
	return swigCheck
}
+
// Find the value to pass for the -intgosize option to swig.
var (
	swigIntSizeOnce  sync.Once // guards the one-time int-size probe
	swigIntSize      string    // cached probe result: "32", "64", or "$INTBITS" under -n
	swigIntSizeError error     // cached error from the probe
)

// This code fails to build if sizeof(int) <= 32
const swigIntSizeCode = `
package main
const i int = 1 << 32
`
+
// Determine the size of int on the target system for the -intgosize option
// of swig >= 2.0.9. Run only once.
//
// The probe compiles swigIntSizeCode, which overflows a 32-bit int:
// compilation success means int is 64 bits, failure means 32.
func (b *Builder) swigDoIntSize(objdir string) (intsize string, err error) {
	if cfg.BuildN {
		// Dry run: emit a shell-style placeholder instead of compiling.
		return "$INTBITS", nil
	}
	src := filepath.Join(b.WorkDir, "swig_intsize.go")
	if err = os.WriteFile(src, []byte(swigIntSizeCode), 0666); err != nil {
		return
	}
	srcs := []string{src}

	p := load.GoFilesPackage(context.TODO(), load.PackageOpts{}, srcs)

	if _, _, e := BuildToolchain.gc(b, &Action{Mode: "swigDoIntSize", Package: p, Objdir: objdir}, "", nil, nil, "", false, srcs); e != nil {
		return "32", nil
	}
	return "64", nil
}
+
// Determine the size of int on the target system for the -intgosize option
// of swig >= 2.0.9.
//
// The result of the first probe is cached for the life of the process.
func (b *Builder) swigIntSize(objdir string) (intsize string, err error) {
	swigIntSizeOnce.Do(func() {
		swigIntSize, swigIntSizeError = b.swigDoIntSize(objdir)
	})
	return swigIntSize, swigIntSizeError
}
+
// Run SWIG on one SWIG input file.
//
// file is the .swig or .swigcxx input (cxx selects C++ mode); the
// generated Go file and C/C++ wrapper are written into objdir.
// Returns the (renamed) generated Go file and the wrapper source path.
func (b *Builder) swigOne(a *Action, file, objdir string, pcCFLAGS []string, cxx bool, intgosize string) (outGo, outC string, err error) {
	p := a.Package
	sh := b.Shell(a)

	cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, _, _, err := b.CFlags(p)
	if err != nil {
		return "", "", err
	}

	var cflags []string
	if cxx {
		cflags = str.StringList(cgoCPPFLAGS, pcCFLAGS, cgoCXXFLAGS)
	} else {
		cflags = str.StringList(cgoCPPFLAGS, pcCFLAGS, cgoCFLAGS)
	}

	// Derive the module name by stripping the input's extension.
	n := 5 // length of ".swig"
	if cxx {
		n = 8 // length of ".swigcxx"
	}
	base := file[:len(file)-n]
	goFile := base + ".go"
	gccBase := base + "_wrap."
	gccExt := "c"
	if cxx {
		gccExt = "cxx"
	}

	gccgo := cfg.BuildToolchainName == "gccgo"

	// swig
	args := []string{
		"-go",
		"-cgo",
		"-intgosize", intgosize,
		"-module", base,
		"-o", objdir + gccBase + gccExt,
		"-outdir", objdir,
	}

	// Pass through only include-path flags; SWIG does not need the rest.
	for _, f := range cflags {
		if len(f) > 3 && f[:2] == "-I" {
			args = append(args, f)
		}
	}

	if gccgo {
		args = append(args, "-gccgo")
		if pkgpath := gccgoPkgpath(p); pkgpath != "" {
			args = append(args, "-go-pkgpath", pkgpath)
		}
	}
	if cxx {
		args = append(args, "-c++")
	}

	out, err := sh.runOut(p.Dir, nil, "swig", args, file)
	// Complaints about -intgosize or -cgo indicate a SWIG too old to
	// know those options; report the version requirement instead.
	if err != nil && (bytes.Contains(out, []byte("-intgosize")) || bytes.Contains(out, []byte("-cgo"))) {
		return "", "", errors.New("must have SWIG version >= 3.0.6")
	}
	if err := sh.reportCmd("", "", out, err); err != nil {
		return "", "", err
	}

	// If the input was x.swig, the output is x.go in the objdir.
	// But there might be an x.go in the original dir too, and if it
	// uses cgo as well, cgo will be processing both and will
	// translate both into x.cgo1.go in the objdir, overwriting one.
	// Rename x.go to _x_swig.go to avoid this problem.
	// We ignore files in the original dir that begin with underscore
	// so _x_swig.go cannot conflict with an original file we were
	// going to compile.
	goFile = objdir + goFile
	newGoFile := objdir + "_" + base + "_swig.go"
	if cfg.BuildX || cfg.BuildN {
		sh.ShowCmd("", "mv %s %s", goFile, newGoFile)
	}
	if !cfg.BuildN {
		if err := os.Rename(goFile, newGoFile); err != nil {
			return "", "", err
		}
	}
	return newGoFile, objdir + gccBase + gccExt, nil
}
+
+// disableBuildID adjusts a linker command line to avoid creating a
+// build ID when creating an object file rather than an executable or
+// shared library. Some systems, such as Ubuntu, always add
+// --build-id to every link, but we don't want a build ID when we are
+// producing an object file. On some of those system a plain -r (not
+// -Wl,-r) will turn off --build-id, but clang 3.0 doesn't support a
+// plain -r. I don't know how to turn off --build-id when using clang
+// other than passing a trailing --build-id=none. So that is what we
+// do, but only on systems likely to support it, which is to say,
+// systems that normally use gold or the GNU linker.
+func (b *Builder) disableBuildID(ldflags []string) []string {
+	switch cfg.Goos {
+	case "android", "dragonfly", "linux", "netbsd":
+		ldflags = append(ldflags, "-Wl,--build-id=none")
+	}
+	return ldflags
+}
+
+// mkAbsFiles converts files into a list of absolute files,
+// assuming they were originally relative to dir,
+// and returns that new list.
+func mkAbsFiles(dir string, files []string) []string {
+	abs := make([]string, len(files))
+	for i, f := range files {
+		if !filepath.IsAbs(f) {
+			f = filepath.Join(dir, f)
+		}
+		abs[i] = f
+	}
+	return abs
+}
+
+// passLongArgsInResponseFiles modifies cmd such that, for
+// certain programs, long arguments are passed in "response files", a
+// file on disk with the arguments, with one arg per line. An actual
+// argument starting with '@' means that the rest of the argument is
+// a filename of arguments to expand.
+//
+// See issues 18468 (Windows) and 37768 (Darwin).
+func passLongArgsInResponseFiles(cmd *exec.Cmd) (cleanup func()) {
+	cleanup = func() {} // no cleanup by default
+
+	var argLen int
+	for _, arg := range cmd.Args {
+		argLen += len(arg)
+	}
+
+	// If we're not approaching 32KB of args, just pass args normally.
+	// (use 30KB instead to be conservative; not sure how accounting is done)
+	if !useResponseFile(cmd.Path, argLen) {
+		return
+	}
+
+	tf, err := os.CreateTemp("", "args")
+	if err != nil {
+		log.Fatalf("error writing long arguments to response file: %v", err)
+	}
+	cleanup = func() { os.Remove(tf.Name()) }
+	var buf bytes.Buffer
+	for _, arg := range cmd.Args[1:] {
+		fmt.Fprintf(&buf, "%s\n", encodeArg(arg))
+	}
+	if _, err := tf.Write(buf.Bytes()); err != nil {
+		tf.Close()
+		cleanup()
+		log.Fatalf("error writing long arguments to response file: %v", err)
+	}
+	if err := tf.Close(); err != nil {
+		cleanup()
+		log.Fatalf("error writing long arguments to response file: %v", err)
+	}
+	cmd.Args = []string{cmd.Args[0], "@" + tf.Name()}
+	return cleanup
+}
+
+func useResponseFile(path string, argLen int) bool {
+	// Unless the program uses objabi.Flagparse, which understands
+	// response files, don't use response files.
+	// TODO: Note that other toolchains like CC are missing here for now.
+	prog := strings.TrimSuffix(filepath.Base(path), ".exe")
+	switch prog {
+	case "compile", "link", "cgo", "asm", "cover":
+	default:
+		return false
+	}
+
+	if argLen > sys.ExecArgLengthLimit {
+		return true
+	}
+
+	// On the Go build system, use response files about 10% of the
+	// time, just to exercise this codepath.
+	isBuilder := os.Getenv("GO_BUILDER_NAME") != ""
+	if isBuilder && rand.Intn(10) == 0 {
+		return true
+	}
+
+	return false
+}
+
// encodeArg encodes an argument for response file writing.
//
// Backslashes become `\\` and newlines become `\n`, so that each
// argument occupies exactly one line of the response file and can be
// decoded losslessly on the other side.
func encodeArg(arg string) string {
	// Fast path: most arguments contain nothing that needs escaping.
	if !strings.ContainsAny(arg, "\\\n") {
		return arg
	}
	var sb strings.Builder
	sb.Grow(len(arg) + 8)
	for _, r := range arg {
		switch r {
		case '\\':
			sb.WriteString(`\\`)
		case '\n':
			sb.WriteString(`\n`)
		default:
			sb.WriteRune(r)
		}
	}
	return sb.String()
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/work/exec_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/exec_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..8bbf25bb337e8b44e49cf56ae3dd8dfba4aafcf7
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/exec_test.go
@@ -0,0 +1,87 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+	"bytes"
+	"cmd/internal/objabi"
+	"cmd/internal/sys"
+	"fmt"
+	"math/rand"
+	"testing"
+	"time"
+	"unicode/utf8"
+)
+
+func TestEncodeArgs(t *testing.T) {
+	t.Parallel()
+	tests := []struct {
+		arg, want string
+	}{
+		{"", ""},
+		{"hello", "hello"},
+		{"hello\n", "hello\\n"},
+		{"hello\\", "hello\\\\"},
+		{"hello\nthere", "hello\\nthere"},
+		{"\\\n", "\\\\\\n"},
+	}
+	for _, test := range tests {
+		if got := encodeArg(test.arg); got != test.want {
+			t.Errorf("encodeArg(%q) = %q, want %q", test.arg, got, test.want)
+		}
+	}
+}
+
+func TestEncodeDecode(t *testing.T) {
+	t.Parallel()
+	tests := []string{
+		"",
+		"hello",
+		"hello\\there",
+		"hello\nthere",
+		"hello 中国",
+		"hello \n中\\国",
+	}
+	for _, arg := range tests {
+		if got := objabi.DecodeArg(encodeArg(arg)); got != arg {
+			t.Errorf("objabi.DecodeArg(encodeArg(%q)) = %q", arg, got)
+		}
+	}
+}
+
// TestEncodeDecodeFuzz round-trips random strings longer than the
// response-file threshold through encodeArg/DecodeArg, logging the
// seed so failures are reproducible.
func TestEncodeDecodeFuzz(t *testing.T) {
	if testing.Short() {
		t.Skip("fuzz test is slow")
	}
	t.Parallel()

	// Pre-size the buffer past the limit so the generation loop below
	// never has to grow it.
	nRunes := sys.ExecArgLengthLimit + 100
	rBuffer := make([]rune, nRunes)
	buf := bytes.NewBuffer([]byte(string(rBuffer)))

	seed := time.Now().UnixNano()
	t.Logf("rand seed: %v", seed)
	rng := rand.New(rand.NewSource(seed))

	for i := 0; i < 50; i++ {
		// Generate a random string of runes.
		buf.Reset()
		for buf.Len() < sys.ExecArgLengthLimit+1 {
			var r rune
			for {
				// Rejection-sample until we draw a valid rune
				// (skips surrogate halves and out-of-range values).
				r = rune(rng.Intn(utf8.MaxRune + 1))
				if utf8.ValidRune(r) {
					break
				}
			}
			fmt.Fprintf(buf, "%c", r)
		}
		arg := buf.String()

		if got := objabi.DecodeArg(encodeArg(arg)); got != arg {
			t.Errorf("[%d] objabi.DecodeArg(encodeArg(%q)) = %q [seed: %v]", i, arg, got, seed)
		}
	}
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/work/gc.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/gc.go
new file mode 100644
index 0000000000000000000000000000000000000000..e2a5456bdec808f12da303235b94f3759f8e9196
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/gc.go
@@ -0,0 +1,708 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"internal/platform"
+	"io"
+	"log"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"cmd/go/internal/base"
+	"cmd/go/internal/cfg"
+	"cmd/go/internal/fsys"
+	"cmd/go/internal/gover"
+	"cmd/go/internal/load"
+	"cmd/go/internal/str"
+	"cmd/internal/quoted"
+	"crypto/sha1"
+)
+
// Tests can override this by setting $TESTGO_TOOLCHAIN_VERSION.
var ToolchainVersion = runtime.Version()

// The 'path' used for GOROOT_FINAL when -trimpath is specified
const trimPathGoRootFinal string = "$GOROOT"

// gcToolchain is the standard Go ("gc") toolchain: it compiles with
// cmd/compile and links with cmd/link.
type gcToolchain struct{}
+
// compiler returns the path to the gc compiler binary (cmd/compile).
func (gcToolchain) compiler() string {
	return base.Tool("compile")
}
+
// linker returns the path to the gc linker binary (cmd/link).
func (gcToolchain) linker() string {
	return base.Tool("link")
}
+
+func pkgPath(a *Action) string {
+	p := a.Package
+	ppath := p.ImportPath
+	if cfg.BuildBuildmode == "plugin" {
+		ppath = pluginPath(a)
+	} else if p.Name == "main" && !p.Internal.ForceLibrary {
+		ppath = "main"
+	}
+	return ppath
+}
+
// gc runs the gc compiler (cmd/compile) for package a.Package,
// writing the object (or archive, when archive != "") file and
// returning its path along with any compiler output.
// importcfg/embedcfg, when non-nil, are written into objdir and passed
// to the compiler; symabis and asmhdr configure assembly interop.
func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg, embedcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, output []byte, err error) {
	p := a.Package
	sh := b.Shell(a)
	objdir := a.Objdir
	if archive != "" {
		ofile = archive
	} else {
		out := "_go_.o"
		ofile = objdir + out
	}

	pkgpath := pkgPath(a)
	defaultGcFlags := []string{"-p", pkgpath}
	if p.Module != nil {
		v := p.Module.GoVersion
		if v == "" {
			v = gover.DefaultGoModVersion
		}
		if allowedVersion(v) {
			defaultGcFlags = append(defaultGcFlags, "-lang=go"+gover.Lang(v))
		}
	}
	if p.Standard {
		defaultGcFlags = append(defaultGcFlags, "-std")
	}

	// If we're giving the compiler the entire package (no C etc files), tell it that,
	// so that it can give good error messages about forward declarations.
	// Exceptions: a few standard packages have forward declarations for
	// pieces supplied behind-the-scenes by package runtime.
	extFiles := len(p.CgoFiles) + len(p.CFiles) + len(p.CXXFiles) + len(p.MFiles) + len(p.FFiles) + len(p.SFiles) + len(p.SysoFiles) + len(p.SwigFiles) + len(p.SwigCXXFiles)
	if p.Standard {
		switch p.ImportPath {
		case "bytes", "internal/poll", "net", "os":
			fallthrough
		case "runtime/metrics", "runtime/pprof", "runtime/trace":
			fallthrough
		case "sync", "syscall", "time":
			extFiles++
		}
	}
	if extFiles == 0 {
		defaultGcFlags = append(defaultGcFlags, "-complete")
	}
	if cfg.BuildContext.InstallSuffix != "" {
		defaultGcFlags = append(defaultGcFlags, "-installsuffix", cfg.BuildContext.InstallSuffix)
	}
	if a.buildID != "" {
		defaultGcFlags = append(defaultGcFlags, "-buildid", a.buildID)
	}
	if p.Internal.OmitDebug || cfg.Goos == "plan9" || cfg.Goarch == "wasm" {
		defaultGcFlags = append(defaultGcFlags, "-dwarf=false")
	}
	if strings.HasPrefix(ToolchainVersion, "go1") && !strings.Contains(os.Args[0], "go_bootstrap") {
		defaultGcFlags = append(defaultGcFlags, "-goversion", ToolchainVersion)
	}
	if p.Internal.Cover.Cfg != "" {
		defaultGcFlags = append(defaultGcFlags, "-coveragecfg="+p.Internal.Cover.Cfg)
	}
	if p.Internal.PGOProfile != "" {
		defaultGcFlags = append(defaultGcFlags, "-pgoprofile="+p.Internal.PGOProfile)
	}
	if symabis != "" {
		defaultGcFlags = append(defaultGcFlags, "-symabis", symabis)
	}

	gcflags := str.StringList(forcedGcflags, p.Internal.Gcflags)
	if p.Internal.FuzzInstrument {
		gcflags = append(gcflags, fuzzInstrumentFlags()...)
	}
	// Add -c=N to use concurrent backend compilation, if possible.
	if c := gcBackendConcurrency(gcflags); c > 1 {
		defaultGcFlags = append(defaultGcFlags, fmt.Sprintf("-c=%d", c))
	}

	// User-specified gcflags come after the defaults so they can override them.
	args := []any{cfg.BuildToolexec, base.Tool("compile"), "-o", ofile, "-trimpath", a.trimpath(), defaultGcFlags, gcflags}
	if p.Internal.LocalPrefix == "" {
		args = append(args, "-nolocalimports")
	} else {
		args = append(args, "-D", p.Internal.LocalPrefix)
	}
	if importcfg != nil {
		if err := sh.writeFile(objdir+"importcfg", importcfg); err != nil {
			return "", nil, err
		}
		args = append(args, "-importcfg", objdir+"importcfg")
	}
	if embedcfg != nil {
		if err := sh.writeFile(objdir+"embedcfg", embedcfg); err != nil {
			return "", nil, err
		}
		args = append(args, "-embedcfg", objdir+"embedcfg")
	}
	if ofile == archive {
		args = append(args, "-pack")
	}
	if asmhdr {
		args = append(args, "-asmhdr", objdir+"go_asm.h")
	}

	for _, f := range gofiles {
		f := mkAbs(p.Dir, f)

		// Handle overlays. Convert path names using OverlayPath
		// so these paths can be handed directly to tools.
		// Deleted files won't show up in when scanning directories earlier,
		// so OverlayPath will never return "" (meaning a deleted file) here.
		// TODO(#39958): Handle cases where the package directory
		// doesn't exist on disk (this can happen when all the package's
		// files are in an overlay): the code expects the package directory
		// to exist and runs some tools in that directory.
		// TODO(#39958): Process the overlays when the
		// gofiles, cgofiles, cfiles, sfiles, and cxxfiles variables are
		// created in (*Builder).build. Doing that requires rewriting the
		// code that uses those values to expect absolute paths.
		f, _ = fsys.OverlayPath(f)

		args = append(args, f)
	}

	output, err = sh.runOut(base.Cwd(), nil, args...)
	return ofile, output, err
}
+
+// gcBackendConcurrency returns the backend compiler concurrency level for a package compilation.
+func gcBackendConcurrency(gcflags []string) int {
+	// First, check whether we can use -c at all for this compilation.
+	canDashC := concurrentGCBackendCompilationEnabledByDefault
+
+	switch e := os.Getenv("GO19CONCURRENTCOMPILATION"); e {
+	case "0":
+		canDashC = false
+	case "1":
+		canDashC = true
+	case "":
+		// Not set. Use default.
+	default:
+		log.Fatalf("GO19CONCURRENTCOMPILATION must be 0, 1, or unset, got %q", e)
+	}
+
+	// TODO: Test and delete these conditions.
+	if cfg.ExperimentErr != nil || cfg.Experiment.FieldTrack || cfg.Experiment.PreemptibleLoops {
+		canDashC = false
+	}
+
+	if !canDashC {
+		return 1
+	}
+
+	// Decide how many concurrent backend compilations to allow.
+	//
+	// If we allow too many, in theory we might end up with p concurrent processes,
+	// each with c concurrent backend compiles, all fighting over the same resources.
+	// However, in practice, that seems not to happen too much.
+	// Most build graphs are surprisingly serial, so p==1 for much of the build.
+	// Furthermore, concurrent backend compilation is only enabled for a part
+	// of the overall compiler execution, so c==1 for much of the build.
+	// So don't worry too much about that interaction for now.
+	//
+	// However, in practice, setting c above 4 tends not to help very much.
+	// See the analysis in CL 41192.
+	//
+	// TODO(josharian): attempt to detect whether this particular compilation
+	// is likely to be a bottleneck, e.g. when:
+	//   - it has no successor packages to compile (usually package main)
+	//   - all paths through the build graph pass through it
+	//   - critical path scheduling says it is high priority
+	// and in such a case, set c to runtime.GOMAXPROCS(0).
+	// By default this is the same as runtime.NumCPU.
+	// We do this now when p==1.
+	// To limit parallelism, set GOMAXPROCS below numCPU; this may be useful
+	// on a low-memory builder, or if a deterministic build order is required.
+	c := runtime.GOMAXPROCS(0)
+	if cfg.BuildP == 1 {
+		// No process parallelism, do not cap compiler parallelism.
+		return c
+	}
+	// Some process parallelism. Set c to min(4, maxprocs).
+	if c > 4 {
+		c = 4
+	}
+	return c
+}
+
// trimpath returns the -trimpath argument to use
// when compiling the action.
//
// The result is a semicolon-separated list of "from=>to" rewrite
// rules, ending with a rule that strips the object directory.
func (a *Action) trimpath() string {
	// Keep in sync with Builder.ccompile
	// The trimmed paths are a little different, but we need to trim in the
	// same situations.

	// Strip the object directory entirely.
	objdir := a.Objdir
	if len(objdir) > 1 && objdir[len(objdir)-1] == filepath.Separator {
		objdir = objdir[:len(objdir)-1]
	}
	rewrite := ""

	rewriteDir := a.Package.Dir
	if cfg.BuildTrimpath {
		// Under -trimpath, replace the package directory with either
		// module@version-relative paths or the bare import path.
		importPath := a.Package.Internal.OrigImportPath
		if m := a.Package.Module; m != nil && m.Version != "" {
			rewriteDir = m.Path + "@" + m.Version + strings.TrimPrefix(importPath, m.Path)
		} else {
			rewriteDir = importPath
		}
		rewrite += a.Package.Dir + "=>" + rewriteDir + ";"
	}

	// Add rewrites for overlays. The 'from' and 'to' paths in overlays don't need to have
	// same basename, so go from the overlay contents file path (passed to the compiler)
	// to the path the disk path would be rewritten to.

	cgoFiles := make(map[string]bool)
	for _, f := range a.Package.CgoFiles {
		cgoFiles[f] = true
	}

	// TODO(matloob): Higher up in the stack, when the logic for deciding when to make copies
	// of c/c++/m/f/hfiles is consolidated, use the same logic that Build uses to determine
	// whether to create the copies in objdir to decide whether to rewrite objdir to the
	// package directory here.
	var overlayNonGoRewrites string // rewrites for non-go files
	hasCgoOverlay := false
	if fsys.OverlayFile != "" {
		for _, filename := range a.Package.AllFiles() {
			path := filename
			if !filepath.IsAbs(path) {
				path = filepath.Join(a.Package.Dir, path)
			}
			base := filepath.Base(path)
			isGo := strings.HasSuffix(filename, ".go") || strings.HasSuffix(filename, ".s")
			isCgo := cgoFiles[filename] || !isGo
			overlayPath, isOverlay := fsys.OverlayPath(path)
			if isCgo && isOverlay {
				hasCgoOverlay = true
			}
			if !isCgo && isOverlay {
				rewrite += overlayPath + "=>" + filepath.Join(rewriteDir, base) + ";"
			} else if isCgo {
				// Generate rewrites for non-Go files copied to files in objdir.
				if filepath.Dir(path) == a.Package.Dir {
					// This is a file copied to objdir.
					overlayNonGoRewrites += filepath.Join(objdir, base) + "=>" + filepath.Join(rewriteDir, base) + ";"
				}
			} else {
				// Non-overlay Go files are covered by the a.Package.Dir rewrite rule above.
			}
		}
	}
	if hasCgoOverlay {
		rewrite += overlayNonGoRewrites
	}
	// Final rule: erase the object directory prefix entirely.
	rewrite += objdir + "=>"

	return rewrite
}
+
// asmArgs builds the common command-line prefix for invoking cmd/asm
// on package p: tool path, package path, trimpath, include dirs, and
// the GOOS/GOARCH (and arch-feature) -D defines.
func asmArgs(a *Action, p *load.Package) []any {
	// Add -I pkg/GOOS_GOARCH so #include "textflag.h" works in .s files.
	inc := filepath.Join(cfg.GOROOT, "pkg", "include")
	pkgpath := pkgPath(a)
	args := []any{cfg.BuildToolexec, base.Tool("asm"), "-p", pkgpath, "-trimpath", a.trimpath(), "-I", a.Objdir, "-I", inc, "-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch, forcedAsmflags, p.Internal.Asmflags}
	if p.ImportPath == "runtime" && cfg.Goarch == "386" {
		for _, arg := range forcedAsmflags {
			if arg == "-dynlink" {
				args = append(args, "-D=GOBUILDMODE_shared=1")
			}
		}
	}

	if cfg.Goarch == "386" {
		// Define GO386_value from cfg.GO386.
		args = append(args, "-D", "GO386_"+cfg.GO386)
	}

	if cfg.Goarch == "amd64" {
		// Define GOAMD64_value from cfg.GOAMD64.
		args = append(args, "-D", "GOAMD64_"+cfg.GOAMD64)
	}

	if cfg.Goarch == "mips" || cfg.Goarch == "mipsle" {
		// Define GOMIPS_value from cfg.GOMIPS.
		args = append(args, "-D", "GOMIPS_"+cfg.GOMIPS)
	}

	if cfg.Goarch == "mips64" || cfg.Goarch == "mips64le" {
		// Define GOMIPS64_value from cfg.GOMIPS64.
		args = append(args, "-D", "GOMIPS64_"+cfg.GOMIPS64)
	}

	if cfg.Goarch == "ppc64" || cfg.Goarch == "ppc64le" {
		// Define GOPPC64_power8..N from cfg.PPC64.
		// We treat each powerpc version as a superset of functionality.
		// The fallthroughs define every level at or below the selected one.
		switch cfg.GOPPC64 {
		case "power10":
			args = append(args, "-D", "GOPPC64_power10")
			fallthrough
		case "power9":
			args = append(args, "-D", "GOPPC64_power9")
			fallthrough
		default: // This should always be power8.
			args = append(args, "-D", "GOPPC64_power8")
		}
	}

	if cfg.Goarch == "arm" {
		// Define GOARM_value from cfg.GOARM.
		// As above, each level implies all lower levels via fallthrough.
		switch cfg.GOARM {
		case "7":
			args = append(args, "-D", "GOARM_7")
			fallthrough
		case "6":
			args = append(args, "-D", "GOARM_6")
			fallthrough
		default:
			args = append(args, "-D", "GOARM_5")
		}
	}

	return args
}
+
+func (gcToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) {
+	p := a.Package
+	args := asmArgs(a, p)
+
+	var ofiles []string
+	for _, sfile := range sfiles {
+		overlayPath, _ := fsys.OverlayPath(mkAbs(p.Dir, sfile))
+		ofile := a.Objdir + sfile[:len(sfile)-len(".s")] + ".o"
+		ofiles = append(ofiles, ofile)
+		args1 := append(args, "-o", ofile, overlayPath)
+		if err := b.Shell(a).run(p.Dir, p.ImportPath, nil, args1...); err != nil {
+			return nil, err
+		}
+	}
+	return ofiles, nil
+}
+
// symabis runs the assembler in -gensymabis mode over sfiles to
// produce a symbol ABI file for the compiler. It returns the path of
// the generated file, or "" when there are no assembly files.
func (gcToolchain) symabis(b *Builder, a *Action, sfiles []string) (string, error) {
	sh := b.Shell(a)

	mkSymabis := func(p *load.Package, sfiles []string, path string) error {
		args := asmArgs(a, p)
		args = append(args, "-gensymabis", "-o", path)
		for _, sfile := range sfiles {
			// gcc_-prefixed files in runtime/cgo are C-compiled, not
			// Go assembly, so skip them here.
			if p.ImportPath == "runtime/cgo" && strings.HasPrefix(sfile, "gcc_") {
				continue
			}
			op, _ := fsys.OverlayPath(mkAbs(p.Dir, sfile))
			args = append(args, op)
		}

		// Supply an empty go_asm.h as if the compiler had been run.
		// -gensymabis parsing is lax enough that we don't need the
		// actual definitions that would appear in go_asm.h.
		if err := sh.writeFile(a.Objdir+"go_asm.h", nil); err != nil {
			return err
		}

		return sh.run(p.Dir, p.ImportPath, nil, args...)
	}

	var symabis string // Only set if we actually create the file
	p := a.Package
	if len(sfiles) != 0 {
		symabis = a.Objdir + "symabis"
		if err := mkSymabis(p, sfiles, symabis); err != nil {
			return "", err
		}
	}

	return symabis, nil
}
+
// toolVerify checks that the command line args writes the same output file
// if run using newTool instead.
// Unused now but kept around for future use.
//
// It re-runs the command with args[1] (the tool) and args[3] (the
// output path) swapped for newTool and ofile+".new", then compares the
// two outputs byte for byte.
func toolVerify(a *Action, b *Builder, p *load.Package, newTool string, ofile string, args []any) error {
	newArgs := make([]any, len(args))
	copy(newArgs, args)
	newArgs[1] = base.Tool(newTool)
	newArgs[3] = ofile + ".new" // x.6 becomes x.6.new
	if err := b.Shell(a).run(p.Dir, p.ImportPath, nil, newArgs...); err != nil {
		return err
	}
	data1, err := os.ReadFile(ofile)
	if err != nil {
		return err
	}
	data2, err := os.ReadFile(ofile + ".new")
	if err != nil {
		return err
	}
	if !bytes.Equal(data1, data2) {
		return fmt.Errorf("%s and %s produced different output files:\n%s\n%s", filepath.Base(args[1].(string)), newTool, strings.Join(str.StringList(args...), " "), strings.Join(str.StringList(newArgs...), " "))
	}
	// Outputs match; discard the duplicate.
	os.Remove(ofile + ".new")
	return nil
}
+
// pack appends the object files ofiles to the archive afile
// (both interpreted relative to a.Objdir), using an in-process
// implementation of the archive append rather than running cmd/pack.
func (gcToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error {
	var absOfiles []string
	for _, f := range ofiles {
		absOfiles = append(absOfiles, mkAbs(a.Objdir, f))
	}
	absAfile := mkAbs(a.Objdir, afile)

	// The archive file should have been created by the compiler.
	// Since it used to not work that way, verify.
	if !cfg.BuildN {
		if _, err := os.Stat(absAfile); err != nil {
			base.Fatalf("os.Stat of archive file failed: %v", err)
		}
	}

	p := a.Package
	sh := b.Shell(a)
	if cfg.BuildN || cfg.BuildX {
		// Print the equivalent pack command even though we do the
		// work in-process.
		cmdline := str.StringList(base.Tool("pack"), "r", absAfile, absOfiles)
		sh.ShowCmd(p.Dir, "%s # internal", joinUnambiguously(cmdline))
	}
	if cfg.BuildN {
		return nil
	}
	if err := packInternal(absAfile, absOfiles); err != nil {
		return sh.reportCmd("", "", nil, err)
	}
	return nil
}
+
+func packInternal(afile string, ofiles []string) error {
+	dst, err := os.OpenFile(afile, os.O_WRONLY|os.O_APPEND, 0)
+	if err != nil {
+		return err
+	}
+	defer dst.Close() // only for error returns or panics
+	w := bufio.NewWriter(dst)
+
+	for _, ofile := range ofiles {
+		src, err := os.Open(ofile)
+		if err != nil {
+			return err
+		}
+		fi, err := src.Stat()
+		if err != nil {
+			src.Close()
+			return err
+		}
+		// Note: Not using %-16.16s format because we care
+		// about bytes, not runes.
+		name := fi.Name()
+		if len(name) > 16 {
+			name = name[:16]
+		} else {
+			name += strings.Repeat(" ", 16-len(name))
+		}
+		size := fi.Size()
+		fmt.Fprintf(w, "%s%-12d%-6d%-6d%-8o%-10d`\n",
+			name, 0, 0, 0, 0644, size)
+		n, err := io.Copy(w, src)
+		src.Close()
+		if err == nil && n < size {
+			err = io.ErrUnexpectedEOF
+		} else if err == nil && n > size {
+			err = fmt.Errorf("file larger than size reported by stat")
+		}
+		if err != nil {
+			return fmt.Errorf("copying %s to %s: %v", ofile, afile, err)
+		}
+		if size&1 != 0 {
+			w.WriteByte(0)
+		}
+	}
+
+	if err := w.Flush(); err != nil {
+		return err
+	}
+	return dst.Close()
+}
+
+// setextld sets the appropriate linker flags for the specified compiler.
+func setextld(ldflags []string, compiler []string) ([]string, error) {
+	for _, f := range ldflags {
+		if f == "-extld" || strings.HasPrefix(f, "-extld=") {
+			// don't override -extld if supplied
+			return ldflags, nil
+		}
+	}
+	joined, err := quoted.Join(compiler)
+	if err != nil {
+		return nil, err
+	}
+	return append(ldflags, "-extld="+joined), nil
+}
+
+// pluginPath computes the package path for a plugin main package.
+//
+// This is typically the import path of the main package p, unless the
+// plugin is being built directly from source files. In that case we
+// combine the package build ID with the contents of the main package
+// source files. This allows us to identify two different plugins
+// built from two source files with the same name.
+func pluginPath(a *Action) string {
+	p := a.Package
+	if p.ImportPath != "command-line-arguments" {
+		return p.ImportPath
+	}
+	h := sha1.New()
+	buildID := a.buildID
+	if a.Mode == "link" {
+		// For linking, use the main package's build ID instead of
+		// the binary's build ID, so it is the same hash used in
+		// compiling and linking.
+		// When compiling, we use actionID/actionID (instead of
+		// actionID/contentID) as a temporary build ID to compute
+		// the hash. Do the same here. (See buildid.go:useCache)
+		// The build ID matters because it affects the overall hash
+		// in the plugin's pseudo-import path returned below.
+		// We need to use the same import path when compiling and linking.
+		id := strings.Split(buildID, buildIDSeparator)
+		buildID = id[1] + buildIDSeparator + id[1]
+	}
+	fmt.Fprintf(h, "build ID: %s\n", buildID)
+	for _, file := range str.StringList(p.GoFiles, p.CgoFiles, p.SFiles) {
+		data, err := os.ReadFile(filepath.Join(p.Dir, file))
+		if err != nil {
+			base.Fatalf("go: %s", err)
+		}
+		h.Write(data)
+	}
+	return fmt.Sprintf("plugin/unnamed-%x", h.Sum(nil))
+}
+
+func (gcToolchain) ld(b *Builder, root *Action, targetPath, importcfg, mainpkg string) error {
+	cxx := len(root.Package.CXXFiles) > 0 || len(root.Package.SwigCXXFiles) > 0
+	for _, a := range root.Deps {
+		if a.Package != nil && (len(a.Package.CXXFiles) > 0 || len(a.Package.SwigCXXFiles) > 0) {
+			cxx = true
+		}
+	}
+	var ldflags []string
+	if cfg.BuildContext.InstallSuffix != "" {
+		ldflags = append(ldflags, "-installsuffix", cfg.BuildContext.InstallSuffix)
+	}
+	if root.Package.Internal.OmitDebug {
+		ldflags = append(ldflags, "-s", "-w")
+	}
+	if cfg.BuildBuildmode == "plugin" {
+		ldflags = append(ldflags, "-pluginpath", pluginPath(root))
+	}
+
+	// Store BuildID inside toolchain binaries as a unique identifier of the
+	// tool being run, for use by content-based staleness determination.
+	if root.Package.Goroot && strings.HasPrefix(root.Package.ImportPath, "cmd/") {
+		// External linking will include our build id in the external
+		// linker's build id, which will cause our build id to not
+		// match the next time the tool is built.
+		// Rely on the external build id instead.
+		if !platform.MustLinkExternal(cfg.Goos, cfg.Goarch, false) {
+			ldflags = append(ldflags, "-X=cmd/internal/objabi.buildID="+root.buildID)
+		}
+	}
+
+	// Store default GODEBUG in binaries.
+	if root.Package.DefaultGODEBUG != "" {
+		ldflags = append(ldflags, "-X=runtime.godebugDefault="+root.Package.DefaultGODEBUG)
+	}
+
+	// If the user has not specified the -extld option, then specify the
+	// appropriate linker. In case of C++ code, use the compiler named
+	// by the CXX environment variable or defaultCXX if CXX is not set.
+	// Else, use the CC environment variable and defaultCC as fallback.
+	var compiler []string
+	if cxx {
+		compiler = envList("CXX", cfg.DefaultCXX(cfg.Goos, cfg.Goarch))
+	} else {
+		compiler = envList("CC", cfg.DefaultCC(cfg.Goos, cfg.Goarch))
+	}
+	ldflags = append(ldflags, "-buildmode="+ldBuildmode)
+	if root.buildID != "" {
+		ldflags = append(ldflags, "-buildid="+root.buildID)
+	}
+	ldflags = append(ldflags, forcedLdflags...)
+	ldflags = append(ldflags, root.Package.Internal.Ldflags...)
+	ldflags, err := setextld(ldflags, compiler)
+	if err != nil {
+		return err
+	}
+
+	// On OS X when using external linking to build a shared library,
+	// the argument passed here to -o ends up recorded in the final
+	// shared library in the LC_ID_DYLIB load command.
+	// To avoid putting the temporary output directory name there
+	// (and making the resulting shared library useless),
+	// run the link in the output directory so that -o can name
+	// just the final path element.
+	// On Windows, DLL file name is recorded in PE file
+	// export section, so do like on OS X.
+	// On Linux, for a shared object, at least with the Gold linker,
+	// the output file path is recorded in the .gnu.version_d section.
+	dir := "."
+	if cfg.BuildBuildmode == "c-shared" || cfg.BuildBuildmode == "plugin" {
+		dir, targetPath = filepath.Split(targetPath)
+	}
+
+	env := []string{}
+	if cfg.BuildTrimpath {
+		env = append(env, "GOROOT_FINAL="+trimPathGoRootFinal)
+	}
+	return b.Shell(root).run(dir, root.Package.ImportPath, env, cfg.BuildToolexec, base.Tool("link"), "-o", targetPath, "-importcfg", importcfg, ldflags, mainpkg)
+}
+
+func (gcToolchain) ldShared(b *Builder, root *Action, toplevelactions []*Action, targetPath, importcfg string, allactions []*Action) error {
+	ldflags := []string{"-installsuffix", cfg.BuildContext.InstallSuffix}
+	ldflags = append(ldflags, "-buildmode=shared")
+	ldflags = append(ldflags, forcedLdflags...)
+	ldflags = append(ldflags, root.Package.Internal.Ldflags...)
+	cxx := false
+	for _, a := range allactions {
+		if a.Package != nil && (len(a.Package.CXXFiles) > 0 || len(a.Package.SwigCXXFiles) > 0) {
+			cxx = true
+		}
+	}
+	// If the user has not specified the -extld option, then specify the
+	// appropriate linker. In case of C++ code, use the compiler named
+	// by the CXX environment variable or defaultCXX if CXX is not set.
+	// Else, use the CC environment variable and defaultCC as fallback.
+	var compiler []string
+	if cxx {
+		compiler = envList("CXX", cfg.DefaultCXX(cfg.Goos, cfg.Goarch))
+	} else {
+		compiler = envList("CC", cfg.DefaultCC(cfg.Goos, cfg.Goarch))
+	}
+	ldflags, err := setextld(ldflags, compiler)
+	if err != nil {
+		return err
+	}
+	for _, d := range toplevelactions {
+		if !strings.HasSuffix(d.Target, ".a") { // omit unsafe etc and actions for other shared libraries
+			continue
+		}
+		ldflags = append(ldflags, d.Package.ImportPath+"="+d.Target)
+	}
+	return b.Shell(root).run(".", targetPath, nil, cfg.BuildToolexec, base.Tool("link"), "-o", targetPath, "-importcfg", importcfg, ldflags)
+}
+
+func (gcToolchain) cc(b *Builder, a *Action, ofile, cfile string) error {
+	return fmt.Errorf("%s: C source files not supported without cgo", mkAbs(a.Package.Dir, cfile))
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/work/gccgo.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/gccgo.go
new file mode 100644
index 0000000000000000000000000000000000000000..2dce9f1acef08690bed3364e28abe76deb045968
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/gccgo.go
@@ -0,0 +1,672 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"cmd/go/internal/base"
+	"cmd/go/internal/cfg"
+	"cmd/go/internal/fsys"
+	"cmd/go/internal/load"
+	"cmd/go/internal/str"
+	"cmd/internal/pkgpath"
+)
+
+// The Gccgo toolchain.
+
+type gccgoToolchain struct{}
+
+var GccgoName, GccgoBin string
+var gccgoErr error
+
+func init() {
+	GccgoName = cfg.Getenv("GCCGO")
+	if GccgoName == "" {
+		GccgoName = "gccgo"
+	}
+	GccgoBin, gccgoErr = cfg.LookPath(GccgoName)
+}
+
+func (gccgoToolchain) compiler() string {
+	checkGccgoBin()
+	return GccgoBin
+}
+
+func (gccgoToolchain) linker() string {
+	checkGccgoBin()
+	return GccgoBin
+}
+
+func (gccgoToolchain) ar() []string {
+	return envList("AR", "ar")
+}
+
+func checkGccgoBin() {
+	if gccgoErr == nil {
+		return
+	}
+	fmt.Fprintf(os.Stderr, "cmd/go: gccgo: %s\n", gccgoErr)
+	base.SetExitStatus(2)
+	base.Exit()
+}
+
+func (tools gccgoToolchain) gc(b *Builder, a *Action, archive string, importcfg, embedcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, output []byte, err error) {
+	p := a.Package
+	sh := b.Shell(a)
+	objdir := a.Objdir
+	out := "_go_.o"
+	ofile = objdir + out
+	gcargs := []string{"-g"}
+	gcargs = append(gcargs, b.gccArchArgs()...)
+	gcargs = append(gcargs, "-fdebug-prefix-map="+b.WorkDir+"=/tmp/go-build")
+	gcargs = append(gcargs, "-gno-record-gcc-switches")
+	if pkgpath := gccgoPkgpath(p); pkgpath != "" {
+		gcargs = append(gcargs, "-fgo-pkgpath="+pkgpath)
+	}
+	if p.Internal.LocalPrefix != "" {
+		gcargs = append(gcargs, "-fgo-relative-import-path="+p.Internal.LocalPrefix)
+	}
+
+	args := str.StringList(tools.compiler(), "-c", gcargs, "-o", ofile, forcedGccgoflags)
+	if importcfg != nil {
+		if b.gccSupportsFlag(args[:1], "-fgo-importcfg=/dev/null") {
+			if err := sh.writeFile(objdir+"importcfg", importcfg); err != nil {
+				return "", nil, err
+			}
+			args = append(args, "-fgo-importcfg="+objdir+"importcfg")
+		} else {
+			root := objdir + "_importcfgroot_"
+			if err := buildImportcfgSymlinks(sh, root, importcfg); err != nil {
+				return "", nil, err
+			}
+			args = append(args, "-I", root)
+		}
+	}
+	if embedcfg != nil && b.gccSupportsFlag(args[:1], "-fgo-embedcfg=/dev/null") {
+		if err := sh.writeFile(objdir+"embedcfg", embedcfg); err != nil {
+			return "", nil, err
+		}
+		args = append(args, "-fgo-embedcfg="+objdir+"embedcfg")
+	}
+
+	if b.gccSupportsFlag(args[:1], "-ffile-prefix-map=a=b") {
+		if cfg.BuildTrimpath {
+			args = append(args, "-ffile-prefix-map="+base.Cwd()+"=.")
+			args = append(args, "-ffile-prefix-map="+b.WorkDir+"=/tmp/go-build")
+		}
+		if fsys.OverlayFile != "" {
+			for _, name := range gofiles {
+				absPath := mkAbs(p.Dir, name)
+				overlayPath, ok := fsys.OverlayPath(absPath)
+				if !ok {
+					continue
+				}
+				toPath := absPath
+				// gccgo only applies the last matching rule, so also handle the case where
+				// BuildTrimpath is true and the path is relative to base.Cwd().
+				if cfg.BuildTrimpath && str.HasFilePathPrefix(toPath, base.Cwd()) {
+					toPath = "." + toPath[len(base.Cwd()):]
+				}
+				args = append(args, "-ffile-prefix-map="+overlayPath+"="+toPath)
+			}
+		}
+	}
+
+	args = append(args, a.Package.Internal.Gccgoflags...)
+	for _, f := range gofiles {
+		f := mkAbs(p.Dir, f)
+		// Overlay files if necessary.
+		// See comment on gctoolchain.gc about overlay TODOs
+		f, _ = fsys.OverlayPath(f)
+		args = append(args, f)
+	}
+
+	output, err = sh.runOut(p.Dir, nil, args)
+	return ofile, output, err
+}
+
+// buildImportcfgSymlinks builds in root a tree of symlinks
+// implementing the directives from importcfg.
+// This serves as a temporary transition mechanism until
+// we can depend on gccgo reading an importcfg directly.
+// (The Go 1.9 and later gc compilers already do.)
+func buildImportcfgSymlinks(sh *Shell, root string, importcfg []byte) error {
+	for lineNum, line := range strings.Split(string(importcfg), "\n") {
+		lineNum++ // 1-based
+		line = strings.TrimSpace(line)
+		if line == "" {
+			continue
+		}
+		if line == "" || strings.HasPrefix(line, "#") {
+			continue
+		}
+		var verb, args string
+		if i := strings.Index(line, " "); i < 0 {
+			verb = line
+		} else {
+			verb, args = line[:i], strings.TrimSpace(line[i+1:])
+		}
+		before, after, _ := strings.Cut(args, "=")
+		switch verb {
+		default:
+			base.Fatalf("importcfg:%d: unknown directive %q", lineNum, verb)
+		case "packagefile":
+			if before == "" || after == "" {
+				return fmt.Errorf(`importcfg:%d: invalid packagefile: syntax is "packagefile path=filename": %s`, lineNum, line)
+			}
+			archive := gccgoArchive(root, before)
+			if err := sh.Mkdir(filepath.Dir(archive)); err != nil {
+				return err
+			}
+			if err := sh.Symlink(after, archive); err != nil {
+				return err
+			}
+		case "importmap":
+			if before == "" || after == "" {
+				return fmt.Errorf(`importcfg:%d: invalid importmap: syntax is "importmap old=new": %s`, lineNum, line)
+			}
+			beforeA := gccgoArchive(root, before)
+			afterA := gccgoArchive(root, after)
+			if err := sh.Mkdir(filepath.Dir(beforeA)); err != nil {
+				return err
+			}
+			if err := sh.Mkdir(filepath.Dir(afterA)); err != nil {
+				return err
+			}
+			if err := sh.Symlink(afterA, beforeA); err != nil {
+				return err
+			}
+		case "packageshlib":
+			return fmt.Errorf("gccgo -importcfg does not support shared libraries")
+		}
+	}
+	return nil
+}
+
+func (tools gccgoToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) {
+	p := a.Package
+	var ofiles []string
+	for _, sfile := range sfiles {
+		base := filepath.Base(sfile)
+		ofile := a.Objdir + base[:len(base)-len(".s")] + ".o"
+		ofiles = append(ofiles, ofile)
+		sfile, _ = fsys.OverlayPath(mkAbs(p.Dir, sfile))
+		defs := []string{"-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch}
+		if pkgpath := tools.gccgoCleanPkgpath(b, p); pkgpath != "" {
+			defs = append(defs, `-D`, `GOPKGPATH=`+pkgpath)
+		}
+		defs = tools.maybePIC(defs)
+		defs = append(defs, b.gccArchArgs()...)
+		err := b.Shell(a).run(p.Dir, p.ImportPath, nil, tools.compiler(), "-xassembler-with-cpp", "-I", a.Objdir, "-c", "-o", ofile, defs, sfile)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return ofiles, nil
+}
+
+func (gccgoToolchain) symabis(b *Builder, a *Action, sfiles []string) (string, error) {
+	return "", nil
+}
+
+func gccgoArchive(basedir, imp string) string {
+	end := filepath.FromSlash(imp + ".a")
+	afile := filepath.Join(basedir, end)
+	// add "lib" to the final element
+	return filepath.Join(filepath.Dir(afile), "lib"+filepath.Base(afile))
+}
+
+func (tools gccgoToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error {
+	p := a.Package
+	sh := b.Shell(a)
+	objdir := a.Objdir
+	var absOfiles []string
+	for _, f := range ofiles {
+		absOfiles = append(absOfiles, mkAbs(objdir, f))
+	}
+	var arArgs []string
+	if cfg.Goos == "aix" && cfg.Goarch == "ppc64" {
+		// AIX puts both 32-bit and 64-bit objects in the same archive.
+		// Tell the AIX "ar" command to only care about 64-bit objects.
+		arArgs = []string{"-X64"}
+	}
+	absAfile := mkAbs(objdir, afile)
+	// Try with D modifier first, then without if that fails.
+	output, err := sh.runOut(p.Dir, nil, tools.ar(), arArgs, "rcD", absAfile, absOfiles)
+	if err != nil {
+		return sh.run(p.Dir, p.ImportPath, nil, tools.ar(), arArgs, "rc", absAfile, absOfiles)
+	}
+
+	// Show the output if there is any even without errors.
+	return sh.reportCmd("", "", output, nil)
+}
+
+func (tools gccgoToolchain) link(b *Builder, root *Action, out, importcfg string, allactions []*Action, buildmode, desc string) error {
+	sh := b.Shell(root)
+
+	// gccgo needs explicit linking with all package dependencies,
+	// and all LDFLAGS from cgo dependencies.
+	afiles := []string{}
+	shlibs := []string{}
+	ldflags := b.gccArchArgs()
+	cgoldflags := []string{}
+	usesCgo := false
+	cxx := false
+	objc := false
+	fortran := false
+	if root.Package != nil {
+		cxx = len(root.Package.CXXFiles) > 0 || len(root.Package.SwigCXXFiles) > 0
+		objc = len(root.Package.MFiles) > 0
+		fortran = len(root.Package.FFiles) > 0
+	}
+
+	readCgoFlags := func(flagsFile string) error {
+		flags, err := os.ReadFile(flagsFile)
+		if err != nil {
+			return err
+		}
+		const ldflagsPrefix = "_CGO_LDFLAGS="
+		for _, line := range strings.Split(string(flags), "\n") {
+			if strings.HasPrefix(line, ldflagsPrefix) {
+				flag := line[len(ldflagsPrefix):]
+				// Every _cgo_flags file has -g and -O2 in _CGO_LDFLAGS
+				// but they don't mean anything to the linker so filter
+				// them out.
+				if flag != "-g" && !strings.HasPrefix(flag, "-O") {
+					cgoldflags = append(cgoldflags, flag)
+				}
+			}
+		}
+		return nil
+	}
+
+	var arArgs []string
+	if cfg.Goos == "aix" && cfg.Goarch == "ppc64" {
+		// AIX puts both 32-bit and 64-bit objects in the same archive.
+		// Tell the AIX "ar" command to only care about 64-bit objects.
+		arArgs = []string{"-X64"}
+	}
+
+	newID := 0
+	readAndRemoveCgoFlags := func(archive string) (string, error) {
+		newID++
+		newArchive := root.Objdir + fmt.Sprintf("_pkg%d_.a", newID)
+		if err := sh.CopyFile(newArchive, archive, 0666, false); err != nil {
+			return "", err
+		}
+		if cfg.BuildN || cfg.BuildX {
+			sh.ShowCmd("", "ar d %s _cgo_flags", newArchive)
+			if cfg.BuildN {
+				// TODO(rsc): We could do better about showing the right _cgo_flags even in -n mode.
+				// Either the archive is already built and we can read them out,
+				// or we're printing commands to build the archive and can
+				// forward the _cgo_flags directly to this step.
+				return "", nil
+			}
+		}
+		err := sh.run(root.Objdir, desc, nil, tools.ar(), arArgs, "x", newArchive, "_cgo_flags")
+		if err != nil {
+			return "", err
+		}
+		err = sh.run(".", desc, nil, tools.ar(), arArgs, "d", newArchive, "_cgo_flags")
+		if err != nil {
+			return "", err
+		}
+		err = readCgoFlags(filepath.Join(root.Objdir, "_cgo_flags"))
+		if err != nil {
+			return "", err
+		}
+		return newArchive, nil
+	}
+
+	// If using -linkshared, find the shared library deps.
+	haveShlib := make(map[string]bool)
+	targetBase := filepath.Base(root.Target)
+	if cfg.BuildLinkshared {
+		for _, a := range root.Deps {
+			p := a.Package
+			if p == nil || p.Shlib == "" {
+				continue
+			}
+
+			// The .a we are linking into this .so
+			// will have its Shlib set to this .so.
+			// Don't start thinking we want to link
+			// this .so into itself.
+			base := filepath.Base(p.Shlib)
+			if base != targetBase {
+				haveShlib[base] = true
+			}
+		}
+	}
+
+	// Arrange the deps into afiles and shlibs.
+	addedShlib := make(map[string]bool)
+	for _, a := range root.Deps {
+		p := a.Package
+		if p != nil && p.Shlib != "" && haveShlib[filepath.Base(p.Shlib)] {
+			// This is a package linked into a shared
+			// library that we will put into shlibs.
+			continue
+		}
+
+		if haveShlib[filepath.Base(a.Target)] {
+			// This is a shared library we want to link against.
+			if !addedShlib[a.Target] {
+				shlibs = append(shlibs, a.Target)
+				addedShlib[a.Target] = true
+			}
+			continue
+		}
+
+		if p != nil {
+			target := a.built
+			if p.UsesCgo() || p.UsesSwig() {
+				var err error
+				target, err = readAndRemoveCgoFlags(target)
+				if err != nil {
+					continue
+				}
+			}
+
+			afiles = append(afiles, target)
+		}
+	}
+
+	for _, a := range allactions {
+		if a.Package == nil {
+			continue
+		}
+		if len(a.Package.CgoFiles) > 0 {
+			usesCgo = true
+		}
+		if a.Package.UsesSwig() {
+			usesCgo = true
+		}
+		if len(a.Package.CXXFiles) > 0 || len(a.Package.SwigCXXFiles) > 0 {
+			cxx = true
+		}
+		if len(a.Package.MFiles) > 0 {
+			objc = true
+		}
+		if len(a.Package.FFiles) > 0 {
+			fortran = true
+		}
+	}
+
+	wholeArchive := []string{"-Wl,--whole-archive"}
+	noWholeArchive := []string{"-Wl,--no-whole-archive"}
+	if cfg.Goos == "aix" {
+		wholeArchive = nil
+		noWholeArchive = nil
+	}
+	ldflags = append(ldflags, wholeArchive...)
+	ldflags = append(ldflags, afiles...)
+	ldflags = append(ldflags, noWholeArchive...)
+
+	ldflags = append(ldflags, cgoldflags...)
+	ldflags = append(ldflags, envList("CGO_LDFLAGS", "")...)
+	if cfg.Goos != "aix" {
+		ldflags = str.StringList("-Wl,-(", ldflags, "-Wl,-)")
+	}
+
+	if root.buildID != "" {
+		// On systems that normally use gold or the GNU linker,
+		// use the --build-id option to write a GNU build ID note.
+		switch cfg.Goos {
+		case "android", "dragonfly", "linux", "netbsd":
+			ldflags = append(ldflags, fmt.Sprintf("-Wl,--build-id=0x%x", root.buildID))
+		}
+	}
+
+	var rLibPath string
+	if cfg.Goos == "aix" {
+		rLibPath = "-Wl,-blibpath="
+	} else {
+		rLibPath = "-Wl,-rpath="
+	}
+	for _, shlib := range shlibs {
+		ldflags = append(
+			ldflags,
+			"-L"+filepath.Dir(shlib),
+			rLibPath+filepath.Dir(shlib),
+			"-l"+strings.TrimSuffix(
+				strings.TrimPrefix(filepath.Base(shlib), "lib"),
+				".so"))
+	}
+
+	var realOut string
+	goLibBegin := str.StringList(wholeArchive, "-lgolibbegin", noWholeArchive)
+	switch buildmode {
+	case "exe":
+		if usesCgo && cfg.Goos == "linux" {
+			ldflags = append(ldflags, "-Wl,-E")
+		}
+
+	case "c-archive":
+		// Link the Go files into a single .o, and also link
+		// in -lgolibbegin.
+		//
+		// We need to use --whole-archive with -lgolibbegin
+		// because it doesn't define any symbols that will
+		// cause the contents to be pulled in; it's just
+		// initialization code.
+		//
+		// The user remains responsible for linking against
+		// -lgo -lpthread -lm in the final link. We can't use
+		// -r to pick them up because we can't combine
+		// split-stack and non-split-stack code in a single -r
+		// link, and libgo picks up non-split-stack code from
+		// libffi.
+		ldflags = append(ldflags, "-Wl,-r", "-nostdlib")
+		ldflags = append(ldflags, goLibBegin...)
+
+		if nopie := b.gccNoPie([]string{tools.linker()}); nopie != "" {
+			ldflags = append(ldflags, nopie)
+		}
+
+		// We are creating an object file, so we don't want a build ID.
+		if root.buildID == "" {
+			ldflags = b.disableBuildID(ldflags)
+		}
+
+		realOut = out
+		out = out + ".o"
+
+	case "c-shared":
+		ldflags = append(ldflags, "-shared", "-nostdlib")
+		ldflags = append(ldflags, goLibBegin...)
+		ldflags = append(ldflags, "-lgo", "-lgcc_s", "-lgcc", "-lc", "-lgcc")
+
+	case "shared":
+		if cfg.Goos != "aix" {
+			ldflags = append(ldflags, "-zdefs")
+		}
+		ldflags = append(ldflags, "-shared", "-nostdlib", "-lgo", "-lgcc_s", "-lgcc", "-lc")
+
+	default:
+		base.Fatalf("-buildmode=%s not supported for gccgo", buildmode)
+	}
+
+	switch buildmode {
+	case "exe", "c-shared":
+		if cxx {
+			ldflags = append(ldflags, "-lstdc++")
+		}
+		if objc {
+			ldflags = append(ldflags, "-lobjc")
+		}
+		if fortran {
+			fc := cfg.Getenv("FC")
+			if fc == "" {
+				fc = "gfortran"
+			}
+			// support gfortran out of the box and let others pass the correct link options
+			// via CGO_LDFLAGS
+			if strings.Contains(fc, "gfortran") {
+				ldflags = append(ldflags, "-lgfortran")
+			}
+		}
+	}
+
+	if err := sh.run(".", desc, nil, tools.linker(), "-o", out, ldflags, forcedGccgoflags, root.Package.Internal.Gccgoflags); err != nil {
+		return err
+	}
+
+	switch buildmode {
+	case "c-archive":
+		if err := sh.run(".", desc, nil, tools.ar(), arArgs, "rc", realOut, out); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (tools gccgoToolchain) ld(b *Builder, root *Action, targetPath, importcfg, mainpkg string) error {
+	return tools.link(b, root, targetPath, importcfg, root.Deps, ldBuildmode, root.Package.ImportPath)
+}
+
+func (tools gccgoToolchain) ldShared(b *Builder, root *Action, toplevelactions []*Action, targetPath, importcfg string, allactions []*Action) error {
+	return tools.link(b, root, targetPath, importcfg, allactions, "shared", targetPath)
+}
+
+func (tools gccgoToolchain) cc(b *Builder, a *Action, ofile, cfile string) error {
+	p := a.Package
+	inc := filepath.Join(cfg.GOROOT, "pkg", "include")
+	cfile = mkAbs(p.Dir, cfile)
+	defs := []string{"-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch}
+	defs = append(defs, b.gccArchArgs()...)
+	if pkgpath := tools.gccgoCleanPkgpath(b, p); pkgpath != "" {
+		defs = append(defs, `-D`, `GOPKGPATH="`+pkgpath+`"`)
+	}
+	compiler := envList("CC", cfg.DefaultCC(cfg.Goos, cfg.Goarch))
+	if b.gccSupportsFlag(compiler, "-fsplit-stack") {
+		defs = append(defs, "-fsplit-stack")
+	}
+	defs = tools.maybePIC(defs)
+	if b.gccSupportsFlag(compiler, "-ffile-prefix-map=a=b") {
+		defs = append(defs, "-ffile-prefix-map="+base.Cwd()+"=.")
+		defs = append(defs, "-ffile-prefix-map="+b.WorkDir+"=/tmp/go-build")
+	} else if b.gccSupportsFlag(compiler, "-fdebug-prefix-map=a=b") {
+		defs = append(defs, "-fdebug-prefix-map="+b.WorkDir+"=/tmp/go-build")
+	}
+	if b.gccSupportsFlag(compiler, "-gno-record-gcc-switches") {
+		defs = append(defs, "-gno-record-gcc-switches")
+	}
+	return b.Shell(a).run(p.Dir, p.ImportPath, nil, compiler, "-Wall", "-g",
+		"-I", a.Objdir, "-I", inc, "-o", ofile, defs, "-c", cfile)
+}
+
+// maybePIC adds -fPIC to the list of arguments if needed.
+func (tools gccgoToolchain) maybePIC(args []string) []string {
+	switch cfg.BuildBuildmode {
+	case "c-shared", "shared", "plugin":
+		args = append(args, "-fPIC")
+	}
+	return args
+}
+
+func gccgoPkgpath(p *load.Package) string {
+	if p.Internal.Build.IsCommand() && !p.Internal.ForceLibrary {
+		return ""
+	}
+	return p.ImportPath
+}
+
+var gccgoToSymbolFuncOnce sync.Once
+var gccgoToSymbolFunc func(string) string
+
+func (tools gccgoToolchain) gccgoCleanPkgpath(b *Builder, p *load.Package) string {
+	gccgoToSymbolFuncOnce.Do(func() {
+		tmpdir := b.WorkDir
+		if cfg.BuildN {
+			tmpdir = os.TempDir()
+		}
+		fn, err := pkgpath.ToSymbolFunc(tools.compiler(), tmpdir)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "cmd/go: %v\n", err)
+			base.SetExitStatus(2)
+			base.Exit()
+		}
+		gccgoToSymbolFunc = fn
+	})
+
+	return gccgoToSymbolFunc(gccgoPkgpath(p))
+}
+
+var (
+	gccgoSupportsCgoIncompleteOnce sync.Once
+	gccgoSupportsCgoIncomplete     bool
+)
+
+const gccgoSupportsCgoIncompleteCode = `
+package p
+
+import "runtime/cgo"
+
+type I cgo.Incomplete
+`
+
+// supportsCgoIncomplete reports whether the gccgo/GoLLVM compiler
+// being used supports cgo.Incomplete, which was added in GCC 13.
+//
+// This takes an Action only for output reporting purposes.
+// The result value is unrelated to the Action.
+func (tools gccgoToolchain) supportsCgoIncomplete(b *Builder, a *Action) bool {
+	gccgoSupportsCgoIncompleteOnce.Do(func() {
+		sh := b.Shell(a)
+
+		fail := func(err error) {
+			fmt.Fprintf(os.Stderr, "cmd/go: %v\n", err)
+			base.SetExitStatus(2)
+			base.Exit()
+		}
+
+		tmpdir := b.WorkDir
+		if cfg.BuildN {
+			tmpdir = os.TempDir()
+		}
+		f, err := os.CreateTemp(tmpdir, "*_gccgo_cgoincomplete.go")
+		if err != nil {
+			fail(err)
+		}
+		fn := f.Name()
+		f.Close()
+		defer os.Remove(fn)
+
+		if err := os.WriteFile(fn, []byte(gccgoSupportsCgoIncompleteCode), 0644); err != nil {
+			fail(err)
+		}
+
+		on := strings.TrimSuffix(fn, ".go") + ".o"
+		if cfg.BuildN || cfg.BuildX {
+			sh.ShowCmd(tmpdir, "%s -c -o %s %s || true", tools.compiler(), on, fn)
+			// Since this function affects later builds,
+			// and only generates temporary files,
+			// we run the command even with -n.
+		}
+		cmd := exec.Command(tools.compiler(), "-c", "-o", on, fn)
+		cmd.Dir = tmpdir
+		var buf bytes.Buffer
+		cmd.Stdout = &buf
+		cmd.Stderr = &buf
+		err = cmd.Run()
+		gccgoSupportsCgoIncomplete = err == nil
+		if cfg.BuildN || cfg.BuildX {
+			// Show output. We always pass a nil err because errors are an
+			// expected outcome in this case.
+			desc := sh.fmtCmd(tmpdir, "%s -c -o %s %s", tools.compiler(), on, fn)
+			sh.reportCmd(desc, tmpdir, buf.Bytes(), nil)
+		}
+	})
+	return gccgoSupportsCgoIncomplete
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/work/init.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/init.go
new file mode 100644
index 0000000000000000000000000000000000000000..7d0921f0cc900b5463574d2de2590fd078cb9f5b
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/init.go
@@ -0,0 +1,424 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Build initialization (after flag parsing).
+
+package work
+
+import (
+	"bytes"
+	"cmd/go/internal/base"
+	"cmd/go/internal/cfg"
+	"cmd/go/internal/fsys"
+	"cmd/go/internal/modload"
+	"cmd/internal/quoted"
+	"fmt"
+	"internal/platform"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strconv"
+	"sync"
+)
+
+var buildInitStarted = false
+
+func BuildInit() {
+	if buildInitStarted {
+		base.Fatalf("go: internal error: work.BuildInit called more than once")
+	}
+	buildInitStarted = true
+	base.AtExit(closeBuilders)
+
+	modload.Init()
+	instrumentInit()
+	buildModeInit()
+	if err := fsys.Init(base.Cwd()); err != nil {
+		base.Fatal(err)
+	}
+
+	// Make sure -pkgdir is absolute, because we run commands
+	// in different directories.
+	if cfg.BuildPkgdir != "" && !filepath.IsAbs(cfg.BuildPkgdir) {
+		p, err := filepath.Abs(cfg.BuildPkgdir)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "go: evaluating -pkgdir: %v\n", err)
+			base.SetExitStatus(2)
+			base.Exit()
+		}
+		cfg.BuildPkgdir = p
+	}
+
+	if cfg.BuildP <= 0 {
+		base.Fatalf("go: -p must be a positive integer: %v\n", cfg.BuildP)
+	}
+
+	// Make sure CC, CXX, and FC are absolute paths.
+	for _, key := range []string{"CC", "CXX", "FC"} {
+		value := cfg.Getenv(key)
+		args, err := quoted.Split(value)
+		if err != nil {
+			base.Fatalf("go: %s environment variable could not be parsed: %v", key, err)
+		}
+		if len(args) == 0 {
+			continue
+		}
+		path := args[0]
+		if !filepath.IsAbs(path) && path != filepath.Base(path) {
+			base.Fatalf("go: %s environment variable is relative; must be absolute path: %s\n", key, path)
+		}
+	}
+
+	// Set covermode if not already set.
+	// Ensure that -race and -covermode are compatible.
+	if cfg.BuildCoverMode == "" {
+		cfg.BuildCoverMode = "set"
+		if cfg.BuildRace {
+			// Default coverage mode is atomic when -race is set.
+			cfg.BuildCoverMode = "atomic"
+		}
+	}
+	if cfg.BuildRace && cfg.BuildCoverMode != "atomic" {
+		base.Fatalf(`-covermode must be "atomic", not %q, when -race is enabled`, cfg.BuildCoverMode)
+	}
+}
+
+// fuzzInstrumentFlags returns compiler flags that enable fuzzing instrumentation
+// on supported platforms.
+//
+// On unsupported platforms, fuzzInstrumentFlags returns nil, meaning no
+// instrumentation is added. 'go test -fuzz' still works without coverage,
+// but it generates random inputs without guidance, so it's much less effective.
+func fuzzInstrumentFlags() []string {
+	if !platform.FuzzInstrumented(cfg.Goos, cfg.Goarch) {
+		return nil
+	}
+	return []string{"-d=libfuzzer"}
+}
+
// instrumentInit validates the sanitizer flags (-race, -msan, -asan), checks
// platform, toolchain, and cgo support, and configures the build accordingly:
// it appends the mode flag to the forced compiler and linker flags and tags
// the build context (InstallSuffix, ToolTags) so instrumented builds are
// cached and constrained separately. Any invalid combination exits with
// status 2.
func instrumentInit() {
	// Nothing to do unless at least one sanitizer was requested.
	if !cfg.BuildRace && !cfg.BuildMSan && !cfg.BuildASan {
		return
	}
	// The three sanitizers are pairwise mutually exclusive.
	if cfg.BuildRace && cfg.BuildMSan {
		fmt.Fprintf(os.Stderr, "go: may not use -race and -msan simultaneously\n")
		base.SetExitStatus(2)
		base.Exit()
	}
	if cfg.BuildRace && cfg.BuildASan {
		fmt.Fprintf(os.Stderr, "go: may not use -race and -asan simultaneously\n")
		base.SetExitStatus(2)
		base.Exit()
	}
	if cfg.BuildMSan && cfg.BuildASan {
		fmt.Fprintf(os.Stderr, "go: may not use -msan and -asan simultaneously\n")
		base.SetExitStatus(2)
		base.Exit()
	}
	// Reject sanitizers that the target platform does not support.
	if cfg.BuildMSan && !platform.MSanSupported(cfg.Goos, cfg.Goarch) {
		fmt.Fprintf(os.Stderr, "-msan is not supported on %s/%s\n", cfg.Goos, cfg.Goarch)
		base.SetExitStatus(2)
		base.Exit()
	}
	if cfg.BuildRace && !platform.RaceDetectorSupported(cfg.Goos, cfg.Goarch) {
		fmt.Fprintf(os.Stderr, "-race is not supported on %s/%s\n", cfg.Goos, cfg.Goarch)
		base.SetExitStatus(2)
		base.Exit()
	}
	if cfg.BuildASan && !platform.ASanSupported(cfg.Goos, cfg.Goarch) {
		fmt.Fprintf(os.Stderr, "-asan is not supported on %s/%s\n", cfg.Goos, cfg.Goarch)
		base.SetExitStatus(2)
		base.Exit()
	}
	// The current implementation is only compatible with the ASan library from version
	// v7 to v9 (See the description in src/runtime/asan/asan.go). Therefore, using the
	// -asan option must use a compatible version of ASan library, which requires that
	// the gcc version is not less than 7 and the clang version is not less than 9,
	// otherwise a segmentation fault will occur.
	if cfg.BuildASan {
		if err := compilerRequiredAsanVersion(); err != nil {
			fmt.Fprintf(os.Stderr, "%v\n", err)
			base.SetExitStatus(2)
			base.Exit()
		}
	}

	// Pick the single active mode. Since the combinations above were
	// rejected, at most one of the assignments below fires.
	mode := "race"
	if cfg.BuildMSan {
		mode = "msan"
		// MSAN needs PIE on all platforms except linux/amd64.
		// https://github.com/llvm/llvm-project/blob/llvmorg-13.0.1/clang/lib/Driver/SanitizerArgs.cpp#L621
		if cfg.BuildBuildmode == "default" && (cfg.Goos != "linux" || cfg.Goarch != "amd64") {
			cfg.BuildBuildmode = "pie"
		}
	}
	if cfg.BuildASan {
		mode = "asan"
	}
	modeFlag := "-" + mode

	// Check that cgo is enabled.
	// Note: On macOS, -race does not require cgo. -asan and -msan still do.
	if !cfg.BuildContext.CgoEnabled && (cfg.Goos != "darwin" || cfg.BuildASan || cfg.BuildMSan) {
		if runtime.GOOS != cfg.Goos || runtime.GOARCH != cfg.Goarch {
			// Cross-compiling: suggesting CGO_ENABLED=1 would not help,
			// since a C cross-toolchain is also needed.
			fmt.Fprintf(os.Stderr, "go: %s requires cgo\n", modeFlag)
		} else {
			fmt.Fprintf(os.Stderr, "go: %s requires cgo; enable cgo by setting CGO_ENABLED=1\n", modeFlag)
		}

		base.SetExitStatus(2)
		base.Exit()
	}
	// Pass the mode flag (e.g. -race) to both the compiler and the linker.
	forcedGcflags = append(forcedGcflags, modeFlag)
	forcedLdflags = append(forcedLdflags, modeFlag)

	// Keep instrumented build artifacts in a separate pkg directory
	// (e.g. linux_amd64_race) and expose the mode as a build tag.
	if cfg.BuildContext.InstallSuffix != "" {
		cfg.BuildContext.InstallSuffix += "_"
	}
	cfg.BuildContext.InstallSuffix += mode
	cfg.BuildContext.ToolTags = append(cfg.BuildContext.ToolTags, mode)
}
+
// buildModeInit interprets -buildmode and -linkshared. It selects the package
// filter (pkgsFilter), the code-generation flag handed to the compiler and
// assembler (or to gccgo), the linker build mode (ldBuildmode), the output
// suffix, and the cache-separating install suffix, then validates that the
// requested mode is supported on the target. It also validates the -mod,
// -modcacherw, and -modfile flags. Unsupported combinations abort the build.
func buildModeInit() {
	gccgo := cfg.BuildToolchainName == "gccgo"
	var codegenArg string

	// Configure the build mode first, then verify that it is supported.
	// That way, if the flag is completely bogus we will prefer to error out with
	// "-buildmode=%s not supported" instead of naming the specific platform.

	switch cfg.BuildBuildmode {
	case "archive":
		pkgsFilter = pkgsNotMain
	case "c-archive":
		pkgsFilter = oneMainPkg
		if gccgo {
			codegenArg = "-fPIC"
		} else {
			switch cfg.Goos {
			case "darwin", "ios":
				switch cfg.Goarch {
				case "arm64":
					codegenArg = "-shared"
				}

			case "dragonfly", "freebsd", "illumos", "linux", "netbsd", "openbsd", "solaris":
				// Use -shared so that the result is
				// suitable for inclusion in a PIE or
				// shared library.
				codegenArg = "-shared"
			}
		}
		cfg.ExeSuffix = ".a"
		ldBuildmode = "c-archive"
	case "c-shared":
		pkgsFilter = oneMainPkg
		if gccgo {
			codegenArg = "-fPIC"
		} else {
			switch cfg.Goos {
			case "linux", "android", "freebsd":
				codegenArg = "-shared"
			case "windows":
				// Do not add usual .exe suffix to the .dll file.
				cfg.ExeSuffix = ""
			}
		}
		ldBuildmode = "c-shared"
	case "default":
		ldBuildmode = "exe"
		// Some platforms default to position-independent executables.
		if platform.DefaultPIE(cfg.Goos, cfg.Goarch, cfg.BuildRace) {
			ldBuildmode = "pie"
			if cfg.Goos != "windows" && !gccgo {
				codegenArg = "-shared"
			}
		}
	case "exe":
		pkgsFilter = pkgsMain
		ldBuildmode = "exe"
		// Set the pkgsFilter to oneMainPkg if the user passed a specific binary output
		// and is using buildmode=exe for a better error message.
		// See issue #20017.
		if cfg.BuildO != "" {
			pkgsFilter = oneMainPkg
		}
	case "pie":
		if cfg.BuildRace && !platform.DefaultPIE(cfg.Goos, cfg.Goarch, cfg.BuildRace) {
			base.Fatalf("-buildmode=pie not supported when -race is enabled on %s/%s", cfg.Goos, cfg.Goarch)
		}
		if gccgo {
			codegenArg = "-fPIE"
		} else {
			switch cfg.Goos {
			case "aix", "windows":
			default:
				codegenArg = "-shared"
			}
		}
		ldBuildmode = "pie"
	case "shared":
		pkgsFilter = pkgsNotMain
		if gccgo {
			codegenArg = "-fPIC"
		} else {
			codegenArg = "-dynlink"
		}
		if cfg.BuildO != "" {
			base.Fatalf("-buildmode=shared and -o not supported together")
		}
		ldBuildmode = "shared"
	case "plugin":
		pkgsFilter = oneMainPkg
		if gccgo {
			codegenArg = "-fPIC"
		} else {
			codegenArg = "-dynlink"
		}
		cfg.ExeSuffix = ".so"
		ldBuildmode = "plugin"
	default:
		base.Fatalf("buildmode=%s not supported", cfg.BuildBuildmode)
	}

	// Now that the mode is recognized, check platform support.
	if cfg.BuildBuildmode != "default" && !platform.BuildModeSupported(cfg.BuildToolchainName, cfg.BuildBuildmode, cfg.Goos, cfg.Goarch) {
		base.Fatalf("-buildmode=%s not supported on %s/%s\n", cfg.BuildBuildmode, cfg.Goos, cfg.Goarch)
	}

	if cfg.BuildLinkshared {
		if !platform.BuildModeSupported(cfg.BuildToolchainName, "shared", cfg.Goos, cfg.Goarch) {
			base.Fatalf("-linkshared not supported on %s/%s\n", cfg.Goos, cfg.Goarch)
		}
		if gccgo {
			codegenArg = "-fPIC"
		} else {
			forcedAsmflags = append(forcedAsmflags, "-D=GOBUILDMODE_shared=1",
				"-linkshared")
			codegenArg = "-dynlink"
			forcedGcflags = append(forcedGcflags, "-linkshared")
			// TODO(mwhudson): remove -w when that gets fixed in linker.
			forcedLdflags = append(forcedLdflags, "-linkshared", "-w")
		}
	}
	if codegenArg != "" {
		// Prepend the codegen flag so explicit user flags can override it.
		if gccgo {
			forcedGccgoflags = append([]string{codegenArg}, forcedGccgoflags...)
		} else {
			forcedAsmflags = append([]string{codegenArg}, forcedAsmflags...)
			forcedGcflags = append([]string{codegenArg}, forcedGcflags...)
		}
		// Don't alter InstallSuffix when modifying default codegen args.
		if cfg.BuildBuildmode != "default" || cfg.BuildLinkshared {
			if cfg.BuildContext.InstallSuffix != "" {
				cfg.BuildContext.InstallSuffix += "_"
			}
			// e.g. "-shared" contributes the suffix "shared".
			cfg.BuildContext.InstallSuffix += codegenArg[1:]
		}
	}

	switch cfg.BuildMod {
	case "":
		// Behavior will be determined automatically, as if no flag were passed.
	case "readonly", "vendor", "mod":
		if !cfg.ModulesEnabled && !base.InGOFLAGS("-mod") {
			base.Fatalf("build flag -mod=%s only valid when using modules", cfg.BuildMod)
		}
	default:
		base.Fatalf("-mod=%s not supported (can be '', 'mod', 'readonly', or 'vendor')", cfg.BuildMod)
	}
	if !cfg.ModulesEnabled {
		// Flags that came from GOFLAGS are tolerated rather than fatal,
		// so a global GOFLAGS setting does not break GOPATH-mode builds.
		if cfg.ModCacheRW && !base.InGOFLAGS("-modcacherw") {
			base.Fatalf("build flag -modcacherw only valid when using modules")
		}
		if cfg.ModFile != "" && !base.InGOFLAGS("-mod") {
			base.Fatalf("build flag -modfile only valid when using modules")
		}
	}
}
+
// A version identifies a C compiler by name (e.g. "gcc", "clang", or
// "unknown") and its major.minor release, as parsed by compilerVersion.
type version struct {
	name         string
	major, minor int
}
+
// compiler caches the result of compilerVersion: the detected $(go env CC)
// toolchain version and any parse error, computed at most once per process.
var compiler struct {
	sync.Once
	version
	err error
}
+
// compilerVersion detects the version of $(go env CC).
// It returns a non-nil error if the compiler matches a known version schema but
// the version could not be parsed, or if $(go env CC) could not be determined.
//
// The result is computed once and cached in the package-level compiler
// variable; subsequent calls return the cached value. A compiler whose
// output matches neither gcc nor clang yields name "unknown" with a nil
// error and zero major/minor.
func compilerVersion() (version, error) {
	compiler.Once.Do(func() {
		compiler.err = func() error {
			compiler.name = "unknown"
			cc := os.Getenv("CC")
			out, err := exec.Command(cc, "--version").Output()
			if err != nil {
				// Compiler does not support "--version" flag: not Clang or GCC.
				return err
			}

			var match [][]byte
			if bytes.HasPrefix(out, []byte("gcc")) {
				compiler.name = "gcc"
				// For gcc, the version is parsed from the combined output of
				// "cc -v" instead. Note: out and err here deliberately shadow
				// the outer variables; they are local to this branch.
				out, err := exec.Command(cc, "-v").CombinedOutput()
				if err != nil {
					// gcc, but does not support gcc's "-v" flag?!
					return err
				}
				gccRE := regexp.MustCompile(`gcc version (\d+)\.(\d+)`)
				match = gccRE.FindSubmatch(out)
			} else {
				clangRE := regexp.MustCompile(`clang version (\d+)\.(\d+)`)
				if match = clangRE.FindSubmatch(out); len(match) > 0 {
					compiler.name = "clang"
				}
			}

			// Need the whole match plus both capture groups.
			if len(match) < 3 {
				return nil // "unknown"
			}
			if compiler.major, err = strconv.Atoi(string(match[1])); err != nil {
				return err
			}
			if compiler.minor, err = strconv.Atoi(string(match[2])); err != nil {
				return err
			}
			return nil
		}()
	})
	return compiler.version, compiler.err
}
+
+// compilerRequiredAsanVersion is a copy of the function defined in
+// cmd/cgo/internal/testsanitizers/cc_test.go
+// compilerRequiredAsanVersion reports whether the compiler is the version
+// required by Asan.
+func compilerRequiredAsanVersion() error {
+	compiler, err := compilerVersion()
+	if err != nil {
+		return fmt.Errorf("-asan: the version of $(go env CC) could not be parsed")
+	}
+
+	switch compiler.name {
+	case "gcc":
+		if runtime.GOARCH == "ppc64le" && compiler.major < 9 {
+			return fmt.Errorf("-asan is not supported with %s compiler %d.%d\n", compiler.name, compiler.major, compiler.minor)
+		}
+		if compiler.major < 7 {
+			return fmt.Errorf("-asan is not supported with %s compiler %d.%d\n", compiler.name, compiler.major, compiler.minor)
+		}
+	case "clang":
+		if compiler.major < 9 {
+			return fmt.Errorf("-asan is not supported with %s compiler %d.%d\n", compiler.name, compiler.major, compiler.minor)
+		}
+	default:
+		return fmt.Errorf("-asan: C compiler is not gcc or clang")
+	}
+	return nil
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/work/security.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/security.go
new file mode 100644
index 0000000000000000000000000000000000000000..568eecd325bb7fc9739e1261711143cd35d2954e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/security.go
@@ -0,0 +1,349 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Checking of compiler and linker flags.
+// We must avoid flags like -fplugin=, which can allow
+// arbitrary code execution during the build.
+// Do not make changes here without carefully
+// considering the implications.
+// (That's why the code is isolated in a file named security.go.)
+//
+// Note that -Wl,foo means split foo on commas and pass to
+// the linker, so that -Wl,-foo,bar means pass -foo bar to
+// the linker. Similarly -Wa,foo for the assembler and so on.
+// If any of these are permitted, the wildcard portion must
+// disallow commas.
+//
+// Note also that GNU binutils accept any argument @foo
+// as meaning "read more flags from the file foo", so we must
+// guard against any command-line argument beginning with @,
+// even things like "-I @foo".
+// We use load.SafeArg (which is even more conservative)
+// to reject these.
+//
+// Even worse, gcc -I@foo (one arg) turns into cc1 -I @foo (two args),
+// so although gcc doesn't expand the @foo, cc1 will.
+// So out of paranoia, we reject @ at the beginning of every
+// flag argument that might be split into its own argument.
+
+package work
+
+import (
+	"fmt"
+	"internal/lazyregexp"
+	"regexp"
+	"strings"
+
+	"cmd/go/internal/cfg"
+	"cmd/go/internal/load"
+)
+
// re is shorthand for building the lazily-compiled allow-list patterns
// below; each regexp is compiled only on first use.
var re = lazyregexp.New
+
// validCompilerFlags is the allow list of compiler flags.
// checkFlags requires each pattern to match a candidate flag in its
// entirety, so every wildcard below is written to exclude '@' (binutils
// response files) and, where an option could be split off, a leading '-'.
var validCompilerFlags = []*lazyregexp.Regexp{
	re(`-D([A-Za-z_][A-Za-z0-9_]*)(=[^@\-]*)?`),
	re(`-U([A-Za-z_][A-Za-z0-9_]*)`),
	re(`-F([^@\-].*)`),
	re(`-I([^@\-].*)`),
	re(`-O`),
	re(`-O([^@\-].*)`),
	re(`-W`),
	re(`-W([^@,]+)`), // -Wall but not -Wa,-foo.
	re(`-Wa,-mbig-obj`),
	re(`-Wp,-D([A-Za-z_][A-Za-z0-9_]*)(=[^@,\-]*)?`),
	re(`-Wp,-U([A-Za-z_][A-Za-z0-9_]*)`),
	re(`-ansi`),
	re(`-f(no-)?asynchronous-unwind-tables`),
	re(`-f(no-)?blocks`),
	re(`-f(no-)builtin-[a-zA-Z0-9_]*`),
	re(`-f(no-)?common`),
	re(`-f(no-)?constant-cfstrings`),
	re(`-fdebug-prefix-map=([^@]+)=([^@]+)`),
	re(`-fdiagnostics-show-note-include-stack`),
	re(`-ffile-prefix-map=([^@]+)=([^@]+)`),
	re(`-fno-canonical-system-headers`),
	re(`-f(no-)?eliminate-unused-debug-types`),
	re(`-f(no-)?exceptions`),
	re(`-f(no-)?fast-math`),
	re(`-f(no-)?inline-functions`),
	re(`-finput-charset=([^@\-].*)`),
	re(`-f(no-)?fat-lto-objects`),
	re(`-f(no-)?keep-inline-dllexport`),
	re(`-f(no-)?lto`),
	re(`-fmacro-backtrace-limit=(.+)`),
	re(`-fmessage-length=(.+)`),
	re(`-f(no-)?modules`),
	re(`-f(no-)?objc-arc`),
	re(`-f(no-)?objc-nonfragile-abi`),
	re(`-f(no-)?objc-legacy-dispatch`),
	re(`-f(no-)?omit-frame-pointer`),
	re(`-f(no-)?openmp(-simd)?`),
	re(`-f(no-)?permissive`),
	re(`-f(no-)?(pic|PIC|pie|PIE)`),
	re(`-f(no-)?plt`),
	re(`-f(no-)?rtti`),
	re(`-f(no-)?split-stack`),
	re(`-f(no-)?stack-(.+)`),
	re(`-f(no-)?strict-aliasing`),
	re(`-f(un)signed-char`),
	re(`-f(no-)?use-linker-plugin`), // safe if -B is not used; we don't permit -B
	re(`-f(no-)?visibility-inlines-hidden`),
	re(`-fsanitize=(.+)`),
	re(`-ftemplate-depth-(.+)`),
	re(`-fvisibility=(.+)`),
	re(`-g([^@\-].*)?`),
	re(`-m32`),
	re(`-m64`),
	re(`-m(abi|arch|cpu|fpu|tune)=([^@\-].*)`),
	re(`-m(no-)?v?aes`),
	re(`-marm`),
	re(`-m(no-)?avx[0-9a-z]*`),
	re(`-mfloat-abi=([^@\-].*)`),
	re(`-mfpmath=[0-9a-z,+]*`),
	re(`-m(no-)?avx[0-9a-z.]*`),
	re(`-m(no-)?ms-bitfields`),
	re(`-m(no-)?stack-(.+)`),
	re(`-mmacosx-(.+)`),
	re(`-mios-simulator-version-min=(.+)`),
	re(`-miphoneos-version-min=(.+)`),
	re(`-mtvos-simulator-version-min=(.+)`),
	re(`-mtvos-version-min=(.+)`),
	re(`-mwatchos-simulator-version-min=(.+)`),
	re(`-mwatchos-version-min=(.+)`),
	re(`-mnop-fun-dllimport`),
	re(`-m(no-)?sse[0-9.]*`),
	re(`-m(no-)?ssse3`),
	re(`-mthumb(-interwork)?`),
	re(`-mthreads`),
	re(`-mwindows`),
	re(`-no-canonical-prefixes`),
	re(`--param=ssp-buffer-size=[0-9]*`),
	re(`-pedantic(-errors)?`),
	re(`-pipe`),
	re(`-pthread`),
	re(`-?-std=([^@\-].*)`),
	re(`-?-stdlib=([^@\-].*)`),
	re(`--sysroot=([^@\-].*)`),
	re(`-w`),
	re(`-x([^@\-].*)`),
	re(`-v`),
}
+
// validCompilerFlagsWithNextArg lists compiler flags that consume the
// following command-line argument (e.g. "-I dir"). The next argument is
// vetted separately in checkFlags, primarily via load.SafeArg.
var validCompilerFlagsWithNextArg = []string{
	"-arch",
	"-D",
	"-U",
	"-I",
	"-F",
	"-framework",
	"-include",
	"-isysroot",
	"-isystem",
	"--sysroot",
	"-target",
	"-x",
}
+
// invalidLinkerFlags lists flags that must be rejected even though they
// would otherwise match a pattern in validLinkerFlags; checkFlags consults
// this deny list before the allow list.
var invalidLinkerFlags = []*lazyregexp.Regexp{
	// On macOS this means the linker loads and executes the next argument.
	// Have to exclude separately because -lfoo is allowed in general.
	re(`-lto_library`),
}
+
// validLinkerFlags is the allow list of linker flags. As with the compiler
// list, each pattern must match the whole argument, and wildcards exclude
// '@' to block binutils response files.
var validLinkerFlags = []*lazyregexp.Regexp{
	re(`-F([^@\-].*)`),
	re(`-l([^@\-].*)`),
	re(`-L([^@\-].*)`),
	re(`-O`),
	re(`-O([^@\-].*)`),
	re(`-f(no-)?(pic|PIC|pie|PIE)`),
	re(`-f(no-)?openmp(-simd)?`),
	re(`-fsanitize=([^@\-].*)`),
	re(`-flat_namespace`),
	re(`-g([^@\-].*)?`),
	re(`-headerpad_max_install_names`),
	re(`-m(abi|arch|cpu|fpu|tune)=([^@\-].*)`),
	re(`-mfloat-abi=([^@\-].*)`),
	re(`-mmacosx-(.+)`),
	re(`-mios-simulator-version-min=(.+)`),
	re(`-miphoneos-version-min=(.+)`),
	re(`-mthreads`),
	re(`-mwindows`),
	re(`-(pic|PIC|pie|PIE)`),
	re(`-pthread`),
	re(`-rdynamic`),
	re(`-shared`),
	re(`-?-static([-a-z0-9+]*)`),
	re(`-?-stdlib=([^@\-].*)`),
	re(`-v`),

	// Note that any wildcards in -Wl need to exclude comma,
	// since -Wl splits its argument at commas and passes
	// them all to the linker uninterpreted. Allowing comma
	// in a wildcard would allow tunneling arbitrary additional
	// linker arguments through one of these.
	re(`-Wl,--(no-)?allow-multiple-definition`),
	re(`-Wl,--(no-)?allow-shlib-undefined`),
	re(`-Wl,--(no-)?as-needed`),
	re(`-Wl,-Bdynamic`),
	re(`-Wl,-berok`),
	re(`-Wl,-Bstatic`),
	re(`-Wl,-Bsymbolic-functions`),
	re(`-Wl,-O[0-9]+`),
	re(`-Wl,-d[ny]`),
	re(`-Wl,--disable-new-dtags`),
	re(`-Wl,-e[=,][a-zA-Z0-9]+`),
	re(`-Wl,--enable-new-dtags`),
	re(`-Wl,--end-group`),
	re(`-Wl,--(no-)?export-dynamic`),
	re(`-Wl,-E`),
	re(`-Wl,-framework,[^,@\-][^,]+`),
	re(`-Wl,--hash-style=(sysv|gnu|both)`),
	re(`-Wl,-headerpad_max_install_names`),
	re(`-Wl,--no-undefined`),
	re(`-Wl,-R,?([^@\-,][^,@]*$)`),
	re(`-Wl,--just-symbols[=,]([^,@\-][^,@]+)`),
	re(`-Wl,-rpath(-link)?[=,]([^,@\-][^,]+)`),
	re(`-Wl,-s`),
	re(`-Wl,-search_paths_first`),
	re(`-Wl,-sectcreate,([^,@\-][^,]+),([^,@\-][^,]+),([^,@\-][^,]+)`),
	re(`-Wl,--start-group`),
	re(`-Wl,-?-static`),
	re(`-Wl,-?-subsystem,(native|windows|console|posix|xbox)`),
	re(`-Wl,-syslibroot[=,]([^,@\-][^,]+)`),
	re(`-Wl,-undefined[=,]([^,@\-][^,]+)`),
	re(`-Wl,-?-unresolved-symbols=[^,]+`),
	re(`-Wl,--(no-)?warn-([^,]+)`),
	re(`-Wl,-?-wrap[=,][^,@\-][^,]*`),
	re(`-Wl,-z,(no)?execstack`),
	re(`-Wl,-z,relro`),

	re(`[a-zA-Z0-9_/].*\.(a|o|obj|dll|dylib|so|tbd)`), // direct linker inputs: x.o or libfoo.so (but not -foo.o or @foo.o)
	re(`\./.*\.(a|o|obj|dll|dylib|so|tbd)`),
}
+
// validLinkerFlagsWithNextArg lists linker flags that consume the following
// command-line argument (e.g. "-framework name"); checkFlags vets the next
// argument separately, including the -Wl,x -Wl,y two-argument form.
var validLinkerFlagsWithNextArg = []string{
	"-arch",
	"-F",
	"-l",
	"-L",
	"-framework",
	"-isysroot",
	"--sysroot",
	"-target",
	"-Wl,-framework",
	"-Wl,-rpath",
	"-Wl,-R",
	"-Wl,--just-symbols",
	"-Wl,-undefined",
}
+
+func checkCompilerFlags(name, source string, list []string) error {
+	checkOverrides := true
+	return checkFlags(name, source, list, nil, validCompilerFlags, validCompilerFlagsWithNextArg, checkOverrides)
+}
+
+func checkLinkerFlags(name, source string, list []string) error {
+	checkOverrides := true
+	return checkFlags(name, source, list, invalidLinkerFlags, validLinkerFlags, validLinkerFlagsWithNextArg, checkOverrides)
+}
+
+// checkCompilerFlagsForInternalLink returns an error if 'list'
+// contains a flag or flags that may not be fully supported by
+// internal linking (meaning that we should punt the link to the
+// external linker).
+func checkCompilerFlagsForInternalLink(name, source string, list []string) error {
+	checkOverrides := false
+	if err := checkFlags(name, source, list, nil, validCompilerFlags, validCompilerFlagsWithNextArg, checkOverrides); err != nil {
+		return err
+	}
+	// Currently the only flag on the allow list that causes problems
+	// for the linker is "-flto"; check for it manually here.
+	for _, fl := range list {
+		if strings.HasPrefix(fl, "-flto") {
+			return fmt.Errorf("flag %q triggers external linking", fl)
+		}
+	}
+	return nil
+}
+
// checkFlags is the common engine behind checkCompilerFlags and
// checkLinkerFlags. It scans list and returns an error describing the first
// flag that is not on the allow list.
//
// Precedence per argument: user DISALLOW override, then user ALLOW override,
// then the built-in deny list (invalid), then the built-in allow list
// (valid), then the two-argument forms (validNext). Every regexp must match
// the whole argument. name is the environment-variable infix (e.g. "CFLAGS")
// and source is only used in error messages.
func checkFlags(name, source string, list []string, invalid, valid []*lazyregexp.Regexp, validNext []string, checkOverrides bool) error {
	// Let users override rules with $CGO_CFLAGS_ALLOW, $CGO_CFLAGS_DISALLOW, etc.
	var (
		allow    *regexp.Regexp
		disallow *regexp.Regexp
	)
	if checkOverrides {
		if env := cfg.Getenv("CGO_" + name + "_ALLOW"); env != "" {
			r, err := regexp.Compile(env)
			if err != nil {
				return fmt.Errorf("parsing $CGO_%s_ALLOW: %v", name, err)
			}
			allow = r
		}
		if env := cfg.Getenv("CGO_" + name + "_DISALLOW"); env != "" {
			r, err := regexp.Compile(env)
			if err != nil {
				return fmt.Errorf("parsing $CGO_%s_DISALLOW: %v", name, err)
			}
			disallow = r
		}
	}

Args:
	for i := 0; i < len(list); i++ {
		arg := list[i]
		// FindString(arg) == arg enforces a complete match, not a substring.
		if disallow != nil && disallow.FindString(arg) == arg {
			goto Bad
		}
		if allow != nil && allow.FindString(arg) == arg {
			continue Args
		}
		for _, re := range invalid {
			if re.FindString(arg) == arg { // must be complete match
				goto Bad
			}
		}
		for _, re := range valid {
			if re.FindString(arg) == arg { // must be complete match
				continue Args
			}
		}
		for _, x := range validNext {
			if arg == x {
				// Common case: the flag takes one following argument,
				// which must itself be safe (no leading '-' or '@').
				if i+1 < len(list) && load.SafeArg(list[i+1]) {
					i++
					continue Args
				}

				// Permit -Wl,-framework -Wl,name.
				if i+1 < len(list) &&
					strings.HasPrefix(arg, "-Wl,") &&
					strings.HasPrefix(list[i+1], "-Wl,") &&
					load.SafeArg(list[i+1][4:]) &&
					!strings.Contains(list[i+1][4:], ",") {
					i++
					continue Args
				}

				// Permit -I= /path, -I $SYSROOT.
				if i+1 < len(list) && arg == "-I" {
					if (strings.HasPrefix(list[i+1], "=") || strings.HasPrefix(list[i+1], "$SYSROOT")) &&
						load.SafeArg(list[i+1][1:]) {
						i++
						continue Args
					}
				}

				if i+1 < len(list) {
					return fmt.Errorf("invalid flag in %s: %s %s (see https://golang.org/s/invalidflag)", source, arg, list[i+1])
				}
				return fmt.Errorf("invalid flag in %s: %s without argument (see https://golang.org/s/invalidflag)", source, arg)
			}
		}
	// Bad is reached by falling through when no pattern accepted arg,
	// as well as by the explicit gotos above.
	Bad:
		return fmt.Errorf("invalid flag in %s: %s", source, arg)
	}
	return nil
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/work/security_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/security_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..c05ba7b9a472e3dbab12c5c70b5d4bd50c865620
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/security_test.go
@@ -0,0 +1,318 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+	"os"
+	"strings"
+	"testing"
+)
+
// goodCompilerFlags lists flag sequences (some using the two-argument form)
// that checkCompilerFlags must accept.
var goodCompilerFlags = [][]string{
	{"-DFOO"},
	{"-Dfoo=bar"},
	{"-Ufoo"},
	{"-Ufoo1"},
	{"-F/Qt"},
	{"-F", "/Qt"},
	{"-I/"},
	{"-I/etc/passwd"},
	{"-I."},
	{"-O"},
	{"-O2"},
	{"-Osmall"},
	{"-W"},
	{"-Wall"},
	{"-Wp,-Dfoo=bar"},
	{"-Wp,-Ufoo"},
	{"-Wp,-Dfoo1"},
	{"-Wp,-Ufoo1"},
	{"-flto"},
	{"-fobjc-arc"},
	{"-fno-objc-arc"},
	{"-fomit-frame-pointer"},
	{"-fno-omit-frame-pointer"},
	{"-fpic"},
	{"-fno-pic"},
	{"-fPIC"},
	{"-fno-PIC"},
	{"-fpie"},
	{"-fno-pie"},
	{"-fPIE"},
	{"-fno-PIE"},
	{"-fsplit-stack"},
	{"-fno-split-stack"},
	{"-fstack-xxx"},
	{"-fno-stack-xxx"},
	{"-fsanitize=hands"},
	{"-g"},
	{"-ggdb"},
	{"-march=souza"},
	{"-mcpu=123"},
	{"-mfpu=123"},
	{"-mtune=happybirthday"},
	{"-mstack-overflow"},
	{"-mno-stack-overflow"},
	{"-mmacosx-version"},
	{"-mnop-fun-dllimport"},
	{"-pthread"},
	{"-std=c99"},
	{"-xc"},
	{"-D", "FOO"},
	{"-D", "foo=bar"},
	{"-I", "."},
	{"-I", "/etc/passwd"},
	{"-I", "世界"},
	{"-I", "=/usr/include/libxml2"},
	{"-I", "dir"},
	{"-I", "$SYSROOT/dir"},
	{"-isystem", "/usr/include/mozjs-68"},
	{"-include", "/usr/include/mozjs-68/RequiredDefines.h"},
	{"-framework", "Chocolate"},
	{"-x", "c"},
	{"-v"},
}
+
// badCompilerFlags lists flag sequences that checkCompilerFlags must reject,
// mostly arguments smuggling '@' (response files) or an extra leading '-'.
var badCompilerFlags = [][]string{
	{"-D@X"},
	{"-D-X"},
	{"-Ufoo=bar"},
	{"-F@dir"},
	{"-F-dir"},
	{"-I@dir"},
	{"-I-dir"},
	{"-O@1"},
	{"-Wa,-foo"},
	{"-W@foo"},
	{"-Wp,-DX,-D@X"},
	{"-Wp,-UX,-U@X"},
	{"-g@gdb"},
	{"-g-gdb"},
	{"-march=@dawn"},
	{"-march=-dawn"},
	{"-std=@c99"},
	{"-std=-c99"},
	{"-x@c"},
	{"-x-c"},
	{"-D", "@foo"},
	{"-D", "-foo"},
	{"-I", "@foo"},
	{"-I", "-foo"},
	{"-I", "=@obj"},
	{"-include", "@foo"},
	{"-framework", "-Caffeine"},
	{"-framework", "@Home"},
	{"-x", "--c"},
	{"-x", "@obj"},
}
+
+func TestCheckCompilerFlags(t *testing.T) {
+	for _, f := range goodCompilerFlags {
+		if err := checkCompilerFlags("test", "test", f); err != nil {
+			t.Errorf("unexpected error for %q: %v", f, err)
+		}
+	}
+	for _, f := range badCompilerFlags {
+		if err := checkCompilerFlags("test", "test", f); err == nil {
+			t.Errorf("missing error for %q", f)
+		}
+	}
+}
+
// goodLinkerFlags lists flag sequences that checkLinkerFlags must accept,
// including direct linker inputs (object/library files) and -Wl forms.
var goodLinkerFlags = [][]string{
	{"-Fbar"},
	{"-lbar"},
	{"-Lbar"},
	{"-fpic"},
	{"-fno-pic"},
	{"-fPIC"},
	{"-fno-PIC"},
	{"-fpie"},
	{"-fno-pie"},
	{"-fPIE"},
	{"-fno-PIE"},
	{"-fsanitize=hands"},
	{"-g"},
	{"-ggdb"},
	{"-march=souza"},
	{"-mcpu=123"},
	{"-mfpu=123"},
	{"-mtune=happybirthday"},
	{"-pic"},
	{"-pthread"},
	{"-Wl,--hash-style=both"},
	{"-Wl,-rpath,foo"},
	{"-Wl,-rpath,$ORIGIN/foo"},
	{"-Wl,-R", "/foo"},
	{"-Wl,-R", "foo"},
	{"-Wl,-R,foo"},
	{"-Wl,--just-symbols=foo"},
	{"-Wl,--just-symbols,foo"},
	{"-Wl,--warn-error"},
	{"-Wl,--no-warn-error"},
	{"foo.so"},
	{"_世界.dll"},
	{"./x.o"},
	{"libcgosotest.dylib"},
	{"-F", "framework"},
	{"-l", "."},
	{"-l", "/etc/passwd"},
	{"-l", "世界"},
	{"-L", "framework"},
	{"-framework", "Chocolate"},
	{"-v"},
	{"-Wl,-sectcreate,__TEXT,__info_plist,${SRCDIR}/Info.plist"},
	{"-Wl,-framework", "-Wl,Chocolate"},
	{"-Wl,-framework,Chocolate"},
	{"-Wl,-unresolved-symbols=ignore-all"},
	{"libcgotbdtest.tbd"},
	{"./libcgotbdtest.tbd"},
}
+
// badLinkerFlags lists flag sequences that checkLinkerFlags must reject,
// including compiler-only flags and '@'/comma smuggling through -Wl.
var badLinkerFlags = [][]string{
	{"-DFOO"},
	{"-Dfoo=bar"},
	{"-W"},
	{"-Wall"},
	{"-fobjc-arc"},
	{"-fno-objc-arc"},
	{"-fomit-frame-pointer"},
	{"-fno-omit-frame-pointer"},
	{"-fsplit-stack"},
	{"-fno-split-stack"},
	{"-fstack-xxx"},
	{"-fno-stack-xxx"},
	{"-mstack-overflow"},
	{"-mno-stack-overflow"},
	{"-mnop-fun-dllimport"},
	{"-std=c99"},
	{"-xc"},
	{"-D", "FOO"},
	{"-D", "foo=bar"},
	{"-I", "FOO"},
	{"-L", "@foo"},
	{"-L", "-foo"},
	{"-x", "c"},
	{"-D@X"},
	{"-D-X"},
	{"-I@dir"},
	{"-I-dir"},
	{"-O@1"},
	{"-Wa,-foo"},
	{"-W@foo"},
	{"-g@gdb"},
	{"-g-gdb"},
	{"-march=@dawn"},
	{"-march=-dawn"},
	{"-std=@c99"},
	{"-std=-c99"},
	{"-x@c"},
	{"-x-c"},
	{"-D", "@foo"},
	{"-D", "-foo"},
	{"-I", "@foo"},
	{"-I", "-foo"},
	{"-l", "@foo"},
	{"-l", "-foo"},
	{"-framework", "-Caffeine"},
	{"-framework", "@Home"},
	{"-Wl,-framework,-Caffeine"},
	{"-Wl,-framework", "-Wl,@Home"},
	{"-Wl,-framework", "@Home"},
	{"-Wl,-framework,Chocolate,@Home"},
	{"-Wl,--hash-style=foo"},
	{"-x", "--c"},
	{"-x", "@obj"},
	{"-Wl,-rpath,@foo"},
	{"-Wl,-R,foo,bar"},
	{"-Wl,-R,@foo"},
	{"-Wl,--just-symbols,@foo"},
	{"../x.o"},
	{"-Wl,-R,"},
	{"-Wl,-O"},
	{"-Wl,-e="},
	{"-Wl,-e,"},
	{"-Wl,-R,-flag"},
}
+
+func TestCheckLinkerFlags(t *testing.T) {
+	for _, f := range goodLinkerFlags {
+		if err := checkLinkerFlags("test", "test", f); err != nil {
+			t.Errorf("unexpected error for %q: %v", f, err)
+		}
+	}
+	for _, f := range badLinkerFlags {
+		if err := checkLinkerFlags("test", "test", f); err == nil {
+			t.Errorf("missing error for %q", f)
+		}
+	}
+}
+
+func TestCheckFlagAllowDisallow(t *testing.T) {
+	if err := checkCompilerFlags("TEST", "test", []string{"-disallow"}); err == nil {
+		t.Fatalf("missing error for -disallow")
+	}
+	os.Setenv("CGO_TEST_ALLOW", "-disallo")
+	if err := checkCompilerFlags("TEST", "test", []string{"-disallow"}); err == nil {
+		t.Fatalf("missing error for -disallow with CGO_TEST_ALLOW=-disallo")
+	}
+	os.Setenv("CGO_TEST_ALLOW", "-disallow")
+	if err := checkCompilerFlags("TEST", "test", []string{"-disallow"}); err != nil {
+		t.Fatalf("unexpected error for -disallow with CGO_TEST_ALLOW=-disallow: %v", err)
+	}
+	os.Unsetenv("CGO_TEST_ALLOW")
+
+	if err := checkCompilerFlags("TEST", "test", []string{"-Wall"}); err != nil {
+		t.Fatalf("unexpected error for -Wall: %v", err)
+	}
+	os.Setenv("CGO_TEST_DISALLOW", "-Wall")
+	if err := checkCompilerFlags("TEST", "test", []string{"-Wall"}); err == nil {
+		t.Fatalf("missing error for -Wall with CGO_TEST_DISALLOW=-Wall")
+	}
+	os.Setenv("CGO_TEST_ALLOW", "-Wall") // disallow wins
+	if err := checkCompilerFlags("TEST", "test", []string{"-Wall"}); err == nil {
+		t.Fatalf("missing error for -Wall with CGO_TEST_DISALLOW=-Wall and CGO_TEST_ALLOW=-Wall")
+	}
+
+	os.Setenv("CGO_TEST_ALLOW", "-fplugin.*")
+	os.Setenv("CGO_TEST_DISALLOW", "-fplugin=lint.so")
+	if err := checkCompilerFlags("TEST", "test", []string{"-fplugin=faster.so"}); err != nil {
+		t.Fatalf("unexpected error for -fplugin=faster.so: %v", err)
+	}
+	if err := checkCompilerFlags("TEST", "test", []string{"-fplugin=lint.so"}); err == nil {
+		t.Fatalf("missing error for -fplugin=lint.so: %v", err)
+	}
+}
+
+func TestCheckCompilerFlagsForInternalLink(t *testing.T) {
+	// Any "bad" compiler flag should trigger external linking.
+	for _, f := range badCompilerFlags {
+		if err := checkCompilerFlagsForInternalLink("test", "test", f); err == nil {
+			t.Errorf("missing error for %q", f)
+		}
+	}
+
+	// All "good" compiler flags should not trigger external linking,
+	// except for anything that begins with "-flto".
+	for _, f := range goodCompilerFlags {
+		foundLTO := false
+		for _, s := range f {
+			if strings.Contains(s, "-flto") {
+				foundLTO = true
+			}
+		}
+		if err := checkCompilerFlagsForInternalLink("test", "test", f); err != nil {
+			// expect error for -flto
+			if !foundLTO {
+				t.Errorf("unexpected error for %q: %v", f, err)
+			}
+		} else {
+			// expect no error for everything else
+			if foundLTO {
+				t.Errorf("missing error for %q: %v", f, err)
+			}
+		}
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/work/shell.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/shell.go
new file mode 100644
index 0000000000000000000000000000000000000000..6089170007fbf0dc435e802f313c7ef744ec0fe8
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/shell.go
@@ -0,0 +1,678 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package work
+
+import (
+	"bytes"
+	"cmd/go/internal/base"
+	"cmd/go/internal/cache"
+	"cmd/go/internal/cfg"
+	"cmd/go/internal/load"
+	"cmd/go/internal/par"
+	"cmd/go/internal/str"
+	"errors"
+	"fmt"
+	"internal/lazyregexp"
+	"io"
+	"io/fs"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
// A Shell runs shell commands and performs shell-like file system operations.
//
// Shell tracks context related to running commands, and forms a tree much like
// context.Context: each derived Shell shares the per-Builder state below.
type Shell struct {
	action       *Action // nil for the root shell
	*shellShared         // per-Builder state shared across Shells
}
+
// shellShared is Shell state shared across all Shells derived from a single
// root shell (generally a single Builder).
type shellShared struct {
	workDir string // $WORK, immutable

	printLock sync.Mutex
	printFunc func(args ...any) (int, error) // output sink; guarded by printLock
	scriptDir string                         // current directory in printed script; guarded by printLock

	mkdirCache par.Cache[string, error] // a cache of created directories
}
+
+// NewShell returns a new Shell.
+//
+// Shell will internally serialize calls to the print function.
+// If print is nil, it defaults to printing to stderr.
+func NewShell(workDir string, print func(a ...any) (int, error)) *Shell {
+	if print == nil {
+		print = func(a ...any) (int, error) {
+			return fmt.Fprint(os.Stderr, a...)
+		}
+	}
+	shared := &shellShared{
+		workDir:   workDir,
+		printFunc: print,
+	}
+	return &Shell{shellShared: shared}
+}
+
// Print emits a to this Shell's output stream, formatting it like fmt.Print.
// It is safe to call concurrently.
// The print function's byte count and error results are discarded.
func (sh *Shell) Print(a ...any) {
	sh.printLock.Lock()
	defer sh.printLock.Unlock()
	sh.printFunc(a...)
}
+
// printLocked is like Print but assumes the caller already holds printLock.
func (sh *Shell) printLocked(a ...any) {
	sh.printFunc(a...)
}
+
+// WithAction returns a Shell identical to sh, but bound to Action a.
+func (sh *Shell) WithAction(a *Action) *Shell {
+	sh2 := *sh
+	sh2.action = a
+	return &sh2
+}
+
// Shell returns a shell for running commands on behalf of Action a.
// The shell is created lazily on first use and cached on the Action.
func (b *Builder) Shell(a *Action) *Shell {
	if a == nil {
		// The root shell has a nil Action. The point of this method is to
		// create a Shell bound to an Action, so disallow nil Actions here.
		panic("nil Action")
	}
	if a.sh == nil {
		a.sh = b.backgroundSh.WithAction(a)
	}
	return a.sh
}
+
// BackgroundShell returns a Builder-wide Shell that's not bound to any Action.
// Try not to use this unless there's really no sensible Action available.
// Output printed through it cannot be attributed to a particular package.
func (b *Builder) BackgroundShell() *Shell {
	return b.backgroundSh
}
+
// moveOrCopyFile is like 'mv src dst' or 'cp src dst'.
//
// It prefers a cheap rename but falls back to CopyFile whenever the
// destination must be an independent copy (source in the build cache,
// Windows, setgid destination directory) or the rename fails. perm is
// the desired permission for dst before umask adjustment; force is
// passed through to CopyFile.
func (sh *Shell) moveOrCopyFile(dst, src string, perm fs.FileMode, force bool) error {
	if cfg.BuildN {
		// -n: print the would-be command and do nothing.
		sh.ShowCmd("", "mv %s %s", src, dst)
		return nil
	}

	// If we can update the mode and rename to the dst, do it.
	// Otherwise fall back to standard copy.

	// If the source is in the build cache, we need to copy it.
	if strings.HasPrefix(src, cache.DefaultDir()) {
		return sh.CopyFile(dst, src, perm, force)
	}

	// On Windows, always copy the file, so that we respect the NTFS
	// permissions of the parent folder. https://golang.org/issue/22343.
	// What matters here is not cfg.Goos (the system we are building
	// for) but runtime.GOOS (the system we are building on).
	if runtime.GOOS == "windows" {
		return sh.CopyFile(dst, src, perm, force)
	}

	// If the destination directory has the group sticky bit set,
	// we have to copy the file to retain the correct permissions.
	// https://golang.org/issue/18878
	if fi, err := os.Stat(filepath.Dir(dst)); err == nil {
		if fi.IsDir() && (fi.Mode()&fs.ModeSetgid) != 0 {
			return sh.CopyFile(dst, src, perm, force)
		}
	}

	// The perm argument is meant to be adjusted according to umask,
	// but we don't know what the umask is.
	// Create a dummy file to find out.
	// This avoids build tags and works even on systems like Plan 9
	// where the file mask computation incorporates other information.
	mode := perm
	f, err := os.OpenFile(filepath.Clean(dst)+"-go-tmp-umask", os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm)
	if err == nil {
		fi, err := f.Stat()
		if err == nil {
			mode = fi.Mode() & 0777
		}
		name := f.Name()
		f.Close()
		os.Remove(name)
	}

	// Try chmod+rename; any failure along the way falls through to a copy.
	if err := os.Chmod(src, mode); err == nil {
		if err := os.Rename(src, dst); err == nil {
			if cfg.BuildX {
				sh.ShowCmd("", "mv %s %s", src, dst)
			}
			return nil
		}
	}

	return sh.CopyFile(dst, src, perm, force)
}
+
// CopyFile is like 'cp src dst'.
//
// perm is the permission used when creating dst. Unless force is set,
// CopyFile refuses to overwrite an existing dst that is a non-empty
// regular file that does not look like an object file, to avoid
// clobbering unrelated user files.
func (sh *Shell) CopyFile(dst, src string, perm fs.FileMode, force bool) error {
	if cfg.BuildN || cfg.BuildX {
		sh.ShowCmd("", "cp %s %s", src, dst)
		if cfg.BuildN {
			return nil
		}
	}

	sf, err := os.Open(src)
	if err != nil {
		return err
	}
	defer sf.Close()

	// Be careful about removing/overwriting dst.
	// Do not remove/overwrite if dst exists and is a directory
	// or a non-empty non-object file.
	if fi, err := os.Stat(dst); err == nil {
		if fi.IsDir() {
			return fmt.Errorf("build output %q already exists and is a directory", dst)
		}
		if !force && fi.Mode().IsRegular() && fi.Size() != 0 && !isObject(dst) {
			return fmt.Errorf("build output %q already exists and is not an object file", dst)
		}
	}

	// On Windows, remove lingering ~ file from last attempt.
	if runtime.GOOS == "windows" {
		if _, err := os.Stat(dst + "~"); err == nil {
			os.Remove(dst + "~")
		}
	}

	mayberemovefile(dst)
	df, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
	if err != nil && runtime.GOOS == "windows" {
		// Windows does not allow deletion of a binary file
		// while it is executing. Try to move it out of the way.
		// If the move fails, which is likely, we'll try again the
		// next time we do an install of this binary.
		if err := os.Rename(dst, dst+"~"); err == nil {
			os.Remove(dst + "~")
		}
		df, err = os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
	}
	if err != nil {
		return fmt.Errorf("copying %s: %w", src, err) // err should already refer to dst
	}

	_, err = io.Copy(df, sf)
	df.Close()
	if err != nil {
		// Remove the partially-written file rather than leave a corrupt output.
		mayberemovefile(dst)
		return fmt.Errorf("copying %s to %s: %v", src, dst, err)
	}
	return nil
}
+
+// mayberemovefile removes a file only if it is a regular file
+// When running as a user with sufficient privileges, we may delete
+// even device files, for example, which is not intended.
+func mayberemovefile(s string) {
+	if fi, err := os.Lstat(s); err == nil && !fi.Mode().IsRegular() {
+		return
+	}
+	os.Remove(s)
+}
+
+// writeFile writes the text to file.
+func (sh *Shell) writeFile(file string, text []byte) error {
+	if cfg.BuildN || cfg.BuildX {
+		switch {
+		case len(text) == 0:
+			sh.ShowCmd("", "echo -n > %s # internal", file)
+		case bytes.IndexByte(text, '\n') == len(text)-1:
+			// One line. Use a simpler "echo" command.
+			sh.ShowCmd("", "echo '%s' > %s # internal", bytes.TrimSuffix(text, []byte("\n")), file)
+		default:
+			// Use the most general form.
+			sh.ShowCmd("", "cat >%s << 'EOF' # internal\n%sEOF", file, text)
+		}
+	}
+	if cfg.BuildN {
+		return nil
+	}
+	return os.WriteFile(file, text, 0666)
+}
+
+// Mkdir makes the named directory.
+func (sh *Shell) Mkdir(dir string) error {
+	// Make Mkdir(a.Objdir) a no-op instead of an error when a.Objdir == "".
+	if dir == "" {
+		return nil
+	}
+
+	// We can be a little aggressive about being
+	// sure directories exist. Skip repeated calls.
+	return sh.mkdirCache.Do(dir, func() error {
+		if cfg.BuildN || cfg.BuildX {
+			sh.ShowCmd("", "mkdir -p %s", dir)
+			if cfg.BuildN {
+				return nil
+			}
+		}
+
+		return os.MkdirAll(dir, 0777)
+	})
+}
+
+// RemoveAll is like 'rm -rf'. It attempts to remove all paths even if there's
+// an error, and returns the first error.
+func (sh *Shell) RemoveAll(paths ...string) error {
+	if cfg.BuildN || cfg.BuildX {
+		// Don't say we are removing the directory if we never created it.
+		show := func() bool {
+			for _, path := range paths {
+				if _, ok := sh.mkdirCache.Get(path); ok {
+					return true
+				}
+				if _, err := os.Stat(path); !os.IsNotExist(err) {
+					return true
+				}
+			}
+			return false
+		}
+		if show() {
+			sh.ShowCmd("", "rm -rf %s", strings.Join(paths, " "))
+		}
+	}
+	if cfg.BuildN {
+		return nil
+	}
+
+	var err error
+	for _, path := range paths {
+		if err2 := os.RemoveAll(path); err2 != nil && err == nil {
+			err = err2
+		}
+	}
+	return err
+}
+
+// Symlink creates a symlink newname -> oldname.
+func (sh *Shell) Symlink(oldname, newname string) error {
+	// It's not an error to try to recreate an existing symlink.
+	if link, err := os.Readlink(newname); err == nil && link == oldname {
+		return nil
+	}
+
+	if cfg.BuildN || cfg.BuildX {
+		sh.ShowCmd("", "ln -s %s %s", oldname, newname)
+		if cfg.BuildN {
+			return nil
+		}
+	}
+	return os.Symlink(oldname, newname)
+}
+
// fmtCmd formats a command in the manner of fmt.Sprintf but also:
//
//	fmtCmd replaces the value of b.WorkDir with $WORK.
//
// The dir argument is currently unused here; it is accepted for symmetry
// with ShowCmd.
func (sh *Shell) fmtCmd(dir string, format string, args ...any) string {
	cmd := fmt.Sprintf(format, args...)
	// "cat" commands embed file contents (see writeFile), which must not
	// be rewritten.
	if sh.workDir != "" && !strings.HasPrefix(cmd, "cat ") {
		cmd = strings.ReplaceAll(cmd, sh.workDir, "$WORK")
		// The work directory may also appear in its quoted/escaped form
		// (e.g. backslashes doubled on Windows); replace that too.
		escaped := strconv.Quote(sh.workDir)
		escaped = escaped[1 : len(escaped)-1] // strip quote characters
		if escaped != sh.workDir {
			cmd = strings.ReplaceAll(cmd, escaped, "$WORK")
		}
	}
	return cmd
}
+
// ShowCmd prints the given command to standard output
// for the implementation of -n or -x.
//
// ShowCmd also replaces the name of the current script directory with dot (.)
// but only when it is at the beginning of a space-separated token.
//
// If dir is not "" or "/" and not the current script directory, ShowCmd first
// prints a "cd" command to switch to dir and updates the script directory.
func (sh *Shell) ShowCmd(dir string, format string, args ...any) {
	// Use the output lock directly so we can manage scriptDir.
	sh.printLock.Lock()
	defer sh.printLock.Unlock()

	cmd := sh.fmtCmd(dir, format, args...)

	if dir != "" && dir != "/" {
		if dir != sh.scriptDir {
			// Show changing to dir and update the current directory.
			sh.printLocked(sh.fmtCmd("", "cd %s\n", dir))
			sh.scriptDir = dir
		}
		// scriptDir is now our working directory. Replace it
		// with "." in the command.
		dot := " ."
		if dir[len(dir)-1] == filepath.Separator {
			dot += string(filepath.Separator)
		}
		// Pad with a leading space so a dir at the start of cmd is
		// also matched as a token boundary, then strip the pad.
		cmd = strings.ReplaceAll(" "+cmd, " "+dir, dot)[1:]
	}

	sh.printLocked(cmd + "\n")
}
+
// reportCmd reports the output and exit status of a command. The cmdOut and
// cmdErr arguments are the output and exit error of the command, respectively.
//
// The exact reporting behavior is as follows:
//
//	cmdOut  cmdErr  Result
//	""      nil     print nothing, return nil
//	!=""    nil     print output, return nil
//	""      !=nil   print nothing, return cmdErr (later printed)
//	!=""    !=nil   print nothing, ignore err, return output as error (later printed)
//
// reportCmd returns a non-nil error if and only if cmdErr != nil. It assumes
// that the command output, if non-empty, is more detailed than the command
// error (which is usually just an exit status), so prefers using the output as
// the ultimate error. Typically, the caller should return this error from an
// Action, and it will then be printed by the Builder.
//
// reportCmd formats the output as "# desc" followed by the given output. The
// output is expected to contain references to 'dir', usually the source
// directory for the package that has failed to build. reportCmd rewrites
// mentions of dir with a relative path to dir when the relative path is
// shorter. This is usually more pleasant. For example, if fmt doesn't compile
// and we are in src/html, the output is
//
//	$ go build
//	# fmt
//	../fmt/print.go:1090: undefined: asdf
//	$
//
// instead of
//
//	$ go build
//	# fmt
//	/usr/gopher/go/src/fmt/print.go:1090: undefined: asdf
//	$
//
// reportCmd also replaces references to the work directory with $WORK, replaces
// cgo file paths with the original file path, and replaces cgo-mangled names
// with "C.name".
//
// desc is optional. If "", a.Package.Desc() is used.
//
// dir is optional. If "", a.Package.Dir is used.
func (sh *Shell) reportCmd(desc, dir string, cmdOut []byte, cmdErr error) error {
	if len(cmdOut) == 0 && cmdErr == nil {
		// Common case
		return nil
	}
	if len(cmdOut) == 0 && cmdErr != nil {
		// Just return the error.
		//
		// TODO: This is what we've done for a long time, but it may be a
		// mistake because it loses all of the extra context and results in
		// ultimately less descriptive output. We should probably just take the
		// text of cmdErr as the output in this case and do everything we
		// otherwise would. We could chain the errors if we feel like it.
		return cmdErr
	}

	// Fetch defaults from the package.
	var p *load.Package
	a := sh.action
	if a != nil {
		p = a.Package
	}
	var importPath string
	if p != nil {
		importPath = p.ImportPath
		if desc == "" {
			desc = p.Desc()
		}
		if dir == "" {
			dir = p.Dir
		}
	}

	out := string(cmdOut)

	// Normalize to newline-terminated output.
	if !strings.HasSuffix(out, "\n") {
		out = out + "\n"
	}

	// Replace workDir with $WORK
	out = replacePrefix(out, sh.workDir, "$WORK")

	// Rewrite mentions of dir with a relative path to dir
	// when the relative path is shorter.
	for {
		// Note that dir starts out long, something like
		// /foo/bar/baz/root/a
		// The target string to be reduced is something like
		// (blah-blah-blah) /foo/bar/baz/root/sibling/whatever.go:blah:blah
		// /foo/bar/baz/root/a doesn't match /foo/bar/baz/root/sibling, but the prefix
		// /foo/bar/baz/root does.  And there may be other niblings sharing shorter
		// prefixes, the only way to find them is to look.
		// This doesn't always produce a relative path --
		// /foo is shorter than ../../.., for example.
		if reldir := base.ShortPath(dir); reldir != dir {
			out = replacePrefix(out, dir, reldir)
			if filepath.Separator == '\\' {
				// Don't know why, sometimes this comes out with slashes, not backslashes.
				wdir := strings.ReplaceAll(dir, "\\", "/")
				out = replacePrefix(out, wdir, reldir)
			}
		}
		dirP := filepath.Dir(dir)
		if dir == dirP {
			break
		}
		dir = dirP
	}

	// Fix up output referring to cgo-generated code to be more readable.
	// Replace x.go:19[/tmp/.../x.cgo1.go:18] with x.go:19.
	// Replace *[100]_Ctype_foo with *[100]C.foo.
	// If we're using -x, assume we're debugging and want the full dump, so disable the rewrite.
	if !cfg.BuildX && cgoLine.MatchString(out) {
		out = cgoLine.ReplaceAllString(out, "")
		out = cgoTypeSigRe.ReplaceAllString(out, "C.")
	}

	// Usually desc is already p.Desc(), but if not, signal cmdError.Error to
	// add a line explicitly mentioning the import path.
	needsPath := importPath != "" && p != nil && desc != p.Desc()

	err := &cmdError{desc, out, importPath, needsPath}
	if cmdErr != nil {
		// The command failed. Report the output up as an error.
		return err
	}
	// The command didn't fail, so just print the output as appropriate.
	if a != nil && a.output != nil {
		// The Action is capturing output.
		a.output = append(a.output, err.Error()...)
	} else {
		// Write directly to the Builder output.
		sh.Print(err.Error())
	}
	return nil
}
+
// replacePrefix is like strings.ReplaceAll, but only replaces instances of old
// that are preceded by ' ', '\t', or appear at the beginning of a line.
func replacePrefix(s, old, new string) string {
	// Fast path: nothing to replace.
	if !strings.Contains(s, old) {
		return s
	}

	// Replace occurrences following each recognized separator, in the
	// same order the original sequential ReplaceAll calls would.
	for _, sep := range []string{" ", "\n", "\n\t"} {
		s = strings.ReplaceAll(s, sep+old, sep+new)
	}
	// Finally handle an occurrence at the very start of the string.
	if strings.HasPrefix(s, old) {
		s = new + s[len(old):]
	}
	return s
}
+
// cmdError carries the formatted output of a failed command so it can be
// printed later by the Builder.
type cmdError struct {
	desc       string
	text       string
	importPath string
	needsPath  bool // Set if desc does not already include the import path
}

func (e *cmdError) Error() string {
	if e.needsPath {
		// Ensure the import path is part of the message, clearly
		// distinguished from the description.
		return fmt.Sprintf("# %s\n# [%s]\n%s", e.importPath, e.desc, e.text)
	}
	return "# " + e.desc + "\n" + e.text
}

func (e *cmdError) ImportPath() string {
	return e.importPath
}
+
// cgoLine matches cgo/cover location annotations like [/tmp/x.cgo1.go:18:4].
var cgoLine = lazyregexp.New(`\[[^\[\]]+\.(cgo1|cover)\.go:[0-9]+(:[0-9]+)?\]`)

// cgoTypeSigRe matches cgo-mangled identifier prefixes such as _Ctype_.
var cgoTypeSigRe = lazyregexp.New(`\b_C2?(type|func|var|macro)_\B`)
+
// run runs the command given by cmdline in the directory dir.
// If the command fails, run prints information about the failure
// and returns a non-nil error.
func (sh *Shell) run(dir string, desc string, env []string, cmdargs ...any) error {
	out, err := sh.runOut(dir, env, cmdargs...)
	if desc == "" {
		// Default the description to the formatted command line itself.
		desc = sh.fmtCmd(dir, "%s", strings.Join(str.StringList(cmdargs...), " "))
	}
	return sh.reportCmd(desc, dir, out, err)
}
+
// runOut runs the command given by cmdline in the directory dir.
// It returns the command output and any errors that occurred.
// It accumulates execution time in the bound Action's JSON record, if any.
func (sh *Shell) runOut(dir string, env []string, cmdargs ...any) ([]byte, error) {
	a := sh.action

	cmdline := str.StringList(cmdargs...)

	for _, arg := range cmdline {
		// GNU binutils commands, including gcc and gccgo, interpret an argument
		// @foo anywhere in the command line (even following --) as meaning
		// "read and insert arguments from the file named foo."
		// Don't say anything that might be misinterpreted that way.
		if strings.HasPrefix(arg, "@") {
			return nil, fmt.Errorf("invalid command-line argument %s in command: %s", arg, joinUnambiguously(cmdline))
		}
	}

	if cfg.BuildN || cfg.BuildX {
		// Print the command, prefixed by any environment overrides,
		// in a shell-compatible form.
		var envcmdline string
		for _, e := range env {
			if j := strings.IndexByte(e, '='); j != -1 {
				if strings.ContainsRune(e[j+1:], '\'') {
					envcmdline += fmt.Sprintf("%s=%q", e[:j], e[j+1:])
				} else {
					envcmdline += fmt.Sprintf("%s='%s'", e[:j], e[j+1:])
				}
				envcmdline += " "
			}
		}
		envcmdline += joinUnambiguously(cmdline)
		sh.ShowCmd(dir, "%s", envcmdline)
		if cfg.BuildN {
			return nil, nil
		}
	}

	// Capture combined stdout+stderr in a single buffer.
	var buf bytes.Buffer
	path, err := cfg.LookPath(cmdline[0])
	if err != nil {
		return nil, err
	}
	cmd := exec.Command(path, cmdline[1:]...)
	if cmd.Path != "" {
		cmd.Args[0] = cmd.Path
	}
	cmd.Stdout = &buf
	cmd.Stderr = &buf
	cleanup := passLongArgsInResponseFiles(cmd)
	defer cleanup()
	if dir != "." {
		cmd.Dir = dir
	}
	cmd.Env = cmd.Environ() // Pre-allocate with correct PWD.

	// Add the TOOLEXEC_IMPORTPATH environment variable for -toolexec tools.
	// It doesn't really matter if -toolexec isn't being used.
	// Note that a.Package.Desc is not really an import path,
	// but this is consistent with 'go list -f {{.ImportPath}}'.
	// Plus, it is useful to uniquely identify packages in 'go list -json'.
	if a != nil && a.Package != nil {
		cmd.Env = append(cmd.Env, "TOOLEXEC_IMPORTPATH="+a.Package.Desc())
	}

	cmd.Env = append(cmd.Env, env...)
	start := time.Now()
	err = cmd.Run()
	// Record wall/user/system time for 'go build -debug-actiongraph'.
	if a != nil && a.json != nil {
		aj := a.json
		aj.Cmd = append(aj.Cmd, joinUnambiguously(cmdline))
		aj.CmdReal += time.Since(start)
		if ps := cmd.ProcessState; ps != nil {
			aj.CmdUser += ps.UserTime()
			aj.CmdSys += ps.SystemTime()
		}
	}

	// err can be something like 'exit status 1'.
	// Add information about what program was running.
	// Note that if buf.Bytes() is non-empty, the caller usually
	// shows buf.Bytes() and does not print err at all, so the
	// prefix here does not make most output any more verbose.
	if err != nil {
		err = errors.New(cmdline[0] + ": " + err.Error())
	}
	return buf.Bytes(), err
}
+
// joinUnambiguously prints the slice, quoting where necessary to make the
// output unambiguous.
// TODO: See issue 5279. The printing of commands needs a complete redo.
func joinUnambiguously(a []string) string {
	quoted := make([]string, len(a))
	for i, s := range a {
		q := strconv.Quote(s)
		// A gccgo command line can contain -( and -).
		// Make sure we quote them since they are special to the shell.
		// The trimpath argument can also contain > (part of =>) and ;. Quote those too.
		// len(q) > len(s)+2 means Quote had to escape something inside s.
		if s == "" || strings.ContainsAny(s, " ()>;") || len(q) > len(s)+2 {
			quoted[i] = q
		} else {
			quoted[i] = s
		}
	}
	return strings.Join(quoted, " ")
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/work/shell_test.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/shell_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..24bef4e684a48b42c5a863ab0cb29ac789e489d6
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/work/shell_test.go
@@ -0,0 +1,139 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package work
+
+import (
+	"bytes"
+	"internal/testenv"
+	"strings"
+	"testing"
+	"unicode"
+)
+
// FuzzSplitPkgConfigOutput cross-checks splitPkgConfigOutput against the
// word-splitting performed by a real POSIX shell: any input our splitter
// accepts must be tokenized the same way by /bin/sh.
func FuzzSplitPkgConfigOutput(f *testing.F) {
	testenv.MustHaveExecPath(f, "/bin/sh")

	// Seed corpus: shell-metacharacter and escaping edge cases.
	f.Add([]byte(`$FOO`))
	f.Add([]byte(`\$FOO`))
	f.Add([]byte(`${FOO}`))
	f.Add([]byte(`\${FOO}`))
	f.Add([]byte(`$(/bin/false)`))
	f.Add([]byte(`\$(/bin/false)`))
	f.Add([]byte(`$((0))`))
	f.Add([]byte(`\$((0))`))
	f.Add([]byte(`unescaped space`))
	f.Add([]byte(`escaped\ space`))
	f.Add([]byte(`"unterminated quote`))
	f.Add([]byte(`'unterminated quote`))
	f.Add([]byte(`unterminated escape\`))
	f.Add([]byte(`"quote with unterminated escape\`))
	f.Add([]byte(`'quoted "double quotes"'`))
	f.Add([]byte(`"quoted 'single quotes'"`))
	f.Add([]byte(`"\$0"`))
	f.Add([]byte(`"\$\0"`))
	f.Add([]byte(`"\$"`))
	f.Add([]byte(`"\$ "`))

	// Example positive inputs from TestSplitPkgConfigOutput.
	// Some bare newlines have been removed so that the inputs
	// are valid in the shell script we use for comparison.
	f.Add([]byte(`-r:foo -L/usr/white\ space/lib -lfoo\ bar -lbar\ baz`))
	f.Add([]byte(`-lextra\ fun\ arg\\`))
	f.Add([]byte("\textra     whitespace\r"))
	f.Add([]byte("     \r      "))
	f.Add([]byte(`"-r:foo" "-L/usr/white space/lib" "-lfoo bar" "-lbar baz"`))
	f.Add([]byte(`"-lextra fun arg\\"`))
	f.Add([]byte(`"     \r\n\      "`))
	f.Add([]byte(`""`))
	f.Add([]byte(``))
	f.Add([]byte(`"\\"`))
	f.Add([]byte(`"\x"`))
	f.Add([]byte(`"\\x"`))
	f.Add([]byte(`'\\'`))
	f.Add([]byte(`'\x'`))
	f.Add([]byte(`"\\x"`))
	f.Add([]byte("\\\n"))
	f.Add([]byte(`-fPIC -I/test/include/foo -DQUOTED='"/test/share/doc"'`))
	f.Add([]byte(`-fPIC -I/test/include/foo -DQUOTED="/test/share/doc"`))
	f.Add([]byte(`-fPIC -I/test/include/foo -DQUOTED=\"/test/share/doc\"`))
	f.Add([]byte(`-fPIC -I/test/include/foo -DQUOTED='/test/share/doc'`))
	f.Add([]byte(`-DQUOTED='/te\st/share/d\oc'`))
	f.Add([]byte(`-Dhello=10 -Dworld=+32 -DDEFINED_FROM_PKG_CONFIG=hello\ world`))
	f.Add([]byte(`"broken\"" \\\a "a"`))

	// Example negative inputs from TestSplitPkgConfigOutput.
	f.Add([]byte(`"     \r\n      `))
	f.Add([]byte(`"-r:foo" "-L/usr/white space/lib "-lfoo bar" "-lbar baz"`))
	f.Add([]byte(`"-lextra fun arg\\`))
	f.Add([]byte(`broken flag\`))
	f.Add([]byte(`extra broken flag \`))
	f.Add([]byte(`\`))
	f.Add([]byte(`"broken\"" "extra" \`))

	f.Fuzz(func(t *testing.T, b []byte) {
		t.Parallel()

		// Skip inputs whose quoting behavior varies between shells.
		if bytes.ContainsAny(b, "*?[#~%\x00{}!") {
			t.Skipf("skipping %#q: contains a sometimes-quoted character", b)
		}
		// splitPkgConfigOutput itself rejects inputs that contain unquoted
		// shell operator characters. (Quoted shell characters are fine.)

		for _, c := range b {
			if c > unicode.MaxASCII {
				t.Skipf("skipping %#q: contains a non-ASCII character %q", b, c)
			}
			if !unicode.IsGraphic(rune(c)) && !unicode.IsSpace(rune(c)) {
				t.Skipf("skipping %#q: contains non-graphic character %q", b, c)
			}
		}

		args, err := splitPkgConfigOutput(b)
		if err != nil {
			// We haven't checked that the shell would actually reject this input too,
			// but if splitPkgConfigOutput rejected it it's probably too dangerous to
			// run in the script.
			t.Logf("%#q: %v", b, err)
			return
		}
		t.Logf("splitPkgConfigOutput(%#q) = %#q", b, args)
		if len(args) == 0 {
			t.Skipf("skipping %#q: contains no arguments", b)
		}

		// Expected shell output: one parsed argument per line.
		var buf strings.Builder
		for _, arg := range args {
			buf.WriteString(arg)
			buf.WriteString("\n")
		}
		wantOut := buf.String()

		if strings.Count(wantOut, "\n") != len(args)+bytes.Count(b, []byte("\n")) {
			// One of the newlines in b was treated as a delimiter and not part of an
			// argument. Our bash test script would interpret that as a syntax error.
			t.Skipf("skipping %#q: contains a bare newline", b)
		}

		// We use the printf shell command to echo the arguments because, per
		// https://pubs.opengroup.org/onlinepubs/9699919799/utilities/echo.html#tag_20_37_16:
		// “It is not possible to use echo portably across all POSIX systems unless
		// both -n (as the first argument) and escape sequences are omitted.”
		cmd := testenv.Command(t, "/bin/sh", "-c", "printf '%s\n' "+string(b))
		cmd.Env = append(cmd.Environ(), "LC_ALL=POSIX", "POSIXLY_CORRECT=1")
		cmd.Stderr = new(strings.Builder)
		out, err := cmd.Output()
		if err != nil {
			t.Fatalf("%#q: %v\n%s", cmd.Args, err, cmd.Stderr)
		}

		if string(out) != wantOut {
			t.Logf("%#q:\n%#q", cmd.Args, out)
			t.Logf("want:\n%#q", wantOut)
			t.Errorf("parsed args do not match")
		}
	})
}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/workcmd/edit.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/workcmd/edit.go
new file mode 100644
index 0000000000000000000000000000000000000000..8d975b0b3d13df808af01a46a412b475b94c1824
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/workcmd/edit.go
@@ -0,0 +1,340 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go work edit
+
+package workcmd
+
+import (
+	"cmd/go/internal/base"
+	"cmd/go/internal/gover"
+	"cmd/go/internal/modload"
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"golang.org/x/mod/module"
+
+	"golang.org/x/mod/modfile"
+)
+
+var cmdEdit = &base.Command{
+	UsageLine: "go work edit [editing flags] [go.work]",
+	Short:     "edit go.work from tools or scripts",
+	Long: `Edit provides a command-line interface for editing go.work,
+for use primarily by tools or scripts. It only reads go.work;
+it does not look up information about the modules involved.
+If no file is specified, Edit looks for a go.work file in the current
+directory and its parent directories
+
+The editing flags specify a sequence of editing operations.
+
+The -fmt flag reformats the go.work file without making other changes.
+This reformatting is also implied by any other modifications that use or
+rewrite the go.mod file. The only time this flag is needed is if no other
+flags are specified, as in 'go work edit -fmt'.
+
+The -use=path and -dropuse=path flags
+add and drop a use directive from the go.work file's set of module directories.
+
+The -replace=old[@v]=new[@v] flag adds a replacement of the given
+module path and version pair. If the @v in old@v is omitted, a
+replacement without a version on the left side is added, which applies
+to all versions of the old module path. If the @v in new@v is omitted,
+the new path should be a local module root directory, not a module
+path. Note that -replace overrides any redundant replacements for old[@v],
+so omitting @v will drop existing replacements for specific versions.
+
+The -dropreplace=old[@v] flag drops a replacement of the given
+module path and version pair. If the @v is omitted, a replacement without
+a version on the left side is dropped.
+
+The -use, -dropuse, -replace, and -dropreplace,
+editing flags may be repeated, and the changes are applied in the order given.
+
+The -go=version flag sets the expected Go language version.
+
+The -toolchain=name flag sets the Go toolchain to use.
+
+The -print flag prints the final go.work in its text format instead of
+writing it back to go.mod.
+
+The -json flag prints the final go.work file in JSON format instead of
+writing it back to go.mod. The JSON output corresponds to these Go types:
+
+	type GoWork struct {
+		Go        string
+		Toolchain string
+		Use       []Use
+		Replace   []Replace
+	}
+
+	type Use struct {
+		DiskPath   string
+		ModulePath string
+	}
+
+	type Replace struct {
+		Old Module
+		New Module
+	}
+
+	type Module struct {
+		Path    string
+		Version string
+	}
+
+See the workspaces reference at https://go.dev/ref/mod#workspaces
+for more information.
+`,
+}
+
// Command-line flags for 'go work edit'. Flags that describe edits append
// closures to workedits; the closures run in flag order against the parsed
// go.work file.
var (
	editFmt       = cmdEdit.Flag.Bool("fmt", false, "")
	editGo        = cmdEdit.Flag.String("go", "", "")
	editToolchain = cmdEdit.Flag.String("toolchain", "", "")
	editJSON      = cmdEdit.Flag.Bool("json", false, "")
	editPrint     = cmdEdit.Flag.Bool("print", false, "")
	workedits     []func(file *modfile.WorkFile) // edits specified in flags
)
+
// flagFunc adapts an ordinary function to the flag.Value interface so it can
// be registered with Flag.Var for repeatable string-valued flags.
type flagFunc func(string)

func (f flagFunc) String() string     { return "" }
func (f flagFunc) Set(s string) error { f(s); return nil }
+
// init wires up the Run function and the repeatable editing flags.
func init() {
	cmdEdit.Run = runEditwork // break init cycle

	cmdEdit.Flag.Var(flagFunc(flagEditworkUse), "use", "")
	cmdEdit.Flag.Var(flagFunc(flagEditworkDropUse), "dropuse", "")
	cmdEdit.Flag.Var(flagFunc(flagEditworkReplace), "replace", "")
	cmdEdit.Flag.Var(flagFunc(flagEditworkDropReplace), "dropreplace", "")
	base.AddChdirFlag(&cmdEdit.Flag)
}
+
// runEditwork is the entry point for 'go work edit': it locates and parses
// the go.work file, validates flags, applies the requested edits in order,
// and then prints or writes back the result.
func runEditwork(ctx context.Context, cmd *base.Command, args []string) {
	if *editJSON && *editPrint {
		base.Fatalf("go: cannot use both -json and -print")
	}

	if len(args) > 1 {
		base.Fatalf("go: 'go help work edit' accepts at most one argument")
	}
	// Resolve the go.work file: explicit argument wins; otherwise search
	// the current directory and its parents (honoring GOWORK).
	var gowork string
	if len(args) == 1 {
		gowork = args[0]
	} else {
		modload.InitWorkfile()
		gowork = modload.WorkFilePath()
	}
	if gowork == "" {
		base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)")
	}

	// Validate version-shaped flag values before touching the file.
	if *editGo != "" && *editGo != "none" {
		if !modfile.GoVersionRE.MatchString(*editGo) {
			base.Fatalf(`go work: invalid -go option; expecting something like "-go %s"`, gover.Local())
		}
	}
	if *editToolchain != "" && *editToolchain != "none" {
		if !modfile.ToolchainRE.MatchString(*editToolchain) {
			base.Fatalf(`go work: invalid -toolchain option; expecting something like "-toolchain go%s"`, gover.Local())
		}
	}

	anyFlags := *editGo != "" ||
		*editToolchain != "" ||
		*editJSON ||
		*editPrint ||
		*editFmt ||
		len(workedits) > 0

	if !anyFlags {
		base.Fatalf("go: no flags specified (see 'go help work edit').")
	}

	workFile, err := modload.ReadWorkFile(gowork)
	if err != nil {
		base.Fatalf("go: errors parsing %s:\n%s", base.ShortPath(gowork), err)
	}

	// "none" drops the statement; any other non-empty value sets it.
	if *editGo == "none" {
		workFile.DropGoStmt()
	} else if *editGo != "" {
		if err := workFile.AddGoStmt(*editGo); err != nil {
			base.Fatalf("go: internal error: %v", err)
		}
	}
	if *editToolchain == "none" {
		workFile.DropToolchainStmt()
	} else if *editToolchain != "" {
		if err := workFile.AddToolchainStmt(*editToolchain); err != nil {
			base.Fatalf("go: internal error: %v", err)
		}
	}

	// Apply the -use/-dropuse/-replace/-dropreplace edits in flag order.
	if len(workedits) > 0 {
		for _, edit := range workedits {
			edit(workFile)
		}
	}

	workFile.SortBlocks()
	workFile.Cleanup() // clean file after edits

	// Note: No call to modload.UpdateWorkFile here.
	// Edit's job is only to make the edits on the command line,
	// not to apply the kinds of semantic changes that
	// UpdateWorkFile does (or would eventually do, if we
	// decide to add the module comments in go.work).

	if *editJSON {
		editPrintJSON(workFile)
		return
	}

	if *editPrint {
		os.Stdout.Write(modfile.Format(workFile.Syntax))
		return
	}

	modload.WriteWorkFile(gowork, workFile)
}
+
+// flagEditworkUse implements the -use flag.
+func flagEditworkUse(arg string) {
+	workedits = append(workedits, func(f *modfile.WorkFile) {
+		_, mf, err := modload.ReadModFile(filepath.Join(arg, "go.mod"), nil)
+		modulePath := ""
+		if err == nil {
+			modulePath = mf.Module.Mod.Path
+		}
+		f.AddUse(modload.ToDirectoryPath(arg), modulePath)
+		if err := f.AddUse(modload.ToDirectoryPath(arg), ""); err != nil {
+			base.Fatalf("go: -use=%s: %v", arg, err)
+		}
+	})
+}
+
+// flagEditworkDropUse implements the -dropuse flag.
+func flagEditworkDropUse(arg string) {
+	workedits = append(workedits, func(f *modfile.WorkFile) {
+		if err := f.DropUse(modload.ToDirectoryPath(arg)); err != nil {
+			base.Fatalf("go: -dropdirectory=%s: %v", arg, err)
+		}
+	})
+}
+
// allowedVersionArg returns whether a token may be used as a version in go.work.
// We don't call modfile.CheckPathVersion, because that insists on versions
// being in semver form, but here we want to allow versions like "master" or
// "1234abcdef", which the go command will resolve the next time it runs (or
// during -fix).  Even so, we need to make sure the version is a valid token.
func allowedVersionArg(arg string) bool {
	return !modfile.MustQuote(arg)
}
+
+// parsePathVersionOptional parses path[@version], using adj to
+// describe any errors.
+func parsePathVersionOptional(adj, arg string, allowDirPath bool) (path, version string, err error) {
+	before, after, found := strings.Cut(arg, "@")
+	if !found {
+		path = arg
+	} else {
+		path, version = strings.TrimSpace(before), strings.TrimSpace(after)
+	}
+	if err := module.CheckImportPath(path); err != nil {
+		if !allowDirPath || !modfile.IsDirectoryPath(path) {
+			return path, version, fmt.Errorf("invalid %s path: %v", adj, err)
+		}
+	}
+	if path != arg && !allowedVersionArg(version) {
+		return path, version, fmt.Errorf("invalid %s version: %q", adj, version)
+	}
+	return path, version, nil
+}
+
+// flagEditworkReplace implements the -replace flag.
+func flagEditworkReplace(arg string) {
+	before, after, found := strings.Cut(arg, "=")
+	if !found {
+		base.Fatalf("go: -replace=%s: need old[@v]=new[@w] (missing =)", arg)
+	}
+	old, new := strings.TrimSpace(before), strings.TrimSpace(after)
+	if strings.HasPrefix(new, ">") {
+		base.Fatalf("go: -replace=%s: separator between old and new is =, not =>", arg)
+	}
+	oldPath, oldVersion, err := parsePathVersionOptional("old", old, false)
+	if err != nil {
+		base.Fatalf("go: -replace=%s: %v", arg, err)
+	}
+	newPath, newVersion, err := parsePathVersionOptional("new", new, true)
+	if err != nil {
+		base.Fatalf("go: -replace=%s: %v", arg, err)
+	}
+	if newPath == new && !modfile.IsDirectoryPath(new) {
+		base.Fatalf("go: -replace=%s: unversioned new path must be local directory", arg)
+	}
+
+	workedits = append(workedits, func(f *modfile.WorkFile) {
+		if err := f.AddReplace(oldPath, oldVersion, newPath, newVersion); err != nil {
+			base.Fatalf("go: -replace=%s: %v", arg, err)
+		}
+	})
+}
+
+// flagEditworkDropReplace implements the -dropreplace flag.
+func flagEditworkDropReplace(arg string) {
+	path, version, err := parsePathVersionOptional("old", arg, true)
+	if err != nil {
+		base.Fatalf("go: -dropreplace=%s: %v", arg, err)
+	}
+	workedits = append(workedits, func(f *modfile.WorkFile) {
+		if err := f.DropReplace(path, version); err != nil {
+			base.Fatalf("go: -dropreplace=%s: %v", arg, err)
+		}
+	})
+}
+
+type replaceJSON struct {
+	Old module.Version
+	New module.Version
+}
+
+// editPrintJSON prints the -json output.
+func editPrintJSON(workFile *modfile.WorkFile) {
+	var f workfileJSON
+	if workFile.Go != nil {
+		f.Go = workFile.Go.Version
+	}
+	for _, d := range workFile.Use {
+		f.Use = append(f.Use, useJSON{DiskPath: d.Path, ModPath: d.ModulePath})
+	}
+
+	for _, r := range workFile.Replace {
+		f.Replace = append(f.Replace, replaceJSON{r.Old, r.New})
+	}
+	data, err := json.MarshalIndent(&f, "", "\t")
+	if err != nil {
+		base.Fatalf("go: internal error: %v", err)
+	}
+	data = append(data, '\n')
+	os.Stdout.Write(data)
+}
+
+// workfileJSON is the -json output data structure.
+type workfileJSON struct {
+	Go      string `json:",omitempty"`
+	Use     []useJSON
+	Replace []replaceJSON
+}
+
+type useJSON struct {
+	DiskPath string
+	ModPath  string `json:",omitempty"`
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/workcmd/init.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/workcmd/init.go
new file mode 100644
index 0000000000000000000000000000000000000000..02240b8189fab531d6225f8f870d13fda2c07f01
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/workcmd/init.go
@@ -0,0 +1,66 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go work init
+
+package workcmd
+
+import (
+	"context"
+	"path/filepath"
+
+	"cmd/go/internal/base"
+	"cmd/go/internal/fsys"
+	"cmd/go/internal/gover"
+	"cmd/go/internal/modload"
+
+	"golang.org/x/mod/modfile"
+)
+
+var cmdInit = &base.Command{
+	UsageLine: "go work init [moddirs]",
+	Short:     "initialize workspace file",
+	Long: `Init initializes and writes a new go.work file in the
+current directory, in effect creating a new workspace at the current
+directory.
+
+go work init optionally accepts paths to the workspace modules as
+arguments. If the argument is omitted, an empty workspace with no
+modules will be created.
+
+Each argument path is added to a use directive in the go.work file. The
+current go version will also be listed in the go.work file.
+
+See the workspaces reference at https://go.dev/ref/mod#workspaces
+for more information.
+`,
+	Run: runInit,
+}
+
+func init() {
+	base.AddChdirFlag(&cmdInit.Flag)
+	base.AddModCommonFlags(&cmdInit.Flag)
+}
+
+func runInit(ctx context.Context, cmd *base.Command, args []string) {
+	modload.InitWorkfile()
+
+	modload.ForceUseModules = true
+
+	gowork := modload.WorkFilePath()
+	if gowork == "" {
+		gowork = filepath.Join(base.Cwd(), "go.work")
+	}
+
+	if _, err := fsys.Stat(gowork); err == nil {
+		base.Fatalf("go: %s already exists", gowork)
+	}
+
+	goV := gover.Local() // Use current Go version by default
+	wf := new(modfile.WorkFile)
+	wf.Syntax = new(modfile.FileSyntax)
+	wf.AddGoStmt(goV)
+	workUse(ctx, gowork, wf, args)
+	modload.WriteWorkFile(gowork, wf)
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/workcmd/sync.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/workcmd/sync.go
new file mode 100644
index 0000000000000000000000000000000000000000..719cf76c9bf12ddbaccf3aa7ab35789af42df969
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/workcmd/sync.go
@@ -0,0 +1,146 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go work sync
+
+package workcmd
+
+import (
+	"cmd/go/internal/base"
+	"cmd/go/internal/gover"
+	"cmd/go/internal/imports"
+	"cmd/go/internal/modload"
+	"cmd/go/internal/toolchain"
+	"context"
+
+	"golang.org/x/mod/module"
+)
+
+var cmdSync = &base.Command{
+	UsageLine: "go work sync",
+	Short:     "sync workspace build list to modules",
+	Long: `Sync syncs the workspace's build list back to the
+workspace's modules
+
+The workspace's build list is the set of versions of all the
+(transitive) dependency modules used to do builds in the workspace. go
+work sync generates that build list using the Minimal Version Selection
+algorithm, and then syncs those versions back to each of modules
+specified in the workspace (with use directives).
+
+The syncing is done by sequentially upgrading each of the dependency
+modules specified in a workspace module to the version in the build list
+if the dependency module's version is not already the same as the build
+list's version. Note that Minimal Version Selection guarantees that the
+build list's version of each module is always the same or higher than
+that in each workspace module.
+
+See the workspaces reference at https://go.dev/ref/mod#workspaces
+for more information.
+`,
+	Run: runSync,
+}
+
+func init() {
+	base.AddChdirFlag(&cmdSync.Flag)
+	base.AddModCommonFlags(&cmdSync.Flag)
+}
+
+func runSync(ctx context.Context, cmd *base.Command, args []string) {
+	modload.ForceUseModules = true
+	modload.InitWorkfile()
+	if modload.WorkFilePath() == "" {
+		base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)")
+	}
+
+	_, err := modload.LoadModGraph(ctx, "")
+	if err != nil {
+		toolchain.SwitchOrFatal(ctx, err)
+	}
+	mustSelectFor := map[module.Version][]module.Version{}
+
+	mms := modload.MainModules
+
+	opts := modload.PackageOpts{
+		Tags:                     imports.AnyTags(),
+		VendorModulesInGOROOTSrc: true,
+		ResolveMissingImports:    false,
+		LoadTests:                true,
+		AllowErrors:              true,
+		SilencePackageErrors:     true,
+		SilenceUnmatchedWarnings: true,
+	}
+	for _, m := range mms.Versions() {
+		opts.MainModule = m
+		_, pkgs := modload.LoadPackages(ctx, opts, "all")
+		opts.MainModule = module.Version{} // reset
+
+		var (
+			mustSelect   []module.Version
+			inMustSelect = map[module.Version]bool{}
+		)
+		for _, pkg := range pkgs {
+			if r := modload.PackageModule(pkg); r.Version != "" && !inMustSelect[r] {
+				// r has a known version, so force that version.
+				mustSelect = append(mustSelect, r)
+				inMustSelect[r] = true
+			}
+		}
+		gover.ModSort(mustSelect) // ensure determinism
+		mustSelectFor[m] = mustSelect
+	}
+
+	workFilePath := modload.WorkFilePath() // save go.work path because EnterModule clobbers it.
+
+	var goV string
+	for _, m := range mms.Versions() {
+		if mms.ModRoot(m) == "" && m.Path == "command-line-arguments" {
+			// This is not a real module.
+			// TODO(#49228): Remove this special case once the special
+			// command-line-arguments module is gone.
+			continue
+		}
+
+		// Use EnterModule to reset the global state in modload to be in
+		// single-module mode using the modroot of m.
+		modload.EnterModule(ctx, mms.ModRoot(m))
+
+		// Edit the build list in the same way that 'go get' would if we
+		// requested the relevant module versions explicitly.
+		// TODO(#57001): Do we need a toolchain.SwitchOrFatal here,
+		// and do we need to pass a toolchain.Switcher in LoadPackages?
+		// If so, think about saving the WriteGoMods for after the loop,
+		// so we don't write some go.mods with the "before" toolchain
+		// and others with the "after" toolchain. If nothing else, that
+		// discrepancy could show up in auto-recorded toolchain lines.
+		changed, err := modload.EditBuildList(ctx, nil, mustSelectFor[m])
+		if err != nil {
+			continue
+		}
+		if changed {
+			modload.LoadPackages(ctx, modload.PackageOpts{
+				Tags:                     imports.AnyTags(),
+				Tidy:                     true,
+				VendorModulesInGOROOTSrc: true,
+				ResolveMissingImports:    false,
+				LoadTests:                true,
+				AllowErrors:              true,
+				SilenceMissingStdImports: true,
+				SilencePackageErrors:     true,
+			}, "all")
+			modload.WriteGoMod(ctx, modload.WriteOpts{})
+		}
+		goV = gover.Max(goV, modload.MainModules.GoVersion())
+	}
+
+	wf, err := modload.ReadWorkFile(workFilePath)
+	if err != nil {
+		base.Fatal(err)
+	}
+	modload.UpdateWorkGoVersion(wf, goV)
+	modload.UpdateWorkFile(wf)
+	if err := modload.WriteWorkFile(workFilePath, wf); err != nil {
+		base.Fatal(err)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/workcmd/use.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/workcmd/use.go
new file mode 100644
index 0000000000000000000000000000000000000000..55477119d4605cb2e1f99a52f0d0ae96e78089c9
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/workcmd/use.go
@@ -0,0 +1,254 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go work use
+
+package workcmd
+
+import (
+	"context"
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+
+	"cmd/go/internal/base"
+	"cmd/go/internal/fsys"
+	"cmd/go/internal/gover"
+	"cmd/go/internal/modload"
+	"cmd/go/internal/str"
+	"cmd/go/internal/toolchain"
+
+	"golang.org/x/mod/modfile"
+)
+
+var cmdUse = &base.Command{
+	UsageLine: "go work use [-r] [moddirs]",
+	Short:     "add modules to workspace file",
+	Long: `Use provides a command-line interface for adding
+directories, optionally recursively, to a go.work file.
+
+A use directive will be added to the go.work file for each argument
+directory listed on the command line go.work file, if it exists,
+or removed from the go.work file if it does not exist.
+Use fails if any remaining use directives refer to modules that
+do not exist.
+
+Use updates the go line in go.work to specify a version at least as
+new as all the go lines in the used modules, both preexisting ones
+and newly added ones. With no arguments, this update is the only
+thing that go work use does.
+
+The -r flag searches recursively for modules in the argument
+directories, and the use command operates as if each of the directories
+were specified as arguments: namely, use directives will be added for
+directories that exist, and removed for directories that do not exist.
+
+
+
+See the workspaces reference at https://go.dev/ref/mod#workspaces
+for more information.
+`,
+}
+
+var useR = cmdUse.Flag.Bool("r", false, "")
+
+func init() {
+	cmdUse.Run = runUse // break init cycle
+
+	base.AddChdirFlag(&cmdUse.Flag)
+	base.AddModCommonFlags(&cmdUse.Flag)
+}
+
+func runUse(ctx context.Context, cmd *base.Command, args []string) {
+	modload.ForceUseModules = true
+	modload.InitWorkfile()
+	gowork := modload.WorkFilePath()
+	if gowork == "" {
+		base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)")
+	}
+	wf, err := modload.ReadWorkFile(gowork)
+	if err != nil {
+		base.Fatal(err)
+	}
+	workUse(ctx, gowork, wf, args)
+	modload.WriteWorkFile(gowork, wf)
+}
+
+func workUse(ctx context.Context, gowork string, wf *modfile.WorkFile, args []string) {
+	workDir := filepath.Dir(gowork) // absolute, since gowork itself is absolute
+
+	haveDirs := make(map[string][]string) // absolute → original(s)
+	for _, use := range wf.Use {
+		var abs string
+		if filepath.IsAbs(use.Path) {
+			abs = filepath.Clean(use.Path)
+		} else {
+			abs = filepath.Join(workDir, use.Path)
+		}
+		haveDirs[abs] = append(haveDirs[abs], use.Path)
+	}
+
+	// keepDirs maps each absolute path to keep to the literal string to use for
+	// that path (either an absolute or a relative path), or the empty string if
+	// all entries for the absolute path should be removed.
+	keepDirs := make(map[string]string)
+
+	var sw toolchain.Switcher
+
+	// lookDir updates the entry in keepDirs for the directory dir,
+	// which is either absolute or relative to the current working directory
+	// (not necessarily the directory containing the workfile).
+	lookDir := func(dir string) {
+		absDir, dir := pathRel(workDir, dir)
+
+		file := base.ShortPath(filepath.Join(absDir, "go.mod"))
+		fi, err := fsys.Stat(file)
+		if err != nil {
+			if os.IsNotExist(err) {
+				keepDirs[absDir] = ""
+			} else {
+				sw.Error(err)
+			}
+			return
+		}
+
+		if !fi.Mode().IsRegular() {
+			sw.Error(fmt.Errorf("%v is not a regular file", file))
+			return
+		}
+
+		if dup := keepDirs[absDir]; dup != "" && dup != dir {
+			base.Errorf(`go: already added "%s" as "%s"`, dir, dup)
+		}
+		keepDirs[absDir] = dir
+	}
+
+	for _, useDir := range args {
+		absArg, _ := pathRel(workDir, useDir)
+
+		info, err := fsys.Stat(base.ShortPath(absArg))
+		if err != nil {
+			// Errors raised from os.Stat are formatted to be more user-friendly.
+			if os.IsNotExist(err) {
+				err = fmt.Errorf("directory %v does not exist", base.ShortPath(absArg))
+			}
+			sw.Error(err)
+			continue
+		} else if !info.IsDir() {
+			sw.Error(fmt.Errorf("%s is not a directory", base.ShortPath(absArg)))
+			continue
+		}
+
+		if !*useR {
+			lookDir(useDir)
+			continue
+		}
+
+		// Add or remove entries for any subdirectories that still exist.
+		// If the root itself is a symlink to a directory,
+		// we want to follow it (see https://go.dev/issue/50807).
+		// Add a trailing separator to force that to happen.
+		fsys.Walk(str.WithFilePathSeparator(useDir), func(path string, info fs.FileInfo, err error) error {
+			if err != nil {
+				return err
+			}
+
+			if !info.IsDir() {
+				if info.Mode()&fs.ModeSymlink != 0 {
+					if target, err := fsys.Stat(path); err == nil && target.IsDir() {
+						fmt.Fprintf(os.Stderr, "warning: ignoring symlink %s\n", base.ShortPath(path))
+					}
+				}
+				return nil
+			}
+			lookDir(path)
+			return nil
+		})
+
+		// Remove entries for subdirectories that no longer exist.
+		// Because they don't exist, they will be skipped by Walk.
+		for absDir := range haveDirs {
+			if str.HasFilePathPrefix(absDir, absArg) {
+				if _, ok := keepDirs[absDir]; !ok {
+					keepDirs[absDir] = "" // Mark for deletion.
+				}
+			}
+		}
+	}
+
+	// Update the work file.
+	for absDir, keepDir := range keepDirs {
+		nKept := 0
+		for _, dir := range haveDirs[absDir] {
+			if dir == keepDir { // (note that dir is always non-empty)
+				nKept++
+			} else {
+				wf.DropUse(dir)
+			}
+		}
+		if keepDir != "" && nKept != 1 {
+			// If we kept more than one copy, delete them all.
+			// We'll recreate a unique copy with AddUse.
+			if nKept > 1 {
+				wf.DropUse(keepDir)
+			}
+			wf.AddUse(keepDir, "")
+		}
+	}
+
+	// Read the Go versions from all the use entries, old and new (but not dropped).
+	goV := gover.FromGoWork(wf)
+	for _, use := range wf.Use {
+		if use.Path == "" { // deleted
+			continue
+		}
+		var abs string
+		if filepath.IsAbs(use.Path) {
+			abs = filepath.Clean(use.Path)
+		} else {
+			abs = filepath.Join(workDir, use.Path)
+		}
+		_, mf, err := modload.ReadModFile(base.ShortPath(filepath.Join(abs, "go.mod")), nil)
+		if err != nil {
+			sw.Error(err)
+			continue
+		}
+		goV = gover.Max(goV, gover.FromGoMod(mf))
+	}
+	sw.Switch(ctx)
+	base.ExitIfErrors()
+
+	modload.UpdateWorkGoVersion(wf, goV)
+	modload.UpdateWorkFile(wf)
+}
+
+// pathRel returns the absolute and canonical forms of dir for use in a
+// go.work file located in directory workDir.
+//
+// If dir is relative, it is interpreted relative to base.Cwd()
+// and its canonical form is relative to workDir if possible.
+// If dir is absolute or cannot be made relative to workDir,
+// its canonical form is absolute.
+//
+// Canonical absolute paths are clean.
+// Canonical relative paths are clean and slash-separated.
+func pathRel(workDir, dir string) (abs, canonical string) {
+	if filepath.IsAbs(dir) {
+		abs = filepath.Clean(dir)
+		return abs, abs
+	}
+
+	abs = filepath.Join(base.Cwd(), dir)
+	rel, err := filepath.Rel(workDir, abs)
+	if err != nil {
+		// The path can't be made relative to the go.work file,
+		// so it must be kept absolute instead.
+		return abs, abs
+	}
+
+	// Normalize relative paths to use slashes, so that checked-in go.work
+	// files with relative paths within the repo are platform-independent.
+	return abs, modload.ToDirectoryPath(rel)
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/workcmd/vendor.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/workcmd/vendor.go
new file mode 100644
index 0000000000000000000000000000000000000000..f9f0cc0898836fec20a6cf43cefcd215d135a034
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/workcmd/vendor.go
@@ -0,0 +1,55 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package workcmd
+
+import (
+	"cmd/go/internal/base"
+	"cmd/go/internal/cfg"
+	"cmd/go/internal/modcmd"
+	"cmd/go/internal/modload"
+	"context"
+)
+
+var cmdVendor = &base.Command{
+	UsageLine: "go work vendor [-e] [-v] [-o outdir]",
+	Short:     "make vendored copy of dependencies",
+	Long: `
+Vendor resets the workspace's vendor directory to include all packages
+needed to build and test all the workspace's packages.
+It does not include test code for vendored packages.
+
+The -v flag causes vendor to print the names of vendored
+modules and packages to standard error.
+
+The -e flag causes vendor to attempt to proceed despite errors
+encountered while loading packages.
+
+The -o flag causes vendor to create the vendor directory at the given
+path instead of "vendor". The go command can only use a vendor directory
+named "vendor" within the module root directory, so this flag is
+primarily useful for other tools.`,
+
+	Run: runVendor,
+}
+
+var vendorE bool   // if true, report errors but proceed anyway
+var vendorO string // if set, overrides the default output directory
+
+func init() {
+	cmdVendor.Flag.BoolVar(&cfg.BuildV, "v", false, "")
+	cmdVendor.Flag.BoolVar(&vendorE, "e", false, "")
+	cmdVendor.Flag.StringVar(&vendorO, "o", "", "")
+	base.AddChdirFlag(&cmdVendor.Flag)
+	base.AddModCommonFlags(&cmdVendor.Flag)
+}
+
+func runVendor(ctx context.Context, cmd *base.Command, args []string) {
+	modload.InitWorkfile()
+	if modload.WorkFilePath() == "" {
+		base.Fatalf("go: no go.work file found\n\t(run 'go work init' first or specify path using GOWORK environment variable)")
+	}
+
+	modcmd.RunVendor(ctx, vendorE, vendorO, args)
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/internal/workcmd/work.go b/platform/dbops/binaries/go/go/src/cmd/go/internal/workcmd/work.go
new file mode 100644
index 0000000000000000000000000000000000000000..bfbed83e889cb1ee0906a0b9de13756b145a035b
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/internal/workcmd/work.go
@@ -0,0 +1,79 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package workcmd implements the “go work” command.
+package workcmd
+
+import (
+	"cmd/go/internal/base"
+)
+
+var CmdWork = &base.Command{
+	UsageLine: "go work",
+	Short:     "workspace maintenance",
+	Long: `Work provides access to operations on workspaces.
+
+Note that support for workspaces is built into many other commands, not
+just 'go work'.
+
+See 'go help modules' for information about Go's module system of which
+workspaces are a part.
+
+See https://go.dev/ref/mod#workspaces for an in-depth reference on
+workspaces.
+
+See https://go.dev/doc/tutorial/workspaces for an introductory
+tutorial on workspaces.
+
+A workspace is specified by a go.work file that specifies a set of
+module directories with the "use" directive. These modules are used as
+root modules by the go command for builds and related operations.  A
+workspace that does not specify modules to be used cannot be used to do
+builds from local modules.
+
+go.work files are line-oriented. Each line holds a single directive,
+made up of a keyword followed by arguments. For example:
+
+	go 1.18
+
+	use ../foo/bar
+	use ./baz
+
+	replace example.com/foo v1.2.3 => example.com/bar v1.4.5
+
+The leading keyword can be factored out of adjacent lines to create a block,
+like in Go imports.
+
+	use (
+	  ../foo/bar
+	  ./baz
+	)
+
+The use directive specifies a module to be included in the workspace's
+set of main modules. The argument to the use directive is the directory
+containing the module's go.mod file.
+
+The go directive specifies the version of Go the file was written at. It
+is possible there may be future changes in the semantics of workspaces
+that could be controlled by this version, but for now the version
+specified has no effect.
+
+The replace directive has the same syntax as the replace directive in a
+go.mod file and takes precedence over replaces in go.mod files.  It is
+primarily intended to override conflicting replaces in different workspace
+modules.
+
+To determine whether the go command is operating in workspace mode, use
+the "go env GOWORK" command. This will specify the workspace file being
+used.
+`,
+
+	Commands: []*base.Command{
+		cmdEdit,
+		cmdInit,
+		cmdSync,
+		cmdUse,
+		cmdVendor,
+	},
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/addmod.go b/platform/dbops/binaries/go/go/src/cmd/go/testdata/addmod.go
new file mode 100644
index 0000000000000000000000000000000000000000..7ef68b3edcc9d5537dc3f6be97878d2dcf498398
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/addmod.go
@@ -0,0 +1,154 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+// Addmod adds a module as a txtar archive to the testdata/mod directory.
+//
+// Usage:
+//
+//	go run addmod.go path@version...
+//
+// It should only be used for very small modules - we do not want to check
+// very large files into testdata/mod.
+//
+// It is acceptable to edit the archive afterward to remove or shorten files.
+// See mod/README for more information.
+package main
+
+import (
+	"bytes"
+	"cmd/go/internal/str"
+	"flag"
+	"fmt"
+	"internal/txtar"
+	"io/fs"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+)
+
+func usage() {
+	fmt.Fprintf(os.Stderr, "usage: go run addmod.go path@version...\n")
+	os.Exit(2)
+}
+
+var tmpdir string
+
+func fatalf(format string, args ...any) {
+	os.RemoveAll(tmpdir)
+	log.Fatalf(format, args...)
+}
+
+const goCmd = "go"
+
+func main() {
+	flag.Usage = usage
+	flag.Parse()
+	if flag.NArg() == 0 {
+		usage()
+	}
+
+	log.SetPrefix("addmod: ")
+	log.SetFlags(0)
+
+	var err error
+	tmpdir, err = os.MkdirTemp("", "addmod-")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	run := func(command string, args ...string) string {
+		cmd := exec.Command(command, args...)
+		cmd.Dir = tmpdir
+		var stderr bytes.Buffer
+		cmd.Stderr = &stderr
+		out, err := cmd.Output()
+		if err != nil {
+			fatalf("%s %s: %v\n%s", command, strings.Join(args, " "), err, stderr.Bytes())
+		}
+		return string(out)
+	}
+
+	gopath := strings.TrimSpace(run("go", "env", "GOPATH"))
+	if gopath == "" {
+		fatalf("cannot find GOPATH")
+	}
+
+	exitCode := 0
+	for _, arg := range flag.Args() {
+		if err := os.WriteFile(filepath.Join(tmpdir, "go.mod"), []byte("module m\n"), 0666); err != nil {
+			fatalf("%v", err)
+		}
+		run(goCmd, "get", "-d", arg)
+		path := arg
+		if i := strings.Index(path, "@"); i >= 0 {
+			path = path[:i]
+		}
+		out := run(goCmd, "list", "-m", "-f={{.Path}} {{.Version}} {{.Dir}}", path)
+		f := strings.Fields(out)
+		if len(f) != 3 {
+			log.Printf("go list -m %s: unexpected output %q", arg, out)
+			exitCode = 1
+			continue
+		}
+		path, vers, dir := f[0], f[1], f[2]
+		mod, err := os.ReadFile(filepath.Join(gopath, "pkg/mod/cache/download", path, "@v", vers+".mod"))
+		if err != nil {
+			log.Printf("%s: %v", arg, err)
+			exitCode = 1
+			continue
+		}
+		info, err := os.ReadFile(filepath.Join(gopath, "pkg/mod/cache/download", path, "@v", vers+".info"))
+		if err != nil {
+			log.Printf("%s: %v", arg, err)
+			exitCode = 1
+			continue
+		}
+
+		a := new(txtar.Archive)
+		title := arg
+		if !strings.Contains(arg, "@") {
+			title += "@" + vers
+		}
+		a.Comment = []byte(fmt.Sprintf("module %s\n\n", title))
+		a.Files = []txtar.File{
+			{Name: ".mod", Data: mod},
+			{Name: ".info", Data: info},
+		}
+		dir = filepath.Clean(dir)
+		err = filepath.WalkDir(dir, func(path string, info fs.DirEntry, err error) error {
+			if !info.Type().IsRegular() {
+				return nil
+			}
+			name := info.Name()
+			if name == "go.mod" || strings.HasSuffix(name, ".go") {
+				data, err := os.ReadFile(path)
+				if err != nil {
+					return err
+				}
+				a.Files = append(a.Files, txtar.File{Name: str.TrimFilePathPrefix(path, dir), Data: data})
+			}
+			return nil
+		})
+		if err != nil {
+			log.Printf("%s: %v", arg, err)
+			exitCode = 1
+			continue
+		}
+
+		data := txtar.Format(a)
+		target := filepath.Join("mod", strings.ReplaceAll(path, "/", "_")+"_"+vers+".txt")
+		if err := os.WriteFile(target, data, 0666); err != nil {
+			log.Printf("%s: %v", arg, err)
+			exitCode = 1
+			continue
+		}
+	}
+	os.RemoveAll(tmpdir)
+	os.Exit(exitCode)
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/README b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/README
new file mode 100644
index 0000000000000000000000000000000000000000..43ddf77eff3844b631665485abe511380ae7066a
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/README
@@ -0,0 +1,36 @@
+This directory holds Go modules served by a Go module proxy
+that runs on localhost during tests, both to make tests avoid
+requiring specific network servers and also to make them 
+significantly faster.
+
+A small go get'able test module can be added here by running
+
+	cd cmd/go/testdata
+	go run addmod.go path@vers
+
+where path and vers are the module path and version to add here.
+
+For interactive experimentation using this set of modules, run:
+
+	cd cmd/go
+	go test -proxy=localhost:1234 &
+	export GOPROXY=http://localhost:1234/mod
+
+and then run go commands as usual.
+
+Modules saved to this directory should be small: a few kilobytes at most.
+It is acceptable to edit the archives created by addmod.go to remove
+or shorten files. It is also acceptable to write module archives by hand: 
+they need not be backed by some public git repo.
+
+Each module archive is named path_vers.txt, where slashes in path
+have been replaced with underscores. The archive must contain
+two files ".info" and ".mod", to be served as the info and mod files
+in the proxy protocol (see https://research.swtch.com/vgo-module).
+The remaining files are served as the content of the module zip file.
+The path@vers prefix required of files in the zip file is added
+automatically by the proxy: the files in the archive have names without
+the prefix, like plain "go.mod", "x.go", and so on.
+
+See ../addmod.go and ../savedir.go for tools to generate txtar files,
+although again it is also fine to write them by hand.
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_ambiguous_a_b_v0.0.0-empty.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_ambiguous_a_b_v0.0.0-empty.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a86951981e2596a9a088018d0936e2d6f08ee7a1
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_ambiguous_a_b_v0.0.0-empty.txt
@@ -0,0 +1,12 @@
+Module example.com/ambiguous/a/b is a suffix of example.com/a.
+This version contains no package.
+-- .mod --
+module example.com/ambiguous/a/b
+
+go 1.16
+-- .info --
+{"Version":"v0.0.0-empty"}
+-- go.mod --
+module example.com/ambiguous/a/b
+
+go 1.16
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_ambiguous_a_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_ambiguous_a_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bb438262e134bf3d34b3d0af92193fc8966ac2a5
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_ambiguous_a_v1.0.0.txt
@@ -0,0 +1,18 @@
+Module example.com/ambiguous/a is a prefix of example.com/a/b.
+It contains package example.com/a/b.
+-- .mod --
+module example.com/ambiguous/a
+
+go 1.16
+
+require example.com/ambiguous/a/b v0.0.0-empty
+-- .info --
+{"Version":"v1.0.0"}
+-- go.mod --
+module example.com/ambiguous/a
+
+go 1.16
+
+require example.com/ambiguous/a/b v0.0.0-empty
+-- b/b.go --
+package b
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_badchain_a_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_badchain_a_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d7bf6471b7cdeb6bac3dca177dcb5ffef57901ef
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_badchain_a_v1.0.0.txt
@@ -0,0 +1,12 @@
+example.com/badchain/a v1.0.0
+
+-- .mod --
+module example.com/badchain/a
+
+require example.com/badchain/b v1.0.0
+-- .info --
+{"Version":"v1.0.0"}
+-- a.go --
+package a
+
+import _ "example.com/badchain/b"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_badchain_a_v1.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_badchain_a_v1.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..92190d8ac177a11226b09c435bae3a182727453d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_badchain_a_v1.1.0.txt
@@ -0,0 +1,12 @@
+example.com/badchain/a v1.1.0
+
+-- .mod --
+module example.com/badchain/a
+
+require example.com/badchain/b v1.1.0
+-- .info --
+{"Version":"v1.1.0"}
+-- a.go --
+package a
+
+import _ "example.com/badchain/b"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_badchain_b_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_badchain_b_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d42b8aab164536e3e5a157dd605f8bfc4d097662
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_badchain_b_v1.0.0.txt
@@ -0,0 +1,12 @@
+example.com/badchain/b v1.0.0
+
+-- .mod --
+module example.com/badchain/b
+
+require example.com/badchain/c v1.0.0
+-- .info --
+{"Version":"v1.0.0"}
+-- b.go --
+package b
+
+import _ "example.com/badchain/c"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_badchain_b_v1.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_badchain_b_v1.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..664818474ce815cb4bc882914c8c5d1e2ffa2263
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_badchain_b_v1.1.0.txt
@@ -0,0 +1,12 @@
+example.com/badchain/b v1.1.0
+
+-- .mod --
+module example.com/badchain/b
+
+require example.com/badchain/c v1.1.0
+-- .info --
+{"Version":"v1.1.0"}
+-- b.go --
+package b
+
+import _ "example.com/badchain/c"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_badchain_c_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_badchain_c_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9c717cb0e6efcf0fba2b30642a02aa54dee67b89
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_badchain_c_v1.0.0.txt
@@ -0,0 +1,8 @@
+example.com/badchain/c v1.0.0
+
+-- .mod --
+module example.com/badchain/c
+-- .info --
+{"Version":"v1.0.0"}
+-- c.go --
+package c
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_badchain_c_v1.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_badchain_c_v1.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..36bc2c67266e0c7906e9fd4e7ba5964b3b1a328b
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_badchain_c_v1.1.0.txt
@@ -0,0 +1,8 @@
+example.com/badchain/c v1.1.0
+
+-- .mod --
+module badchain.example.com/c
+-- .info --
+{"Version":"v1.1.0"}
+-- c.go --
+package c
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_cmd_v1.0.0-exclude.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_cmd_v1.0.0-exclude.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c883d8a7744a15626377305cfca13ccfd7fa5d98
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_cmd_v1.0.0-exclude.txt
@@ -0,0 +1,28 @@
+example.com/cmd contains main packages.
+
+-- .info --
+{"Version":"v1.0.0-exclude"}
+-- .mod --
+module example.com/cmd
+
+go 1.16
+
+exclude rsc.io/quote v1.5.2
+-- go.mod --
+module example.com/cmd
+
+go 1.16
+
+exclude rsc.io/quote v1.5.2
+-- a/a.go --
+package main
+
+func main() {}
+-- b/b.go --
+package main
+
+func main() {}
+-- err/err.go --
+package err
+
+var X = DoesNotCompile
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_cmd_v1.0.0-newerself.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_cmd_v1.0.0-newerself.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7670f29ffd0024ea106a0de4269ee32ee11a71c7
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_cmd_v1.0.0-newerself.txt
@@ -0,0 +1,28 @@
+example.com/cmd contains main packages.
+
+-- .info --
+{"Version":"v1.0.0-newerself"}
+-- .mod --
+module example.com/cmd
+
+go 1.16
+
+require example.com/cmd v1.0.0
+-- go.mod --
+module example.com/cmd
+
+go 1.16
+
+require example.com/cmd v1.0.0
+-- a/a.go --
+package main
+
+func main() {}
+-- b/b.go --
+package main
+
+func main() {}
+-- err/err.go --
+package err
+
+var X = DoesNotCompile
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_cmd_v1.0.0-replace.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_cmd_v1.0.0-replace.txt
new file mode 100644
index 0000000000000000000000000000000000000000..581a496035ec9ee072ca49c929cbd0ca420c893b
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_cmd_v1.0.0-replace.txt
@@ -0,0 +1,28 @@
+example.com/cmd contains main packages.
+
+-- .info --
+{"Version":"v1.0.0-replace"}
+-- .mod --
+module example.com/cmd
+
+go 1.16
+
+replace rsc.io/quote => rsc.io/quote v1.5.2
+-- go.mod --
+module example.com/cmd
+
+go 1.16
+
+replace rsc.io/quote => rsc.io/quote v1.5.2
+-- a/a.go --
+package main
+
+func main() {}
+-- b/b.go --
+package main
+
+func main() {}
+-- err/err.go --
+package err
+
+var X = DoesNotCompile
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_cmd_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_cmd_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c1981391a13c74a57b1a326f4cd915b236594208
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_cmd_v1.0.0.txt
@@ -0,0 +1,31 @@
+example.com/cmd contains main packages.
+
+v1.0.0 is the latest non-retracted version. Other versions contain errors or
+detectable problems.
+
+-- .info --
+{"Version":"v1.0.0"}
+-- .mod --
+module example.com/cmd
+
+go 1.16
+-- go.mod --
+module example.com/cmd
+
+go 1.16
+-- a/a.go --
+package main
+
+import "fmt"
+
+func main() { fmt.Println("a@v1.0.0") }
+-- b/b.go --
+package main
+
+import "fmt"
+
+func main() { fmt.Println("b@v1.0.0") }
+-- err/err.go --
+package err
+
+var X = DoesNotCompile
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_cmd_v1.9.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_cmd_v1.9.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9298afb1fb39175e7c936a96785c04f0ebe78c93
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_cmd_v1.9.0.txt
@@ -0,0 +1,30 @@
+example.com/cmd contains main packages.
+
+-- .info --
+{"Version":"v1.9.0"}
+-- .mod --
+module example.com/cmd
+
+go 1.16
+
+// this is a bad version
+retract v1.9.0
+-- go.mod --
+module example.com/cmd
+
+go 1.16
+
+// this is a bad version
+retract v1.9.0
+-- a/a.go --
+package main
+
+func main() {}
+-- b/b.go --
+package main
+
+func main() {}
+-- err/err.go --
+package err
+
+var X = DoesNotCompile
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_depends_on_generics_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_depends_on_generics_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..80d309552e58bac31a0bc03ab936fc441dfb1dbf
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_depends_on_generics_v1.0.0.txt
@@ -0,0 +1,23 @@
+example.com/depends/on/generics v1.0.0
+written by hand
+
+-- .mod --
+module example.com/depends/on/generics
+
+go 1.18
+
+require example.com/generics v1.0.0
+-- .info --
+{"Version":"v1.0.0"}
+-- go.mod --
+module example.com/depends/on/generics
+
+go 1.18
+
+require example.com/generics v1.0.0
+-- main.go --
+package main
+
+import "example.com/generics"
+
+func main() {generics.Bar()}
\ No newline at end of file
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_deprecated_a_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_deprecated_a_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7c29621e83db969f78eb860c4fd5457a2f1b2223
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_deprecated_a_v1.0.0.txt
@@ -0,0 +1,12 @@
+-- .info --
+{"Version":"v1.0.0"}
+-- .mod --
+module example.com/deprecated/a
+
+go 1.17
+-- go.mod --
+module example.com/deprecated/a
+
+go 1.17
+-- a.go --
+package a
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_deprecated_a_v1.9.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_deprecated_a_v1.9.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0613389d1f3874ffe6a1bbd981f41d679ee8748a
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_deprecated_a_v1.9.0.txt
@@ -0,0 +1,14 @@
+-- .info --
+{"Version":"v1.9.0"}
+-- .mod --
+// Deprecated: in example.com/deprecated/a@v1.9.0
+module example.com/deprecated/a
+
+go 1.17
+-- go.mod --
+// Deprecated: in example.com/deprecated/a@v1.9.0
+module example.com/deprecated/a
+
+go 1.17
+-- a.go --
+package a
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_deprecated_b_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_deprecated_b_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..50006aefb5f3aabeddc1763a1f39f7c74b276252
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_deprecated_b_v1.0.0.txt
@@ -0,0 +1,12 @@
+-- .info --
+{"Version":"v1.0.0"}
+-- .mod --
+module example.com/deprecated/b
+
+go 1.17
+-- go.mod --
+module example.com/deprecated/b
+
+go 1.17
+-- b.go --
+package b
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_deprecated_b_v1.9.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_deprecated_b_v1.9.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..163d6b543eb7a8ff08f4ad19debf24c4aea21024
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_deprecated_b_v1.9.0.txt
@@ -0,0 +1,14 @@
+-- .info --
+{"Version":"v1.9.0"}
+-- .mod --
+// Deprecated: in example.com/deprecated/b@v1.9.0
+module example.com/deprecated/b
+
+go 1.17
+-- go.mod --
+// Deprecated: in example.com/deprecated/b@v1.9.0
+module example.com/deprecated/b
+
+go 1.17
+-- b.go --
+package b
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_dotgo.go_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_dotgo.go_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4f7f4d7dd21cf50a507b9bfa56bfc6bf027635b7
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_dotgo.go_v1.0.0.txt
@@ -0,0 +1,16 @@
+This module's path ends with ".go".
+Based on github.com/nats-io/nats.go.
+Used in regression tests for golang.org/issue/32483.
+
+-- .mod --
+module example.com/dotgo.go
+
+go 1.13
+-- .info --
+{"Version":"v1.0.0"}
+-- go.mod --
+module example.com/dotgo.go
+
+go 1.13
+-- dotgo.go --
+package dotgo
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_dotname_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_dotname_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2ada3a3f812af60861577b2383c619fde64c4126
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_dotname_v1.0.0.txt
@@ -0,0 +1,12 @@
+-- .info --
+{"Version":"v1.0.0"}
+-- .mod --
+module example.com/dotname
+
+go 1.16
+-- go.mod --
+module example.com/dotname
+
+go 1.16
+-- .dot/dot.go --
+package dot
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_downgrade_v2.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_downgrade_v2.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..88d50e5bbab8d6578a693e6865674b20b0936f1d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_downgrade_v2.0.0.txt
@@ -0,0 +1,9 @@
+example.com/downgrade v2.0.0
+written by hand
+
+-- .mod --
+module example.com/downgrade
+
+require rsc.io/quote v1.5.2
+-- .info --
+{"Version":"v2.0.0"}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_downgrade_v2_v2.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_downgrade_v2_v2.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a4d665ff1be4f0dab02b1bf4d340538a4d3216e9
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_downgrade_v2_v2.0.1.txt
@@ -0,0 +1,13 @@
+example.com/downgrade/v2 v2.0.1
+written by hand
+
+-- .mod --
+module example.com/downgrade/v2
+
+require rsc.io/quote v1.5.2
+-- .info --
+{"Version":"v2.0.1"}
+-- go.mod --
+module example.com/downgrade/v2
+
+require rsc.io/quote v1.5.2
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_fuzzfail_v0.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_fuzzfail_v0.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..af005ffb4119080e158cd4b9cd7ce44d0b951112
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_fuzzfail_v0.1.0.txt
@@ -0,0 +1,20 @@
+-- .mod --
+module example.com/fuzzfail
+
+go 1.18
+-- .info --
+{"Version":"v0.1.0"}
+-- go.mod --
+module example.com/fuzzfail
+
+go 1.18
+-- fuzzfail_test.go --
+package fuzzfail
+
+import "testing"
+
+func FuzzFail(f *testing.F) {
+	f.Fuzz(func(t *testing.T, b []byte) {
+		t.Fatalf("oops: %q", b)
+	})
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_fuzzfail_v0.2.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_fuzzfail_v0.2.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ea599aa61109aa2138746686412c0d035582c2ea
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_fuzzfail_v0.2.0.txt
@@ -0,0 +1,23 @@
+-- .mod --
+module example.com/fuzzfail
+
+go 1.18
+-- .info --
+{"Version":"v0.2.0"}
+-- go.mod --
+module example.com/fuzzfail
+
+go 1.18
+-- fuzzfail_test.go --
+package fuzzfail
+
+import "testing"
+
+func FuzzFail(f *testing.F) {
+	f.Fuzz(func(t *testing.T, b []byte) {
+		t.Fatalf("oops: %q", b)
+	})
+}
+-- testdata/fuzz/FuzzFail/bbb0c2d22aa1a24617301566dc7486f8b625d38024603ba62757c1124013b49a --
+go test fuzz v1
+[]byte("\x05")
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_generics_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_generics_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..092241e93b20be4ed0accc4a54417277d0aed694
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_generics_v1.0.0.txt
@@ -0,0 +1,21 @@
+example.com/generics v1.0.0
+written by hand
+
+-- .mod --
+module example.com/generics
+
+go 1.18
+-- .info --
+{"Version":"v1.0.0"}
+-- go.mod --
+module example.com/generics
+
+go 1.18
+-- generics.go --
+package generics
+
+type Int interface {
+	~int
+}
+
+func Bar() {}
\ No newline at end of file
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_incompatiblewithsub_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_incompatiblewithsub_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..435578da8d8a5d7da42a1ad0446749e76e399ded
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_incompatiblewithsub_v1.0.0.txt
@@ -0,0 +1,8 @@
+Module example.com/incompatiblewithsub has an incompatible version
+and a package in a subdirectory.
+-- .info --
+{"Version":"v1.0.0"}
+-- .mod --
+module example.com/incompatiblewithsub
+-- sub/sub.go --
+package sub
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_incompatiblewithsub_v2.0.0+incompatible.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_incompatiblewithsub_v2.0.0+incompatible.txt
new file mode 100644
index 0000000000000000000000000000000000000000..198ec1702bdebbcffd3b96ad34328dc40bb9e933
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_incompatiblewithsub_v2.0.0+incompatible.txt
@@ -0,0 +1,8 @@
+Module example.com/incompatiblewithsub has an incompatible version
+and a package in a subdirectory.
+-- .info --
+{"Version":"v2.0.0+incompatible"}
+-- .mod --
+module example.com/incompatiblewithsub
+-- sub/sub.go --
+package sub
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_invalidpath_v1_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_invalidpath_v1_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7d9d1303a9043c2a9c7cf6015e2fc66d8da3f226
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_invalidpath_v1_v1.0.0.txt
@@ -0,0 +1,13 @@
+example.com/invalidpath/v1 v1.0.0
+written by hand
+
+-- .mod --
+module example.com/invalidpath/v1
+-- .info --
+{"Version":"v1.0.0"}
+-- go.mod --
+module example.com/invalidpath/v1
+-- version.go --
+package version
+
+const V = "v1.0.0"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_join_subpkg_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_join_subpkg_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1ecfa0b6de97248921e6bd89c6f66e138f208ce7
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_join_subpkg_v1.0.0.txt
@@ -0,0 +1,9 @@
+Written by hand.
+Test case for package moved into a parent module.
+
+-- .mod --
+module example.com/join/subpkg
+-- .info --
+{"Version": "v1.0.0"}
+-- x.go --
+package subpkg
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_join_subpkg_v1.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_join_subpkg_v1.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9eb823adb76c0219b5f03e8674d9609f9ec4e4b2
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_join_subpkg_v1.1.0.txt
@@ -0,0 +1,9 @@
+Written by hand.
+Test case for package moved into a parent module.
+
+-- .mod --
+module example.com/join/subpkg
+
+require example.com/join v1.1.0
+-- .info --
+{"Version": "v1.1.0"}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_join_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_join_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..84c68b13b6de6238856aab8a1e9547dcf47f9556
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_join_v1.0.0.txt
@@ -0,0 +1,7 @@
+Written by hand.
+Test case for package moved into a parent module.
+
+-- .mod --
+module example.com/join
+-- .info --
+{"Version": "v1.0.0"}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_join_v1.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_join_v1.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5f92036d9e7b248f14a14f4f652f73152a001e0f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_join_v1.1.0.txt
@@ -0,0 +1,9 @@
+Written by hand.
+Test case for package moved into a parent module.
+
+-- .mod --
+module example.com/join
+-- .info --
+{"Version": "v1.1.0"}
+-- subpkg/x.go --
+package subpkg
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_latemigrate_v2_v2.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_latemigrate_v2_v2.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..25bd3d9d8fa2e802965bb851d45453dc2cbf4be5
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_latemigrate_v2_v2.0.0.txt
@@ -0,0 +1,14 @@
+example.com/latemigrate/v2 v2.0.0
+written by hand
+
+This repository migrated to modules in v2.0.1 after v2.0.0 was already tagged.
+All versions require rsc.io/quote so we can test downgrades.
+
+v2.0.0 is technically part of example.com/latemigrate as v2.0.0+incompatible.
+Proxies may serve it as part of the version list for example.com/latemigrate/v2.
+'go get' must be able to ignore these versions.
+
+-- .mod --
+module example.com/latemigrate
+-- .info --
+{"Version":"v2.0.0"}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_latemigrate_v2_v2.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_latemigrate_v2_v2.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..be427a3185d693b901302fc0405ebfb4f0079dfb
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_latemigrate_v2_v2.0.1.txt
@@ -0,0 +1,20 @@
+example.com/latemigrate/v2 v2.0.1
+written by hand
+
+This repository migrated to modules in v2.0.1 after v2.0.0 was already tagged.
+All versions require rsc.io/quote so we can test downgrades.
+
+v2.0.1 belongs to example.com/latemigrate/v2.
+
+-- .mod --
+module example.com/latemigrate/v2
+
+require rsc.io/quote v1.3.0
+-- .info --
+{"Version":"v2.0.1"}
+-- go.mod --
+module example.com/latemigrate/v2
+
+require rsc.io/quote v1.3.0
+-- late.go --
+package late
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_missingpkg_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_missingpkg_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..15f3f69557d1ddf54ae15c86d6fc88e3832f10e1
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_missingpkg_v1.0.0.txt
@@ -0,0 +1,11 @@
+The deprecated package is present in this version (which is @latest) but
+is deleted in a newer prerelease version.
+
+-- .mod --
+module example.com/missingpkg
+-- .info --
+{"Version":"v1.0.0"}
+-- lib.go --
+package lib
+-- deprecated/deprecated.go --
+package deprecated
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_missingpkg_v1.0.1-beta.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_missingpkg_v1.0.1-beta.txt
new file mode 100644
index 0000000000000000000000000000000000000000..44580fe4cbe3900ba21de34bc9562950b3cc1e08
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_missingpkg_v1.0.1-beta.txt
@@ -0,0 +1,8 @@
+The deprecated package is deleted in this version.
+
+-- .mod --
+module example.com/missingpkg
+-- .info --
+{"Version":"v1.0.1-beta"}
+-- lib.go --
+package lib
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_nest_sub_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_nest_sub_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..90f1459803944054313dec639e713e95f9316773
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_nest_sub_v1.0.0.txt
@@ -0,0 +1,12 @@
+Written by hand.
+Test case for nested modules without an explicit relationship.
+This is nested below the top-level module.
+
+-- .mod --
+module example.com/nest/sub
+-- .info --
+{"Version": "v1.0.0"}
+-- go.mod --
+module example.com/nest/sub
+-- y/y.go --
+package y
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_nest_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_nest_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..593caf1d90abe48bd1d9476644ac0bab517971af
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_nest_v1.0.0.txt
@@ -0,0 +1,12 @@
+Written by hand.
+Test case for nested modules without an explicit relationship.
+This is the top-level module.
+
+-- .mod --
+module example.com/nest
+-- .info --
+{"Version": "v1.0.0"}
+-- go.mod --
+module example.com/nest
+-- sub/x/x.go --
+package x
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_nest_v1.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_nest_v1.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5a01550fd5d075167d7bfce1413bcf2dad1a090b
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_nest_v1.1.0.txt
@@ -0,0 +1,12 @@
+Written by hand.
+Test case for nested modules without an explicit relationship.
+This is the top-level module.
+
+-- .mod --
+module example.com/nest
+-- .info --
+{"Version": "v1.1.0"}
+-- go.mod --
+module example.com/nest
+-- sub/x/x.go --
+package x
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_newcycle_a_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_newcycle_a_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..829065df9fb96989d2f9aa70e220f1fb75750396
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_newcycle_a_v1.0.0.txt
@@ -0,0 +1,10 @@
+example.com/newcycle/a v1.0.0
+
+Transitively requires v1.0.1 of itself via example.com/newcycle/b
+
+-- .mod --
+module example.com/newcycle/a
+
+require example.com/newcycle/b v1.0.0
+-- .info --
+{"Version":"v1.0.0"}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_newcycle_a_v1.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_newcycle_a_v1.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a03f4b49fd5ec51f0c5196f2390163f0d90840ca
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_newcycle_a_v1.0.1.txt
@@ -0,0 +1,10 @@
+example.com/newcycle/a v1.0.1
+
+Transitively requires itself via example.com/newcycle/b
+
+-- .mod --
+module example.com/newcycle/a
+
+require example.com/newcycle/b v1.0.0
+-- .info --
+{"Version":"v1.0.1"}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_newcycle_b_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_newcycle_b_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ff9e1f5ea5fa6e769071f37a022f7797cc817253
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_newcycle_b_v1.0.0.txt
@@ -0,0 +1,8 @@
+example.com/newcycle/b v1.0.0
+
+-- .mod --
+module example.com/newcycle/b
+
+require example.com/newcycle/a v1.0.1
+-- .info --
+{"Version":"v1.0.0"}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_noroot_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_noroot_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..aa5febf71056fd75dd32f9c05ade375026e36cb0
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_noroot_v1.0.0.txt
@@ -0,0 +1,8 @@
+A module which has no root package.
+
+-- .mod --
+module example.com/noroot
+-- .info --
+{"Version":"v1.0.0"}
+-- pkg/pkg.go --
+package pkg
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_noroot_v1.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_noroot_v1.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9b93717c84f48e36a7b812c6f5c7d3b9b20db1a1
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_noroot_v1.0.1.txt
@@ -0,0 +1,8 @@
+A module which has no root package.
+
+-- .mod --
+module example.com/noroot
+-- .info --
+{"Version":"v1.0.1"}
+-- pkg/pkg.go --
+package pkg
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_notags_v0.0.0-20190507143103-cc8cbe209b64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_notags_v0.0.0-20190507143103-cc8cbe209b64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..259774d542554c675a3c83f056d0be544a1f2b94
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_notags_v0.0.0-20190507143103-cc8cbe209b64.txt
@@ -0,0 +1,9 @@
+Written by hand.
+The "latest" version of a module without any tags.
+
+-- .mod --
+module example.com/notags
+-- .info --
+{"Version":"v0.0.0-20190507143103-cc8cbe209b64","Time":"2019-05-07T07:31:03-07:00"}
+-- notags.go --
+package notags
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_printversion_v0.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_printversion_v0.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..606322ac862fe5c15672a61e71847e3f0d8dd2fc
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_printversion_v0.1.0.txt
@@ -0,0 +1,33 @@
+example.com/printversion v0.1.0
+
+-- .mod --
+module example.com/printversion
+-- .info --
+{"Version":"v0.1.0"}
+-- README.txt --
+There is no go.mod file for this version of the module.
+-- printversion.go --
+package main
+
+import (
+	"fmt"
+	"os"
+	"runtime/debug"
+
+	_ "example.com/version"
+)
+
+func main() {
+	info, _ := debug.ReadBuildInfo()
+	fmt.Fprintf(os.Stdout, "path is %s\n", info.Path)
+	fmt.Fprintf(os.Stdout, "main is %s %s\n", info.Main.Path, info.Main.Version)
+	if r := info.Main.Replace; r != nil {
+		fmt.Fprintf(os.Stdout, "\t(replaced by %s %s)\n", r.Path, r.Version)
+	}
+	for _, m := range info.Deps {
+		fmt.Fprintf(os.Stdout, "using %s %s\n", m.Path, m.Version)
+		if r := m.Replace; r != nil {
+			fmt.Fprintf(os.Stdout, "\t(replaced by %s %s)\n", r.Path, r.Version)
+		}
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_printversion_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_printversion_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b9b71e953825aef26891a3d9c9d8d51276371189
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_printversion_v1.0.0.txt
@@ -0,0 +1,41 @@
+example.com/printversion v1.0.0
+
+-- .mod --
+module example.com/printversion
+
+require example.com/version v1.0.0
+replace example.com/version v1.0.0 => ../oops v0.0.0
+exclude example.com/version v1.1.0
+-- .info --
+{"Version":"v1.0.0"}
+-- go.mod --
+module example.com/printversion
+
+require example.com/version v1.0.0
+replace example.com/version v1.0.0 => ../oops v0.0.0
+exclude example.com/version v1.0.1
+-- printversion.go --
+package main
+
+import (
+	"fmt"
+	"os"
+	"runtime/debug"
+
+	_ "example.com/version"
+)
+
+func main() {
+	info, _ := debug.ReadBuildInfo()
+	fmt.Fprintf(os.Stdout, "path is %s\n", info.Path)
+	fmt.Fprintf(os.Stdout, "main is %s %s\n", info.Main.Path, info.Main.Version)
+	if r := info.Main.Replace; r != nil {
+		fmt.Fprintf(os.Stdout, "\t(replaced by %s %s)\n", r.Path, r.Version)
+	}
+	for _, m := range info.Deps {
+		fmt.Fprintf(os.Stdout, "using %s %s\n", m.Path, m.Version)
+		if r := m.Replace; r != nil {
+			fmt.Fprintf(os.Stdout, "\t(replaced by %s %s)\n", r.Path, r.Version)
+		}
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_pseudoupgrade_v0.0.0-20190430073000-30950c05d534.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_pseudoupgrade_v0.0.0-20190430073000-30950c05d534.txt
new file mode 100644
index 0000000000000000000000000000000000000000..047ceb68c5b55caa73e98695cdb48a22754b1bcf
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_pseudoupgrade_v0.0.0-20190430073000-30950c05d534.txt
@@ -0,0 +1,13 @@
+example.com/pseudoupgrade v0.0.0-20190429073000-30950c05d534
+written by hand
+
+-- .mod --
+module example.com/pseudoupgrade
+
+-- .info --
+{"Version":"v0.0.0-20190430073000-30950c05d534","Name":"v0.0.0-20190430073000-30950c05d534","Short":"30950c05d534","Time":"2019-04-30T07:30:00Z"}
+
+-- pseudoupgrade.go --
+package pseudoupgrade
+
+const X = 1
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_pseudoupgrade_v0.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_pseudoupgrade_v0.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7ddb0dc724d5ba0ceb1695cab5b10dff222fc173
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_pseudoupgrade_v0.1.0.txt
@@ -0,0 +1,13 @@
+example.com/pseudoupgrade v0.1.0
+written by hand
+
+-- .mod --
+module example.com/pseudoupgrade
+
+-- .info --
+{"Version":"v0.1.0","Name":"","Short":"","Time":"2019-04-29T07:30:30Z"}
+
+-- pseudoupgrade.go --
+package pseudoupgrade
+
+const X = 1
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_pseudoupgrade_v0.1.1-0.20190429073117-b5426c86b553.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_pseudoupgrade_v0.1.1-0.20190429073117-b5426c86b553.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b3f48bbdab6cec1112aed97240b9e5aa879bd566
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_pseudoupgrade_v0.1.1-0.20190429073117-b5426c86b553.txt
@@ -0,0 +1,13 @@
+example.com/pseudoupgrade v0.1.1-0.20190429073117-b5426c86b553
+written by hand
+
+-- .mod --
+module example.com/pseudoupgrade
+
+-- .info --
+{"Version":"v0.1.1-0.20190429073117-b5426c86b553","Name":"v0.1.1-0.20190429073117-b5426c86b553","Short":"b5426c86b553","Time":"2019-04-29T07:31:00Z"}
+
+-- pseudoupgrade.go --
+package pseudoupgrade
+
+const X = 1
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_quote_v1.5.2.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_quote_v1.5.2.txt
new file mode 100644
index 0000000000000000000000000000000000000000..05f7ae28a396a1174b27cb1922908f19f09ea68f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_quote_v1.5.2.txt
@@ -0,0 +1,9 @@
+This module is a replacement for rsc.io/quote, but its go.mod file declares
+a module path different from its location and the original module.
+
+-- .mod --
+module rsc.io/Quote
+
+go 1.14
+-- .info --
+{"Version":"v1.5.2"}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_ambiguous_nested_v1.9.0-bad.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_ambiguous_nested_v1.9.0-bad.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f8e623d56f7818f68f6c65dfed6e199f1da2314e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_ambiguous_nested_v1.9.0-bad.txt
@@ -0,0 +1,10 @@
+-- .mod --
+module example.com/retract/ambiguous/nested
+
+go 1.16
+
+retract v1.9.0-bad // nested modules are bad
+-- .info --
+{"Version":"v1.9.0-bad"}
+-- nested.go --
+package nested
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_ambiguous_other_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_ambiguous_other_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5ee01391a2423eff055d8aae097cb5d03c93f115
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_ambiguous_other_v1.0.0.txt
@@ -0,0 +1,12 @@
+-- .mod --
+module example.com/retract/ambiguous/other
+
+go 1.16
+
+require example.com/retract/ambiguous v1.0.0
+-- .info --
+{"Version":"v1.0.0"}
+-- other.go --
+package other
+
+import _ "example.com/retract/ambiguous/nested"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_ambiguous_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_ambiguous_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c8eeb1654f355f6d974806299ea10f47fc2e275a
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_ambiguous_v1.0.0.txt
@@ -0,0 +1,9 @@
+-- .mod --
+module example.com/retract/ambiguous
+
+go 1.16
+-- .info --
+{"Version":"v1.0.0"}
+-- nested/nested.go --
+package nested
+
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_incompatible_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_incompatible_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a987685e248b7a45130289660f20e759653b7266
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_incompatible_v1.0.0.txt
@@ -0,0 +1,19 @@
+The v1.0.0 release of example.com/retract/incompatible retracts
+v2.0.0+incompatible.
+
+-- .mod --
+module example.com/retract/incompatible
+
+go 1.16
+
+retract v2.0.0+incompatible
+-- .info --
+{"Version":"v1.0.0"}
+-- go.mod --
+module example.com/retract/incompatible
+
+go 1.16
+
+retract v2.0.0+incompatible
+-- incompatible.go --
+package incompatible
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_incompatible_v2.0.0+incompatible.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_incompatible_v2.0.0+incompatible.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c668dbb7a9411c44d41a481a010601ae3ffe35b2
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_incompatible_v2.0.0+incompatible.txt
@@ -0,0 +1,9 @@
+The v1.0.0 release of example.com/retract/incompatible retracts
+v2.0.0+incompatible.
+
+-- .mod --
+module example.com/retract/incompatible
+-- .info --
+{"Version":"v2.0.0+incompatible"}
+-- incompatible.go --
+package incompatible
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_missingmod_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_missingmod_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1d8d81071ee0e2ea33867938b9663d9e7cc7978d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_missingmod_v1.0.0.txt
@@ -0,0 +1,10 @@
+This version should be retracted, but the go.mod file for the version that would
+contain the retraction is not available.
+-- .mod --
+module example.com/retract/missingmod
+
+go 1.14
+-- .info --
+{"Version":"v1.0.0"}
+-- missingmod.go --
+package missingmod
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_missingmod_v1.9.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_missingmod_v1.9.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bba919ec213e211114b8798f48968bbe7b276374
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_missingmod_v1.9.0.txt
@@ -0,0 +1,4 @@
+The go.mod file at this version will be loaded to check for retractions
+of earlier versions. However, the .mod file is not available.
+-- .info --
+{"Version":"v1.9.0"}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_newergoversion_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_newergoversion_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..21d53529842e8e257a5d0e86e2e7ef63e0a1c84d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_newergoversion_v1.0.0.txt
@@ -0,0 +1,10 @@
+-- .mod --
+module example.com/retract/newergoversion
+
+go 1.21
+
+-- .info --
+{"Version":"v1.0.0"}
+
+-- retract.go --
+package newergoversion
\ No newline at end of file
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_newergoversion_v1.2.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_newergoversion_v1.2.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7aa28b90e3ab9b7cd34a59c32e7c6bcfccf3791d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_newergoversion_v1.2.0.txt
@@ -0,0 +1,12 @@
+-- .mod --
+module example.com/retract/newergoversion
+
+go 1.23
+
+retract v1.2.0
+
+-- .info --
+{"Version":"v1.2.0"}
+
+-- retract.go --
+package newergoversion
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_noupgrade_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_noupgrade_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..466afc576577c4a54271c9acf6fd2618ce9cfa9d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_noupgrade_v1.0.0.txt
@@ -0,0 +1,9 @@
+-- .mod --
+module example.com/retract/noupgrade
+
+go 1.19
+
+retract v1.0.0 // bad
+
+-- .info --
+{"Version":"v1.0.0"}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-block.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-block.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c4a53e1d80eac4d7c505b836acdaa0c79997aa96
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-block.txt
@@ -0,0 +1,6 @@
+-- .mod --
+module example.com/retract/rationale
+
+go 1.14
+-- .info --
+{"Version":"v1.0.0-block"}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-blockwithcomment.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-blockwithcomment.txt
new file mode 100644
index 0000000000000000000000000000000000000000..92573b62e3679ab6d4207406d09bf58e4e195b30
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-blockwithcomment.txt
@@ -0,0 +1,6 @@
+-- .mod --
+module example.com/retract/rationale
+
+go 1.14
+-- .info --
+{"Version":"v1.0.0-blockwithcomment"}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-empty.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-empty.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1f0894aa8bea1323ce3c4336355f0f93e0746cb0
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-empty.txt
@@ -0,0 +1,8 @@
+-- .mod --
+module example.com/retract/rationale
+
+go 1.14
+-- .info --
+{"Version":"v1.0.0-empty"}
+-- empty.go --
+package empty
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-long.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-long.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1b5e7534285de0b511458f33a923acf4e408e267
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-long.txt
@@ -0,0 +1,8 @@
+-- .mod --
+module example.com/retract/rationale
+
+go 1.14
+-- .info --
+{"Version":"v1.0.0-long"}
+-- empty.go --
+package empty
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-multiline1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-multiline1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b1ffe27225066d9160687ab7939991b9f6970fb6
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-multiline1.txt
@@ -0,0 +1,8 @@
+-- .mod --
+module example.com/retract/rationale
+
+go 1.14
+-- .info --
+{"Version":"v1.0.0-multiline1"}
+-- empty.go --
+package empty
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-multiline2.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-multiline2.txt
new file mode 100644
index 0000000000000000000000000000000000000000..72f80b3254106bf084ef6bbd1a0397661abcae8a
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-multiline2.txt
@@ -0,0 +1,8 @@
+-- .mod --
+module example.com/retract/rationale
+
+go 1.14
+-- .info --
+{"Version":"v1.0.0-multiline2"}
+-- empty.go --
+package empty
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-order.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-order.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1b0450462b0c0962f048d44d5d86857b07c43336
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-order.txt
@@ -0,0 +1,6 @@
+-- .mod --
+module example.com/retract/rationale
+
+go 1.14
+-- .info --
+{"Version":"v1.0.0-order"}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-unprintable.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-unprintable.txt
new file mode 100644
index 0000000000000000000000000000000000000000..949612431e5d17b6071f3cc63afefa430792cb82
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.0-unprintable.txt
@@ -0,0 +1,8 @@
+-- .mod --
+module example.com/retract/rationale
+
+go 1.14
+-- .info --
+{"Version":"v1.0.0-unprintable"}
+-- empty.go --
+package empty
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.1-order.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.1-order.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3be7d5b56e4b22198954564bff39c7ef499b3a77
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.0.1-order.txt
@@ -0,0 +1,6 @@
+-- .mod --
+module example.com/retract/rationale
+
+go 1.14
+-- .info --
+{"Version":"v1.0.1-order"}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.9.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.9.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6975d4ebd4dd93b13159bce97d4e539c851285fc
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rationale_v1.9.0.txt
@@ -0,0 +1,48 @@
+Module example.com/retract/rationale retracts all versions of itself.
+The rationale comments have various problems.
+
+-- .mod --
+module example.com/retract/rationale
+
+go 1.14
+
+retract (
+	v1.0.0-empty
+
+	// short description
+	// more
+	//
+	// detail
+	v1.0.0-multiline1 // suffix
+	// after not included
+)
+
+// short description
+// more
+//
+// detail
+retract v1.0.0-multiline2 // suffix
+
+// loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong
+retract v1.0.0-long
+
+// Ends with a BEL character. Beep!
+retract v1.0.0-unprintable
+
+// block comment
+retract (
+	v1.0.0-block
+
+	// inner comment
+	v1.0.0-blockwithcomment
+)
+
+retract (
+	[v1.0.0-order, v1.0.0-order] // degenerate range
+	v1.0.0-order // single version
+
+	v1.0.1-order // single version
+	[v1.0.1-order, v1.0.1-order] // degenerate range
+)
+-- .info --
+{"Version":"v1.9.0"}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rename_v1.0.0-bad.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rename_v1.0.0-bad.txt
new file mode 100644
index 0000000000000000000000000000000000000000..25c4ff1b1f9e2dfa4fd5f28fb2655d76899bf5d1
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rename_v1.0.0-bad.txt
@@ -0,0 +1,16 @@
+Module example.com/retract/rename is renamed in a later version.
+
+This happens frequently when a repository is renamed or when a go.mod file
+is added for the first time with a custom module path.
+-- .info --
+{"Version":"v1.0.0-bad"}
+-- .mod --
+module example.com/retract/rename
+
+go 1.16
+-- go.mod --
+module example.com/retract/rename
+
+go 1.16
+-- rename.go --
+package rename
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rename_v1.9.0-new.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rename_v1.9.0-new.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9c08f713c40a6fbf88606de596248aaef57cd837
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_rename_v1.9.0-new.txt
@@ -0,0 +1,22 @@
+Module example.com/retract/rename is renamed in this version.
+
+This happens frequently when a repository is renamed or when a go.mod file
+is added for the first time with a custom module path.
+-- .info --
+{"Version":"v1.9.0-new"}
+-- .mod --
+module example.com/retract/newname
+
+go 1.16
+
+// bad
+retract v1.0.0-bad
+-- go.mod --
+module example.com/retract/newname
+
+go 1.16
+
+// bad
+retract v1.0.0-bad
+-- newname.go --
+package newname
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_all_v1.9.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_all_v1.9.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4dc486b599801e3ef470df119c3f136ccf1d6659
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_all_v1.9.0.txt
@@ -0,0 +1,14 @@
+Module example.com/retract/self/all is a module that retracts its own
+latest version.
+
+No unretracted versions are available.
+
+-- .mod --
+module example.com/retract/self/all
+
+go 1.15
+
+retract v1.9.0 // bad
+
+-- .info --
+{"Version":"v1.9.0"}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..04c28455d76d903fea3cf65e2bea8e9f9acfe900
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.0.0.txt
@@ -0,0 +1,16 @@
+Module example.com/retract/self/prerelease is a module that retracts its own
+latest version and all other release versions.
+
+A pre-release version higher than the highest release version is still
+available, and that should be matched by @latest.
+
+-- .mod --
+module example.com/retract/self/prerelease
+
+go 1.15
+
+-- .info --
+{"Version":"v1.0.0"}
+
+-- p.go --
+package p
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.9.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.9.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7c1c047e69e8b97d955c48521f1a44d7b093af9d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.9.0.txt
@@ -0,0 +1,19 @@
+Module example.com/retract/self/prerelease is a module that retracts its own
+latest version and all other release versions.
+
+A pre-release version higher than the highest release version is still
+available, and that should be matched by @latest.
+
+-- .mod --
+module example.com/retract/self/prerelease
+
+go 1.15
+
+retract v1.0.0 // bad
+retract v1.9.0 // self
+
+-- .info --
+{"Version":"v1.9.0"}
+
+-- p.go --
+package p
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.9.1-pre.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.9.1-pre.txt
new file mode 100644
index 0000000000000000000000000000000000000000..abf44fdae14fcb41ae10bbe9e36d7ea4e8c23645
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_prerelease_v1.9.1-pre.txt
@@ -0,0 +1,16 @@
+Module example.com/retract/self/prerelease is a module that retracts its own
+latest version and all other release versions.
+
+A pre-release version higher than the highest release version is still
+available, and that should be matched by @latest.
+
+-- .mod --
+module example.com/retract/self/prerelease
+
+go 1.15
+
+-- .info --
+{"Version":"v1.9.1-pre"}
+
+-- p.go --
+package p
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.0.0-bad.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.0.0-bad.txt
new file mode 100644
index 0000000000000000000000000000000000000000..095063d69b2d6b23786f2b887a86ff8f3205d2c9
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.0.0-bad.txt
@@ -0,0 +1,14 @@
+See example.com_retract_self_prev_v1.9.0.txt.
+
+This version is retracted.
+
+-- .mod --
+module example.com/retract/self/prev
+
+go 1.15
+
+-- .info --
+{"Version":"v1.0.0-bad"}
+
+-- p.go --
+package p
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..27c3a390655a213a619cb727c71470c687e5fcf7
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.1.0.txt
@@ -0,0 +1,14 @@
+See example.com_retract_self_prev_v1.9.0.txt.
+
+This version is the latest (only) non-retracted version.
+
+-- .mod --
+module example.com/retract/self/prev
+
+go 1.15
+
+-- .info --
+{"Version":"v1.1.0"}
+
+-- p.go --
+package p
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.9.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.9.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..03d6168f0d19d0b552c370681d742361a3b5baee
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_prev_v1.9.0.txt
@@ -0,0 +1,18 @@
+Module example.com/retract/self/prev is a module that retracts its own
+latest version, as well as an earlier version.
+
+A previous unretracted release version, v1.1.0, is still available.
+
+-- .mod --
+module example.com/retract/self/prev
+
+go 1.15
+
+retract v1.0.0-bad // bad
+retract v1.9.0 // self
+
+-- .info --
+{"Version":"v1.9.0"}
+
+-- p.go --
+package p
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v0.0.0-20200325131415-0123456789ab b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v0.0.0-20200325131415-0123456789ab
new file mode 100644
index 0000000000000000000000000000000000000000..f9ab41e88f9ea342f8c02aae72a3c9ce350dc0d9
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v0.0.0-20200325131415-0123456789ab
@@ -0,0 +1,20 @@
+See example.com_retract_self_pseudo_v1.9.0.txt.
+
+This version is not retracted. It should be returned by the proxy's
+@latest endpoint. It should match the @latest version query.
+
+TODO(golang.org/issue/24031): the proxy and proxy.golang.org both return
+the highest release version from the @latest endpoint, even if that
+version is retracted, so there is no way for the go command to
+discover an unretracted pseudo-version.
+
+-- .mod --
+module example.com/retract/self/pseudo
+
+go 1.15
+
+-- .info --
+{"Version":"v0.0.0-20200325131415-01234567890ab"}
+
+-- p.go --
+package p
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v1.0.0-bad.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v1.0.0-bad.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d47eda05977544b762e592e296df8b49b6718d9e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v1.0.0-bad.txt
@@ -0,0 +1,14 @@
+See example.com_retract_self_pseudo_v1.9.0.txt.
+
+This version is retracted.
+
+-- .mod --
+module example.com/retract/self/pseudo
+
+go 1.15
+
+-- .info --
+{"Version":"v1.0.0-bad"}
+
+-- p.go --
+package p
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v1.9.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v1.9.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..db09cc6a5f405d16f2e544aa25ec199f3a0fc98e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_self_pseudo_v1.9.0.txt
@@ -0,0 +1,16 @@
+Module example.com/retract/self/pseudo is a module that retracts its own
+latest version, as well as an earlier version.
+
+An unretracted pseudo-version is available.
+
+-- .mod --
+module example.com/retract/self/pseudo
+
+go 1.15
+
+retract v1.0.0-bad // bad
+retract v1.9.0 // self
+
+-- .info --
+{"Version":"v1.9.0"}
+
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_v1.0.0-bad.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_v1.0.0-bad.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2f996cfc366cf2df450b1efe89c6a0add3c2543f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_v1.0.0-bad.txt
@@ -0,0 +1,10 @@
+-- .mod --
+module example.com/retract
+
+go 1.15
+
+-- .info --
+{"Version":"v1.0.0-bad"}
+
+-- retract.go --
+package retract
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_v1.0.0-good.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_v1.0.0-good.txt
new file mode 100644
index 0000000000000000000000000000000000000000..78152bba4fad7231e5c7af93869bf31423566a7d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_v1.0.0-good.txt
@@ -0,0 +1,10 @@
+-- .mod --
+module example.com/retract
+
+go 1.15
+
+-- .info --
+{"Version":"v1.0.0-good"}
+
+-- retract.go --
+package retract
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_v1.0.0-unused.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_v1.0.0-unused.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3bc9e35b7c5f702a2f33021d644fe4030ac3fbf0
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_v1.0.0-unused.txt
@@ -0,0 +1,10 @@
+-- .mod --
+module example.com/retract
+
+go 1.15
+
+-- .info --
+{"Version":"v1.0.0-unused"}
+
+-- retract.go --
+package retract
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_v1.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_v1.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..18d6d832e2ba22505f6655da5d4ac71311ce4fcd
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_retract_v1.1.0.txt
@@ -0,0 +1,13 @@
+-- .mod --
+module example.com/retract
+
+go 1.15
+
+retract v1.0.0-bad // bad
+retract v1.0.0-unused // bad
+
+-- .info --
+{"Version":"v1.1.0"}
+
+-- retract.go --
+package retract
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_split-incompatible_subpkg_v0.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_split-incompatible_subpkg_v0.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..edf5d487885d4293a7178464953ebfa3a9dcd3a5
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_split-incompatible_subpkg_v0.1.0.txt
@@ -0,0 +1,14 @@
+Written by hand.
+Test case for getting a package that has been moved to a nested module,
+with a +incompatible version (and thus no go.mod file) at the root module.
+
+-- .mod --
+module example.com/split-incompatible/subpkg
+-- .info --
+{"Version": "v0.1.0"}
+-- go.mod --
+module example.com/split-incompatible/subpkg
+
+go 1.16
+-- subpkg.go --
+package subpkg
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_split-incompatible_v2.0.0+incompatible.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_split-incompatible_v2.0.0+incompatible.txt
new file mode 100644
index 0000000000000000000000000000000000000000..00076d74fc2f8868a1e78de129bcbe6a6fbf1d51
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_split-incompatible_v2.0.0+incompatible.txt
@@ -0,0 +1,10 @@
+Written by hand.
+Test case for getting a package that has been moved to a nested module,
+with a +incompatible version (and thus no go.mod file) at the root module.
+
+-- .mod --
+module example.com/split-incompatible
+-- .info --
+{"Version": "v2.0.0+incompatible"}
+-- subpkg/subpkg.go --
+package subpkg
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_split-incompatible_v2.1.0-pre+incompatible.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_split-incompatible_v2.1.0-pre+incompatible.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bb1c1fecc9d2713360069cf593b6df89e6e1c303
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_split-incompatible_v2.1.0-pre+incompatible.txt
@@ -0,0 +1,10 @@
+Written by hand.
+Test case for getting a package that has been moved to a nested module,
+with a +incompatible version (and thus no go.mod file) at the root module.
+
+-- .mod --
+module example.com/split-incompatible
+-- .info --
+{"Version": "v2.1.0-pre+incompatible"}
+-- README.txt --
+subpkg has moved to module example.com/split-incompatible/subpkg
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_split_subpkg_v1.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_split_subpkg_v1.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b197b66398185a59f45822aab4ab1bce687e6127
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_split_subpkg_v1.1.0.txt
@@ -0,0 +1,11 @@
+Written by hand.
+Test case for getting a package that has been moved to a different module.
+
+-- .mod --
+module example.com/split/subpkg
+
+require example.com/split v1.1.0
+-- .info --
+{"Version": "v1.1.0"}
+-- x.go --
+package subpkg
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_split_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_split_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b706e590d98fd52d932a9fc9d9bd6a8af2f0291b
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_split_v1.0.0.txt
@@ -0,0 +1,9 @@
+Written by hand.
+Test case for getting a package that has been moved to a different module.
+
+-- .mod --
+module example.com/split
+-- .info --
+{"Version": "v1.0.0"}
+-- subpkg/x.go --
+package subpkg
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_split_v1.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_split_v1.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d38971f9b62817c3b206fc1b0558d8a0468b83fe
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_split_v1.1.0.txt
@@ -0,0 +1,9 @@
+Written by hand.
+Test case for getting a package that has been moved to a different module.
+
+-- .mod --
+module example.com/split
+
+require example.com/split/subpkg v1.1.0
+-- .info --
+{"Version": "v1.1.0"}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_stack_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_stack_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..787b7aedfa40a738e3516a5fe3f2063137e5df68
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_stack_v1.0.0.txt
@@ -0,0 +1,18 @@
+Module with a function that prints file name for the top stack frame.
+Different versions of this module are identical, but they should return
+different file names with -trimpath.
+-- .mod --
+module example.com/stack
+
+go 1.14
+-- .info --
+{"Version":"v1.0.0"}
+-- stack.go --
+package stack
+
+import "runtime"
+
+func TopFile() string {
+	_, file, _, _ := runtime.Caller(0)
+	return file
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_stack_v1.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_stack_v1.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c715dd234f70ed8f18001d04c5829d55fe7956a1
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_stack_v1.0.1.txt
@@ -0,0 +1,18 @@
+Module with a function that prints file name for the top stack frame.
+Different versions of this module are identical, but they should return
+different file names with -trimpath.
+-- .mod --
+module example.com/stack
+
+go 1.14
+-- .info --
+{"Version":"v1.0.1"}
+-- stack.go --
+package stack
+
+import "runtime"
+
+func TopFile() string {
+	_, file, _, _ := runtime.Caller(0)
+	return file
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_tools_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_tools_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..22e36b993affb711fb3dec2d25eb8fed287305eb
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_tools_v1.0.0.txt
@@ -0,0 +1,12 @@
+-- .info --
+{"Version": "v1.0.0"}
+-- .mod --
+module example.com/tools
+-- cmd/hello/hello.go --
+package main
+
+import "fmt"
+
+func main() {
+	fmt.Println("hello")
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_undeprecated_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_undeprecated_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a68588eedb4ae267a1990f95f93c0e2833dc9df3
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_undeprecated_v1.0.0.txt
@@ -0,0 +1,14 @@
+-- .info --
+{"Version":"v1.0.0"}
+-- .mod --
+// Deprecated: in v1.0.0
+module example.com/undeprecated
+
+go 1.17
+-- go.mod --
+// Deprecated: in v1.0.0
+module example.com/undeprecated
+
+go 1.17
+-- undeprecated.go --
+package undeprecated
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_undeprecated_v1.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_undeprecated_v1.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ecabf322ec486fa99f1455f04241d559a5bfb6a4
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_undeprecated_v1.0.1.txt
@@ -0,0 +1,14 @@
+-- .info --
+{"Version":"v1.0.1"}
+-- .mod --
+// no longer deprecated
+module example.com/undeprecated
+
+go 1.17
+-- go.mod --
+// no longer deprecated
+module example.com/undeprecated
+
+go 1.17
+-- undeprecated.go --
+package undeprecated
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_usemissingpre_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_usemissingpre_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5e1c5c815ed73a30f35200301f86c0040d27b40e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_usemissingpre_v1.0.0.txt
@@ -0,0 +1,13 @@
+This module requires example.com/missingpkg at a prerelease version, which
+is newer than @latest.
+
+-- .mod --
+module example.com/usemissingpre
+
+require example.com/missingpkg v1.0.1-beta
+-- .info --
+{"Version":"v1.0.0"}
+-- use.go --
+package use
+
+import _ "example.com/missingpkg"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..263287d9e2cf8a713700edc468524a0c671d3ae1
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_v1.0.0.txt
@@ -0,0 +1,9 @@
+Written by hand.
+Test case for module at root of domain.
+
+-- .mod --
+module example.com
+-- .info --
+{"Version": "v1.0.0"}
+-- x.go --
+package x
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_version_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_version_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d8c45b527e9d504591ef9ec2881ac7472620cf66
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_version_v1.0.0.txt
@@ -0,0 +1,11 @@
+example.com/version v1.0.0
+written by hand
+
+-- .mod --
+module example.com/version
+-- .info --
+{"Version":"v1.0.0"}
+-- version.go --
+package version
+
+const V = "v1.0.0"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_version_v1.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_version_v1.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3bfdb0e4cdcc2fad4895b0d05aac7b312810c85d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_version_v1.0.1.txt
@@ -0,0 +1,11 @@
+example.com/version v1.0.1
+written by hand
+
+-- .mod --
+module example.com/version
+-- .info --
+{"Version":"v1.0.1"}
+-- version.go --
+package version
+
+const V = "v1.0.1"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_version_v1.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_version_v1.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8109a9acc9e53e84d9ce5714a6a3d013b4e37449
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.com_version_v1.1.0.txt
@@ -0,0 +1,11 @@
+example.com/version v1.1.0
+written by hand
+
+-- .mod --
+module example.com/version
+-- .info --
+{"Version":"v1.1.0"}
+-- version.go --
+package version
+
+const V = "v1.1.0"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.net_ambiguous_nested_v0.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.net_ambiguous_nested_v0.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8c9de7a5f4764ff9bd25cb22fee721f842c51001
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.net_ambiguous_nested_v0.1.0.txt
@@ -0,0 +1,19 @@
+Written by hand.
+
+Test module containing a package that is also provided by a nested module tagged
+with the same version.
+
+-- .mod --
+module example.net/ambiguous/nested
+
+go 1.16
+-- .info --
+{"Version": "v0.1.0"}
+-- go.mod --
+module example.net/ambiguous/nested
+
+go 1.16
+-- pkg/pkg.go --
+// Package pkg exists in both example.net/ambiguous v0.1.0
+// and example.net/ambiguous/nested v0.1.0
+package pkg
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.net_ambiguous_v0.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.net_ambiguous_v0.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8fa6d83346d0f901d812d81f2208c44fc93955f0
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.net_ambiguous_v0.1.0.txt
@@ -0,0 +1,19 @@
+Written by hand.
+
+Test module containing a package that is also provided by a nested module tagged
+with the same version.
+
+-- .mod --
+module example.net/ambiguous
+
+go 1.16
+-- .info --
+{"Version": "v0.1.0"}
+-- go.mod --
+module example.net/ambiguous
+
+go 1.16
+-- nested/pkg/pkg.go --
+// Package pkg exists in both example.net/ambiguous v0.1.0
+// and example.net/ambiguous/nested v0.1.0
+package pkg
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.net_ambiguous_v0.2.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.net_ambiguous_v0.2.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7589ad76a311a2a054944b71dafb9dc2e4bf7b61
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.net_ambiguous_v0.2.0.txt
@@ -0,0 +1,18 @@
+Written by hand.
+
+Test module containing a package that is also provided by a nested module tagged
+with the same version.
+
+-- .mod --
+module example.net/ambiguous
+
+go 1.16
+-- .info --
+{"Version": "v0.2.0"}
+-- go.mod --
+module example.net/ambiguous
+
+go 1.16
+-- nested/pkg/README.txt --
+// Package pkg no longer exists in this module at v0.2.0.
+// Find it in module example.net/ambiguous/nested instead.
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.net_pkgadded_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.net_pkgadded_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..207e86a73cf4d224bede65f143add33156e904b7
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.net_pkgadded_v1.0.0.txt
@@ -0,0 +1,17 @@
+Written by hand.
+Test module with a root package added in v1.1.0
+and a subpackage added in v1.2.0.
+
+-- .mod --
+module example.net/pkgadded
+
+go 1.16
+-- .info --
+{"Version":"v1.0.0"}
+-- go.mod --
+module example.net/pkgadded
+
+go 1.16
+-- README.txt --
+We will add the package example.net/pkgadded in v1.1.0,
+and example.net/pkgadded/subpkg in v1.2.0.
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.net_pkgadded_v1.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.net_pkgadded_v1.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1c88de2dd6ffa773926302c18f756b5940ea90bd
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.net_pkgadded_v1.1.0.txt
@@ -0,0 +1,19 @@
+Written by hand.
+Test module with a root package added in v1.1.0
+and a subpackage added in v1.2.0.
+
+-- .mod --
+module example.net/pkgadded
+
+go 1.16
+-- .info --
+{"Version":"v1.1.0"}
+-- go.mod --
+module example.net/pkgadded
+
+go 1.16
+-- README.txt --
+We will add the package example.net/pkgadded/subpkg in v1.2.0.
+-- pkgadded.go --
+// Package pkgadded was added in v1.1.0.
+package pkgadded
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.net_pkgadded_v1.2.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.net_pkgadded_v1.2.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..922951ac37ea88b107f85f77d08ea4d0c16a4356
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/example.net_pkgadded_v1.2.0.txt
@@ -0,0 +1,20 @@
+Written by hand.
+Test module with a root package added in v1.1.0
+and a subpackage added in v1.2.0.
+
+-- .mod --
+module example.net/pkgadded
+
+go 1.16
+-- .info --
+{"Version":"v1.2.0"}
+-- go.mod --
+module example.net/pkgadded
+
+go 1.16
+-- pkgadded.go --
+// Package pkgadded was added in v1.1.0.
+package pkgadded
+-- subpkg/subpkg.go --
+// Package subpkg was added in v1.2.0.
+package subpkg
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/github.com_dmitshur-test_modtest5_v0.0.0-20190619020302-197a620e0c9a.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/github.com_dmitshur-test_modtest5_v0.0.0-20190619020302-197a620e0c9a.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c2709c161e169124637b6e20687ebad339582ddd
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/github.com_dmitshur-test_modtest5_v0.0.0-20190619020302-197a620e0c9a.txt
@@ -0,0 +1,10 @@
+module github.com/dmitshur-test/modtest5@v0.0.0-20190619020302-197a620e0c9a
+
+-- .mod --
+module github.com/dmitshur-test/modtest5
+-- .info --
+{"Version":"v0.0.0-20190619020302-197a620e0c9a","Time":"2019-06-18T19:03:02-07:00"}
+-- p.go --
+package p
+
+const v = 1
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/github.com_dmitshur-test_modtest5_v0.5.0-alpha.0.20190619023908-3da23a9deb9e.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/github.com_dmitshur-test_modtest5_v0.5.0-alpha.0.20190619023908-3da23a9deb9e.txt
new file mode 100644
index 0000000000000000000000000000000000000000..22e47f378ef91398d5c903453bd199c8de54209d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/github.com_dmitshur-test_modtest5_v0.5.0-alpha.0.20190619023908-3da23a9deb9e.txt
@@ -0,0 +1,10 @@
+module github.com/dmitshur-test/modtest5@v0.5.0-alpha.0.20190619023908-3da23a9deb9e
+
+-- .mod --
+module github.com/dmitshur-test/modtest5
+-- .info --
+{"Version":"v0.5.0-alpha.0.20190619023908-3da23a9deb9e","Time":"2019-06-18T19:39:08-07:00"}
+-- p.go --
+package p
+
+const v = 3
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/github.com_dmitshur-test_modtest5_v0.5.0-alpha.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/github.com_dmitshur-test_modtest5_v0.5.0-alpha.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4f088ccd2ca3f6063196e54ef4bcc810a1ac8bb5
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/github.com_dmitshur-test_modtest5_v0.5.0-alpha.txt
@@ -0,0 +1,10 @@
+module github.com/dmitshur-test/modtest5@v0.5.0-alpha
+
+-- .mod --
+module github.com/dmitshur-test/modtest5
+-- .info --
+{"Version":"v0.5.0-alpha","Time":"2019-06-18T19:04:46-07:00"}
+-- p.go --
+package p
+
+const v = 2
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_notx_useinternal_v0.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_notx_useinternal_v0.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0420a1a4a0aee33062b90750eb7428fe093fb5a4
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_notx_useinternal_v0.1.0.txt
@@ -0,0 +1,13 @@
+written by hand — attempts to use a prohibited internal package
+(https://golang.org/s/go14internal)
+
+-- .mod --
+module golang.org/notx/useinternal
+-- .info --
+{"Version":"v0.1.0","Name":"","Short":"","Time":"2018-07-25T17:24:00Z"}
+-- go.mod --
+module golang.org/notx/useinternal
+-- useinternal.go --
+package useinternal
+
+import _ "golang.org/x/internal/subtle"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.18.1.linux-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.18.1.linux-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3713de33bde2e7aadfa86a60eec0fc8186df5fc6
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.18.1.linux-amd64.txt
@@ -0,0 +1,8 @@
+golang.org/toolchain v0.0.1-go1.18.1.linux-amd64
+written by hand
+-- .info --
+{"Version":"v0.0.1-go1.18.1.linux-amd64"}
+-- .mod --
+golang.org/toolchain
+-- go.mod --
+golang.org/toolchain
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.18.3.linux-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.18.3.linux-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8eda1eedb0b15ed7b84ee149bb3431055f689906
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.18.3.linux-amd64.txt
@@ -0,0 +1,8 @@
+golang.org/toolchain v0.0.1-go1.18.3.linux-amd64
+written by hand
+-- .info --
+{"Version":"v0.0.1-go1.18.3.linux-amd64"}
+-- .mod --
+golang.org/toolchain
+-- go.mod --
+golang.org/toolchain
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.18.5.linux-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.18.5.linux-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d74ef250f27fb8df55dc46fd0b04424f94f12a28
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.18.5.linux-amd64.txt
@@ -0,0 +1,8 @@
+golang.org/toolchain v0.0.1-go1.18.5.linux-amd64
+written by hand
+-- .info --
+{"Version":"v0.0.1-go1.18.5.linux-amd64"}
+-- .mod --
+golang.org/toolchain
+-- go.mod --
+golang.org/toolchain
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.18.7.linux-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.18.7.linux-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2fc7f85895c0fa539214e40c5dc0c16b9671de68
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.18.7.linux-amd64.txt
@@ -0,0 +1,8 @@
+golang.org/toolchain v0.0.1-go1.18.7.linux-amd64
+written by hand
+-- .info --
+{"Version":"v0.0.1-go1.18.7.linux-amd64"}
+-- .mod --
+golang.org/toolchain
+-- go.mod --
+golang.org/toolchain
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.18.9.linux-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.18.9.linux-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7b0785106d5d44e3182b13cf6cba9d7b5ae560d3
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.18.9.linux-amd64.txt
@@ -0,0 +1,8 @@
+golang.org/toolchain v0.0.1-go1.18.9.linux-amd64
+written by hand
+-- .info --
+{"Version":"v0.0.1-go1.18.9.linux-amd64"}
+-- .mod --
+golang.org/toolchain
+-- go.mod --
+golang.org/toolchain
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.18.linux-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.18.linux-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2c80ce5e2a26fe8b001dfcd0ad07143a6efe8136
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.18.linux-amd64.txt
@@ -0,0 +1,8 @@
+golang.org/toolchain v0.0.1-go1.18.linux-amd64
+written by hand
+-- .info --
+{"Version":"v0.0.1-go1.18.linux-amd64"}
+-- .mod --
+golang.org/toolchain
+-- go.mod --
+golang.org/toolchain
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22.0.linux-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22.0.linux-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..215c5477884e6c7e2f6b7c72dd14207362a4d36a
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22.0.linux-amd64.txt
@@ -0,0 +1,8 @@
+golang.org/toolchain v0.0.1-go1.22.0.linux-amd64
+written by hand
+-- .info --
+{"Version":"v0.0.1-go1.22.0.linux-amd64"}
+-- .mod --
+golang.org/toolchain
+-- go.mod --
+golang.org/toolchain
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22.1.linux-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22.1.linux-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ac36e3fe6549561269d31c002672cd8bc20bc84e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22.1.linux-amd64.txt
@@ -0,0 +1,8 @@
+golang.org/toolchain v0.0.1-go1.22.1.linux-amd64
+written by hand
+-- .info --
+{"Version":"v0.0.1-go1.22.1.linux-amd64"}
+-- .mod --
+golang.org/toolchain
+-- go.mod --
+golang.org/toolchain
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22.3.linux-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22.3.linux-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1178a48ab1a7bd2250b14a4e27b271a2fd016e0e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22.3.linux-amd64.txt
@@ -0,0 +1,8 @@
+golang.org/toolchain v0.0.1-go1.22.3.linux-amd64
+written by hand
+-- .info --
+{"Version":"v0.0.1-go1.22.3.linux-amd64"}
+-- .mod --
+golang.org/toolchain
+-- go.mod --
+golang.org/toolchain
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22.5.linux-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22.5.linux-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d330127a79487999d0d80976582d1de1bbe9c5aa
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22.5.linux-amd64.txt
@@ -0,0 +1,8 @@
+golang.org/toolchain v0.0.1-go1.22.5.linux-amd64
+written by hand
+-- .info --
+{"Version":"v0.0.1-go1.22.5.linux-amd64"}
+-- .mod --
+golang.org/toolchain
+-- go.mod --
+golang.org/toolchain
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22.7.linux-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22.7.linux-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a72863b830f4a8f530c315c77c9d7312d3241c4f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22.7.linux-amd64.txt
@@ -0,0 +1,8 @@
+golang.org/toolchain v0.0.1-go1.22.7.linux-amd64
+written by hand
+-- .info --
+{"Version":"v0.0.1-go1.22.7.linux-amd64"}
+-- .mod --
+golang.org/toolchain
+-- go.mod --
+golang.org/toolchain
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22.9.linux-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22.9.linux-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ac558496c27148f8fad8e350dcefd36943d03dfd
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22.9.linux-amd64.txt
@@ -0,0 +1,8 @@
+golang.org/toolchain v0.0.1-go1.22.9.linux-amd64
+written by hand
+-- .info --
+{"Version":"v0.0.1-go1.22.9.linux-amd64"}
+-- .mod --
+golang.org/toolchain
+-- go.mod --
+golang.org/toolchain
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22rc1.linux-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22rc1.linux-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b384f28a83d7876ecd1859b6e71d0b38b13c39dc
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.22rc1.linux-amd64.txt
@@ -0,0 +1,8 @@
+golang.org/toolchain v0.0.1-go1.22rc1.linux-amd64
+written by hand
+-- .info --
+{"Version":"v0.0.1-go1.22rc1.linux-amd64"}
+-- .mod --
+golang.org/toolchain
+-- go.mod --
+golang.org/toolchain
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.23.0.linux-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.23.0.linux-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bbc137768e25927c2a6461a70fcd3dc4acd26852
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.23.0.linux-amd64.txt
@@ -0,0 +1,8 @@
+golang.org/toolchain v0.0.1-go1.23.0.linux-amd64
+written by hand
+-- .info --
+{"Version":"v0.0.1-go1.23.0.linux-amd64"}
+-- .mod --
+golang.org/toolchain
+-- go.mod --
+golang.org/toolchain
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.23.5.linux-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.23.5.linux-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..206e8ad2b8ed22ca4809d15e681f5e7cd8889df6
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.23.5.linux-amd64.txt
@@ -0,0 +1,8 @@
+golang.org/toolchain v0.0.1-go1.23.5.linux-amd64
+written by hand
+-- .info --
+{"Version":"v0.0.1-go1.23.5.linux-amd64"}
+-- .mod --
+golang.org/toolchain
+-- go.mod --
+golang.org/toolchain
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.23.9.linux-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.23.9.linux-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7d037764185e614e3a65411b352632fbb449d3b3
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.23.9.linux-amd64.txt
@@ -0,0 +1,8 @@
+golang.org/toolchain v0.0.1-go1.23.9.linux-amd64
+written by hand
+-- .info --
+{"Version":"v0.0.1-go1.23.9.linux-amd64"}
+-- .mod --
+golang.org/toolchain
+-- go.mod --
+golang.org/toolchain
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.24rc1.linux-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.24rc1.linux-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4b61dddaa24920c07169e3ec75197b09127b9af9
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.24rc1.linux-amd64.txt
@@ -0,0 +1,8 @@
+golang.org/toolchain v0.0.1-go1.24rc1.linux-amd64
+written by hand
+-- .info --
+{"Version":"v0.0.1-go1.24rc1.linux-amd64"}
+-- .mod --
+golang.org/toolchain
+-- go.mod --
+golang.org/toolchain
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.aix-ppc64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.aix-ppc64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e70c4d7185da5fac938a1b934579397e834ddb5d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.aix-ppc64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.aix-ppc64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.aix-ppc64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.android-386.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.android-386.txt
new file mode 100644
index 0000000000000000000000000000000000000000..981334eae955fa60c10de66292f0d8440c88d34e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.android-386.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.android-386
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.android-386"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.android-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.android-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a01fce844a22f5313bfaeca2efd8b66dca8a3543
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.android-amd64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.android-amd64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.android-amd64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.android-arm.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.android-arm.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0de1cecbab8db01cbc28adb11c5d4eaecf7a56e3
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.android-arm.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.android-arm
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.android-arm"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.android-arm64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.android-arm64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1ebeadcb9fd0421b4a85465fc69d2bddc6fc12a0
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.android-arm64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.android-arm64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.android-arm64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.darwin-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.darwin-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..509185322e92956860f769b9ba8fd7fea3d7d584
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.darwin-amd64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.darwin-amd64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.darwin-amd64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.darwin-arm64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.darwin-arm64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6b2b132d2d038659cbc81c2a56335c0e98a6c37d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.darwin-arm64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.darwin-arm64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.darwin-arm64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.dragonfly-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.dragonfly-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..814180adddf8bbf093b6aede5654c9482a68e003
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.dragonfly-amd64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.dragonfly-amd64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.dragonfly-amd64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.freebsd-386.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.freebsd-386.txt
new file mode 100644
index 0000000000000000000000000000000000000000..12e0df493e104e7570937e0ea5046464a33f35e3
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.freebsd-386.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.freebsd-386
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.freebsd-386"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.freebsd-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.freebsd-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bf470a5a05d29fb5080c79f6f5ae84b908be5f20
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.freebsd-amd64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.freebsd-amd64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.freebsd-amd64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.freebsd-arm.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.freebsd-arm.txt
new file mode 100644
index 0000000000000000000000000000000000000000..dc32e0edf47211cd6142847cdd127e18e1303e8b
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.freebsd-arm.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.freebsd-arm
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.freebsd-arm"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.freebsd-arm64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.freebsd-arm64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4335ba6fc74e9b767f571dce686d6f9a0a7c4532
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.freebsd-arm64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.freebsd-arm64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.freebsd-arm64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.freebsd-riscv64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.freebsd-riscv64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6161fe2f8182b095ed931934ae3fbba77b3df030
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.freebsd-riscv64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.freebsd-riscv64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.freebsd-riscv64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.illumos-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.illumos-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b677457f6e7b0b0915a023be1a94ca8899badd52
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.illumos-amd64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.illumos-amd64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.illumos-amd64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.ios-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.ios-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e8363bcb7fe5a5309bdec1cf57fbe2c7d79f1307
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.ios-amd64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.ios-amd64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.ios-amd64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.ios-arm64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.ios-arm64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9585966f2a7d97cdabc4a5b553cfd9d1f9233813
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.ios-arm64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.ios-arm64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.ios-arm64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.js-wasm.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.js-wasm.txt
new file mode 100644
index 0000000000000000000000000000000000000000..37fa6c06b7f34a4e99f8f1c01ea3ba417841d03b
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.js-wasm.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.js-wasm
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.js-wasm"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-386.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-386.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ee966385f60cdb3795eb4c80fc457271f3e911de
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-386.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.linux-386
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.linux-386"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6277341561a5dd7732be3ba03b5114bea52a22e7
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-amd64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.linux-amd64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.linux-amd64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-arm.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-arm.txt
new file mode 100644
index 0000000000000000000000000000000000000000..678711b1a1bd7e5f2f7c314ea3aa19a2ea9fbd68
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-arm.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.linux-arm
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.linux-arm"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-arm64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-arm64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bb305ab250307706e11a41688de8d19f2bfedd7f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-arm64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.linux-arm64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.linux-arm64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-loong64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-loong64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..52a23d4b4ad1c85e3bc597b6dfe34c3e2579fecf
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-loong64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.linux-loong64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.linux-loong64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-mips64x.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-mips64x.txt
new file mode 100644
index 0000000000000000000000000000000000000000..79fff1322eb62f3606876e5bfa9b481223b8889a
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-mips64x.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.linux-mips64x
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.linux-mips64x"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-mipsx.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-mipsx.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a7256265637f6244d7cff90a92e74d0e79a93525
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-mipsx.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.linux-mipsx
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.linux-mipsx"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-ppc64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-ppc64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f25ae8eddcb10f3a152eaaffd10453a040443efe
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-ppc64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.linux-ppc64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.linux-ppc64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-ppc64le.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-ppc64le.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e22b8ee205b84a0124e75e2b96d618eca1f9186f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-ppc64le.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.linux-ppc64le
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.linux-ppc64le"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-riscv64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-riscv64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2e15fe3cf6fc99267b29b5a113435a15e2247885
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-riscv64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.linux-riscv64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.linux-riscv64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-s390x.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-s390x.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1022ee4935153e7944af4ccd186d0993a27624b0
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.linux-s390x.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.linux-s390x
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.linux-s390x"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.netbsd-386.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.netbsd-386.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8b7b156e441329f1632a9705aa919d1714195461
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.netbsd-386.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.netbsd-386
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.netbsd-386"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.netbsd-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.netbsd-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ebdb407c4c2fd0acc5195fd7136b8f782b28d09e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.netbsd-amd64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.netbsd-amd64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.netbsd-amd64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.netbsd-arm.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.netbsd-arm.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fcacf0800fafd16d9e935f2867fd9d10596412dd
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.netbsd-arm.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.netbsd-arm
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.netbsd-arm"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.netbsd-arm64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.netbsd-arm64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c2bd257cc57ec3e5b29293872b00b3d068d1abaa
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.netbsd-arm64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.netbsd-arm64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.netbsd-arm64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-386.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-386.txt
new file mode 100644
index 0000000000000000000000000000000000000000..965a054f6d3e81f14d0db1eae36c61d0a8b730da
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-386.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.openbsd-386
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.openbsd-386"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1acb7a3b3447c25452b36d03bfc8d51882487da9
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-amd64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.openbsd-amd64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.openbsd-amd64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-arm.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-arm.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0e47d9cda7dde1ca3e271e50a9ec630f7c9cbf21
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-arm.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.openbsd-arm
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.openbsd-arm"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-arm64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-arm64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8c99161865f61580d4e5da32229cffc3dada6d9a
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-arm64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.openbsd-arm64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.openbsd-arm64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-mips64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-mips64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6c7cdfbfe49312bec643448dfbcf7530c41f231c
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-mips64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.openbsd-mips64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.openbsd-mips64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-ppc64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-ppc64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..60c73b7c1aab03fe47fbc343755b15c86917f411
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-ppc64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.openbsd-ppc64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.openbsd-ppc64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-riscv64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-riscv64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..978be3bf7858dcfd04fb439008c3ebaf2790ee04
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.openbsd-riscv64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.openbsd-riscv64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.openbsd-riscv64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.plan9-386.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.plan9-386.txt
new file mode 100644
index 0000000000000000000000000000000000000000..26f720984ca8ccf45c6846d437ffb6231abeca75
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.plan9-386.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.plan9-386
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.plan9-386"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/rc
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.plan9-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.plan9-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7cf1ddecb0fd0b24f873e31c0b8900a3cad6608f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.plan9-amd64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.plan9-amd64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.plan9-amd64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/rc
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.plan9-arm.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.plan9-arm.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3c3b6a74b63c69f53e1518e58d7ff18528769e6e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.plan9-arm.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.plan9-arm
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.plan9-arm"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/rc
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.solaris-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.solaris-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a750aaf114ec3a4ed32aba8f034024cde9ff8f87
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.solaris-amd64.txt
@@ -0,0 +1,14 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.solaris-amd64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.solaris-amd64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go --
+#!/bin/sh
+echo go1.999testmod here!
+-- bin/gofmt --
+echo i am unused
+-- pkg/tool/fake --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.windows-386.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.windows-386.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ca0f7aabb0d3175c4aaa8662f104f9cc86348c83
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.windows-386.txt
@@ -0,0 +1,10 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.windows-386
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.windows-386"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go.bat --
+@echo go1.999testmod here!
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.windows-amd64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.windows-amd64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..44e16c7a0404dabfe4263411aa11bb6898142970
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.windows-amd64.txt
@@ -0,0 +1,10 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.windows-amd64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.windows-amd64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go.bat --
+@echo go1.999testmod here!
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.windows-arm.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.windows-arm.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ee4e016943191ae25236c0c627861c5f05aebabd
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.windows-arm.txt
@@ -0,0 +1,10 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.windows-arm
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.windows-arm"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go.bat --
+@echo go1.999testmod here!
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.windows-arm64.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.windows-arm64.txt
new file mode 100644
index 0000000000000000000000000000000000000000..be3ff801868e23e9f3fb8248d8f7e4202db35728
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_toolchain_v0.0.1-go1.999testmod.windows-arm64.txt
@@ -0,0 +1,10 @@
+golang.org/toolchain@v0.0.1-go1.999testmod.windows-arm64
+
+-- .mod --
+module golang.org/toolchain
+-- .info --
+{"Version":"v0.0.1-go1.999testmod.windows-arm64"}
+-- go.mod --
+module golang.org/toolchain
+-- bin/go.bat --
+@echo go1.999testmod here!
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_x_internal_v0.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_x_internal_v0.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5737e95cf47fdb761f7d8701924b2b62b29ec74a
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_x_internal_v0.1.0.txt
@@ -0,0 +1,43 @@
+written by hand — loosely derived from golang.org/x/crypto/internal/subtle,
+but splitting the internal package across a module boundary
+
+-- .mod --
+module golang.org/x/internal
+-- .info --
+{"Version":"v0.1.0","Name":"","Short":"","Time":"2018-07-25T17:24:00Z"}
+-- go.mod --
+module golang.org/x/internal
+-- subtle/aliasing.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+// This is a tiny version of golang.org/x/crypto/internal/subtle.
+
+package subtle
+
+import "unsafe"
+
+func AnyOverlap(x, y []byte) bool {
+	return len(x) > 0 && len(y) > 0 &&
+		uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) &&
+		uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1]))
+}
+-- subtle/aliasing_appengine.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package subtle
+
+import "reflect"
+
+func AnyOverlap(x, y []byte) bool {
+	return len(x) > 0 && len(y) > 0 &&
+		reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() &&
+		reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer()
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_x_text_v0.0.0-20170915032832-14c0d48ead0c.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_x_text_v0.0.0-20170915032832-14c0d48ead0c.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f4f50cdedb616ebb822ce89a152f2c09f168fe0a
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_x_text_v0.0.0-20170915032832-14c0d48ead0c.txt
@@ -0,0 +1,47 @@
+written by hand - just enough to compile rsc.io/sampler, rsc.io/quote
+
+-- .mod --
+module golang.org/x/text
+-- .info --
+{"Version":"v0.0.0-20170915032832-14c0d48ead0c","Name":"v0.0.0-20170915032832-14c0d48ead0c","Short":"14c0d48ead0c","Time":"2017-09-15T03:28:32Z"}
+-- go.mod --
+module golang.org/x/text
+-- unused/unused.go --
+package unused
+-- language/lang.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a tiny version of golang.org/x/text.
+
+package language
+
+import "strings"
+
+type Tag string
+
+func Make(s string) Tag { return Tag(s) }
+
+func (t Tag) String() string { return string(t) }
+
+func NewMatcher(tags []Tag) Matcher { return &matcher{tags} }
+
+type Matcher interface {
+	Match(...Tag) (Tag, int, int)
+}
+
+type matcher struct {
+	tags []Tag
+}
+
+func (m *matcher) Match(prefs ...Tag) (Tag, int, int) {
+	for _, pref := range prefs {
+		for _, tag := range m.tags {
+			if tag == pref || strings.HasPrefix(string(pref), string(tag+"-")) || strings.HasPrefix(string(tag), string(pref+"-")) {
+				return tag, 0, 0
+			}
+		}
+	}
+	return m.tags[0], 0, 0
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_x_text_v0.3.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_x_text_v0.3.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5561afae8ed2fd52ee24afb4c3b8777cb62e0aba
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_x_text_v0.3.0.txt
@@ -0,0 +1,47 @@
+written by hand - just enough to compile rsc.io/sampler, rsc.io/quote
+
+-- .mod --
+module golang.org/x/text
+-- .info --
+{"Version":"v0.3.0","Name":"","Short":"","Time":"2017-09-16T03:28:32Z"}
+-- go.mod --
+module golang.org/x/text
+-- unused/unused.go --
+package unused
+-- language/lang.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a tiny version of golang.org/x/text.
+
+package language
+
+import "strings"
+
+type Tag string
+
+func Make(s string) Tag { return Tag(s) }
+
+func (t Tag) String() string { return string(t) }
+
+func NewMatcher(tags []Tag) Matcher { return &matcher{tags} }
+
+type Matcher interface {
+	Match(...Tag) (Tag, int, int)
+}
+
+type matcher struct {
+	tags []Tag
+}
+
+func (m *matcher) Match(prefs ...Tag) (Tag, int, int) {
+	for _, pref := range prefs {
+		for _, tag := range m.tags {
+			if tag == pref || strings.HasPrefix(string(pref), string(tag+"-")) || strings.HasPrefix(string(tag), string(pref+"-")) {
+				return tag, 0, 0
+			}
+		}
+	}
+	return m.tags[0], 0, 0
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_x_useinternal_v0.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_x_useinternal_v0.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3fcba447befe22d4dc35a54684cf49028ecefa9b
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/golang.org_x_useinternal_v0.1.0.txt
@@ -0,0 +1,13 @@
+written by hand — uses an internal package from another module
+(https://golang.org/s/go14internal)
+
+-- .mod --
+module golang.org/x/useinternal
+-- .info --
+{"Version":"v0.1.0","Name":"","Short":"","Time":"2018-07-25T17:24:00Z"}
+-- go.mod --
+module golang.org/x/useinternal
+-- useinternal.go --
+package useinternal
+
+import _ "golang.org/x/internal/subtle"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/gopkg.in_dummy.v2-unstable_v2.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/gopkg.in_dummy.v2-unstable_v2.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f174159fd3fddc0e2fdd8728a7f85d6f971abe6a
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/gopkg.in_dummy.v2-unstable_v2.0.0.txt
@@ -0,0 +1,9 @@
+gopkg.in/dummy.v2-unstable v2.0.0
+written by hand
+
+-- .mod --
+module gopkg.in/dummy.v2-unstable
+-- .info --
+{"Version":"v2.0.0"}
+-- dummy.go --
+package dummy
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/not-rsc.io_quote_v0.1.0-nomod.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/not-rsc.io_quote_v0.1.0-nomod.txt
new file mode 100644
index 0000000000000000000000000000000000000000..efff08826ad5ddf894f99a74512f63f13ba3cc19
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/not-rsc.io_quote_v0.1.0-nomod.txt
@@ -0,0 +1,59 @@
+Constructed by hand.
+(derived from rsc.io/quote@e7a685a342, but without an explicit go.mod file.)
+
+-- .mod --
+module "not-rsc.io/quote"
+-- .info --
+{"Version":"v0.1.0-nomod","Time":"2018-02-14T00:51:33Z"}
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+// Hello returns a greeting.
+func Hello() string {
+	return "Hello, world."
+}
+
+// Glass returns a useful phrase for world travelers.
+func Glass() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return "I can eat glass and it doesn't hurt me."
+}
+
+// Go returns a Go proverb.
+func Go() string {
+	return "Don't communicate by sharing memory, share memory by communicating."
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import "testing"
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory. Share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_depofdirectpatch_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_depofdirectpatch_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..40616c668a8a74ce6bdcb1102a66e4d6a3c0493d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_depofdirectpatch_v1.0.0.txt
@@ -0,0 +1,11 @@
+patch.example.com/depofdirectpatch v1.0.0
+written by hand
+
+-- .mod --
+module patch.example.com/depofdirectpatch
+-- .info --
+{"Version":"v1.0.0"}
+-- go.mod --
+module patch.example.com/depofdirectpatch
+-- depofdirectpatch.go --
+package depofdirectpatch
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_depofdirectpatch_v1.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_depofdirectpatch_v1.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e075028656e2691feccc438337b67de71d875a00
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_depofdirectpatch_v1.0.1.txt
@@ -0,0 +1,11 @@
+patch.example.com/depofdirectpatch v1.0.1
+written by hand
+
+-- .mod --
+module patch.example.com/depofdirectpatch
+-- .info --
+{"Version":"v1.0.1"}
+-- go.mod --
+module patch.example.com/depofdirectpatch
+-- depofdirectpatch.go --
+package depofdirectpatch
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_direct_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_direct_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1e775fb89bc9ac1664a7cd980ea0abaf099ffe71
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_direct_v1.0.0.txt
@@ -0,0 +1,21 @@
+patch.example.com/direct v1.0.0
+written by hand
+
+-- .mod --
+module patch.example.com/direct
+
+require (
+	patch.example.com/indirect v1.0.0
+)
+-- .info --
+{"Version":"v1.0.0"}
+-- go.mod --
+module patch.example.com/direct
+
+require (
+	patch.example.com/indirect v1.0.0
+)
+-- direct.go --
+package direct
+
+import _ "patch.example.com/indirect"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_direct_v1.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_direct_v1.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..64912b7b439b6868a480395ffc923c4dd3a14d1f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_direct_v1.0.1.txt
@@ -0,0 +1,27 @@
+patch.example.com/direct v1.0.1
+written by hand
+
+-- .mod --
+module patch.example.com/direct
+
+require (
+	patch.example.com/indirect v1.0.0
+	patch.example.com/depofdirectpatch v1.0.0
+)
+-- .info --
+{"Version":"v1.0.1"}
+-- go.mod --
+module patch.example.com/direct
+
+require (
+	patch.example.com/indirect v1.0.0
+	patch.example.com/depofdirectpatch v1.0.0
+)
+-- direct.go --
+package direct
+
+import _ "patch.example.com/indirect"
+-- usedepofdirectpatch/unused.go --
+package usedepofdirectpatch
+
+import _ "patch.example.com/depofdirectpatch"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_direct_v1.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_direct_v1.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..406e3b9f628a76d6ea4c10568cfdc3d11095b82d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_direct_v1.1.0.txt
@@ -0,0 +1,21 @@
+patch.example.com/direct v1.1.0
+written by hand
+
+-- .mod --
+module patch.example.com/direct
+
+require (
+	patch.example.com/indirect v1.0.0
+)
+-- .info --
+{"Version":"v1.1.0"}
+-- go.mod --
+module patch.example.com/direct
+
+require (
+	patch.example.com/indirect v1.0.0
+)
+-- direct.go --
+package direct
+
+import _ "patch.example.com/indirect"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_indirect_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_indirect_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ea7f5e2d8d0c8fa841f716c06604de327ce3b65f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_indirect_v1.0.0.txt
@@ -0,0 +1,11 @@
+patch.example.com/indirect v1.0.0
+written by hand
+
+-- .mod --
+module patch.example.com/indirect
+-- .info --
+{"Version":"v1.0.0"}
+-- go.mod --
+module patch.example.com/indirect
+-- direct.go --
+package indirect
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_indirect_v1.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_indirect_v1.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8c6cf8e7bf94732bfe178d118c4ac4ce0ce0e022
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_indirect_v1.0.1.txt
@@ -0,0 +1,11 @@
+patch.example.com/indirect v1.0.1
+written by hand
+
+-- .mod --
+module patch.example.com/indirect
+-- .info --
+{"Version":"v1.0.1"}
+-- go.mod --
+module patch.example.com/indirect
+-- direct.go --
+package indirect
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_indirect_v1.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_indirect_v1.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f7229d417aa8c209f9bc00942891caaa5635441a
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/patch.example.com_indirect_v1.1.0.txt
@@ -0,0 +1,11 @@
+patch.example.com/indirect v1.1.0
+written by hand
+
+-- .mod --
+module patch.example.com/indirect
+-- .info --
+{"Version":"v1.1.0"}
+-- go.mod --
+module patch.example.com/indirect
+-- direct.go --
+package indirect
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_!c!g!o_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_!c!g!o_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6276147535b445b33e1ebecc9d154e56186ebd49
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_!c!g!o_v1.0.0.txt
@@ -0,0 +1,19 @@
+rsc.io/CGO v1.0.0
+
+-- .mod --
+module rsc.io/CGO
+-- .info --
+{"Version":"v1.0.0","Name":"","Short":"","Time":"2018-08-01T18:23:45Z"}
+-- go.mod --
+module rsc.io/CGO
+-- cgo.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package CGO
+
+// #cgo CFLAGS: -I${SRCDIR}
+import "C"
+
+var V = 0
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_!q!u!o!t!e_v1.5.2.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_!q!u!o!t!e_v1.5.2.txt
new file mode 100644
index 0000000000000000000000000000000000000000..21185c39f3320ed7e5b5926cf18d48bd3a923fdc
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_!q!u!o!t!e_v1.5.2.txt
@@ -0,0 +1,88 @@
+rsc.io/QUOTE v1.5.2
+
+-- .mod --
+module rsc.io/QUOTE
+
+require rsc.io/quote v1.5.2
+-- .info --
+{"Version":"v1.5.2","Name":"","Short":"","Time":"2018-07-15T16:25:34Z"}
+-- go.mod --
+module rsc.io/QUOTE
+
+require rsc.io/quote v1.5.2
+-- QUOTE/quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// PACKAGE QUOTE COLLECTS LOUD SAYINGS.
+package QUOTE
+
+import (
+	"strings"
+
+	"rsc.io/quote"
+)
+
+// HELLO RETURNS A GREETING.
+func HELLO() string {
+	return strings.ToUpper(quote.Hello())
+}
+
+// GLASS RETURNS A USEFUL PHRASE FOR WORLD TRAVELERS.
+func GLASS() string {
+	return strings.ToUpper(quote.GLASS())
+}
+
+// GO RETURNS A GO PROVERB.
+func GO() string {
+	return strings.ToUpper(quote.GO())
+}
+
+// OPT RETURNS AN OPTIMIZATION TRUTH.
+func OPT() string {
+	return strings.ToUpper(quote.OPT())
+}
+-- QUOTE/quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package QUOTE
+
+import (
+	"os"
+	"testing"
+)
+
+func init() {
+	os.Setenv("LC_ALL", "en")
+}
+
+func TestHELLO(t *testing.T) {
+	hello := "HELLO, WORLD"
+	if out := HELLO(); out != hello {
+		t.Errorf("HELLO() = %q, want %q", out, hello)
+	}
+}
+
+func TestGLASS(t *testing.T) {
+	glass := "I CAN EAT GLASS AND IT DOESN'T HURT ME."
+	if out := GLASS(); out != glass {
+		t.Errorf("GLASS() = %q, want %q", out, glass)
+	}
+}
+
+func TestGO(t *testing.T) {
+	go1 := "DON'T COMMUNICATE BY SHARING MEMORY, SHARE MEMORY BY COMMUNICATING."
+	if out := GO(); out != go1 {
+		t.Errorf("GO() = %q, want %q", out, go1)
+	}
+}
+
+func TestOPT(t *testing.T) {
+	opt := "IF A PROGRAM IS TOO SLOW, IT MUST HAVE A LOOP."
+	if out := OPT(); out != opt {
+		t.Errorf("OPT() = %q, want %q", out, opt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_!q!u!o!t!e_v1.5.3-!p!r!e.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_!q!u!o!t!e_v1.5.3-!p!r!e.txt
new file mode 100644
index 0000000000000000000000000000000000000000..54bac2df7bb36867c48026adf215aae7cc6a24c6
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_!q!u!o!t!e_v1.5.3-!p!r!e.txt
@@ -0,0 +1,88 @@
+rsc.io/QUOTE v1.5.3-PRE (sigh)
+
+-- .mod --
+module rsc.io/QUOTE
+
+require rsc.io/quote v1.5.2
+-- .info --
+{"Version":"v1.5.3-PRE","Name":"","Short":"","Time":"2018-07-15T16:25:34Z"}
+-- go.mod --
+module rsc.io/QUOTE
+
+require rsc.io/quote v1.5.2
+-- QUOTE/quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// PACKAGE QUOTE COLLECTS LOUD SAYINGS.
+package QUOTE
+
+import (
+	"strings"
+
+	"rsc.io/quote"
+)
+
+// HELLO RETURNS A GREETING.
+func HELLO() string {
+	return strings.ToUpper(quote.Hello())
+}
+
+// GLASS RETURNS A USEFUL PHRASE FOR WORLD TRAVELERS.
+func GLASS() string {
+	return strings.ToUpper(quote.GLASS())
+}
+
+// GO RETURNS A GO PROVERB.
+func GO() string {
+	return strings.ToUpper(quote.GO())
+}
+
+// OPT RETURNS AN OPTIMIZATION TRUTH.
+func OPT() string {
+	return strings.ToUpper(quote.OPT())
+}
+-- QUOTE/quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package QUOTE
+
+import (
+	"os"
+	"testing"
+)
+
+func init() {
+	os.Setenv("LC_ALL", "en")
+}
+
+func TestHELLO(t *testing.T) {
+	hello := "HELLO, WORLD"
+	if out := HELLO(); out != hello {
+		t.Errorf("HELLO() = %q, want %q", out, hello)
+	}
+}
+
+func TestGLASS(t *testing.T) {
+	glass := "I CAN EAT GLASS AND IT DOESN'T HURT ME."
+	if out := GLASS(); out != glass {
+		t.Errorf("GLASS() = %q, want %q", out, glass)
+	}
+}
+
+func TestGO(t *testing.T) {
+	go1 := "DON'T COMMUNICATE BY SHARING MEMORY, SHARE MEMORY BY COMMUNICATING."
+	if out := GO(); out != go1 {
+		t.Errorf("GO() = %q, want %q", out, go1)
+	}
+}
+
+func TestOPT(t *testing.T) {
+	opt := "IF A PROGRAM IS TOO SLOW, IT MUST HAVE A LOOP."
+	if out := OPT(); out != opt {
+		t.Errorf("OPT() = %q, want %q", out, opt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badfile1_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badfile1_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9d23e7db98cf3a7570713686600e36c97d1642e5
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badfile1_v1.0.0.txt
@@ -0,0 +1,14 @@
+rsc.io/badfile1 v1.0.0
+written by hand
+this is part of the badfile test but is a valid zip file.
+
+-- .mod --
+module rsc.io/badfile1
+-- .info --
+{"Version":"v1.0.0"}
+-- go.mod --
+module rsc.io/badfile1
+-- α.go --
+package α
+-- .gitignore --
+-- x/y/z/.gitignore --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badfile2_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badfile2_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..58e1e1c103acdb862f82b3781e2bda8fa50a0a59
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badfile2_v1.0.0.txt
@@ -0,0 +1,12 @@
+rsc.io/badfile1 v1.0.0
+written by hand
+
+-- .mod --
+module rsc.io/badfile2
+-- .info --
+{"Version":"v1.0.0"}
+-- go.mod --
+module rsc.io/badfile2
+-- ☺.go --
+package smiley
+
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badfile3_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badfile3_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a008448c5fd27a32e81d5370536ae621611a089e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badfile3_v1.0.0.txt
@@ -0,0 +1,12 @@
+rsc.io/badfile3 v1.0.0
+written by hand
+
+-- .mod --
+module rsc.io/badfile3
+-- .info --
+{"Version":"v1.0.0"}
+-- go.mod --
+module rsc.io/badfile3
+-- x?y.go --
+package x
+
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badfile4_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badfile4_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e28844dc632e55a767fcfd11df4e8bfb37e303a5
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badfile4_v1.0.0.txt
@@ -0,0 +1,15 @@
+rsc.io/badfile4 v1.0.0
+written by hand
+
+-- .mod --
+module rsc.io/badfile4
+-- .info --
+{"Version":"v1.0.0"}
+-- go.mod --
+module rsc.io/badfile4
+-- x/Y.go --
+package x
+-- x/y.go --
+package x
+
+
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badfile5_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badfile5_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3c7903a3bc0c71b3df20e10062cc301b5730d48d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badfile5_v1.0.0.txt
@@ -0,0 +1,13 @@
+rsc.io/badfile5 v1.0.0
+written by hand
+
+-- .mod --
+module rsc.io/badfile5
+-- .info --
+{"Version":"v1.0.0"}
+-- go.mod --
+module rsc.io/badfile5
+-- x/y/z/w.go --
+package z
+-- x/Y/zz/ww.go --
+package zz
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badmod_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badmod_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..993ceb7a0be72a4ab5a5596357cea171bd72d653
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badmod_v1.0.0.txt
@@ -0,0 +1,11 @@
+rsc.io/badmod v1.0.0
+written by hand
+
+-- .mod --
+module rsc.io/badmod
+hello world
+-- .info --
+{"Version":"v1.0.0"}
+-- x.go --
+package x
+
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badsum_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badsum_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d62db2627a0510da485bedf9826c919989d13c2c
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badsum_v1.0.0.txt
@@ -0,0 +1,14 @@
+rsc.io/badsum@v1.0.0
+
+This module would match the hard-coded hash for rsc.io/badsum v1.0.0
+in modfetch/notary.go if not for the "break hash" line.
+
+-- .mod --
+module "rsc.io/badsum"
+-- .info --
+{"Version":"v1.0.0","Time":"2018-02-14T00:45:20Z"}
+-- go.mod --
+module "rsc.io/badsum"
+-- badsum.go --
+package badsum
+// break hash
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badsum_v1.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badsum_v1.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5fea50a01d73c53444616b2e856c6c36acf82dfc
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badsum_v1.0.1.txt
@@ -0,0 +1,14 @@
+rsc.io/badsum@v1.0.1
+
+This module would match the hard-coded hash for rsc.io/badsum v1.0.1/go.mod
+in modfetch/notary.go if not for the "break hash" line.
+
+-- .mod --
+module "rsc.io/badsum"
+# break hash
+-- .info --
+{"Version":"v1.0.1","Time":"2018-02-14T00:45:20Z"}
+-- go.mod --
+module "rsc.io/badsum"
+-- badsum.go --
+package badsum
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badzip_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badzip_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..07a38fa6d7d7492fd365c426c43ae2877a7dd544
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_badzip_v1.0.0.txt
@@ -0,0 +1,11 @@
+rsc.io/badzip v1.0.0
+written by hand
+
+-- .mod --
+module rsc.io/badzip
+-- .info --
+{"Version":"v1.0.0"}
+-- x.go --
+package x
+-- /rsc.io/badzip@v1.0.0.txt --
+This file should not be here.
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_breaker_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_breaker_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a103e3f8aa1a926e2095665f1b7bc300a614cdb1
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_breaker_v1.0.0.txt
@@ -0,0 +1,11 @@
+rsc.io/breaker v1.0.0
+written by hand
+
+-- .mod --
+module rsc.io/breaker
+-- .info --
+{"Version":"v1.0.0"}
+-- breaker.go --
+package breaker
+
+const X = 1
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_breaker_v2.0.0+incompatible.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_breaker_v2.0.0+incompatible.txt
new file mode 100644
index 0000000000000000000000000000000000000000..59d8bacf07881356e60c8d7a196ffd534d505b6b
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_breaker_v2.0.0+incompatible.txt
@@ -0,0 +1,11 @@
+rsc.io/breaker v2.0.0+incompatible
+written by hand
+
+-- .mod --
+module rsc.io/breaker
+-- .info --
+{"Version":"v2.0.0+incompatible", "Name": "7307b307f4f0dde421900f8e5126fadac1e13aed", "Short": "7307b307f4f0"}
+-- breaker.go --
+package breaker
+
+const XX = 2
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_breaker_v2.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_breaker_v2.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..59d8bacf07881356e60c8d7a196ffd534d505b6b
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_breaker_v2.0.0.txt
@@ -0,0 +1,11 @@
+rsc.io/breaker v2.0.0+incompatible
+written by hand
+
+-- .mod --
+module rsc.io/breaker
+-- .info --
+{"Version":"v2.0.0+incompatible", "Name": "7307b307f4f0dde421900f8e5126fadac1e13aed", "Short": "7307b307f4f0"}
+-- breaker.go --
+package breaker
+
+const XX = 2
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_fortune_v0.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_fortune_v0.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..644695cba11b6c9034a40466311392522e9cd493
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_fortune_v0.0.1.txt
@@ -0,0 +1,21 @@
+rsc.io/fortune 0.0.1
+written by hand
+
+-- .mod --
+module rsc.io/fortune
+go 1.21rc999
+
+-- go.mod --
+module rsc.io/fortune
+go 1.21rc999
+
+-- .info --
+{"Version":"v0.0.1"}
+-- fortune.go --
+package main
+
+import "rsc.io/quote"
+
+func main() {
+	println(quote.Hello())
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_fortune_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_fortune_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d8a71f3cd9397f993133aeaa9b1ebfffec51a507
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_fortune_v1.0.0.txt
@@ -0,0 +1,15 @@
+rsc.io/fortune v1.0.0
+written by hand
+
+-- .mod --
+module rsc.io/fortune
+-- .info --
+{"Version":"v1.0.0"}
+-- fortune.go --
+package main
+
+import "rsc.io/quote"
+
+func main() {
+	println(quote.Hello())
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_fortune_v2_v2.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_fortune_v2_v2.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3acd6379311f643194e45a712464b4af4aed5fa6
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_fortune_v2_v2.0.0.txt
@@ -0,0 +1,21 @@
+rsc.io/fortune v2.0.0
+written by hand
+
+-- .mod --
+module rsc.io/fortune/v2
+-- .info --
+{"Version":"v2.0.0"}
+-- fortune.go --
+package main
+
+import "rsc.io/quote"
+
+func main() {
+	println(quote.Hello())
+}
+-- fortune_test.go --
+package main
+
+import "testing"
+
+func TestFortuneV2(t *testing.T) {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_future_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_future_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d3826a3d436ad7feb790532a8e78ee2de0bb2f0c
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_future_v1.0.0.txt
@@ -0,0 +1,16 @@
+rsc.io/future v1.0.0
+written by hand
+
+-- .mod --
+module rsc.io/future
+go 1.999
+-- .info --
+{"Version":"v1.0.0"}
+-- main.go --
+package main
+
+func main() {
+}
+-- go.mod --
+module rsc.io/future
+go 1.999
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needall_v0.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needall_v0.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0a1582a577cb9539928d7271463e6db7e5c3ed2e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needall_v0.0.1.txt
@@ -0,0 +1,25 @@
+rsc.io/needall 0.0.1
+written by hand
+
+-- .mod --
+module rsc.io/needall
+go 1.23
+
+require rsc.io/needgo121 v0.0.1
+require rsc.io/needgo122 v0.0.1
+require rsc.io/needgo123 v0.0.1
+
+-- go.mod --
+module rsc.io/needall
+go 1.23
+
+require rsc.io/needgo121 v0.0.1
+require rsc.io/needgo122 v0.0.1
+require rsc.io/needgo123 v0.0.1
+
+-- .info --
+{"Version":"v0.0.1"}
+-- p.go --
+package p
+
+func F() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo1183_v0.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo1183_v0.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a41296e1c734c1460491fce25b7fd45eb9a02cea
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo1183_v0.0.1.txt
@@ -0,0 +1,17 @@
+rsc.io/needgo1183 v0.0.1
+written by hand
+
+-- .mod --
+module rsc.io/needgo1183
+go 1.18.3
+
+-- go.mod --
+module rsc.io/needgo1183
+go 1.18.3
+
+-- .info --
+{"Version":"v0.0.1"}
+-- p.go --
+package p
+
+func F() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo118_v0.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo118_v0.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..805eac7bb2e4f0f55173fd9e6fdcc34de1ba2675
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo118_v0.0.1.txt
@@ -0,0 +1,17 @@
+rsc.io/needgo118 0.0.1
+written by hand
+
+-- .mod --
+module rsc.io/needgo118
+go 1.18
+
+-- go.mod --
+module rsc.io/needgo118
+go 1.18
+
+-- .info --
+{"Version":"v0.0.1"}
+-- p.go --
+package p
+
+func F() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo121_v0.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo121_v0.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5b059609c4e52ba47032b612945eeb61716611f4
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo121_v0.0.1.txt
@@ -0,0 +1,17 @@
+rsc.io/needgo121 0.0.1
+written by hand
+
+-- .mod --
+module rsc.io/needgo121
+go 1.21
+
+-- go.mod --
+module rsc.io/needgo121
+go 1.21
+
+-- .info --
+{"Version":"v0.0.1"}
+-- p.go --
+package p
+
+func F() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo1223_v0.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo1223_v0.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f166a8226e0e4c3b4d07ec1c895741fcbed49bd3
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo1223_v0.0.1.txt
@@ -0,0 +1,17 @@
+rsc.io/needgo1223 0.0.1
+written by hand
+
+-- .mod --
+module rsc.io/needgo1223
+go 1.22.3
+
+-- go.mod --
+module rsc.io/needgo1223
+go 1.22.3
+
+-- .info --
+{"Version":"v0.0.1"}
+-- p.go --
+package p
+
+func F() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo122_v0.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo122_v0.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..59116eb6abeadfd530e87f5a887dd9bdd6b5546e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo122_v0.0.1.txt
@@ -0,0 +1,17 @@
+rsc.io/needgo122 0.0.1
+written by hand
+
+-- .mod --
+module rsc.io/needgo122
+go 1.22
+
+-- go.mod --
+module rsc.io/needgo122
+go 1.22
+
+-- .info --
+{"Version":"v0.0.1"}
+-- p.go --
+package p
+
+func F() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo123_v0.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo123_v0.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0ec55714da031e2ff9e30aed55e44fd4e0246201
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo123_v0.0.1.txt
@@ -0,0 +1,17 @@
+rsc.io/needgo123 0.0.1
+written by hand
+
+-- .mod --
+module rsc.io/needgo123
+go 1.23
+
+-- go.mod --
+module rsc.io/needgo123
+go 1.23
+
+-- .info --
+{"Version":"v0.0.1"}
+-- p.go --
+package p
+
+func F() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo124_v0.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo124_v0.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..634f50477fa544f5b956ff54d0379e8d72553028
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_needgo124_v0.0.1.txt
@@ -0,0 +1,17 @@
+rsc.io/needgo124 0.0.1
+written by hand
+
+-- .mod --
+module rsc.io/needgo124
+go 1.24
+
+-- go.mod --
+module rsc.io/needgo124
+go 1.24
+
+-- .info --
+{"Version":"v0.0.1"}
+-- p.go --
+package p
+
+func F() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_panicnil_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_panicnil_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6ea1b22e0816955af56e5f8383df2466dcef0d23
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_panicnil_v1.0.0.txt
@@ -0,0 +1,13 @@
+rsc.io/panicnil v1.0.0
+written by hand
+
+-- .mod --
+module rsc.io/panicnil
+-- .info --
+{"Version":"v1.0.0"}
+-- fortune.go --
+package main
+
+func main() {
+	panic(nil)
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_panicnil_v1.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_panicnil_v1.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fe67a8838c3bd9ab5d48c7194307f284f14eae13
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_panicnil_v1.1.0.txt
@@ -0,0 +1,14 @@
+rsc.io/panicnil v1.1.0
+written by hand
+
+-- .mod --
+module rsc.io/panicnil
+go 1.21
+-- .info --
+{"Version":"v1.1.0"}
+-- fortune.go --
+package main
+
+func main() {
+	panic(nil)
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180214005133-e7a685a342c0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180214005133-e7a685a342c0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8ae173e7aec71ffd8055273a3617f0741f3091f4
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180214005133-e7a685a342c0.txt
@@ -0,0 +1,60 @@
+rsc.io/quote@e7a685a342
+
+-- .mod --
+module "rsc.io/quote"
+-- .info --
+{"Version":"v0.0.0-20180214005133-e7a685a342c0","Name":"e7a685a342c001acc3eb7f5eafa82980480042c7","Short":"e7a685a342c0","Time":"2018-02-14T00:51:33Z"}
+-- go.mod --
+module "rsc.io/quote"
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+// Hello returns a greeting.
+func Hello() string {
+	return "Hello, world."
+}
+
+// Glass returns a useful phrase for world travelers.
+func Glass() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return "I can eat glass and it doesn't hurt me."
+}
+
+// Go returns a Go proverb.
+func Go() string {
+	return "Don't communicate by sharing memory, share memory by communicating."
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import "testing"
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory. Share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180214005840-23179ee8a569.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180214005840-23179ee8a569.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bc626bac7a49e22f79368972e840cda549d0e4a6
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180214005840-23179ee8a569.txt
@@ -0,0 +1,86 @@
+rsc.io/quote@v0.0.0-20180214005840-23179ee8a569
+
+-- .mod --
+module "rsc.io/quote"
+
+require "rsc.io/sampler" v1.3.0
+-- .info --
+{"Version":"v0.0.0-20180214005840-23179ee8a569","Name":"23179ee8a569bb05d896ae05c6503ec69a19f99f","Short":"23179ee8a569","Time":"2018-02-14T00:58:40Z"}
+-- go.mod --
+module "rsc.io/quote"
+
+require "rsc.io/sampler" v1.3.0
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+import "rsc.io/sampler"
+
+// Hello returns a greeting.
+func Hello() string {
+	return sampler.Hello()
+}
+
+// Glass returns a useful phrase for world travelers.
+func Glass() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return "I can eat glass and it doesn't hurt me."
+}
+
+// Go returns a Go proverb.
+func Go() string {
+	return "Don't communicate by sharing memory, share memory by communicating."
+}
+
+// Opt returns an optimization truth.
+func Opt() string {
+	// Wisdom from ken.
+	return "If a program is too slow, it must have a loop."
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import (
+	"os"
+	"testing"
+)
+
+func init() {
+	os.Setenv("LC_ALL", "en")
+}
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory, share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
+
+func TestOpt(t *testing.T) {
+	opt := "If a program is too slow, it must have a loop."
+	if out := Opt(); out != opt {
+		t.Errorf("Opt() = %q, want %q", out, opt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180628003336-dd9747d19b04.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180628003336-dd9747d19b04.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bbc8097dc3a2f9ba7c8b5d60f6c06f75c634ce91
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180628003336-dd9747d19b04.txt
@@ -0,0 +1,100 @@
+rsc.io/quote@dd9747d
+
+-- .mod --
+module "rsc.io/quote"
+
+require "rsc.io/sampler" v1.3.0
+-- .info --
+{"Version":"v0.0.0-20180628003336-dd9747d19b04","Name":"dd9747d19b041365fbddf0399ddba6bff5eb1b3e","Short":"dd9747d19b04","Time":"2018-06-28T00:33:36Z"}
+-- buggy/buggy_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buggy
+
+import "testing"
+
+func Test(t *testing.T) {
+	t.Fatal("buggy!")
+}
+-- go.mod --
+module "rsc.io/quote"
+
+require "rsc.io/sampler" v1.3.0
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+import "rsc.io/sampler"
+
+AN EVEN WORSE CHANGE!
+
+// Hello returns a greeting.
+func Hello() string {
+	return sampler.Hello()
+}
+
+// Glass returns a useful phrase for world travelers.
+func Glass() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return "I can eat glass and it doesn't hurt me."
+}
+
+// Go returns a Go proverb.
+func Go() string {
+	return "Don't communicate by sharing memory, share memory by communicating."
+}
+
+// Opt returns an optimization truth.
+func Opt() string {
+	// Wisdom from ken.
+	return "If a program is too slow, it must have a loop."
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import (
+	"os"
+	"testing"
+)
+
+func init() {
+	os.Setenv("LC_ALL", "en")
+}
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory, share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
+
+func TestOpt(t *testing.T) {
+	opt := "If a program is too slow, it must have a loop."
+	if out := Opt(); out != opt {
+		t.Errorf("Opt() = %q, want %q", out, opt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180709153244-fd906ed3b100.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180709153244-fd906ed3b100.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e461ed4231e20b0ef009e985e869954e8ec46daf
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180709153244-fd906ed3b100.txt
@@ -0,0 +1,86 @@
+rsc.io/quote@v2.0.0
+
+-- .mod --
+module "rsc.io/quote"
+
+require "rsc.io/sampler" v1.3.0
+-- .info --
+{"Version":"v0.0.0-20180709153244-fd906ed3b100","Name":"fd906ed3b100e47181ffa9ec36d82294525c9109","Short":"fd906ed3b100","Time":"2018-07-09T15:32:44Z"}
+-- go.mod --
+module "rsc.io/quote"
+
+require "rsc.io/sampler" v1.3.0
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+import "rsc.io/sampler"
+
+// Hello returns a greeting.
+func HelloV2() string {
+	return sampler.Hello()
+}
+
+// Glass returns a useful phrase for world travelers.
+func GlassV2() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return "I can eat glass and it doesn't hurt me."
+}
+
+// Go returns a Go proverb.
+func GoV2() string {
+	return "Don't communicate by sharing memory, share memory by communicating."
+}
+
+// Opt returns an optimization truth.
+func OptV2() string {
+	// Wisdom from ken.
+	return "If a program is too slow, it must have a loop."
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import (
+	"os"
+	"testing"
+)
+
+func init() {
+	os.Setenv("LC_ALL", "en")
+}
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory, share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
+
+func TestOpt(t *testing.T) {
+	opt := "If a program is too slow, it must have a loop."
+	if out := Opt(); out != opt {
+		t.Errorf("Opt() = %q, want %q", out, opt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180709160352-0d003b9c4bfa.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180709160352-0d003b9c4bfa.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c1d511fda71b9ea26ec793be0e2388b99534c5e0
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180709160352-0d003b9c4bfa.txt
@@ -0,0 +1,98 @@
+rsc.io/quote@v0.0.0-20180709160352-0d003b9c4bfa
+
+-- .mod --
+module rsc.io/quote
+
+require rsc.io/sampler v1.3.0
+-- .info --
+{"Version":"v0.0.0-20180709160352-0d003b9c4bfa","Name":"0d003b9c4bfac881641be8eb1598b782a467a97f","Short":"0d003b9c4bfa","Time":"2018-07-09T16:03:52Z"}
+-- buggy/buggy_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buggy
+
+import "testing"
+
+func Test(t *testing.T) {
+	t.Fatal("buggy!")
+}
+-- go.mod --
+module rsc.io/quote
+
+require rsc.io/sampler v1.3.0
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+import "rsc.io/quote/v2"
+
+// Hello returns a greeting.
+func Hello() string {
+	return quote.HelloV2()
+}
+
+// Glass returns a useful phrase for world travelers.
+func Glass() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return quote.GlassV2()
+}
+
+// Go returns a Go proverb.
+func Go() string {
+	return quote.GoV2()
+}
+
+// Opt returns an optimization truth.
+func Opt() string {
+	// Wisdom from ken.
+	return quote.OptV2()
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import (
+	"os"
+	"testing"
+)
+
+func init() {
+	os.Setenv("LC_ALL", "en")
+}
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory, share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
+
+func TestOpt(t *testing.T) {
+	opt := "If a program is too slow, it must have a loop."
+	if out := Opt(); out != opt {
+		t.Errorf("Opt() = %q, want %q", out, opt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180709162749-b44a0b17b2d1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180709162749-b44a0b17b2d1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f7f794d76dd79fcb475cf9cec13ec3a3d323b4fb
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180709162749-b44a0b17b2d1.txt
@@ -0,0 +1,104 @@
+rsc.io/quote@v0.0.0-20180709162749-b44a0b17b2d1
+
+-- .mod --
+module rsc.io/quote
+
+require (
+	rsc.io/quote/v2 v2.0.1
+	rsc.io/sampler v1.3.0
+)
+-- .info --
+{"Version":"v0.0.0-20180709162749-b44a0b17b2d1","Name":"b44a0b17b2d1fe4c98a8d0e7a68c9bf9e762799a","Short":"b44a0b17b2d1","Time":"2018-07-09T16:27:49Z"}
+-- buggy/buggy_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buggy
+
+import "testing"
+
+func Test(t *testing.T) {
+	t.Fatal("buggy!")
+}
+-- go.mod --
+module rsc.io/quote
+
+require (
+	rsc.io/quote/v2 v2.0.1
+	rsc.io/sampler v1.3.0
+)
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+import "rsc.io/quote/v2"
+
+// Hello returns a greeting.
+func Hello() string {
+	return quote.HelloV2()
+}
+
+// Glass returns a useful phrase for world travelers.
+func Glass() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return quote.GlassV2()
+}
+
+// Go returns a Go proverb.
+func Go() string {
+	return quote.GoV2()
+}
+
+// Opt returns an optimization truth.
+func Opt() string {
+	// Wisdom from ken.
+	return quote.OptV2()
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import (
+	"os"
+	"testing"
+)
+
+func init() {
+	os.Setenv("LC_ALL", "en")
+}
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory, share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
+
+func TestOpt(t *testing.T) {
+	opt := "If a program is too slow, it must have a loop."
+	if out := Opt(); out != opt {
+		t.Errorf("Opt() = %q, want %q", out, opt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180709162816-fe488b867524.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180709162816-fe488b867524.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2d5d8b4e72aa36b2ef25a58ea0f846fd97a2eedb
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180709162816-fe488b867524.txt
@@ -0,0 +1,104 @@
+rsc.io/quote@v0.0.0-20180709162816-fe488b867524
+
+-- .mod --
+module rsc.io/quote
+
+require (
+	rsc.io/quote/v2 v2.0.1
+	rsc.io/sampler v1.3.0
+)
+-- .info --
+{"Version":"v0.0.0-20180709162816-fe488b867524","Name":"fe488b867524806e861c3f4f43ae6946a42ca3f1","Short":"fe488b867524","Time":"2018-07-09T16:28:16Z"}
+-- buggy/buggy_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buggy
+
+import "testing"
+
+func Test(t *testing.T) {
+	t.Fatal("buggy!")
+}
+-- go.mod --
+module rsc.io/quote
+
+require (
+	rsc.io/quote/v2 v2.0.1
+	rsc.io/sampler v1.3.0
+)
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+import "rsc.io/quote/v2"
+
+// Hello returns a greeting.
+func Hello() string {
+	return quote.HelloV2()
+}
+
+// Glass returns a useful phrase for world travelers.
+func Glass() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return quote.GlassV2()
+}
+
+// Go returns a Go proverb.
+func Go() string {
+	return quote.GoV2()
+}
+
+// Opt returns an optimization truth.
+func Opt() string {
+	// Wisdom from ken.
+	return quote.OptV2()
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import (
+	"os"
+	"testing"
+)
+
+func init() {
+	os.Setenv("LC_ALL", "en")
+}
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory, share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
+
+func TestOpt(t *testing.T) {
+	opt := "If a program is too slow, it must have a loop."
+	if out := Opt(); out != opt {
+		t.Errorf("Opt() = %q, want %q", out, opt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180709162918-a91498bed0a7.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180709162918-a91498bed0a7.txt
new file mode 100644
index 0000000000000000000000000000000000000000..853a8c2a1ac7fcc502340a22d4da6d8cb0117b6f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180709162918-a91498bed0a7.txt
@@ -0,0 +1,98 @@
+rsc.io/quote@v0.0.0-20180709162918-a91498bed0a7
+
+-- .mod --
+module rsc.io/quote
+
+require rsc.io/sampler v1.3.0
+-- .info --
+{"Version":"v0.0.0-20180709162918-a91498bed0a7","Name":"a91498bed0a73d4bb9c1fb2597925f7883bc40a7","Short":"a91498bed0a7","Time":"2018-07-09T16:29:18Z"}
+-- buggy/buggy_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buggy
+
+import "testing"
+
+func Test(t *testing.T) {
+	t.Fatal("buggy!")
+}
+-- go.mod --
+module rsc.io/quote
+
+require rsc.io/sampler v1.3.0
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+import "rsc.io/quote/v3"
+
+// Hello returns a greeting.
+func Hello() string {
+	return quote.HelloV3()
+}
+
+// Glass returns a useful phrase for world travelers.
+func Glass() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return quote.GlassV3()
+}
+
+// Go returns a Go proverb.
+func Go() string {
+	return quote.GoV3()
+}
+
+// Opt returns an optimization truth.
+func Opt() string {
+	// Wisdom from ken.
+	return quote.OptV3()
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import (
+	"os"
+	"testing"
+)
+
+func init() {
+	os.Setenv("LC_ALL", "en")
+}
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory, share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
+
+func TestOpt(t *testing.T) {
+	opt := "If a program is too slow, it must have a loop."
+	if out := Opt(); out != opt {
+		t.Errorf("Opt() = %q, want %q", out, opt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180710144737-5d9f230bcfba.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180710144737-5d9f230bcfba.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2ebeac397146fa95b166a87c9c458c9df313776e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v0.0.0-20180710144737-5d9f230bcfba.txt
@@ -0,0 +1,104 @@
+rsc.io/quote@v0.0.0-20180710144737-5d9f230bcfba
+
+-- .mod --
+module rsc.io/quote
+
+require (
+	rsc.io/quote/v3 v3.0.0
+	rsc.io/sampler v1.3.0
+)
+-- .info --
+{"Version":"v0.0.0-20180710144737-5d9f230bcfba","Name":"5d9f230bcfbae514bb6c2215694c2ce7273fc604","Short":"5d9f230bcfba","Time":"2018-07-10T14:47:37Z"}
+-- buggy/buggy_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buggy
+
+import "testing"
+
+func Test(t *testing.T) {
+	t.Fatal("buggy!")
+}
+-- go.mod --
+module rsc.io/quote
+
+require (
+	rsc.io/quote/v3 v3.0.0
+	rsc.io/sampler v1.3.0
+)
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+import "rsc.io/quote/v3"
+
+// Hello returns a greeting.
+func Hello() string {
+	return quote.HelloV3()
+}
+
+// Glass returns a useful phrase for world travelers.
+func Glass() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return quote.GlassV3()
+}
+
+// Go returns a Go proverb.
+func Go() string {
+	return quote.GoV3()
+}
+
+// Opt returns an optimization truth.
+func Opt() string {
+	// Wisdom from ken.
+	return quote.OptV3()
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import (
+	"os"
+	"testing"
+)
+
+func init() {
+	os.Setenv("LC_ALL", "en")
+}
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory, share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
+
+func TestOpt(t *testing.T) {
+	opt := "If a program is too slow, it must have a loop."
+	if out := Opt(); out != opt {
+		t.Errorf("Opt() = %q, want %q", out, opt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9a0793744455fa9e4fba7fd68faef2bd90c10395
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.0.0.txt
@@ -0,0 +1,35 @@
+rsc.io/quote@v1.0.0
+
+-- .mod --
+module "rsc.io/quote"
+-- .info --
+{"Version":"v1.0.0","Name":"f488df80bcdbd3e5bafdc24ad7d1e79e83edd7e6","Short":"f488df80bcdb","Time":"2018-02-14T00:45:20Z"}
+-- go.mod --
+module "rsc.io/quote"
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+// Hello returns a greeting.
+func Hello() string {
+	return "Hello, world."
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import "testing"
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.1.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.1.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0c416053901ce1a4dcfe4198661d91eeaa6a5a47
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.1.0.txt
@@ -0,0 +1,48 @@
+rsc.io/quote@v1.1.0
+
+-- .mod --
+module "rsc.io/quote"
+-- .info --
+{"Version":"v1.1.0","Name":"cfd7145f43f92a8d56b4a3dd603795a3291381a9","Short":"cfd7145f43f9","Time":"2018-02-14T00:46:44Z"}
+-- go.mod --
+module "rsc.io/quote"
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+// Hello returns a greeting.
+func Hello() string {
+	return "Hello, world."
+}
+
+// Glass returns a useful phrase for world travelers.
+func Glass() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return "I can eat glass and it doesn't hurt me."
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import "testing"
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.2.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.2.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e714f0b913719e8cec983d916d69237097961c3d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.2.0.txt
@@ -0,0 +1,61 @@
+rsc.io/quote@v1.2.0
+
+-- .mod --
+module "rsc.io/quote"
+-- .info --
+{"Version":"v1.2.0","Name":"d8a3de91045c932a1c71e545308fe97571d6d65c","Short":"d8a3de91045c","Time":"2018-02-14T00:47:51Z"}
+-- go.mod --
+module "rsc.io/quote"
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+// Hello returns a greeting.
+func Hello() string {
+	return "Hello, world."
+}
+
+// Glass returns a useful phrase for world travelers.
+func Glass() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return "I can eat glass and it doesn't hurt me."
+}
+
+// Go returns a Go proverb.
+func Go() string {
+	return "Don't communicate by sharing memory, share memory by communicating."
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import "testing"
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+// Go returns a Go proverb.
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory. Share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.2.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.2.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..89d5191d3a068eb92992252be8efbabd7698512a
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.2.1.txt
@@ -0,0 +1,60 @@
+rsc.io/quote@v1.2.1
+
+-- .mod --
+module "rsc.io/quote"
+-- .info --
+{"Version":"v1.2.1","Name":"5c1f03b64ab7aa958798a569a31924655dc41e76","Short":"5c1f03b64ab7","Time":"2018-02-14T00:54:20Z"}
+-- go.mod --
+module "rsc.io/quote"
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+// Hello returns a greeting.
+func Hello() string {
+	return "Hello, world."
+}
+
+// Glass returns a useful phrase for world travelers.
+func Glass() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return "I can eat glass and it doesn't hurt me."
+}
+
+// Go returns a Go proverb.
+func Go() string {
+	return "Don't communicate by sharing memory, share memory by communicating."
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import "testing"
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory, share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.3.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.3.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d62766c7d2b950f5ef290b8483d5c0a7bcdc6396
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.3.0.txt
@@ -0,0 +1,73 @@
+rsc.io/quote@v1.3.0
+
+-- .mod --
+module "rsc.io/quote"
+-- .info --
+{"Version":"v1.3.0","Name":"84de74b35823c1e49634f2262f1a58cfc951ebae","Short":"84de74b35823","Time":"2018-02-14T00:54:53Z"}
+-- go.mod --
+module "rsc.io/quote"
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+// Hello returns a greeting.
+func Hello() string {
+	return "Hello, world."
+}
+
+// Glass returns a useful phrase for world travelers.
+func Glass() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return "I can eat glass and it doesn't hurt me."
+}
+
+// Go returns a Go proverb.
+func Go() string {
+	return "Don't communicate by sharing memory, share memory by communicating."
+}
+
+// Opt returns an optimization truth.
+func Opt() string {
+	// Wisdom from ken.
+	return "If a program is too slow, it must have a loop."
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import "testing"
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory, share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
+
+func TestOpt(t *testing.T) {
+	opt := "If a program is too slow, it must have a loop."
+	if out := Opt(); out != opt {
+		t.Errorf("Opt() = %q, want %q", out, opt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.4.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.4.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..698ff8de81222020e39d2df4711cb400d288587c
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.4.0.txt
@@ -0,0 +1,79 @@
+rsc.io/quote@v1.4.0
+
+-- .mod --
+module "rsc.io/quote"
+
+require "rsc.io/sampler" v1.0.0
+-- .info --
+{"Version":"v1.4.0","Name":"19e8b977bd2f437798c2cc2dcfe8a1c0f169481b","Short":"19e8b977bd2f","Time":"2018-02-14T00:56:05Z"}
+-- go.mod --
+module "rsc.io/quote"
+
+require "rsc.io/sampler" v1.0.0
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+import "rsc.io/sampler"
+
+// Hello returns a greeting.
+func Hello() string {
+	return sampler.Hello()
+}
+
+// Glass returns a useful phrase for world travelers.
+func Glass() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return "I can eat glass and it doesn't hurt me."
+}
+
+// Go returns a Go proverb.
+func Go() string {
+	return "Don't communicate by sharing memory, share memory by communicating."
+}
+
+// Opt returns an optimization truth.
+func Opt() string {
+	// Wisdom from ken.
+	return "If a program is too slow, it must have a loop."
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import "testing"
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory, share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
+
+func TestOpt(t *testing.T) {
+	opt := "If a program is too slow, it must have a loop."
+	if out := Opt(); out != opt {
+		t.Errorf("Opt() = %q, want %q", out, opt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.5.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.5.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e7fcdbccff5317095c54d931abbcdf66ef85ca6f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.5.0.txt
@@ -0,0 +1,79 @@
+rsc.io/quote@v1.5.0
+
+-- .mod --
+module "rsc.io/quote"
+
+require "rsc.io/sampler" v1.3.0
+-- .info --
+{"Version":"v1.5.0","Name":"3ba1e30dc83bd52c990132b9dfb1688a9d22de13","Short":"3ba1e30dc83b","Time":"2018-02-14T00:58:15Z"}
+-- go.mod --
+module "rsc.io/quote"
+
+require "rsc.io/sampler" v1.3.0
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+import "rsc.io/sampler"
+
+// Hello returns a greeting.
+func Hello() string {
+	return sampler.Hello()
+}
+
+// Glass returns a useful phrase for world travelers.
+func Glass() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return "I can eat glass and it doesn't hurt me."
+}
+
+// Go returns a Go proverb.
+func Go() string {
+	return "Don't communicate by sharing memory, share memory by communicating."
+}
+
+// Opt returns an optimization truth.
+func Opt() string {
+	// Wisdom from ken.
+	return "If a program is too slow, it must have a loop."
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import "testing"
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory, share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
+
+func TestOpt(t *testing.T) {
+	opt := "If a program is too slow, it must have a loop."
+	if out := Opt(); out != opt {
+		t.Errorf("Opt() = %q, want %q", out, opt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.5.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.5.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..eed051bea04f18c2023002dcb5ff3a245c6e8959
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.5.1.txt
@@ -0,0 +1,86 @@
+rsc.io/quote@23179ee8a569
+
+-- .mod --
+module "rsc.io/quote"
+
+require "rsc.io/sampler" v1.3.0
+-- .info --
+{"Version":"v1.5.1","Name":"23179ee8a569bb05d896ae05c6503ec69a19f99f","Short":"23179ee8a569","Time":"2018-02-14T00:58:40Z"}
+-- go.mod --
+module "rsc.io/quote"
+
+require "rsc.io/sampler" v1.3.0
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+import "rsc.io/sampler"
+
+// Hello returns a greeting.
+func Hello() string {
+	return sampler.Hello()
+}
+
+// Glass returns a useful phrase for world travelers.
+func Glass() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return "I can eat glass and it doesn't hurt me."
+}
+
+// Go returns a Go proverb.
+func Go() string {
+	return "Don't communicate by sharing memory, share memory by communicating."
+}
+
+// Opt returns an optimization truth.
+func Opt() string {
+	// Wisdom from ken.
+	return "If a program is too slow, it must have a loop."
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import (
+	"os"
+	"testing"
+)
+
+func init() {
+	os.Setenv("LC_ALL", "en")
+}
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory, share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
+
+func TestOpt(t *testing.T) {
+	opt := "If a program is too slow, it must have a loop."
+	if out := Opt(); out != opt {
+		t.Errorf("Opt() = %q, want %q", out, opt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.5.2.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.5.2.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8671f6fe772de54147fadc51e1bbee57cbad2962
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.5.2.txt
@@ -0,0 +1,98 @@
+rsc.io/quote@v1.5.2
+
+-- .mod --
+module "rsc.io/quote"
+
+require "rsc.io/sampler" v1.3.0
+-- .info --
+{"Version":"v1.5.2","Name":"c4d4236f92427c64bfbcf1cc3f8142ab18f30b22","Short":"c4d4236f9242","Time":"2018-02-14T15:44:20Z"}
+-- buggy/buggy_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buggy
+
+import "testing"
+
+func Test(t *testing.T) {
+	t.Fatal("buggy!")
+}
+-- go.mod --
+module "rsc.io/quote"
+
+require "rsc.io/sampler" v1.3.0
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+import "rsc.io/sampler"
+
+// Hello returns a greeting.
+func Hello() string {
+	return sampler.Hello()
+}
+
+// Glass returns a useful phrase for world travelers.
+func Glass() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return "I can eat glass and it doesn't hurt me."
+}
+
+// Go returns a Go proverb.
+func Go() string {
+	return "Don't communicate by sharing memory, share memory by communicating."
+}
+
+// Opt returns an optimization truth.
+func Opt() string {
+	// Wisdom from ken.
+	return "If a program is too slow, it must have a loop."
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import (
+	"os"
+	"testing"
+)
+
+func init() {
+	os.Setenv("LC_ALL", "en")
+}
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory, share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
+
+func TestOpt(t *testing.T) {
+	opt := "If a program is too slow, it must have a loop."
+	if out := Opt(); out != opt {
+		t.Errorf("Opt() = %q, want %q", out, opt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.5.3-pre1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.5.3-pre1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..212ef13aaf82503671712da0cb8b06ed8f1a4922
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v1.5.3-pre1.txt
@@ -0,0 +1,100 @@
+rsc.io/quote@v1.5.3-pre1
+
+-- .mod --
+module "rsc.io/quote"
+
+require "rsc.io/sampler" v1.3.0
+-- .info --
+{"Version":"v1.5.3-pre1","Name":"2473dfd877c95382420e47686aa9076bf58c79e0","Short":"2473dfd877c9","Time":"2018-06-28T00:32:53Z"}
+-- buggy/buggy_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package buggy
+
+import "testing"
+
+func Test(t *testing.T) {
+	t.Fatal("buggy!")
+}
+-- go.mod --
+module "rsc.io/quote"
+
+require "rsc.io/sampler" v1.3.0
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+import "rsc.io/sampler"
+
+// A CHANGE!
+
+// Hello returns a greeting.
+func Hello() string {
+	return sampler.Hello()
+}
+
+// Glass returns a useful phrase for world travelers.
+func Glass() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return "I can eat glass and it doesn't hurt me."
+}
+
+// Go returns a Go proverb.
+func Go() string {
+	return "Don't communicate by sharing memory, share memory by communicating."
+}
+
+// Opt returns an optimization truth.
+func Opt() string {
+	// Wisdom from ken.
+	return "If a program is too slow, it must have a loop."
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import (
+	"os"
+	"testing"
+)
+
+func init() {
+	os.Setenv("LC_ALL", "en")
+}
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory, share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
+
+func TestOpt(t *testing.T) {
+	opt := "If a program is too slow, it must have a loop."
+	if out := Opt(); out != opt {
+		t.Errorf("Opt() = %q, want %q", out, opt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v2.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v2.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e461ed4231e20b0ef009e985e869954e8ec46daf
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v2.0.0.txt
@@ -0,0 +1,86 @@
+rsc.io/quote@v2.0.0
+
+-- .mod --
+module "rsc.io/quote"
+
+require "rsc.io/sampler" v1.3.0
+-- .info --
+{"Version":"v0.0.0-20180709153244-fd906ed3b100","Name":"fd906ed3b100e47181ffa9ec36d82294525c9109","Short":"fd906ed3b100","Time":"2018-07-09T15:32:44Z"}
+-- go.mod --
+module "rsc.io/quote"
+
+require "rsc.io/sampler" v1.3.0
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+import "rsc.io/sampler"
+
+// Hello returns a greeting.
+func HelloV2() string {
+	return sampler.Hello()
+}
+
+// Glass returns a useful phrase for world travelers.
+func GlassV2() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return "I can eat glass and it doesn't hurt me."
+}
+
+// Go returns a Go proverb.
+func GoV2() string {
+	return "Don't communicate by sharing memory, share memory by communicating."
+}
+
+// Opt returns an optimization truth.
+func OptV2() string {
+	// Wisdom from ken.
+	return "If a program is too slow, it must have a loop."
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import (
+	"os"
+	"testing"
+)
+
+func init() {
+	os.Setenv("LC_ALL", "en")
+}
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory, share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
+
+func TestOpt(t *testing.T) {
+	opt := "If a program is too slow, it must have a loop."
+	if out := Opt(); out != opt {
+		t.Errorf("Opt() = %q, want %q", out, opt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v2_v2.0.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v2_v2.0.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d51128c46b9f602c74bf70a2e46c00b5101056de
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v2_v2.0.1.txt
@@ -0,0 +1,86 @@
+rsc.io/quote/v2@v2.0.1
+
+-- .mod --
+module rsc.io/quote/v2
+
+require rsc.io/sampler v1.3.0
+-- .info --
+{"Version":"v2.0.1","Name":"754f68430672776c84704e2d10209a6ec700cd64","Short":"754f68430672","Time":"2018-07-09T16:25:34Z"}
+-- go.mod --
+module rsc.io/quote/v2
+
+require rsc.io/sampler v1.3.0
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+import "rsc.io/sampler"
+
+// Hello returns a greeting.
+func HelloV2() string {
+	return sampler.Hello()
+}
+
+// Glass returns a useful phrase for world travelers.
+func GlassV2() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return "I can eat glass and it doesn't hurt me."
+}
+
+// Go returns a Go proverb.
+func GoV2() string {
+	return "Don't communicate by sharing memory, share memory by communicating."
+}
+
+// Opt returns an optimization truth.
+func OptV2() string {
+	// Wisdom from ken.
+	return "If a program is too slow, it must have a loop."
+}
+-- quote_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package quote
+
+import (
+	"os"
+	"testing"
+)
+
+func init() {
+	os.Setenv("LC_ALL", "en")
+}
+
+func TestHello(t *testing.T) {
+	hello := "Hello, world."
+	if out := Hello(); out != hello {
+		t.Errorf("Hello() = %q, want %q", out, hello)
+	}
+}
+
+func TestGlass(t *testing.T) {
+	glass := "I can eat glass and it doesn't hurt me."
+	if out := Glass(); out != glass {
+		t.Errorf("Glass() = %q, want %q", out, glass)
+	}
+}
+
+func TestGo(t *testing.T) {
+	go1 := "Don't communicate by sharing memory, share memory by communicating."
+	if out := Go(); out != go1 {
+		t.Errorf("Go() = %q, want %q", out, go1)
+	}
+}
+
+func TestOpt(t *testing.T) {
+	opt := "If a program is too slow, it must have a loop."
+	if out := Opt(); out != opt {
+		t.Errorf("Opt() = %q, want %q", out, opt)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v3_v3.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v3_v3.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0afe1f0519992e9b0e43fbfd15ae710938f4bc83
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_quote_v3_v3.0.0.txt
@@ -0,0 +1,45 @@
+rsc.io/quote/v3@v3.0.0
+
+-- .mod --
+module rsc.io/quote/v3
+
+require rsc.io/sampler v1.3.0
+
+-- .info --
+{"Version":"v3.0.0","Name":"d88915d7e77ed0fd35d0a022a2f244e2202fd8c8","Short":"d88915d7e77e","Time":"2018-07-09T15:34:46Z"}
+-- go.mod --
+module rsc.io/quote/v3
+
+require rsc.io/sampler v1.3.0
+
+-- quote.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quote collects pithy sayings.
+package quote // import "rsc.io/quote"
+
+import "rsc.io/sampler"
+
+// Hello returns a greeting.
+func HelloV3() string {
+	return sampler.Hello()
+}
+
+// Glass returns a useful phrase for world travelers.
+func GlassV3() string {
+	// See http://www.oocities.org/nodotus/hbglass.html.
+	return "I can eat glass and it doesn't hurt me."
+}
+
+// Go returns a Go proverb.
+func GoV3() string {
+	return "Don't communicate by sharing memory, share memory by communicating."
+}
+
+// Opt returns an optimization truth.
+func OptV3() string {
+	// Wisdom from ken.
+	return "If a program is too slow, it must have a loop."
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_sampler_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_sampler_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c4b6a71c88c405e927490bcfd356304bab1e2c1d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_sampler_v1.0.0.txt
@@ -0,0 +1,20 @@
+rsc.io/sampler@v1.0.0
+
+-- .mod --
+module "rsc.io/sampler"
+-- .info --
+{"Version":"v1.0.0","Name":"60bef405c52117ad21d2adb10872b95cf17f8fca","Short":"60bef405c521","Time":"2018-02-13T18:05:54Z"}
+-- go.mod --
+module "rsc.io/sampler"
+-- sampler.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sampler shows simple texts.
+package sampler // import "rsc.io/sampler"
+
+// Hello returns a greeting.
+func Hello() string {
+	return "Hello, world."
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_sampler_v1.2.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_sampler_v1.2.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..98c35fa238901321e24e54dd9cbdcb0f69702f22
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_sampler_v1.2.0.txt
@@ -0,0 +1,138 @@
+rsc.io/sampler@v1.2.0
+
+-- .mod --
+module "rsc.io/sampler"
+
+require "golang.org/x/text" v0.0.0-20170915032832-14c0d48ead0c
+-- .info --
+{"Version":"v1.2.0","Name":"25f24110b153246056eccc14a3a4cd81afaff586","Short":"25f24110b153","Time":"2018-02-13T18:13:45Z"}
+-- go.mod --
+module "rsc.io/sampler"
+
+require "golang.org/x/text" v0.0.0-20170915032832-14c0d48ead0c
+-- hello.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Translations by Google Translate.
+
+package sampler
+
+var hello = newText(`
+
+English: en: Hello, world.
+French: fr: Bonjour le monde.
+Spanish: es: Hola Mundo.
+
+`)
+-- hello_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sampler
+
+import (
+	"testing"
+
+	"golang.org/x/text/language"
+)
+
+var helloTests = []struct {
+	prefs []language.Tag
+	text  string
+}{
+	{
+		[]language.Tag{language.Make("en-US"), language.Make("fr")},
+		"Hello, world.",
+	},
+	{
+		[]language.Tag{language.Make("fr"), language.Make("en-US")},
+		"Bonjour la monde.",
+	},
+}
+
+func TestHello(t *testing.T) {
+	for _, tt := range helloTests {
+		text := Hello(tt.prefs...)
+		if text != tt.text {
+			t.Errorf("Hello(%v) = %q, want %q", tt.prefs, text, tt.text)
+		}
+	}
+}
+-- sampler.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sampler shows simple texts.
+package sampler // import "rsc.io/sampler"
+
+import (
+	"os"
+	"strings"
+
+	"golang.org/x/text/language"
+)
+
+// DefaultUserPrefs returns the default user language preferences.
+// It consults the $LC_ALL, $LC_MESSAGES, and $LANG environment
+// variables, in that order.
+func DefaultUserPrefs() []language.Tag {
+	var prefs []language.Tag
+	for _, k := range []string{"LC_ALL", "LC_MESSAGES", "LANG"} {
+		if env := os.Getenv(k); env != "" {
+			prefs = append(prefs, language.Make(env))
+		}
+	}
+	return prefs
+}
+
+// Hello returns a localized greeting.
+// If no prefs are given, Hello uses DefaultUserPrefs.
+func Hello(prefs ...language.Tag) string {
+	if len(prefs) == 0 {
+		prefs = DefaultUserPrefs()
+	}
+	return hello.find(prefs)
+}
+
+// A text is a localized text.
+type text struct {
+	byTag   map[string]string
+	matcher language.Matcher
+}
+
+// newText creates a new localized text, given a list of translations.
+func newText(s string) *text {
+	t := &text{
+		byTag: make(map[string]string),
+	}
+	var tags []language.Tag
+	for _, line := range strings.Split(s, "\n") {
+		line = strings.TrimSpace(line)
+		if line == "" {
+			continue
+		}
+		f := strings.Split(line, ": ")
+		if len(f) != 3 {
+			continue
+		}
+		tag := language.Make(f[1])
+		tags = append(tags, tag)
+		t.byTag[tag.String()] = f[2]
+	}
+	t.matcher = language.NewMatcher(tags)
+	return t
+}
+
+// find finds the text to use for the given language tag preferences.
+func (t *text) find(prefs []language.Tag) string {
+	tag, _, _ := t.matcher.Match(prefs...)
+	s := t.byTag[tag.String()]
+	if strings.HasPrefix(s, "RTL ") {
+		s = "\u200F" + strings.TrimPrefix(s, "RTL ") + "\u200E"
+	}
+	return s
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_sampler_v1.2.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_sampler_v1.2.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7982cccea100c0a523aa9e668ed45aa2d6dfada6
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_sampler_v1.2.1.txt
@@ -0,0 +1,134 @@
+generated by ./addmod.bash rsc.io/sampler@v1.2.1
+
+-- .mod --
+module "rsc.io/sampler"
+
+require "golang.org/x/text" v0.0.0-20170915032832-14c0d48ead0c
+-- .info --
+{"Version":"v1.2.1","Name":"cac3af4f8a0ab40054fa6f8d423108a63a1255bb","Short":"cac3af4f8a0a","Time":"2018-02-13T18:16:22Z"}
+-- hello.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Translations by Google Translate.
+
+package sampler
+
+var hello = newText(`
+
+English: en: Hello, world.
+French: fr: Bonjour le monde.
+Spanish: es: Hola Mundo.
+
+`)
+-- hello_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sampler
+
+import (
+	"testing"
+
+	"golang.org/x/text/language"
+)
+
+var helloTests = []struct {
+	prefs []language.Tag
+	text  string
+}{
+	{
+		[]language.Tag{language.Make("en-US"), language.Make("fr")},
+		"Hello, world.",
+	},
+	{
+		[]language.Tag{language.Make("fr"), language.Make("en-US")},
+		"Bonjour le monde.",
+	},
+}
+
+func TestHello(t *testing.T) {
+	for _, tt := range helloTests {
+		text := Hello(tt.prefs...)
+		if text != tt.text {
+			t.Errorf("Hello(%v) = %q, want %q", tt.prefs, text, tt.text)
+		}
+	}
+}
+-- sampler.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sampler shows simple texts.
+package sampler // import "rsc.io/sampler"
+
+import (
+	"os"
+	"strings"
+
+	"golang.org/x/text/language"
+)
+
+// DefaultUserPrefs returns the default user language preferences.
+// It consults the $LC_ALL, $LC_MESSAGES, and $LANG environment
+// variables, in that order.
+func DefaultUserPrefs() []language.Tag {
+	var prefs []language.Tag
+	for _, k := range []string{"LC_ALL", "LC_MESSAGES", "LANG"} {
+		if env := os.Getenv(k); env != "" {
+			prefs = append(prefs, language.Make(env))
+		}
+	}
+	return prefs
+}
+
+// Hello returns a localized greeting.
+// If no prefs are given, Hello uses DefaultUserPrefs.
+func Hello(prefs ...language.Tag) string {
+	if len(prefs) == 0 {
+		prefs = DefaultUserPrefs()
+	}
+	return hello.find(prefs)
+}
+
+// A text is a localized text.
+type text struct {
+	byTag   map[string]string
+	matcher language.Matcher
+}
+
+// newText creates a new localized text, given a list of translations.
+func newText(s string) *text {
+	t := &text{
+		byTag: make(map[string]string),
+	}
+	var tags []language.Tag
+	for _, line := range strings.Split(s, "\n") {
+		line = strings.TrimSpace(line)
+		if line == "" {
+			continue
+		}
+		f := strings.Split(line, ": ")
+		if len(f) != 3 {
+			continue
+		}
+		tag := language.Make(f[1])
+		tags = append(tags, tag)
+		t.byTag[tag.String()] = f[2]
+	}
+	t.matcher = language.NewMatcher(tags)
+	return t
+}
+
+// find finds the text to use for the given language tag preferences.
+func (t *text) find(prefs []language.Tag) string {
+	tag, _, _ := t.matcher.Match(prefs...)
+	s := t.byTag[tag.String()]
+	if strings.HasPrefix(s, "RTL ") {
+		s = "\u200F" + strings.TrimPrefix(s, "RTL ") + "\u200E"
+	}
+	return s
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_sampler_v1.3.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_sampler_v1.3.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..febe51fd9a93358f31e53637410521a4bc14e5c7
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_sampler_v1.3.0.txt
@@ -0,0 +1,202 @@
+rsc.io/sampler@v1.3.0
+
+-- .mod --
+module "rsc.io/sampler"
+
+require "golang.org/x/text" v0.0.0-20170915032832-14c0d48ead0c
+-- .info --
+{"Version":"v1.3.0","Name":"0cc034b51e57ed7832d4c67d526f75a900996e5c","Short":"0cc034b51e57","Time":"2018-02-13T19:05:03Z"}
+-- glass.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Translations from Frank da Cruz, Ethan Mollick, and many others.
+// See http://kermitproject.org/utf8.html.
+// http://www.oocities.org/nodotus/hbglass.html
+// https://en.wikipedia.org/wiki/I_Can_Eat_Glass
+
+package sampler
+
+var glass = newText(`
+
+English: en: I can eat glass and it doesn't hurt me.
+French: fr: Je peux manger du verre, ça ne me fait pas mal.
+Spanish: es: Puedo comer vidrio, no me hace daño.
+
+`)
+-- glass_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sampler
+
+import (
+	"testing"
+
+	"golang.org/x/text/language"
+	_ "rsc.io/testonly"
+)
+
+var glassTests = []struct {
+	prefs []language.Tag
+	text  string
+}{
+	{
+		[]language.Tag{language.Make("en-US"), language.Make("fr")},
+		"I can eat glass and it doesn't hurt me.",
+	},
+	{
+		[]language.Tag{language.Make("fr"), language.Make("en-US")},
+		"Je peux manger du verre, ça ne me fait pas mal.",
+	},
+}
+
+func TestGlass(t *testing.T) {
+	for _, tt := range glassTests {
+		text := Glass(tt.prefs...)
+		if text != tt.text {
+			t.Errorf("Glass(%v) = %q, want %q", tt.prefs, text, tt.text)
+		}
+	}
+}
+-- go.mod --
+module "rsc.io/sampler"
+
+require "golang.org/x/text" v0.0.0-20170915032832-14c0d48ead0c
+-- hello.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Translations by Google Translate.
+
+package sampler
+
+var hello = newText(`
+
+English: en: Hello, world.
+French: fr: Bonjour le monde.
+Spanish: es: Hola Mundo.
+
+`)
+-- hello_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sampler
+
+import (
+	"testing"
+
+	"golang.org/x/text/language"
+)
+
+var helloTests = []struct {
+	prefs []language.Tag
+	text  string
+}{
+	{
+		[]language.Tag{language.Make("en-US"), language.Make("fr")},
+		"Hello, world.",
+	},
+	{
+		[]language.Tag{language.Make("fr"), language.Make("en-US")},
+		"Bonjour le monde.",
+	},
+}
+
+func TestHello(t *testing.T) {
+	for _, tt := range helloTests {
+		text := Hello(tt.prefs...)
+		if text != tt.text {
+			t.Errorf("Hello(%v) = %q, want %q", tt.prefs, text, tt.text)
+		}
+	}
+}
+-- sampler.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sampler shows simple texts.
+package sampler // import "rsc.io/sampler"
+
+import (
+	"os"
+	"strings"
+
+	"golang.org/x/text/language"
+)
+
+// DefaultUserPrefs returns the default user language preferences.
+// It consults the $LC_ALL, $LC_MESSAGES, and $LANG environment
+// variables, in that order.
+func DefaultUserPrefs() []language.Tag {
+	var prefs []language.Tag
+	for _, k := range []string{"LC_ALL", "LC_MESSAGES", "LANG"} {
+		if env := os.Getenv(k); env != "" {
+			prefs = append(prefs, language.Make(env))
+		}
+	}
+	return prefs
+}
+
+// Hello returns a localized greeting.
+// If no prefs are given, Hello uses DefaultUserPrefs.
+func Hello(prefs ...language.Tag) string {
+	if len(prefs) == 0 {
+		prefs = DefaultUserPrefs()
+	}
+	return hello.find(prefs)
+}
+
+// Glass returns a localized silly phrase.
+// If no prefs are given, Glass uses DefaultUserPrefs.
+func Glass(prefs ...language.Tag) string {
+	if len(prefs) == 0 {
+		prefs = DefaultUserPrefs()
+	}
+	return glass.find(prefs)
+}
+
+// A text is a localized text.
+type text struct {
+	byTag   map[string]string
+	matcher language.Matcher
+}
+
+// newText creates a new localized text, given a list of translations.
+func newText(s string) *text {
+	t := &text{
+		byTag: make(map[string]string),
+	}
+	var tags []language.Tag
+	for _, line := range strings.Split(s, "\n") {
+		line = strings.TrimSpace(line)
+		if line == "" {
+			continue
+		}
+		f := strings.Split(line, ": ")
+		if len(f) != 3 {
+			continue
+		}
+		tag := language.Make(f[1])
+		tags = append(tags, tag)
+		t.byTag[tag.String()] = f[2]
+	}
+	t.matcher = language.NewMatcher(tags)
+	return t
+}
+
+// find finds the text to use for the given language tag preferences.
+func (t *text) find(prefs []language.Tag) string {
+	tag, _, _ := t.matcher.Match(prefs...)
+	s := t.byTag[tag.String()]
+	if strings.HasPrefix(s, "RTL ") {
+		s = "\u200F" + strings.TrimPrefix(s, "RTL ") + "\u200E"
+	}
+	return s
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_sampler_v1.3.1.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_sampler_v1.3.1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a293f108696ea3e114e3239567d570bb571b8904
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_sampler_v1.3.1.txt
@@ -0,0 +1,201 @@
+rsc.io/sampler@v1.3.1
+
+-- .mod --
+module "rsc.io/sampler"
+
+require "golang.org/x/text" v0.0.0-20170915032832-14c0d48ead0c
+-- .info --
+{"Version":"v1.3.1","Name":"f545d0289d06e2add4556ea6a15fc4938014bf87","Short":"f545d0289d06","Time":"2018-02-14T16:34:12Z"}
+-- glass.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Translations from Frank da Cruz, Ethan Mollick, and many others.
+// See http://kermitproject.org/utf8.html.
+// http://www.oocities.org/nodotus/hbglass.html
+// https://en.wikipedia.org/wiki/I_Can_Eat_Glass
+
+package sampler
+
+var glass = newText(`
+
+English: en: I can eat glass and it doesn't hurt me.
+French: fr: Je peux manger du verre, ça ne me fait pas mal.
+Spanish: es: Puedo comer vidrio, no me hace daño.
+
+`)
+-- glass_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sampler
+
+import (
+	"testing"
+
+	"golang.org/x/text/language"
+)
+
+var glassTests = []struct {
+	prefs []language.Tag
+	text  string
+}{
+	{
+		[]language.Tag{language.Make("en-US"), language.Make("fr")},
+		"I can eat glass and it doesn't hurt me.",
+	},
+	{
+		[]language.Tag{language.Make("fr"), language.Make("en-US")},
+		"Je peux manger du verre, ça ne me fait pas mal.",
+	},
+}
+
+func TestGlass(t *testing.T) {
+	for _, tt := range glassTests {
+		text := Glass(tt.prefs...)
+		if text != tt.text {
+			t.Errorf("Glass(%v) = %q, want %q", tt.prefs, text, tt.text)
+		}
+	}
+}
+-- go.mod --
+module "rsc.io/sampler"
+
+require "golang.org/x/text" v0.0.0-20170915032832-14c0d48ead0c
+-- hello.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Translations by Google Translate.
+
+package sampler
+
+var hello = newText(`
+
+English: en: Hello, world.
+French: fr: Bonjour le monde.
+Spanish: es: Hola Mundo.
+
+`)
+-- hello_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sampler
+
+import (
+	"testing"
+
+	"golang.org/x/text/language"
+)
+
+var helloTests = []struct {
+	prefs []language.Tag
+	text  string
+}{
+	{
+		[]language.Tag{language.Make("en-US"), language.Make("fr")},
+		"Hello, world.",
+	},
+	{
+		[]language.Tag{language.Make("fr"), language.Make("en-US")},
+		"Bonjour le monde.",
+	},
+}
+
+func TestHello(t *testing.T) {
+	for _, tt := range helloTests {
+		text := Hello(tt.prefs...)
+		if text != tt.text {
+			t.Errorf("Hello(%v) = %q, want %q", tt.prefs, text, tt.text)
+		}
+	}
+}
+-- sampler.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sampler shows simple texts in a variety of languages.
+package sampler // import "rsc.io/sampler"
+
+import (
+	"os"
+	"strings"
+
+	"golang.org/x/text/language"
+)
+
+// DefaultUserPrefs returns the default user language preferences.
+// It consults the $LC_ALL, $LC_MESSAGES, and $LANG environment
+// variables, in that order.
+func DefaultUserPrefs() []language.Tag {
+	var prefs []language.Tag
+	for _, k := range []string{"LC_ALL", "LC_MESSAGES", "LANG"} {
+		if env := os.Getenv(k); env != "" {
+			prefs = append(prefs, language.Make(env))
+		}
+	}
+	return prefs
+}
+
+// Hello returns a localized greeting.
+// If no prefs are given, Hello uses DefaultUserPrefs.
+func Hello(prefs ...language.Tag) string {
+	if len(prefs) == 0 {
+		prefs = DefaultUserPrefs()
+	}
+	return hello.find(prefs)
+}
+
+// Glass returns a localized silly phrase.
+// If no prefs are given, Glass uses DefaultUserPrefs.
+func Glass(prefs ...language.Tag) string {
+	if len(prefs) == 0 {
+		prefs = DefaultUserPrefs()
+	}
+	return glass.find(prefs)
+}
+
+// A text is a localized text.
+type text struct {
+	byTag   map[string]string
+	matcher language.Matcher
+}
+
+// newText creates a new localized text, given a list of translations.
+func newText(s string) *text {
+	t := &text{
+		byTag: make(map[string]string),
+	}
+	var tags []language.Tag
+	for _, line := range strings.Split(s, "\n") {
+		line = strings.TrimSpace(line)
+		if line == "" {
+			continue
+		}
+		f := strings.Split(line, ": ")
+		if len(f) != 3 {
+			continue
+		}
+		tag := language.Make(f[1])
+		tags = append(tags, tag)
+		t.byTag[tag.String()] = f[2]
+	}
+	t.matcher = language.NewMatcher(tags)
+	return t
+}
+
+// find finds the text to use for the given language tag preferences.
+func (t *text) find(prefs []language.Tag) string {
+	tag, _, _ := t.matcher.Match(prefs...)
+	s := t.byTag[tag.String()]
+	if strings.HasPrefix(s, "RTL ") {
+		s = "\u200F" + strings.TrimPrefix(s, "RTL ") + "\u200E"
+	}
+	return s
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_sampler_v1.99.99.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_sampler_v1.99.99.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5036d20ab58b79421e0b2c8a3b449766f8f14925
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_sampler_v1.99.99.txt
@@ -0,0 +1,140 @@
+rsc.io/sampler@v1.99.99
+
+-- .mod --
+module "rsc.io/sampler"
+
+require "golang.org/x/text" v0.0.0-20170915032832-14c0d48ead0c
+-- .info --
+{"Version":"v1.99.99","Name":"732a3c400797d8835f2af34a9561f155bef85435","Short":"732a3c400797","Time":"2018-02-13T22:20:19Z"}
+-- go.mod --
+module "rsc.io/sampler"
+
+require "golang.org/x/text" v0.0.0-20170915032832-14c0d48ead0c
+-- hello.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Translations by Google Translate.
+
+package sampler
+
+var hello = newText(`
+
+English: en: 99 bottles of beer on the wall, 99 bottles of beer, ...
+
+`)
+-- hello_test.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sampler
+
+import (
+	"testing"
+
+	"golang.org/x/text/language"
+)
+
+var helloTests = []struct {
+	prefs []language.Tag
+	text  string
+}{
+	{
+		[]language.Tag{language.Make("en-US"), language.Make("fr")},
+		"Hello, world.",
+	},
+	{
+		[]language.Tag{language.Make("fr"), language.Make("en-US")},
+		"Bonjour le monde.",
+	},
+}
+
+func TestHello(t *testing.T) {
+	for _, tt := range helloTests {
+		text := Hello(tt.prefs...)
+		if text != tt.text {
+			t.Errorf("Hello(%v) = %q, want %q", tt.prefs, text, tt.text)
+		}
+	}
+}
+-- sampler.go --
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sampler shows simple texts.
+package sampler // import "rsc.io/sampler"
+
+import (
+	"os"
+	"strings"
+
+	"golang.org/x/text/language"
+)
+
+// DefaultUserPrefs returns the default user language preferences.
+// It consults the $LC_ALL, $LC_MESSAGES, and $LANG environment
+// variables, in that order.
+func DefaultUserPrefs() []language.Tag {
+	var prefs []language.Tag
+	for _, k := range []string{"LC_ALL", "LC_MESSAGES", "LANG"} {
+		if env := os.Getenv(k); env != "" {
+			prefs = append(prefs, language.Make(env))
+		}
+	}
+	return prefs
+}
+
+// Hello returns a localized greeting.
+// If no prefs are given, Hello uses DefaultUserPrefs.
+func Hello(prefs ...language.Tag) string {
+	if len(prefs) == 0 {
+		prefs = DefaultUserPrefs()
+	}
+	return hello.find(prefs)
+}
+
+func Glass() string {
+	return "I can eat glass and it doesn't hurt me."
+}
+
+// A text is a localized text.
+type text struct {
+	byTag   map[string]string
+	matcher language.Matcher
+}
+
+// newText creates a new localized text, given a list of translations.
+func newText(s string) *text {
+	t := &text{
+		byTag: make(map[string]string),
+	}
+	var tags []language.Tag
+	for _, line := range strings.Split(s, "\n") {
+		line = strings.TrimSpace(line)
+		if line == "" {
+			continue
+		}
+		f := strings.Split(line, ": ")
+		if len(f) != 3 {
+			continue
+		}
+		tag := language.Make(f[1])
+		tags = append(tags, tag)
+		t.byTag[tag.String()] = f[2]
+	}
+	t.matcher = language.NewMatcher(tags)
+	return t
+}
+
+// find finds the text to use for the given language tag preferences.
+func (t *text) find(prefs []language.Tag) string {
+	tag, _, _ := t.matcher.Match(prefs...)
+	s := t.byTag[tag.String()]
+	if strings.HasPrefix(s, "RTL ") {
+		s = "\u200F" + strings.TrimPrefix(s, "RTL ") + "\u200E"
+	}
+	return s
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_testonly_v1.0.0.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_testonly_v1.0.0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..dfb8ca24ec9ce4a07feb55db2e14b0889bf11363
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/mod/rsc.io_testonly_v1.0.0.txt
@@ -0,0 +1,9 @@
+rsc.io/testonly v1.0.0
+written by hand
+
+-- .mod --
+module rsc.io/testonly
+-- .info --
+{"Version":"v1.0.0"}
+-- testonly.go --
+package testonly
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/savedir.go b/platform/dbops/binaries/go/go/src/cmd/go/testdata/savedir.go
new file mode 100644
index 0000000000000000000000000000000000000000..bd42c3e485c43f3f007b65b984a5bbfd8768a772
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/savedir.go
@@ -0,0 +1,79 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+// Savedir archives a directory tree as a txtar archive printed to standard output.
+//
+// Usage:
+//
+//	go run savedir.go /path/to/dir >saved.txt
+//
+// Typically the tree is later extracted during a test with tg.extract("testdata/saved.txt").
+package main
+
+import (
+	"cmd/go/internal/str"
+	"flag"
+	"fmt"
+	"internal/txtar"
+	"io/fs"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+	"unicode/utf8"
+)
+
+func usage() {
+	fmt.Fprintf(os.Stderr, "usage: go run savedir.go dir >saved.txt\n")
+	os.Exit(2)
+}
+
+const goCmd = "vgo"
+
+func main() {
+	flag.Usage = usage
+	flag.Parse()
+	if flag.NArg() != 1 {
+		usage()
+	}
+
+	log.SetPrefix("savedir: ")
+	log.SetFlags(0)
+
+	dir := flag.Arg(0)
+
+	a := new(txtar.Archive)
+	dir = filepath.Clean(dir)
+	filepath.WalkDir(dir, func(path string, info fs.DirEntry, err error) error {
+		if path == dir {
+			return nil
+		}
+		name := info.Name()
+		if strings.HasPrefix(name, ".") {
+			if info.IsDir() {
+				return filepath.SkipDir
+			}
+			return nil
+		}
+		if !info.Type().IsRegular() {
+			return nil
+		}
+		data, err := os.ReadFile(path)
+		if err != nil {
+			log.Fatal(err)
+		}
+		if !utf8.Valid(data) {
+			log.Printf("%s: ignoring invalid UTF-8 data", path)
+			return nil
+		}
+		a.Files = append(a.Files, txtar.File{Name: str.TrimFilePathPrefix(path, dir), Data: data})
+		return nil
+	})
+
+	data := txtar.Format(a)
+	os.Stdout.Write(data)
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/README b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/README
new file mode 100644
index 0000000000000000000000000000000000000000..792a158760f6f50c16bcd740df8ba9c2d17b8bca
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/README
@@ -0,0 +1,425 @@
+This file is generated by 'go generate cmd/go'. DO NOT EDIT.
+
+This directory holds test scripts *.txt run during 'go test cmd/go'.
+To run a specific script foo.txt
+
+	go test cmd/go -run=Script/^foo$
+
+In general script files should have short names: a few words, not whole sentences.
+The first word should be the general category of behavior being tested,
+often the name of a go subcommand (list, build, test, ...) or concept (vendor, pattern).
+
+Each script is a text archive (go doc internal/txtar).
+The script begins with an actual command script to run
+followed by the content of zero or more supporting files to
+create in the script's temporary file system before it starts executing.
+
+As an example, run_hello.txt says:
+
+	# hello world
+	go run hello.go
+	stderr 'hello world'
+	! stdout .
+
+	-- hello.go --
+	package main
+	func main() { println("hello world") }
+
+Each script runs in a fresh temporary work directory tree, available to scripts as $WORK.
+Scripts also have access to other environment variables, including:
+
+	GOARCH=
+	GOCACHE=
+	GOEXE=
+	GOOS=
+	GOPATH=$WORK/gopath
+	GOPROXY=
+	GOROOT=
+	GOROOT_FINAL=
+	TESTGO_GOROOT=
+	HOME=/no-home
+	PATH=
+	TMPDIR=$WORK/tmp
+	GODEBUG=
+	devnull=
+	goversion=
+
+On Plan 9, the variables $path and $home are set instead of $PATH and $HOME.
+On Windows, the variables $USERPROFILE and $TMP are set instead of
+$HOME and $TMPDIR.
+
+The lines at the top of the script are a sequence of commands to be executed by
+a small script engine configured in ../../script_test.go (not the system shell).
+
+The scripts' supporting files are unpacked relative to $GOPATH/src
+(aka $WORK/gopath/src) and then the script begins execution in that directory as
+well. Thus the example above runs in $WORK/gopath/src with GOPATH=$WORK/gopath
+and $WORK/gopath/src/hello.go containing the listed contents.
+
+Each line of a script is parsed into a sequence of space-separated command
+words, with environment variable expansion within each word and # marking
+an end-of-line comment. Additional variables named ':' and '/' are expanded
+within script arguments (expanding to the value of os.PathListSeparator and
+os.PathSeparator respectively) but are not inherited in subprocess environments.
+
+Adding single quotes around text keeps spaces in that text from being treated
+as word separators and also disables environment variable expansion. Inside a
+single-quoted block of text, a repeated single quote indicates a literal single
+quote, as in:
+
+    'Don''t communicate by sharing memory.'
+
+A line beginning with # is a comment and conventionally explains what is being
+done or tested at the start of a new section of the script.
+
+Commands are executed one at a time, and errors are checked for each command;
+if any command fails unexpectedly, no subsequent commands in the script are
+executed. The command prefix ! indicates that the command on the rest of the
+line (typically go or a matching predicate) must fail instead of succeeding.
+The command prefix ? indicates that the command may or may not succeed, but the
+script should continue regardless.
+
+The command prefix [cond] indicates that the command on the rest of the line
+should only run when the condition is satisfied.
+
+A condition can be negated: [!root] means to run the rest of the line only if
+the user is not root. Multiple conditions may be given for a single command,
+for example, '[linux] [amd64] skip'. The command will run if all conditions are
+satisfied.
+
+When TestScript runs a script and the script fails, by default TestScript shows
+the execution of the most recent phase of the script (since the last # comment)
+and only shows the # comments for earlier phases. For example, here is a
+multi-phase script with a bug in it:
+
+	# GOPATH with p1 in d2, p2 in d2
+	env GOPATH=$WORK${/}d1${:}$WORK${/}d2
+
+	# build & install p1
+	env
+	go install -i p1
+	! stale p1
+	! stale p2
+
+	# modify p2 - p1 should appear stale
+	cp $WORK/p2x.go $WORK/d2/src/p2/p2.go
+	stale p1 p2
+
+	# build & install p1 again
+	go install -i p11
+	! stale p1
+	! stale p2
+
+	-- $WORK/d1/src/p1/p1.go --
+	package p1
+	import "p2"
+	func F() { p2.F() }
+	-- $WORK/d2/src/p2/p2.go --
+	package p2
+	func F() {}
+	-- $WORK/p2x.go --
+	package p2
+	func F() {}
+	func G() {}
+
+The bug is that the final phase installs p11 instead of p1. The test failure looks like:
+
+	$ go test -run=Script
+	--- FAIL: TestScript (3.75s)
+	    --- FAIL: TestScript/install_rebuild_gopath (0.16s)
+	        script_test.go:223:
+	            # GOPATH with p1 in d2, p2 in d2 (0.000s)
+	            # build & install p1 (0.087s)
+	            # modify p2 - p1 should appear stale (0.029s)
+	            # build & install p1 again (0.022s)
+	            > go install -i p11
+	            [stderr]
+	            can't load package: package p11: cannot find package "p11" in any of:
+	            	/Users/rsc/go/src/p11 (from $GOROOT)
+	            	$WORK/d1/src/p11 (from $GOPATH)
+	            	$WORK/d2/src/p11
+	            [exit status 1]
+	            FAIL: unexpected go command failure
+
+	        script_test.go:73: failed at testdata/script/install_rebuild_gopath.txt:15 in $WORK/gopath/src
+
+	FAIL
+	exit status 1
+	FAIL	cmd/go	4.875s
+	$
+
+Note that the commands in earlier phases have been hidden, so that the relevant
+commands are more easily found, and the elapsed time for a completed phase
+is shown next to the phase heading. To see the entire execution, use "go test -v",
+which also adds an initial environment dump to the beginning of the log.
+
+Note also that in reported output, the actual name of the per-script temporary directory
+has been consistently replaced with the literal string $WORK.
+
+The cmd/go test flag -testwork (which must appear on the "go test" command line after
+standard test flags) causes each test to log the name of its $WORK directory and other
+environment variable settings and also to leave that directory behind when it exits,
+for manual debugging of failing tests:
+
+	$ go test -run=Script -work
+	--- FAIL: TestScript (3.75s)
+	    --- FAIL: TestScript/install_rebuild_gopath (0.16s)
+	        script_test.go:223:
+	            WORK=/tmp/cmd-go-test-745953508/script-install_rebuild_gopath
+	            GOARCH=
+	            GOCACHE=/Users/rsc/Library/Caches/go-build
+	            GOOS=
+	            GOPATH=$WORK/gopath
+	            GOROOT=/Users/rsc/go
+	            HOME=/no-home
+	            TMPDIR=$WORK/tmp
+	            exe=
+
+	            # GOPATH with p1 in d2, p2 in d2 (0.000s)
+	            # build & install p1 (0.085s)
+	            # modify p2 - p1 should appear stale (0.030s)
+	            # build & install p1 again (0.019s)
+	            > go install -i p11
+	            [stderr]
+	            can't load package: package p11: cannot find package "p11" in any of:
+	            	/Users/rsc/go/src/p11 (from $GOROOT)
+	            	$WORK/d1/src/p11 (from $GOPATH)
+	            	$WORK/d2/src/p11
+	            [exit status 1]
+	            FAIL: unexpected go command failure
+
+	        script_test.go:73: failed at testdata/script/install_rebuild_gopath.txt:15 in $WORK/gopath/src
+
+	FAIL
+	exit status 1
+	FAIL	cmd/go	4.875s
+	$
+
+	$ WORK=/tmp/cmd-go-test-745953508/script-install_rebuild_gopath
+	$ cd $WORK/d1/src/p1
+	$ cat p1.go
+	package p1
+	import "p2"
+	func F() { p2.F() }
+	$
+
+The available commands are:
+cat files...
+	concatenate files and print to the script's stdout buffer
+
+
+cc args...
+	run the platform C compiler
+
+
+cd dir
+	change the working directory
+
+
+chmod perm paths...
+	change file mode bits
+
+	Changes the permissions of the named files or directories to
+	be equal to perm.
+	Only numerical permissions are supported.
+
+cmp [-q] file1 file2
+	compare files for differences
+
+	By convention, file1 is the actual data and file2 is the
+	expected data.
+	The command succeeds if the file contents are identical.
+	File1 can be 'stdout' or 'stderr' to compare the stdout or
+	stderr buffer from the most recent command.
+
+cmpenv [-q] file1 file2
+	compare files for differences, with environment expansion
+
+	By convention, file1 is the actual data and file2 is the
+	expected data.
+	The command succeeds if the file contents are identical
+	after substituting variables from the script environment.
+	File1 can be 'stdout' or 'stderr' to compare the script's
+	stdout or stderr buffer.
+
+cp src... dst
+	copy files to a target file or directory
+
+	src can include 'stdout' or 'stderr' to copy from the
+	script's stdout or stderr buffer.
+
+echo string...
+	display a line of text
+
+
+env [key[=value]...]
+	set or log the values of environment variables
+
+	With no arguments, print the script environment to the log.
+	Otherwise, add the listed key=value pairs to the environment
+	or print the listed keys.
+
+exec program [args...] [&]
+	run an executable program with arguments
+
+	Note that 'exec' does not terminate the script (unlike Unix
+	shells).
+
+exists [-readonly] [-exec] file...
+	check that files exist
+
+
+go [args...] [&]
+	run the 'go' program provided by the script host
+
+
+grep [-count=N] [-q] 'pattern' file
+	find lines in a file that match a pattern
+
+	The command succeeds if at least one match (or the exact
+	count, if given) is found.
+	The -q flag suppresses printing of matches.
+
+help [-v] name...
+	log help text for commands and conditions
+
+	To display help for a specific condition, enclose it in
+	brackets: 'help [amd64]'.
+	To display complete documentation when listing all commands,
+	pass the -v flag.
+
+mkdir path...
+	create directories, if they do not already exist
+
+	Unlike Unix mkdir, parent directories are always created if
+	needed.
+
+mv old new
+	rename a file or directory to a new path
+
+	OS-specific restrictions may apply when old and new are in
+	different directories.
+
+replace [old new]... file
+	replace strings in a file
+
+	The 'old' and 'new' arguments are unquoted as if in quoted
+	Go strings.
+
+rm path...
+	remove a file or directory
+
+	If the path is a directory, its contents are removed
+	recursively.
+
+skip [msg]
+	skip the current test
+
+
+sleep duration [&]
+	sleep for a specified duration
+
+	The duration must be given as a Go time.Duration string.
+
+stale target...
+	check that build targets are stale
+
+
+stderr [-count=N] [-q] 'pattern' file
+	find lines in the stderr buffer that match a pattern
+
+	The command succeeds if at least one match (or the exact
+	count, if given) is found.
+	The -q flag suppresses printing of matches.
+
+stdout [-count=N] [-q] 'pattern' file
+	find lines in the stdout buffer that match a pattern
+
+	The command succeeds if at least one match (or the exact
+	count, if given) is found.
+	The -q flag suppresses printing of matches.
+
+stop [msg]
+	stop execution of the script
+
+	The message is written to the script log, but no error is
+	reported from the script engine.
+
+symlink path -> target
+	create a symlink
+
+	Creates path as a symlink to target.
+	The '->' token (like in 'ls -l' output on Unix) is required.
+
+wait 
+	wait for completion of background commands
+
+	Waits for all background commands to complete.
+	The output (and any error) from each command is printed to
+	the log in the order in which the commands were started.
+	After the call to 'wait', the script's stdout and stderr
+	buffers contain the concatenation of the background
+	commands' outputs.
+
+
+
+The available conditions are:
+[GOARCH:*]
+	runtime.GOARCH == 
+[GODEBUG:*]
+	GODEBUG contains 
+[GOEXPERIMENT:*]
+	GOEXPERIMENT  is enabled
+[GOOS:*]
+	runtime.GOOS == 
+[abscc]
+	default $CC path is absolute and exists
+[asan]
+	GOOS/GOARCH supports -asan
+[buildmode:*]
+	go supports -buildmode=
+[case-sensitive]
+	$WORK filesystem is case-sensitive
+[cc:*]
+	go env CC =  (ignoring the go/env file)
+[cgo]
+	host CGO_ENABLED
+[cgolinkext]
+	platform requires external linking for cgo
+[compiler:*]
+	runtime.Compiler == 
+[cross]
+	cmd/go GOOS/GOARCH != GOHOSTOS/GOHOSTARCH
+[exec:*]
+	 names an executable in the test binary's PATH
+[fuzz]
+	GOOS/GOARCH supports -fuzz
+[fuzz-instrumented]
+	GOOS/GOARCH supports -fuzz with instrumentation
+[git]
+	the 'git' executable exists and provides the standard CLI
+[go-builder]
+	GO_BUILDER_NAME is non-empty
+[link]
+	testenv.HasLink()
+[mismatched-goroot]
+	test's GOROOT_FINAL does not match the real GOROOT
+[msan]
+	GOOS/GOARCH supports -msan
+[mustlinkext]
+	platform always requires external linking
+[net:*]
+	can connect to external network host <suffix>
+[race]
+	GOOS/GOARCH supports -race
+[root]
+	os.Geteuid() == 0
+[short]
+	testing.Short()
+[symlink]
+	testenv.HasSymlink()
+[trimpath]
+	test binary was built with -trimpath
+[verbose]
+	testing.Verbose()
+
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/autocgo.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/autocgo.txt
new file mode 100644
index 0000000000000000000000000000000000000000..586c80251da3196433bcb4adce706b59cbd54ec6
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/autocgo.txt
@@ -0,0 +1,29 @@
+# Test automatic setting of CGO_ENABLED based on $CC and what's in $PATH.
+
+[!cgo] skip
+[cross] skip
+
+# Assume we're on a system that can enable cgo normally.
+env CGO_ENABLED=
+go env CGO_ENABLED
+stdout 1
+
+# Clearing CC and removing everything but Go from the PATH should usually
+# disable cgo: no C compiler anymore (unless the baked-in defaultCC is an
+# absolute path and exists).
+env CC=
+env PATH=$GOROOT/bin
+go env CGO_ENABLED
+[!abscc] stdout 0
+[abscc] stdout 1
+
+# Setting CC should re-enable cgo.
+env CC=cc
+go env CGO_ENABLED
+stdout 1
+
+# So should setting CGO_ENABLED.
+env CC=
+env CGO_ENABLED=1
+go env CGO_ENABLED
+stdout 1
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/badgo.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/badgo.txt
new file mode 100644
index 0000000000000000000000000000000000000000..cf4e2584d656508ef0505e568b1d6b0866371160
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/badgo.txt
@@ -0,0 +1,50 @@
+go get example.net/badgo@v1.0.0
+go get example.net/badgo@v1.1.0
+go get example.net/badgo@v1.2.0
+go get example.net/badgo@v1.3.0
+go get example.net/badgo@v1.4.0
+go get example.net/badgo@v1.5.0
+! go get example.net/badgo@v1.6.0
+stderr 'invalid go version .X.Y.: must match format 1.23'
+
+-- go.mod --
+module m
+
+replace (
+	example.net/badgo v1.0.0 => ./v1.0.0
+	example.net/badgo v1.1.0 => ./v1.1.0
+	example.net/badgo v1.2.0 => ./v1.2.0
+	example.net/badgo v1.3.0 => ./v1.3.0
+	example.net/badgo v1.4.0 => ./v1.4.0
+	example.net/badgo v1.5.0 => ./v1.5.0
+	example.net/badgo v1.6.0 => ./v1.6.0
+)
+
+-- v1.0.0/go.mod --
+module example.net/badgo
+go 1.17.0
+
+-- v1.1.0/go.mod --
+module example.net/badgo
+go 1.17rc2
+
+-- v1.2.0/go.mod --
+module example.net/badgo
+go 1.17.1
+
+-- v1.3.0/go.mod --
+module example.net/badgo
+go v1.17.0
+
+-- v1.4.0/go.mod --
+module example.net/badgo
+go v1.17.0-rc.2
+
+-- v1.5.0/go.mod --
+module example.net/badgo
+go v1.17.1
+
+-- v1.6.0/go.mod --
+module example.net/badgo
+go X.Y
+
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/bug.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/bug.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f64fb85bdc42e037f6c8ec7b894b3baa728a78f6
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/bug.txt
@@ -0,0 +1,58 @@
+# Verify that go bug creates the appropriate URL issue body
+
+[!GOOS:linux] skip
+[short] skip
+
+go install
+go build -o $TMPDIR/go ./go
+env BROWSER=$GOPATH/bin/browser PATH=$TMPDIR:$PATH
+go bug
+exists $TMPDIR/browser
+grep '^go version' $TMPDIR/browser
+grep '^GOROOT/bin/go version: go version' $TMPDIR/browser
+grep '^GOROOT/bin/go tool compile -V: compile version' $TMPDIR/browser
+grep '^uname -sr: Linux' $TMPDIR/browser
+
+-- go.mod --
+module browser
+
+-- main.go --
+package main
+
+import (
+	"fmt"
+	"net/url"
+	"os"
+	"path/filepath"
+)
+
+func main() {
+	u, err := url.Parse(os.Args[1])
+	if err != nil {
+		panic(err)
+	}
+	body, err := url.PathUnescape(u.Query().Get("body"))
+	if err != nil {
+		panic(err)
+	}
+	out := filepath.Join(os.TempDir(), "browser")
+	f, err := os.Create(out)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Fprintln(f, body)
+	if err := f.Close(); err != nil {
+		panic(err)
+	}
+}
+
+-- go/main.go --
+package main
+
+import (
+    "os"
+)
+
+func main() {
+    os.Exit(1)
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_GOTMPDIR.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_GOTMPDIR.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4c9129ec45c5ddc58fd2fd7dd892a74ed72ae2c9
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_GOTMPDIR.txt
@@ -0,0 +1,50 @@
+# Set GOCACHE to a clean directory to ensure that 'go build' has work to report.
+[!GOOS:windows] env GOCACHE=$WORK/gocache
+[GOOS:windows] env GOCACHE=$WORK\gocache
+
+# 'go build' should use GOTMPDIR if set.
+[!GOOS:windows] env GOTMPDIR=$WORK/my-favorite-tmpdir
+[GOOS:windows] env GOTMPDIR=$WORK\my-favorite-tmpdir
+mkdir $GOTMPDIR
+go build -x hello.go
+stderr ^WORK=.*my-favorite-tmpdir
+
+# Make GOTMPDIR a regular file. This prevents the creation of work directories,
+# so we can check that certain commands don't create them.
+# This simulates running on a full disk or a read-only volume.
+rm $GOTMPDIR
+cp hello.go $GOTMPDIR # any file will do
+
+# 'go build' should fail if GOTMPDIR is read-only.
+! go build -x .
+stderr '^go: creating work dir: \w+ '$GOTMPDIR
+
+# 'go list' should only fail if it needs to build something.
+go list -x .
+! stderr 'creating work dir'
+stdout m
+go list -m all
+stdout m
+! go list -x -export .
+stderr '^go: creating work dir: \w+ '$GOTMPDIR
+
+# 'go clean -cache' and 'go clean -modcache' should not fail.
+go clean -x -cache
+! stderr 'creating work dir'
+go clean -x -modcache
+! stderr 'creating work dir'
+
+# 'go env' should not fail for specific variables.
+# Without arguments, it needs to initialize a builder to load cgo flags, and
+# that uses a temporary directory.
+! go env
+stderr '^go: creating work dir: \w+ '$GOTMPDIR
+go env GOROOT
+
+-- go.mod --
+module m
+
+go 1.15
+-- hello.go --
+package main
+func main() { println("hello") }
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_acl_windows.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_acl_windows.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2cb60e0ffdcfdba14cfe820db7d29d271751796b
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_acl_windows.txt
@@ -0,0 +1,44 @@
+[!GOOS:windows] stop
+[!exec:icacls] skip
+[!exec:powershell] skip
+
+# Create $WORK\guest and give the Guests group full access.
+# Files created within that directory will have different security attributes by default.
+mkdir $WORK\guest
+exec icacls $WORK\guest /grant '*S-1-5-32-546:(oi)(ci)f'
+
+env TMP=$WORK\guest
+env TEMP=$WORK\guest
+
+# Build a binary using the guest directory as an intermediate
+cd TestACL
+go build -o main.exe main.go
+# Build the same binary, but write it to the guest directory.
+go build -o $TMP\main.exe main.go
+
+# Read ACLs for the files.
+exec powershell -Command 'Get-Acl main.exe | Select -expand AccessToString'
+cp stdout $WORK\exe-acl.txt
+exec powershell -Command 'Get-Acl main.go | Select -expand AccessToString'
+cp stdout $WORK\src-acl.txt
+cd $TMP
+exec powershell -Command 'Get-Acl main.exe | Select -expand AccessToString'
+cp stdout $WORK\guest-acl.txt
+
+cd $WORK
+
+# The executable written to the source directory should have the same ACL as the source file.
+cmp $WORK\exe-acl.txt $WORK\src-acl.txt
+
+# The file written to the guest-allowed directory should give Guests control.
+grep 'BUILTIN\\Guests\s+Allow' $WORK\guest-acl.txt
+
+# The file written to the ordinary directory should not.
+! grep 'BUILTIN\\Guests\s+Allow' $WORK\exe-acl.txt
+
+
+-- TestACL/go.mod --
+module TestACL
+-- TestACL/main.go --
+package main
+func main() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_arm.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_arm.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ff2a36456e406d4444ee82bd14bc14ce20806bfd
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_arm.txt
@@ -0,0 +1,13 @@
+[short] skip 'skipping cross-compile in short mode'
+
+env GOARCH=arm
+env GOOS=linux
+env GOARM=5
+
+go build hello.go
+! stderr 'unable to find math.a'
+
+-- hello.go --
+package main
+
+func main() {}
\ No newline at end of file
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_buildvcs_auto.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_buildvcs_auto.txt
new file mode 100644
index 0000000000000000000000000000000000000000..cfd5d8243b8e2007f4d32858477e5220d2215819
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_buildvcs_auto.txt
@@ -0,0 +1,91 @@
+# Regression test for https://go.dev/issue/51748: by default, 'go build' should
+# not attempt to stamp VCS information when the VCS tool is not present.
+
+[short] skip
+[!git] skip
+
+cd sub
+exec git init .
+exec git config user.name 'Nameless Gopher'
+exec git config user.email 'nobody@golang.org'
+exec git add sub.go
+exec git commit -m 'initial state'
+cd ..
+
+exec git init
+exec git config user.name 'Nameless Gopher'
+exec git config user.email 'nobody@golang.org'
+exec git submodule add ./sub
+exec git add go.mod example.go
+exec git commit -m 'initial state'
+
+
+# Control case: with a git binary in $PATH,
+# 'go build' on a package in the same git repo
+# succeeds and stamps VCS metadata by default.
+
+go build -o example.exe .
+go version -m example.exe
+stdout '^\tbuild\tvcs=git$'
+stdout '^\tbuild\tvcs.modified=false$'
+
+
+# Building a binary from a different (nested) VCS repo should not stamp VCS
+# info. It should be an error if VCS stamps are requested explicitly with
+# '-buildvcs' (since we know the VCS metadata exists), but not an error
+# with '-buildvcs=auto'.
+
+go build -o sub.exe ./sub
+go version -m sub.exe
+! stdout '^\tbuild\tvcs'
+
+! go build -buildvcs -o sub.exe ./sub
+stderr '\Aerror obtaining VCS status: main package is in repository ".*" but current directory is in repository ".*"\n\tUse -buildvcs=false to disable VCS stamping.\n\z'
+
+cd ./sub
+go build -o sub.exe .
+go version -m sub.exe
+! stdout '^\tbuild\tvcs'
+
+! go build -buildvcs -o sub.exe .
+stderr '\Aerror obtaining VCS status: main module is in repository ".*" but current directory is in repository ".*"\n\tUse -buildvcs=false to disable VCS stamping.\n\z'
+cd ..
+
+
+# After removing 'git' from $PATH, 'go build -buildvcs' should fail...
+
+env PATH=
+env path=
+! go build -buildvcs -o example.exe .
+stderr 'go: missing Git command\. See https://golang\.org/s/gogetcmd$'
+
+# ...but by default we should omit VCS metadata when the tool is missing.
+
+go build -o example.exe .
+go version -m example.exe
+! stdout '^\tbuild\tvcs'
+
+# The default behavior can be explicitly set with '-buildvcs=auto'.
+
+go build -buildvcs=auto -o example.exe .
+go version -m example.exe
+! stdout '^\tbuild\tvcs'
+
+# Other flag values should be rejected with a useful error message.
+
+! go build -buildvcs=hg -o example.exe .
+stderr '\Ainvalid boolean value "hg" for -buildvcs: value is neither ''auto'' nor a valid bool\nusage: go build .*\nRun ''go help build'' for details.\n\z'
+
+
+-- go.mod --
+module example
+
+go 1.18
+-- example.go --
+package main
+
+func main() {}
+-- sub/sub.go --
+package main
+
+func main() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_arch_mode.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_arch_mode.txt
new file mode 100644
index 0000000000000000000000000000000000000000..931827fbde62334cb8da3aa5c4bf95204eb80e1e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_arch_mode.txt
@@ -0,0 +1,21 @@
+# Issue 9737: verify that GOARM affects the computed build ID
+
+[short] skip
+
+# arm
+env GOOS=linux
+env GOARCH=arm
+env GOARM=5
+go install mycmd
+env GOARM=7
+stale mycmd
+
+
+-- go.mod --
+module mycmd
+
+go 1.16
+-- x.go --
+package main
+
+func main() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_compile.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_compile.txt
new file mode 100644
index 0000000000000000000000000000000000000000..64b391f9aa4651dcac1826be4a74f95f7bc9d2f7
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_compile.txt
@@ -0,0 +1,21 @@
+env GO111MODULE=off
+[short] skip
+
+# Set up fresh GOCACHE.
+env GOCACHE=$WORK/gocache
+mkdir $GOCACHE
+
+# Building trivial non-main package should run compiler the first time.
+go build -x lib.go
+stderr '(compile|gccgo)( |\.exe).*lib\.go'
+
+# ... but not again ...
+go build -x lib.go
+! stderr '(compile|gccgo)( |\.exe).*lib\.go'
+
+# ... unless we use -a.
+go build -a -x lib.go
+stderr '(compile|gccgo)( |\.exe)'
+
+-- lib.go --
+package lib
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_disabled.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_disabled.txt
new file mode 100644
index 0000000000000000000000000000000000000000..cb1a7558fca71f3edab009c32329376f45b7a45e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_disabled.txt
@@ -0,0 +1,50 @@
+# The build cache is required to build anything. It also may be needed to
+# initialize the build system, which is needed for commands like 'go env'.
+# However, there are lots of commands the cache is not needed for, and we
+# shouldn't require it when it won't be used.
+#
+# TODO(golang.org/issue/39882): commands below should work, too.
+# * go clean -modcache
+# * go env
+# * go fix
+# * go fmt
+# * go generate
+# * go get
+# * go list (without -export or -compiled)
+
+env GOCACHE=off
+
+# Commands that don't completely load packages should work.
+go doc fmt
+stdout Printf
+
+! go tool compile -h
+stderr usage:
+
+go version
+stdout '^go version'
+
+
+# Module commands that don't load packages should work.
+go mod init m
+exists go.mod
+
+go mod edit -require rsc.io/quote@v1.5.2
+
+go mod download rsc.io/quote
+
+go mod graph
+stdout rsc.io/quote
+
+go mod verify
+
+
+# Commands that load but don't build packages should work.
+go fmt .
+
+go doc .
+
+-- main.go --
+package main
+
+func main() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_gomips.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_gomips.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0cbf16a923b8b2c129a9a7149afc761755e1f207
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_gomips.txt
@@ -0,0 +1,40 @@
+env GO111MODULE=off
+[short] skip # rebuilds std for mips
+
+# Set up fresh GOCACHE.
+env GOCACHE=$WORK/gocache
+mkdir $GOCACHE
+
+# Building for mipsle without setting GOMIPS will use floating point registers.
+env GOARCH=mipsle
+env GOOS=linux
+go build -gcflags=-S f.go
+stderr ADDD.F[0-9]+,.F[0-9]+,.F[0-9]+
+
+# Clean cache
+go clean -cache
+
+# Building with GOMIPS=softfloat will not use floating point registers
+env GOMIPS=softfloat
+go build -gcflags=-S f.go
+! stderr ADDD.F[0-9]+,.F[0-9]+,.F[0-9]+
+
+# Clean cache
+go clean -cache
+
+# Build without setting GOMIPS
+env GOMIPS=
+go build -gcflags=-S f.go
+stderr ADDD.F[0-9]+,.F[0-9]+,.F[0-9]+
+
+# Building with GOMIPS should still not use floating point registers.
+env GOMIPS=softfloat
+go build -gcflags=-S f.go
+! stderr ADDD.F[0-9]+,.F[0-9]+,.F[0-9]+
+
+-- f.go --
+package f
+
+func F(x float64) float64 {
+     return x + x
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_link.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_link.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b9c740ac10ea23acdb23901b3d9a113081e5d169
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_link.txt
@@ -0,0 +1,26 @@
+env GO111MODULE=off
+[short] skip
+
+# Set up fresh GOCACHE.
+env GOCACHE=$WORK/gocache
+mkdir $GOCACHE
+
+# Building a main package should run the compiler and linker ...
+go build -o $devnull -x main.go
+stderr '(compile|gccgo)( |\.exe).*main\.go'
+stderr '(link|gccgo)( |\.exe)'
+
+# ... and then the linker again ...
+go build -o $devnull -x main.go
+! stderr '(compile|gccgo)( |\.exe).*main\.go'
+stderr '(link|gccgo)( |\.exe)'
+
+# ... but the output binary can serve as a cache.
+go build -o main$GOEXE -x main.go
+stderr '(link|gccgo)( |\.exe)'
+go build -o main$GOEXE -x main.go
+! stderr '(link|gccgo)( |\.exe)'
+
+-- main.go --
+package main
+func main() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_output.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_output.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fc040b48cfe5f0a6a7748d15a3de1fc1a8e1c009
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_output.txt
@@ -0,0 +1,67 @@
+env GO111MODULE=off
+env GODEBUG=gocachetest=1
+
+[!compiler:gc] skip
+[short] skip # clears cache, rebuilds too much
+
+# Set up fresh GOCACHE.
+env GOCACHE=$WORK/gocache
+mkdir $GOCACHE
+
+# Building a trivial non-main package should run compiler the first time.
+go build -x -gcflags=-m lib.go
+stderr 'compile( |\.exe"?)'
+stderr 'lib.go:2.* can inline f'
+
+# ... but not the second, even though it still prints the compiler output.
+go build -x -gcflags=-m lib.go
+! stderr 'compile( |\.exe"?)'
+stderr 'lib.go:2.* can inline f'
+
+# Building a trivial main package should run the compiler and linker the first time.
+go build -x -gcflags=-m -ldflags='-v -w' main.go
+stderr 'compile( |\.exe"?)'
+stderr 'main.go:2.* can inline main' # from compiler
+stderr 'link(\.exe"?)? -'
+stderr '\d+ symbols' # from linker
+
+# ... but not the second, even though it still prints the compiler and linker output.
+go build -x -gcflags=-m -ldflags='-v -w' main.go
+! stderr 'compile( |\.exe"?)'
+stderr 'main.go:2.* can inline main' # from compiler
+! stderr 'link(\.exe"?)? -'
+stderr '\d+ symbols' # from linker
+
+# Running a test should run the compiler, linker, and the test the first time.
+go test -v -x -gcflags=-m -ldflags=-v p
+stderr 'compile( |\.exe"?)'
+stderr 'p_test.go:.*can inline Test' # from compile of p_test
+stderr 'testmain\.go:.*inlin' # from compile of testmain
+stderr 'link(\.exe"?)? -'
+stderr '\d+ symbols' # from linker
+stderr 'p\.test( |\.exe"?)'
+stdout 'TEST' # from test
+
+# ... but not the second, even though it still prints the compiler, linker, and test output.
+go test -v -x -gcflags=-m -ldflags=-v p
+! stderr 'compile( |\.exe"?)'
+stderr 'p_test.go:.*can inline Test' # from compile of p_test
+stderr 'testmain\.go:.*inlin' # from compile of testmain
+! stderr 'link(\.exe"?)? -'
+stderr '\d+ symbols' # from linker
+! stderr 'p\.test( |\.exe"?)'
+stdout 'TEST' # from test
+
+
+-- lib.go --
+package p
+func f(x *int) *int { return x }
+
+-- main.go --
+package main
+func main() {}
+
+-- p/p_test.go --
+package p
+import "testing"
+func Test(t *testing.T) {println("TEST")}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_trimpath.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_trimpath.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7ee3c3b41d489a7cd3059efe567737f25b040559
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cache_trimpath.txt
@@ -0,0 +1,47 @@
+[short] skip
+env GO111MODULE=on
+
+# Set up fresh GOCACHE.
+env GOCACHE=$WORK/gocache
+mkdir $GOCACHE
+
+cd $WORK
+go build -o a.out
+
+# Varying -trimpath should cause a rebuild.
+go build -x -o a.out -trimpath
+stderr '(compile|gccgo)( |\.exe)'
+stderr 'link( |\.exe)'
+
+# Two distinct versions of the same module with identical content should
+# still be cached separately.
+# Verifies golang.org/issue/35412.
+go get example.com/stack@v1.0.0
+go run -trimpath printstack.go
+stdout '^example.com/stack@v1.0.0/stack.go$'
+go get example.com/stack@v1.0.1
+go run -trimpath printstack.go
+stdout '^example.com/stack@v1.0.1/stack.go$'
+
+-- $WORK/hello.go --
+package main
+func main() { println("hello") }
+
+-- $WORK/printstack.go --
+// +build ignore
+
+package main
+
+import (
+	"fmt"
+
+	"example.com/stack"
+)
+
+func main() {
+	fmt.Println(stack.TopFile())
+}
+-- $WORK/go.mod --
+module m
+
+go 1.14
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cc_cache_issue64423.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cc_cache_issue64423.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f1bc2c3108596fab59b70326ea3587c4b28a2e05
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cc_cache_issue64423.txt
@@ -0,0 +1,121 @@
+# Regression test for https://go.dev/issue/64423:
+#
+# When we parse the version for a Clang binary, we should accept
+# an arbitrary vendor prefix, which (as of 2023) may be injected
+# by defining CLANG_VENDOR when building clang itself.
+#
+# Since we don't want to actually rebuild the Clang toolchain in
+# this test, we instead simulate it by injecting a fake "clang"
+# binary that runs the real one as a subprocess.
+
+[!cgo] skip
+[short] skip 'builds and links a fake clang binary'
+[!cc:clang] skip 'test is specific to clang version parsing'
+
+# Save the location of the real clang command for our fake one to use.
+go run ./which clang
+cp stdout $WORK/.realclang
+
+# Build a fake clang and ensure that it is the one in $PATH.
+mkdir $WORK/bin
+go build -o $WORK/bin/clang$GOEXE ./fakeclang
+[!GOOS:plan9] env PATH=$WORK${/}bin
+[GOOS:plan9] env path=$WORK${/}bin
+
+# Force CGO_ENABLED=1 so that the following commands should error
+# out if the fake clang doesn't work.
+env CGO_ENABLED=1
+
+# The bug in https://go.dev/issue/64423 resulted in cache keys that
+# didn't contain any information about the C compiler.
+# Since the bug was in cache key computation, isolate the cache:
+# if we change the way caching works, we want the test to fail
+# instead of accidentally reusing the cached information from a
+# previous test run.
+env GOCACHE=$WORK${/}.cache
+mkdir $GOCACHE
+
+go build -x runtime/cgo
+
+	# Tell our fake clang to stop working.
+	# Previously, 'go build -x runtime/cgo' would continue to
+	# succeed because both the broken clang and the non-broken one
+	# resulted in a cache key with no clang version information.
+env GO_BREAK_CLANG=1
+! go build -x runtime/cgo
+stderr '# runtime/cgo\nGO_BREAK_CLANG is set'
+
+-- go.mod --
+module example/issue64423
+go 1.20
+-- which/main.go --
+package main
+
+import (
+	"os"
+	"os/exec"
+)
+
+func main() {
+	path, err := exec.LookPath(os.Args[1])
+	if err != nil {
+		panic(err)
+	}
+	os.Stdout.WriteString(path)
+}
+-- fakeclang/main.go --
+package main
+
+import (
+	"bufio"
+	"bytes"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+)
+
+func main() {
+	if os.Getenv("GO_BREAK_CLANG") != "" {
+		os.Stderr.WriteString("GO_BREAK_CLANG is set\n")
+		os.Exit(1)
+	}
+
+	b, err := os.ReadFile(filepath.Join(os.Getenv("WORK"), ".realclang"))
+	if err != nil {
+		log.Fatal(err)
+	}
+	clang := string(bytes.TrimSpace(b))
+	cmd := exec.Command(clang, os.Args[1:]...)
+	cmd.Stdout = os.Stdout
+	stderr, err := cmd.StderrPipe()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	if err := cmd.Start(); err != nil {
+		log.Fatal(err)
+	}
+
+	r := bufio.NewReader(stderr)
+	for {
+		line, err := r.ReadString('\n')
+		if line != "" {
+			if strings.Contains(line, "clang version") {
+				// Simulate a clang version string with an arbitrary vendor prefix.
+				const vendorString = "Gopher Solutions Unlimited "
+				os.Stderr.WriteString(vendorString)
+			}
+			os.Stderr.WriteString(line)
+		}
+		if err != nil {
+			break
+		}
+	}
+	os.Stderr.Close()
+
+	if err := cmd.Wait(); err != nil {
+		os.Exit(1)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cd_gopath_different.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cd_gopath_different.txt
new file mode 100644
index 0000000000000000000000000000000000000000..64d7d74ce214fd9b4567d19067885ea3e985924f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cd_gopath_different.txt
@@ -0,0 +1,73 @@
+[compiler:gccgo] skip 'gccgo does not support -ldflags -X'
+env GO111MODULE=off
+go build run_go.go
+
+# Apply identity function to GOPATH
+exec ./run_go$GOEXE $GOPATH/src/my.pkg/main $GOPATH IDENTITY build -o $WORK/tmp/a.exe -ldflags -X=my.pkg.Text=linkXworked
+exec $WORK/tmp/a.exe
+stderr 'linkXworked'
+rm $WORK/tmp/a.exe
+
+[!GOOS:windows] stop 'rest of the tests only apply to Windows'
+
+# Replace '\' with '/' in GOPATH
+exec ./run_go$GOEXE $GOPATH/src/my.pkg/main $GOPATH REPLACE_SLASH build -o $WORK/tmp/a.exe -ldflags -X=my.pkg.Text=linkXworked
+exec $WORK/tmp/a.exe
+stderr 'linkXworked'
+rm $WORK/tmp/a.exe
+
+# Convert GOPATH to uppercase
+exec ./run_go$GOEXE $GOPATH/src/my.pkg/main $GOPATH UPPER build -o $WORK/tmp/a.exe -ldflags -X=my.pkg.Text=linkXworked
+exec $WORK/tmp/a.exe
+stderr 'linkXworked'
+rm $WORK/tmp/a.exe
+
+# Convert GOPATH to lowercase
+exec ./run_go$GOEXE $GOPATH/src/my.pkg/main $GOPATH LOWER build -o $WORK/tmp/a.exe -ldflags -X=my.pkg.Text=linkXworked
+exec $WORK/tmp/a.exe
+stderr 'linkXworked'
+rm $WORK/tmp/a.exe
+
+-- run_go.go --
+package main
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"strings"
+)
+
+func main() {
+	dir := os.Args[1]
+	gopath := os.Args[2]
+	switch os.Args[3] {
+		case "IDENTITY":
+		case "REPLACE_SLASH": gopath = strings.ReplaceAll(gopath, `\`, `/`)
+		case "UPPER": gopath = strings.ToUpper(gopath)
+		case "LOWER": gopath = strings.ToLower(gopath)
+		default: fmt.Fprintln(os.Stderr, "bad op"); os.Exit(1)
+	}
+	cmd := exec.Command("go", os.Args[4:]...)
+	cmd.Dir = dir
+	cmd.Env = append(os.Environ(), "GOPATH="+gopath)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+}
+
+-- my.pkg/main/main.go --
+package main
+
+import "my.pkg"
+
+func main() {
+	println(pkg.Text)
+}
+-- my.pkg/pkg.go --
+package pkg
+
+var Text = "unset"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cgo_consistent_results.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cgo_consistent_results.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f22994f71fcaf5247c483af3e274d29d640bc4f3
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cgo_consistent_results.txt
@@ -0,0 +1,23 @@
+[short] skip
+[!cgo] skip
+
+[GOOS:solaris] skip "skipping on Solaris; see golang.org/issue/13247"
+[GOOS:illumos] skip "skipping on Solaris; see golang.org/issue/13247"
+
+go build -o $WORK/exe1$GOEXE cgotest
+go build -x -o $WORK/exe2$GOEXE cgotest
+
+# TODO(matloob): skip if stderr does not contain '-fdebug-prefix-map=\$WORK'
+
+cmp $WORK/exe1$GOEXE $WORK/exe2$GOEXE
+
+-- go.mod --
+module cgotest
+
+go 1.16
+-- m.go --
+package cgotest
+
+import "C"
+
+var _ C.int
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cgo_error.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cgo_error.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c11ab46ecc344ce93b7f20ea5c0eabb3d0f771de
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cgo_error.txt
@@ -0,0 +1,17 @@
+[short] skip
+[!cgo] skip
+
+! go build .
+stderr '# foo\nfoo.c:'
+! stderr 'EXTRA string'
+
+-- go.mod --
+module foo
+
+go 1.20
+-- foo.go --
+package foo
+
+import "C"
+-- foo.c --
+#include "doesnotexist.h"
\ No newline at end of file
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_concurrent_backend.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_concurrent_backend.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9cac635e5a6760ca21b89163cdf6eec6b650382f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_concurrent_backend.txt
@@ -0,0 +1,10 @@
+# Tests golang.org/issue/48490
+# cmd/go should enable concurrent compilation by default
+
+# Reset all experiments, since one of them can disable
+# concurrent compilation, e.g: fieldtrack.
+env GOEXPERIMENT=none
+
+env GOMAXPROCS=4
+go build -n -x -a fmt
+stderr ' -c=4 '
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cwd_newline.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cwd_newline.txt
new file mode 100644
index 0000000000000000000000000000000000000000..91cb57fa49e8219c18fcf16ee290676a2966353e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_cwd_newline.txt
@@ -0,0 +1,138 @@
+[GOOS:windows] skip 'filesystem normalizes / to \'
+[GOOS:plan9] skip 'filesystem disallows \n in paths'
+
+# If the directory path containing a package to be built includes a newline,
+# the go command should refuse to even try to build the package.
+
+env DIR=$WORK${/}${newline}'package main'${newline}'func main() { panic("uh-oh")'${newline}'/*'
+
+mkdir $DIR
+cd $DIR
+exec pwd
+cp $WORK/go.mod ./go.mod
+cp $WORK/main.go ./main.go
+cp $WORK/main_nocgo.go ./main_nocgo.go
+cp $WORK/main_test.go ./main_test.go
+
+! go build -o $devnull .
+stderr 'package example: invalid package directory .*uh-oh'
+
+[cgo] ! go build -o $devnull main.go
+[!cgo] ! go build -o $devnull main_nocgo.go
+stderr 'package command-line-arguments: invalid package directory .*uh-oh'
+
+! go run .
+stderr 'package example: invalid package directory .*uh-oh'
+
+[cgo] ! go run main.go
+[!cgo] ! go run main_nocgo.go
+stderr 'package command-line-arguments: invalid package directory .*uh-oh'
+
+! go test .
+stderr 'package example: invalid package directory .*uh-oh'
+
+[cgo] ! go test -v main.go main_test.go
+[!cgo] ! go test -v main_nocgo.go main_test.go
+stderr 'package command-line-arguments: invalid package directory .*uh-oh'
+
+go list -compiled -e -f '{{with .CompiledGoFiles}}{{.}}{{end}}' .
+! stdout .
+! stderr .
+! exists obj_
+
+
+# The cgo tool should only accept the source file if the working directory
+# is not written in line directives in the resulting files.
+
+[cgo] ! go tool cgo main.go
+[cgo] stderr 'cgo: input path contains newline character: .*uh-oh'
+[cgo] ! exists _obj
+
+[cgo] go tool cgo -trimpath=$PWD main.go
+[cgo] grep '//line main\.go:1:1' _obj/main.cgo1.go
+[cgo] ! grep 'uh-oh' _obj/main.cgo1.go
+[cgo] rm _obj
+
+
+# Since we do preserve $PWD (or set it appropriately) for commands, and we do
+# not resolve symlinks unnecessarily, referring to the contents of the unsafe
+# directory via a safe symlink should be ok, and should not inject the data from
+# the symlink target path.
+
+[!symlink] stop 'remainder of test checks symlink behavior'
+[short] stop 'links and runs binaries'
+
+symlink $WORK${/}link -> $DIR
+
+[cgo] go run $WORK${/}link${/}main.go
+[!cgo] go run $WORK${/}link${/}main_nocgo.go
+! stdout panic
+! stderr panic
+stderr '^ok$'
+
+[cgo] go test -v $WORK${/}link${/}main.go $WORK${/}link${/}main_test.go
+[!cgo] go test -v $WORK${/}link${/}main_nocgo.go $WORK${/}link${/}main_test.go
+! stdout panic
+! stderr panic
+stdout '^ok$'   # 'go test' combines the test's stdout into stderr
+
+cd $WORK/link
+
+[cgo] ! go run $DIR${/}main.go
+[!cgo] ! go run $DIR${/}main_nocgo.go
+stderr 'package command-line-arguments: invalid package directory .*uh-oh'
+
+go run .
+! stdout panic
+! stderr panic
+stderr '^ok$'
+
+[cgo] go run main.go
+[!cgo] go run main_nocgo.go
+! stdout panic
+! stderr panic
+stderr '^ok$'
+
+go test -v
+! stdout panic
+! stderr panic
+stdout '^ok$'  # 'go test' combines the test's stdout into stderr
+
+go test -v .
+! stdout panic
+! stderr panic
+stdout '^ok$'  # 'go test' combines the test's stdout into stderr
+
+[cgo] go tool cgo main.go
+[cgo] grep '//line .*'${/}'link'${/}'main\.go:1:1' _obj/main.cgo1.go
+[cgo] ! grep 'uh-oh' _obj/main.cgo1.go
+
+-- $WORK/go.mod --
+module example
+go 1.19
+-- $WORK/main.go --
+package main
+
+import "C"
+
+func main() {
+	/* nothing here */
+	println("ok")
+}
+-- $WORK/main_nocgo.go --
+//go:build !cgo
+
+package main
+
+func main() {
+	/* nothing here */
+	println("ok")
+}
+-- $WORK/main_test.go --
+package main
+
+import "testing"
+
+func TestMain(*testing.M) {
+	main()
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_darwin_cc_arch.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_darwin_cc_arch.txt
new file mode 100644
index 0000000000000000000000000000000000000000..df3fa5bd35fa90863d795f6aac73e52ee08511e2
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_darwin_cc_arch.txt
@@ -0,0 +1,24 @@
+# Test that we pass -arch flag to C compiler on Darwin (issue 43692).
+
+[!GOOS:darwin] skip
+[!cgo] skip
+
+# clear CC, in case user sets it
+env CC=
+
+env CGO_ENABLED=1
+
+env GOARCH=amd64
+go build -n -x c.go
+stderr 'clang.*-arch x86_64'
+
+env GOARCH=arm64
+go build -n -x c.go
+stderr 'clang.*-arch arm64'
+
+-- c.go --
+package main
+
+import "C"
+
+func main() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_dash_n_cgo.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_dash_n_cgo.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3f49ef6f9ea80cba4e87b6c90e9e5541b1ddc88c
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_dash_n_cgo.txt
@@ -0,0 +1,18 @@
+# Tests golang.org/issue/14944
+
+[!cgo] skip
+
+go build -n foo.go
+! stderr 'os.Stat .* no such file or directory' # there shouldn't be a stat of the archive file
+
+-- foo.go --
+package main
+
+/*
+#include <limits.h>
+*/
+import "C"
+
+func main() {
+        println(C.INT_MAX)
+}
\ No newline at end of file
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_dash_o_dev_null.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_dash_o_dev_null.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e415fc224dc1261f2ab5aa5450c08e01cb4a1862
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_dash_o_dev_null.txt
@@ -0,0 +1,13 @@
+# Issue #25579
+
+[short] skip
+
+go build -o $devnull hello.go
+! exists 'hello'$GOEXE
+
+-- hello.go --
+package main
+
+func main() {
+	println("hello, world")
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_dash_x.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_dash_x.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e5580a2cc69ed11320243d3d5cdea9d50768a081
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_dash_x.txt
@@ -0,0 +1,48 @@
+[short] skip
+[!cgo] skip
+
+[!exec:/usr/bin/env] skip
+[!exec:bash] skip
+
+mkdir $WORK/tmp/cache
+env GOCACHE=$WORK/tmp/cache
+
+# Before building our test main.go, ensure that an up-to-date copy of
+# runtime/cgo is present in the cache. If it isn't, the 'go build' step below
+# will fail with "can't open import". See golang.org/issue/29004.
+#
+# (The fix in golang.org/issue/29004 didn't completely fix the underlying issue:
+# cmd/go/internal/load adds a bunch of implicit dependencies
+# based on various heuristics, and, due to a bug described in
+# https://golang.org/issue/31544#issuecomment-490607180,
+# those implicit dependencies are not added early enough during
+# loading to properly affect the import graph.)
+go build runtime/cgo
+
+go build -x -o main main.go
+cp stderr commands.txt
+cat header.txt commands.txt
+cp stdout test.sh
+
+exec ./main
+cmp stderr hello.txt
+rm ./main
+
+exec /usr/bin/env bash -x test.sh
+exec ./main
+cmp stderr hello.txt
+
+grep '^WORK=(.*)\n' commands.txt
+
+-- main.go --
+package main
+
+import "C"
+
+func main() {
+	print("hello\n")
+}
+-- header.txt --
+set -e
+-- hello.txt --
+hello
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_exe.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_exe.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a994d170884d64a5d0badecbb9d3e4031236b219
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_exe.txt
@@ -0,0 +1,25 @@
+# go build with -o and -buildmode=exe should report an error on a non-main package.
+
+! go build -buildmode=exe -o out$GOEXE ./not_main
+stderr '-buildmode=exe requires exactly one main package'
+! exists out$GOEXE
+! go build -buildmode=exe -o out$GOEXE ./main_one ./main_two
+stderr '-buildmode=exe requires exactly one main package'
+! exists out$GOEXE
+
+-- go.mod --
+module m
+
+go 1.16
+-- not_main/not_main.go --
+package not_main
+
+func F() {}
+-- main_one/main_one.go --
+package main
+
+func main() {}
+-- main_two/main_two.go --
+package main
+
+func main() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_gcflags.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_gcflags.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9603e0b7b710e94c689637bd0836282b6ba06bef
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_gcflags.txt
@@ -0,0 +1,22 @@
+env GO111MODULE=off
+
+# Test that the user can override default code generation flags.
+
+[compiler:gccgo] skip  # gccgo does not use -gcflags
+[!cgo] skip
+[!GOOS:linux] skip  # test only works if c-archive implies -shared
+[short] skip
+
+env GOCACHE=$WORK/gocache  # Looking for compile commands, so need a clean cache.
+go build -x -n -buildmode=c-archive -gcflags=all=-shared=false ./override.go
+stderr '^.*/compile (.* )?-shared (.* )?-shared=false'
+
+-- override.go --
+package main
+
+import "C"
+
+//export GoFunc
+func GoFunc() {}
+
+func main() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_gcflags_order.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_gcflags_order.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3725c89eb39231056fb52d1c0d177f529dd36567
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_gcflags_order.txt
@@ -0,0 +1,22 @@
+# Tests golang.org/issue/47682
+# Flags specified with -gcflags should appear after other flags generated by cmd/go.
+
+cd m
+go build -n -gcflags=-lang=go1.17
+stderr ' -lang=go1.16.* -lang=go1.17'
+! go build -gcflags='-c 0'
+stderr 'compile: -c must be at least 1, got 0'
+
+-- m/go.mod --
+module example.com
+
+go 1.16
+
+-- m/main.go --
+package main
+
+func main() {
+    var s = []int{1, 2, 3}
+    var pa = (*[2]int)(s[1:])
+    println(pa[1])
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_gopath_order.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_gopath_order.txt
new file mode 100644
index 0000000000000000000000000000000000000000..caf25022e4cdd94ab9f9f83a47261d5bd56886c6
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_gopath_order.txt
@@ -0,0 +1,36 @@
+# golang.org/issue/14176#issuecomment-179895769
+# golang.org/issue/14192
+# -I arguments to compiler could end up not in GOPATH order,
+# leading to unexpected import resolution in the compiler.
+
+env GO111MODULE=off
+env GOPATH=$WORK/p1${:}$WORK/p2
+mkdir $WORK/p1/src/foo $WORK/p2/src/baz
+mkdir $WORK/p2/pkg/${GOOS}_${GOARCH} $WORK/p1/src/bar
+cp foo.go $WORK/p1/src/foo/foo.go
+cp baz.go $WORK/p2/src/baz/baz.go
+cp foo.a $WORK/p2/pkg/${GOOS}_${GOARCH}/foo.a
+cp bar.go $WORK/p1/src/bar/bar.go
+
+go install -x bar
+
+# add in baz.a to the mix
+mkdir $WORK/p1/pkg/${GOOS}_${GOARCH}
+cp baz.a $WORK/p1/pkg/${GOOS}_${GOARCH}/baz.a
+env GOPATH=$WORK/p1${:}$WORK/p2
+go install -x bar
+env GOPATH=$WORK/p2${:}$WORK/p1
+go install -x bar
+
+-- foo.go --
+package foo
+-- baz.go --
+package baz
+-- foo.a --
+bad
+-- baz.a --
+bad
+-- bar.go --
+package bar
+import _ "baz"
+import _ "foo"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_ignore_leading_bom.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_ignore_leading_bom.txt
new file mode 100644
index 0000000000000000000000000000000000000000..37141f3466b3e7d7ac785d6e5cd909157bb2febe
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_ignore_leading_bom.txt
@@ -0,0 +1,27 @@
+# Per https://golang.org/ref/spec#Source_code_representation:
+# a compiler may ignore a UTF-8-encoded byte order mark (U+FEFF)
+# if it is the first Unicode code point in the source text.
+
+go list -f 'Imports: {{.Imports}} EmbedFiles: {{.EmbedFiles}}' .
+stdout '^Imports: \[embed m/hello\] EmbedFiles: \[.*file\]$'
+
+-- go.mod --
+module m
+
+go 1.16
+-- m.go --
+package main
+
+import (
+	_ "embed"
+
+	"m/hello"
+)
+
+//go:embed file
+var s string
+
+-- hello/hello.go --
+package hello
+
+-- file --
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_import_comment.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_import_comment.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b500340bfb48b6e0405702caa67d7cb267d8bec3
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_import_comment.txt
@@ -0,0 +1,68 @@
+# Test in GOPATH mode first.
+env GO111MODULE=off
+cd m
+
+# Import comment matches
+go build -n works.go
+
+# Import comment mismatch
+! go build -n wrongplace.go
+stderr 'wrongplace expects import "my/x"'
+
+# Import comment syntax error
+! go build -n bad.go
+stderr 'cannot parse import comment'
+
+# Import comment conflict
+! go build -n conflict.go
+stderr 'found import comments'
+
+
+# Test in module mode.
+# We ignore import comments, so these commands should succeed.
+env GO111MODULE=on
+
+# Import comment matches
+go build -n works.go
+
+# Import comment mismatch
+go build -n wrongplace.go
+
+# Import comment syntax error
+go build -n bad.go
+
+# Import comment conflict
+go build -n conflict.go
+
+-- m/go.mod --
+module m
+
+go 1.16
+-- m/bad.go --
+package p
+
+import "m/bad"
+-- m/conflict.go --
+package p
+
+import "m/conflict"
+-- m/works.go --
+package p
+
+import _ "m/works/x"
+-- m/wrongplace.go --
+package p
+
+import "m/wrongplace"
+-- m/bad/bad.go --
+package bad // import
+-- m/conflict/a.go --
+package conflict // import "a"
+-- m/conflict/b.go --
+package conflict /* import "b" */
+-- m/works/x/x.go --
+package x // import "m/works/x"
+-- m/works/x/x1.go --
+package x // important! not an import comment
+-- m/wrongplace/x.go --
+package x // import "my/x"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_import_cycle.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_import_cycle.txt
new file mode 100644
index 0000000000000000000000000000000000000000..16e4e87daeeeae113061b4c62bb28dd389bd273e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_import_cycle.txt
@@ -0,0 +1,13 @@
+# mod_import_cycle covers this error in module mode.
+env GO111MODULE=off
+
+! go build selfimport
+stderr -count=1 'import cycle not allowed'
+
+go list -e -f '{{.Error}}' selfimport # Don't hang forever
+stdout -count=1 'import cycle not allowed'
+
+-- selfimport/selfimport.go --
+package selfimport
+
+import "selfimport"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_internal.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_internal.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0a37d6520c6a7aef39cfcf4f2b7135e2de967bde
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_internal.txt
@@ -0,0 +1,65 @@
+# Test internal package errors are handled
+cd testinternal3
+go list .
+stdout 'testinternal3'
+
+# Test internal cache
+cd ../testinternal4
+! go build testinternal4/p
+stderr 'internal'
+
+# Test internal packages outside GOROOT are respected
+cd ../testinternal2
+env GO111MODULE=off
+! go build -v .
+stderr 'p\.go:3:8: use of internal package .*internal/w not allowed'
+env GO111MODULE=''
+
+[compiler:gccgo] skip # gccgo does not have GOROOT
+cd ../testinternal
+! go build -v .
+stderr 'p\.go:3:8: use of internal package net/http/internal not allowed'
+
+-- testinternal/go.mod --
+module testinternal
+
+go 1.16
+-- testinternal/p.go --
+package p
+
+import _ "net/http/internal"
+-- testinternal2/go.mod --
+module testinternal2
+
+go 1.16
+-- testinternal2/p.go --
+package p
+
+import _ "./x/y/z/internal/w"
+-- testinternal2/x/y/z/internal/w/w.go --
+package w
+-- testinternal3/go.mod --
+module testinternal3
+
+go 1.16
+-- testinternal3/t.go --
+package t
+
+import _ "internal/does-not-exist"
+-- testinternal4/go.mod --
+module testinternal4
+
+go 1.16
+-- testinternal4/p/p.go --
+package p
+
+import (
+	_ "testinternal4/q/internal/x"
+	_ "testinternal4/q/j"
+)
+-- testinternal4/q/internal/x/x.go --
+package x
+-- testinternal4/q/j/j.go --
+package j
+
+import _ "testinternal4/q/internal/x"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_issue48319.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_issue48319.txt
new file mode 100644
index 0000000000000000000000000000000000000000..148d8f0ff6f2e529136eae619762571696d7d879
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_issue48319.txt
@@ -0,0 +1,53 @@
+# Regression test for https://go.dev/issue/48319:
+# cgo builds should not include debug information from a stale GOROOT_FINAL.
+
+[short] skip
+[!cgo] skip
+
+# This test has problems when run on the LUCI darwin longtest builder,
+# which uses a more contemporary Xcode version that is unfriendly to
+# reproducible builds (see issue #64947 for the gory details). Note
+# that individual developers running "go test cmd/go" on Darwin may
+# still run into failures depending on their Xcode version.
+[GOOS:darwin] [go-builder] skip
+
+# This test is sensitive to cache invalidation,
+# so use a separate build cache that we can control.
+env GOCACHE=$WORK/gocache
+mkdir $GOCACHE
+
+# Build a binary using a specific value of GOROOT_FINAL.
+env GOROOT_FINAL=$WORK${/}goroot1
+go build -o main.exe
+mv main.exe main1.exe
+
+# Now clean the cache and build using a different GOROOT_FINAL.
+# The resulting binaries should differ in their debug metadata.
+go clean -cache
+env GOROOT_FINAL=$WORK${/}goroot2
+go build -o main.exe
+mv main.exe main2.exe
+! cmp -q main2.exe main1.exe
+
+# Set GOROOT_FINAL back to the first value.
+# If the build is properly reproducible, the two binaries should match.
+env GOROOT_FINAL=$WORK${/}goroot1
+go build -o main.exe
+cmp -q main.exe main1.exe
+
+-- go.mod --
+module main
+
+go 1.18
+-- main.go --
+package main
+
+import "C"
+
+import "runtime"
+
+var _ C.int
+
+func main() {
+	println(runtime.GOROOT())
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_issue59571.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_issue59571.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2cf32594bf40334ce21389a1e90688ee231cb597
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_issue59571.txt
@@ -0,0 +1,40 @@
+# Regression test for https://go.dev/issue/59571
+# Build should be reproducible, even with aliased generic types.
+
+go build -a -o 1.a
+go build -a -o 2.a
+cmp -q 1.a 2.a
+
+-- go.mod --
+module m
+
+go 1.20
+-- m.go --
+package m
+
+type (
+	SliceFlag[T any] struct{}
+
+	Alias1  = SliceFlag[[1]int]
+	Alias2  = SliceFlag[[2]int]
+	Alias3  = SliceFlag[[3]int]
+	Alias4  = SliceFlag[[4]int]
+	Alias5  = SliceFlag[[5]int]
+	Alias6  = SliceFlag[[6]int]
+	Alias7  = SliceFlag[[7]int]
+	Alias8  = SliceFlag[[8]int]
+	Alias9  = SliceFlag[[9]int]
+	Alias10 = SliceFlag[[10]int]
+	Alias11 = SliceFlag[[11]int]
+	Alias12 = SliceFlag[[12]int]
+	Alias13 = SliceFlag[[13]int]
+	Alias14 = SliceFlag[[14]int]
+	Alias15 = SliceFlag[[15]int]
+	Alias16 = SliceFlag[[16]int]
+	Alias17 = SliceFlag[[17]int]
+	Alias18 = SliceFlag[[18]int]
+	Alias19 = SliceFlag[[19]int]
+	Alias20 = SliceFlag[[20]int]
+)
+
+func (x *SliceFlag[T]) String() string { return "zzz" }
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_issue62156.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_issue62156.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d241570cf61955a445f03bed1bfcc442ce797f7f
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_issue62156.txt
@@ -0,0 +1,27 @@
+# Regression test for https://go.dev/issue/62156:
+# DWARF generation for inlined functions may require more runtime type
+# descriptors to be written.
+
+go build
+
+-- go.mod --
+module m
+
+go 1.20
+-- main.go --
+package main
+
+import "m/sub"
+
+func main() { sub.F() }
+-- sub/sub.go --
+package sub
+
+type iface interface{ m() }
+
+func F() {
+	f := func(rt []iface) []iface {
+		return append([]iface{}, rt...)
+	}
+	f(nil)
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_issue6480.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_issue6480.txt
new file mode 100644
index 0000000000000000000000000000000000000000..991112fff118d4bf747a466dbce9fe104a5e2c91
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_issue6480.txt
@@ -0,0 +1,128 @@
+# "go test -c -test.bench=XXX errors" should not hang.
+# "go test -c" should also produce reproducible binaries.
+# "go test -c" should also appear to write a new binary every time,
+# even if it's really just updating the mtime on an existing up-to-date binary.
+
+[compiler:gccgo] skip
+[short] skip
+
+# Install some commands to compare mtimes
+env GOBIN=$WORK/tmp/bin
+go install m/now m/mtime m/before
+
+# Initial builds
+go test -c -test.bench=XXX errors
+go test -c -o errors2.test errors
+cmp errors.test$GOEXE errors2.test # errors2.test has no exeSuffix because -o above doesn't have it
+
+# Check errors.test mtime is updated
+exec $GOBIN/now
+cp stdout start_time.txt
+go test -x -c -test.bench=XXX errors
+! stderr '[\\/]link|gccgo' # make sure up-to-date test binary is not relinked
+exec $GOBIN/mtime errors.test$GOEXE
+cp stdout errors1_mod_time.txt
+exec $GOBIN/before start_time.txt errors1_mod_time.txt
+rm start_time.txt errors1_mod_time.txt
+
+# Check errors2.test mtime is updated
+exec $GOBIN/now
+cp stdout start_time.txt
+go test -x -c -o errors2.test errors
+! stderr '[\\/]link|gccgo' # make sure up-to-date test binary is not relinked
+exec $GOBIN/mtime errors2.test
+cp stdout errors2_mod_time.txt
+exec $GOBIN/before start_time.txt errors2_mod_time.txt
+
+-- go.mod --
+module m
+
+go 1.16
+-- now/now.go --
+// Writes time.Now() to a file
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"time"
+)
+
+func main() {
+	if err := json.NewEncoder(os.Stdout).Encode(time.Now()); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+}
+-- mtime/mtime.go --
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+)
+
+func main() {
+	info, err := os.Stat(os.Args[1])
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	if err := json.NewEncoder(os.Stdout).Encode(info.ModTime()); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+}
+-- before/before.go --
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"time"
+)
+
+func truncateLike(t, p time.Time) time.Time {
+	nano := p.UnixNano()
+	d := 1 * time.Nanosecond
+	for nano%int64(d) == 0 && d < 1*time.Second {
+		d *= 10
+	}
+	for nano%int64(d) == 0 && d < 2*time.Second {
+		d *= 2
+	}
+	return t.Truncate(d)
+}
+
+func main() {
+	var t1 time.Time
+	b1, err := os.ReadFile(os.Args[1])
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	if err := json.Unmarshal(b1, &t1); err != nil  {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+
+	var t2 time.Time
+	b2, err := os.ReadFile(os.Args[2])
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	if err := json.Unmarshal(b2, &t2); err != nil  {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+
+	t1 = truncateLike(t1, t2)
+	if !t1.Before(t2) {
+		fmt.Fprintf(os.Stderr, "time in %v (%v) is not before time in %v (%v)", os.Args[1], t1, os.Args[2], t2)
+		os.Exit(1)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_issue_65528.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_issue_65528.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ab4d62bbb2b473a6f692b42e7db308008ec18a89
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_issue_65528.txt
@@ -0,0 +1,9 @@
+go build
+
+-- go.mod --
+module test
+
+go 1.0
+
+-- p.go --
+package p
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_link_x_import_path_escape.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_link_x_import_path_escape.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d47c482170ae28553de014de0a25af816656ac17
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_link_x_import_path_escape.txt
@@ -0,0 +1,22 @@
+[compiler:gccgo] skip 'gccgo does not support -ldflags -X'
+
+go build -o linkx$GOEXE -ldflags -X=my.pkg.Text=linkXworked my.pkg/main
+exec ./linkx$GOEXE
+stderr '^linkXworked$'
+
+-- go.mod --
+module my.pkg
+
+go 1.16
+-- main/main.go --
+package main
+
+import "my.pkg"
+
+func main() {
+	println(pkg.Text)
+}
+-- pkg.go --
+package pkg
+
+var Text = "unset"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_multi_main.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_multi_main.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8afd8b8a2e11af607e639cb2a8eb41982265164b
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_multi_main.txt
@@ -0,0 +1,43 @@
+# Verify build -o can output multiple executables to a directory.
+
+mkdir $WORK/bin
+go build -o $WORK/bin ./cmd/c1 ./cmd/c2
+! stderr 'multiple packages'
+
+! go build -o $WORK/bin ./pkg1 ./pkg2
+stderr 'no main packages'
+
+! go build ./cmd/c1
+stderr 'already exists and is a directory'
+
+# Verify build -o output correctly local packages
+mkdir $WORK/local
+go build -o $WORK/local ./exec.go
+exists $WORK/local/exec$GOEXE
+
+-- go.mod --
+module exmod
+
+-- cmd/c1/main.go --
+package main
+
+func main() {}
+
+-- cmd/c2/main.go --
+package main
+
+func main() {}
+
+-- pkg1/pkg1.go --
+package pkg1
+
+-- pkg2/pkg2.go --
+package pkg2
+
+-- exec.go --
+package main
+
+func main() {}
+
+-- c1$GOEXE/keep.txt --
+Create c1 directory.
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_n_cgo.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_n_cgo.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fa01927720b0dc94220c3623bef0ff86ddc299b4
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_n_cgo.txt
@@ -0,0 +1,18 @@
+[!cgo] skip
+
+# Test that nothing is prepended to $WORK path prefix.
+# See issue golang.org/issue/37012.
+go build -n
+! stderr '[/\\]\$WORK'
+stderr '[ =]\$WORK'
+
+-- go.mod --
+module m
+
+go 1.16
+-- main.go --
+package main
+
+import "C"
+
+var _ C.int
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_negative_p.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_negative_p.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9123907dc87a40e41803c145433602aada4db3dd
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_negative_p.txt
@@ -0,0 +1,5 @@
+! go build -p=-1 example.go
+stderr 'go: -p must be a positive integer: -1'
+
+-- example.go --
+package example
\ No newline at end of file
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_no_go.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_no_go.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b61d7522740d34447f64f75056a8cd15f9bffbbf
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_no_go.txt
@@ -0,0 +1,41 @@
+! go build ./empty/test
+stderr 'no non-test Go files in '
+
+! go build ./empty/xtest
+stderr 'no non-test Go files in '
+
+! go build ./empty/testxtest
+stderr 'no non-test Go files in '
+
+! go build ./exclude
+stderr 'build constraints exclude all Go files in '
+
+! go build ./exclude/ignore
+stderr 'no Go files in '
+
+! go build ./exclude/empty
+stderr 'no Go files in '
+
+-- go.mod --
+module m
+
+go 1.16
+-- empty/test/test_test.go --
+package p
+-- empty/testxtest/test_test.go --
+package p
+-- empty/testxtest/xtest_test.go --
+package p_test
+-- empty/xtest/xtest_test.go --
+package p_test
+-- exclude/empty/x.txt --
+-- exclude/ignore/_x.go --
+package x
+-- exclude/x.go --
+// +build linux,!linux
+
+package x
+-- exclude/x_linux.go --
+// +build windows
+
+package x
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_nocache.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_nocache.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b21e755e8939fb63ab69ecae07280cfc2db655bd
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_nocache.txt
@@ -0,0 +1,40 @@
+env GO111MODULE=off
+
+# As of Go 1.12, the module cache is required.
+
+# If none of the variables we use to locate GOCACHE are set, the cache is off
+# and we cannot build.
+env GOCACHE=
+env XDG_CACHE_HOME=
+env HOME=
+[GOOS:plan9] env home=
+[GOOS:windows] env LocalAppData=
+! go build -o triv triv.go
+stderr 'build cache is required, but could not be located: GOCACHE is not defined and .*'
+
+# If GOCACHE is set but is not an absolute path, and we cannot build.
+env GOCACHE=test
+! go build -o triv triv.go
+stderr 'build cache is required, but could not be located: GOCACHE is not an absolute path'
+
+# An explicit GOCACHE=off also disables builds.
+env GOCACHE=off
+! go build -o triv triv.go
+stderr 'build cache is disabled by GOCACHE=off'
+
+# If GOCACHE is set to an unwritable directory, we should diagnose it as such.
+[GOOS:windows] stop # Does not support unwritable directories.
+[root] skip # Can write to unwritable directories.
+
+mkdir $WORK/unwritable/home
+chmod 0555 $WORK/unwritable/home
+[!GOOS:plan9] env HOME=$WORK/unwritable/home
+[GOOS:plan9] env home=$WORK/unwritable/home
+
+env GOCACHE=$WORK/unwritable/home
+! go build -o triv triv.go
+stderr 'failed to initialize build cache.* permission denied'
+
+-- triv.go --
+package main
+func main() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_output.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_output.txt
new file mode 100644
index 0000000000000000000000000000000000000000..457960f9ac5641edcf3e6bdf89da32596c54da19
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_output.txt
@@ -0,0 +1,117 @@
+[compiler:gccgo] skip 'gccgo has no standard packages'
+[short] skip
+
+[!GOOS:windows] env NONEXE='.exe'
+[GOOS:windows] env NONEXE=''
+
+env GOBIN=$WORK/tmp/bin
+go install m/isarchive &
+
+go build x.go
+exists -exec x$GOEXE
+rm x$GOEXE
+! exists x$NONEXE
+
+go build -o myprog x.go
+! exists x
+! exists x.exe
+exists -exec myprog
+! exists myprog.exe
+
+! exists bin
+go build -o bin/x x.go
+exists -exec bin/x
+rm bin
+
+! exists bin
+go build -o bin/ x.go
+exists -exec bin/x$GOEXE
+rm bin
+
+[GOOS:windows] ! exists bin
+[GOOS:windows] go build -o bin\x x.go
+[GOOS:windows] exists -exec bin\x
+[GOOS:windows] rm bin
+
+[GOOS:windows] ! exists bin
+[GOOS:windows] go build -o bin\ x.go
+[GOOS:windows] exists -exec bin\x.exe
+[GOOS:windows] rm bin
+
+! exists bin
+mkdir bin
+go build -o bin x.go
+exists -exec bin/x$GOEXE
+rm bin
+
+go build p.go
+! exists p
+! exists p.a
+! exists p.o
+! exists p.exe
+
+wait # for isarchive
+
+go build -o p.a p.go
+exists p.a
+exec $GOBIN/isarchive p.a
+
+go build cmd/gofmt
+exists -exec gofmt$GOEXE
+rm gofmt$GOEXE
+! exists gofmt$NONEXE
+
+go build -o mygofmt cmd/gofmt
+exists -exec mygofmt
+! exists mygofmt.exe
+! exists gofmt
+! exists gofmt.exe
+
+go build sync/atomic
+! exists atomic
+! exists atomic.exe
+
+go build -o myatomic.a sync/atomic
+exists myatomic.a
+exec $GOBIN/isarchive myatomic.a
+! exists atomic
+! exists atomic.a
+! exists atomic.exe
+
+! go build -o whatever cmd/gofmt sync/atomic
+stderr 'multiple packages'
+
+-- go.mod --
+module m
+
+go 1.16
+-- x.go --
+package main
+
+func main() {}
+-- p.go --
+package p
+-- isarchive/isarchive.go --
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+func main() {
+	f, err := os.Open(os.Args[1])
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	buf := make([]byte, 100)
+	io.ReadFull(f, buf)
+	f.Close()
+	if !bytes.HasPrefix(buf, []byte("!\n")) {
+		fmt.Fprintf(os.Stderr, "file %s exists but is not an archive\n", os.Args[1])
+		os.Exit(1)
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_overlay.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_overlay.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b64bc0261422ab2b8a403daa9661daa37417a7d9
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_overlay.txt
@@ -0,0 +1,313 @@
+[short] skip
+
+# Test building in overlays.
+# TODO(#39958): add a test case where the destination file in the replace map
+#   isn't a go file. Either completely exclude that case in fs.IsDirWithGoFiles
+#   if the compiler doesn't allow it, or test that it works all the way.
+# TODO(#39958): add a test that both gc and gccgo assembly files can include .h
+#   files.
+
+# The main package (m) is contained in an overlay. It imports m/dir2 which has one
+# file in an overlay and one file outside the overlay, which in turn imports m/dir,
+# which only has source files in the overlay.
+
+cd m
+
+! go build .
+go build -overlay overlay.json -o main$GOEXE .
+exec ./main$goexe
+stdout '^hello$'
+
+go build -overlay overlay.json -o print_abspath$GOEXE ./printpath
+exec ./print_abspath$GOEXE
+stdout $WORK[/\\]gopath[/\\]src[/\\]m[/\\]printpath[/\\]main.go
+
+go build -overlay overlay.json -o print_trimpath$GOEXE -trimpath ./printpath
+exec ./print_trimpath$GOEXE
+stdout ^m[/\\]printpath[/\\]main.go
+
+go build -overlay overlay.json -o print_trimpath_two_files$GOEXE printpath/main.go printpath/other.go
+exec ./print_trimpath_two_files$GOEXE
+stdout $WORK[/\\]gopath[/\\]src[/\\]m[/\\]printpath[/\\]main.go
+stdout $WORK[/\\]gopath[/\\]src[/\\]m[/\\]printpath[/\\]other.go
+
+[cgo] go build -overlay overlay.json -o main_cgo_replace$GOEXE ./cgo_hello_replace
+[cgo] exec ./main_cgo_replace$GOEXE
+[cgo] stdout '^hello cgo\r?\n'
+
+[cgo] go build -overlay overlay.json -o main_cgo_quote$GOEXE ./cgo_hello_quote
+[cgo] exec ./main_cgo_quote$GOEXE
+[cgo] stdout '^hello cgo\r?\n'
+
+[cgo] go build -overlay overlay.json -o main_cgo_angle$GOEXE ./cgo_hello_angle
+[cgo] exec ./main_cgo_angle$GOEXE
+[cgo] stdout '^hello cgo\r?\n'
+
+go build -overlay overlay.json -o main_call_asm$GOEXE ./call_asm
+exec ./main_call_asm$GOEXE
+! stdout .
+
+[cgo] go list -compiled -overlay overlay.json -f '{{range .CompiledGoFiles}}{{. | printf "%s\n"}}{{end}}' ./cgo_hello_replace
+[cgo] cp stdout compiled_cgo_sources.txt
+[cgo] go run ../print_line_comments.go compiled_cgo_sources.txt
+[cgo] stdout $GOPATH[/\\]src[/\\]m[/\\]cgo_hello_replace[/\\]cgo_hello_replace.go
+[cgo] ! stdout $GOPATH[/\\]src[/\\]m[/\\]overlay[/\\]hello.c
+
+# Change the contents of a file in the overlay and ensure that makes the target stale
+env OLD_GOCACHE=$GOCACHE
+env GOCACHE=$WORK/cache  # use a fresh cache so that multiple runs of the test don't interfere
+go build -x -overlay overlay.json ./test_cache
+stderr '(compile|gccgo)( |\.exe).*test_cache.go'
+go build -x -overlay overlay.json ./test_cache
+! stderr '(compile|gccgo)( |\.exe).*test_cache.go'  # cached
+cp overlay/test_cache_different.go overlay/test_cache.go
+go build -x -overlay overlay.json ./test_cache
+stderr '(compile|gccgo)( |\.exe).*test_cache.go'  # not cached
+env GOCACHE=$OLD_GOCACHE
+
+# Run same tests but with gccgo.
+env GO111MODULE=off
+[!exec:gccgo] stop
+[cross] stop  # gccgo can't necessarily cross-compile
+
+! go build -compiler=gccgo .
+go build -compiler=gccgo -overlay overlay.json -o main_gccgo$GOEXE .
+exec ./main_gccgo$GOEXE
+stdout '^hello$'
+
+go build -compiler=gccgo -overlay overlay.json -o print_abspath_gccgo$GOEXE ./printpath
+exec ./print_abspath_gccgo$GOEXE
+stdout $WORK[/\\]gopath[/\\]src[/\\]m[/\\]printpath[/\\]main.go
+
+go build -compiler=gccgo -overlay overlay.json -o print_trimpath_gccgo$GOEXE -trimpath ./printpath
+exec ./print_trimpath_gccgo$GOEXE
+stdout ^\.[/\\]printpath[/\\]main.go
+
+
+go build -compiler=gccgo  -overlay overlay.json -o main_cgo_replace_gccgo$GOEXE ./cgo_hello_replace
+exec ./main_cgo_replace_gccgo$GOEXE
+stdout '^hello cgo\r?\n'
+
+go build -compiler=gccgo  -overlay overlay.json -o main_cgo_quote_gccgo$GOEXE ./cgo_hello_quote
+exec ./main_cgo_quote_gccgo$GOEXE
+stdout '^hello cgo\r?\n'
+
+go build -compiler=gccgo  -overlay overlay.json -o main_cgo_angle_gccgo$GOEXE ./cgo_hello_angle
+exec ./main_cgo_angle_gccgo$GOEXE
+stdout '^hello cgo\r?\n'
+
+go build -compiler=gccgo -overlay overlay.json -o main_call_asm_gccgo$GOEXE ./call_asm
+exec ./main_call_asm_gccgo$GOEXE
+! stdout .
+
+
+-- m/go.mod --
+// TODO(matloob): how do overlays work with go.mod (especially if mod=readonly)
+module m
+
+go 1.16
+
+-- m/dir2/h.go --
+package dir2
+
+func PrintMessage() {
+	printMessage()
+}
+-- m/dir/foo.txt --
+The build action code currently expects the package directory
+to exist, so it can run the compiler in that directory.
+TODO(matloob): Remove this requirement.
+-- m/printpath/about.txt --
+the actual code is in the overlay
+-- m/overlay.json --
+{
+	"Replace": {
+		"f.go": "overlay/f.go",
+		"dir/g.go": "overlay/dir_g.go",
+		"dir2/i.go": "overlay/dir2_i.go",
+		"printpath/main.go": "overlay/printpath.go",
+		"printpath/other.go": "overlay2/printpath2.go",
+		"call_asm/asm_gc.s": "overlay/asm_gc.s",
+		"call_asm/asm_gccgo.s": "overlay/asm_gccgo.s",
+		"test_cache/main.go": "overlay/test_cache.go",
+		"cgo_hello_replace/cgo_header.h": "overlay/cgo_head.h",
+		"cgo_hello_replace/hello.c": "overlay/hello.c",
+		"cgo_hello_quote/cgo_hello.go": "overlay/cgo_hello_quote.go",
+		"cgo_hello_quote/cgo_header.h": "overlay/cgo_head.h",
+		"cgo_hello_angle/cgo_hello.go": "overlay/cgo_hello_angle.go",
+		"cgo_hello_angle/cgo_header.h": "overlay/cgo_head.h"
+	}
+}
+-- m/cgo_hello_replace/cgo_hello_replace.go --
+package main
+
+// #include "cgo_header.h"
+import "C"
+
+func main() {
+	C.say_hello()
+}
+-- m/cgo_hello_replace/cgo_header.h --
+ // Test that this header is replaced with one that has the proper declaration.
+void say_goodbye();
+
+-- m/cgo_hello_replace/hello.c --
+#include <stdio.h>
+
+void say_goodbye() { puts("goodbye cgo\n"); fflush(stdout); }
+
+-- m/overlay/f.go --
+package main
+
+import "m/dir2"
+
+func main() {
+	dir2.PrintMessage()
+}
+-- m/call_asm/main.go --
+package main
+
+func foo() // There will be a "missing function body" error if the assembly file isn't found.
+
+func main() {
+	foo()
+}
+-- m/overlay/dir_g.go --
+package dir
+
+import "fmt"
+
+func PrintMessage() {
+	fmt.Println("hello")
+}
+-- m/overlay/printpath.go --
+package main
+
+import (
+	"fmt"
+	"path/filepath"
+	"runtime"
+)
+
+func main() {
+	_, file, _, _ := runtime.Caller(0)
+
+	// Since https://golang.org/cl/214286, the runtime's debug paths are
+	// slash-separated regardless of platform, so normalize them to system file
+	// paths.
+	fmt.Println(filepath.FromSlash(file))
+}
+-- m/overlay2/printpath2.go --
+package main
+
+import (
+	"fmt"
+	"path/filepath"
+	"runtime"
+)
+
+func init() {
+	_, file, _, _ := runtime.Caller(0)
+	fmt.Println(filepath.FromSlash(file))
+}
+-- m/overlay/dir2_i.go --
+package dir2
+
+import "m/dir"
+
+func printMessage() {
+	dir.PrintMessage()
+}
+-- m/overlay/cgo_hello_quote.go --
+package main
+
+// #include "cgo_header.h"
+import "C"
+
+func main() {
+	C.say_hello()
+}
+-- m/overlay/cgo_hello_angle.go --
+package main
+
+// #include <cgo_header.h>
+import "C"
+
+func main() {
+	C.say_hello()
+}
+-- m/overlay/cgo_head.h --
+void say_hello();
+-- m/overlay/hello.c --
+#include <stdio.h>
+
+void say_hello() { puts("hello cgo\n"); fflush(stdout); }
+-- m/overlay/asm_gc.s --
+// +build gc
+
+TEXT ·foo(SB),0,$0
+	RET
+
+-- m/overlay/asm_gccgo.s --
+// +build gccgo
+
+.globl main.foo
+.text
+main.foo:
+	ret
+
+-- m/overlay/test_cache.go --
+package foo
+
+import "fmt"
+
+func bar() {
+	fmt.Println("something")
+}
+-- m/overlay/test_cache_different.go --
+package foo
+
+import "fmt"
+
+func bar() {
+	fmt.Println("different")
+}
+-- m/cgo_hello_quote/hello.c --
+#include <stdio.h>
+
+void say_hello() { puts("hello cgo\n"); fflush(stdout); }
+-- m/cgo_hello_angle/hello.c --
+#include <stdio.h>
+
+void say_hello() { puts("hello cgo\n"); fflush(stdout); }
+
+-- print_line_comments.go --
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"strings"
+)
+
+func main() {
+	compiledGoFilesArg := os.Args[1]
+	b, err := ioutil.ReadFile(compiledGoFilesArg)
+	if err != nil {
+		log.Fatal(err)
+	}
+	compiledGoFiles := strings.Split(strings.TrimSpace(string(b)), "\n")
+	for _, f := range compiledGoFiles {
+		b, err := ioutil.ReadFile(f)
+		if err != nil {
+			log.Fatal(err)
+		}
+		for _, line := range strings.Split(string(b), "\n") {
+			if strings.HasPrefix(line, "#line") || strings.HasPrefix(line, "//line") {
+				fmt.Println(line)
+			}
+		}
+	}
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_patterns_outside_gopath.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_patterns_outside_gopath.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6a600cfb0a8c5761f59d73141b79b731bffdeef3
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_patterns_outside_gopath.txt
@@ -0,0 +1,36 @@
+# Tests issue #18778
+[short] skip
+
+cd pkgs
+
+env GO111MODULE=off
+go build ./...
+! stdout .
+go test ./...
+stdout '^ok'
+go list ./...
+stdout 'pkgs$'
+stdout 'pkgs/a'
+
+-- pkgs/go.mod --
+module pkgs
+
+go 1.16
+-- pkgs/a.go --
+package x
+-- pkgs/a_test.go --
+package x_test
+
+import "testing"
+
+func TestX(t *testing.T) {
+}
+-- pkgs/a/a.go --
+package a
+-- pkgs/a/a_test.go --
+package a_test
+
+import "testing"
+
+func TestA(t *testing.T) {
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_pgo.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_pgo.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3b0804badd6f5db100d71608ef58e97f08fc45c8
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_pgo.txt
@@ -0,0 +1,74 @@
+# Test go build -pgo flag.
+# Specifically, the build cache handles profile content correctly.
+
+[short] skip 'compiles and links executables'
+
+# build without PGO
+go build triv.go
+
+# build with PGO, should trigger rebuild
+# starting with an empty profile (the compiler accepts it)
+go build -x -pgo=prof -o triv.exe triv.go
+stderr 'compile.*-pgoprofile=.*prof.*triv.go'
+
+# check that PGO appears in build info
+# N.B. we can't start the stdout check with -pgo because the script assumes that
+# if the first arg starts with - it is a grep flag.
+go version -m triv.exe
+stdout 'build\s+-pgo=.*'${/}'prof'
+
+# store the build ID
+go list -export -json=BuildID -pgo=prof triv.go
+stdout '"BuildID":' # check that output actually contains a build ID
+cp stdout list.out
+
+# build again with the same profile, should be cached
+go build -x -pgo=prof -o triv.exe triv.go
+! stderr 'compile.*triv.go'
+
+# check that the build ID is the same
+go list -export -json=BuildID -pgo=prof triv.go
+cmp stdout list.out
+
+# overwrite the prof
+go run overwrite.go
+
+# build again, profile content changed, should trigger rebuild
+go build -n -pgo=prof triv.go
+stderr 'compile.*-pgoprofile=.*prof.*p.go'
+
+# check that the build ID is different
+go list -export -json=BuildID -pgo=prof triv.go
+! cmp stdout list.out
+
+# build with trimpath, buildinfo path should be trimmed
+go build -x -pgo=prof -trimpath -o triv.exe triv.go
+
+# check that path is trimmed
+go version -m triv.exe
+stdout 'build\s+-pgo=prof'
+
+-- prof --
+-- triv.go --
+package main
+func main() {}
+-- overwrite.go --
+package main
+
+import (
+	"os"
+	"runtime/pprof"
+)
+
+func main() {
+	f, err := os.Create("prof")
+	if err != nil {
+		panic(err)
+	}
+	err = pprof.StartCPUProfile(f)
+	if err != nil {
+		panic(err)
+	}
+	pprof.StopCPUProfile()
+	f.Close()
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_pgo_auto.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_pgo_auto.txt
new file mode 100644
index 0000000000000000000000000000000000000000..509be0d5c601f72c12a33aa5b57d25c4b0db1748
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_pgo_auto.txt
@@ -0,0 +1,97 @@
+# Test go build -pgo=auto flag.
+
+[short] skip 'compiles and links executables'
+
+# use default.pgo for a single main package
+go build -n -pgo=auto -o a1.exe ./a/a1
+stderr 'compile.*-pgoprofile=.*default\.pgo.*a1.go'
+
+# check that pgo applied to dependencies
+stderr 'compile.*-p test/dep.*-pgoprofile=.*default\.pgo'
+
+# check that pgo appears in build info
+# N.B. we can't start the stdout check with -pgo because the script assumes that
+# if the first arg starts with - it is a grep flag.
+stderr 'build\\t-pgo=.*default\.pgo'
+
+# check also that -pgo appears with the other flags, before non-flag settings
+! stderr 'build\\t[A-Za-z].*build\\t-pgo'
+
+# use default.pgo for ... with a single main package
+go build -n -pgo=auto ./a/...
+stderr 'compile.*-pgoprofile=.*default\.pgo.*a1.go'
+
+# check that pgo appears in build info
+stderr 'build\\t-pgo=.*default\.pgo'
+
+# build succeeds without PGO when default.pgo file is absent
+go build -n -pgo=auto -o nopgo.exe ./nopgo
+stderr 'compile.*nopgo.go'
+! stderr 'compile.*-pgoprofile'
+
+# check that pgo doesn't appear in build info
+! stderr 'build\\t-pgo='
+
+# other build-related commands
+go install -a -n -pgo=auto ./a/a1
+stderr 'compile.*-pgoprofile=.*default\.pgo.*a1.go'
+
+go run -a -n -pgo=auto ./a/a1
+stderr 'compile.*-pgoprofile=.*default\.pgo.*a1.go'
+
+go test -a -n -pgo=auto ./a/a1
+stderr 'compile.*-pgoprofile=.*default\.pgo.*a1.go.*a1_test.go'
+stderr 'compile.*-pgoprofile=.*default\.pgo.*external_test.go'
+
+# go list commands should succeed as usual
+go list -pgo=auto ./a/a1
+
+go list -test -pgo=auto ./a/a1
+
+go list -deps -pgo=auto ./a/a1
+
+# -pgo=auto is the default. Commands without explicit -pgo=auto
+# should work as -pgo=auto.
+go build -a -n -o a1.exe ./a/a1
+stderr 'compile.*-pgoprofile=.*default\.pgo.*a1.go'
+stderr 'compile.*-p test/dep.*-pgoprofile=.*default\.pgo'
+
+# check that pgo appears in build info
+stderr 'build\\t-pgo=.*default\.pgo'
+
+go build -a -n -o nopgo.exe ./nopgo
+stderr 'compile.*nopgo.go'
+! stderr 'compile.*-pgoprofile'
+
+# check that pgo doesn't appear in build info
+! stderr 'build\\t-pgo='
+
+# -pgo=off should turn off PGO.
+go build -a -n -pgo=off -o a1.exe ./a/a1
+stderr 'compile.*a1.go'
+! stderr 'compile.*-pgoprofile'
+
+# check that pgo doesn't appear in build info
+! stderr 'build\\t-pgo='
+
+-- go.mod --
+module test
+go 1.20
+-- a/a1/a1.go --
+package main
+import _ "test/dep"
+func main() {}
+-- a/a1/a1_test.go --
+package main
+import "testing"
+func TestA(*testing.T) {}
+-- a/a1/external_test.go --
+package main_test
+import "testing"
+func TestExternal(*testing.T) {}
+-- a/a1/default.pgo --
+-- nopgo/nopgo.go --
+package main
+func main() {}
+-- dep/dep.go --
+package dep
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_pgo_auto_multi.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_pgo_auto_multi.txt
new file mode 100644
index 0000000000000000000000000000000000000000..991b72ce850b92304d9f000b92d9d5c0e063c09e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_pgo_auto_multi.txt
@@ -0,0 +1,109 @@
+# Test go build -pgo=auto flag with multiple main packages.
+
+go install -a -n -pgo=auto ./a ./b ./nopgo
+
+# a/default.pgo applies to package a and (transitive)
+# dependencies.
+stderr 'compile.*-pgoprofile=.*a(/|\\\\)default\.pgo.*a(/|\\\\)a\.go'
+stderr 'compile.*-pgoprofile=.*a(/|\\\\)default\.pgo.*dep(/|\\\\)dep\.go'
+stderr 'compile.*-pgoprofile=.*a(/|\\\\)default\.pgo.*dep2(/|\\\\)dep2\.go'
+stderr -count=1 'compile.*-pgoprofile=.*a(/|\\\\)default\.pgo.*dep3(/|\\\\)dep3\.go'
+
+# b/default.pgo applies to package b and (transitive)
+# dependencies.
+stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*b(/|\\\\)b\.go'
+stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*dep(/|\\\\)dep\.go'
+stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*dep2(/|\\\\)dep2\.go'
+stderr -count=1 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*dep3(/|\\\\)dep3\.go'
+
+# nopgo should be built without PGO.
+! stderr 'compile.*-pgoprofile=.*nopgo(/|\\\\)nopgo\.go'
+
+# Dependencies should also be built without PGO.
+# Here we want to match a compile action without -pgoprofile,
+# by matching 3 occurrences of "compile dep.go", among which
+# 2 of them have -pgoprofile (therefore one without).
+stderr -count=3 'compile.*dep(/|\\\\)dep.go'
+stderr -count=2 'compile.*-pgoprofile=.*dep(/|\\\\)dep\.go'
+
+stderr -count=3 'compile.*dep2(/|\\\\)dep2.go'
+stderr -count=2 'compile.*-pgoprofile=.*dep2(/|\\\\)dep2\.go'
+
+stderr -count=3 'compile.*dep3(/|\\\\)dep3.go'
+stderr -count=2 'compile.*-pgoprofile=.*dep3(/|\\\\)dep3\.go'
+
+# check that pgo appears or not in build info as expected
+stderr 'path\\ttest/a\\n.*build\\t-pgo=.*a(/|\\\\)default\.pgo'
+stderr 'path\\ttest/b\\n.*build\\t-pgo=.*b(/|\\\\)default\.pgo'
+! stderr 'path\\ttest/nopgo\\n.*build\\t-pgo='
+
+# go test works the same way
+go test -a -n -pgo=auto ./a ./b ./nopgo
+stderr 'compile.*-pgoprofile=.*a(/|\\\\)default\.pgo.*a(/|\\\\)a_test\.go'
+stderr 'compile.*-pgoprofile=.*a(/|\\\\)default\.pgo.*dep(/|\\\\)dep\.go'
+stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*b(/|\\\\)b_test\.go'
+stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*dep(/|\\\\)dep\.go'
+! stderr 'compile.*-pgoprofile=.*nopgo(/|\\\\)nopgo_test\.go'
+
+# test-only dependencies also have profiles attached
+stderr 'compile.*-pgoprofile=.*a(/|\\\\)default\.pgo.*testdep(/|\\\\)testdep\.go'
+stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*testdep(/|\\\\)testdep\.go'
+stderr 'compile.*-pgoprofile=.*a(/|\\\\)default\.pgo.*testdep2(/|\\\\)testdep2\.go'
+stderr 'compile.*-pgoprofile=.*b(/|\\\\)default\.pgo.*testdep2(/|\\\\)testdep2\.go'
+
+# go list -deps prints packages built multiple times.
+go list -pgo=auto -deps ./a ./b ./nopgo
+stdout 'test/dep \[test/a\]'
+stdout 'test/dep \[test/b\]'
+stdout 'test/dep$'
+
+# Here we have 3 main packages, a, b, and nopgo, where a and b each has
+# its own default.pgo profile, and nopgo has none.
+# All 3 main packages import dep and dep2, both of which then import dep3
+# (a diamond-shape import graph).
+-- go.mod --
+module test
+go 1.20
+-- a/a.go --
+package main
+import _ "test/dep"
+import _ "test/dep2"
+func main() {}
+-- a/a_test.go --
+package main
+import "testing"
+import _ "test/testdep"
+func TestA(*testing.T) {}
+-- a/default.pgo --
+-- b/b.go --
+package main
+import _ "test/dep"
+import _ "test/dep2"
+func main() {}
+-- b/b_test.go --
+package main
+import "testing"
+import _ "test/testdep"
+func TestB(*testing.T) {}
+-- b/default.pgo --
+-- nopgo/nopgo.go --
+package main
+import _ "test/dep"
+import _ "test/dep2"
+func main() {}
+-- nopgo/nopgo_test.go --
+package main
+import "testing"
+func TestNopgo(*testing.T) {}
+-- dep/dep.go --
+package dep
+import _ "test/dep3"
+-- dep2/dep2.go --
+package dep2
+-- dep3/dep3.go --
+package dep3
+-- testdep/testdep.go --
+package testdep
+import _ "test/testdep2"
+-- testdep2/testdep2.go --
+package testdep2
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_pie_race.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_pie_race.txt
new file mode 100644
index 0000000000000000000000000000000000000000..39bea0521fcd96e7fd5917a8510f447cc575c73c
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_pie_race.txt
@@ -0,0 +1,30 @@
+# go build -buildmode=pie -race main.go on Darwin should work without errors
+
+[!race] skip 'test requires race detector support'
+
+[!GOOS:darwin] ! go build -buildmode=pie -race
+[!GOOS:darwin] stderr '^-buildmode=pie not supported when -race is enabled on '$GOOS'/'$GOARCH'$'
+[!GOOS:darwin] stop 'not testing -buildmode=pie -race on platform that does not support it'
+
+go build -buildmode=pie -race bytes
+! stderr .
+
+[short] stop 'not linking a binary in -short mode'
+
+go build -buildmode=pie -race main.go
+! stderr .
+exec ./main
+stdout 'Hello, 世界'
+
+-- go.mod --
+module m
+
+go 1.21
+-- main.go --
+package main
+
+import "fmt"
+
+func main() {
+	fmt.Println("Hello, 世界")
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_plugin_non_main.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_plugin_non_main.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e0bbbefb19418ba922be4c877b2d398d3da89dd4
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_plugin_non_main.txt
@@ -0,0 +1,13 @@
+# Plugins are not supported on all platforms.
+[!buildmode:plugin] skip
+
+go build -n testdep
+! go build -buildmode=plugin testdep
+stderr '-buildmode=plugin requires exactly one main package'
+
+-- go.mod --
+module testdep
+
+go 1.16
+-- testdep.go --
+package p
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_plugin_reproducible.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_plugin_reproducible.txt
new file mode 100644
index 0000000000000000000000000000000000000000..aa489df728108eab0fa4c2f5619bf6ac787619eb
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_plugin_reproducible.txt
@@ -0,0 +1,18 @@
+[!buildmode:plugin] skip
+[short] skip
+
+# This test has problems when run on the LUCI darwin longtest builder,
+# which uses a more contemporary Xcode version that is unfriendly to
+# reproducible builds (see issue #64947 for the gory details). Note
+# that individual developers running "go test cmd/go" on Darwin may
+# still run into failures depending on their Xcode version.
+[GOOS:darwin] [go-builder] skip
+
+go build -trimpath -buildvcs=false -buildmode=plugin -o a.so main.go
+go build -trimpath -buildvcs=false -buildmode=plugin -o b.so main.go
+cmp -q a.so b.so
+
+-- main.go --
+package main
+
+func main() {}
\ No newline at end of file
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_relative_pkgdir.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_relative_pkgdir.txt
new file mode 100644
index 0000000000000000000000000000000000000000..57f18eefcec45a11a9e6aea3a5b0128f4d7e7c76
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_relative_pkgdir.txt
@@ -0,0 +1,9 @@
+env GO111MODULE=off
+
+# Regression test for golang.org/issue/21309: accept relative -pkgdir argument.
+
+[short] skip
+
+mkdir $WORK/gocache
+env GOCACHE=$WORK/gocache
+go build -pkgdir=. runtime
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_relative_tmpdir.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_relative_tmpdir.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ea7412e1168e5154ca5ea71062464eabd0c25e63
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_relative_tmpdir.txt
@@ -0,0 +1,18 @@
+env GO111MODULE=off
+
+# If GOTMPDIR is relative, 'go build' should derive an absolute $WORK directory.
+cd $WORK
+mkdir tmp
+env GOTMPDIR=tmp
+go build -work a
+stderr 'WORK='$WORK
+
+# Similarly if TMP/TMPDIR is relative.
+env GOTMPDIR=
+env TMP=tmp    # Windows
+env TMPDIR=tmp # Unix
+go build -work a
+stderr 'WORK='$WORK
+
+-- a/a.go --
+package a
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_runtime_gcflags.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_runtime_gcflags.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c87e480911933a6211fd31991cbf17ec2e75da44
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_runtime_gcflags.txt
@@ -0,0 +1,11 @@
+env GO111MODULE=off
+[short] skip # rebuilds all of std
+
+# Set up fresh GOCACHE.
+env GOCACHE=$WORK/gocache
+mkdir $GOCACHE
+
+# Verify the standard library (specifically runtime/internal/atomic) can be
+# built with -gcflags when -n is given. See golang.org/issue/29346.
+go build -n -gcflags=all='-l' std
+stderr 'compile.* runtime/internal/atomic .* -l'
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_shorten_pkg.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_shorten_pkg.txt
new file mode 100644
index 0000000000000000000000000000000000000000..38672b65df758fd38e88fe71b2ec3200a2e83bb1
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_shorten_pkg.txt
@@ -0,0 +1,81 @@
+[short] skip
+
+# This test may go away when the loopvar experiment goes away.
+# Accurate reporting of notable loops in the presence of inlining
+# can create warnings in sibling directories, and it's nice if those
+# can be trimmed like subdirectory paths are.
+
+env GOEXPERIMENT=loopvar
+go build -gcflags=inlines/a=-d=loopvar=2 .
+stderr ^\.[\\/]b[\\/]b\.go:12:6:.*loop.inlined.into.a[\\/]a\.go
+stderr ^\.[\\/]b[\\/]b\.go:12:9:.*loop.inlined.into.a[\\/]a\.go
+
+-- go.mod --
+module inlines
+
+go 1.21
+-- a/a.go --
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package a
+
+import "inlines/b"
+
+func F() []*int {
+	var s []*int
+	for i := 0; i < 10; i++ {
+		s = append(s, &i)
+	}
+	return s
+}
+
+func Fb() []*int {
+	bf, _ := b.F()
+	return bf
+}
+-- b/b.go --
+package b
+
+var slice = []int{1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024}
+
+func F() ([]*int, []*int) {
+	return g()
+}
+
+func g() ([]*int, []*int) {
+	var s []*int
+	var t []*int
+	for i, j := range slice {
+		s = append(s, &i)
+		t = append(t, &j)
+	}
+	return s[:len(s)-1], t
+}
+-- main.go --
+package main
+
+import (
+	"fmt"
+	"inlines/a"
+	"inlines/b"
+)
+
+func sum(s []*int) int {
+	sum := 0
+	for _, pi := range s {
+		sum += *pi
+	}
+	return sum
+}
+
+func main() {
+	af := a.F()
+	bf, _ := b.F()
+	abf := a.Fb()
+
+	saf, sbf, sabf := sum(af), sum(bf), sum(abf)
+
+	fmt.Printf("af, bf, abf sums = %d, %d, %d\n", saf, sbf, sabf)
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_single_error.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_single_error.txt
new file mode 100644
index 0000000000000000000000000000000000000000..241cdb954ba7664ce796ea16c761d23a59dfe45c
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_single_error.txt
@@ -0,0 +1,18 @@
+# go test ./... with a bad package should report the error once (#44624).
+! go test ./...
+stderr -count=1 undefined
+
+-- go.mod --
+module example.com
+
+go 1.18
+-- a/a.go --
+package a
+
+import "example.com/b"
+-- b/b.go --
+package b
+
+var X = Y
+-- b/b_test.go --
+package b
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_static.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_static.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7db90a1600f398ce05912e9217e93050fb92e60c
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_static.txt
@@ -0,0 +1,44 @@
+[short] skip 'links and runs binaries'
+
+# This test requires external linking. Assume that if cgo is supported
+# then external linking works.
+[!cgo] skip 'requires a C linker'
+
+# Only run on Unix systems.
+[GOOS:windows] skip
+[GOOS:plan9] skip
+
+# Ordinary build should work.
+go build
+exec ./hello
+stdout Hello
+
+# Building with -linkmode=external should not say anything about
+# runtime/cgo (issue #31544).
+go build -ldflags=-linkmode=external
+! stderr runtime/cgo
+exec ./hello
+stdout Hello
+
+# Some targets don't support -static
+[GOOS:darwin] skip 'no static linking on Darwin'
+[GOOS:solaris] skip 'no static linking on Solaris'
+
+# Building with -linkmode=external -extldflags=-static should work.
+go build -ldflags='-linkmode=external -extldflags=-static'
+! stderr runtime/cgo
+exec ./hello
+stdout Hello
+
+-- go.mod --
+module hello
+
+go 1.20
+-- hello.go --
+package main
+
+import "fmt"
+
+func main() {
+	fmt.Println("Hello, world")
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_tag_goexperiment.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_tag_goexperiment.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bee218f4c1fff290b993cf68930a4753ecbce708
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_tag_goexperiment.txt
@@ -0,0 +1,31 @@
+[short] skip
+# Reset all experiments so fieldtrack is definitely off.
+env GOEXPERIMENT=none
+go run m
+stderr 'fieldtrack off'
+# Turn fieldtrack on.
+env GOEXPERIMENT=none,fieldtrack
+go run m
+stderr 'fieldtrack on'
+
+-- ft_off.go --
+// +build !goexperiment.fieldtrack
+
+package main
+
+func main() {
+	println("fieldtrack off")
+}
+
+-- ft_on.go --
+// +build goexperiment.fieldtrack
+
+package main
+
+func main() {
+	println("fieldtrack on")
+}
+
+-- go.mod --
+module m
+go 1.14
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_tags_no_comma.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_tags_no_comma.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a14a200b676fc85ddd0a2793c6dbba7dbfb71dbc
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_tags_no_comma.txt
@@ -0,0 +1,4 @@
+[compiler:gccgo] skip 'gccgo has no standard packages'
+go build -tags 'tag1 tag2' math
+! go build -tags 'tag1,tag2 tag3' math
+stderr 'space-separated list contains comma'
\ No newline at end of file
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_test_only.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_test_only.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8693a80a08e67539a7e331870cd846cb045d57a8
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_test_only.txt
@@ -0,0 +1,19 @@
+# Named explicitly, test-only packages should be reported as
+# unbuildable/uninstallable, even if there is a wildcard also matching.
+! go build m/testonly m/testonly...
+stderr 'no non-test Go files in'
+! go install ./testonly
+stderr 'no non-test Go files in'
+
+# Named through a wildcard, the test-only packages should be silently ignored.
+go build m/testonly...
+go install ./testonly...
+
+-- go.mod --
+module m
+
+go 1.16
+-- testonly/t_test.go --
+package testonly
+-- testonly2/t.go --
+package testonly2
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_trimpath.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_trimpath.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2a2aa2080a14113a003bce14132aeda41db5b9c9
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_trimpath.txt
@@ -0,0 +1,164 @@
+[short] skip
+
+# If GOROOT_FINAL is set, 'go build -trimpath' bakes that into the resulting
+# binary instead of GOROOT. Explicitly unset it here.
+env GOROOT_FINAL=
+
+# Set up two identical directories that can be used as GOPATH.
+env GO111MODULE=on
+mkdir $WORK/a/src/paths $WORK/b/src/paths
+cp paths.go $WORK/a/src/paths
+cp paths.go $WORK/b/src/paths
+cp overlay.json $WORK/a/src/paths
+cp overlay.json $WORK/b/src/paths
+cp go.mod $WORK/a/src/paths/
+cp go.mod $WORK/b/src/paths/
+
+
+# A binary built without -trimpath should contain the module root dir
+# and GOROOT for debugging and stack traces.
+cd $WORK/a/src/paths
+go build -o $WORK/paths-dbg.exe .
+exec $WORK/paths-dbg.exe $WORK/paths-dbg.exe
+stdout 'binary contains module root: true'
+stdout 'binary contains GOROOT: true'
+
+# A binary built with -trimpath should not contain the current workspace
+# or GOROOT.
+go build -trimpath -o $WORK/paths-a.exe .
+exec $WORK/paths-a.exe $WORK/paths-a.exe
+stdout 'binary contains module root: false'
+stdout 'binary contains GOROOT: false'
+
+# A binary from an external module built with -trimpath should not contain
+# the current workspace or GOROOT.
+go get rsc.io/fortune
+go install -trimpath rsc.io/fortune
+exec $WORK/paths-a.exe $GOPATH/bin/fortune$GOEXE
+stdout 'binary contains module root: false'
+stdout 'binary contains GOROOT: false'
+go mod edit -droprequire rsc.io/fortune
+
+# Two binaries built from identical packages in different directories
+# should be identical.
+cd $WORK/b/src/paths
+go build -trimpath -o $WORK/paths-b.exe
+cmp -q $WORK/paths-a.exe $WORK/paths-b.exe
+
+
+# Same sequence of tests but with overlays.
+# A binary built without -trimpath should contain the module root dir
+# and GOROOT for debugging and stack traces.
+cd $WORK/a/src/paths
+go build -overlay overlay.json -o $WORK/paths-dbg.exe ./overlaydir
+exec $WORK/paths-dbg.exe $WORK/paths-dbg.exe
+stdout 'binary contains module root: true'
+stdout 'binary contains GOROOT: true'
+
+# A binary built with -trimpath should not contain the current workspace
+# or GOROOT.
+go build -overlay overlay.json -trimpath -o $WORK/paths-a.exe ./overlaydir
+exec $WORK/paths-a.exe $WORK/paths-a.exe
+stdout 'binary contains module root: false'
+stdout 'binary contains GOROOT: false'
+
+# Two binaries built from identical packages in different directories
+# should be identical.
+cd $WORK/b/src/paths
+go build -overlay overlay.json -trimpath -o $WORK/paths-b.exe ./overlaydir
+cmp -q $WORK/paths-a.exe $WORK/paths-b.exe
+
+
+# Same sequence of tests but in GOPATH mode.
+# A binary built without -trimpath should contain GOPATH and GOROOT.
+env GO111MODULE=off
+cd $WORK
+env GOPATH=$WORK/a
+go build -o paths-dbg.exe paths
+exec ./paths-dbg.exe paths-dbg.exe
+stdout 'binary contains GOPATH: true'
+stdout 'binary contains GOROOT: true'
+
+# A binary built with -trimpath should not contain GOPATH or GOROOT.
+go build -trimpath -o paths-a.exe paths
+exec ./paths-a.exe paths-a.exe
+stdout 'binary contains GOPATH: false'
+stdout 'binary contains GOROOT: false'
+
+# Two binaries built from identical packages in different GOPATH roots
+# should be identical.
+env GOPATH=$WORK/b
+go build -trimpath -o paths-b.exe paths
+cmp -q paths-a.exe paths-b.exe
+
+
+# Same sequence of tests but with gccgo.
+# gccgo does not support builds in module mode.
+[!exec:gccgo] stop
+[cross] stop  # gccgo can't necessarily cross-compile
+env GOPATH=$WORK/a
+
+# A binary built with gccgo without -trimpath should contain the current
+# GOPATH and GOROOT.
+go build -compiler=gccgo -o paths-dbg.exe paths
+exec ./paths-dbg.exe paths-dbg.exe
+stdout 'binary contains GOPATH: true'
+stdout 'binary contains GOROOT: false' # gccgo doesn't load std from GOROOT.
+
+# A binary built with gccgo with -trimpath should not contain GOPATH or GOROOT.
+go build -compiler=gccgo -trimpath -o paths-a.exe paths
+exec ./paths-a.exe paths-a.exe
+stdout 'binary contains GOPATH: false'
+stdout 'binary contains GOROOT: false'
+
+# Two binaries built from identical packages in different directories
+# should be identical.
+env GOPATH=$WORK/b
+go build -compiler=gccgo -trimpath -o paths-b.exe paths
+cmp -q paths-a.exe paths-b.exe
+
+-- paths.go --
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+)
+
+func main() {
+	exe := os.Args[1]
+	data, err := ioutil.ReadFile(exe)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	if os.Getenv("GO111MODULE") == "on" {
+		out, err := exec.Command("go", "env", "GOMOD").Output()
+		if err != nil {
+			log.Fatal(err)
+		}
+		modRoot := filepath.Dir(strings.TrimSpace(string(out)))
+		check(data, "module root", modRoot)
+	} else {
+		check(data, "GOPATH", os.Getenv("GOPATH"))
+	}
+	check(data, "GOROOT", os.Getenv("GOROOT"))
+}
+
+func check(data []byte, desc, dir string) {
+	containsDir := bytes.Contains(data, []byte(dir))
+	containsSlashDir := bytes.Contains(data, []byte(filepath.ToSlash(dir)))
+	fmt.Printf("binary contains %s: %v\n", desc, containsDir || containsSlashDir)
+}
+-- overlay.json --
+{ "Replace": { "overlaydir/paths.go": "paths.go" } }
+-- go.mod --
+module paths
+
+go 1.14
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_trimpath_cgo.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_trimpath_cgo.txt
new file mode 100644
index 0000000000000000000000000000000000000000..528982442d2c9fed3e70f747ff3ebdeb5f600e71
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_trimpath_cgo.txt
@@ -0,0 +1,184 @@
+# This test builds a cgo binary and verifies the source directory path
+# does not appear in the binary, either literally or in compressed DWARF.
+# TODO(golang.org/issue/36072): ideally we should build a binary from identical
+# sources in different directories and verify the binary and all intermediate
+# files are identical.
+
+[short] skip
+[!cgo] skip
+
+# Check that the source path appears when -trimpath is not used.
+go build -o hello.exe .
+grep -q gopath[/\\]src hello.exe
+go run ./list-dwarf hello.exe
+stdout gopath[/\\]src
+
+# Check that the source path does not appear when -trimpath is used.
+[GOOS:aix] stop # can't inspect XCOFF binaries
+go build -trimpath -o hello.exe .
+! grep -q gopath[/\\]src hello.exe
+go run ./list-dwarf hello.exe
+! stdout gopath/src
+
+
+# Do the above, with the cgo (but not .c) sources in an overlay
+# Check that the source path appears when -trimpath is not used.
+mkdir $WORK/overlay
+cp hello.go $WORK/overlay/hello.go
+mkdir hello_overlay
+cp hello.c hello_overlay/hello.c
+go build -overlay overlay.json -o hello_overlay.exe ./hello_overlay
+grep -q gopath[/\\]src hello_overlay.exe
+! grep -q $WORK[/\\]overlay hello_overlay.exe
+go run ./list-dwarf hello_overlay.exe
+stdout gopath[/\\]src
+! stdout $WORK[/\\]overlay
+
+# Check that the source path does not appear when -trimpath is used.
+go build -overlay overlay.json -trimpath -o hello_overlay.exe ./hello_overlay
+! grep -q gopath[/\\]src hello_overlay.exe
+! grep -q $WORK[/\\]overlay hello_overlay.exe
+go run ./list-dwarf hello_overlay.exe
+! stdout gopath/src
+! stdout $WORK[/\\]overlay
+
+-- go.mod --
+module m
+
+go 1.14
+-- overlay.json --
+{
+	"Replace": {
+		"hello_overlay/hello.go": "../../overlay/hello.go"
+	}
+}
+-- hello.c --
+#include 
+
+void say_hello() { puts("Hello, world!\n"); }
+
+-- hello.go --
+package main
+
+// void say_hello();
+import "C"
+
+func main() {
+	C.say_hello()
+}
+
+-- list-dwarf/list-dwarf.go --
+package main
+
+import (
+	"debug/dwarf"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"sort"
+)
+
+func main() {
+	files, err := run(os.Args[1])
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, file := range files {
+		fmt.Println(file)
+	}
+}
+
+func run(exePath string) ([]string, error) {
+	dwarfData, err := readDWARF(exePath)
+	if err != nil {
+		return nil, err
+	}
+
+	dwarfReader := dwarfData.Reader()
+	files := make(map[string]bool)
+	for {
+		e, err := dwarfReader.Next()
+		if err != nil {
+			return nil, err
+		}
+		if e == nil {
+			break
+		}
+		lr, err := dwarfData.LineReader(e)
+		if err != nil {
+			return nil, err
+		}
+		if lr == nil {
+			continue
+		}
+
+		var le dwarf.LineEntry
+		for {
+			if err := lr.Next(&le); err != nil {
+				if err == io.EOF {
+					break
+				}
+				return nil, err
+			}
+			files[le.File.Name] = true
+		}
+	}
+
+	sortedFiles := make([]string, 0, len(files))
+	for file := range files {
+		sortedFiles = append(sortedFiles, file)
+	}
+	sort.Strings(sortedFiles)
+	return sortedFiles, nil
+}
+-- list-dwarf/read_darwin.go --
+package main
+
+import (
+	"debug/dwarf"
+	"debug/macho"
+)
+
+func readDWARF(exePath string) (*dwarf.Data, error) {
+	machoFile, err := macho.Open(exePath)
+	if err != nil {
+		return nil, err
+	}
+	defer machoFile.Close()
+	return machoFile.DWARF()
+}
+-- list-dwarf/read_elf.go --
+// +build android dragonfly freebsd illumos linux netbsd openbsd solaris
+
+package main
+
+import (
+	"debug/dwarf"
+	"debug/elf"
+)
+
+func readDWARF(exePath string) (*dwarf.Data, error) {
+	elfFile, err := elf.Open(exePath)
+	if err != nil {
+		return nil, err
+	}
+	defer elfFile.Close()
+	return elfFile.DWARF()
+}
+-- list-dwarf/read_windows.go --
+package main
+
+import (
+	"debug/dwarf"
+	"debug/pe"
+)
+
+func readDWARF(exePath string) (*dwarf.Data, error) {
+	peFile, err := pe.Open(exePath)
+	if err != nil {
+		return nil, err
+	}
+	defer peFile.Close()
+	return peFile.DWARF()
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_trimpath_goroot.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_trimpath_goroot.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a26cfd23be488cdf90afba4b270e9ad7469483c2
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_trimpath_goroot.txt
@@ -0,0 +1,103 @@
+# Regression test for https://go.dev/issue/51461 and https://go.dev/issue/51483.
+#
+# When built with -trimpath, runtime.GOROOT() returned the bogus string "go"
+# if GOROOT was not set explicitly in the environment.
+# It should instead return the empty string, since we know that we don't
+# have a valid path to return.
+#
+# TODO(#51483): when runtime.GOROOT() returns the empty string,
+# go/build should default to 'go env GOROOT' instead.
+
+env GOROOT_FINAL=
+
+[trimpath] env GOROOT=
+[trimpath] ! go env GOROOT
+[trimpath] stderr '^go: cannot find GOROOT directory: ''go'' binary is trimmed and GOROOT is not set$'
+[trimpath] env GOROOT=$TESTGO_GOROOT
+
+[short] stop
+
+# With GOROOT still set but GOROOT_FINAL unset, 'go build' and 'go test -c'
+# should cause runtime.GOROOT() to report either the correct GOROOT
+# (without -trimpath) or no GOROOT at all (with -trimpath).
+
+go build -o example.exe .
+go build -trimpath -o example-trimpath.exe .
+go test -c -o example.test.exe .
+go test -trimpath -c -o example.test-trimpath.exe .
+
+env GOROOT=
+
+exec ./example.exe
+stdout '^GOROOT '$TESTGO_GOROOT'$'
+stdout '^runtime '$TESTGO_GOROOT${/}src${/}runtime'$'
+
+! exec ./example-trimpath.exe
+stdout '^GOROOT $'
+stderr 'cannot find package "runtime" in any of:\n\t\(\$GOROOT not set\)\n\t'$WORK${/}gopath${/}src${/}runtime' \(from \$GOPATH\)\n\z'
+
+exec ./example.test.exe -test.v
+stdout '^GOROOT '$TESTGO_GOROOT'$'
+stdout '^runtime '$TESTGO_GOROOT${/}src${/}runtime'$'
+
+! exec ./example.test-trimpath.exe -test.v
+stdout '^GOROOT $'
+stderr 'cannot find package "runtime" in any of:\n\t\(\$GOROOT not set\)\n\t'$WORK${/}gopath${/}src${/}runtime' \(from \$GOPATH\)$'
+
+# If a correct GOROOT is baked in to the 'go' command itself, 'go run' and
+# 'go test' should not implicitly set GOROOT in the process environment
+# (because that could mask an unexpected production dependency on the GOROOT
+# environment variable), but 'go generate' should (because the generator may
+# reasonably expect to be able to locate the GOROOT for which it is generating
+# code).
+
+[trimpath] stop
+[mismatched-goroot] stop
+
+! go run -trimpath .
+stdout '^GOROOT $'
+stderr 'cannot find package "runtime" in any of:\n\t\(\$GOROOT not set\)\n\t'$WORK${/}gopath${/}src${/}runtime' \(from \$GOPATH\)\nexit status 1\n\z'
+
+! go test -trimpath -v .
+stdout '^GOROOT $'
+stdout 'cannot find package "runtime" in any of:\n\t\(\$GOROOT not set\)\n\t'$WORK${/}gopath${/}src${/}runtime' \(from \$GOPATH\)$'
+
+env GOFLAGS=-trimpath
+go generate .
+stdout '^GOROOT '$TESTGO_GOROOT'$'
+stdout '^runtime '$TESTGO_GOROOT${/}src${/}runtime'$'
+
+-- go.mod --
+module example
+
+go 1.19
+-- main.go --
+package main
+
+//go:generate go run .
+
+import (
+	"fmt"
+	"go/build"
+	"os"
+	"runtime"
+)
+
+func main() {
+	fmt.Println("GOROOT", runtime.GOROOT())
+
+	p, err := build.Default.Import("runtime", "", build.FindOnly)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	fmt.Println("runtime", p.Dir)
+}
+-- main_test.go --
+package main
+
+import "testing"
+
+func TestMain(*testing.M) {
+	main()
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_unsupported_goos.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_unsupported_goos.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c94d6d252e845734f3965aefad3734a06bd9ffc7
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_unsupported_goos.txt
@@ -0,0 +1,6 @@
+[compiler:gccgo] skip # gccgo assumes cross-compilation is always possible
+
+env GOOS=windwos # intentional misspelling of windows
+
+! go build -n exclude
+stderr 'unsupported GOOS/GOARCH pair'
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_vendor.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_vendor.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f430ff2c3eb888d596b848f5f95ee06d50fe446d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/build_vendor.txt
@@ -0,0 +1,42 @@
+# Build
+env GO111MODULE=off
+go build vend/x
+! stdout .
+! stderr .
+
+-- vend/dir1/dir1.go --
+package dir1
+-- vend/subdir/bad.go --
+package subdir
+
+import _ "r"
+-- vend/subdir/good.go --
+package subdir
+
+import _ "p"
+-- vend/vendor/p/p.go --
+package p
+-- vend/vendor/q/q.go --
+package q
+-- vend/vendor/vend/dir1/dir2/dir2.go --
+package dir2
+-- vend/x/invalid/invalid.go --
+package invalid
+
+import "vend/x/invalid/vendor/foo"
+-- vend/x/vendor/p/p/p.go --
+package p
+
+import _ "notfound"
+-- vend/x/vendor/p/p.go --
+package p
+-- vend/x/vendor/r/r.go --
+package r
+-- vend/x/x.go --
+package x
+
+import _ "p"
+import _ "q"
+import _ "r"
+import _ "vend/dir1"      // not vendored
+import _ "vend/dir1/dir2" // vendored
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cache_unix.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cache_unix.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e11804d39abebdad9bb07f724f5d948346ff3682
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cache_unix.txt
@@ -0,0 +1,37 @@
+env GO111MODULE=off
+
+# Integration test for cache directory calculation (cmd/go/internal/cache).
+
+[GOOS:windows] skip 'windows does not use XDG_CACHE_HOME'
+[GOOS:darwin]  skip 'darwin does not use XDG_CACHE_HOME'
+[GOOS:ios]     skip 'ios does not use XDG_CACHE_HOME'
+[GOOS:plan9]   skip 'plan9 does not use XDG_CACHE_HOME'
+
+mkdir $WORK/gocache
+mkdir $WORK/xdg
+mkdir $WORK/home
+
+# Set GOCACHE, XDG_CACHE_HOME, and HOME.
+env GOCACHE=$WORK/gocache
+env XDG_CACHE_HOME=$WORK/xdg
+env HOME=$WORK/home
+
+# With all three set, we should prefer GOCACHE.
+go env GOCACHE
+stdout $WORK'/gocache$'
+
+# Without GOCACHE, we should prefer XDG_CACHE_HOME over HOME.
+env GOCACHE=
+go env GOCACHE
+stdout $WORK'/xdg/go-build$$'
+
+# With only HOME set, we should use $HOME/.cache.
+env XDG_CACHE_HOME=
+go env GOCACHE
+stdout $WORK'/home/.cache/go-build$'
+
+# With no guidance from the environment, we must disable the cache, but that
+# should not cause commands that do not write to the cache to fail.
+env HOME=
+go env GOCACHE
+stdout 'off'
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cache_vet.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cache_vet.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6689048f5409ae7585305dc25fde853c5fccfb72
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cache_vet.txt
@@ -0,0 +1,22 @@
+env GO111MODULE=off
+
+[short] skip
+[GODEBUG:gocacheverify=1] skip
+[compiler:gccgo] skip  # gccgo has no standard packages
+
+# Start with a clean build cache:
+# test failures may be masked if the cache has just the right entries already.
+env GOCACHE=$WORK/cache
+
+# Run 'go vet os/user' once to warm up the cache.
+go vet os/user
+
+# Check that second vet reuses cgo-derived inputs.
+# The first command could be build instead of vet,
+# except that if the cache is empty and there's a net.a
+# in GOROOT/pkg, the build will not bother to regenerate
+# and cache the cgo outputs, whereas vet always will.
+
+go vet -x os/user
+! stderr '^(clang|gcc)'  # should not have run compiler
+! stderr '[\\/]cgo '     # should not have run cgo
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_asm_error.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_asm_error.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7aaa713e24412c1966b003e3f774888cd9df7dcb
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_asm_error.txt
@@ -0,0 +1,25 @@
+[!cgo] skip
+
+# Test that cgo package can't contain a go assembly file.
+
+# Ensure the build fails and reports that the package has a Go assembly file.
+! go build cgoasm
+stderr 'package using cgo has Go assembly file'
+
+-- go.mod --
+module cgoasm
+
+go 1.16
+-- p.go --
+package p
+
+/*
+// hi
+*/
+import "C"
+
+func F() {}
+-- p.s --
+TEXT asm(SB),$0
+	RET
+
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_bad_directives.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_bad_directives.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7d28171fad47e708df5a9dc9445ad5469bfddb85
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_bad_directives.txt
@@ -0,0 +1,129 @@
+[!cgo] skip
+[short] skip
+
+cp x.go.txt x.go
+
+# Only allow //go:cgo_ldflag .* in cgo-generated code
+[compiler:gc] cp x_gc.go.txt x.go
+[compiler:gc] ! go build x
+[compiler:gc] stderr '//go:cgo_ldflag .* only allowed in cgo-generated code'
+
+# Ignore _* files
+rm x.go
+! go build .
+stderr 'no Go files'
+cp cgo_yy.go.txt _cgo_yy.go
+! go build .
+stderr 'no Go files' #_* files are ignored...
+
+[compiler:gc] ! go build _cgo_yy.go # ... but if forced, the comment is rejected
+# Actually, today there is a separate issue that _ files named
+# on the command line are ignored. Once that is fixed,
+# we want to see the cgo_ldflag error.
+[compiler:gc] stderr '//go:cgo_ldflag only allowed in cgo-generated code|no Go files'
+
+rm _cgo_yy.go
+
+# Reject #cgo CFLAGS: -fplugin=foo.so
+cp x.go.txt x.go
+cp y_fplugin.go.txt y.go
+! go build x
+stderr 'invalid flag in #cgo CFLAGS: -fplugin=foo.so'
+
+# Reject #cgo CFLAGS: -lbar -fplugin=foo.so
+cp y_lbar_fplugin.go.txt y.go
+! go build x
+stderr 'invalid flag in #cgo CFLAGS: -fplugin=foo.so'
+
+# Reject #cgo pkg-config: -foo
+cp y_pkgconfig_dash_foo.txt y.go
+! go build x
+stderr 'invalid pkg-config package name: -foo'
+
+# Reject #cgo pkg-config: @foo
+cp y_pkgconfig_at_foo.txt y.go
+! go build x
+stderr 'invalid pkg-config package name: @foo'
+
+# Reject #cgo CFLAGS: @foo
+cp y_cflags_at_foo.txt y.go
+! go build x
+stderr 'invalid flag in #cgo CFLAGS: @foo'
+
+# Reject #cgo CFLAGS: -D
+cp y_cflags_dash_d.txt y.go
+! go build x
+stderr 'invalid flag in #cgo CFLAGS: -D without argument'
+
+# Note that -I @foo is allowed because we rewrite it into -I /path/to/src/@foo
+# before the check is applied. There's no such rewrite for -D.
+
+# Reject #cgo CFLAGS: -D @foo
+cp y_cflags_dash_d_space_at_foo.txt y.go
+! go build x
+stderr 'invalid flag in #cgo CFLAGS: -D @foo'
+
+# Reject #cgo CFLAGS -D@foo
+cp y_cflags_dash_d_at_foo.txt y.go
+! go build x
+stderr 'invalid flag in #cgo CFLAGS: -D@foo'
+
+# Check for CFLAGS in commands
+env CGO_CFLAGS=-D@foo
+cp y_no_cflags.txt y.go
+go build -n x
+stderr '-D@foo'
+
+-- go.mod --
+module x
+
+go 1.16
+-- x_gc.go.txt --
+package x
+
+//go:cgo_ldflag "-fplugin=foo.so"
+
+import "C"
+-- cgo_yy.go.txt --
+package x
+
+//go:cgo_ldflag "-fplugin=foo.so"
+
+import "C"
+-- x.go.txt --
+package x
+-- y_fplugin.go.txt --
+package x
+// #cgo CFLAGS: -fplugin=foo.so
+import "C"
+-- y_lbar_fplugin.go.txt --
+package x
+// #cgo CFLAGS: -Ibar -fplugin=foo.so
+import "C"
+-- y_pkgconfig_dash_foo.txt --
+package x
+// #cgo pkg-config: -foo
+import "C"
+-- y_pkgconfig_at_foo.txt --
+package x
+// #cgo pkg-config: @foo
+import "C"
+-- y_cflags_at_foo.txt --
+package x
+// #cgo CFLAGS: @foo
+import "C"
+-- y_cflags_dash_d.txt --
+package x
+// #cgo CFLAGS: -D
+import "C"
+-- y_cflags_dash_d_space_at_foo.txt --
+package x
+// #cgo CFLAGS: -D @foo
+import "C"
+-- y_cflags_dash_d_at_foo.txt --
+package x
+// #cgo CFLAGS: -D@foo
+import "C"
+-- y_no_cflags.txt --
+package x
+import "C"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_badmethod_issue57926.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_badmethod_issue57926.txt
new file mode 100644
index 0000000000000000000000000000000000000000..81ef850cb960ca45733ef2f2016f4f75fa24982c
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_badmethod_issue57926.txt
@@ -0,0 +1,31 @@
+[short] skip
+[!cgo] skip
+
+# Test that cgo rejects attempts to declare methods
+# on the types C.T or *C.T; see issue #57926.
+
+! go build
+stderr 'cannot define new methods on non-local type C.T'
+stderr 'cannot define new methods on non-local type \*C.T'
+! stderr 'Alias'
+
+-- go.mod --
+module example.com
+go 1.12
+
+-- a.go --
+package a
+
+/*
+typedef int T;
+*/
+import "C"
+
+func (C.T) f() {}
+func (recv *C.T) g() {}
+
+// The check is more education than enforcement,
+// and is easily defeated using a type alias.
+type Alias = C.T
+func (Alias) h() {}
+func (*Alias) i() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_depends_on_syscall.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_depends_on_syscall.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bd4777c821cd4a3bdc25177b7e5d3fab0348b2b7
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_depends_on_syscall.txt
@@ -0,0 +1,15 @@
+[!cgo] skip
+[!race] skip
+
+go list -race -deps foo
+stdout syscall
+
+-- go.mod --
+module foo
+
+go 1.16
+-- foo.go --
+package foo
+
+// #include 
+import "C"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_flag_contains_space.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_flag_contains_space.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a3372bbbc7672622704d58252d0751c0b30cf554
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_flag_contains_space.txt
@@ -0,0 +1,16 @@
+[short] skip
+[!cgo] skip
+
+env GOCACHE=$WORK/gocache  # Looking for compile flags, so need a clean cache.
+go build -x -n main.go
+stderr '"-I[^"]+c flags"' # find quoted c flags
+! stderr '"-I[^"]+c flags".*"-I[^"]+c flags"' # don't find too many quoted c flags per line
+stderr '"-L[^"]+ld flags"' # find quoted ld flags
+! stderr '"-L[^"]+c flags".*"-L[^"]+c flags"' # don't find too many quoted ld flags per line
+
+-- main.go --
+package main
+// #cgo CFLAGS: -I"c flags"
+// #cgo LDFLAGS: -L"ld flags"
+import "C"
+func main() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_path.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_path.txt
new file mode 100644
index 0000000000000000000000000000000000000000..be23893df59863ddf5e20cd0ff4c50bdcd51316e
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_path.txt
@@ -0,0 +1,41 @@
+[!cgo] skip
+
+# Require that CC is something that requires a PATH lookup.
+# Normally, the default is gcc or clang, but if CC was set during make.bash,
+# that becomes the default.
+[!cc:clang] [!cc:gcc] skip 'C compiler is not gcc or clang'
+
+env GOCACHE=$WORK/gocache  # Looking for compile flags, so need a clean cache.
+[!GOOS:windows] env PATH=.:$PATH
+[!GOOS:windows] chmod 0755 p/gcc p/clang
+[!GOOS:windows] exists -exec p/gcc p/clang
+[GOOS:windows] exists -exec p/gcc.bat p/clang.bat
+! exists p/bug.txt
+! go build -x
+stderr '^cgo: C compiler "(clang|gcc)" not found: exec: "(clang|gcc)": cannot run executable found relative to current directory'
+! exists p/bug.txt
+
+-- go.mod --
+module m
+
+-- m.go --
+package m
+
+import _ "m/p"
+
+-- p/p.go --
+package p
+
+// #define X 1
+import "C"
+
+-- p/gcc --
+#!/bin/sh
+echo ran gcc >bug.txt
+-- p/clang --
+#!/bin/sh
+echo ran clang >bug.txt
+-- p/gcc.bat --
+echo ran gcc >bug.txt
+-- p/clang.bat --
+echo ran clang >bug.txt
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_path_space.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_path_space.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1a789022a5bcd82d367ee4b5d7a770166023b06d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_path_space.txt
@@ -0,0 +1,56 @@
+# Check that if the PATH directory containing the C compiler has a space,
+# we can still use that compiler with cgo.
+# Verifies #43808.
+[!cgo] skip
+
+# Set CC explicitly to something that requires a PATH lookup.
+# Normally, the default is gcc or clang, but if CC was set during make.bash,
+# that becomes the default.
+[exec:clang] env CC=clang
+[exec:gcc] env CC=gcc
+[!exec:clang] [!exec:gcc] skip 'Unknown C compiler'
+
+[!GOOS:windows] chmod 0755 $WORK/'program files'/clang
+[!GOOS:windows] chmod 0755 $WORK/'program files'/gcc
+[!GOOS:windows] exists -exec $WORK/'program files'/clang
+[!GOOS:windows] exists -exec $WORK/'program files'/gcc
+[!GOOS:windows] env PATH=$WORK/'program files':$PATH
+[GOOS:windows] exists -exec $WORK/'program files'/gcc.bat
+[GOOS:windows] exists -exec $WORK/'program files'/clang.bat
+[GOOS:windows] env PATH=$WORK\'program files';%PATH%
+
+! exists $WORK/log.txt
+? go build -x
+exists $WORK/log.txt
+rm $WORK/log.txt
+
+# TODO(#41400, #43078): when CC is set explicitly, it should be allowed to
+# contain spaces separating arguments, and it should be possible to quote
+# arguments with spaces (including the path), as in CGO_CFLAGS and other
+# variables. For now, this doesn't work.
+[!GOOS:windows] env CC=$WORK/'program files'/gcc
+[GOOS:windows] env CC=$WORK\'program files'\gcc.bat
+! go build -x
+! exists $WORK/log.txt
+
+-- go.mod --
+module m
+
+-- m.go --
+package m
+
+// #define X 1
+import "C"
+
+-- $WORK/program files/gcc --
+#!/bin/sh
+
+echo ok >$WORK/log.txt
+-- $WORK/program files/clang --
+#!/bin/sh
+
+echo ok >$WORK/log.txt
+-- $WORK/program files/gcc.bat --
+echo ok >%WORK%\log.txt
+-- $WORK/program files/clang.bat --
+echo ok >%WORK%\log.txt
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_path_space_quote.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_path_space_quote.txt
new file mode 100644
index 0000000000000000000000000000000000000000..955610130088d512759b29d86d0dba92a0875ef2
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_path_space_quote.txt
@@ -0,0 +1,58 @@
+# This test checks that the CC environment variable may contain quotes and
+# spaces. Arguments are normally split on spaces, tabs, newlines. If an
+# argument contains these characters, the entire argument may be quoted
+# with single or double quotes. This is the same as -gcflags and similar
+# options.
+
+[short] skip
+[!exec:clang] [!exec:gcc] skip
+[!cgo] skip
+
+env GOENV=$WORK/go.env
+mkdir 'program files'
+go build -o 'program files' './which cc/which cc.go'
+[exec:clang] env CC='"'$PWD${/}program' 'files${/}which' 'cc"' 'clang
+[!exec:clang] env CC='"'$PWD${/}program' 'files${/}which' 'cc"' 'gcc
+go env CC
+stdout 'program files[/\\]which cc" (clang|gcc)$'
+go env -w CC=$CC
+env CC=
+go env CC
+stdout 'program files[/\\]which cc" (clang|gcc)$'
+
+go run .
+stdout 1
+
+-- go.mod --
+module test
+
+go 1.17
+-- which cc/which cc.go --
+package main
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+)
+
+func main() {
+	args := append([]string{"-DWRAPPER_WAS_USED=1"}, os.Args[2:]...)
+	cmd := exec.Command(os.Args[1], args...)
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+}
+-- hello.go --
+package main
+
+// int x = WRAPPER_WAS_USED;
+import "C"
+import "fmt"
+
+func main() {
+	fmt.Println(C.x)
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_stale.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_stale.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0d30aeaa9d41df08764303e99cc56b9c1e4df231
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_stale.txt
@@ -0,0 +1,39 @@
+# golang.org/issue/46347: a stale runtime/cgo should only force a single rebuild
+
+[!cgo] skip
+[short] skip
+
+
+# If we set a unique CGO_CFLAGS, the installed copy of runtime/cgo
+# should be reported as stale.
+
+env CGO_CFLAGS=-DTestScript_cgo_stale=true
+stale runtime/cgo
+
+
+# If we then build a package that uses cgo, runtime/cgo should be rebuilt and
+# cached with the new flag, but not installed to GOROOT.
+# It has no install target, and thus is never stale.
+
+env GOCACHE=$WORK/cache  # Use a fresh cache to avoid interference between runs.
+
+go build -x .
+stderr '[/\\]cgo'$GOEXE'["]? .* -importpath runtime/cgo'
+! stale runtime/cgo
+
+
+# After runtime/cgo has been rebuilt and cached, it should not be rebuilt again.
+
+go build -x .
+! stderr '[/\\]cgo'$GOEXE'["]? .* -importpath runtime/cgo'
+! stale runtime/cgo
+
+
+-- go.mod --
+module example.com/m
+
+go 1.17
+-- m.go --
+package m
+
+import "C"
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_stale_precompiled.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_stale_precompiled.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b2a0e0c8d578ad135e44ba091773e82969aedaa6
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_stale_precompiled.txt
@@ -0,0 +1,40 @@
+# Regression test for https://go.dev/issue/47215 and https://go.dev/issue/50183:
+# A mismatched $GOROOT_FINAL or missing $CC caused the C dependencies of the net
+# package to appear stale, and it could not be rebuilt due to a missing $CC.
+
+[!cgo] skip
+
+# This test may start with the runtime/cgo package already stale.
+# Explicitly rebuild it to ensure that it is cached.
+# (See https://go.dev/issue/50892.)
+#
+# If running in non-short mode, explicitly vary CGO_CFLAGS
+# as a control case (to ensure that our regexps do catch rebuilds).
+
+[!short] env GOCACHE=$WORK/cache
+[!short] env CGO_CFLAGS=-DTestScript_cgo_stale_precompiled=true
+go build -x runtime/cgo
+[!short] stderr '[/\\]cgo'$GOEXE'["]? .* -importpath runtime/cgo'
+
+# https://go.dev/issue/50183: a mismatched GOROOT_FINAL caused net to be stale.
+env oldGOROOT_FINAL=$GOROOT_FINAL
+env GOROOT_FINAL=$WORK${/}goroot
+go build -x runtime/cgo
+! stderr '[/\\]cgo'$GOEXE'["]? .* -importpath runtime/cgo'
+
+env GOROOT_FINAL=$oldGOROOT_FINAL
+
+# https://go.dev/issue/47215: a missing $(go env CC) caused the precompiled net
+# to be stale. But as of https://go.dev/cl/452457 the precompiled libraries are
+# no longer installed anyway! Since we're requiring a C compiler in order to
+# build and use cgo libraries in the standard library, we should make sure it
+# matches what's in the cache.
+
+[abscc] stop
+
+env CGO_ENABLED=1
+env CC=''
+[!GOOS:plan9] env PATH=''  # Guaranteed not to include $(go env CC)!
+[GOOS:plan9] env path=''
+! go build -x runtime/cgo
+stderr 'C compiler .* not found'
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_suspect_flag_force_external.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_suspect_flag_force_external.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6dc30bedb9e9ca127819797dbec8cc307a277ccd
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_suspect_flag_force_external.txt
@@ -0,0 +1,201 @@
+# Test case to verify that when we have a package that uses CGO in
+# combination with selected "unusual" flags (involving plugins, LTO)
+# that we force external linking.  See related
+# issues 58619,  58620, and 58848.
+
+[compiler:gccgo] skip # only external linking for gccgo
+
+[!cgo] skip 'test verifies behavior that depends on CGO_CFLAGS'
+[mustlinkext] skip 'test expects internal linking for non-cgo programs'
+
+# Here we build three program: one with explicit CGO use, one with no
+# CGO use, and one that uses a stdlib package ("runtime/cgo") that has
+# CGO in it. It used to be that only the explicit use of CGO would
+# trigger external linking, and that the program that only used
+# "runtime/cgo" would always be handled with internal linking. This caused
+# issues when users included odd/unusual flags (ex: -fplugin, -flto)
+# in CGO_CFLAGS, causing the Go linker to have to read and interpret
+# non-standard host objects.
+#
+# As of 1.21 we continue to use internal linking for programs whose
+# CGO use comes only from stdlib packages in the absence of any flag
+# funny business, however if the Go command sees flags that may be suspicious,
+# it signals the Go linker to invoke the external linker.
+
+# The next few tests run builds passing "-n" to the Go command, then
+# checking the output to see if the Go command is trying to pass a
+# "preferlinkext" token to the linker to request external linking.
+
+#-----------------------
+
+# Use a fresh GOCACHE for these next steps, so as to have the real
+# actions for the runtime/cgo package appear in the "-n -x" output.
+env GOCACHE=$WORK/gocache
+mkdir $GOCACHE
+
+# First build: there is no CGO in use, so no token should be present regardless
+# of weird CGO flags.
+go build -x -n -o dummy.exe ./noUseOfCgo
+! stderr preferlinkext
+env CGO_CFLAGS=-flto
+go build -x -n -o dummy.exe ./noUseOfCgo
+! stderr preferlinkext
+env CGO_CFLAGS=
+
+# Second build uses CGO, so we expect to see the token present in the
+# -n output only when strange flags are used.
+go build -x -n -o dummy.exe ./usesInternalCgo
+! stderr preferlinkext
+env CGO_CFLAGS=-flto
+go build -x -n -o dummy.exe ./usesInternalCgo
+stderr preferlinkext
+env CGO_CFLAGS=-fplugin
+go build -x -n -o dummy.exe ./usesInternalCgo
+stderr preferlinkext
+env CGO_CFLAGS=-fprofile-instr-generate
+go build -x -n -o dummy.exe ./usesInternalCgo
+stderr preferlinkext
+
+# The -fdebug-prefix-map=path is permitted for internal linking.
+env CGO_CFLAGS=-fdebug-prefix-map=/some/sandbox/execroot/workspace=/tmp/new
+go build -x -n -o dummy.exe ./usesInternalCgo
+! stderr preferlinkext
+env CGO_CFLAGS=-fdebug-prefix-map=/Users/someone/.cache/bazel/_bazel_someone/3fa7e4650c43657ead684537951f49e2/sandbox/linux-sandbox/10/execroot/rules_go_static=.
+go build -x -n -o dummy.exe ./usesInternalCgo
+! stderr preferlinkext
+# The -ffile-prefix-map=path is permitted for internal linking too.
+env CGO_CFLAGS=-ffile-prefix-map=/Users/someone/.cache/bazel/_bazel_someone/3fa7e4650c43657ead684537951f49e2/sandbox/linux-sandbox/10/execroot/rules_go_static/bazel-out/aarch64-fastbuild-ST-b33d65c724e6/bin/external/io_bazel_rules_go/stdlib_=.
+go build -x -n -o dummy.exe ./usesInternalCgo
+! stderr preferlinkext
+# Verifying that -fdebug-prefix-map=path, -ffile-prefix-map, -no-canonical-prefixes
+# and -fno-canonical-system-headers are permitted for internal linking.
+env CGO_CFLAGS=-fdebug-prefix-map=old=/tmp/new
+go build -x -n -o dummy.exe ./usesInternalCgo
+! stderr preferlinkext
+env CGO_CFLAGS=-ffile-prefix-map=/Users/someone/_11233/things=new
+go build -x -n -o dummy.exe ./usesInternalCgo
+! stderr preferlinkext
+env CGO_CFLAGS=-no-canonical-prefixes
+go build -x -n -o dummy.exe ./usesInternalCgo
+! stderr preferlinkext
+env CGO_CFLAGS=-fno-canonical-system-headers
+go build -x -n -o dummy.exe ./usesInternalCgo
+! stderr preferlinkext
+env CGO_CFLAGS=
+
+[short] skip
+
+# In the remaining tests below we do actual builds (without -n) to
+# verify that the Go linker is doing the right thing in addition to the
+# Go command. Here the idea is to pass "-tmpdir" to the linker, then
+# check after the link is done for the presence of the file
+# <tmpdir>/go.o, which the Go linker creates prior to kicking off the
+# external linker.
+
+mkdir tmp1
+mkdir tmp2
+mkdir tmp3
+mkdir tmp4
+mkdir tmp5
+
+# First build: no external linking expected
+go build -ldflags=-tmpdir=tmp1 -o $devnull ./noUseOfCgo &
+
+# Second build: using only "runtime/cgo", expect internal linking.
+go build -ldflags=-tmpdir=tmp2 -o $devnull ./usesInternalCgo &
+
+# Third build: program uses only "runtime/cgo", so we would normally
+# expect internal linking, except that cflags contain suspicious entries
+# (in this case, a flag that does not appear on the allow list).
+env CGO_CFLAGS=-fmerge-all-constants
+env CGO_LDFLAGS=-fmerge-all-constants
+go build -ldflags=-tmpdir=tmp3 -o $devnull ./usesInternalCgo &
+env CGO_CFLAGS=
+env CGO_LDFLAGS=
+
+# Fourth build: explicit CGO, expect external linking.
+go build -ldflags=-tmpdir=tmp4 -o $devnull ./usesExplicitCgo &
+
+# Fifth build: explicit CGO, but we specifically asked for internal linking
+# via a flag, so using internal linking it is.
+[cgolinkext] go list ./usesInternalCgo
+[!cgolinkext] go build '-ldflags=-tmpdir=tmp5 -linkmode=internal' -o $devnull ./usesInternalCgo &
+
+# Sixth build: explicit CGO use in a non-main package.
+go build -o p.a ./nonMainPackageUsesExplicitCgo &
+
+wait
+
+# Check first build: no external linking expected
+! exists tmp1/go.o
+
+# Check second build: using only "runtime/cgo", expect internal linking.
+[!cgolinkext] ! exists tmp2/go.o
+[cgolinkext] exists tmp2/go.o
+
+# Check third build: has suspicious flag.
+exists tmp3/go.o
+
+# Fourth build: explicit CGO, expect external linking.
+exists tmp4/go.o
+
+# Fifth build: explicit CGO, -linkmode=internal.
+! exists tmp5/go.o
+
+# Sixth build: make sure that "go tool nm" doesn't get confused
+# by the presence of the "preferlinkext" sentinel.
+go tool nm p.a
+
+-- go.mod --
+
+module cgo.example
+
+go 1.20
+
+-- noUseOfCgo/main.go --
+
+package main
+
+func main() {
+	println("clean as a whistle")
+}
+
+-- usesInternalCgo/main.go --
+
+package main
+
+import (
+	"runtime/cgo"
+)
+
+func main() {
+	q := "hello"
+	h := cgo.NewHandle(q)
+	h.Delete()
+}
+
+-- usesExplicitCgo/main.go --
+
+package main
+
+/*
+int meaningOfLife() { return 42; }
+*/
+import "C"
+
+func main() {
+     println(C.meaningOfLife())
+}
+
+-- nonMainPackageUsesExplicitCgo/main.go --
+
+package p
+
+/*
+int meaningOfLife() { return 42; }
+*/
+import "C"
+
+func PrintIt() {
+     println(C.meaningOfLife())
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_syso_issue29253.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_syso_issue29253.txt
new file mode 100644
index 0000000000000000000000000000000000000000..18526c6d311e374f6fd5344b9aa01ce07eed02b3
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_syso_issue29253.txt
@@ -0,0 +1,31 @@
+env GO111MODULE=off
+[short] skip
+
+# This test tests that we can link in-package syso files that provide symbols
+# for cgo. See issue 29253.
+[!cgo] stop
+[!compiler:gc] stop
+cc -c -o pkg/o.syso ext.c
+go build main.go
+
+-- ext.c --
+// +build ignore
+
+int f() { return 42; }
+-- pkg/pkg.go --
+package pkg
+
+// extern int f(void);
+import "C"
+
+func init() {
+	if v := C.f(); v != 42 {
+		panic(v)
+	}
+}
+-- main.go --
+package main
+
+import _ "pkg"
+
+func main() {}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_trimpath_macro.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_trimpath_macro.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b5cc1167cbe5ba1d76b262e09185d7ef10996eb0
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_trimpath_macro.txt
@@ -0,0 +1,71 @@
+# This is a test that -trimpath trims the paths of every directory
+# of Cgo dependencies in the module, and trims file paths included
+# through the __FILE__ macro using -ffile-prefix-map.
+
+[!cgo] skip
+[short] skip 'links and runs binaries'
+
+# Test in main module.
+go run -trimpath -mod=vendor ./main
+stdout '(\\_\\_|/_)[\\/]m[\\/]c[\\/]bar.h'
+
+# Test in vendored module.
+go run -trimpath -mod=vendor v.com/main
+stdout '(\\_\\_|/_)[\\/]vendor[\\/]v.com[\\/]c[\\/]bar.h'
+
+# Test in GOPATH mode.
+env GO111MODULE=off
+go run -trimpath ./main
+stdout '(\\_\\_|/_)[\\/]GOPATH[\\/]src[\\/]c[\\/]bar.h'
+
+-- go.mod --
+module m
+
+require v.com v1.0.0
+-- go.sum --
+v.com v1.0.0 h1:xxx
+v.com v1.0.0/go.mod h1:xxx
+-- vendor/modules.txt --
+# v.com v1.0.0
+## explicit; go 1.20
+v.com/main
+-- vendor/v.com/main/main.go --
+package main
+
+// #cgo CFLAGS: -I../c
+// #include "stdio.h"
+// void printfile();
+import "C"
+
+func main() {
+    C.printfile()
+    C.fflush(C.stdout)
+}
+-- vendor/v.com/main/foo.c --
+#include "bar.h"
+-- vendor/v.com/c/bar.h --
+#include "stdio.h"
+
+void printfile() {
+    printf("%s\n", __FILE__);
+}
+-- main/main.go --
+package main
+
+// #cgo CFLAGS: -I../c
+// #include "stdio.h"
+// void printfile();
+import "C"
+
+func main() {
+    C.printfile()
+    C.fflush(C.stdout)
+}
+-- main/foo.c --
+#include "bar.h"
+-- c/bar.h --
+#include "stdio.h"
+
+void printfile() {
+    printf("%s\n", __FILE__);
+}
\ No newline at end of file
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_undef.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_undef.txt
new file mode 100644
index 0000000000000000000000000000000000000000..30034fbac14fb62b146a222698967476837b62ac
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/cgo_undef.txt
@@ -0,0 +1,68 @@
+# Issue 52863.
+
+# We manually create a .syso and a .a file in package a,
+# such that the .syso file only works when linked against the .a file.
+# Package a has #cgo LDFLAGS to make this happen.
+#
+# Package c imports package a, and uses cgo itself.
+# The generation of the _cgo_import.go for package c will fail,
+# because it won't know that it has to link against a/libb.a
+# (because we don't gather the #cgo LDFLAGS from all transitively
+# imported packages).
+#
+# The _cgo_import.go file is only needed for internal linking.
+# When generating _cgo_import.go for package c fails, an ordinary
+# external link should still work. But an internal link is expected
+# to fail, because the failure to create _cgo_import.go should cause
+# the linker to report an inability to internally link.
+
+[short] skip
+[!cgo] skip
+[!exec:ar] skip
+
+cc -c -o a/b.syso b/b.c
+cc -c -o b/lib.o b/lib.c
+exec ar rc a/libb.a b/lib.o
+go build
+! go build -ldflags=-linkmode=internal
+stderr 'some packages could not be built to support internal linking.*m/c|requires external linking|does not support internal cgo'
+
+-- go.mod --
+module m
+
+-- a/a.go --
+package a
+
+// #cgo LDFLAGS: -L. -lb
+// extern int CFn(int);
+import "C"
+
+func GoFn(v int) int { return int(C.CFn(C.int(v))) }
+
+-- b/b.c --
+extern int LibFn(int);
+int CFn(int i) { return LibFn(i); }
+
+-- b/lib.c --
+int LibFn(int i) { return i; }
+
+-- c/c.go --
+package c
+
+// static int D(int i) { return i; }
+import "C"
+
+import "m/a"
+
+func Fn(i int) (int, int) {
+     return a.GoFn(i), int(C.D(C.int(i)))
+}
+
+-- main.go --
+package main
+
+import "m/c"
+
+func main() {
+	println(c.Fn(0))
+}
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/chdir.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/chdir.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a6feed6b45fce015b686a55e2bf24a0b5517633d
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/script/chdir.txt
@@ -0,0 +1,35 @@
+env OLD=$PWD
+
+# basic -C functionality
+cd $GOROOT/src/math
+go list -C ../strings
+stdout strings
+! go list -C ../nonexist
+stderr 'chdir.*nonexist'
+
+# check for -C in subcommands with custom flag parsing
+# cmd/go/chdir_test.go handles the normal ones more directly.
+
+# go doc
+go doc -C ../strings HasPrefix
+
+# go env
+go env -C $OLD/custom GOMOD
+stdout 'custom[\\/]go.mod'
+! go env -C ../nonexist
+stderr '^go: chdir ../nonexist: '
+
+# go test
+go test -C ../strings -n
+stderr 'strings\.test'
+
+# go vet
+go vet -C ../strings -n
+stderr strings_test
+
+# -C must be first on command line (as of Go 1.21)
+! go test -n -C ../strings
+stderr '^invalid value "../strings" for flag -C: -C flag must be first flag on command line$'
+
+-- custom/go.mod --
+module m
diff --git a/platform/dbops/binaries/go/go/src/cmd/go/testdata/vendormod.txt b/platform/dbops/binaries/go/go/src/cmd/go/testdata/vendormod.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1bdaf2abb054cfc11c8e9a4ae9e7753548f38dcc
--- /dev/null
+++ b/platform/dbops/binaries/go/go/src/cmd/go/testdata/vendormod.txt
@@ -0,0 +1,160 @@
+generated by: go run savedir.go vendormod
+
+-- a/foo/AUTHORS.txt --
+-- a/foo/CONTRIBUTORS --
+-- a/foo/LICENSE --
+-- a/foo/PATENTS --
+-- a/foo/COPYING --
+-- a/foo/COPYLEFT --
+-- a/foo/licensed-to-kill --
+-- w/LICENSE --
+-- x/NOTICE! --
+-- x/x2/LICENSE --
+-- mypkg/LICENSE.txt --
+-- a/foo/bar/b/main.go --
+package b
+-- a/foo/bar/b/main_test.go --
+package b
+
+import (
+	"os"
+	"testing"
+)
+
+func TestDir(t *testing.T) {
+	if _, err := os.Stat("../testdata/1"); err != nil {
+		t.Fatalf("testdata: %v", err)
+	}
+}
+-- a/foo/bar/c/main.go --
+package c
+-- a/foo/bar/c/main_test.go --
+package c
+
+import (
+	"os"
+	"testing"
+)
+
+func TestDir(t *testing.T) {
+	if _, err := os.Stat("../../../testdata/1"); err != nil {
+		t.Fatalf("testdata: %v", err)
+	}
+	if _, err := os.Stat("./testdata/1"); err != nil {
+		t.Fatalf("testdata: %v", err)
+	}
+}
+-- a/foo/bar/c/testdata/1 --
+-- a/foo/bar/testdata/1 --
+-- a/go.mod --
+module a
+-- a/main.go --
+package a
+-- a/main_test.go --
+package a
+
+import (
+	"os"
+	"testing"
+)
+
+func TestDir(t *testing.T) {
+	if _, err := os.Stat("./testdata/1"); err != nil {
+		t.Fatalf("testdata: %v", err)
+	}
+}
+-- a/testdata/1 --
+-- appengine.go --
+// +build appengine
+
+package m
+
+import _ "appengine"
+import _ "appengine/datastore"
+-- go.mod --
+module m
+
+require (
+	a v1.0.0
+	mysite/myname/mypkg v1.0.0
+	w v1.0.0 // indirect
+	x v1.0.0
+	y v1.0.0
+	z v1.0.0
+)
+
+replace (
+	a v1.0.0 => ./a
+	mysite/myname/mypkg v1.0.0 => ./mypkg
+	w v1.0.0 => ./w
+	x v1.0.0 => ./x
+	y v1.0.0 => ./y
+	z v1.0.0 => ./z
+)
+-- mypkg/go.mod --
+module me
+-- mypkg/mydir/d.go --
+package mydir
+-- subdir/v1_test.go --
+package m
+
+import _ "mysite/myname/mypkg/mydir"
+-- testdata1.go --
+package m
+
+import _ "a"
+-- testdata2.go --
+package m
+
+import _ "a/foo/bar/b"
+import _ "a/foo/bar/c"
+-- v1.go --
+package m
+
+import _ "x"
+-- v2.go --
+// +build abc
+
+package mMmMmMm
+
+import _ "y"
+-- v3.go --
+// +build !abc
+
+package m
+
+import _ "z"
+-- v4.go --
+// +build notmytag
+
+package m
+
+import _ "x/x1"
+-- w/go.mod --
+module w
+-- w/w.go --
+package w
+-- x/go.mod --
+module x
+-- x/testdata/x.txt --
+placeholder - want directory with no go files
+-- x/x.go --
+package x
+-- x/x1/x1.go --
+// +build notmytag
+
+package x1
+-- x/x2/dummy.txt --
+dummy
+-- x/x_test.go --
+package x
+
+import _ "w"
+-- y/go.mod --
+module y
+-- y/y.go --
+package y
+-- z/go.mod --
+module z
+-- z/z.go --
+package z