repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/types/errors.go
vendor/github.com/onsi/ginkgo/v2/types/errors.go
package types import ( "fmt" "reflect" "strings" "github.com/onsi/ginkgo/v2/formatter" ) type GinkgoError struct { Heading string Message string DocLink string CodeLocation CodeLocation } func (g GinkgoError) Error() string { out := formatter.F("{{bold}}{{red}}%s{{/}}\n", g.Heading) if (g.CodeLocation != CodeLocation{}) { contentsOfLine := strings.TrimLeft(g.CodeLocation.ContentsOfLine(), "\t ") if contentsOfLine != "" { out += formatter.F("{{light-gray}}%s{{/}}\n", contentsOfLine) } out += formatter.F("{{gray}}%s{{/}}\n", g.CodeLocation) } if g.Message != "" { out += formatter.Fiw(1, formatter.COLS, g.Message) out += "\n\n" } if g.DocLink != "" { out += formatter.Fiw(1, formatter.COLS, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}\n", g.DocLink) } return out } type ginkgoErrors struct{} var GinkgoErrors = ginkgoErrors{} func (g ginkgoErrors) UncaughtGinkgoPanic(cl CodeLocation) error { return GinkgoError{ Heading: "Your Test Panicked", Message: `When you, or your assertion library, calls Ginkgo's Fail(), Ginkgo panics to prevent subsequent assertions from running. Normally Ginkgo rescues this panic so you shouldn't see it. However, if you make an assertion in a goroutine, Ginkgo can't capture the panic. To circumvent this, you should call defer GinkgoRecover() at the top of the goroutine that caused this panic. Alternatively, you may have made an assertion outside of a Ginkgo leaf node (e.g. in a container node or some out-of-band function) - please move your assertion to an appropriate Ginkgo node (e.g. a BeforeSuite, BeforeEach, It, etc...).`, DocLink: "mental-model-how-ginkgo-handles-failure", CodeLocation: cl, } } func (g ginkgoErrors) RerunningSuite() error { return GinkgoError{ Heading: "Rerunning Suite", Message: formatter.F(`It looks like you are calling RunSpecs more than once. Ginkgo does not support rerunning suites. 
If you want to rerun a suite try {{bold}}ginkgo --repeat=N{{/}} or {{bold}}ginkgo --until-it-fails{{/}}`), DocLink: "repeating-spec-runs-and-managing-flaky-specs", } } /* Tree construction errors */ func (g ginkgoErrors) PushingNodeInRunPhase(nodeType NodeType, cl CodeLocation) error { return GinkgoError{ Heading: "Ginkgo detected an issue with your spec structure", Message: formatter.F( `It looks like you are trying to add a {{bold}}[%s]{{/}} node to the Ginkgo spec tree in a leaf node {{bold}}after{{/}} the specs started running. To enable randomization and parallelization Ginkgo requires the spec tree to be fully constructed up front. In practice, this means that you can only create nodes like {{bold}}[%s]{{/}} at the top-level or within the body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, nodeType, nodeType), CodeLocation: cl, DocLink: "mental-model-how-ginkgo-traverses-the-spec-hierarchy", } } func (g ginkgoErrors) CaughtPanicDuringABuildPhase(caughtPanic interface{}, cl CodeLocation) error { return GinkgoError{ Heading: "Assertion or Panic detected during tree construction", Message: formatter.F( `Ginkgo detected a panic while constructing the spec tree. You may be trying to make an assertion in the body of a container node (i.e. {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}). Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/}}, {{bold}}It{{/}}, etc. 
{{bold}}Here's the content of the panic that was caught:{{/}} %v`, caughtPanic), CodeLocation: cl, DocLink: "no-assertions-in-container-nodes", } } func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error { docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) { docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite" } return GinkgoError{ Heading: "Ginkgo detected an issue with your spec structure", Message: formatter.F( `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a container node. {{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType), CodeLocation: cl, DocLink: docLink, } } func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error { docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) { docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite" } return GinkgoError{ Heading: "Ginkgo detected an issue with your spec structure", Message: formatter.F( `It looks like you are trying to add a {{bold}}[%s]{{/}} node within a leaf node after the spec started running. 
{{bold}}%s{{/}} can only be called at the top level.`, nodeType, nodeType), CodeLocation: cl, DocLink: docLink, } } func (g ginkgoErrors) MultipleBeforeSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { return ginkgoErrorMultipleSuiteNodes("setup", nodeType, cl, earlierNodeType, earlierCodeLocation) } func (g ginkgoErrors) MultipleAfterSuiteNodes(nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { return ginkgoErrorMultipleSuiteNodes("teardown", nodeType, cl, earlierNodeType, earlierCodeLocation) } func ginkgoErrorMultipleSuiteNodes(setupOrTeardown string, nodeType NodeType, cl CodeLocation, earlierNodeType NodeType, earlierCodeLocation CodeLocation) error { return GinkgoError{ Heading: "Ginkgo detected an issue with your spec structure", Message: formatter.F( `It looks like you are trying to add a {{bold}}[%s]{{/}} node but you already have a {{bold}}[%s]{{/}} node defined at: {{gray}}%s{{/}}. Ginkgo only allows you to define one suite %s node.`, nodeType, earlierNodeType, earlierCodeLocation, setupOrTeardown), CodeLocation: cl, DocLink: "suite-setup-and-cleanup-beforesuite-and-aftersuite", } } /* Decorator errors */ func (g ginkgoErrors) InvalidDecoratorForNodeType(cl CodeLocation, nodeType NodeType, decorator string) error { return GinkgoError{ Heading: "Invalid Decorator", Message: formatter.F(`[%s] node cannot be passed a(n) '%s' decorator`, nodeType, decorator), CodeLocation: cl, DocLink: "node-decorators-overview", } } func (g ginkgoErrors) InvalidDeclarationOfFocusedAndPending(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ Heading: "Invalid Combination of Decorators: Focused and Pending", Message: formatter.F(`[%s] node was decorated with both Focus and Pending. 
At most one is allowed.`, nodeType), CodeLocation: cl, DocLink: "node-decorators-overview", } } func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ Heading: "Invalid Combination of Decorators: FlakeAttempts and MustPassRepeatedly", Message: formatter.F(`[%s] node was decorated with both FlakeAttempts and MustPassRepeatedly. At most one is allowed.`, nodeType), CodeLocation: cl, DocLink: "node-decorators-overview", } } func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error { return GinkgoError{ Heading: "Unknown Decorator", Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator), CodeLocation: cl, DocLink: "node-decorators-overview", } } func (g ginkgoErrors) InvalidBodyTypeForContainer(t reflect.Type, cl CodeLocation, nodeType NodeType) error { return GinkgoError{ Heading: "Invalid Function", Message: formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. You passed {{bold}}%s{{/}} instead.`, nodeType, t), CodeLocation: cl, DocLink: "node-decorators-overview", } } func (g ginkgoErrors) InvalidBodyType(t reflect.Type, cl CodeLocation, nodeType NodeType) error { mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}" if nodeType.Is(NodeTypeContainer) { mustGet = "{{bold}}func(){{/}}" } return GinkgoError{ Heading: "Invalid Function", Message: formatter.F(`[%s] node must be passed `+mustGet+`. 
You passed {{bold}}%s{{/}} instead.`, nodeType, t), CodeLocation: cl, DocLink: "node-decorators-overview", } } func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t reflect.Type, cl CodeLocation) error { mustGet := "{{bold}}func() []byte{{/}}, {{bold}}func(ctx SpecContext) []byte{{/}}, or {{bold}}func(ctx context.Context) []byte{{/}}, {{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}" return GinkgoError{ Heading: "Invalid Function", Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its first function. You passed {{bold}}%s{{/}} instead.`, t), CodeLocation: cl, DocLink: "node-decorators-overview", } } func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t reflect.Type, cl CodeLocation) error { mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}, {{bold}}func([]byte){{/}}, {{bold}}func(ctx SpecContext, []byte){{/}}, or {{bold}}func(ctx context.Context, []byte){{/}}" return GinkgoError{ Heading: "Invalid Function", Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its second function. 
You passed {{bold}}%s{{/}} instead.`, t), CodeLocation: cl, DocLink: "node-decorators-overview", } } func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ Heading: "Multiple Functions", Message: formatter.F(`[%s] node must be passed a single function - but more than one was passed in.`, nodeType), CodeLocation: cl, DocLink: "node-decorators-overview", } } func (g ginkgoErrors) MissingBodyFunction(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ Heading: "Missing Functions", Message: formatter.F(`[%s] node must be passed a single function - but none was passed in.`, nodeType), CodeLocation: cl, DocLink: "node-decorators-overview", } } func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextNode(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ Heading: "Invalid NodeTimeout SpecTimeout, or GracePeriod", Message: formatter.F(`[%s] was passed NodeTimeout, SpecTimeout, or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods`, nodeType), CodeLocation: cl, DocLink: "spec-timeouts-and-interruptible-nodes", } } func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextCleanupNode(cl CodeLocation) error { return GinkgoError{ Heading: "Invalid NodeTimeout SpecTimeout, or GracePeriod", Message: formatter.F(`[DeferCleanup] was passed NodeTimeout or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. 
You must accept a context to enable timeouts and grace periods`), CodeLocation: cl, DocLink: "spec-timeouts-and-interruptible-nodes", } } /* Ordered Container errors */ func (g ginkgoErrors) InvalidSerialNodeInNonSerialOrderedContainer(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ Heading: "Invalid Serial Node in Non-Serial Ordered Container", Message: formatter.F(`[%s] node was decorated with Serial but occurs in an Ordered container that is not marked Serial. Move the Serial decorator to the outer-most Ordered container to mark all ordered specs within the container as serial.`, nodeType), CodeLocation: cl, DocLink: "node-decorators-overview", } } func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ Heading: "Setup Node not in Ordered Container", Message: fmt.Sprintf("[%s] setup nodes must appear inside an Ordered container. They cannot be nested within other containers, even containers in an ordered container.", nodeType), CodeLocation: cl, DocLink: "ordered-containers", } } func (g ginkgoErrors) InvalidContinueOnFailureDecoration(cl CodeLocation) error { return GinkgoError{ Heading: "ContinueOnFailure not decorating an outermost Ordered Container", Message: "ContinueOnFailure can only decorate an Ordered container, and this Ordered container must be the outermost Ordered container.", CodeLocation: cl, DocLink: "ordered-containers", } } /* DeferCleanup errors */ func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error { return GinkgoError{ Heading: "DeferCleanup requires a valid function", Message: "You must pass DeferCleanup a function to invoke. This function must return zero or one values - if it does return, it must return an error. 
The function can take arbitrarily many arguments and you should provide these to DeferCleanup to pass along to the function.", CodeLocation: cl, DocLink: "cleaning-up-our-cleanup-code-defercleanup", } } func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation) error { return GinkgoError{ Heading: "DeferCleanup must be called inside a setup or subject node", Message: "You must call DeferCleanup inside a setup node (e.g. BeforeEach, BeforeSuite, AfterAll...) or a subject node (i.e. It). You can't call DeferCleanup at the top-level or in a container node - use the After* family of setup nodes instead.", CodeLocation: cl, DocLink: "cleaning-up-our-cleanup-code-defercleanup", } } func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType), Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a Reporting node.", CodeLocation: cl, DocLink: "cleaning-up-our-cleanup-code-defercleanup", } } func (g ginkgoErrors) PushingCleanupInCleanupNode(cl CodeLocation) error { return GinkgoError{ Heading: "DeferCleanup cannot be called in a DeferCleanup callback", Message: "Please inline your cleanup code - Ginkgo doesn't let you call DeferCleanup from within DeferCleanup", CodeLocation: cl, DocLink: "cleaning-up-our-cleanup-code-defercleanup", } } /* ReportEntry errors */ func (g ginkgoErrors) TooManyReportEntryValues(cl CodeLocation, arg interface{}) error { return GinkgoError{ Heading: "Too Many ReportEntry Values", Message: formatter.F(`{{bold}}AddGinkgoReport{{/}} can only be given one value. 
Got unexpected value: %#v`, arg), CodeLocation: cl, DocLink: "attaching-data-to-reports", } } func (g ginkgoErrors) AddReportEntryNotDuringRunPhase(cl CodeLocation) error { return GinkgoError{ Heading: "Ginkgo detected an issue with your spec structure", Message: formatter.F(`It looks like you are calling {{bold}}AddGinkgoReport{{/}} outside of a running spec. Make sure you call {{bold}}AddGinkgoReport{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`), CodeLocation: cl, DocLink: "attaching-data-to-reports", } } /* By errors */ func (g ginkgoErrors) ByNotDuringRunPhase(cl CodeLocation) error { return GinkgoError{ Heading: "Ginkgo detected an issue with your spec structure", Message: formatter.F(`It looks like you are calling {{bold}}By{{/}} outside of a running spec. Make sure you call {{bold}}By{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`), CodeLocation: cl, DocLink: "documenting-complex-specs-by", } } /* FileFilter and SkipFilter errors */ func (g ginkgoErrors) InvalidFileFilter(filter string) error { return GinkgoError{ Heading: "Invalid File Filter", Message: fmt.Sprintf(`The provided file filter: "%s" is invalid. File filters must have the format "file", "file:lines" where "file" is a regular expression that will match against the file path and lines is a comma-separated list of integers (e.g. file:1,5,7) or line-ranges (e.g. file:1-3,5-9) or both (e.g. file:1,5-9)`, filter), DocLink: "filtering-specs", } } func (g ginkgoErrors) InvalidFileFilterRegularExpression(filter string, err error) error { return GinkgoError{ Heading: "Invalid File Filter Regular Expression", Message: fmt.Sprintf(`The provided file filter: "%s" included an invalid regular expression. 
regexp.Compile error: %s`, filter, err), DocLink: "filtering-specs", } } /* Label Errors */ func (g ginkgoErrors) SyntaxErrorParsingLabelFilter(input string, location int, error string) error { var message string if location >= 0 { for i, r := range input { if i == location { message += "{{red}}{{bold}}{{underline}}" } message += string(r) if i == location { message += "{{/}}" } } } else { message = input } message += "\n" + error return GinkgoError{ Heading: "Syntax Error Parsing Label Filter", Message: message, DocLink: "spec-labels", } } func (g ginkgoErrors) InvalidLabel(label string, cl CodeLocation) error { return GinkgoError{ Heading: "Invalid Label", Message: fmt.Sprintf("'%s' is an invalid label. Labels cannot contain of the following characters: '&|!,()/'", label), CodeLocation: cl, DocLink: "spec-labels", } } func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error { return GinkgoError{ Heading: "Invalid Empty Label", Message: "Labels cannot be empty", CodeLocation: cl, DocLink: "spec-labels", } } /* Table errors */ func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error { return GinkgoError{ Heading: "DescribeTable passed multiple functions", Message: "It looks like you are passing multiple functions into DescribeTable. Only one function can be passed in. 
This function will be called for each Entry in the table.", CodeLocation: cl, DocLink: "table-specs", } } func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error { return GinkgoError{ Heading: "Invalid Entry description", Message: "Entry description functions must be a string, a function that accepts the entry parameters and returns a string, or nil.", CodeLocation: cl, DocLink: "table-specs", } } func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error { return GinkgoError{ Heading: "No parameters have been passed to the Table Function", Message: "The Table Function expected at least 1 parameter", CodeLocation: cl, DocLink: "table-specs", } } func (g ginkgoErrors) IncorrectParameterTypeForTable(i int, name string, cl CodeLocation) error { return GinkgoError{ Heading: "DescribeTable passed incorrect parameter type", Message: fmt.Sprintf("Parameter #%d passed to DescribeTable is of incorrect type <%s>", i, name), CodeLocation: cl, DocLink: "table-specs", } } func (g ginkgoErrors) TooFewParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error { return GinkgoError{ Heading: fmt.Sprintf("Too few parameters passed in to %s", kind), Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual), CodeLocation: cl, DocLink: "table-specs", } } func (g ginkgoErrors) TooManyParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error { return GinkgoError{ Heading: fmt.Sprintf("Too many parameters passed in to %s", kind), Message: fmt.Sprintf("The %s expected %d parameters but you passed in %d", kind, expected, actual), CodeLocation: cl, DocLink: "table-specs", } } func (g ginkgoErrors) IncorrectParameterTypeToTableFunction(i int, expected, actual reflect.Type, kind string, cl CodeLocation) error { return GinkgoError{ Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind), Message: fmt.Sprintf("The %s expected parameter #%d to be of type <%s> 
but you passed in <%s>", kind, i, expected, actual), CodeLocation: cl, DocLink: "table-specs", } } func (g ginkgoErrors) IncorrectVariadicParameterTypeToTableFunction(expected, actual reflect.Type, kind string, cl CodeLocation) error { return GinkgoError{ Heading: fmt.Sprintf("Incorrect parameters type passed to %s", kind), Message: fmt.Sprintf("The %s expected its variadic parameters to be of type <%s> but you passed in <%s>", kind, expected, actual), CodeLocation: cl, DocLink: "table-specs", } } func (g ginkgoErrors) ContextsCannotBeUsedInSubtreeTables(cl CodeLocation) error { return GinkgoError{ Heading: "Contexts cannot be used in subtree tables", Message: "You''ve defined a subtree body function that accepts a context but did not provide one in the table entry. Ginkgo SpecContexts can only be passed in to subject and setup nodes - so if you are trying to implement a spec timeout you should request a context in the It function within your subtree body function, not in the subtree body function itself.", CodeLocation: cl, DocLink: "table-specs", } } /* Parallel Synchronization errors */ func (g ginkgoErrors) AggregatedReportUnavailableDueToNodeDisappearing() error { return GinkgoError{ Heading: "Test Report unavailable because a Ginkgo parallel process disappeared", Message: "The aggregated report could not be fetched for a ReportAfterSuite node. A Ginkgo parallel process disappeared before it could finish reporting.", } } func (g ginkgoErrors) SynchronizedBeforeSuiteFailedOnProc1() error { return GinkgoError{ Heading: "SynchronizedBeforeSuite failed on Ginkgo parallel process #1", Message: "The first SynchronizedBeforeSuite function running on Ginkgo parallel process #1 failed. 
This suite will now abort.", } } func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error { return GinkgoError{ Heading: "Process #1 disappeared before SynchronizedBeforeSuite could report back", Message: "Ginkgo parallel process #1 disappeared before the first SynchronizedBeforeSuite function completed. This suite will now abort.", } } /* Configuration errors */ func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value interface{}) error { return GinkgoError{ Heading: "Unknown Type passed to RunSpecs", Message: fmt.Sprintf("RunSpecs() accepts labels, and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value), } } var sharedParallelErrorMessage = "It looks like you are trying to run specs in parallel with go test.\nThis is unsupported and you should use the ginkgo CLI instead." func (g ginkgoErrors) InvalidParallelTotalConfiguration() error { return GinkgoError{ Heading: "-ginkgo.parallel.total must be >= 1", Message: sharedParallelErrorMessage, DocLink: "spec-parallelization", } } func (g ginkgoErrors) InvalidParallelProcessConfiguration() error { return GinkgoError{ Heading: "-ginkgo.parallel.process is one-indexed and must be <= ginkgo.parallel.total", Message: sharedParallelErrorMessage, DocLink: "spec-parallelization", } } func (g ginkgoErrors) MissingParallelHostConfiguration() error { return GinkgoError{ Heading: "-ginkgo.parallel.host is missing", Message: sharedParallelErrorMessage, DocLink: "spec-parallelization", } } func (g ginkgoErrors) UnreachableParallelHost(host string) error { return GinkgoError{ Heading: "Could not reach ginkgo.parallel.host:" + host, Message: sharedParallelErrorMessage, DocLink: "spec-parallelization", } } func (g ginkgoErrors) DryRunInParallelConfiguration() error { return GinkgoError{ Heading: "Ginkgo only performs -dryRun in serial mode.", Message: "Please try running ginkgo -dryRun again, but without -p or -procs to ensure the suite is running in series.", } } func (g 
ginkgoErrors) GracePeriodCannotBeZero() error { return GinkgoError{ Heading: "Ginkgo requires a positive --grace-period.", Message: "Please set --grace-period to a positive duration. The default is 30s.", } } func (g ginkgoErrors) ConflictingVerbosityConfiguration() error { return GinkgoError{ Heading: "Conflicting reporter verbosity settings.", Message: "You can't set more than one of -v, -vv and --succinct. Please pick one!", } } func (g ginkgoErrors) InvalidOutputInterceptorModeConfiguration(value string) error { return GinkgoError{ Heading: fmt.Sprintf("Invalid value '%s' for --output-interceptor-mode.", value), Message: "You must choose one of 'dup', 'swap', or 'none'.", } } func (g ginkgoErrors) InvalidGoFlagCount() error { return GinkgoError{ Heading: "Use of go test -count", Message: "Ginkgo does not support using go test -count to rerun suites. Only -count=1 is allowed. To repeat suite runs, please use the ginkgo cli and `ginkgo -until-it-fails` or `ginkgo -repeat=N`.", } } func (g ginkgoErrors) InvalidGoFlagParallel() error { return GinkgoError{ Heading: "Use of go test -parallel", Message: "Go test's implementation of parallelization does not actually parallelize Ginkgo specs. Please use the ginkgo cli and `ginkgo -p` or `ginkgo -procs=N` instead.", } } func (g ginkgoErrors) BothRepeatAndUntilItFails() error { return GinkgoError{ Heading: "--repeat and --until-it-fails are both set", Message: "--until-it-fails directs Ginkgo to rerun specs indefinitely until they fail. --repeat directs Ginkgo to rerun specs a set number of times. You can't set both... which would you like?", } } /* Stack-Trace parsing errors */ func (g ginkgoErrors) FailedToParseStackTrace(message string) error { return GinkgoError{ Heading: "Failed to Parse Stack Trace", Message: message, } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/types/config.go
vendor/github.com/onsi/ginkgo/v2/types/config.go
/* Ginkgo accepts a number of configuration options. These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli) */ package types import ( "flag" "os" "path/filepath" "runtime" "strconv" "strings" "time" ) // Configuration controlling how an individual test suite is run type SuiteConfig struct { RandomSeed int64 RandomizeAllSpecs bool FocusStrings []string SkipStrings []string FocusFiles []string SkipFiles []string LabelFilter string FailOnPending bool FailOnEmpty bool FailFast bool FlakeAttempts int MustPassRepeatedly int DryRun bool PollProgressAfter time.Duration PollProgressInterval time.Duration Timeout time.Duration EmitSpecProgress bool // this is deprecated but its removal is causing compile issue for some users that were setting it manually OutputInterceptorMode string SourceRoots []string GracePeriod time.Duration ParallelProcess int ParallelTotal int ParallelHost string } func NewDefaultSuiteConfig() SuiteConfig { return SuiteConfig{ RandomSeed: time.Now().Unix(), Timeout: time.Hour, ParallelProcess: 1, ParallelTotal: 1, GracePeriod: 30 * time.Second, } } type VerbosityLevel uint const ( VerbosityLevelSuccinct VerbosityLevel = iota VerbosityLevelNormal VerbosityLevelVerbose VerbosityLevelVeryVerbose ) func (vl VerbosityLevel) GT(comp VerbosityLevel) bool { return vl > comp } func (vl VerbosityLevel) GTE(comp VerbosityLevel) bool { return vl >= comp } func (vl VerbosityLevel) Is(comp VerbosityLevel) bool { return vl == comp } func (vl VerbosityLevel) LTE(comp VerbosityLevel) bool { return vl <= comp } func (vl VerbosityLevel) LT(comp VerbosityLevel) bool { return vl < comp } // Configuration for Ginkgo's reporter type ReporterConfig struct { NoColor bool Succinct bool Verbose bool VeryVerbose bool FullTrace bool ShowNodeEvents bool GithubOutput bool SilenceSkips bool ForceNewlines bool JSONReport string JUnitReport string TeamcityReport string } func (rc ReporterConfig) Verbosity() VerbosityLevel { if rc.Succinct { return 
VerbosityLevelSuccinct } else if rc.Verbose { return VerbosityLevelVerbose } else if rc.VeryVerbose { return VerbosityLevelVeryVerbose } return VerbosityLevelNormal } func (rc ReporterConfig) WillGenerateReport() bool { return rc.JSONReport != "" || rc.JUnitReport != "" || rc.TeamcityReport != "" } func NewDefaultReporterConfig() ReporterConfig { return ReporterConfig{} } // Configuration for the Ginkgo CLI type CLIConfig struct { //for build, run, and watch Recurse bool SkipPackage string RequireSuite bool NumCompilers int //for run and watch only Procs int Parallel bool AfterRunHook string OutputDir string KeepSeparateCoverprofiles bool KeepSeparateReports bool //for run only KeepGoing bool UntilItFails bool Repeat int RandomizeSuites bool //for watch only Depth int WatchRegExp string } func NewDefaultCLIConfig() CLIConfig { return CLIConfig{ Depth: 1, WatchRegExp: `\.go$`, } } func (g CLIConfig) ComputedProcs() int { if g.Procs > 0 { return g.Procs } n := 1 if g.Parallel { n = runtime.NumCPU() if n > 4 { n = n - 1 } } return n } func (g CLIConfig) ComputedNumCompilers() int { if g.NumCompilers > 0 { return g.NumCompilers } return runtime.NumCPU() } // Configuration for the Ginkgo CLI capturing available go flags // A subset of Go flags are exposed by Ginkgo. Some are available at compile time (e.g. ginkgo build) and others only at run time (e.g. ginkgo run - which has both build and run time flags). 
// More details can be found at: // https://docs.google.com/spreadsheets/d/1zkp-DS4hU4sAJl5eHh1UmgwxCPQhf3s5a8fbiOI8tJU/ type GoFlagsConfig struct { //build-time flags for code-and-performance analysis Race bool Cover bool CoverMode string CoverPkg string Vet string //run-time flags for code-and-performance analysis BlockProfile string BlockProfileRate int CoverProfile string CPUProfile string MemProfile string MemProfileRate int MutexProfile string MutexProfileFraction int Trace string //build-time flags for building A bool ASMFlags string BuildMode string BuildVCS bool Compiler string GCCGoFlags string GCFlags string InstallSuffix string LDFlags string LinkShared bool Mod string N bool ModFile string ModCacheRW bool MSan bool PkgDir string Tags string TrimPath bool ToolExec string Work bool X bool O string } func NewDefaultGoFlagsConfig() GoFlagsConfig { return GoFlagsConfig{} } func (g GoFlagsConfig) BinaryMustBePreserved() bool { return g.BlockProfile != "" || g.CPUProfile != "" || g.MemProfile != "" || g.MutexProfile != "" } // Configuration that were deprecated in 2.0 type deprecatedConfig struct { DebugParallel bool NoisySkippings bool NoisyPendings bool RegexScansFilePath bool SlowSpecThresholdWithFLoatUnits float64 Stream bool Notify bool EmitSpecProgress bool SlowSpecThreshold time.Duration AlwaysEmitGinkgoWriter bool } // Flags // Flags sections used by both the CLI and the Ginkgo test process var FlagSections = GinkgoFlagSections{ {Key: "multiple-suites", Style: "{{dark-green}}", Heading: "Running Multiple Test Suites"}, {Key: "order", Style: "{{green}}", Heading: "Controlling Test Order"}, {Key: "parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism"}, {Key: "low-level-parallel", Style: "{{yellow}}", Heading: "Controlling Test Parallelism", Description: "These are set by the Ginkgo CLI, {{red}}{{bold}}do not set them manually{{/}} via go test.\nUse ginkgo -p or ginkgo -procs=N instead."}, {Key: "filter", Style: "{{cyan}}", Heading: 
"Filtering Tests"}, {Key: "failure", Style: "{{red}}", Heading: "Failure Handling"}, {Key: "output", Style: "{{magenta}}", Heading: "Controlling Output Formatting"}, {Key: "code-and-coverage-analysis", Style: "{{orange}}", Heading: "Code and Coverage Analysis"}, {Key: "performance-analysis", Style: "{{coral}}", Heading: "Performance Analysis"}, {Key: "debug", Style: "{{blue}}", Heading: "Debugging Tests", Description: "In addition to these flags, Ginkgo supports a few debugging environment variables. To change the parallel server protocol set {{blue}}GINKGO_PARALLEL_PROTOCOL{{/}} to {{bold}}HTTP{{/}}. To avoid pruning callstacks set {{blue}}GINKGO_PRUNE_STACK{{/}} to {{bold}}FALSE{{/}}."}, {Key: "watch", Style: "{{light-yellow}}", Heading: "Controlling Ginkgo Watch"}, {Key: "misc", Style: "{{light-gray}}", Heading: "Miscellaneous"}, {Key: "go-build", Style: "{{light-gray}}", Heading: "Go Build Flags", Succinct: true, Description: "These flags are inherited from go build. Run {{bold}}ginkgo help build{{/}} for more detailed flag documentation."}, } // SuiteConfigFlags provides flags for the Ginkgo test process, and CLI var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", Usage: "The seed used to randomize the spec suite.", AlwaysExport: true}, {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will randomize all specs together. 
By default, ginkgo only randomizes the top level Describe, Context and When containers."}, {KeyPath: "S.FailOnPending", Name: "fail-on-pending", SectionKey: "failure", DeprecatedName: "failOnPending", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will mark the test suite as failed if any specs are pending."}, {KeyPath: "S.FailFast", Name: "fail-fast", SectionKey: "failure", DeprecatedName: "failFast", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will stop running a test suite after a failure occurs."}, {KeyPath: "S.FlakeAttempts", Name: "flake-attempts", SectionKey: "failure", UsageDefaultValue: "0 - failed tests are not retried", DeprecatedName: "flakeAttempts", DeprecatedDocLink: "changed-command-line-flags", Usage: "Make up to this many attempts to run each spec. If any of the attempts succeed, the suite will not be failed."}, {KeyPath: "S.FailOnEmpty", Name: "fail-on-empty", SectionKey: "failure", Usage: "If set, ginkgo will mark the test suite as failed if no specs are run."}, {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."}, {KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0", Usage: "Emit node progress reports periodically if node hasn't completed after this duration."}, {KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s", Usage: "The rate at which to emit node progress reports after poll-progress-after has elapsed."}, {KeyPath: "S.SourceRoots", Name: "source-root", SectionKey: "debug", Usage: "The location to look for source code when generating progress reports. 
You can pass multiple --source-root flags."}, {KeyPath: "S.Timeout", Name: "timeout", SectionKey: "debug", UsageDefaultValue: "1h", Usage: "Test suite fails if it does not complete within the specified timeout."}, {KeyPath: "S.GracePeriod", Name: "grace-period", SectionKey: "debug", UsageDefaultValue: "30s", Usage: "When interrupted, Ginkgo will wait for GracePeriod for the current running node to exit before moving on to the next one."}, {KeyPath: "S.OutputInterceptorMode", Name: "output-interceptor-mode", SectionKey: "debug", UsageArgument: "dup, swap, or none", Usage: "If set, ginkgo will use the specified output interception strategy when running in parallel. Defaults to dup on unix and swap on windows."}, {KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression", Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. '(cat || dog) && !fruit'"}, {KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter", Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."}, {KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter", Usage: "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed."}, {KeyPath: "S.FocusFiles", Name: "focus-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line", Usage: "If set, ginkgo will only run specs in matching files. Can be specified multiple times, values are ORed."}, {KeyPath: "S.SkipFiles", Name: "skip-file", SectionKey: "filter", UsageArgument: "file (regexp) | file:line | file:lineA-lineB | file:line,line,line", Usage: "If set, ginkgo will skip specs in matching files. 
Can be specified multiple times, values are ORed."}, {KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"}, {KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"}, {KeyPath: "D.EmitSpecProgress", DeprecatedName: "progress", SectionKey: "debug", DeprecatedVersion: "2.5.0", Usage: ". The functionality provided by --progress was confusing and is no longer needed. Use --show-node-events instead to see node entry and exit events included in the timeline of failed and verbose specs. Or you can run with -vv to always see all node events. Lastly, --poll-progress-after and the PollProgressAfter decorator now provide a better mechanism for debugging specs that tend to get stuck."}, } // ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI) var ParallelConfigFlags = GinkgoFlags{ {KeyPath: "S.ParallelProcess", Name: "parallel.process", SectionKey: "low-level-parallel", UsageDefaultValue: "1", Usage: "This worker process's (one-indexed) process number. For running specs in parallel."}, {KeyPath: "S.ParallelTotal", Name: "parallel.total", SectionKey: "low-level-parallel", UsageDefaultValue: "1", Usage: "The total number of worker processes. For running specs in parallel."}, {KeyPath: "S.ParallelHost", Name: "parallel.host", SectionKey: "low-level-parallel", UsageDefaultValue: "set by Ginkgo CLI", Usage: "The address for the server that will synchronize the processes."}, } // ReporterConfigFlags provides flags for the Ginkgo test process, and CLI var ReporterConfigFlags = GinkgoFlags{ {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, suppress color output in default reporter. 
You can also set the environment variable GINKGO_NO_COLOR=TRUE"}, {KeyPath: "R.Verbose", Name: "v", SectionKey: "output", Usage: "If set, emits more output including GinkgoWriter contents."}, {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output", Usage: "If set, emits with maximal verbosity - includes skipped and pending tests."}, {KeyPath: "R.Succinct", Name: "succinct", SectionKey: "output", Usage: "If set, default reporter prints out a very succinct report"}, {KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output", Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output", Usage: "If set, default reporter prints easier to manage output in Github Actions."}, {KeyPath: "R.SilenceSkips", Name: "silence-skips", SectionKey: "output", Usage: "If set, default reporter will not print out skipped tests."}, {KeyPath: "R.ForceNewlines", Name: "force-newlines", SectionKey: "output", Usage: "If set, default reporter will ensure a newline appears after each test."}, {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, {KeyPath: "R.JUnitReport", Name: "junit-report", UsageArgument: "filename.xml", SectionKey: "output", DeprecatedName: "reportFile", DeprecatedDocLink: "improved-reporting-infrastructure", Usage: "If set, Ginkgo will generate a conformant junit test report in the specified file."}, {KeyPath: "R.TeamcityReport", Name: "teamcity-report", UsageArgument: "filename", SectionKey: "output", Usage: "If set, Ginkgo will generate a Teamcity-formatted test report at the specified location."}, {KeyPath: "D.SlowSpecThresholdWithFLoatUnits", DeprecatedName: 
"slowSpecThreshold", DeprecatedDocLink: "changed--slowspecthreshold", Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. '5s', not '5.0')"}, {KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, {KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, {KeyPath: "D.SlowSpecThreshold", DeprecatedName: "slow-spec-threshold", SectionKey: "output", Usage: "--slow-spec-threshold has been deprecated and will be removed in a future version of Ginkgo. This feature has proved to be more noisy than useful. You can use --poll-progress-after, instead, to get more actionable feedback about potentially slow specs and understand where they might be getting stuck.", DeprecatedVersion: "2.5.0"}, {KeyPath: "D.AlwaysEmitGinkgoWriter", DeprecatedName: "always-emit-ginkgo-writer", SectionKey: "output", Usage: " - use -v instead, or one of Ginkgo's machine-readable report formats to get GinkgoWriter output for passing specs."}, } // BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process func BuildTestSuiteFlagSet(suiteConfig *SuiteConfig, reporterConfig *ReporterConfig) (GinkgoFlagSet, error) { flags := SuiteConfigFlags.CopyAppend(ParallelConfigFlags...).CopyAppend(ReporterConfigFlags...) 
flags = flags.WithPrefix("ginkgo") bindings := map[string]interface{}{ "S": suiteConfig, "R": reporterConfig, "D": &deprecatedConfig{}, } extraGoFlagsSection := GinkgoFlagSection{Style: "{{gray}}", Heading: "Go test flags"} return NewAttachedGinkgoFlagSet(flag.CommandLine, flags, bindings, FlagSections, extraGoFlagsSection) } // VetConfig validates that the Ginkgo test process' configuration is sound func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig ReporterConfig) []error { errors := []error{} if flagSet.WasSet("count") || flagSet.WasSet("test.count") { flag := flagSet.Lookup("count") if flag == nil { flag = flagSet.Lookup("test.count") } count, err := strconv.Atoi(flag.Value.String()) if err != nil || count != 1 { errors = append(errors, GinkgoErrors.InvalidGoFlagCount()) } } if flagSet.WasSet("parallel") || flagSet.WasSet("test.parallel") { errors = append(errors, GinkgoErrors.InvalidGoFlagParallel()) } if suiteConfig.ParallelTotal < 1 { errors = append(errors, GinkgoErrors.InvalidParallelTotalConfiguration()) } if suiteConfig.ParallelProcess > suiteConfig.ParallelTotal || suiteConfig.ParallelProcess < 1 { errors = append(errors, GinkgoErrors.InvalidParallelProcessConfiguration()) } if suiteConfig.ParallelTotal > 1 && suiteConfig.ParallelHost == "" { errors = append(errors, GinkgoErrors.MissingParallelHostConfiguration()) } if suiteConfig.DryRun && suiteConfig.ParallelTotal > 1 { errors = append(errors, GinkgoErrors.DryRunInParallelConfiguration()) } if suiteConfig.GracePeriod <= 0 { errors = append(errors, GinkgoErrors.GracePeriodCannotBeZero()) } if len(suiteConfig.FocusFiles) > 0 { _, err := ParseFileFilters(suiteConfig.FocusFiles) if err != nil { errors = append(errors, err) } } if len(suiteConfig.SkipFiles) > 0 { _, err := ParseFileFilters(suiteConfig.SkipFiles) if err != nil { errors = append(errors, err) } } if suiteConfig.LabelFilter != "" { _, err := ParseLabelFilter(suiteConfig.LabelFilter) if err != nil { errors = 
append(errors, err) } } switch strings.ToLower(suiteConfig.OutputInterceptorMode) { case "", "dup", "swap", "none": default: errors = append(errors, GinkgoErrors.InvalidOutputInterceptorModeConfiguration(suiteConfig.OutputInterceptorMode)) } numVerbosity := 0 for _, v := range []bool{reporterConfig.Succinct, reporterConfig.Verbose, reporterConfig.VeryVerbose} { if v { numVerbosity++ } } if numVerbosity > 1 { errors = append(errors, GinkgoErrors.ConflictingVerbosityConfiguration()) } return errors } // GinkgoCLISharedFlags provides flags shared by the Ginkgo CLI's build, watch, and run commands var GinkgoCLISharedFlags = GinkgoFlags{ {KeyPath: "C.Recurse", Name: "r", SectionKey: "multiple-suites", Usage: "If set, ginkgo finds and runs test suites under the current directory recursively."}, {KeyPath: "C.SkipPackage", Name: "skip-package", SectionKey: "multiple-suites", DeprecatedName: "skipPackage", DeprecatedDocLink: "changed-command-line-flags", UsageArgument: "comma-separated list of packages", Usage: "A comma-separated list of package names to be skipped. 
If any part of the package's path matches, that package is ignored."}, {KeyPath: "C.RequireSuite", Name: "require-suite", SectionKey: "failure", DeprecatedName: "requireSuite", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, Ginkgo fails if there are ginkgo tests in a directory but no invocation of RunSpecs."}, {KeyPath: "C.NumCompilers", Name: "compilers", SectionKey: "multiple-suites", UsageDefaultValue: "0 (will autodetect)", Usage: "When running multiple packages, the number of concurrent compilations to perform."}, } // GinkgoCLIRunAndWatchFlags provides flags shared by the Ginkgo CLI's build and watch commands (but not run) var GinkgoCLIRunAndWatchFlags = GinkgoFlags{ {KeyPath: "C.Procs", Name: "procs", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)", Usage: "The number of parallel test nodes to run."}, {KeyPath: "C.Procs", Name: "nodes", SectionKey: "parallel", UsageDefaultValue: "1 (run in series)", Usage: "--nodes is an alias for --procs"}, {KeyPath: "C.Parallel", Name: "p", SectionKey: "parallel", Usage: "If set, ginkgo will run in parallel with an auto-detected number of nodes."}, {KeyPath: "C.AfterRunHook", Name: "after-run-hook", SectionKey: "misc", DeprecatedName: "afterSuiteHook", DeprecatedDocLink: "changed-command-line-flags", Usage: "Command to run when a test suite completes."}, {KeyPath: "C.OutputDir", Name: "output-dir", SectionKey: "output", UsageArgument: "directory", DeprecatedName: "outputdir", DeprecatedDocLink: "improved-profiling-support", Usage: "A location to place all generated profiles and reports."}, {KeyPath: "C.KeepSeparateCoverprofiles", Name: "keep-separate-coverprofiles", SectionKey: "code-and-coverage-analysis", Usage: "If set, Ginkgo does not merge coverprofiles into one monolithic coverprofile. 
The coverprofiles will remain in their respective package directories or in -output-dir if set."}, {KeyPath: "C.KeepSeparateReports", Name: "keep-separate-reports", SectionKey: "output", Usage: "If set, Ginkgo does not merge per-suite reports (e.g. -json-report) into one monolithic report for the entire testrun. The reports will remain in their respective package directories or in -output-dir if set."}, {KeyPath: "D.Stream", DeprecatedName: "stream", DeprecatedDocLink: "removed--stream", DeprecatedVersion: "2.0.0"}, {KeyPath: "D.Notify", DeprecatedName: "notify", DeprecatedDocLink: "removed--notify", DeprecatedVersion: "2.0.0"}, } // GinkgoCLIRunFlags provides flags for Ginkgo CLI's run command that aren't shared by any other commands var GinkgoCLIRunFlags = GinkgoFlags{ {KeyPath: "C.KeepGoing", Name: "keep-going", SectionKey: "multiple-suites", DeprecatedName: "keepGoing", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, failures from earlier test suites do not prevent later test suites from running."}, {KeyPath: "C.UntilItFails", Name: "until-it-fails", SectionKey: "debug", DeprecatedName: "untilItFails", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will keep rerunning test suites until a failure occurs."}, {KeyPath: "C.Repeat", Name: "repeat", SectionKey: "debug", UsageArgument: "n", UsageDefaultValue: "0 - i.e. no repetition, run only once", Usage: "The number of times to re-run a test-suite. Useful for debugging flaky tests. 
If set to N the suite will be run N+1 times and will be required to pass each time."}, {KeyPath: "C.RandomizeSuites", Name: "randomize-suites", SectionKey: "order", DeprecatedName: "randomizeSuites", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will randomize the order in which test suites run."}, } // GinkgoCLIRunFlags provides flags for Ginkgo CLI's watch command that aren't shared by any other commands var GinkgoCLIWatchFlags = GinkgoFlags{ {KeyPath: "C.Depth", Name: "depth", SectionKey: "watch", Usage: "Ginkgo will watch dependencies down to this depth in the dependency tree."}, {KeyPath: "C.WatchRegExp", Name: "watch-regexp", SectionKey: "watch", DeprecatedName: "watchRegExp", DeprecatedDocLink: "changed-command-line-flags", UsageArgument: "Regular Expression", UsageDefaultValue: `\.go$`, Usage: "Only files matching this regular expression will be watched for changes."}, } // GoBuildFlags provides flags for the Ginkgo CLI build, run, and watch commands that capture go's build-time flags. These are passed to go test -c by the ginkgo CLI var GoBuildFlags = GinkgoFlags{ {KeyPath: "Go.Race", Name: "race", SectionKey: "code-and-coverage-analysis", Usage: "enable data race detection. Supported on linux/amd64, linux/ppc64le, linux/arm64, linux/s390x, freebsd/amd64, netbsd/amd64, darwin/amd64, darwin/arm64, and windows/amd64."}, {KeyPath: "Go.Vet", Name: "vet", UsageArgument: "list", SectionKey: "code-and-coverage-analysis", Usage: `Configure the invocation of "go vet" during "go test" to use the comma-separated list of vet checks. If list is empty, "go test" runs "go vet" with a curated list of checks believed to be always worth addressing. If list is "off", "go test" does not run "go vet" at all. Available checks can be found by running 'go doc cmd/vet'`}, {KeyPath: "Go.Cover", Name: "cover", SectionKey: "code-and-coverage-analysis", Usage: "Enable coverage analysis. 
Note that because coverage works by annotating the source code before compilation, compilation and test failures with coverage enabled may report line numbers that don't correspond to the original sources."}, {KeyPath: "Go.CoverMode", Name: "covermode", UsageArgument: "set,count,atomic", SectionKey: "code-and-coverage-analysis", Usage: `Set the mode for coverage analysis for the package[s] being tested. 'set': does this statement run? 'count': how many times does this statement run? 'atomic': like count, but correct in multithreaded tests and more expensive (must use atomic with -race). Sets -cover`}, {KeyPath: "Go.CoverPkg", Name: "coverpkg", UsageArgument: "pattern1,pattern2,pattern3", SectionKey: "code-and-coverage-analysis", Usage: "Apply coverage analysis in each test to packages matching the patterns. The default is for each test to analyze only the package being tested. See 'go help packages' for a description of package patterns. Sets -cover."}, {KeyPath: "Go.A", Name: "a", SectionKey: "go-build", Usage: "force rebuilding of packages that are already up-to-date."}, {KeyPath: "Go.ASMFlags", Name: "asmflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", Usage: "arguments to pass on each go tool asm invocation."}, {KeyPath: "Go.BuildMode", Name: "buildmode", UsageArgument: "mode", SectionKey: "go-build", Usage: "build mode to use. 
See 'go help buildmode' for more."}, {KeyPath: "Go.BuildVCS", Name: "buildvcs", SectionKey: "go-build", Usage: "adds version control information."}, {KeyPath: "Go.Compiler", Name: "compiler", UsageArgument: "name", SectionKey: "go-build", Usage: "name of compiler to use, as in runtime.Compiler (gccgo or gc)."}, {KeyPath: "Go.GCCGoFlags", Name: "gccgoflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", Usage: "arguments to pass on each gccgo compiler/linker invocation."}, {KeyPath: "Go.GCFlags", Name: "gcflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", Usage: "arguments to pass on each go tool compile invocation."}, {KeyPath: "Go.InstallSuffix", Name: "installsuffix", SectionKey: "go-build", Usage: "a suffix to use in the name of the package installation directory, in order to keep output separate from default builds. If using the -race flag, the install suffix is automatically set to raceor, if set explicitly, has _race appended to it. Likewise for the -msan flag. Using a -buildmode option that requires non-default compile flags has a similar effect."}, {KeyPath: "Go.LDFlags", Name: "ldflags", UsageArgument: "'[pattern=]arg list'", SectionKey: "go-build", Usage: "arguments to pass on each go tool link invocation."}, {KeyPath: "Go.LinkShared", Name: "linkshared", SectionKey: "go-build", Usage: "build code that will be linked against shared libraries previously created with -buildmode=shared."}, {KeyPath: "Go.Mod", Name: "mod", UsageArgument: "mode (readonly, vendor, or mod)", SectionKey: "go-build", Usage: "module download mode to use: readonly, vendor, or mod. 
See 'go help modules' for more."}, {KeyPath: "Go.ModCacheRW", Name: "modcacherw", SectionKey: "go-build", Usage: "leave newly-created directories in the module cache read-write instead of making them read-only."}, {KeyPath: "Go.ModFile", Name: "modfile", UsageArgument: "file", SectionKey: "go-build", Usage: `in module aware mode, read (and possibly write) an alternate go.mod file instead of the one in the module root directory. A file named go.mod must still be present in order to determine the module root directory, but it is not accessed. When -modfile is specified, an alternate go.sum file is also used: its path is derived from the -modfile flag by trimming the ".mod" extension and appending ".sum".`}, {KeyPath: "Go.MSan", Name: "msan", SectionKey: "go-build", Usage: "enable interoperation with memory sanitizer. Supported only on linux/amd64, linux/arm64 and only with Clang/LLVM as the host C compiler. On linux/arm64, pie build mode will be used."}, {KeyPath: "Go.N", Name: "n", SectionKey: "go-build", Usage: "print the commands but do not run them."}, {KeyPath: "Go.PkgDir", Name: "pkgdir", UsageArgument: "dir", SectionKey: "go-build", Usage: "install and load all packages from dir instead of the usual locations. For example, when building with a non-standard configuration, use -pkgdir to keep generated packages in a separate location."}, {KeyPath: "Go.Tags", Name: "tags", UsageArgument: "tag,list", SectionKey: "go-build", Usage: "a comma-separated list of build tags to consider satisfied during the build. For more information about build tags, see the description of build constraints in the documentation for the go/build package. (Earlier versions of Go used a space-separated list, and that form is deprecated but still recognized.)"}, {KeyPath: "Go.TrimPath", Name: "trimpath", SectionKey: "go-build", Usage: `remove all file system paths from the resulting executable. 
Instead of absolute file system paths, the recorded file names will begin with either "go" (for the standard library), or a module path@version (when using modules), or a plain import path (when using GOPATH).`}, {KeyPath: "Go.ToolExec", Name: "toolexec", UsageArgument: "'cmd args'", SectionKey: "go-build", Usage: "a program to use to invoke toolchain programs like vet and asm. For example, instead of running asm, the go command will run cmd args /path/to/asm <arguments for asm>'."}, {KeyPath: "Go.Work", Name: "work", SectionKey: "go-build", Usage: "print the name of the temporary work directory and do not delete it when exiting."}, {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", Usage: "print the commands."}, {KeyPath: "Go.O", Name: "o", SectionKey: "go-build", Usage: "output binary path (including name)."}, } // GoRunFlags provides flags for the Ginkgo CLI run, and watch commands that capture go's run-time flags. These are passed to the compiled test binary by the ginkgo CLI var GoRunFlags = GinkgoFlags{ {KeyPath: "Go.CoverProfile", Name: "coverprofile", UsageArgument: "file", SectionKey: "code-and-coverage-analysis", Usage: `Write a coverage profile to the file after all tests have passed. Sets -cover.`}, {KeyPath: "Go.BlockProfile", Name: "blockprofile", UsageArgument: "file", SectionKey: "performance-analysis", Usage: `Write a goroutine blocking profile to the specified file when all tests are complete. Preserves test binary.`}, {KeyPath: "Go.BlockProfileRate", Name: "blockprofilerate", UsageArgument: "rate", SectionKey: "performance-analysis", Usage: `Control the detail provided in goroutine blocking profiles by calling runtime.SetBlockProfileRate with rate. See 'go doc runtime.SetBlockProfileRate'. The profiler aims to sample, on average, one blocking event every n nanoseconds the program spends blocked. By default, if -test.blockprofile is set without this flag, all blocking events are recorded, equivalent to -test.blockprofilerate=1.`},
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/types/code_location.go
vendor/github.com/onsi/ginkgo/v2/types/code_location.go
package types import ( "fmt" "os" "regexp" "runtime" "runtime/debug" "strings" "sync" ) type CodeLocation struct { FileName string `json:",omitempty"` LineNumber int `json:",omitempty"` FullStackTrace string `json:",omitempty"` CustomMessage string `json:",omitempty"` } func (codeLocation CodeLocation) String() string { if codeLocation.CustomMessage != "" { return codeLocation.CustomMessage } return fmt.Sprintf("%s:%d", codeLocation.FileName, codeLocation.LineNumber) } func (codeLocation CodeLocation) ContentsOfLine() string { if codeLocation.CustomMessage != "" { return "" } contents, err := os.ReadFile(codeLocation.FileName) if err != nil { return "" } lines := strings.Split(string(contents), "\n") if len(lines) < codeLocation.LineNumber { return "" } return lines[codeLocation.LineNumber-1] } type codeLocationLocator struct { pcs map[uintptr]bool helpers map[string]bool lock *sync.Mutex } func (c *codeLocationLocator) addHelper(pc uintptr) { c.lock.Lock() defer c.lock.Unlock() if c.pcs[pc] { return } c.lock.Unlock() f := runtime.FuncForPC(pc) c.lock.Lock() if f == nil { return } c.helpers[f.Name()] = true c.pcs[pc] = true } func (c *codeLocationLocator) hasHelper(name string) bool { c.lock.Lock() defer c.lock.Unlock() return c.helpers[name] } func (c *codeLocationLocator) getCodeLocation(skip int) CodeLocation { pc := make([]uintptr, 40) n := runtime.Callers(skip+2, pc) if n == 0 { return CodeLocation{} } pc = pc[:n] frames := runtime.CallersFrames(pc) for { frame, more := frames.Next() if !c.hasHelper(frame.Function) { return CodeLocation{FileName: frame.File, LineNumber: frame.Line} } if !more { break } } return CodeLocation{} } var clLocator = &codeLocationLocator{ pcs: map[uintptr]bool{}, helpers: map[string]bool{}, lock: &sync.Mutex{}, } // MarkAsHelper is used by GinkgoHelper to mark the caller (appropriately offset by skip)as a helper. 
You can use this directly if you need to provide an optional `skip` to mark functions further up the call stack as helpers. func MarkAsHelper(optionalSkip ...int) { skip := 1 if len(optionalSkip) > 0 { skip += optionalSkip[0] } pc, _, _, ok := runtime.Caller(skip) if ok { clLocator.addHelper(pc) } } func NewCustomCodeLocation(message string) CodeLocation { return CodeLocation{ CustomMessage: message, } } func NewCodeLocation(skip int) CodeLocation { return clLocator.getCodeLocation(skip + 1) } func NewCodeLocationWithStackTrace(skip int) CodeLocation { cl := clLocator.getCodeLocation(skip + 1) cl.FullStackTrace = PruneStack(string(debug.Stack()), skip+1) return cl } // PruneStack removes references to functions that are internal to Ginkgo // and the Go runtime from a stack string and a certain number of stack entries // at the beginning of the stack. The stack string has the format // as returned by runtime/debug.Stack. The leading goroutine information is // optional and always removed if present. Beware that runtime/debug.Stack // adds itself as first entry, so typically skip must be >= 1 to remove that // entry. func PruneStack(fullStackTrace string, skip int) string { stack := strings.Split(fullStackTrace, "\n") // Ensure that the even entries are the method names and the // odd entries the source code information. if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") { // Ignore "goroutine 29 [running]:" line. stack = stack[1:] } // The "+1" is for skipping over the initial entry, which is // runtime/debug.Stack() itself. if len(stack) > 2*(skip+1) { stack = stack[2*(skip+1):] } prunedStack := []string{} if os.Getenv("GINKGO_PRUNE_STACK") == "FALSE" { prunedStack = stack } else { re := regexp.MustCompile(`\/ginkgo\/|\/pkg\/testing\/|\/pkg\/runtime\/`) for i := 0; i < len(stack)/2; i++ { // We filter out based on the source code file name. 
if !re.MatchString(stack[i*2+1]) { prunedStack = append(prunedStack, stack[i*2]) prunedStack = append(prunedStack, stack[i*2+1]) } } } return strings.Join(prunedStack, "\n") }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
vendor/github.com/onsi/ginkgo/v2/types/deprecated_types.go
package types import ( "strconv" "time" ) /* A set of deprecations to make the transition from v1 to v2 easier for users who have written custom reporters. */ type SuiteSummary = DeprecatedSuiteSummary type SetupSummary = DeprecatedSetupSummary type SpecSummary = DeprecatedSpecSummary type SpecMeasurement = DeprecatedSpecMeasurement type SpecComponentType = NodeType type SpecFailure = DeprecatedSpecFailure var ( SpecComponentTypeInvalid = NodeTypeInvalid SpecComponentTypeContainer = NodeTypeContainer SpecComponentTypeIt = NodeTypeIt SpecComponentTypeBeforeEach = NodeTypeBeforeEach SpecComponentTypeJustBeforeEach = NodeTypeJustBeforeEach SpecComponentTypeAfterEach = NodeTypeAfterEach SpecComponentTypeJustAfterEach = NodeTypeJustAfterEach SpecComponentTypeBeforeSuite = NodeTypeBeforeSuite SpecComponentTypeSynchronizedBeforeSuite = NodeTypeSynchronizedBeforeSuite SpecComponentTypeAfterSuite = NodeTypeAfterSuite SpecComponentTypeSynchronizedAfterSuite = NodeTypeSynchronizedAfterSuite ) type DeprecatedSuiteSummary struct { SuiteDescription string SuiteSucceeded bool SuiteID string NumberOfSpecsBeforeParallelization int NumberOfTotalSpecs int NumberOfSpecsThatWillBeRun int NumberOfPendingSpecs int NumberOfSkippedSpecs int NumberOfPassedSpecs int NumberOfFailedSpecs int NumberOfFlakedSpecs int RunTime time.Duration } type DeprecatedSetupSummary struct { ComponentType SpecComponentType CodeLocation CodeLocation State SpecState RunTime time.Duration Failure SpecFailure CapturedOutput string SuiteID string } type DeprecatedSpecSummary struct { ComponentTexts []string ComponentCodeLocations []CodeLocation State SpecState RunTime time.Duration Failure SpecFailure IsMeasurement bool NumberOfSamples int Measurements map[string]*DeprecatedSpecMeasurement CapturedOutput string SuiteID string } func (s DeprecatedSpecSummary) HasFailureState() bool { return s.State.Is(SpecStateFailureStates) } func (s DeprecatedSpecSummary) TimedOut() bool { return false } func (s 
DeprecatedSpecSummary) Panicked() bool { return s.State == SpecStatePanicked } func (s DeprecatedSpecSummary) Failed() bool { return s.State == SpecStateFailed } func (s DeprecatedSpecSummary) Passed() bool { return s.State == SpecStatePassed } func (s DeprecatedSpecSummary) Skipped() bool { return s.State == SpecStateSkipped } func (s DeprecatedSpecSummary) Pending() bool { return s.State == SpecStatePending } type DeprecatedSpecFailure struct { Message string Location CodeLocation ForwardedPanic string ComponentIndex int ComponentType SpecComponentType ComponentCodeLocation CodeLocation } type DeprecatedSpecMeasurement struct { Name string Info interface{} Order int Results []float64 Smallest float64 Largest float64 Average float64 StdDeviation float64 SmallestLabel string LargestLabel string AverageLabel string Units string Precision int } func (s DeprecatedSpecMeasurement) PrecisionFmt() string { if s.Precision == 0 { return "%f" } str := strconv.Itoa(s.Precision) return "%." + str + "f" }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/types/enum_support.go
vendor/github.com/onsi/ginkgo/v2/types/enum_support.go
package types import "encoding/json" type EnumSupport struct { toString map[uint]string toEnum map[string]uint maxEnum uint } func NewEnumSupport(toString map[uint]string) EnumSupport { toEnum, maxEnum := map[string]uint{}, uint(0) for k, v := range toString { toEnum[v] = k if maxEnum < k { maxEnum = k } } return EnumSupport{toString: toString, toEnum: toEnum, maxEnum: maxEnum} } func (es EnumSupport) String(e uint) string { if e > es.maxEnum { return es.toString[0] } return es.toString[e] } func (es EnumSupport) UnmarshJSON(b []byte) (uint, error) { var dec string if err := json.Unmarshal(b, &dec); err != nil { return 0, err } out := es.toEnum[dec] // if we miss we get 0 which is what we want anyway return out, nil } func (es EnumSupport) MarshJSON(e uint) ([]byte, error) { if e == 0 || e > es.maxEnum { return json.Marshal(nil) } return json.Marshal(es.toString[e]) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/types/file_filter.go
vendor/github.com/onsi/ginkgo/v2/types/file_filter.go
package types import ( "regexp" "strconv" "strings" ) func ParseFileFilters(filters []string) (FileFilters, error) { ffs := FileFilters{} for _, filter := range filters { ff := FileFilter{} if filter == "" { return nil, GinkgoErrors.InvalidFileFilter(filter) } components := strings.Split(filter, ":") if !(len(components) == 1 || len(components) == 2) { return nil, GinkgoErrors.InvalidFileFilter(filter) } var err error ff.Filename, err = regexp.Compile(components[0]) if err != nil { return nil, err } if len(components) == 2 { lineFilters := strings.Split(components[1], ",") for _, lineFilter := range lineFilters { components := strings.Split(lineFilter, "-") if len(components) == 1 { line, err := strconv.Atoi(strings.TrimSpace(components[0])) if err != nil { return nil, GinkgoErrors.InvalidFileFilter(filter) } ff.LineFilters = append(ff.LineFilters, LineFilter{line, line + 1}) } else if len(components) == 2 { line1, err := strconv.Atoi(strings.TrimSpace(components[0])) if err != nil { return nil, GinkgoErrors.InvalidFileFilter(filter) } line2, err := strconv.Atoi(strings.TrimSpace(components[1])) if err != nil { return nil, GinkgoErrors.InvalidFileFilter(filter) } ff.LineFilters = append(ff.LineFilters, LineFilter{line1, line2}) } else { return nil, GinkgoErrors.InvalidFileFilter(filter) } } } ffs = append(ffs, ff) } return ffs, nil } type FileFilter struct { Filename *regexp.Regexp LineFilters LineFilters } func (f FileFilter) Matches(locations []CodeLocation) bool { for _, location := range locations { if f.Filename.MatchString(location.FileName) && f.LineFilters.Matches(location.LineNumber) { return true } } return false } type FileFilters []FileFilter func (ffs FileFilters) Matches(locations []CodeLocation) bool { for _, ff := range ffs { if ff.Matches(locations) { return true } } return false } type LineFilter struct { Min int Max int } func (lf LineFilter) Matches(line int) bool { return lf.Min <= line && line < lf.Max } type LineFilters []LineFilter func (lfs 
LineFilters) Matches(line int) bool { if len(lfs) == 0 { return true } for _, lf := range lfs { if lf.Matches(line) { return true } } return false }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/types/version.go
vendor/github.com/onsi/ginkgo/v2/types/version.go
package types const VERSION = "2.22.0"
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go
package types import ( "os" "strconv" "strings" "sync" "unicode" "github.com/onsi/ginkgo/v2/formatter" ) type Deprecation struct { Message string DocLink string Version string } type deprecations struct{} var Deprecations = deprecations{} func (d deprecations) CustomReporter() Deprecation { return Deprecation{ Message: "Support for custom reporters has been removed in V2. Please read the documentation linked to below for Ginkgo's new behavior and for a migration path:", DocLink: "removed-custom-reporters", Version: "1.16.0", } } func (d deprecations) Async() Deprecation { return Deprecation{ Message: "You are passing a Done channel to a test node to test asynchronous behavior. This is deprecated in Ginkgo V2. Your test will run synchronously and the timeout will be ignored.", DocLink: "removed-async-testing", Version: "1.16.0", } } func (d deprecations) Measure() Deprecation { return Deprecation{ Message: "Measure is deprecated and has been removed from Ginkgo V2. Any Measure tests in your spec will not run. Please migrate to gomega/gmeasure.", DocLink: "removed-measure", Version: "1.16.3", } } func (d deprecations) ParallelNode() Deprecation { return Deprecation{ Message: "GinkgoParallelNode is deprecated and will be removed in Ginkgo V2. Please use GinkgoParallelProcess instead.", DocLink: "renamed-ginkgoparallelnode", Version: "1.16.4", } } func (d deprecations) CurrentGinkgoTestDescription() Deprecation { return Deprecation{ Message: "CurrentGinkgoTestDescription() is deprecated in Ginkgo V2. Use CurrentSpecReport() instead.", DocLink: "changed-currentginkgotestdescription", Version: "1.16.0", } } func (d deprecations) Convert() Deprecation { return Deprecation{ Message: "The convert command is deprecated in Ginkgo V2", DocLink: "removed-ginkgo-convert", Version: "1.16.0", } } func (d deprecations) Blur() Deprecation { return Deprecation{ Message: "The blur command is deprecated in Ginkgo V2. 
Use 'ginkgo unfocus' instead.", Version: "1.16.0", } } func (d deprecations) Nodot() Deprecation { return Deprecation{ Message: "The nodot command is deprecated in Ginkgo V2. Please either dot-import Ginkgo or use the package identifier in your code to references objects and types provided by Ginkgo and Gomega.", DocLink: "removed-ginkgo-nodot", Version: "1.16.0", } } func (d deprecations) SuppressProgressReporting() Deprecation { return Deprecation{ Message: "Improvements to how reporters emit timeline information means that SuppressProgressReporting is no longer necessary and has been deprecated.", Version: "2.5.0", } } type DeprecationTracker struct { deprecations map[Deprecation][]CodeLocation lock *sync.Mutex } func NewDeprecationTracker() *DeprecationTracker { return &DeprecationTracker{ deprecations: map[Deprecation][]CodeLocation{}, lock: &sync.Mutex{}, } } func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...CodeLocation) { ackVersion := os.Getenv("ACK_GINKGO_DEPRECATIONS") if deprecation.Version != "" && ackVersion != "" { ack := ParseSemVer(ackVersion) version := ParseSemVer(deprecation.Version) if ack.GreaterThanOrEqualTo(version) { return } } d.lock.Lock() defer d.lock.Unlock() if len(cl) == 1 { d.deprecations[deprecation] = append(d.deprecations[deprecation], cl[0]) } else { d.deprecations[deprecation] = []CodeLocation{} } } func (d *DeprecationTracker) DidTrackDeprecations() bool { d.lock.Lock() defer d.lock.Unlock() return len(d.deprecations) > 0 } func (d *DeprecationTracker) DeprecationsReport() string { d.lock.Lock() defer d.lock.Unlock() out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n") out += formatter.F("{{light-yellow}}============================================={{/}}\n") for deprecation, locations := range d.deprecations { out += formatter.Fi(1, "{{yellow}}"+deprecation.Message+"{{/}}\n") if deprecation.DocLink != "" { out += formatter.Fi(1, "{{bold}}Learn more at:{{/}} 
{{cyan}}{{underline}}https://onsi.github.io/ginkgo/MIGRATING_TO_V2#%s{{/}}\n", deprecation.DocLink) } for _, location := range locations { out += formatter.Fi(2, "{{gray}}%s{{/}}\n", location) } } out += formatter.F("\n{{gray}}To silence deprecations that can be silenced set the following environment variable:{{/}}\n") out += formatter.Fi(1, "{{gray}}ACK_GINKGO_DEPRECATIONS=%s{{/}}\n", VERSION) return out } type SemVer struct { Major int Minor int Patch int } func (s SemVer) GreaterThanOrEqualTo(o SemVer) bool { return (s.Major > o.Major) || (s.Major == o.Major && s.Minor > o.Minor) || (s.Major == o.Major && s.Minor == o.Minor && s.Patch >= o.Patch) } func ParseSemVer(semver string) SemVer { out := SemVer{} semver = strings.TrimFunc(semver, func(r rune) bool { return !(unicode.IsNumber(r) || r == '.') }) components := strings.Split(semver, ".") if len(components) > 0 { out.Major, _ = strconv.Atoi(components[0]) } if len(components) > 1 { out.Minor, _ = strconv.Atoi(components[1]) } if len(components) > 2 { out.Patch, _ = strconv.Atoi(components[2]) } return out }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
vendor/github.com/onsi/ginkgo/v2/types/label_filter.go
package types import ( "fmt" "regexp" "strings" ) var DEBUG_LABEL_FILTER_PARSING = false type LabelFilter func([]string) bool func matchLabelAction(label string) LabelFilter { expected := strings.ToLower(label) return func(labels []string) bool { for i := range labels { if strings.ToLower(labels[i]) == expected { return true } } return false } } func matchLabelRegexAction(regex *regexp.Regexp) LabelFilter { return func(labels []string) bool { for i := range labels { if regex.MatchString(labels[i]) { return true } } return false } } func notAction(filter LabelFilter) LabelFilter { return func(labels []string) bool { return !filter(labels) } } func andAction(a, b LabelFilter) LabelFilter { return func(labels []string) bool { return a(labels) && b(labels) } } func orAction(a, b LabelFilter) LabelFilter { return func(labels []string) bool { return a(labels) || b(labels) } } func labelSetFor(key string, labels []string) map[string]bool { key = strings.ToLower(strings.TrimSpace(key)) out := map[string]bool{} for _, label := range labels { components := strings.SplitN(label, ":", 2) if len(components) < 2 { continue } if key == strings.ToLower(strings.TrimSpace(components[0])) { out[strings.ToLower(strings.TrimSpace(components[1]))] = true } } return out } func isEmptyLabelSetAction(key string) LabelFilter { return func(labels []string) bool { return len(labelSetFor(key, labels)) == 0 } } func containsAnyLabelSetAction(key string, expectedValues []string) LabelFilter { return func(labels []string) bool { set := labelSetFor(key, labels) for _, value := range expectedValues { if set[value] { return true } } return false } } func containsAllLabelSetAction(key string, expectedValues []string) LabelFilter { return func(labels []string) bool { set := labelSetFor(key, labels) for _, value := range expectedValues { if !set[value] { return false } } return true } } func consistsOfLabelSetAction(key string, expectedValues []string) LabelFilter { return func(labels []string) bool { 
set := labelSetFor(key, labels) if len(set) != len(expectedValues) { return false } for _, value := range expectedValues { if !set[value] { return false } } return true } } func isSubsetOfLabelSetAction(key string, expectedValues []string) LabelFilter { expectedSet := map[string]bool{} for _, value := range expectedValues { expectedSet[value] = true } return func(labels []string) bool { set := labelSetFor(key, labels) for value := range set { if !expectedSet[value] { return false } } return true } } type lfToken uint const ( lfTokenInvalid lfToken = iota lfTokenRoot lfTokenOpenGroup lfTokenCloseGroup lfTokenNot lfTokenAnd lfTokenOr lfTokenRegexp lfTokenLabel lfTokenSetKey lfTokenSetOperation lfTokenSetArgument lfTokenEOF ) func (l lfToken) Precedence() int { switch l { case lfTokenRoot, lfTokenOpenGroup: return 0 case lfTokenOr: return 1 case lfTokenAnd: return 2 case lfTokenNot: return 3 case lfTokenSetOperation: return 4 } return -1 } func (l lfToken) String() string { switch l { case lfTokenRoot: return "ROOT" case lfTokenOpenGroup: return "(" case lfTokenCloseGroup: return ")" case lfTokenNot: return "!" 
case lfTokenAnd: return "&&" case lfTokenOr: return "||" case lfTokenRegexp: return "/regexp/" case lfTokenLabel: return "label" case lfTokenSetKey: return "set_key" case lfTokenSetOperation: return "set_operation" case lfTokenSetArgument: return "set_argument" case lfTokenEOF: return "EOF" } return "INVALID" } type treeNode struct { token lfToken location int value string parent *treeNode leftNode *treeNode rightNode *treeNode } func (tn *treeNode) setRightNode(node *treeNode) { tn.rightNode = node node.parent = tn } func (tn *treeNode) setLeftNode(node *treeNode) { tn.leftNode = node node.parent = tn } func (tn *treeNode) firstAncestorWithPrecedenceLEQ(precedence int) *treeNode { if tn.token.Precedence() <= precedence { return tn } return tn.parent.firstAncestorWithPrecedenceLEQ(precedence) } func (tn *treeNode) firstUnmatchedOpenNode() *treeNode { if tn.token == lfTokenOpenGroup { return tn } if tn.parent == nil { return nil } return tn.parent.firstUnmatchedOpenNode() } func (tn *treeNode) constructLabelFilter(input string) (LabelFilter, error) { switch tn.token { case lfTokenOpenGroup: return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, "Mismatched '(' - could not find matching ')'.") case lfTokenLabel: return matchLabelAction(tn.value), nil case lfTokenRegexp: re, err := regexp.Compile(tn.value) if err != nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("RegExp compilation error: %s", err)) } return matchLabelRegexAction(re), nil case lfTokenSetOperation: tokenSetOperation := strings.ToLower(tn.value) if tokenSetOperation == "isempty" { return isEmptyLabelSetAction(tn.leftNode.value), nil } if tn.rightNode == nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Set operation '%s' is missing an argument.", tn.value)) } rawValues := strings.Split(tn.rightNode.value, ",") values := make([]string, len(rawValues)) for i := range rawValues { values[i] = 
strings.ToLower(strings.TrimSpace(rawValues[i])) if strings.ContainsAny(values[i], "&|!,()/") { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, fmt.Sprintf("Invalid label value '%s' in set operation argument.", values[i])) } else if values[i] == "" { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.rightNode.location, "Empty label value in set operation argument.") } } switch tokenSetOperation { case "containsany": return containsAnyLabelSetAction(tn.leftNode.value, values), nil case "containsall": return containsAllLabelSetAction(tn.leftNode.value, values), nil case "consistsof": return consistsOfLabelSetAction(tn.leftNode.value, values), nil case "issubsetof": return isSubsetOfLabelSetAction(tn.leftNode.value, values), nil } } if tn.rightNode == nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, -1, "Unexpected EOF.") } rightLF, err := tn.rightNode.constructLabelFilter(input) if err != nil { return nil, err } switch tn.token { case lfTokenRoot, lfTokenCloseGroup: return rightLF, nil case lfTokenNot: return notAction(rightLF), nil } if tn.leftNode == nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Malformed tree - '%s' is missing left operand.", tn.token)) } leftLF, err := tn.leftNode.constructLabelFilter(input) if err != nil { return nil, err } switch tn.token { case lfTokenAnd: return andAction(leftLF, rightLF), nil case lfTokenOr: return orAction(leftLF, rightLF), nil } return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, tn.location, fmt.Sprintf("Invalid token '%s'.", tn.token)) } func (tn *treeNode) tokenString() string { out := fmt.Sprintf("<%s", tn.token) if tn.value != "" { out += " | " + tn.value } out += ">" return out } func (tn *treeNode) toString(indent int) string { out := tn.tokenString() + "\n" if tn.leftNode != nil { out += fmt.Sprintf("%s |_(L)_%s", strings.Repeat(" ", indent), tn.leftNode.toString(indent+1)) } if 
tn.rightNode != nil { out += fmt.Sprintf("%s |_(R)_%s", strings.Repeat(" ", indent), tn.rightNode.toString(indent+1)) } return out } var validSetOperations = map[string]string{ "containsany": "containsAny", "containsall": "containsAll", "consistsof": "consistsOf", "issubsetof": "isSubsetOf", "isempty": "isEmpty", } func tokenize(input string) func() (*treeNode, error) { lastToken := lfTokenInvalid lastValue := "" runes, i := []rune(input), 0 peekIs := func(r rune) bool { if i+1 < len(runes) { return runes[i+1] == r } return false } consumeUntil := func(cutset string) (string, int) { j := i for ; j < len(runes); j++ { if strings.IndexRune(cutset, runes[j]) >= 0 { break } } return string(runes[i:j]), j - i } return func() (*treeNode, error) { for i < len(runes) && runes[i] == ' ' { i += 1 } if i >= len(runes) { return &treeNode{token: lfTokenEOF}, nil } node := &treeNode{location: i} defer func() { lastToken = node.token lastValue = node.value }() if lastToken == lfTokenSetKey { //we should get a valid set operation next value, n := consumeUntil(" )") if validSetOperations[strings.ToLower(value)] == "" { return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, fmt.Sprintf("Invalid set operation '%s'.", value)) } i += n node.token, node.value = lfTokenSetOperation, value return node, nil } if lastToken == lfTokenSetOperation { //we should get an argument next, if we aren't isempty var arg = "" origI := i if runes[i] == '{' { i += 1 value, n := consumeUntil("}") if i+n >= len(runes) { return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-1, "Missing closing '}' in set operation argument?") } i += n + 1 arg = value } else { value, n := consumeUntil("&|!,()/") i += n arg = strings.TrimSpace(value) } if strings.ToLower(lastValue) == "isempty" && arg != "" { return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("isEmpty does not take arguments, was passed '%s'.", arg)) } if arg == "" && 
strings.ToLower(lastValue) != "isempty" { if i < len(runes) && runes[i] == '/' { return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, "Set operations do not support regular expressions.") } else { return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, origI, fmt.Sprintf("Set operation '%s' requires an argument.", lastValue)) } } // note that we sent an empty SetArgument token if we are isempty node.token, node.value = lfTokenSetArgument, arg return node, nil } switch runes[i] { case '&': if !peekIs('&') { return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '&'. Did you mean '&&'?") } i += 2 node.token = lfTokenAnd case '|': if !peekIs('|') { return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Invalid token '|'. Did you mean '||'?") } i += 2 node.token = lfTokenOr case '!': i += 1 node.token = lfTokenNot case ',': i += 1 node.token = lfTokenOr case '(': i += 1 node.token = lfTokenOpenGroup case ')': i += 1 node.token = lfTokenCloseGroup case '/': i += 1 value, n := consumeUntil("/") i += n + 1 node.token, node.value = lfTokenRegexp, value default: value, n := consumeUntil("&|!,()/:") i += n value = strings.TrimSpace(value) //are we the beginning of a set operation? 
if i < len(runes) && runes[i] == ':' { if peekIs(' ') { if value == "" { return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set key.") } i += 1 //we are the beginning of a set operation node.token, node.value = lfTokenSetKey, value return node, nil } additionalValue, n := consumeUntil("&|!,()/") additionalValue = strings.TrimSpace(additionalValue) if additionalValue == ":" { return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i, "Missing set operation.") } i += n value += additionalValue } valueToCheckForSetOperation := strings.ToLower(value) for setOperation := range validSetOperations { idx := strings.Index(valueToCheckForSetOperation, " "+setOperation) if idx > 0 { return &treeNode{}, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, i-n+idx+1, fmt.Sprintf("Looks like you are using the set operator '%s' but did not provide a set key. Did you forget the ':'?", validSetOperations[setOperation])) } } node.token, node.value = lfTokenLabel, strings.TrimSpace(value) } return node, nil } } func MustParseLabelFilter(input string) LabelFilter { filter, err := ParseLabelFilter(input) if err != nil { panic(err) } return filter } func ParseLabelFilter(input string) (LabelFilter, error) { if DEBUG_LABEL_FILTER_PARSING { fmt.Println("\n==============") fmt.Println("Input: ", input) fmt.Print("Tokens: ") } if input == "" { return func(_ []string) bool { return true }, nil } nextToken := tokenize(input) root := &treeNode{token: lfTokenRoot} current := root LOOP: for { node, err := nextToken() if err != nil { return nil, err } if DEBUG_LABEL_FILTER_PARSING { fmt.Print(node.tokenString() + " ") } switch node.token { case lfTokenEOF: break LOOP case lfTokenLabel, lfTokenRegexp, lfTokenSetKey: if current.rightNode != nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found two adjacent labels. 
You need an operator between them.") } current.setRightNode(node) case lfTokenNot, lfTokenOpenGroup: if current.rightNode != nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Invalid token '%s'.", node.token)) } current.setRightNode(node) current = node case lfTokenAnd, lfTokenOr: if current.rightNode == nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Operator '%s' missing left hand operand.", node.token)) } nodeToStealFrom := current.firstAncestorWithPrecedenceLEQ(node.token.Precedence()) node.setLeftNode(nodeToStealFrom.rightNode) nodeToStealFrom.setRightNode(node) current = node case lfTokenSetOperation: if current.rightNode == nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Set operation '%s' missing left hand operand.", node.value)) } node.setLeftNode(current.rightNode) current.setRightNode(node) current = node case lfTokenSetArgument: if current.rightNode != nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unexpected set argument '%s'.", node.token)) } current.setRightNode(node) case lfTokenCloseGroup: firstUnmatchedOpenNode := current.firstUnmatchedOpenNode() if firstUnmatchedOpenNode == nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Mismatched ')' - could not find matching '('.") } if firstUnmatchedOpenNode == current && current.rightNode == nil { return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, "Found empty '()' group.") } firstUnmatchedOpenNode.token = lfTokenCloseGroup //signify the group is now closed current = firstUnmatchedOpenNode.parent default: return nil, GinkgoErrors.SyntaxErrorParsingLabelFilter(input, node.location, fmt.Sprintf("Unknown token '%s'.", node.token)) } } if DEBUG_LABEL_FILTER_PARSING { fmt.Printf("\n Tree:\n%s", root.toString(0)) } return root.constructLabelFilter(input) } func 
ValidateAndCleanupLabel(label string, cl CodeLocation) (string, error) { out := strings.TrimSpace(label) if out == "" { return "", GinkgoErrors.InvalidEmptyLabel(cl) } if strings.ContainsAny(out, "&|!,()/") { return "", GinkgoErrors.InvalidLabel(label, cl) } if out[0] == ':' { return "", GinkgoErrors.InvalidLabel(label, cl) } if strings.Contains(out, ":") { components := strings.SplitN(out, ":", 2) if len(components) < 2 || components[1] == "" { return "", GinkgoErrors.InvalidLabel(label, cl) } } return out, nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go
package reporters

import (
	"encoding/json"
	"fmt"
	"os"
	"path"

	"github.com/onsi/ginkgo/v2/types"
)

// GenerateJSONReport produces a JSON-formatted report at the passed in destination.
//
// The report is written as a single-element JSON array so that multiple report
// files can later be merged by MergeAndCleanupJSONReports.
func GenerateJSONReport(report types.Report, destination string) error {
	if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
		return err
	}
	f, err := os.Create(destination)
	if err != nil {
		return err
	}
	// The deferred Close guards the early-return error paths; the explicit
	// Close at the end surfaces any write/flush error that would otherwise be
	// silently dropped by the defer.
	defer f.Close()
	enc := json.NewEncoder(f)
	enc.SetIndent("", "  ")
	if err := enc.Encode([]types.Report{report}); err != nil {
		return err
	}
	return f.Close()
}

// MergeAndCleanupJSONReports produces a single JSON-formatted report at the passed in
// destination by merging the JSON-formatted reports provided in sources.
// It skips over reports that fail to read or decode but reports on them via the
// returned messages []string. Each successfully decoded source file is deleted
// after it has been read (the "cleanup" part of the name).
func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) {
	messages := []string{}
	allReports := []types.Report{}
	for _, source := range sources {
		reports := []types.Report{}
		data, err := os.ReadFile(source)
		if err != nil {
			messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error()))
			continue
		}
		err = json.Unmarshal(data, &reports)
		if err != nil {
			messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error()))
			continue
		}
		os.Remove(source)
		allReports = append(allReports, reports...)
	}

	if err := os.MkdirAll(path.Dir(destination), 0770); err != nil {
		return messages, err
	}
	f, err := os.Create(destination)
	if err != nil {
		return messages, err
	}
	// Same pattern as GenerateJSONReport: defer for error paths, explicit
	// Close to report flush failures.
	defer f.Close()
	enc := json.NewEncoder(f)
	enc.SetIndent("", "  ")
	if err := enc.Encode(allReports); err != nil {
		return messages, err
	}
	return messages, f.Close()
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
package reporters

import (
	"github.com/onsi/ginkgo/v2/config"
	"github.com/onsi/ginkgo/v2/types"
)

// Deprecated: DeprecatedReporter was how Ginkgo V1 provided support for CustomReporters
// this has been removed in V2.
// Please read the documentation at:
// https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-custom-reporters
// for Ginkgo's new behavior and for a migration path.
type DeprecatedReporter interface {
	// Called once, before any specs run.
	SuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary)
	// Called after a BeforeSuite/SynchronizedBeforeSuite node has run.
	BeforeSuiteDidRun(setupSummary *types.SetupSummary)
	// Called before each spec runs.
	SpecWillRun(specSummary *types.SpecSummary)
	// Called after each spec completes.
	SpecDidComplete(specSummary *types.SpecSummary)
	// Called after an AfterSuite/SynchronizedAfterSuite node has run.
	AfterSuiteDidRun(setupSummary *types.SetupSummary)
	// Called once, after all specs have run.
	SuiteDidEnd(summary *types.SuiteSummary)
}

// ReportViaDeprecatedReporter takes a V1 custom reporter and a V2 report and
// calls the custom reporter's methods with appropriately transformed data from the V2 report.
//
// ReportViaDeprecatedReporter should be called in a `ReportAfterSuite()`
//
// Deprecated: ReportViaDeprecatedReporter method exists to help developer bridge between deprecated V1 functionality and the new
// reporting support in V2.  It will be removed in a future minor version of Ginkgo.
func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Report) {
	// Map the V2 suite configuration onto the V1 config struct.  Note that
	// EmitSpecProgress no longer exists in V2 and is hard-coded to false, and
	// both SyncHost and StreamHost map to the single V2 ParallelHost.
	conf := config.DeprecatedGinkgoConfigType{
		RandomSeed:        report.SuiteConfig.RandomSeed,
		RandomizeAllSpecs: report.SuiteConfig.RandomizeAllSpecs,
		FocusStrings:      report.SuiteConfig.FocusStrings,
		SkipStrings:       report.SuiteConfig.SkipStrings,
		FailOnPending:     report.SuiteConfig.FailOnPending,
		FailFast:          report.SuiteConfig.FailFast,
		FlakeAttempts:     report.SuiteConfig.FlakeAttempts,
		EmitSpecProgress:  false,
		DryRun:            report.SuiteConfig.DryRun,
		ParallelNode:      report.SuiteConfig.ParallelProcess,
		ParallelTotal:     report.SuiteConfig.ParallelTotal,
		SyncHost:          report.SuiteConfig.ParallelHost,
		StreamHost:        report.SuiteConfig.ParallelHost,
	}

	// V1's suite summary; the per-state counters below are filled in as we
	// walk the spec reports, and the suite-level fields at the very end.
	summary := &types.DeprecatedSuiteSummary{
		SuiteDescription: report.SuiteDescription,
		SuiteID:          report.SuitePath,

		NumberOfSpecsBeforeParallelization: report.PreRunStats.TotalSpecs,
		NumberOfTotalSpecs:                 report.PreRunStats.TotalSpecs,
		NumberOfSpecsThatWillBeRun:         report.PreRunStats.SpecsThatWillRun,
	}

	reporter.SuiteWillBegin(conf, summary)

	// Replay each V2 spec report through the V1 reporter callbacks.  Suite
	// setup nodes become SetupSummaries; only NodeTypeIt specs become
	// SpecSummaries and contribute to the counters.
	for _, spec := range report.SpecReports {
		switch spec.LeafNodeType {
		case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite:
			setupSummary := &types.DeprecatedSetupSummary{
				ComponentType:  spec.LeafNodeType,
				CodeLocation:   spec.LeafNodeLocation,
				State:          spec.State,
				RunTime:        spec.RunTime,
				Failure:        failureFor(spec),
				CapturedOutput: spec.CombinedOutput(),
				SuiteID:        report.SuitePath,
			}
			reporter.BeforeSuiteDidRun(setupSummary)
		case types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite:
			setupSummary := &types.DeprecatedSetupSummary{
				ComponentType:  spec.LeafNodeType,
				CodeLocation:   spec.LeafNodeLocation,
				State:          spec.State,
				RunTime:        spec.RunTime,
				Failure:        failureFor(spec),
				CapturedOutput: spec.CombinedOutput(),
				SuiteID:        report.SuitePath,
			}
			reporter.AfterSuiteDidRun(setupSummary)
		case types.NodeTypeIt:
			// V1 expects the container hierarchy texts/locations with the
			// leaf node appended at the end.
			componentTexts, componentCodeLocations := []string{}, []types.CodeLocation{}
			componentTexts = append(componentTexts, spec.ContainerHierarchyTexts...)
			componentCodeLocations = append(componentCodeLocations, spec.ContainerHierarchyLocations...)
			componentTexts = append(componentTexts, spec.LeafNodeText)
			componentCodeLocations = append(componentCodeLocations, spec.LeafNodeLocation)

			specSummary := &types.DeprecatedSpecSummary{
				ComponentTexts:         componentTexts,
				ComponentCodeLocations: componentCodeLocations,
				State:                  spec.State,
				RunTime:                spec.RunTime,
				Failure:                failureFor(spec),
				NumberOfSamples:        spec.NumAttempts,
				CapturedOutput:         spec.CombinedOutput(),
				SuiteID:                report.SuitePath,
			}
			// V1 emitted WillRun before the spec ran; in this bridge both
			// callbacks fire back-to-back after the fact.
			reporter.SpecWillRun(specSummary)
			reporter.SpecDidComplete(specSummary)

			switch spec.State {
			case types.SpecStatePending:
				summary.NumberOfPendingSpecs += 1
			case types.SpecStateSkipped:
				summary.NumberOfSkippedSpecs += 1
			case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateInterrupted:
				summary.NumberOfFailedSpecs += 1
			case types.SpecStatePassed:
				summary.NumberOfPassedSpecs += 1
				// A spec that passed after more than one attempt counts as flaked.
				if spec.NumAttempts > 1 {
					summary.NumberOfFlakedSpecs += 1
				}
			}
		}
	}

	summary.SuiteSucceeded = report.SuiteSucceeded
	summary.RunTime = report.RunTime

	reporter.SuiteDidEnd(summary)
}

// failureFor converts a V2 spec failure into V1's DeprecatedSpecFailure,
// returning the zero value when the spec did not fail.
func failureFor(spec types.SpecReport) types.DeprecatedSpecFailure {
	if spec.Failure.IsZero() {
		return types.DeprecatedSpecFailure{}
	}

	// ComponentIndex points into the V1 ComponentTexts slice: -1 for a
	// top-level failure, the container's index for a container failure, or the
	// leaf node's position for a failure in the leaf itself.
	index := 0
	switch spec.Failure.FailureNodeContext {
	case types.FailureNodeInContainer:
		index = spec.Failure.FailureNodeContainerIndex
	case types.FailureNodeAtTopLevel:
		index = -1
	case types.FailureNodeIsLeafNode:
		index = len(spec.ContainerHierarchyTexts) - 1
		if spec.LeafNodeText != "" {
			index += 1
		}
	}

	return types.DeprecatedSpecFailure{
		Message:               spec.Failure.Message,
		Location:              spec.Failure.Location,
		ForwardedPanic:        spec.Failure.ForwardedPanic,
		ComponentIndex:        index,
		ComponentType:         spec.Failure.FailureNodeType,
		ComponentCodeLocation: spec.Failure.FailureNodeLocation,
	}
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go
package reporters

import (
	"github.com/onsi/ginkgo/v2/types"
)

// Reporter is the interface implemented by Ginkgo's output reporters.
// The first four methods follow the suite/spec lifecycle; the Emit* methods
// stream timeline events as they occur.
type Reporter interface {
	SuiteWillBegin(report types.Report)
	WillRun(report types.SpecReport)
	DidRun(report types.SpecReport)
	SuiteDidEnd(report types.Report)

	//Timeline emission
	EmitFailure(state types.SpecState, failure types.Failure)
	EmitProgressReport(progressReport types.ProgressReport)
	EmitReportEntry(entry types.ReportEntry)
	EmitSpecEvent(event types.SpecEvent)
}

// NoopReporter implements Reporter with empty methods; it can be used wherever
// a Reporter is required but no output is desired.
type NoopReporter struct{}

func (n NoopReporter) SuiteWillBegin(report types.Report)                       {}
func (n NoopReporter) WillRun(report types.SpecReport)                          {}
func (n NoopReporter) DidRun(report types.SpecReport)                           {}
func (n NoopReporter) SuiteDidEnd(report types.Report)                          {}
func (n NoopReporter) EmitFailure(state types.SpecState, failure types.Failure) {}
func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport)   {}
func (n NoopReporter) EmitReportEntry(entry types.ReportEntry)                  {}
func (n NoopReporter) EmitSpecEvent(event types.SpecEvent)                      {}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go
/*
Ginkgo's Default Reporter

A number of command line flags are available to tweak Ginkgo's default output.

These are documented [here](http://onsi.github.io/ginkgo/#running_tests)
*/
package reporters

import (
	"fmt"
	"io"
	"runtime"
	"strings"
	"sync"
	"time"

	"github.com/onsi/ginkgo/v2/formatter"
	"github.com/onsi/ginkgo/v2/types"
)

// DefaultReporter renders suite/spec progress and results to a writer,
// honoring the verbosity and color settings in types.ReporterConfig.
type DefaultReporter struct {
	conf   types.ReporterConfig
	writer io.Writer

	// managing the emission stream
	lastCharWasNewline       bool
	lastEmissionWasDelimiter bool

	// rendering
	specDenoter  string
	retryDenoter string
	formatter    formatter.Formatter

	runningInParallel bool
	lock              *sync.Mutex
}

// NewDefaultReporterUnderTest returns a DefaultReporter whose formatter passes
// color markup through verbatim, so tests can assert on the markup itself.
func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter {
	reporter := NewDefaultReporter(conf, writer)
	reporter.formatter = formatter.New(formatter.ColorModePassthrough)

	return reporter
}

// NewDefaultReporter constructs a DefaultReporter that writes to writer.
// On Windows the unicode spec/retry denoters are replaced with ASCII.
func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultReporter {
	reporter := &DefaultReporter{
		conf:   conf,
		writer: writer,

		lastCharWasNewline:       true,
		lastEmissionWasDelimiter: false,

		specDenoter:  "•",
		retryDenoter: "↺",
		formatter:    formatter.NewWithNoColorBool(conf.NoColor),
		lock:         &sync.Mutex{},
	}
	if runtime.GOOS == "windows" {
		reporter.specDenoter = "+"
		reporter.retryDenoter = "R"
	}

	return reporter
}

/* The Reporter Interface */

// SuiteWillBegin emits the suite banner: a one-liner in succinct mode, or a
// multi-line banner (description, labels, seed, spec counts) otherwise.
func (r *DefaultReporter) SuiteWillBegin(report types.Report) {
	if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) {
		r.emit(r.f("[%d] {{bold}}%s{{/}} ", report.SuiteConfig.RandomSeed, report.SuiteDescription))
		if len(report.SuiteLabels) > 0 {
			r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", ")))
		}
		r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs))
		if report.SuiteConfig.ParallelTotal > 1 {
			r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal))
		}
	} else {
		banner := r.f("Running Suite: %s - %s", report.SuiteDescription, report.SuitePath)
		r.emitBlock(banner)
		// the "=" underline is sized to the widest of the banner and the label line
		bannerWidth := len(banner)
		if len(report.SuiteLabels) > 0 {
			labels := strings.Join(report.SuiteLabels, ", ")
			r.emitBlock(r.f("{{coral}}[%s]{{/}} ", labels))
			if len(labels)+2 > bannerWidth {
				bannerWidth = len(labels) + 2
			}
		}
		r.emitBlock(strings.Repeat("=", bannerWidth))

		out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed)
		if report.SuiteConfig.RandomizeAllSpecs {
			out += r.f(" - will randomize all specs")
		}
		r.emitBlock(out)
		r.emit("\n")
		r.emitBlock(r.f("Will run {{bold}}%d{{/}} of {{bold}}%d{{/}} specs", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs))
		if report.SuiteConfig.ParallelTotal > 1 {
			r.emitBlock(r.f("Running in parallel across {{bold}}%d{{/}} processes", report.SuiteConfig.ParallelTotal))
		}
	}
}

// SuiteDidEnd emits a summary of all failures followed by the final pass/fail
// tallies for the suite.
func (r *DefaultReporter) SuiteDidEnd(report types.Report) {
	failures := report.SpecReports.WithState(types.SpecStateFailureStates)
	if len(failures) > 0 {
		r.emitBlock("\n")
		if len(failures) > 1 {
			r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures)))
		} else {
			r.emitBlock(r.f("{{red}}{{bold}}Summarizing 1 Failure:{{/}}"))
		}
		for _, specReport := range failures {
			// color and heading depend on how the spec failed
			highlightColor, heading := "{{red}}", "[FAIL]"
			switch specReport.State {
			case types.SpecStatePanicked:
				highlightColor, heading = "{{magenta}}", "[PANICKED!]"
			case types.SpecStateAborted:
				highlightColor, heading = "{{coral}}", "[ABORTED]"
			case types.SpecStateTimedout:
				highlightColor, heading = "{{orange}}", "[TIMEDOUT]"
			case types.SpecStateInterrupted:
				highlightColor, heading = "{{orange}}", "[INTERRUPTED]"
			}
			locationBlock := r.codeLocationBlock(specReport, highlightColor, false, true)
			r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock))
		}
	}

	//summarize the suite
	if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded {
		r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime))
		return
	}

	r.emitBlock("\n")
	color, status := "{{green}}{{bold}}", "SUCCESS!"
	if !report.SuiteSucceeded {
		color, status = "{{red}}{{bold}}", "FAIL!"
	}

	specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes
	r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}",
		specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates),
		report.PreRunStats.TotalSpecs,
		report.RunTime.Seconds()),
	)

	switch len(report.SpecialSuiteFailureReasons) {
	case 0:
		r.emit(r.f(color+"%s{{/}} -- ", status))
	case 1:
		r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0]))
	default:
		r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", ")))
	}

	if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 {
		r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n"))
	} else {
		r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed)))
		r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates)))
		if specs.CountOfFlakedSpecs() > 0 {
			r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs()))
		}
		if specs.CountOfRepeatedSpecs() > 0 {
			r.emit(r.f("{{light-yellow}}{{bold}}%d Repeated{{/}} | ", specs.CountOfRepeatedSpecs()))
		}
		r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending)))
		r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped)))
	}
}

// WillRun announces a spec before it runs, but only in (very) verbose serial
// runs and never for pending/skipped specs.
func (r *DefaultReporter) WillRun(report types.SpecReport) {
	v := r.conf.Verbosity()
	if v.LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) || report.RunningInParallel {
		return
	}

	r.emitDelimiter(0)
	r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false)))
}

// wrapTextBlock emits fn()'s output between section markers - GitHub Actions
// ::group:: folding markers when GithubOutput is set, plain gray markers otherwise.
func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) {
	r.emitBlock("\n")
	if r.conf.GithubOutput {
		r.emitBlock(r.fi(1, "::group::%s", sectionName))
	} else {
		r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName))
	}
	fn()
	if r.conf.GithubOutput {
		r.emitBlock(r.fi(1, "::endgroup::"))
	} else {
		r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName))
	}
}

// DidRun renders the result of a completed spec.  Depending on verbosity,
// parallelism, and outcome it emits anything from a single denoter character to
// a full block with timeline, captured output, report entries, and failure details.
func (r *DefaultReporter) DidRun(report types.SpecReport) {
	v := r.conf.Verbosity()
	inParallel := report.RunningInParallel

	//should we completely omit this spec?
	if report.State.Is(types.SpecStateSkipped) && r.conf.SilenceSkips {
		return
	}

	header := r.specDenoter
	if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
		header = fmt.Sprintf("[%s]", report.LeafNodeType)
	}
	highlightColor := r.highlightColorForState(report.State)

	// have we already been streaming the timeline?
	timelineHasBeenStreaming := v.GTE(types.VerbosityLevelVerbose) && !inParallel

	// should we show the timeline?
	var timeline types.Timeline
	showTimeline := !timelineHasBeenStreaming && (v.GTE(types.VerbosityLevelVerbose) || report.Failed())
	if showTimeline {
		timeline = report.Timeline().WithoutHiddenReportEntries()
		keepVeryVerboseSpecEvents := v.Is(types.VerbosityLevelVeryVerbose) ||
			(v.Is(types.VerbosityLevelVerbose) && r.conf.ShowNodeEvents) ||
			(report.Failed() && r.conf.ShowNodeEvents)
		if !keepVeryVerboseSpecEvents {
			timeline = timeline.WithoutVeryVerboseSpecEvents()
		}
		if len(timeline) == 0 && report.CapturedGinkgoWriterOutput == "" {
			// the timeline is completely empty - don't show it
			showTimeline = false
		}
		if v.LT(types.VerbosityLevelVeryVerbose) && report.CapturedGinkgoWriterOutput == "" && len(timeline) > 0 {
			//if we aren't -vv and the timeline only has a single failure, don't show it as it will appear at the end of the report
			failure, isFailure := timeline[0].(types.Failure)
			if isFailure && (len(timeline) == 1 || (len(timeline) == 2 && failure.AdditionalFailure != nil)) {
				showTimeline = false
			}
		}
	}

	// should we have a separate section for always-visible reports?
	showSeparateVisibilityAlwaysReportsSection := !timelineHasBeenStreaming && !showTimeline && report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways)

	// should we have a separate section for captured stdout/stderr
	showSeparateStdSection := inParallel && (report.CapturedStdOutErr != "")

	// given all that - do we have any actual content to show?  or are we a single denoter in a stream?
	reportHasContent := v.Is(types.VerbosityLevelVeryVerbose) || showTimeline || showSeparateVisibilityAlwaysReportsSection || showSeparateStdSection || report.Failed() || (v.Is(types.VerbosityLevelVerbose) && !report.State.Is(types.SpecStateSkipped))

	// should we show a runtime?
	includeRuntime := !report.State.Is(types.SpecStateSkipped|types.SpecStatePending) || (report.State.Is(types.SpecStateSkipped) && report.Failure.Message != "")

	// should we show the codelocation block?
	showCodeLocation := !timelineHasBeenStreaming || !report.State.Is(types.SpecStatePassed)

	// adjust the header (and sometimes reportHasContent) based on outcome
	switch report.State {
	case types.SpecStatePassed:
		if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) && !reportHasContent {
			return
		}
		if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
			header = fmt.Sprintf("%s PASSED", header)
		}
		if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 {
			header, reportHasContent = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), true
		}
	case types.SpecStatePending:
		header = "P"
		if v.GT(types.VerbosityLevelSuccinct) {
			header, reportHasContent = "P [PENDING]", true
		}
	case types.SpecStateSkipped:
		header = "S"
		if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && report.Failure.Message != "") {
			header, reportHasContent = "S [SKIPPED]", true
		}
	default:
		header = fmt.Sprintf("%s [%s]", header, r.humanReadableState(report.State))
		if report.MaxMustPassRepeatedly > 1 {
			header = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts)
		}
	}

	// If we have no content to show, just emit the header and return
	if !reportHasContent {
		r.emit(r.f(highlightColor + header + "{{/}}"))
		if r.conf.ForceNewlines {
			r.emit("\n")
		}
		return
	}

	if includeRuntime {
		header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds())
	}

	// Emit header
	if !timelineHasBeenStreaming {
		r.emitDelimiter(0)
	}
	r.emitBlock(r.f(highlightColor + header + "{{/}}"))
	if showCodeLocation {
		r.emitBlock(r.codeLocationBlock(report, highlightColor, v.Is(types.VerbosityLevelVeryVerbose), false))
	}

	//Emit Stdout/Stderr Output
	if showSeparateStdSection {
		r.wrapTextBlock("Captured StdOut/StdErr Output", func() {
			r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr))
		})
	}

	if showSeparateVisibilityAlwaysReportsSection {
		r.wrapTextBlock("Report Entries", func() {
			for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) {
				r.emitReportEntry(1, entry)
			}
		})
	}

	if showTimeline {
		r.wrapTextBlock("Timeline", func() {
			r.emitTimeline(1, report, timeline)
		})
	}

	// Emit Failure Message
	if !report.Failure.IsZero() && !v.Is(types.VerbosityLevelVeryVerbose) {
		r.emitBlock("\n")
		r.emitFailure(1, report.State, report.Failure, true)
		if len(report.AdditionalFailures) > 0 {
			r.emitBlock(r.fi(1, "\nThere were {{bold}}{{red}}additional failures{{/}} detected.  To view them in detail run {{bold}}ginkgo -vv{{/}}"))
		}
	}

	r.emitDelimiter(0)
}

// highlightColorForState maps a spec state to the color markup used when
// rendering that state.
func (r *DefaultReporter) highlightColorForState(state types.SpecState) string {
	switch state {
	case types.SpecStatePassed:
		return "{{green}}"
	case types.SpecStatePending:
		return "{{yellow}}"
	case types.SpecStateSkipped:
		return "{{cyan}}"
	case types.SpecStateFailed:
		return "{{red}}"
	case types.SpecStateTimedout:
		return "{{orange}}"
	case types.SpecStatePanicked:
		return "{{magenta}}"
	case types.SpecStateInterrupted:
		return "{{orange}}"
	case types.SpecStateAborted:
		return "{{coral}}"
	default:
		return "{{gray}}"
	}
}

func (r *DefaultReporter) humanReadableState(state types.SpecState) string {
	return strings.ToUpper(state.String())
}

// emitTimeline interleaves captured GinkgoWriter output with timeline entries
// (failures, report entries, progress reports, spec events), using each
// entry's offset into the GinkgoWriter stream to keep everything in order.
func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, timeline types.Timeline) {
	isVeryVerbose := r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose)
	gw := report.CapturedGinkgoWriterOutput
	cursor := 0
	for _, entry := range timeline {
		tl := entry.GetTimelineLocation()
		// flush GinkgoWriter output up to this entry's position
		if tl.Offset < len(gw) {
			r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset]))
			cursor = tl.Offset
		} else if cursor < len(gw) {
			r.emit(r.fi(indent, "%s", gw[cursor:]))
			cursor = len(gw)
		}
		switch x := entry.(type) {
		case types.Failure:
			if isVeryVerbose {
				r.emitFailure(indent, report.State, x, false)
			} else {
				r.emitShortFailure(indent, report.State, x)
			}
		case types.AdditionalFailure:
			if isVeryVerbose {
				r.emitFailure(indent, x.State, x.Failure, true)
			} else {
				r.emitShortFailure(indent, x.State, x.Failure)
			}
		case types.ReportEntry:
			r.emitReportEntry(indent, x)
		case types.ProgressReport:
			r.emitProgressReport(indent, false, x)
		case types.SpecEvent:
			if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents {
				r.emitSpecEvent(indent, x, isVeryVerbose)
			}
		}
	}
	// flush any remaining GinkgoWriter output
	if cursor < len(gw) {
		r.emit(r.fi(indent, "%s", gw[cursor:]))
	}
}

// EmitFailure streams a failure as it occurs: short form at -v, long form at -vv.
func (r *DefaultReporter) EmitFailure(state types.SpecState, failure types.Failure) {
	if r.conf.Verbosity().Is(types.VerbosityLevelVerbose) {
		r.emitShortFailure(1, state, failure)
	} else if r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) {
		r.emitFailure(1, state, failure, true)
	}
}

// emitShortFailure renders a one-line failure summary.
func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, failure types.Failure) {
	r.emitBlock(r.fi(indent, r.highlightColorForState(state)+"[%s]{{/}} in [%s] - %s {{gray}}@ %s{{/}}",
		r.humanReadableState(state),
		failure.FailureNodeType,
		failure.Location,
		failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT),
	))
}

// emitFailure renders the full failure: message, location (or a GitHub Actions
// annotation when GithubOutput is set), forwarded panic, stack trace, attached
// progress report, and - when requested - any chained additional failure.
func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) {
	highlightColor := r.highlightColorForState(state)
	r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message))
	if r.conf.GithubOutput {
		level := "error"
		if state.Is(types.SpecStateSkipped) {
			level = "notice"
		}
		r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
	} else {
		r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
	}
	if failure.ForwardedPanic != "" {
		r.emitBlock("\n")
		r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic))
	}

	if r.conf.FullTrace || failure.ForwardedPanic != "" {
		r.emitBlock("\n")
		r.emitBlock(r.fi(indent, highlightColor+"Full Stack Trace{{/}}"))
		r.emitBlock(r.fi(indent+1, "%s", failure.Location.FullStackTrace))
	}

	if !failure.ProgressReport.IsZero() {
		r.emitBlock("\n")
		r.emitProgressReport(indent, false, failure.ProgressReport)
	}

	if failure.AdditionalFailure != nil && includeAdditionalFailure {
		r.emitBlock("\n")
		r.emitFailure(indent, failure.AdditionalFailure.State, failure.AdditionalFailure.Failure, true)
	}
}

// EmitProgressReport renders a progress report between delimiters, including
// GinkgoWriter output when it is not already being streamed.
func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) {
	r.emitDelimiter(1)

	if report.RunningInParallel {
		r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess))
	}
	shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose)
	r.emitProgressReport(1, shouldEmitGW, report)
	r.emitDelimiter(1)
}

// emitProgressReport renders the body of a progress report: spec/node/step
// context with runtimes, optionally the tail of GinkgoWriter output, the spec
// goroutine, additional reports, and goroutine stacks of interest.
func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) {
	if report.Message != "" {
		r.emitBlock(r.fi(indent, report.Message+"\n"))
		indent += 1
	}
	if report.LeafNodeText != "" {
		subjectIndent := indent
		if len(report.ContainerHierarchyTexts) > 0 {
			r.emit(r.fi(indent, r.cycleJoin(report.ContainerHierarchyTexts, " ")))
			r.emit(" ")
			// the leaf text continues on the same line as the hierarchy
			subjectIndent = 0
		}
		r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time().Sub(report.SpecStartTime).Round(time.Millisecond)))
		r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation))
		indent += 1
	}
	if report.CurrentNodeType != types.NodeTypeInvalid {
		r.emit(r.fi(indent, "In {{bold}}{{orange}}[%s]{{/}}", report.CurrentNodeType))
		if report.CurrentNodeText != "" && !report.CurrentNodeType.Is(types.NodeTypeIt) {
			r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText))
		}

		r.emit(r.f(" (Node Runtime: %s)\n", report.Time().Sub(report.CurrentNodeStartTime).Round(time.Millisecond)))
		r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation))
		indent += 1
	}
	if report.CurrentStepText != "" {
		r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time().Sub(report.CurrentStepStartTime).Round(time.Millisecond)))
		r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation))
		indent += 1
	}

	if indent > 0 {
		indent -= 1
	}

	if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" {
		r.emit("\n")
		r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}"))
		// only the last `limit` lines are shown when the output is long
		limit, lines := 10, strings.Split(report.CapturedGinkgoWriterOutput, "\n")
		if len(lines) <= limit {
			r.emitBlock(r.fi(indent+1, "%s", report.CapturedGinkgoWriterOutput))
		} else {
			r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}"))
			for _, line := range lines[len(lines)-limit-1:] {
				r.emitBlock(r.fi(indent+1, "%s", line))
			}
		}
		r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}"))
	}

	if !report.SpecGoroutine().IsZero() {
		r.emit("\n")
		r.emit(r.fi(indent, "{{bold}}{{underline}}Spec Goroutine{{/}}\n"))
		r.emitGoroutines(indent, report.SpecGoroutine())
	}

	if len(report.AdditionalReports) > 0 {
		r.emit("\n")
		r.emitBlock(r.fi(indent, "{{gray}}Begin Additional Progress Reports >>{{/}}"))
		for i, additionalReport := range report.AdditionalReports {
			r.emit(r.fi(indent+1, additionalReport))
			if i < len(report.AdditionalReports)-1 {
				r.emitBlock(r.fi(indent+1, "{{gray}}%s{{/}}", strings.Repeat("-", 10)))
			}
		}
		r.emitBlock(r.fi(indent, "{{gray}}<< End Additional Progress Reports{{/}}"))
	}

	highlightedGoroutines := report.HighlightedGoroutines()
	if len(highlightedGoroutines) > 0 {
		r.emit("\n")
		r.emit(r.fi(indent, "{{bold}}{{underline}}Goroutines of Interest{{/}}\n"))
		r.emitGoroutines(indent, highlightedGoroutines...)
	}

	otherGoroutines := report.OtherGoroutines()
	if len(otherGoroutines) > 0 {
		r.emit("\n")
		r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n"))
		r.emitGoroutines(indent, otherGoroutines...)
	}
}

// EmitReportEntry streams a report entry unless verbosity or the entry's
// visibility suppresses it.
func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) {
	if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || entry.Visibility == types.ReportEntryVisibilityNever {
		return
	}
	r.emitReportEntry(1, entry)
}

func (r *DefaultReporter) emitReportEntry(indent uint, entry types.ReportEntry) {
	r.emitBlock(r.fi(indent, "{{bold}}"+entry.Name+"{{gray}} "+fmt.Sprintf("- %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT))))
	if representation := entry.StringRepresentation(); representation != "" {
		r.emitBlock(r.fi(indent+1, representation))
	}
}

// EmitSpecEvent streams a spec event subject to the verbosity/ShowNodeEvents rules.
func (r *DefaultReporter) EmitSpecEvent(event types.SpecEvent) {
	v := r.conf.Verbosity()
	if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && (r.conf.ShowNodeEvents || !event.IsOnlyVisibleAtVeryVerbose())) {
		r.emitSpecEvent(1, event, r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose))
	}
}

// emitSpecEvent renders a single spec event (By steps, node enter/exit,
// repeat/retry attempts), optionally including its code location.
func (r *DefaultReporter) emitSpecEvent(indent uint, event types.SpecEvent, includeLocation bool) {
	location := ""
	if includeLocation {
		location = fmt.Sprintf("- %s ", event.CodeLocation.String())
	}
	switch event.SpecEventType {
	case types.SpecEventInvalid:
		return
	case types.SpecEventByStart:
		r.emitBlock(r.fi(indent, "{{bold}}STEP:{{/}} %s {{gray}}%s@ %s{{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
	case types.SpecEventByEnd:
		r.emitBlock(r.fi(indent, "{{bold}}END STEP:{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond)))
	case types.SpecEventNodeStart:
		r.emitBlock(r.fi(indent, "> Enter {{bold}}[%s]{{/}} %s {{gray}}%s@ %s{{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
	case types.SpecEventNodeEnd:
		r.emitBlock(r.fi(indent, "< Exit {{bold}}[%s]{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond)))
	case types.SpecEventSpecRepeat:
		r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{green}}Passed{{/}}{{bold}}.  Repeating %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
	case types.SpecEventSpecRetry:
		r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{red}}Failed{{/}}{{bold}}.  Retrying %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT)))
	}
}

// emitGoroutines renders each goroutine's header and stack, expanding source
// context for highlighted frames.
func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) {
	for idx, g := range goroutines {
		color := "{{gray}}"
		if g.HasHighlights() {
			color = "{{orange}}"
		}
		r.emit(r.fi(indent, color+"goroutine %d [%s]{{/}}\n", g.ID, g.State))
		for _, fc := range g.Stack {
			if fc.Highlight {
				r.emit(r.fi(indent, color+"{{bold}}> %s{{/}}\n", fc.Function))
				r.emit(r.fi(indent+2, color+"{{bold}}%s:%d{{/}}\n", fc.Filename, fc.Line))
				r.emitSource(indent+3, fc)
			} else {
				r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", fc.Function))
				r.emit(r.fi(indent+2, "{{gray}}%s:%d{{/}}\n", fc.Filename, fc.Line))
			}
		}

		if idx+1 < len(goroutines) {
			r.emit("\n")
		}
	}
}

// emitSource prints a frame's source snippet, dedented by the smallest common
// leading whitespace, with the highlighted line marked by ">".
func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) {
	lines := fc.Source
	if len(lines) == 0 {
		return
	}

	// find the minimum indentation across all non-empty lines (100000 is a
	// sentinel meaning "no line measured")
	lTrim := 100000
	for _, line := range lines {
		lTrimLine := len(line) - len(strings.TrimLeft(line, " \t"))
		if lTrimLine < lTrim && len(line) > 0 {
			lTrim = lTrimLine
		}
	}
	if lTrim == 100000 {
		lTrim = 0
	}

	for idx, line := range lines {
		if len(line) > lTrim {
			line = line[lTrim:]
		}
		if idx == fc.SourceHighlight {
			r.emit(r.fi(indent, "{{bold}}{{orange}}> %s{{/}}\n", line))
		} else {
			r.emit(r.fi(indent, "| %s\n", line))
		}
	}
}

/* Emitting to the writer */

func (r *DefaultReporter) emit(s string) {
	r._emit(s, false, false)
}

func (r *DefaultReporter) emitBlock(s string) {
	r._emit(s, true, false)
}

func (r *DefaultReporter) emitDelimiter(indent uint) {
	r._emit(r.fi(indent, "{{gray}}%s{{/}}", strings.Repeat("-", 30)), true, true)
}

// a bit ugly - but we're trying to minimize locking on this hot codepath
// _emit writes s under the lock, tracking trailing-newline state so that
// "block" emissions are surrounded by newlines and consecutive delimiters
// collapse into one.
func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) {
	if len(s) == 0 {
		return
	}
	r.lock.Lock()
	defer r.lock.Unlock()
	if isDelimiter && r.lastEmissionWasDelimiter {
		return
	}
	if block && !r.lastCharWasNewline {
		r.writer.Write([]byte("\n"))
	}
	r.lastCharWasNewline = (s[len(s)-1:] == "\n")
	r.writer.Write([]byte(s))
	if block && !r.lastCharWasNewline {
		r.writer.Write([]byte("\n"))
		r.lastCharWasNewline = true
	}
	r.lastEmissionWasDelimiter = isDelimiter
}

/* Rendering text */

func (r *DefaultReporter) f(format string, args ...interface{}) string {
	return r.formatter.F(format, args...)
}

func (r *DefaultReporter) fi(indentation uint, format string, args ...interface{}) string {
	return r.formatter.Fi(indentation, format, args...)
}

func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string {
	return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"})
}

// codeLocationBlock renders the container-hierarchy/leaf-node text and code
// locations for a spec, highlighting the failed component (if any).  In
// veryVerbose mode each component gets its own indented line with its
// location; otherwise everything is joined onto one line.
func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string {
	texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{}

	texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...)

	if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) {
		texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText))
	} else {
		texts = append(texts, r.f(report.LeafNodeText))
	}
	labels = append(labels, report.LeafNodeLabels)
	locations = append(locations, report.LeafNodeLocation)

	failureLocation := report.Failure.FailureNodeLocation
	if usePreciseFailureLocation {
		failureLocation = report.Failure.Location
	}

	// decorate the failed component and remember which index to highlight
	highlightIndex := -1
	switch report.Failure.FailureNodeContext {
	case types.FailureNodeAtTopLevel:
		texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...)
		locations = append([]types.CodeLocation{failureLocation}, locations...)
		labels = append([][]string{{}}, labels...)
		highlightIndex = 0
	case types.FailureNodeInContainer:
		i := report.Failure.FailureNodeContainerIndex
		texts[i] = fmt.Sprintf("%s [%s]", texts[i], report.Failure.FailureNodeType)
		locations[i] = failureLocation
		highlightIndex = i
	case types.FailureNodeIsLeafNode:
		i := len(texts) - 1
		texts[i] = fmt.Sprintf("[%s] %s", report.LeafNodeType, report.LeafNodeText)
		locations[i] = failureLocation
		highlightIndex = i
	default:
		//there is no failure, so we highlight the leaf node
		highlightIndex = len(texts) - 1
	}

	out := ""
	if veryVerbose {
		for i := range texts {
			if i == highlightIndex {
				out += r.fi(uint(i), highlightColor+"{{bold}}%s{{/}}", texts[i])
			} else {
				out += r.fi(uint(i), "%s", texts[i])
			}
			if len(labels[i]) > 0 {
				out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", "))
			}
			out += "\n"
			out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i])
		}
	} else {
		for i := range texts {
			// alternate gray/default styling; the highlighted component wins
			style := "{{/}}"
			if i%2 == 1 {
				style = "{{gray}}"
			}
			if i == highlightIndex {
				style = highlightColor + "{{bold}}"
			}
			out += r.f(style+"%s", texts[i])
			if i < len(texts)-1 {
				out += " "
			} else {
				out += r.f("{{/}}")
			}
		}
		flattenedLabels := report.Labels()
		if len(flattenedLabels) > 0 {
			out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", "))
		}
		out += "\n"
		if usePreciseFailureLocation {
			out += r.f("{{gray}}%s{{/}}", failureLocation)
		} else {
			leafLocation := locations[len(locations)-1]
			if (report.Failure.FailureNodeLocation != types.CodeLocation{}) && (report.Failure.FailureNodeLocation != leafLocation) {
				out += r.fi(1, highlightColor+"[%s]{{/}} {{gray}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.FailureNodeLocation)
				out += r.fi(1, "{{gray}}[%s] %s{{/}}", report.LeafNodeType, leafLocation)
			} else {
				out += r.f("{{gray}}%s{{/}}", leafLocation)
			}
		}
	}

	return out
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go
/* JUnit XML Reporter for Ginkgo For usage instructions: http://onsi.github.io/ginkgo/#generating_junit_xml_output The schema used for the generated JUnit xml file was adapted from https://llg.cubic.org/docs/junit/ */ package reporters import ( "encoding/xml" "fmt" "os" "path" "regexp" "strings" "github.com/onsi/ginkgo/v2/config" "github.com/onsi/ginkgo/v2/types" ) type JunitReportConfig struct { // Spec States for which no timeline should be emitted for system-err // set this to types.SpecStatePassed|types.SpecStateSkipped|types.SpecStatePending to only match failing specs OmitTimelinesForSpecState types.SpecState // Enable OmitFailureMessageAttr to prevent failure messages appearing in the "message" attribute of the Failure and Error tags OmitFailureMessageAttr bool //Enable OmitCapturedStdOutErr to prevent captured stdout/stderr appearing in system-out OmitCapturedStdOutErr bool // Enable OmitSpecLabels to prevent labels from appearing in the spec name OmitSpecLabels bool // Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name OmitLeafNodeType bool // Enable OmitSuiteSetupNodes to prevent the creation of testcase entries for setup nodes OmitSuiteSetupNodes bool } type JUnitTestSuites struct { XMLName xml.Name `xml:"testsuites"` // Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite) Tests int `xml:"tests,attr"` // Disabled maps onto specs that are pending and/or skipped Disabled int `xml:"disabled,attr"` // Errors maps onto specs that panicked or were interrupted Errors int `xml:"errors,attr"` // Failures maps onto specs that failed Failures int `xml:"failures,attr"` // Time is the time in seconds to execute all test suites Time float64 `xml:"time,attr"` //The set of all test suites TestSuites []JUnitTestSuite `xml:"testsuite"` } type JUnitTestSuite struct { // Name maps onto the description of the test suite - maps onto Report.SuiteDescription Name string 
`xml:"name,attr"` // Package maps onto the absolute path to the test suite - maps onto Report.SuitePath Package string `xml:"package,attr"` // Tests maps onto the total number of specs in the test suite (this includes any suite nodes such as BeforeSuite) Tests int `xml:"tests,attr"` // Disabled maps onto specs that are pending Disabled int `xml:"disabled,attr"` // Skiped maps onto specs that are skipped Skipped int `xml:"skipped,attr"` // Errors maps onto specs that panicked or were interrupted Errors int `xml:"errors,attr"` // Failures maps onto specs that failed Failures int `xml:"failures,attr"` // Time is the time in seconds to execute all the test suite - maps onto Report.RunTime Time float64 `xml:"time,attr"` // Timestamp is the ISO 8601 formatted start-time of the suite - maps onto Report.StartTime Timestamp string `xml:"timestamp,attr"` //Properties captures the information stored in the rest of the Report type (including SuiteConfig) as key-value pairs Properties JUnitProperties `xml:"properties"` //TestCases capture the individual specs TestCases []JUnitTestCase `xml:"testcase"` } type JUnitProperties struct { Properties []JUnitProperty `xml:"property"` } func (jup JUnitProperties) WithName(name string) string { for _, property := range jup.Properties { if property.Name == name { return property.Value } } return "" } type JUnitProperty struct { Name string `xml:"name,attr"` Value string `xml:"value,attr"` } var ownerRE = regexp.MustCompile(`(?i)^owner:(.*)$`) type JUnitTestCase struct { // Name maps onto the full text of the spec - equivalent to "[SpecReport.LeafNodeType] SpecReport.FullText()" Name string `xml:"name,attr"` // Classname maps onto the name of the test suite - equivalent to Report.SuiteDescription Classname string `xml:"classname,attr"` // Status maps onto the string representation of SpecReport.State Status string `xml:"status,attr"` // Time is the time in seconds to execute the spec - maps onto SpecReport.RunTime Time float64 
`xml:"time,attr"` // Owner is the owner the spec - is set if a label matching Label("owner:X") is provided. The last matching label is used as the owner, thereby allowing specs to override owners specified in container nodes. Owner string `xml:"owner,attr,omitempty"` //Skipped is populated with a message if the test was skipped or pending Skipped *JUnitSkipped `xml:"skipped,omitempty"` //Error is populated if the test panicked or was interrupted Error *JUnitError `xml:"error,omitempty"` //Failure is populated if the test failed Failure *JUnitFailure `xml:"failure,omitempty"` //SystemOut maps onto any captured stdout/stderr output - maps onto SpecReport.CapturedStdOutErr SystemOut string `xml:"system-out,omitempty"` //SystemOut maps onto any captured GinkgoWriter output - maps onto SpecReport.CapturedGinkgoWriterOutput SystemErr string `xml:"system-err,omitempty"` } type JUnitSkipped struct { // Message maps onto "pending" if the test was marked pending, "skipped" if the test was marked skipped, and "skipped - REASON" if the user called Skip(REASON) Message string `xml:"message,attr"` } type JUnitError struct { //Message maps onto the panic/exception thrown - equivalent to SpecReport.Failure.ForwardedPanic - or to "interrupted" Message string `xml:"message,attr"` //Type is one of "panicked" or "interrupted" Type string `xml:"type,attr"` //Description maps onto the captured stack trace for a panic, or the failure message for an interrupt which will include the dump of running goroutines Description string `xml:",chardata"` } type JUnitFailure struct { //Message maps onto the failure message - equivalent to SpecReport.Failure.Message Message string `xml:"message,attr"` //Type is "failed" Type string `xml:"type,attr"` //Description maps onto the location and stack trace of the failure Description string `xml:",chardata"` } func GenerateJUnitReport(report types.Report, dst string) error { return GenerateJUnitReportWithConfig(report, dst, JunitReportConfig{}) } func 
GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig) error { suite := JUnitTestSuite{ Name: report.SuiteDescription, Package: report.SuitePath, Time: report.RunTime.Seconds(), Timestamp: report.StartTime.Format("2006-01-02T15:04:05"), Properties: JUnitProperties{ Properties: []JUnitProperty{ {"SuiteSucceeded", fmt.Sprintf("%t", report.SuiteSucceeded)}, {"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)}, {"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")}, {"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))}, {"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)}, {"RandomizeAllSpecs", fmt.Sprintf("%t", report.SuiteConfig.RandomizeAllSpecs)}, {"LabelFilter", report.SuiteConfig.LabelFilter}, {"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")}, {"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")}, {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, {"SkipFiles", strings.Join(report.SuiteConfig.SkipFiles, ";")}, {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, {"FailOnEmpty", fmt.Sprintf("%t", report.SuiteConfig.FailOnEmpty)}, {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, {"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)}, {"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode}, }, }, } for _, spec := range report.SpecReports { if config.OmitSuiteSetupNodes && spec.LeafNodeType != types.NodeTypeIt { continue } name := fmt.Sprintf("[%s]", spec.LeafNodeType) if config.OmitLeafNodeType { name = "" } if spec.FullText() != "" { name = name + " " + spec.FullText() } labels := spec.Labels() if len(labels) > 0 && !config.OmitSpecLabels { name = name + " [" + strings.Join(labels, ", ") + "]" } owner := "" 
for _, label := range labels { if matches := ownerRE.FindStringSubmatch(label); len(matches) == 2 { owner = matches[1] } } name = strings.TrimSpace(name) test := JUnitTestCase{ Name: name, Classname: report.SuiteDescription, Status: spec.State.String(), Time: spec.RunTime.Seconds(), Owner: owner, } if !spec.State.Is(config.OmitTimelinesForSpecState) { test.SystemErr = systemErrForUnstructuredReporters(spec) } if !config.OmitCapturedStdOutErr { test.SystemOut = systemOutForUnstructuredReporters(spec) } suite.Tests += 1 switch spec.State { case types.SpecStateSkipped: message := "skipped" if spec.Failure.Message != "" { message += " - " + spec.Failure.Message } test.Skipped = &JUnitSkipped{Message: message} suite.Skipped += 1 case types.SpecStatePending: test.Skipped = &JUnitSkipped{Message: "pending"} suite.Disabled += 1 case types.SpecStateFailed: test.Failure = &JUnitFailure{ Message: spec.Failure.Message, Type: "failed", Description: failureDescriptionForUnstructuredReporters(spec), } if config.OmitFailureMessageAttr { test.Failure.Message = "" } suite.Failures += 1 case types.SpecStateTimedout: test.Failure = &JUnitFailure{ Message: spec.Failure.Message, Type: "timedout", Description: failureDescriptionForUnstructuredReporters(spec), } if config.OmitFailureMessageAttr { test.Failure.Message = "" } suite.Failures += 1 case types.SpecStateInterrupted: test.Error = &JUnitError{ Message: spec.Failure.Message, Type: "interrupted", Description: failureDescriptionForUnstructuredReporters(spec), } if config.OmitFailureMessageAttr { test.Error.Message = "" } suite.Errors += 1 case types.SpecStateAborted: test.Failure = &JUnitFailure{ Message: spec.Failure.Message, Type: "aborted", Description: failureDescriptionForUnstructuredReporters(spec), } if config.OmitFailureMessageAttr { test.Failure.Message = "" } suite.Errors += 1 case types.SpecStatePanicked: test.Error = &JUnitError{ Message: spec.Failure.ForwardedPanic, Type: "panicked", Description: 
failureDescriptionForUnstructuredReporters(spec), } if config.OmitFailureMessageAttr { test.Error.Message = "" } suite.Errors += 1 } suite.TestCases = append(suite.TestCases, test) } junitReport := JUnitTestSuites{ Tests: suite.Tests, Disabled: suite.Disabled + suite.Skipped, Errors: suite.Errors, Failures: suite.Failures, Time: suite.Time, TestSuites: []JUnitTestSuite{suite}, } if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { return err } f, err := os.Create(dst) if err != nil { return err } f.WriteString(xml.Header) encoder := xml.NewEncoder(f) encoder.Indent(" ", " ") encoder.Encode(junitReport) return f.Close() } func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) { messages := []string{} mergedReport := JUnitTestSuites{} for _, source := range sources { report := JUnitTestSuites{} f, err := os.Open(source) if err != nil { messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) continue } err = xml.NewDecoder(f).Decode(&report) _ = f.Close() if err != nil { messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) continue } os.Remove(source) mergedReport.Tests += report.Tests mergedReport.Disabled += report.Disabled mergedReport.Errors += report.Errors mergedReport.Failures += report.Failures mergedReport.Time += report.Time mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...) 
} if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { return messages, err } f, err := os.Create(dst) if err != nil { return messages, err } f.WriteString(xml.Header) encoder := xml.NewEncoder(f) encoder.Indent(" ", " ") encoder.Encode(mergedReport) return messages, f.Close() } func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string { out := &strings.Builder{} NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitFailure(0, spec.State, spec.Failure, true) if len(spec.AdditionalFailures) > 0 { out.WriteString("\nThere were additional failures detected after the initial failure. These are visible in the timeline\n") } return out.String() } func systemErrForUnstructuredReporters(spec types.SpecReport) string { return RenderTimeline(spec, true) } func RenderTimeline(spec types.SpecReport, noColor bool) string { out := &strings.Builder{} NewDefaultReporter(types.ReporterConfig{NoColor: noColor, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline()) return out.String() } func systemOutForUnstructuredReporters(spec types.SpecReport) string { return spec.CapturedStdOutErr } // Deprecated JUnitReporter (so folks can still compile their suites) type JUnitReporter struct{} func NewJUnitReporter(_ string) *JUnitReporter { return &JUnitReporter{} } func (reporter *JUnitReporter) SuiteWillBegin(_ config.GinkgoConfigType, _ *types.SuiteSummary) {} func (reporter *JUnitReporter) BeforeSuiteDidRun(_ *types.SetupSummary) {} func (reporter *JUnitReporter) SpecWillRun(_ *types.SpecSummary) {} func (reporter *JUnitReporter) SpecDidComplete(_ *types.SpecSummary) {} func (reporter *JUnitReporter) AfterSuiteDidRun(_ *types.SetupSummary) {} func (reporter *JUnitReporter) SuiteDidEnd(_ *types.SuiteSummary) {}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go
/* TeamCity Reporter for Ginkgo Makes use of TeamCity's support for Service Messages http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-ReportingTests */ package reporters import ( "fmt" "os" "path" "strings" "github.com/onsi/ginkgo/v2/types" ) func tcEscape(s string) string { s = strings.ReplaceAll(s, "|", "||") s = strings.ReplaceAll(s, "'", "|'") s = strings.ReplaceAll(s, "\n", "|n") s = strings.ReplaceAll(s, "\r", "|r") s = strings.ReplaceAll(s, "[", "|[") s = strings.ReplaceAll(s, "]", "|]") return s } func GenerateTeamcityReport(report types.Report, dst string) error { if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { return err } f, err := os.Create(dst) if err != nil { return err } name := report.SuiteDescription labels := report.SuiteLabels if len(labels) > 0 { name = name + " [" + strings.Join(labels, ", ") + "]" } fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name)) for _, spec := range report.SpecReports { name := fmt.Sprintf("[%s]", spec.LeafNodeType) if spec.FullText() != "" { name = name + " " + spec.FullText() } labels := spec.Labels() if len(labels) > 0 { name = name + " [" + strings.Join(labels, ", ") + "]" } name = tcEscape(name) fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name) switch spec.State { case types.SpecStatePending: fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='pending']\n", name) case types.SpecStateSkipped: message := "skipped" if spec.Failure.Message != "" { message += " - " + spec.Failure.Message } fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='%s']\n", name, tcEscape(message)) case types.SpecStateFailed: details := failureDescriptionForUnstructuredReporters(spec) fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='failed - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) case types.SpecStatePanicked: details := failureDescriptionForUnstructuredReporters(spec) 
fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='panicked - %s' details='%s']\n", name, tcEscape(spec.Failure.ForwardedPanic), tcEscape(details)) case types.SpecStateTimedout: details := failureDescriptionForUnstructuredReporters(spec) fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='timedout - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) case types.SpecStateInterrupted: details := failureDescriptionForUnstructuredReporters(spec) fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) case types.SpecStateAborted: details := failureDescriptionForUnstructuredReporters(spec) fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='aborted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) } fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructuredReporters(spec))) fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(systemErrForUnstructuredReporters(spec))) fmt.Fprintf(f, "##teamcity[testFinished name='%s' duration='%d']\n", name, int(spec.RunTime.Seconds()*1000.0)) } fmt.Fprintf(f, "##teamcity[testSuiteFinished name='%s']\n", tcEscape(report.SuiteDescription)) return f.Close() } func MergeAndCleanupTeamcityReports(sources []string, dst string) ([]string, error) { messages := []string{} merged := []byte{} for _, source := range sources { data, err := os.ReadFile(source) if err != nil { messages = append(messages, fmt.Sprintf("Could not open %s:\n%s", source, err.Error())) continue } os.Remove(source) merged = append(merged, data...) } return messages, os.WriteFile(dst, merged, 0666) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/tree.go
vendor/github.com/onsi/ginkgo/v2/internal/tree.go
package internal

import "github.com/onsi/ginkgo/v2/types"

// TreeNode is a node in the spec tree built during construction: a Ginkgo node plus
// its parent and children.
type TreeNode struct {
	Node     Node
	Parent   *TreeNode
	Children TreeNodes
}

// AppendChild attaches child to tn and records tn as the child's parent.
func (tn *TreeNode) AppendChild(child *TreeNode) {
	tn.Children = append(tn.Children, child)
	child.Parent = tn
}

// AncestorNodeChain returns the Nodes from the outermost non-zero ancestor down to
// (and including) tn's own Node.
func (tn *TreeNode) AncestorNodeChain() Nodes {
	if tn.Parent == nil || tn.Parent.Node.IsZero() {
		return Nodes{tn.Node}
	}
	return append(tn.Parent.AncestorNodeChain(), tn.Node)
}

type TreeNodes []*TreeNode

// Nodes extracts the Node from each TreeNode, preserving order.
func (tn TreeNodes) Nodes() Nodes {
	out := make(Nodes, len(tn))
	for i := range tn {
		out[i] = tn[i].Node
	}
	return out
}

// WithID returns the TreeNode whose Node has the given ID, or nil if none matches.
func (tn TreeNodes) WithID(id uint) *TreeNode {
	for i := range tn {
		if tn[i].Node.ID == id {
			return tn[i]
		}
	}
	return nil
}

// GenerateSpecsFromTreeRoot flattens the spec tree into Specs. For each It node it
// builds the full ordered Nodes chain: setup nodes declared before it (lNodes + left
// siblings), the container/It nodes themselves, then setup nodes declared after it
// (right siblings + rNodes). NestingLevel is assigned per tree depth.
func GenerateSpecsFromTreeRoot(tree *TreeNode) Specs {
	var walkTree func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs
	walkTree = func(nestingLevel int, lNodes Nodes, rNodes Nodes, trees TreeNodes) Specs {
		tests := Specs{}

		nodes := make(Nodes, len(trees))
		for i := range trees {
			nodes[i] = trees[i].Node
			nodes[i].NestingLevel = nestingLevel
		}

		for i := range nodes {
			if !nodes[i].NodeType.Is(types.NodeTypesForContainerAndIt) {
				continue
			}
			// siblings on either side contribute only their non-container/It (setup) nodes
			leftNodes, rightNodes := nodes.SplitAround(nodes[i])
			leftNodes = leftNodes.WithoutType(types.NodeTypesForContainerAndIt)
			rightNodes = rightNodes.WithoutType(types.NodeTypesForContainerAndIt)
			leftNodes = lNodes.CopyAppend(leftNodes...)
			rightNodes = rightNodes.CopyAppend(rNodes...)
			if nodes[i].NodeType.Is(types.NodeTypeIt) {
				tests = append(tests, Spec{Nodes: leftNodes.CopyAppend(nodes[i]).CopyAppend(rightNodes...)})
			} else {
				// container: recurse with this container appended to the left chain
				treeNode := trees.WithID(nodes[i].ID)
				tests = append(tests, walkTree(nestingLevel+1, leftNodes.CopyAppend(nodes[i]), rightNodes, treeNode.Children)...)
			}
		}

		return tests
	}

	return walkTree(0, Nodes{}, Nodes{}, tree.Children)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go
package internal

import (
	"bufio"
	"bytes"
	"context"
	"fmt"
	"io"
	"os"
	"os/signal"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/onsi/ginkgo/v2/types"
)

// _SOURCE_CACHE caches file contents (split into lines) keyed by filename so repeated
// progress reports don't re-read the same sources.
var _SOURCE_CACHE = map[string][]string{}

type ProgressSignalRegistrar func(func()) context.CancelFunc

// RegisterForProgressSignal invokes handler whenever one of PROGRESS_SIGNALS fires.
// The returned CancelFunc stops signal delivery and terminates the listening goroutine.
func RegisterForProgressSignal(handler func()) context.CancelFunc {
	signalChannel := make(chan os.Signal, 1)
	if len(PROGRESS_SIGNALS) > 0 {
		signal.Notify(signalChannel, PROGRESS_SIGNALS...)
	}
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		for {
			select {
			case <-signalChannel:
				handler()
			case <-ctx.Done():
				signal.Stop(signalChannel)
				return
			}
		}
	}()

	return cancel
}

type ProgressStepCursor struct {
	Text         string
	CodeLocation types.CodeLocation
	StartTime    time.Time
}

// NewProgressReport assembles a ProgressReport for the currently-running spec: it
// captures all running goroutines, marks the spec goroutine, highlights stack frames
// belonging to packages the spec's code lives in (attaching source excerpts), and -
// unless includeAll is set - trims the result to the spec goroutine plus highlighted
// goroutines.
func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep types.SpecEvent, gwOutput string, timelineLocation types.TimelineLocation, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) {
	pr := types.ProgressReport{
		ParallelProcess:   report.ParallelProcess,
		RunningInParallel: isRunningInParallel,

		ContainerHierarchyTexts: report.ContainerHierarchyTexts,
		LeafNodeText:            report.LeafNodeText,
		LeafNodeLocation:        report.LeafNodeLocation,
		SpecStartTime:           report.StartTime,

		CurrentNodeType:      currentNode.NodeType,
		CurrentNodeText:      currentNode.Text,
		CurrentNodeLocation:  currentNode.CodeLocation,
		CurrentNodeStartTime: currentNodeStartTime,

		CurrentStepText:      currentStep.Message,
		CurrentStepLocation:  currentStep.CodeLocation,
		CurrentStepStartTime: currentStep.TimelineLocation.Time,

		AdditionalReports: additionalReports,

		CapturedGinkgoWriterOutput: gwOutput,
		TimelineLocation:           timelineLocation,
	}

	goroutines, err := extractRunningGoroutines()
	if err != nil {
		return pr, err
	}
	pr.Goroutines = goroutines

	// now we want to try to find goroutines of interest. these will be goroutines that have any function calls with code in packagesOfInterest:
	packagesOfInterest := map[string]bool{}
	packageFromFilename := func(filename string) string {
		return filepath.Dir(filename)
	}
	addPackageFor := func(filename string) {
		if filename != "" {
			packagesOfInterest[packageFromFilename(filename)] = true
		}
	}
	isPackageOfInterest := func(filename string) bool {
		stackPackage := packageFromFilename(filename)
		for packageOfInterest := range packagesOfInterest {
			if strings.HasPrefix(stackPackage, packageOfInterest) {
				return true
			}
		}
		return false
	}
	for _, location := range report.ContainerHierarchyLocations {
		addPackageFor(location.FileName)
	}
	addPackageFor(report.LeafNodeLocation.FileName)
	addPackageFor(currentNode.CodeLocation.FileName)
	addPackageFor(currentStep.CodeLocation.FileName)

	//First, we find the SpecGoroutine - this will be the goroutine that includes `runNode`
	specGoRoutineIdx := -1
	runNodeFunctionCallIdx := -1
OUTER:
	for goroutineIdx, goroutine := range pr.Goroutines {
		for functionCallIdx, functionCall := range goroutine.Stack {
			if strings.Contains(functionCall.Function, "ginkgo/v2/internal.(*Suite).runNode.func") {
				specGoRoutineIdx = goroutineIdx
				runNodeFunctionCallIdx = functionCallIdx
				break OUTER
			}
		}
	}

	//Now, we find the first non-Ginkgo function call
	if specGoRoutineIdx > -1 {
		for runNodeFunctionCallIdx >= 0 {
			fn := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Function
			file := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename
			// these are all things that could potentially happen from within ginkgo
			if strings.Contains(fn, "ginkgo/v2/internal") || strings.Contains(fn, "reflect.Value") || strings.Contains(file, "ginkgo/table_dsl") || strings.Contains(file, "ginkgo/core_dsl") {
				runNodeFunctionCallIdx--
				continue
			}
			//found it! lets add its package of interest
			// (a dead, empty `if strings.Contains(..., "ginkgo/table_dsl") {}` block was removed here)
			addPackageFor(goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename)
			break
		}
	}

	ginkgoEntryPointIdx := -1
OUTER_GINKGO_ENTRY_POINT:
	for goroutineIdx, goroutine := range pr.Goroutines {
		for _, functionCall := range goroutine.Stack {
			if strings.Contains(functionCall.Function, "ginkgo/v2.RunSpecs") {
				ginkgoEntryPointIdx = goroutineIdx
				break OUTER_GINKGO_ENTRY_POINT
			}
		}
	}

	// Now we go through all goroutines and highlight any lines with packages in `packagesOfInterest`
	// Any goroutines with highlighted lines end up in the HighlightGoRoutines
	for goroutineIdx, goroutine := range pr.Goroutines {
		if goroutineIdx == ginkgoEntryPointIdx {
			continue
		}
		if goroutineIdx == specGoRoutineIdx {
			pr.Goroutines[goroutineIdx].IsSpecGoroutine = true
		}
		for functionCallIdx, functionCall := range goroutine.Stack {
			if isPackageOfInterest(functionCall.Filename) {
				goroutine.Stack[functionCallIdx].Highlight = true
				goroutine.Stack[functionCallIdx].Source, goroutine.Stack[functionCallIdx].SourceHighlight = fetchSource(functionCall.Filename, functionCall.Line, 2, sourceRoots)
			}
		}
	}

	if !includeAll {
		goroutines := []types.Goroutine{pr.SpecGoroutine()}
		goroutines = append(goroutines, pr.HighlightedGoroutines()...)
		pr.Goroutines = goroutines
	}

	return pr, nil
}

// extractRunningGoroutines captures a full runtime.Stack dump (growing the buffer
// until it fits) and parses it into structured Goroutines.
func extractRunningGoroutines() ([]types.Goroutine, error) {
	var stack []byte
	for size := 64 * 1024; ; size *= 2 {
		stack = make([]byte, size)
		if n := runtime.Stack(stack, true); n < size {
			stack = stack[:n]
			break
		}
	}
	r := bufio.NewReader(bytes.NewReader(stack))
	out := []types.Goroutine{}
	idx := -1
	for {
		line, err := r.ReadString('\n')
		if err == io.EOF {
			break
		}

		line = strings.TrimSuffix(line, "\n")

		//skip blank lines
		if line == "" {
			continue
		}

		//parse headers for new goroutine frames
		if strings.HasPrefix(line, "goroutine") {
			out = append(out, types.Goroutine{})
			idx = len(out) - 1

			line = strings.TrimPrefix(line, "goroutine ")
			line = strings.TrimSuffix(line, ":")
			fields := strings.SplitN(line, " ", 2)
			if len(fields) != 2 {
				return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine frame header: %s", line))
			}
			out[idx].ID, err = strconv.ParseUint(fields[0], 10, 64)
			if err != nil {
				// report the field that actually failed to parse (was fields[1])
				return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine ID: %s", fields[0]))
			}

			out[idx].State = strings.TrimSuffix(strings.TrimPrefix(fields[1], "["), "]")
			continue
		}

		//if we are here we must be at a function call entry in the stack
		functionCall := types.FunctionCall{
			Function: strings.TrimPrefix(line, "created by "), // no need to track 'created by'
		}

		line, err = r.ReadString('\n')
		line = strings.TrimSuffix(line, "\n")
		if err == io.EOF {
			return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call: %s -- missing file name and line number", functionCall.Function))
		}
		line = strings.TrimLeft(line, " \t")
		delimiterIdx := strings.LastIndex(line, ":")
		if delimiterIdx == -1 {
			return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename and line number: %s", line))
		}
		functionCall.Filename = line[:delimiterIdx]
		line = strings.Split(line[delimiterIdx+1:], " ")[0]
		lineNumber, err := strconv.ParseInt(line, 10, 64)
		// check the parse error before using the result (was assigned first)
		if err != nil {
			return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call line number: %s\n%s", line, err.Error()))
		}
		functionCall.Line = int(lineNumber)
		out[idx].Stack = append(out[idx].Stack, functionCall)
	}

	return out, nil
}

// fetchSource returns up to 2*span+1 lines of source centered on lineNumber (1-based)
// from filename, plus the index of the highlighted line within the returned slice.
// Files are resolved against "" then each configured source root, and cached.
// Returns an empty slice when the file cannot be found.
func fetchSource(filename string, lineNumber int, span int, configuredSourceRoots []string) ([]string, int) {
	if filename == "" {
		return []string{}, 0
	}

	var lines []string
	var ok bool
	if lines, ok = _SOURCE_CACHE[filename]; !ok {
		sourceRoots := []string{""}
		sourceRoots = append(sourceRoots, configuredSourceRoots...)
		var data []byte
		var err error
		var found bool
		for _, root := range sourceRoots {
			data, err = os.ReadFile(filepath.Join(root, filename))
			if err == nil {
				found = true
				break
			}
		}
		if !found {
			return []string{}, 0
		}
		lines = strings.Split(string(data), "\n")
		_SOURCE_CACHE[filename] = lines
	}

	// clamp the window to the file's bounds
	startIndex := lineNumber - span - 1
	endIndex := startIndex + span + span + 1
	if startIndex < 0 {
		startIndex = 0
	}
	if endIndex > len(lines) {
		endIndex = len(lines)
	}
	highlightIndex := lineNumber - 1 - startIndex
	return lines[startIndex:endIndex], highlightIndex
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go
vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go
//go:build freebsd || openbsd || netbsd || darwin || dragonfly
// +build freebsd openbsd netbsd darwin dragonfly

package internal

import (
	"os"
	"syscall"
)

// PROGRESS_SIGNALS lists the signals that trigger a progress report on BSD-family
// systems: SIGINFO (the native Ctrl-T status signal) plus SIGUSR1.
var PROGRESS_SIGNALS = []os.Signal{syscall.SIGINFO, syscall.SIGUSR1}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/failer.go
vendor/github.com/onsi/ginkgo/v2/internal/failer.go
package internal import ( "fmt" "sync" "github.com/onsi/ginkgo/v2/types" ) type Failer struct { lock *sync.Mutex failure types.Failure state types.SpecState } func NewFailer() *Failer { return &Failer{ lock: &sync.Mutex{}, state: types.SpecStatePassed, } } func (f *Failer) GetState() types.SpecState { f.lock.Lock() defer f.lock.Unlock() return f.state } func (f *Failer) GetFailure() types.Failure { f.lock.Lock() defer f.lock.Unlock() return f.failure } func (f *Failer) Panic(location types.CodeLocation, forwardedPanic interface{}) { f.lock.Lock() defer f.lock.Unlock() if f.state == types.SpecStatePassed { f.state = types.SpecStatePanicked f.failure = types.Failure{ Message: "Test Panicked", Location: location, ForwardedPanic: fmt.Sprintf("%v", forwardedPanic), } } } func (f *Failer) Fail(message string, location types.CodeLocation) { f.lock.Lock() defer f.lock.Unlock() if f.state == types.SpecStatePassed { f.state = types.SpecStateFailed f.failure = types.Failure{ Message: message, Location: location, } } } func (f *Failer) Skip(message string, location types.CodeLocation) { f.lock.Lock() defer f.lock.Unlock() if f.state == types.SpecStatePassed { f.state = types.SpecStateSkipped f.failure = types.Failure{ Message: message, Location: location, } } } func (f *Failer) AbortSuite(message string, location types.CodeLocation) { f.lock.Lock() defer f.lock.Unlock() if f.state == types.SpecStatePassed { f.state = types.SpecStateAborted f.failure = types.Failure{ Message: message, Location: location, } } } func (f *Failer) Drain() (types.SpecState, types.Failure) { f.lock.Lock() defer f.lock.Unlock() failure := f.failure outcome := f.state f.state = types.SpecStatePassed f.failure = types.Failure{} return outcome, failure }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go
package internal import ( "time" "github.com/onsi/ginkgo/v2/types" ) type ReportEntry = types.ReportEntry func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (ReportEntry, error) { out := ReportEntry{ Visibility: types.ReportEntryVisibilityAlways, Name: name, Location: cl, Time: time.Now(), } var didSetValue = false for _, arg := range args { switch x := arg.(type) { case types.ReportEntryVisibility: out.Visibility = x case types.CodeLocation: out.Location = x case Offset: out.Location = types.NewCodeLocation(2 + int(x)) case time.Time: out.Time = x default: if didSetValue { return ReportEntry{}, types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg) } out.Value = types.WrapEntryValue(arg) didSetValue = true } } return out, nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/node.go
vendor/github.com/onsi/ginkgo/v2/internal/node.go
package internal import ( "context" "fmt" "reflect" "sort" "sync" "time" "github.com/onsi/ginkgo/v2/types" ) var _global_node_id_counter = uint(0) var _global_id_mutex = &sync.Mutex{} func UniqueNodeID() uint { // There's a reace in the internal integration tests if we don't make // accessing _global_node_id_counter safe across goroutines. _global_id_mutex.Lock() defer _global_id_mutex.Unlock() _global_node_id_counter += 1 return _global_node_id_counter } type Node struct { ID uint NodeType types.NodeType Text string Body func(SpecContext) CodeLocation types.CodeLocation NestingLevel int HasContext bool SynchronizedBeforeSuiteProc1Body func(SpecContext) []byte SynchronizedBeforeSuiteProc1BodyHasContext bool SynchronizedBeforeSuiteAllProcsBody func(SpecContext, []byte) SynchronizedBeforeSuiteAllProcsBodyHasContext bool SynchronizedAfterSuiteAllProcsBody func(SpecContext) SynchronizedAfterSuiteAllProcsBodyHasContext bool SynchronizedAfterSuiteProc1Body func(SpecContext) SynchronizedAfterSuiteProc1BodyHasContext bool ReportEachBody func(SpecContext, types.SpecReport) ReportSuiteBody func(SpecContext, types.Report) MarkedFocus bool MarkedPending bool MarkedSerial bool MarkedOrdered bool MarkedContinueOnFailure bool MarkedOncePerOrdered bool FlakeAttempts int MustPassRepeatedly int Labels Labels PollProgressAfter time.Duration PollProgressInterval time.Duration NodeTimeout time.Duration SpecTimeout time.Duration GracePeriod time.Duration NodeIDWhereCleanupWasGenerated uint } // Decoration Types type focusType bool type pendingType bool type serialType bool type orderedType bool type continueOnFailureType bool type honorsOrderedType bool type suppressProgressReporting bool const Focus = focusType(true) const Pending = pendingType(true) const Serial = serialType(true) const Ordered = orderedType(true) const ContinueOnFailure = continueOnFailureType(true) const OncePerOrdered = honorsOrderedType(true) const SuppressProgressReporting = suppressProgressReporting(true) type 
FlakeAttempts uint type MustPassRepeatedly uint type Offset uint type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing type Labels []string type PollProgressInterval time.Duration type PollProgressAfter time.Duration type NodeTimeout time.Duration type SpecTimeout time.Duration type GracePeriod time.Duration func (l Labels) MatchesLabelFilter(query string) bool { return types.MustParseLabelFilter(query)(l) } func UnionOfLabels(labels ...Labels) Labels { out := Labels{} seen := map[string]bool{} for _, labelSet := range labels { for _, label := range labelSet { if !seen[label] { seen[label] = true out = append(out, label) } } } return out } func PartitionDecorations(args ...interface{}) ([]interface{}, []interface{}) { decorations := []interface{}{} remainingArgs := []interface{}{} for _, arg := range args { if isDecoration(arg) { decorations = append(decorations, arg) } else { remainingArgs = append(remainingArgs, arg) } } return decorations, remainingArgs } func isDecoration(arg interface{}) bool { switch t := reflect.TypeOf(arg); { case t == nil: return false case t == reflect.TypeOf(Offset(0)): return true case t == reflect.TypeOf(types.CodeLocation{}): return true case t == reflect.TypeOf(Focus): return true case t == reflect.TypeOf(Pending): return true case t == reflect.TypeOf(Serial): return true case t == reflect.TypeOf(Ordered): return true case t == reflect.TypeOf(ContinueOnFailure): return true case t == reflect.TypeOf(OncePerOrdered): return true case t == reflect.TypeOf(SuppressProgressReporting): return true case t == reflect.TypeOf(FlakeAttempts(0)): return true case t == reflect.TypeOf(MustPassRepeatedly(0)): return true case t == reflect.TypeOf(Labels{}): return true case t == reflect.TypeOf(PollProgressInterval(0)): return true case t == reflect.TypeOf(PollProgressAfter(0)): return true case t == reflect.TypeOf(NodeTimeout(0)): return true case t == reflect.TypeOf(SpecTimeout(0)): return true case t == 
reflect.TypeOf(GracePeriod(0)): return true case t.Kind() == reflect.Slice && isSliceOfDecorations(arg): return true default: return false } } func isSliceOfDecorations(slice interface{}) bool { vSlice := reflect.ValueOf(slice) if vSlice.Len() == 0 { return false } for i := 0; i < vSlice.Len(); i++ { if !isDecoration(vSlice.Index(i).Interface()) { return false } } return true } var contextType = reflect.TypeOf(new(context.Context)).Elem() var specContextType = reflect.TypeOf(new(SpecContext)).Elem() func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) { baseOffset := 2 node := Node{ ID: UniqueNodeID(), NodeType: nodeType, Text: text, Labels: Labels{}, CodeLocation: types.NewCodeLocation(baseOffset), NestingLevel: -1, PollProgressAfter: -1, PollProgressInterval: -1, GracePeriod: -1, } errors := []error{} appendError := func(err error) { if err != nil { errors = append(errors, err) } } args = unrollInterfaceSlice(args) remainingArgs := []interface{}{} // First get the CodeLocation up-to-date for _, arg := range args { switch v := arg.(type) { case Offset: node.CodeLocation = types.NewCodeLocation(baseOffset + int(v)) case types.CodeLocation: node.CodeLocation = v default: remainingArgs = append(remainingArgs, arg) } } labelsSeen := map[string]bool{} trackedFunctionError := false args = remainingArgs remainingArgs = []interface{}{} // now process the rest of the args for _, arg := range args { switch t := reflect.TypeOf(arg); { case t == reflect.TypeOf(float64(0)): break // ignore deprecated timeouts case t == reflect.TypeOf(Focus): node.MarkedFocus = bool(arg.(focusType)) if !nodeType.Is(types.NodeTypesForContainerAndIt) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Focus")) } case t == reflect.TypeOf(Pending): node.MarkedPending = bool(arg.(pendingType)) if !nodeType.Is(types.NodeTypesForContainerAndIt) { 
appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Pending")) } case t == reflect.TypeOf(Serial): node.MarkedSerial = bool(arg.(serialType)) if !labelsSeen["Serial"] { node.Labels = append(node.Labels, "Serial") } if !nodeType.Is(types.NodeTypesForContainerAndIt) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Serial")) } case t == reflect.TypeOf(Ordered): node.MarkedOrdered = bool(arg.(orderedType)) if !nodeType.Is(types.NodeTypeContainer) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered")) } case t == reflect.TypeOf(ContinueOnFailure): node.MarkedContinueOnFailure = bool(arg.(continueOnFailureType)) if !nodeType.Is(types.NodeTypeContainer) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "ContinueOnFailure")) } case t == reflect.TypeOf(OncePerOrdered): node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType)) if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered")) } case t == reflect.TypeOf(SuppressProgressReporting): deprecationTracker.TrackDeprecation(types.Deprecations.SuppressProgressReporting()) case t == reflect.TypeOf(FlakeAttempts(0)): node.FlakeAttempts = int(arg.(FlakeAttempts)) if !nodeType.Is(types.NodeTypesForContainerAndIt) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "FlakeAttempts")) } case t == reflect.TypeOf(MustPassRepeatedly(0)): node.MustPassRepeatedly = int(arg.(MustPassRepeatedly)) if !nodeType.Is(types.NodeTypesForContainerAndIt) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "MustPassRepeatedly")) } case t == reflect.TypeOf(PollProgressAfter(0)): node.PollProgressAfter = 
time.Duration(arg.(PollProgressAfter)) if nodeType.Is(types.NodeTypeContainer) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressAfter")) } case t == reflect.TypeOf(PollProgressInterval(0)): node.PollProgressInterval = time.Duration(arg.(PollProgressInterval)) if nodeType.Is(types.NodeTypeContainer) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressInterval")) } case t == reflect.TypeOf(NodeTimeout(0)): node.NodeTimeout = time.Duration(arg.(NodeTimeout)) if nodeType.Is(types.NodeTypeContainer) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "NodeTimeout")) } case t == reflect.TypeOf(SpecTimeout(0)): node.SpecTimeout = time.Duration(arg.(SpecTimeout)) if !nodeType.Is(types.NodeTypeIt) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SpecTimeout")) } case t == reflect.TypeOf(GracePeriod(0)): node.GracePeriod = time.Duration(arg.(GracePeriod)) if nodeType.Is(types.NodeTypeContainer) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "GracePeriod")) } case t == reflect.TypeOf(Labels{}): if !nodeType.Is(types.NodeTypesForContainerAndIt) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label")) } for _, label := range arg.(Labels) { if !labelsSeen[label] { labelsSeen[label] = true label, err := types.ValidateAndCleanupLabel(label, node.CodeLocation) node.Labels = append(node.Labels, label) appendError(err) } } case t.Kind() == reflect.Func: if nodeType.Is(types.NodeTypeContainer) { if node.Body != nil { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true break } if t.NumOut() > 0 || t.NumIn() > 0 { appendError(types.GinkgoErrors.InvalidBodyTypeForContainer(t, node.CodeLocation, nodeType)) trackedFunctionError = true break } body := arg.(func()) 
node.Body = func(SpecContext) { body() } } else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) { if node.ReportEachBody == nil { if fn, ok := arg.(func(types.SpecReport)); ok { node.ReportEachBody = func(_ SpecContext, r types.SpecReport) { fn(r) } } else { node.ReportEachBody = arg.(func(SpecContext, types.SpecReport)) node.HasContext = true } } else { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true break } } else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) { if node.ReportSuiteBody == nil { if fn, ok := arg.(func(types.Report)); ok { node.ReportSuiteBody = func(_ SpecContext, r types.Report) { fn(r) } } else { node.ReportSuiteBody = arg.(func(SpecContext, types.Report)) node.HasContext = true } } else { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true break } } else if nodeType.Is(types.NodeTypeSynchronizedBeforeSuite) { if node.SynchronizedBeforeSuiteProc1Body != nil && node.SynchronizedBeforeSuiteAllProcsBody != nil { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true break } if node.SynchronizedBeforeSuiteProc1Body == nil { body, hasContext := extractSynchronizedBeforeSuiteProc1Body(arg) if body == nil { appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t, node.CodeLocation)) trackedFunctionError = true } node.SynchronizedBeforeSuiteProc1Body, node.SynchronizedBeforeSuiteProc1BodyHasContext = body, hasContext } else if node.SynchronizedBeforeSuiteAllProcsBody == nil { body, hasContext := extractSynchronizedBeforeSuiteAllProcsBody(arg) if body == nil { appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t, node.CodeLocation)) trackedFunctionError = true } node.SynchronizedBeforeSuiteAllProcsBody, node.SynchronizedBeforeSuiteAllProcsBodyHasContext = body, 
hasContext } } else if nodeType.Is(types.NodeTypeSynchronizedAfterSuite) { if node.SynchronizedAfterSuiteAllProcsBody != nil && node.SynchronizedAfterSuiteProc1Body != nil { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true break } body, hasContext := extractBodyFunction(deprecationTracker, node.CodeLocation, arg) if body == nil { appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType)) trackedFunctionError = true break } if node.SynchronizedAfterSuiteAllProcsBody == nil { node.SynchronizedAfterSuiteAllProcsBody, node.SynchronizedAfterSuiteAllProcsBodyHasContext = body, hasContext } else if node.SynchronizedAfterSuiteProc1Body == nil { node.SynchronizedAfterSuiteProc1Body, node.SynchronizedAfterSuiteProc1BodyHasContext = body, hasContext } } else { if node.Body != nil { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true break } node.Body, node.HasContext = extractBodyFunction(deprecationTracker, node.CodeLocation, arg) if node.Body == nil { appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType)) trackedFunctionError = true break } } default: remainingArgs = append(remainingArgs, arg) } } // validations if node.MarkedPending && node.MarkedFocus { appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType)) } if node.MarkedContinueOnFailure && !node.MarkedOrdered { appendError(types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation)) } hasContext := node.HasContext || node.SynchronizedAfterSuiteProc1BodyHasContext || node.SynchronizedAfterSuiteAllProcsBodyHasContext || node.SynchronizedBeforeSuiteProc1BodyHasContext || node.SynchronizedBeforeSuiteAllProcsBodyHasContext if !hasContext && (node.NodeTimeout > 0 || node.SpecTimeout > 0 || node.GracePeriod > 0) && len(errors) == 0 { 
appendError(types.GinkgoErrors.InvalidTimeoutOrGracePeriodForNonContextNode(node.CodeLocation, nodeType)) } if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportBeforeSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError { appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType)) } if node.NodeType.Is(types.NodeTypeSynchronizedBeforeSuite) && !trackedFunctionError && (node.SynchronizedBeforeSuiteProc1Body == nil || node.SynchronizedBeforeSuiteAllProcsBody == nil) { appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType)) } if node.NodeType.Is(types.NodeTypeSynchronizedAfterSuite) && !trackedFunctionError && (node.SynchronizedAfterSuiteProc1Body == nil || node.SynchronizedAfterSuiteAllProcsBody == nil) { appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType)) } for _, arg := range remainingArgs { appendError(types.GinkgoErrors.UnknownDecorator(node.CodeLocation, nodeType, arg)) } if node.FlakeAttempts > 0 && node.MustPassRepeatedly > 0 { appendError(types.GinkgoErrors.InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(node.CodeLocation, nodeType)) } if len(errors) > 0 { return Node{}, errors } return node, errors } var doneType = reflect.TypeOf(make(Done)) func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg interface{}) (func(SpecContext), bool) { t := reflect.TypeOf(arg) if t.NumOut() > 0 || t.NumIn() > 1 { return nil, false } if t.NumIn() == 1 { if t.In(0) == doneType { deprecationTracker.TrackDeprecation(types.Deprecations.Async(), cl) deprecatedAsyncBody := arg.(func(Done)) return func(SpecContext) { deprecatedAsyncBody(make(Done)) }, false } else if t.In(0).Implements(specContextType) { return arg.(func(SpecContext)), true } else if t.In(0).Implements(contextType) { 
body := arg.(func(context.Context)) return func(c SpecContext) { body(c) }, true } return nil, false } body := arg.(func()) return func(SpecContext) { body() }, false } var byteType = reflect.TypeOf([]byte{}) func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext) []byte, bool) { t := reflect.TypeOf(arg) v := reflect.ValueOf(arg) if t.NumOut() > 1 || t.NumIn() > 1 { return nil, false } else if t.NumOut() == 1 && t.Out(0) != byteType { return nil, false } else if t.NumIn() == 1 && !t.In(0).Implements(contextType) { return nil, false } hasContext := t.NumIn() == 1 return func(c SpecContext) []byte { var out []reflect.Value if hasContext { out = v.Call([]reflect.Value{reflect.ValueOf(c)}) } else { out = v.Call([]reflect.Value{}) } if len(out) == 1 { return (out[0].Interface()).([]byte) } else { return []byte{} } }, hasContext } func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecContext, []byte), bool) { t := reflect.TypeOf(arg) v := reflect.ValueOf(arg) hasContext, hasByte := false, false if t.NumOut() > 0 || t.NumIn() > 2 { return nil, false } else if t.NumIn() == 2 && t.In(0).Implements(contextType) && t.In(1) == byteType { hasContext, hasByte = true, true } else if t.NumIn() == 1 && t.In(0).Implements(contextType) { hasContext = true } else if t.NumIn() == 1 && t.In(0) == byteType { hasByte = true } else if t.NumIn() != 0 { return nil, false } return func(c SpecContext, b []byte) { in := []reflect.Value{} if hasContext { in = append(in, reflect.ValueOf(c)) } if hasByte { in = append(in, reflect.ValueOf(b)) } v.Call(in) }, hasContext } var errInterface = reflect.TypeOf((*error)(nil)).Elem() func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) { decorations, remainingArgs := PartitionDecorations(args...) 
baseOffset := 2 cl := types.NewCodeLocation(baseOffset) finalArgs := []interface{}{} for _, arg := range decorations { switch t := reflect.TypeOf(arg); { case t == reflect.TypeOf(Offset(0)): cl = types.NewCodeLocation(baseOffset + int(arg.(Offset))) case t == reflect.TypeOf(types.CodeLocation{}): cl = arg.(types.CodeLocation) default: finalArgs = append(finalArgs, arg) } } finalArgs = append(finalArgs, cl) if len(remainingArgs) == 0 { return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)} } callback := reflect.ValueOf(remainingArgs[0]) if !(callback.Kind() == reflect.Func) { return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)} } callArgs := []reflect.Value{} for _, arg := range remainingArgs[1:] { callArgs = append(callArgs, reflect.ValueOf(arg)) } hasContext := false t := callback.Type() if t.NumIn() > 0 { if t.In(0).Implements(specContextType) { hasContext = true } else if t.In(0).Implements(contextType) && (len(callArgs) == 0 || !callArgs[0].Type().Implements(contextType)) { hasContext = true } } handleFailure := func(out []reflect.Value) { if len(out) == 0 { return } last := out[len(out)-1] if last.Type().Implements(errInterface) && !last.IsNil() { fail(fmt.Sprintf("DeferCleanup callback returned error: %v", last), cl) } } if hasContext { finalArgs = append(finalArgs, func(c SpecContext) { out := callback.Call(append([]reflect.Value{reflect.ValueOf(c)}, callArgs...)) handleFailure(out) }) } else { finalArgs = append(finalArgs, func() { out := callback.Call(callArgs) handleFailure(out) }) } return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs...) 
} func (n Node) IsZero() bool { return n.ID == 0 } /* Nodes */ type Nodes []Node func (n Nodes) Clone() Nodes { nodes := make(Nodes, len(n)) copy(nodes, n) return nodes } func (n Nodes) CopyAppend(nodes ...Node) Nodes { numN := len(n) out := make(Nodes, numN+len(nodes)) copy(out, n) for j, node := range nodes { out[numN+j] = node } return out } func (n Nodes) SplitAround(pivot Node) (Nodes, Nodes) { pivotIdx := len(n) for i := range n { if n[i].ID == pivot.ID { pivotIdx = i break } } left := n[:pivotIdx] right := Nodes{} if pivotIdx+1 < len(n) { right = n[pivotIdx+1:] } return left, right } func (n Nodes) FirstNodeWithType(nodeTypes types.NodeType) Node { for i := range n { if n[i].NodeType.Is(nodeTypes) { return n[i] } } return Node{} } func (n Nodes) WithType(nodeTypes types.NodeType) Nodes { count := 0 for i := range n { if n[i].NodeType.Is(nodeTypes) { count++ } } out, j := make(Nodes, count), 0 for i := range n { if n[i].NodeType.Is(nodeTypes) { out[j] = n[i] j++ } } return out } func (n Nodes) WithoutType(nodeTypes types.NodeType) Nodes { count := 0 for i := range n { if !n[i].NodeType.Is(nodeTypes) { count++ } } out, j := make(Nodes, count), 0 for i := range n { if !n[i].NodeType.Is(nodeTypes) { out[j] = n[i] j++ } } return out } func (n Nodes) WithoutNode(nodeToExclude Node) Nodes { idxToExclude := len(n) for i := range n { if n[i].ID == nodeToExclude.ID { idxToExclude = i break } } if idxToExclude == len(n) { return n } out, j := make(Nodes, len(n)-1), 0 for i := range n { if i == idxToExclude { continue } out[j] = n[i] j++ } return out } func (n Nodes) Filter(filter func(Node) bool) Nodes { trufa, count := make([]bool, len(n)), 0 for i := range n { if filter(n[i]) { trufa[i] = true count += 1 } } out, j := make(Nodes, count), 0 for i := range n { if trufa[i] { out[j] = n[i] j++ } } return out } func (n Nodes) FirstSatisfying(filter func(Node) bool) Node { for i := range n { if filter(n[i]) { return n[i] } } return Node{} } func (n Nodes) 
WithinNestingLevel(deepestNestingLevel int) Nodes { count := 0 for i := range n { if n[i].NestingLevel <= deepestNestingLevel { count++ } } out, j := make(Nodes, count), 0 for i := range n { if n[i].NestingLevel <= deepestNestingLevel { out[j] = n[i] j++ } } return out } func (n Nodes) SortedByDescendingNestingLevel() Nodes { out := make(Nodes, len(n)) copy(out, n) sort.SliceStable(out, func(i int, j int) bool { return out[i].NestingLevel > out[j].NestingLevel }) return out } func (n Nodes) SortedByAscendingNestingLevel() Nodes { out := make(Nodes, len(n)) copy(out, n) sort.SliceStable(out, func(i int, j int) bool { return out[i].NestingLevel < out[j].NestingLevel }) return out } func (n Nodes) FirstWithNestingLevel(level int) Node { for i := range n { if n[i].NestingLevel == level { return n[i] } } return Node{} } func (n Nodes) Reverse() Nodes { out := make(Nodes, len(n)) for i := range n { out[len(n)-1-i] = n[i] } return out } func (n Nodes) Texts() []string { out := make([]string, len(n)) for i := range n { out[i] = n[i].Text } return out } func (n Nodes) Labels() [][]string { out := make([][]string, len(n)) for i := range n { if n[i].Labels == nil { out[i] = []string{} } else { out[i] = []string(n[i].Labels) } } return out } func (n Nodes) UnionOfLabels() []string { out := []string{} seen := map[string]bool{} for i := range n { for _, label := range n[i].Labels { if !seen[label] { seen[label] = true out = append(out, label) } } } return out } func (n Nodes) CodeLocations() []types.CodeLocation { out := make([]types.CodeLocation, len(n)) for i := range n { out[i] = n[i].CodeLocation } return out } func (n Nodes) BestTextFor(node Node) string { if node.Text != "" { return node.Text } parentNestingLevel := node.NestingLevel - 1 for i := range n { if n[i].Text != "" && n[i].NestingLevel == parentNestingLevel { return n[i].Text } } return "" } func (n Nodes) ContainsNodeID(id uint) bool { for i := range n { if n[i].ID == id { return true } } return false } func (n 
Nodes) HasNodeMarkedPending() bool { for i := range n { if n[i].MarkedPending { return true } } return false } func (n Nodes) HasNodeMarkedFocus() bool { for i := range n { if n[i].MarkedFocus { return true } } return false } func (n Nodes) HasNodeMarkedSerial() bool { for i := range n { if n[i].MarkedSerial { return true } } return false } func (n Nodes) FirstNodeMarkedOrdered() Node { for i := range n { if n[i].MarkedOrdered { return n[i] } } return Node{} } func (n Nodes) IndexOfFirstNodeMarkedOrdered() int { for i := range n { if n[i].MarkedOrdered { return i } } return -1 } func (n Nodes) GetMaxFlakeAttempts() int { maxFlakeAttempts := 0 for i := range n { if n[i].FlakeAttempts > 0 { maxFlakeAttempts = n[i].FlakeAttempts } } return maxFlakeAttempts } func (n Nodes) GetMaxMustPassRepeatedly() int { maxMustPassRepeatedly := 0 for i := range n { if n[i].MustPassRepeatedly > 0 { maxMustPassRepeatedly = n[i].MustPassRepeatedly } } return maxMustPassRepeatedly } func unrollInterfaceSlice(args interface{}) []interface{} { v := reflect.ValueOf(args) if v.Kind() != reflect.Slice { return []interface{}{args} } out := []interface{}{} for i := 0; i < v.Len(); i++ { el := reflect.ValueOf(v.Index(i).Interface()) if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) { out = append(out, unrollInterfaceSlice(el.Interface())...) } else { out = append(out, v.Index(i).Interface()) } } return out }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/group.go
vendor/github.com/onsi/ginkgo/v2/internal/group.go
package internal import ( "fmt" "time" "github.com/onsi/ginkgo/v2/types" ) type runOncePair struct { //nodeId should only run once... nodeID uint nodeType types.NodeType //...for specs in a hierarchy that includes this context containerID uint } func (pair runOncePair) isZero() bool { return pair.nodeID == 0 } func runOncePairForNode(node Node, containerID uint) runOncePair { return runOncePair{ nodeID: node.ID, nodeType: node.NodeType, containerID: containerID, } } type runOncePairs []runOncePair func runOncePairsForSpec(spec Spec) runOncePairs { pairs := runOncePairs{} containers := spec.Nodes.WithType(types.NodeTypeContainer) for _, node := range spec.Nodes { if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) { pairs = append(pairs, runOncePairForNode(node, containers.FirstWithNestingLevel(node.NestingLevel-1).ID)) } else if node.NodeType.Is(types.NodeTypeBeforeEach|types.NodeTypeJustBeforeEach|types.NodeTypeAfterEach|types.NodeTypeJustAfterEach) && node.MarkedOncePerOrdered { passedIntoAnOrderedContainer := false firstOrderedContainerDeeperThanNode := containers.FirstSatisfying(func(container Node) bool { passedIntoAnOrderedContainer = passedIntoAnOrderedContainer || container.MarkedOrdered return container.NestingLevel >= node.NestingLevel && passedIntoAnOrderedContainer }) if firstOrderedContainerDeeperThanNode.IsZero() { continue } pairs = append(pairs, runOncePairForNode(node, firstOrderedContainerDeeperThanNode.ID)) } } return pairs } func (pairs runOncePairs) runOncePairFor(nodeID uint) runOncePair { for i := range pairs { if pairs[i].nodeID == nodeID { return pairs[i] } } return runOncePair{} } func (pairs runOncePairs) hasRunOncePair(pair runOncePair) bool { for i := range pairs { if pairs[i] == pair { return true } } return false } func (pairs runOncePairs) withType(nodeTypes types.NodeType) runOncePairs { count := 0 for i := range pairs { if pairs[i].nodeType.Is(nodeTypes) { count++ } } out, j := make(runOncePairs, count), 0 for i 
:= range pairs { if pairs[i].nodeType.Is(nodeTypes) { out[j] = pairs[i] j++ } } return out } type group struct { suite *Suite specs Specs runOncePairs map[uint]runOncePairs runOnceTracker map[runOncePair]types.SpecState succeeded bool failedInARunOnceBefore bool continueOnFailure bool } func newGroup(suite *Suite) *group { return &group{ suite: suite, runOncePairs: map[uint]runOncePairs{}, runOnceTracker: map[runOncePair]types.SpecState{}, succeeded: true, failedInARunOnceBefore: false, continueOnFailure: false, } } func (g *group) initialReportForSpec(spec Spec) types.SpecReport { return types.SpecReport{ ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(), ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(), ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(), LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation, LeafNodeType: types.NodeTypeIt, LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), ParallelProcess: g.suite.config.ParallelProcess, RunningInParallel: g.suite.isRunningInParallel(), IsSerial: spec.Nodes.HasNodeMarkedSerial(), IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(), MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(), } } func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) { if spec.Nodes.HasNodeMarkedPending() { return types.SpecStatePending, types.Failure{} } if spec.Skip { return types.SpecStateSkipped, types.Failure{} } if g.suite.interruptHandler.Status().Interrupted() || g.suite.skipAll { return types.SpecStateSkipped, types.Failure{} } if !g.suite.deadline.IsZero() && g.suite.deadline.Before(time.Now()) { return types.SpecStateSkipped, types.Failure{} } if !g.succeeded && !g.continueOnFailure { return types.SpecStateSkipped, 
g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), "Spec skipped because an earlier spec in an ordered container failed") } if g.failedInARunOnceBefore && g.continueOnFailure { return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), "Spec skipped because a BeforeAll node failed") } beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach) for _, pair := range beforeOncePairs { if g.runOnceTracker[pair].Is(types.SpecStateSkipped) { return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), fmt.Sprintf("Spec skipped because Skip() was called in %s", pair.nodeType)) } } if g.suite.config.DryRun { return types.SpecStatePassed, types.Failure{} } return g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure } func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool { lastSpecID := uint(0) for idx := range g.specs { if g.specs[idx].Skip { continue } sID := g.specs[idx].SubjectID() if g.runOncePairs[sID].hasRunOncePair(pair) { lastSpecID = sID } } return lastSpecID == specID } func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) bool { failedInARunOnceBefore := false pairs := g.runOncePairs[spec.SubjectID()] nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll) nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel() nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...) 
nodes = append(nodes, spec.Nodes.FirstNodeWithType(types.NodeTypeIt)) terminatingNode, terminatingPair := Node{}, runOncePair{} deadline := time.Time{} if spec.SpecTimeout() > 0 { deadline = time.Now().Add(spec.SpecTimeout()) } for _, node := range nodes { oncePair := pairs.runOncePairFor(node.ID) if !oncePair.isZero() && g.runOnceTracker[oncePair].Is(types.SpecStatePassed) { continue } g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node)) g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime) if !oncePair.isZero() { g.runOnceTracker[oncePair] = g.suite.currentSpecReport.State } if g.suite.currentSpecReport.State != types.SpecStatePassed { terminatingNode, terminatingPair = node, oncePair failedInARunOnceBefore = !terminatingPair.isZero() break } } afterNodeWasRun := map[uint]bool{} includeDeferCleanups := false for { nodes := spec.Nodes.WithType(types.NodeTypeAfterEach) nodes = append(nodes, spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel() nodes = append(spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel(), nodes...) if !terminatingNode.IsZero() { nodes = nodes.WithinNestingLevel(terminatingNode.NestingLevel) } if includeDeferCleanups { nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse()...) nodes = append(nodes, g.suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse()...) 
} nodes = nodes.Filter(func(node Node) bool { if afterNodeWasRun[node.ID] { //this node has already been run on this attempt, don't rerun it return false } var pair runOncePair switch node.NodeType { case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll: // check if we were generated in an AfterNode that has already run if afterNodeWasRun[node.NodeIDWhereCleanupWasGenerated] { return true // we were, so we should definitely run this cleanup now } // looks like this cleanup nodes was generated by a before node or it. // the run-once status of a cleanup node is governed by the run-once status of its generator pair = pairs.runOncePairFor(node.NodeIDWhereCleanupWasGenerated) default: pair = pairs.runOncePairFor(node.ID) } if pair.isZero() { // this node is not governed by any run-once policy, we should run it return true } // it's our last chance to run if we're the last spec for our oncePair isLastSpecWithPair := g.isLastSpecWithPair(spec.SubjectID(), pair) switch g.suite.currentSpecReport.State { case types.SpecStatePassed: //this attempt is passing... return isLastSpecWithPair //...we should run-once if we'this is our last chance case types.SpecStateSkipped: //the spec was skipped by the user... if isLastSpecWithPair { return true //...we're the last spec, so we should run the AfterNode } if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel { return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run } case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: // the spec has failed... 
if isFinalAttempt { if g.continueOnFailure { return isLastSpecWithPair || failedInARunOnceBefore //...we're configured to continue on failures - so we should only run if we're the last spec for this pair or if we failed in a runOnceBefore (which means we _are_ the last spec to run) } else { return true //...this was the last attempt and continueOnFailure is false therefore we are the last spec to run and so the AfterNode should run } } if !terminatingPair.isZero() { // ...and it failed in a run-once. which will be running again if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) { return terminatingNode.ID == node.NodeIDWhereCleanupWasGenerated // we should run this node if we're a clean-up generated by it } else { return terminatingNode.NestingLevel == node.NestingLevel // ...or if we're at the same nesting level } } case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted return true //...that means the test run is over and we should clean up the stack. 
Run the AfterNode } return false }) if len(nodes) == 0 && includeDeferCleanups { break } for _, node := range nodes { afterNodeWasRun[node.ID] = true state, failure := g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node)) g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime) if g.suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted { g.suite.currentSpecReport.State = state g.suite.currentSpecReport.Failure = failure } else if state.Is(types.SpecStateFailureStates) { g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, types.AdditionalFailure{State: state, Failure: failure}) } } includeDeferCleanups = true } return failedInARunOnceBefore } func (g *group) run(specs Specs) { g.specs = specs g.continueOnFailure = specs[0].Nodes.FirstNodeMarkedOrdered().MarkedContinueOnFailure for _, spec := range g.specs { g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec) } for _, spec := range g.specs { g.suite.selectiveLock.Lock() g.suite.currentSpecReport = g.initialReportForSpec(spec) g.suite.selectiveLock.Unlock() g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.evaluateSkipStatus(spec) g.suite.reporter.WillRun(g.suite.currentSpecReport) g.suite.reportEach(spec, types.NodeTypeReportBeforeEach) skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending) g.suite.currentSpecReport.StartTime = time.Now() failedInARunOnceBefore := false if !skip { var maxAttempts = 1 if g.suite.config.MustPassRepeatedly > 0 { maxAttempts = g.suite.config.MustPassRepeatedly g.suite.currentSpecReport.MaxMustPassRepeatedly = maxAttempts } else if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { maxAttempts = max(1, spec.MustPassRepeatedly()) } else if g.suite.config.FlakeAttempts > 0 { maxAttempts = g.suite.config.FlakeAttempts 
g.suite.currentSpecReport.MaxFlakeAttempts = maxAttempts } else if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { maxAttempts = max(1, spec.FlakeAttempts()) } for attempt := 0; attempt < maxAttempts; attempt++ { g.suite.currentSpecReport.NumAttempts = attempt + 1 g.suite.writer.Truncate() g.suite.outputInterceptor.StartInterceptingOutput() if attempt > 0 { if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRepeat, Attempt: attempt}) } if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRetry, Attempt: attempt}) } } failedInARunOnceBefore = g.attemptSpec(attempt == maxAttempts-1, spec) g.suite.currentSpecReport.EndTime = time.Now() g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime) g.suite.currentSpecReport.CapturedGinkgoWriterOutput += string(g.suite.writer.Bytes()) g.suite.currentSpecReport.CapturedStdOutErr += g.suite.outputInterceptor.StopInterceptingAndReturnOutput() if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates | types.SpecStateSkipped) { break } } if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) { break } else if attempt < maxAttempts-1 { af := types.AdditionalFailure{State: g.suite.currentSpecReport.State, Failure: g.suite.currentSpecReport.Failure} af.Failure.Message = fmt.Sprintf("Failure recorded during attempt %d:\n%s", attempt+1, af.Failure.Message) g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, af) } } } } g.suite.reportEach(spec, types.NodeTypeReportAfterEach) g.suite.processCurrentSpecReport() if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { 
g.succeeded = false g.failedInARunOnceBefore = g.failedInARunOnceBefore || failedInARunOnceBefore } g.suite.selectiveLock.Lock() g.suite.currentSpecReport = types.SpecReport{} g.suite.selectiveLock.Unlock() } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/spec.go
vendor/github.com/onsi/ginkgo/v2/internal/spec.go
package internal import ( "strings" "time" "github.com/onsi/ginkgo/v2/types" ) type Spec struct { Nodes Nodes Skip bool } func (s Spec) SubjectID() uint { return s.Nodes.FirstNodeWithType(types.NodeTypeIt).ID } func (s Spec) Text() string { texts := []string{} for i := range s.Nodes { if s.Nodes[i].Text != "" { texts = append(texts, s.Nodes[i].Text) } } return strings.Join(texts, " ") } func (s Spec) FirstNodeWithType(nodeTypes types.NodeType) Node { return s.Nodes.FirstNodeWithType(nodeTypes) } func (s Spec) FlakeAttempts() int { flakeAttempts := 0 for i := range s.Nodes { if s.Nodes[i].FlakeAttempts > 0 { flakeAttempts = s.Nodes[i].FlakeAttempts } } return flakeAttempts } func (s Spec) MustPassRepeatedly() int { mustPassRepeatedly := 0 for i := range s.Nodes { if s.Nodes[i].MustPassRepeatedly > 0 { mustPassRepeatedly = s.Nodes[i].MustPassRepeatedly } } return mustPassRepeatedly } func (s Spec) SpecTimeout() time.Duration { return s.FirstNodeWithType(types.NodeTypeIt).SpecTimeout } type Specs []Spec func (s Specs) HasAnySpecsMarkedPending() bool { for i := range s { if s[i].Nodes.HasNodeMarkedPending() { return true } } return false } func (s Specs) CountWithoutSkip() int { n := 0 for i := range s { if !s[i].Skip { n += 1 } } return n } func (s Specs) AtIndices(indices SpecIndices) Specs { out := make(Specs, len(indices)) for i, idx := range indices { out[i] = s[idx] } return out }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go
vendor/github.com/onsi/ginkgo/v2/internal/progress_report_wasm.go
//go:build wasm package internal import ( "os" "syscall" ) var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
vendor/github.com/onsi/ginkgo/v2/internal/ordering.go
package internal import ( "math/rand" "sort" "github.com/onsi/ginkgo/v2/types" ) type SortableSpecs struct { Specs Specs Indexes []int } func NewSortableSpecs(specs Specs) *SortableSpecs { indexes := make([]int, len(specs)) for i := range specs { indexes[i] = i } return &SortableSpecs{ Specs: specs, Indexes: indexes, } } func (s *SortableSpecs) Len() int { return len(s.Indexes) } func (s *SortableSpecs) Swap(i, j int) { s.Indexes[i], s.Indexes[j] = s.Indexes[j], s.Indexes[i] } func (s *SortableSpecs) Less(i, j int) bool { a, b := s.Specs[s.Indexes[i]], s.Specs[s.Indexes[j]] aNodes, bNodes := a.Nodes.WithType(types.NodeTypesForContainerAndIt), b.Nodes.WithType(types.NodeTypesForContainerAndIt) firstOrderedAIdx, firstOrderedBIdx := aNodes.IndexOfFirstNodeMarkedOrdered(), bNodes.IndexOfFirstNodeMarkedOrdered() if firstOrderedAIdx > -1 && firstOrderedBIdx > -1 && aNodes[firstOrderedAIdx].ID == bNodes[firstOrderedBIdx].ID { // strictly preserve order within an ordered containers. ID will track this as IDs are generated monotonically return aNodes.FirstNodeWithType(types.NodeTypeIt).ID < bNodes.FirstNodeWithType(types.NodeTypeIt).ID } // if either spec is in an ordered container - only use the nodes up to the outermost ordered container if firstOrderedAIdx > -1 { aNodes = aNodes[:firstOrderedAIdx+1] } if firstOrderedBIdx > -1 { bNodes = bNodes[:firstOrderedBIdx+1] } for i := 0; i < len(aNodes) && i < len(bNodes); i++ { aCL, bCL := aNodes[i].CodeLocation, bNodes[i].CodeLocation if aCL.FileName != bCL.FileName { return aCL.FileName < bCL.FileName } if aCL.LineNumber != bCL.LineNumber { return aCL.LineNumber < bCL.LineNumber } } // either everything is equal or we have different lengths of CLs if len(aNodes) != len(bNodes) { return len(aNodes) < len(bNodes) } // ok, now we are sure everything was equal. 
so we use the spec text to break ties for i := 0; i < len(aNodes); i++ { if aNodes[i].Text != bNodes[i].Text { return aNodes[i].Text < bNodes[i].Text } } // ok, all those texts were equal. we'll use the ID of the most deeply nested node as a last resort return aNodes[len(aNodes)-1].ID < bNodes[len(bNodes)-1].ID } type GroupedSpecIndices []SpecIndices type SpecIndices []int func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, GroupedSpecIndices) { /* Ginkgo has sophisticated support for randomizing specs. Specs are guaranteed to have the same order for a given seed across test runs. By default only top-level containers and specs are shuffled - this makes for a more intuitive debugging experience - specs within a given container run in the order they appear in the file. Developers can set -randomizeAllSpecs to shuffle _all_ specs. In addition, spec containers can be marked as Ordered. Specs within an Ordered container are never shuffled. Finally, specs and spec containers can be marked as Serial. When running in parallel, serial specs run on Process #1 _after_ all other processes have finished. */ // Seed a new random source based on thee configured random seed. r := rand.New(rand.NewSource(suiteConfig.RandomSeed)) // first, we sort the entire suite to ensure a deterministic order. the sort is performed by filename, then line number, and then spec text. this ensures every parallel process has the exact same spec order and is only necessary to cover the edge case where the user iterates over a map to generate specs. 
sortableSpecs := NewSortableSpecs(specs) sort.Sort(sortableSpecs) // then we break things into execution groups // a group represents a single unit of execution and is a collection of SpecIndices // usually a group is just a single spec, however ordered containers must be preserved as a single group executionGroupIDs := []uint{} executionGroups := map[uint]SpecIndices{} for _, idx := range sortableSpecs.Indexes { spec := specs[idx] groupNode := spec.Nodes.FirstNodeMarkedOrdered() if groupNode.IsZero() { groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt) } executionGroups[groupNode.ID] = append(executionGroups[groupNode.ID], idx) if len(executionGroups[groupNode.ID]) == 1 { executionGroupIDs = append(executionGroupIDs, groupNode.ID) } } // now, we only shuffle all the execution groups if we're randomizing all specs, otherwise // we shuffle outermost containers. so we need to form shufflable groupings of GroupIDs shufflableGroupingIDs := []uint{} shufflableGroupingIDToGroupIDs := map[uint][]uint{} // for each execution group we're going to have to pick a node to represent how the // execution group is grouped for shuffling: nodeTypesToShuffle := types.NodeTypesForContainerAndIt if suiteConfig.RandomizeAllSpecs { nodeTypesToShuffle = types.NodeTypeIt } //so, for each execution group: for _, groupID := range executionGroupIDs { // pick out a representative spec representativeSpec := specs[executionGroups[groupID][0]] // and grab the node on the spec that will represent which shufflable group this execution group belongs tu shufflableGroupingNode := representativeSpec.Nodes.FirstNodeWithType(nodeTypesToShuffle) //add the execution group to its shufflable group shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID] = append(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID], groupID) //and if it's the first one in if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 { // record the shuffleable group ID shufflableGroupingIDs = 
append(shufflableGroupingIDs, shufflableGroupingNode.ID) } } // now we permute the sorted shufflable grouping IDs and build the ordered Groups orderedGroups := GroupedSpecIndices{} permutation := r.Perm(len(shufflableGroupingIDs)) for _, j := range permutation { //let's get the execution group IDs for this shufflable group: executionGroupIDsForJ := shufflableGroupingIDToGroupIDs[shufflableGroupingIDs[j]] // and we'll add their associated specindices to the orderedGroups slice: for _, executionGroupID := range executionGroupIDsForJ { orderedGroups = append(orderedGroups, executionGroups[executionGroupID]) } } // If we're running in series, we're done. if suiteConfig.ParallelTotal == 1 { return orderedGroups, GroupedSpecIndices{} } // We're running in parallel so we need to partition the ordered groups into a parallelizable set and a serialized set. // The parallelizable groups will run across all Ginkgo processes... // ...the serial groups will only run on Process #1 after all other processes have exited. parallelizableGroups, serialGroups := GroupedSpecIndices{}, GroupedSpecIndices{} for _, specIndices := range orderedGroups { if specs[specIndices[0]].Nodes.HasNodeMarkedSerial() { serialGroups = append(serialGroups, specIndices) } else { parallelizableGroups = append(parallelizableGroups, specIndices) } } return parallelizableGroups, serialGroups }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go
vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_wasm.go
//go:build wasm package internal func NewOutputInterceptor() OutputInterceptor { return &NoopOutputInterceptor{} }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/writer.go
vendor/github.com/onsi/ginkgo/v2/internal/writer.go
package internal import ( "bytes" "fmt" "io" "sync" "github.com/go-logr/logr" "github.com/go-logr/logr/funcr" ) type WriterMode uint const ( WriterModeStreamAndBuffer WriterMode = iota WriterModeBufferOnly ) type WriterInterface interface { io.Writer Truncate() Bytes() []byte Len() int } // Writer implements WriterInterface and GinkgoWriterInterface type Writer struct { buffer *bytes.Buffer outWriter io.Writer lock *sync.Mutex mode WriterMode streamIndent []byte indentNext bool teeWriters []io.Writer } func NewWriter(outWriter io.Writer) *Writer { return &Writer{ buffer: &bytes.Buffer{}, lock: &sync.Mutex{}, outWriter: outWriter, mode: WriterModeStreamAndBuffer, streamIndent: []byte(" "), indentNext: true, } } func (w *Writer) SetMode(mode WriterMode) { w.lock.Lock() defer w.lock.Unlock() w.mode = mode } func (w *Writer) Len() int { w.lock.Lock() defer w.lock.Unlock() return w.buffer.Len() } var newline = []byte("\n") func (w *Writer) Write(b []byte) (n int, err error) { w.lock.Lock() defer w.lock.Unlock() for _, teeWriter := range w.teeWriters { teeWriter.Write(b) } if w.mode == WriterModeStreamAndBuffer { line, remaining, found := []byte{}, b, false for len(remaining) > 0 { line, remaining, found = bytes.Cut(remaining, newline) if len(line) > 0 { if w.indentNext { w.outWriter.Write(w.streamIndent) w.indentNext = false } w.outWriter.Write(line) } if found { w.outWriter.Write(newline) w.indentNext = true } } } return w.buffer.Write(b) } func (w *Writer) Truncate() { w.lock.Lock() defer w.lock.Unlock() w.buffer.Reset() } func (w *Writer) Bytes() []byte { w.lock.Lock() defer w.lock.Unlock() b := w.buffer.Bytes() copied := make([]byte, len(b)) copy(copied, b) return copied } // GinkgoWriterInterface func (w *Writer) TeeTo(writer io.Writer) { w.lock.Lock() defer w.lock.Unlock() w.teeWriters = append(w.teeWriters, writer) } func (w *Writer) ClearTeeWriters() { w.lock.Lock() defer w.lock.Unlock() w.teeWriters = []io.Writer{} } func (w *Writer) Print(a ...interface{}) { 
fmt.Fprint(w, a...) } func (w *Writer) Printf(format string, a ...interface{}) { fmt.Fprintf(w, format, a...) } func (w *Writer) Println(a ...interface{}) { fmt.Fprintln(w, a...) } func GinkgoLogrFunc(writer *Writer) logr.Logger { return funcr.New(func(prefix, args string) { if prefix == "" { writer.Printf("%s\n", args) } else { writer.Printf("%s %s\n", prefix, args) } }, funcr.Options{}) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/counter.go
vendor/github.com/onsi/ginkgo/v2/internal/counter.go
package internal func MakeIncrementingIndexCounter() func() (int, error) { idx := -1 return func() (int, error) { idx += 1 return idx, nil } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go
package internal

import (
	"bytes"
	"io"
	"os"
	"time"
)

// BAILOUT_TIME is how long PauseIntercepting waits for the pipe-draining
// goroutine to deliver its content before assuming the pipe is being held
// open by an external process and triggering the emergency bailout.
const BAILOUT_TIME = 1 * time.Second

// BAILOUT_MESSAGE is appended to the captured output when the bailout fires.
// NOTE(review): the exact line wrapping below is reconstructed — the original
// raw-string layout was lost in transit; the text itself is verbatim.
const BAILOUT_MESSAGE = `Ginkgo detected an issue while intercepting output.

When running in parallel, Ginkgo captures stdout and stderr output and attaches it to the running spec. It looks like that process is getting stuck for this suite.

This usually happens if you, or a library you are using, spin up an external process and set cmd.Stdout = os.Stdout and/or cmd.Stderr = os.Stderr. This causes the external process to keep Ginkgo's output interceptor pipe open and causes output interception to hang.

Ginkgo has detected this and shortcircuited the capture process. The specs will continue running after this message however output from the external process that caused this issue will not be captured.

You have several options to fix this. In preferred order they are:

1. Pass GinkgoWriter instead of os.Stdout or os.Stderr to your process.
2. Ensure your process exits before the current spec completes. If your process is long-lived and must cross spec boundaries, this option won't work for you.
3. Pause Ginkgo's output interceptor before starting your process and then resume it after. Use PauseOutputInterception() and ResumeOutputInterception() to do this.
4. Set --output-interceptor-mode=none when running your Ginkgo suite. This will turn off all output interception but allow specs to run in parallel without this issue. You may miss important output if you do this including output from Go's race detector.

More details on issue #851 - https://github.com/onsi/ginkgo/issues/851
`

/*
The OutputInterceptor is used to intercept and capture all stdout and stderr output during a test run.
*/
type OutputInterceptor interface {
	StartInterceptingOutput()
	StartInterceptingOutputAndForwardTo(io.Writer)
	StopInterceptingAndReturnOutput() string

	PauseIntercepting()
	ResumeIntercepting()

	Shutdown()
}

// NoopOutputInterceptor satisfies OutputInterceptor but performs no
// interception at all.
type NoopOutputInterceptor struct{}

func (interceptor NoopOutputInterceptor) StartInterceptingOutput()                      {}
func (interceptor NoopOutputInterceptor) StartInterceptingOutputAndForwardTo(io.Writer) {}
func (interceptor NoopOutputInterceptor) StopInterceptingAndReturnOutput() string       { return "" }
func (interceptor NoopOutputInterceptor) PauseIntercepting()                            {}
func (interceptor NoopOutputInterceptor) ResumeIntercepting()                           {}
func (interceptor NoopOutputInterceptor) Shutdown()                                     {}

// pipePair bundles the two ends of an os.Pipe.
type pipePair struct {
	reader *os.File
	writer *os.File
}

// startPipeFactory hands out fresh pipes on pipeChannel until shutdown is
// closed.  Consumers take ownership of — and must close — both files.
func startPipeFactory(pipeChannel chan pipePair, shutdown chan interface{}) {
	for {
		//make the next pipe...
		pair := pipePair{}
		pair.reader, pair.writer, _ = os.Pipe()

		select {
		//...and provide it to the next consumer (they are responsible for closing the files)
		case pipeChannel <- pair:
			continue
		//...or close the files if we were told to shutdown
		case <-shutdown:
			pair.reader.Close()
			pair.writer.Close()
			return
		}
	}
}

// interceptorImplementation abstracts the platform-specific mechanics of
// cloning the original stdout/stderr and pointing them at a pipe.
type interceptorImplementation interface {
	CreateStdoutStderrClones() (*os.File, *os.File)
	ConnectPipeToStdoutStderr(*os.File)
	RestoreStdoutStderrFromClones(*os.File, *os.File)
	ShutdownClones(*os.File, *os.File)
}

// genericOutputInterceptor implements the interception lifecycle that is
// common to all platforms, delegating the actual fd/global redirection to its
// interceptorImplementation.
type genericOutputInterceptor struct {
	intercepting bool // true while output is actively being captured

	stdoutClone *os.File // preserved originals so interception can be undone
	stderrClone *os.File
	pipe        pipePair // the pipe currently wired into stdout/stderr

	shutdown           chan interface{} // closed to stop the pipe factory
	emergencyBailout   chan interface{} // closed when BAILOUT_TIME elapses (see PauseIntercepting)
	pipeChannel        chan pipePair    // supplies pre-made pipes from startPipeFactory
	interceptedContent chan string      // delivers captured output from the drain goroutine

	forwardTo         io.Writer // also receives captured output in real time
	accumulatedOutput string    // everything captured since StartInterceptingOutput*

	implementation interceptorImplementation
}

// StartInterceptingOutput begins capture, discarding the forwarded copy.
func (interceptor *genericOutputInterceptor) StartInterceptingOutput() {
	interceptor.StartInterceptingOutputAndForwardTo(io.Discard)
}

// StartInterceptingOutputAndForwardTo begins capture, also mirroring captured
// output to w as it arrives.  A no-op if interception is already active.
func (interceptor *genericOutputInterceptor) StartInterceptingOutputAndForwardTo(w io.Writer) {
	if interceptor.intercepting {
		return
	}
	interceptor.accumulatedOutput = ""
	interceptor.forwardTo = w
	interceptor.ResumeIntercepting()
}

// StopInterceptingAndReturnOutput ends capture (if active) and returns
// everything accumulated since interception started.
func (interceptor *genericOutputInterceptor) StopInterceptingAndReturnOutput() string {
	if interceptor.intercepting {
		interceptor.PauseIntercepting()
	}
	return interceptor.accumulatedOutput
}

// ResumeIntercepting wires a fresh pipe into stdout/stderr and spins up a
// goroutine to drain it.  A no-op if interception is already active.
func (interceptor *genericOutputInterceptor) ResumeIntercepting() {
	if interceptor.intercepting {
		return
	}
	interceptor.intercepting = true
	if interceptor.stdoutClone == nil {
		// First activation: preserve the originals and start the background
		// pipe factory.
		interceptor.stdoutClone, interceptor.stderrClone = interceptor.implementation.CreateStdoutStderrClones()
		interceptor.shutdown = make(chan interface{})
		go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown)
	}

	// Now we make a pipe, we'll use this to redirect the input to the 1 and 2 file descriptors (this is how everything else in the world is string to log to stdout and stderr)
	// we get the pipe from our pipe factory. it runs in the background so we can request the next pipe while the spec being intercepted is running
	interceptor.pipe = <-interceptor.pipeChannel

	interceptor.emergencyBailout = make(chan interface{})

	//Spin up a goroutine to copy data from the pipe into a buffer, this is how we capture any output the user is emitting
	go func() {
		buffer := &bytes.Buffer{}
		destination := io.MultiWriter(buffer, interceptor.forwardTo)
		copyFinished := make(chan interface{})
		reader := interceptor.pipe.reader
		go func() {
			io.Copy(destination, reader)
			reader.Close() // close the read end of the pipe so we don't leak a file descriptor
			close(copyFinished)
		}()
		select {
		case <-copyFinished:
			interceptor.interceptedContent <- buffer.String()
		case <-interceptor.emergencyBailout:
			interceptor.interceptedContent <- ""
		}
	}()

	interceptor.implementation.ConnectPipeToStdoutStderr(interceptor.pipe.writer)
}

// PauseIntercepting unwires the pipe, restores the original stdout/stderr,
// and folds the captured content into accumulatedOutput.  If the drain
// goroutine does not finish within BAILOUT_TIME (see the comment inside), the
// emergency bailout fires and BAILOUT_MESSAGE is appended instead.
func (interceptor *genericOutputInterceptor) PauseIntercepting() {
	if !interceptor.intercepting {
		return
	}
	// first we have to close the write end of the pipe. To do this we have to close all file descriptors pointing
	// to the write end. So that would be the pipewriter itself, and FD #1 and FD #2 if we've Dup2'd them
	interceptor.pipe.writer.Close() // the pipewriter itself

	// we also need to stop intercepting. we do that by reconnecting the stdout and stderr file descriptions back to their respective #1 and #2 file descriptors;
	// this also closes #1 and #2 before it points that their original stdout and stderr file descriptions
	interceptor.implementation.RestoreStdoutStderrFromClones(interceptor.stdoutClone, interceptor.stderrClone)

	var content string
	select {
	case content = <-interceptor.interceptedContent:
	case <-time.After(BAILOUT_TIME):
		/*
			By closing all the pipe writer's file descriptors associated with the pipe writer's file description the io.Copy reading from the reader should eventually receive an EOF and exit.

			**However**, if the user has spun up an external process and passed in os.Stdout/os.Stderr to cmd.Stdout/cmd.Stderr then the external process will have a file descriptor pointing to the pipe writer's file description and it will not close until the external process exits.

			That would leave us hanging here waiting for the io.Copy to close forever. Instead we invoke this emergency escape valve. This returns whatever content we've got but leaves the io.Copy running. This ensures the external process can continue writing without hanging at the cost of leaking a goroutine and file descriptor (those these will be cleaned up when the process exits).

			We tack on a message to notify the user that they've hit this edgecase and encourage them to address it.
		*/
		close(interceptor.emergencyBailout)
		content = <-interceptor.interceptedContent + BAILOUT_MESSAGE
	}

	interceptor.accumulatedOutput += content
	interceptor.intercepting = false
}

// Shutdown stops interception entirely: pauses capture, halts the pipe
// factory, and releases the preserved stdout/stderr clones.
func (interceptor *genericOutputInterceptor) Shutdown() {
	interceptor.PauseIntercepting()

	if interceptor.stdoutClone != nil {
		close(interceptor.shutdown)
		interceptor.implementation.ShutdownClones(interceptor.stdoutClone, interceptor.stderrClone)
		interceptor.stdoutClone = nil
		interceptor.stderrClone = nil
	}
}

/* This is used on windows builds but included here so it can be explicitly tested on unix systems too */
func NewOSGlobalReassigningOutputInterceptor() OutputInterceptor {
	return &genericOutputInterceptor{
		interceptedContent: make(chan string),
		pipeChannel:        make(chan pipePair),
		shutdown:           make(chan interface{}),
		implementation:     &osGlobalReassigningOutputInterceptorImpl{},
	}
}

// osGlobalReassigningOutputInterceptorImpl intercepts by reassigning the
// os.Stdout/os.Stderr package globals rather than duplicating file
// descriptors, so it only captures output written through those globals.
type osGlobalReassigningOutputInterceptorImpl struct{}

func (impl *osGlobalReassigningOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) {
	return os.Stdout, os.Stderr
}

func (impl *osGlobalReassigningOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) {
	os.Stdout = pipeWriter
	os.Stderr = pipeWriter
}

func (impl *osGlobalReassigningOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) {
	os.Stdout = stdoutClone
	os.Stderr = stderrClone
}

func (impl *osGlobalReassigningOutputInterceptorImpl) ShutdownClones(_ *os.File, _ *os.File) {
	//noop
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go
vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go
//go:build linux || solaris
// +build linux solaris

package internal

import (
	"os"
	"syscall"
)

// PROGRESS_SIGNALS lists the OS signals that request an on-demand progress
// report.  SIGUSR1 is used on the platforms selected by the build constraint
// above.
var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go
vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go
package internal

import (
	"context"
	"sort"
	"strings"
	"sync"

	"github.com/onsi/ginkgo/v2/types"
)

// ProgressReporterManager maintains a registry of user-supplied progress
// reporter functions, keyed by a monotonically increasing counter so each can
// be detached individually and all can be queried in attachment order.
type ProgressReporterManager struct {
	lock              *sync.Mutex // guards progressReporters and prCounter
	progressReporters map[int]func() string
	prCounter         int
}

// NewProgressReporterManager returns an empty, ready-to-use manager.
func NewProgressReporterManager() *ProgressReporterManager {
	return &ProgressReporterManager{
		progressReporters: map[int]func() string{},
		lock:              &sync.Mutex{},
	}
}

// AttachProgressReporter registers reporter and returns a function that
// detaches it again.
func (prm *ProgressReporterManager) AttachProgressReporter(reporter func() string) func() {
	prm.lock.Lock()
	defer prm.lock.Unlock()
	prm.prCounter += 1
	prCounter := prm.prCounter
	prm.progressReporters[prCounter] = reporter

	return func() {
		prm.lock.Lock()
		defer prm.lock.Unlock()
		delete(prm.progressReporters, prCounter)
	}
}

// QueryProgressReporters invokes each attached reporter in attachment order
// and returns the non-blank results.  Every reporter runs in its own
// goroutine so a panic can be recovered (and recorded via failer) and a hung
// reporter can be abandoned when ctx is canceled.
func (prm *ProgressReporterManager) QueryProgressReporters(ctx context.Context, failer *Failer) []string {
	prm.lock.Lock()
	keys := []int{}
	for key := range prm.progressReporters {
		keys = append(keys, key)
	}
	// Map iteration order is random; sort the counter keys to query in
	// attachment order.
	sort.Ints(keys)
	reporters := []func() string{}
	for _, key := range keys {
		reporters = append(reporters, prm.progressReporters[key])
	}
	prm.lock.Unlock()

	if len(reporters) == 0 {
		return nil
	}
	out := []string{}
	for _, reporter := range reporters {
		// Buffer of 1 lets the goroutine complete its send and exit even if
		// we abandon it below on ctx cancellation.
		reportC := make(chan string, 1)
		go func() {
			defer func() {
				e := recover()
				if e != nil {
					failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
					reportC <- "failed to query attached progress reporter"
				}
			}()
			reportC <- reporter()
		}()
		var report string
		select {
		case report = <-reportC:
		case <-ctx.Done():
			// Give up on this (and all remaining) reporters; return what we have.
			return out
		}
		if strings.TrimSpace(report) != "" {
			out = append(out, report)
		}
	}
	return out
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go
package internal

import (
	"context"

	"github.com/onsi/ginkgo/v2/types"
)

// SpecContext is a context.Context that additionally exposes the current
// spec's report and allows attaching progress reporter functions.
type SpecContext interface {
	context.Context

	SpecReport() types.SpecReport
	AttachProgressReporter(func() string) func()
}

// specContext is the concrete SpecContext.  It embeds the underlying
// context.Context and a ProgressReporterManager (which supplies
// AttachProgressReporter).
type specContext struct {
	context.Context
	*ProgressReporterManager

	cancel context.CancelCauseFunc

	suite *Suite // consulted by SpecReport for the live spec report
}

/*
SpecContext includes a reference to `suite` and embeds itself in itself as a "GINKGO_SPEC_CONTEXT" value.  This allows users to create child Contexts without having down-stream consumers (e.g. Gomega) lose access to the SpecContext and its methods.  This allows us to build extensions on top of Ginkgo that simply take an all-encompassing context.

Note that while SpecContext is used to enforce deadlines by Ginkgo it is not configured as a context.WithDeadline.  Instead, Ginkgo owns responsibility for cancelling the context when the deadline elapses.

This is because Ginkgo needs finer control over when the context is canceled.  Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running.  The only way to avoid a race here is to manually control the cancellation.
*/
func NewSpecContext(suite *Suite) *specContext {
	ctx, cancel := context.WithCancelCause(context.Background())
	sc := &specContext{
		cancel:                  cancel,
		suite:                   suite,
		ProgressReporterManager: NewProgressReporterManager(),
	}
	ctx = context.WithValue(ctx, "GINKGO_SPEC_CONTEXT", sc) //yes, yes, the go docs say don't use a string for a key... but we'd rather avoid a circular dependency between Gomega and Ginkgo
	sc.Context = ctx //thank goodness for garbage collectors that can handle circular dependencies

	return sc
}

// SpecReport returns the report for the spec currently being run by the suite.
func (sc *specContext) SpecReport() types.SpecReport {
	return sc.suite.CurrentSpecReport()
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go
vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_win.go
// +build windows package internal func NewOutputInterceptor() OutputInterceptor { return NewOSGlobalReassigningOutputInterceptor() }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/focus.go
vendor/github.com/onsi/ginkgo/v2/internal/focus.go
package internal

import (
	"regexp"
	"strings"

	"github.com/onsi/ginkgo/v2/types"
)

/*
	If a container marked as focus has a descendant that is also marked as focus, Ginkgo's policy is to
	unmark the container's focus.  This gives developers a more intuitive experience when debugging specs.
	It is common to focus a container to just run a subset of specs, then identify the specific specs within the container to focus -
	this policy allows the developer to simply focus those specific specs and not need to go back and turn the focus off of the container:

	As a common example, consider:

	FDescribe("something to debug", function() {
		It("works", function() {...})
		It("works", function() {...})
		FIt("doesn't work", function() {...})
		It("works", function() {...})
	})

	here the developer's intent is to focus in on the `"doesn't work"` spec and not to run the adjacent specs in the focused `"something to debug"` container.
	The nested policy applied by this function enables this behavior.
*/
func ApplyNestedFocusPolicyToTree(tree *TreeNode) {
	var walkTree func(tree *TreeNode) bool
	walkTree = func(tree *TreeNode) bool {
		// Pending nodes neither focus themselves nor propagate focus upward.
		if tree.Node.MarkedPending {
			return false
		}
		hasFocusedDescendant := false
		for _, child := range tree.Children {
			childHasFocus := walkTree(child)
			hasFocusedDescendant = hasFocusedDescendant || childHasFocus
		}
		// A focused descendant cancels this node's own focus marker.
		tree.Node.MarkedFocus = tree.Node.MarkedFocus && !hasFocusedDescendant
		return tree.Node.MarkedFocus || hasFocusedDescendant
	}

	walkTree(tree)
}

/*
	Ginkgo supports focussing specs using `FIt`, `FDescribe`, etc. - this is called "programmatic focus"
	It also supports focussing specs using regular expressions on the command line (`-focus=`, `-skip=`) that match against spec text
	and file filters (`-focus-files=`, `-skip-files=`) that match against code locations for nodes in specs.

	When both programmatic and file filters are provided their results are ANDed together.  If multiple kinds of filters are provided, the file filters run first followed by the regex filters.

	This function sets the `Skip` property on specs by applying Ginkgo's focus policy:
	- If there are no CLI arguments and no programmatic focus, do nothing.
	- If a spec somewhere has programmatic focus skip any specs that have no programmatic focus.
	- If there are CLI arguments parse them and skip any specs that either don't match the focus filters or do match the skip filters.

	*Note:* specs with pending nodes are Skipped when created by NewSpec.
*/
func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) {
	focusString := strings.Join(suiteConfig.FocusStrings, "|")
	skipString := strings.Join(suiteConfig.SkipStrings, "|")

	// Each SkipCheck returns true when the given spec should be skipped; the
	// checks below are accumulated and ORed together per spec.
	type SkipCheck func(spec Spec) bool

	// by default, skip any specs marked pending
	skipChecks := []SkipCheck{func(spec Spec) bool { return spec.Nodes.HasNodeMarkedPending() }}
	hasProgrammaticFocus := false

	for _, spec := range specs {
		if spec.Nodes.HasNodeMarkedFocus() && !spec.Nodes.HasNodeMarkedPending() {
			hasProgrammaticFocus = true
			break
		}
	}

	if hasProgrammaticFocus {
		// Something is focused, so everything unfocused is skipped.
		skipChecks = append(skipChecks, func(spec Spec) bool { return !spec.Nodes.HasNodeMarkedFocus() })
	}

	if suiteConfig.LabelFilter != "" {
		labelFilter, _ := types.ParseLabelFilter(suiteConfig.LabelFilter)
		skipChecks = append(skipChecks, func(spec Spec) bool {
			return !labelFilter(UnionOfLabels(suiteLabels, spec.Nodes.UnionOfLabels()))
		})
	}

	if len(suiteConfig.FocusFiles) > 0 {
		focusFilters, _ := types.ParseFileFilters(suiteConfig.FocusFiles)
		skipChecks = append(skipChecks, func(spec Spec) bool { return !focusFilters.Matches(spec.Nodes.CodeLocations()) })
	}

	if len(suiteConfig.SkipFiles) > 0 {
		skipFilters, _ := types.ParseFileFilters(suiteConfig.SkipFiles)
		skipChecks = append(skipChecks, func(spec Spec) bool { return skipFilters.Matches(spec.Nodes.CodeLocations()) })
	}

	if focusString != "" {
		// skip specs that don't match the focus string
		re := regexp.MustCompile(focusString)
		skipChecks = append(skipChecks, func(spec Spec) bool { return !re.MatchString(description + " " + spec.Text()) })
	}

	if skipString != "" {
		// skip specs that match the skip string
		re := regexp.MustCompile(skipString)
		skipChecks = append(skipChecks, func(spec Spec) bool { return re.MatchString(description + " " + spec.Text()) })
	}

	// skip specs if shouldSkip() is true.  note that we do nothing if shouldSkip() is false to avoid overwriting skip status established by the node's pending status
	processedSpecs := Specs{}
	for _, spec := range specs {
		for _, skipCheck := range skipChecks {
			if skipCheck(spec) {
				spec.Skip = true
				break
			}
		}
		processedSpecs = append(processedSpecs, spec)
	}

	return processedSpecs, hasProgrammaticFocus
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/suite.go
vendor/github.com/onsi/ginkgo/v2/internal/suite.go
package internal import ( "fmt" "sync" "time" "github.com/onsi/ginkgo/v2/internal/interrupt_handler" "github.com/onsi/ginkgo/v2/internal/parallel_support" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" "golang.org/x/net/context" ) type Phase uint const ( PhaseBuildTopLevel Phase = iota PhaseBuildTree PhaseRun ) var PROGRESS_REPORTER_DEADLING = 5 * time.Second type Suite struct { tree *TreeNode topLevelContainers Nodes *ProgressReporterManager phase Phase suiteNodes Nodes cleanupNodes Nodes failer *Failer reporter reporters.Reporter writer WriterInterface outputInterceptor OutputInterceptor interruptHandler interrupt_handler.InterruptHandlerInterface config types.SuiteConfig deadline time.Time skipAll bool report types.Report currentSpecReport types.SpecReport currentNode Node currentNodeStartTime time.Time currentSpecContext *specContext currentByStep types.SpecEvent timelineOrder int /* We don't need to lock around all operations. Just those that *could* happen concurrently. Suite, generally, only runs one node at a time - and so the possibiity for races is small. In fact, the presence of a race usually indicates the user has launched a goroutine that has leaked past the node it was launched in. However, there are some operations that can happen concurrently: - AddReportEntry and CurrentSpecReport can be accessed at any point by the user - including in goroutines that outlive the node intentionally (see, e.g. #1020). They both form a self-contained read-write pair and so a lock in them is sufficent. - generateProgressReport can be invoked at any point in time by an interrupt or a progres poll. Moreover, it requires access to currentSpecReport, currentNode, currentNodeStartTime, and progressStepCursor. To make it threadsafe we need to lock around generateProgressReport when we read those variables _and_ everywhere those variables are *written*. 
In general we don't need to worry about all possible field writes to these variables as what `generateProgressReport` does with these variables is fairly selective (hence the name of the lock). Specifically, we dont' need to lock around state and failure message changes on `currentSpecReport` - just the setting of the variable itself. */ selectiveLock *sync.Mutex client parallel_support.Client } func NewSuite() *Suite { return &Suite{ tree: &TreeNode{}, phase: PhaseBuildTopLevel, ProgressReporterManager: NewProgressReporterManager(), selectiveLock: &sync.Mutex{}, } } func (suite *Suite) Clone() (*Suite, error) { if suite.phase != PhaseBuildTopLevel { return nil, fmt.Errorf("cannot clone suite after tree has been built") } return &Suite{ tree: &TreeNode{}, phase: PhaseBuildTopLevel, ProgressReporterManager: NewProgressReporterManager(), topLevelContainers: suite.topLevelContainers.Clone(), suiteNodes: suite.suiteNodes.Clone(), selectiveLock: &sync.Mutex{}, }, nil } func (suite *Suite) BuildTree() error { // During PhaseBuildTopLevel, the top level containers are stored in suite.topLevelCotainers and entered // We now enter PhaseBuildTree where these top level containers are entered and added to the spec tree suite.phase = PhaseBuildTree for _, topLevelContainer := range suite.topLevelContainers { err := suite.PushNode(topLevelContainer) if err != nil { return err } } return nil } func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) { if suite.phase != PhaseBuildTree { panic("cannot run before building the tree = call suite.BuildTree() first") } ApplyNestedFocusPolicyToTree(suite.tree) specs := GenerateSpecsFromTreeRoot(suite.tree) specs, 
hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig) suite.phase = PhaseRun suite.client = client suite.failer = failer suite.reporter = reporter suite.writer = writer suite.outputInterceptor = outputInterceptor suite.interruptHandler = interruptHandler suite.config = suiteConfig if suite.config.Timeout > 0 { suite.deadline = time.Now().Add(suite.config.Timeout) } cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal) success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs) cancelProgressHandler() return success, hasProgrammaticFocus } func (suite *Suite) InRunPhase() bool { return suite.phase == PhaseRun } /* Tree Construction methods PushNode is used during PhaseBuildTopLevel and PhaseBuildTree */ func (suite *Suite) PushNode(node Node) error { if node.NodeType.Is(types.NodeTypeCleanupInvalid | types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) { return suite.pushCleanupNode(node) } if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeBeforeSuite | types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) { return suite.pushSuiteNode(node) } if suite.phase == PhaseRun { return types.GinkgoErrors.PushingNodeInRunPhase(node.NodeType, node.CodeLocation) } if node.MarkedSerial { firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered() if !firstOrderedNode.IsZero() && !firstOrderedNode.MarkedSerial { return types.GinkgoErrors.InvalidSerialNodeInNonSerialOrderedContainer(node.CodeLocation, node.NodeType) } } if node.NodeType.Is(types.NodeTypeBeforeAll | types.NodeTypeAfterAll) { firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered() if firstOrderedNode.IsZero() { return types.GinkgoErrors.SetupNodeNotInOrderedContainer(node.CodeLocation, node.NodeType) } } if 
node.MarkedContinueOnFailure { firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered() if !firstOrderedNode.IsZero() { return types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation) } } if node.NodeType == types.NodeTypeContainer { // During PhaseBuildTopLevel we only track the top level containers without entering them // We only enter the top level container nodes during PhaseBuildTree // // This ensures the tree is only constructed after `go spec` has called `flag.Parse()` and gives // the user an opportunity to load suiteConfiguration information in the `TestX` go spec hook just before `RunSpecs` // is invoked. This makes the lifecycle easier to reason about and solves issues like #693. if suite.phase == PhaseBuildTopLevel { suite.topLevelContainers = append(suite.topLevelContainers, node) return nil } if suite.phase == PhaseBuildTree { parentTree := suite.tree suite.tree = &TreeNode{Node: node} parentTree.AppendChild(suite.tree) err := func() (err error) { defer func() { if e := recover(); e != nil { err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation) } }() node.Body(nil) return err }() suite.tree = parentTree return err } } else { suite.tree.AppendChild(&TreeNode{Node: node}) return nil } return nil } func (suite *Suite) pushSuiteNode(node Node) error { if suite.phase == PhaseBuildTree { return types.GinkgoErrors.SuiteNodeInNestedContext(node.NodeType, node.CodeLocation) } if suite.phase == PhaseRun { return types.GinkgoErrors.SuiteNodeDuringRunPhase(node.NodeType, node.CodeLocation) } switch node.NodeType { case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite: existingBefores := suite.suiteNodes.WithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite) if len(existingBefores) > 0 { return types.GinkgoErrors.MultipleBeforeSuiteNodes(node.NodeType, node.CodeLocation, existingBefores[0].NodeType, existingBefores[0].CodeLocation) } case types.NodeTypeAfterSuite, 
types.NodeTypeSynchronizedAfterSuite: existingAfters := suite.suiteNodes.WithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite) if len(existingAfters) > 0 { return types.GinkgoErrors.MultipleAfterSuiteNodes(node.NodeType, node.CodeLocation, existingAfters[0].NodeType, existingAfters[0].CodeLocation) } } suite.suiteNodes = append(suite.suiteNodes, node) return nil } func (suite *Suite) pushCleanupNode(node Node) error { if suite.phase != PhaseRun || suite.currentNode.IsZero() { return types.GinkgoErrors.PushingCleanupNodeDuringTreeConstruction(node.CodeLocation) } switch suite.currentNode.NodeType { case types.NodeTypeBeforeSuite, types.NodeTypeSynchronizedBeforeSuite, types.NodeTypeAfterSuite, types.NodeTypeSynchronizedAfterSuite: node.NodeType = types.NodeTypeCleanupAfterSuite case types.NodeTypeBeforeAll, types.NodeTypeAfterAll: node.NodeType = types.NodeTypeCleanupAfterAll case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportBeforeSuite, types.NodeTypeReportAfterSuite: return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType) case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite: return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation) default: node.NodeType = types.NodeTypeCleanupAfterEach } node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID node.NestingLevel = suite.currentNode.NestingLevel suite.selectiveLock.Lock() suite.cleanupNodes = append(suite.cleanupNodes, node) suite.selectiveLock.Unlock() return nil } func (suite *Suite) generateTimelineLocation() types.TimelineLocation { suite.selectiveLock.Lock() defer suite.selectiveLock.Unlock() suite.timelineOrder += 1 return types.TimelineLocation{ Offset: len(suite.currentSpecReport.CapturedGinkgoWriterOutput) + suite.writer.Len(), Order: suite.timelineOrder, Time: time.Now(), } } func (suite *Suite) 
handleSpecEvent(event types.SpecEvent) types.SpecEvent { event.TimelineLocation = suite.generateTimelineLocation() suite.selectiveLock.Lock() suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event) suite.selectiveLock.Unlock() suite.reporter.EmitSpecEvent(event) return event } func (suite *Suite) handleSpecEventEnd(eventType types.SpecEventType, startEvent types.SpecEvent) { event := startEvent event.SpecEventType = eventType event.TimelineLocation = suite.generateTimelineLocation() event.Duration = event.TimelineLocation.Time.Sub(startEvent.TimelineLocation.Time) suite.selectiveLock.Lock() suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event) suite.selectiveLock.Unlock() suite.reporter.EmitSpecEvent(event) } func (suite *Suite) By(text string, callback ...func()) error { cl := types.NewCodeLocation(2) if suite.phase != PhaseRun { return types.GinkgoErrors.ByNotDuringRunPhase(cl) } event := suite.handleSpecEvent(types.SpecEvent{ SpecEventType: types.SpecEventByStart, CodeLocation: cl, Message: text, }) suite.selectiveLock.Lock() suite.currentByStep = event suite.selectiveLock.Unlock() if len(callback) == 1 { defer func() { suite.selectiveLock.Lock() suite.currentByStep = types.SpecEvent{} suite.selectiveLock.Unlock() suite.handleSpecEventEnd(types.SpecEventByEnd, event) }() callback[0]() } else if len(callback) > 1 { panic("just one callback per By, please") } return nil } /* Spec Running methods - used during PhaseRun */ func (suite *Suite) CurrentSpecReport() types.SpecReport { suite.selectiveLock.Lock() defer suite.selectiveLock.Unlock() report := suite.currentSpecReport if suite.writer != nil { report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) } report.ReportEntries = make([]ReportEntry, len(report.ReportEntries)) copy(report.ReportEntries, suite.currentSpecReport.ReportEntries) return report } // Only valid in the preview context. 
In general suite.report only includes // the specs run by _this_ node - it is only at the end of the suite that // the parallel reports are aggregated. However in the preview context we run // in series and func (suite *Suite) GetPreviewReport() types.Report { suite.selectiveLock.Lock() defer suite.selectiveLock.Unlock() return suite.report } func (suite *Suite) AddReportEntry(entry ReportEntry) error { if suite.phase != PhaseRun { return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location) } entry.TimelineLocation = suite.generateTimelineLocation() entry.Time = entry.TimelineLocation.Time suite.selectiveLock.Lock() suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry) suite.selectiveLock.Unlock() suite.reporter.EmitReportEntry(entry) return nil } func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport { timelineLocation := suite.generateTimelineLocation() suite.selectiveLock.Lock() defer suite.selectiveLock.Unlock() deadline, cancel := context.WithTimeout(context.Background(), PROGRESS_REPORTER_DEADLING) defer cancel() var additionalReports []string if suite.currentSpecContext != nil { additionalReports = append(additionalReports, suite.currentSpecContext.QueryProgressReporters(deadline, suite.failer)...) } additionalReports = append(additionalReports, suite.QueryProgressReporters(deadline, suite.failer)...) 
gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes()) pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, suite.currentByStep, gwOutput, timelineLocation, additionalReports, suite.config.SourceRoots, fullReport) if err != nil { fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error()) } return pr } func (suite *Suite) handleProgressSignal() { report := suite.generateProgressReport(false) report.Message = "{{bold}}You've requested a progress report:{{/}}" suite.emitProgressReport(report) } func (suite *Suite) emitProgressReport(report types.ProgressReport) { suite.selectiveLock.Lock() suite.currentSpecReport.ProgressReports = append(suite.currentSpecReport.ProgressReports, report.WithoutCapturedGinkgoWriterOutput()) suite.selectiveLock.Unlock() suite.reporter.EmitProgressReport(report) if suite.isRunningInParallel() { err := suite.client.PostEmitProgressReport(report) if err != nil { fmt.Println(err.Error()) } } } func (suite *Suite) isRunningInParallel() bool { return suite.config.ParallelTotal > 1 } func (suite *Suite) processCurrentSpecReport() { suite.reporter.DidRun(suite.currentSpecReport) if suite.isRunningInParallel() { suite.client.PostDidRun(suite.currentSpecReport) } suite.report.SpecReports = append(suite.report.SpecReports, suite.currentSpecReport) if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { suite.report.SuiteSucceeded = false if suite.config.FailFast || suite.currentSpecReport.State.Is(types.SpecStateAborted) { suite.skipAll = true if suite.isRunningInParallel() { suite.client.PostAbort() } } } } func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool { numSpecsThatWillBeRun := specs.CountWithoutSkip() suite.report = types.Report{ SuitePath: suitePath, SuiteDescription: description, SuiteLabels: suiteLabels, 
SuiteConfig: suite.config, SuiteHasProgrammaticFocus: hasProgrammaticFocus, PreRunStats: types.PreRunStats{ TotalSpecs: len(specs), SpecsThatWillRun: numSpecsThatWillBeRun, }, StartTime: time.Now(), } suite.reporter.SuiteWillBegin(suite.report) if suite.isRunningInParallel() { suite.client.PostSuiteWillBegin(suite.report) } suite.report.SuiteSucceeded = true suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportBeforeSuite) ranBeforeSuite := suite.report.SuiteSucceeded if suite.report.SuiteSucceeded { suite.runBeforeSuite(numSpecsThatWillBeRun) } if suite.report.SuiteSucceeded { groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config) nextIndex := MakeIncrementingIndexCounter() if suite.isRunningInParallel() { nextIndex = suite.client.FetchNextCounter } for { groupedSpecIdx, err := nextIndex() if err != nil { suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, fmt.Sprintf("Failed to iterate over specs:\n%s", err.Error())) suite.report.SuiteSucceeded = false break } if groupedSpecIdx >= len(groupedSpecIndices) { if suite.config.ParallelProcess == 1 && len(serialGroupedSpecIndices) > 0 { groupedSpecIndices, serialGroupedSpecIndices, nextIndex = serialGroupedSpecIndices, GroupedSpecIndices{}, MakeIncrementingIndexCounter() suite.client.BlockUntilNonprimaryProcsHaveFinished() continue } break } // the complexity for running groups of specs is very high because of Ordered containers and FlakeAttempts // we encapsulate that complexity in the notion of a Group that can run // Group is really just an extension of suite so it gets passed a suite and has access to all its internals // Note that group is stateful and intended for single use! 
newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx])) } if suite.config.FailOnPending && specs.HasAnySpecsMarkedPending() { suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected pending specs and --fail-on-pending is set") suite.report.SuiteSucceeded = false } if suite.config.FailOnEmpty && specs.CountWithoutSkip() == 0 { suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Detected no specs ran and --fail-on-empty is set") suite.report.SuiteSucceeded = false } } if ranBeforeSuite { suite.runAfterSuiteCleanup(numSpecsThatWillBeRun) } interruptStatus := suite.interruptHandler.Status() if interruptStatus.Interrupted() { suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String()) suite.report.SuiteSucceeded = false } suite.report.EndTime = time.Now() suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime) if !suite.deadline.IsZero() && suite.report.EndTime.After(suite.deadline) { suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite Timeout Elapsed") suite.report.SuiteSucceeded = false } suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportAfterSuite) suite.reporter.SuiteDidEnd(suite.report) if suite.isRunningInParallel() { suite.client.PostSuiteDidEnd(suite.report) } return suite.report.SuiteSucceeded } func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) { beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite) if !beforeSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 { suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ LeafNodeType: beforeSuiteNode.NodeType, LeafNodeLocation: beforeSuiteNode.CodeLocation, ParallelProcess: suite.config.ParallelProcess, RunningInParallel: suite.isRunningInParallel(), } suite.selectiveLock.Unlock() 
suite.reporter.WillRun(suite.currentSpecReport) suite.runSuiteNode(beforeSuiteNode) if suite.currentSpecReport.State.Is(types.SpecStateSkipped) { suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite") suite.skipAll = true } suite.processCurrentSpecReport() } } func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) { afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite) if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 { suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ LeafNodeType: afterSuiteNode.NodeType, LeafNodeLocation: afterSuiteNode.CodeLocation, ParallelProcess: suite.config.ParallelProcess, RunningInParallel: suite.isRunningInParallel(), } suite.selectiveLock.Unlock() suite.reporter.WillRun(suite.currentSpecReport) suite.runSuiteNode(afterSuiteNode) suite.processCurrentSpecReport() } afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse() if len(afterSuiteCleanup) > 0 { for _, cleanupNode := range afterSuiteCleanup { suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ LeafNodeType: cleanupNode.NodeType, LeafNodeLocation: cleanupNode.CodeLocation, ParallelProcess: suite.config.ParallelProcess, RunningInParallel: suite.isRunningInParallel(), } suite.selectiveLock.Unlock() suite.reporter.WillRun(suite.currentSpecReport) suite.runSuiteNode(cleanupNode) suite.processCurrentSpecReport() } } } func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { nodes := spec.Nodes.WithType(nodeType) if nodeType == types.NodeTypeReportAfterEach { nodes = nodes.SortedByDescendingNestingLevel() } if nodeType == types.NodeTypeReportBeforeEach { nodes = nodes.SortedByAscendingNestingLevel() } if len(nodes) == 0 { return } for i := range nodes { suite.writer.Truncate() suite.outputInterceptor.StartInterceptingOutput() report := 
suite.currentSpecReport nodes[i].Body = func(ctx SpecContext) { nodes[i].ReportEachBody(ctx, report) } state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i])) // If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state. // Also, if the reporter is every aborted - always override the state to propagate the abort if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) { suite.currentSpecReport.State = state suite.currentSpecReport.Failure = failure } suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes()) suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() } } func (suite *Suite) runSuiteNode(node Node) { if suite.config.DryRun { suite.currentSpecReport.State = types.SpecStatePassed return } suite.writer.Truncate() suite.outputInterceptor.StartInterceptingOutput() suite.currentSpecReport.StartTime = time.Now() var err error switch node.NodeType { case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite: suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") case types.NodeTypeCleanupAfterSuite: if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 { err = suite.client.BlockUntilNonprimaryProcsHaveFinished() } if err == nil { suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") } case types.NodeTypeSynchronizedBeforeSuite: var data []byte var runAllProcs bool if suite.config.ParallelProcess == 1 { if suite.config.ParallelTotal > 1 { suite.outputInterceptor.StopInterceptingAndReturnOutput() suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client) } node.Body = func(c SpecContext) { data = node.SynchronizedBeforeSuiteProc1Body(c) } node.HasContext = 
node.SynchronizedBeforeSuiteProc1BodyHasContext suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") if suite.config.ParallelTotal > 1 { suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() suite.outputInterceptor.StartInterceptingOutput() if suite.currentSpecReport.State.Is(types.SpecStatePassed) { err = suite.client.PostSynchronizedBeforeSuiteCompleted(types.SpecStatePassed, data) } else { err = suite.client.PostSynchronizedBeforeSuiteCompleted(suite.currentSpecReport.State, nil) } } runAllProcs = suite.currentSpecReport.State.Is(types.SpecStatePassed) && err == nil } else { var proc1State types.SpecState proc1State, data, err = suite.client.BlockUntilSynchronizedBeforeSuiteData() switch proc1State { case types.SpecStatePassed: runAllProcs = true case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1() case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped: suite.currentSpecReport.State = proc1State } } if runAllProcs { node.Body = func(c SpecContext) { node.SynchronizedBeforeSuiteAllProcsBody(c, data) } node.HasContext = node.SynchronizedBeforeSuiteAllProcsBodyHasContext suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") } case types.NodeTypeSynchronizedAfterSuite: node.Body = node.SynchronizedAfterSuiteAllProcsBody node.HasContext = node.SynchronizedAfterSuiteAllProcsBodyHasContext suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") if suite.config.ParallelProcess == 1 { if suite.config.ParallelTotal > 1 { err = suite.client.BlockUntilNonprimaryProcsHaveFinished() } if err == nil { if suite.config.ParallelTotal > 1 { suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() 
suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client) } node.Body = node.SynchronizedAfterSuiteProc1Body node.HasContext = node.SynchronizedAfterSuiteProc1BodyHasContext state, failure := suite.runNode(node, time.Time{}, "") if suite.currentSpecReport.State.Is(types.SpecStatePassed) { suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure } } } } if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error()) suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure) } suite.currentSpecReport.EndTime = time.Now() suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() } func (suite *Suite) runReportSuiteNodesIfNeedBe(nodeType types.NodeType) { nodes := suite.suiteNodes.WithType(nodeType) // only run ReportAfterSuite on proc 1 if nodeType.Is(types.NodeTypeReportAfterSuite) && suite.config.ParallelProcess != 1 { return } // if we're running ReportBeforeSuite on proc > 1 - we should wait until proc 1 has completed if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.config.ParallelProcess != 1 && len(nodes) > 0 { state, err := suite.client.BlockUntilReportBeforeSuiteCompleted() if err != nil || state.Is(types.SpecStateFailed) { suite.report.SuiteSucceeded = false } return } for _, node := range nodes { suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ LeafNodeType: node.NodeType, LeafNodeLocation: node.CodeLocation, LeafNodeText: node.Text, ParallelProcess: suite.config.ParallelProcess, RunningInParallel: suite.isRunningInParallel(), } suite.selectiveLock.Unlock() 
suite.reporter.WillRun(suite.currentSpecReport) suite.runReportSuiteNode(node, suite.report) suite.processCurrentSpecReport() } // if we're running ReportBeforeSuite and we're running in parallel - we shuld tell the other procs that we're done if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.isRunningInParallel() && len(nodes) > 0 { if suite.report.SuiteSucceeded { suite.client.PostReportBeforeSuiteCompleted(types.SpecStatePassed) } else { suite.client.PostReportBeforeSuiteCompleted(types.SpecStateFailed) } } } func (suite *Suite) runReportSuiteNode(node Node, report types.Report) { suite.writer.Truncate() suite.outputInterceptor.StartInterceptingOutput() suite.currentSpecReport.StartTime = time.Now() // if we're running a ReportAfterSuite in parallel (on proc 1) we (a) wait until other procs have exited and // (b) always fetch the latest report as prior ReportAfterSuites will contribute to it if node.NodeType.Is(types.NodeTypeReportAfterSuite) && suite.isRunningInParallel() { aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport() if err != nil { suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error()) suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure) return } report = report.Add(aggregatedReport) } node.Body = func(ctx SpecContext) { node.ReportSuiteBody(ctx, report) } suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") suite.currentSpecReport.EndTime = time.Now() suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput() } func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) 
{ if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) { suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node) } interruptStatus := suite.interruptHandler.Status() if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut { return types.SpecStateSkipped, types.Failure{} } if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt) { return types.SpecStateSkipped, types.Failure{} } if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) { return types.SpecStateSkipped, types.Failure{} } suite.selectiveLock.Lock() suite.currentNode = node suite.currentNodeStartTime = time.Now() suite.currentByStep = types.SpecEvent{} suite.selectiveLock.Unlock() defer func() { suite.selectiveLock.Lock() suite.currentNode = Node{} suite.currentNodeStartTime = time.Time{} suite.selectiveLock.Unlock() }() if text == "" { text = "TOP-LEVEL" } event := suite.handleSpecEvent(types.SpecEvent{ SpecEventType: types.SpecEventNodeStart, NodeType: node.NodeType, Message: text, CodeLocation: node.CodeLocation, }) defer func() { suite.handleSpecEventEnd(types.SpecEventNodeEnd, event) }() var failure types.Failure failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation if node.NodeType.Is(types.NodeTypeIt) || node.NodeType.Is(types.NodeTypesForSuiteLevelNodes) { failure.FailureNodeContext = types.FailureNodeIsLeafNode } else if node.NestingLevel <= 0 { failure.FailureNodeContext = types.FailureNodeAtTopLevel } else { failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1 } var outcome types.SpecState gracePeriod := suite.config.GracePeriod if node.GracePeriod >= 0 { gracePeriod = node.GracePeriod } now := time.Now() deadline 
:= suite.deadline timeoutInPlay := "suite" if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) { deadline = specDeadline timeoutInPlay = "spec" } if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) { deadline = now.Add(node.NodeTimeout) timeoutInPlay = "node" }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go
vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go
//go:build windows
// +build windows

package internal

import "os"

// PROGRESS_SIGNALS is the set of OS signals that trigger an on-demand
// progress report.  It is empty on Windows: no signal is registered here,
// so progress reports cannot be requested via an OS signal on this
// platform.  (The non-Windows build of this file presumably registers
// real signals - see the platform counterpart.)
var PROGRESS_SIGNALS = []os.Signal{}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go
//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris // +build freebsd openbsd netbsd dragonfly darwin linux solaris package internal import ( "os" "golang.org/x/sys/unix" ) func NewOutputInterceptor() OutputInterceptor { return &genericOutputInterceptor{ interceptedContent: make(chan string), pipeChannel: make(chan pipePair), shutdown: make(chan interface{}), implementation: &dupSyscallOutputInterceptorImpl{}, } } type dupSyscallOutputInterceptorImpl struct{} func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.File, *os.File) { // To clone stdout and stderr we: // First, create two clone file descriptors that point to the stdout and stderr file descriptions stdoutCloneFD, _ := unix.Dup(1) stderrCloneFD, _ := unix.Dup(2) // Important, set the fds to FD_CLOEXEC to prevent them leaking into childs // https://github.com/onsi/ginkgo/issues/1191 flags, err := unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_GETFD, 0) if err == nil { unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC) } flags, err = unix.FcntlInt(uintptr(stderrCloneFD), unix.F_GETFD, 0) if err == nil { unix.FcntlInt(uintptr(stderrCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC) } // And then wrap the clone file descriptors in files. // One benefit of this (that we don't use yet) is that we can actually write // to these files to emit output to the console even though we're intercepting output stdoutClone := os.NewFile(uintptr(stdoutCloneFD), "stdout-clone") stderrClone := os.NewFile(uintptr(stderrCloneFD), "stderr-clone") //these clones remain alive throughout the lifecycle of the suite and don't need to be recreated //this speeds things up a bit, actually. 
return stdoutClone, stderrClone } func (impl *dupSyscallOutputInterceptorImpl) ConnectPipeToStdoutStderr(pipeWriter *os.File) { // To redirect output to our pipe we need to point the 1 and 2 file descriptors (which is how the world tries to log things) // to the write end of the pipe. // We do this with Dup2 (possibly Dup3 on some architectures) to have file descriptors 1 and 2 point to the same file description as the pipeWriter // This effectively shunts data written to stdout and stderr to the write end of our pipe unix.Dup2(int(pipeWriter.Fd()), 1) unix.Dup2(int(pipeWriter.Fd()), 2) } func (impl *dupSyscallOutputInterceptorImpl) RestoreStdoutStderrFromClones(stdoutClone *os.File, stderrClone *os.File) { // To restore stdour/stderr from the clones we have the 1 and 2 file descriptors // point to the original file descriptions that we saved off in the clones. // This has the added benefit of closing the connection between these descriptors and the write end of the pipe // which is important to cause the io.Copy on the pipe.Reader to end. unix.Dup2(int(stdoutClone.Fd()), 1) unix.Dup2(int(stderrClone.Fd()), 2) } func (impl *dupSyscallOutputInterceptorImpl) ShutdownClones(stdoutClone *os.File, stderrClone *os.File) { // We're done with the clones so we can close them to clean up after ourselves stdoutClone.Close() stderrClone.Close() }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go
package interrupt_handler import ( "os" "os/signal" "sync" "syscall" "time" "github.com/onsi/ginkgo/v2/internal/parallel_support" ) var ABORT_POLLING_INTERVAL = 500 * time.Millisecond type InterruptCause uint const ( InterruptCauseInvalid InterruptCause = iota InterruptCauseSignal InterruptCauseAbortByOtherProcess ) type InterruptLevel uint const ( InterruptLevelUninterrupted InterruptLevel = iota InterruptLevelCleanupAndReport InterruptLevelReportOnly InterruptLevelBailOut ) func (ic InterruptCause) String() string { switch ic { case InterruptCauseSignal: return "Interrupted by User" case InterruptCauseAbortByOtherProcess: return "Interrupted by Other Ginkgo Process" } return "INVALID_INTERRUPT_CAUSE" } type InterruptStatus struct { Channel chan interface{} Level InterruptLevel Cause InterruptCause } func (s InterruptStatus) Interrupted() bool { return s.Level != InterruptLevelUninterrupted } func (s InterruptStatus) Message() string { return s.Cause.String() } func (s InterruptStatus) ShouldIncludeProgressReport() bool { return s.Cause != InterruptCauseAbortByOtherProcess } type InterruptHandlerInterface interface { Status() InterruptStatus } type InterruptHandler struct { c chan interface{} lock *sync.Mutex level InterruptLevel cause InterruptCause client parallel_support.Client stop chan interface{} signals []os.Signal requestAbortCheck chan interface{} } func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler { if len(signals) == 0 { signals = []os.Signal{os.Interrupt, syscall.SIGTERM} } handler := &InterruptHandler{ c: make(chan interface{}), lock: &sync.Mutex{}, stop: make(chan interface{}), requestAbortCheck: make(chan interface{}), client: client, signals: signals, } handler.registerForInterrupts() return handler } func (handler *InterruptHandler) Stop() { close(handler.stop) } func (handler *InterruptHandler) registerForInterrupts() { // os signal handling signalChannel := make(chan os.Signal, 1) 
signal.Notify(signalChannel, handler.signals...) // cross-process abort handling var abortChannel chan interface{} if handler.client != nil { abortChannel = make(chan interface{}) go func() { pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL) for { select { case <-pollTicker.C: if handler.client.ShouldAbort() { close(abortChannel) pollTicker.Stop() return } case <-handler.requestAbortCheck: if handler.client.ShouldAbort() { close(abortChannel) pollTicker.Stop() return } case <-handler.stop: pollTicker.Stop() return } } }() } go func(abortChannel chan interface{}) { var interruptCause InterruptCause for { select { case <-signalChannel: interruptCause = InterruptCauseSignal case <-abortChannel: interruptCause = InterruptCauseAbortByOtherProcess case <-handler.stop: signal.Stop(signalChannel) return } abortChannel = nil handler.lock.Lock() oldLevel := handler.level handler.cause = interruptCause if handler.level == InterruptLevelUninterrupted { handler.level = InterruptLevelCleanupAndReport } else if handler.level == InterruptLevelCleanupAndReport { handler.level = InterruptLevelReportOnly } else if handler.level == InterruptLevelReportOnly { handler.level = InterruptLevelBailOut } if handler.level != oldLevel { close(handler.c) handler.c = make(chan interface{}) } handler.lock.Unlock() } }(abortChannel) } func (handler *InterruptHandler) Status() InterruptStatus { handler.lock.Lock() status := InterruptStatus{ Level: handler.level, Channel: handler.c, Cause: handler.cause, } handler.lock.Unlock() if handler.client != nil && handler.client.ShouldAbort() && !status.Interrupted() { close(handler.requestAbortCheck) <-status.Channel return handler.Status() } return status }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go
vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_windows.go
//go:build windows // +build windows package interrupt_handler func SwallowSigQuit() { //noop }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go
vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/sigquit_swallower_unix.go
//go:build freebsd || openbsd || netbsd || dragonfly || darwin || linux || solaris // +build freebsd openbsd netbsd dragonfly darwin linux solaris package interrupt_handler import ( "os" "os/signal" "syscall" ) func SwallowSigQuit() { c := make(chan os.Signal, 1024) signal.Notify(c, syscall.SIGQUIT) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/global/init.go
vendor/github.com/onsi/ginkgo/v2/internal/global/init.go
package global import ( "github.com/onsi/ginkgo/v2/internal" ) var Suite *internal.Suite var Failer *internal.Failer var backupSuite *internal.Suite func init() { InitializeGlobals() } func InitializeGlobals() { Failer = internal.NewFailer() Suite = internal.NewSuite() } func PushClone() error { var err error backupSuite, err = Suite.Clone() return err } func PopClone() { Suite = backupSuite }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go
package parallel_support import ( "bytes" "encoding/json" "fmt" "io" "net/http" "time" "github.com/onsi/ginkgo/v2/types" ) type httpClient struct { serverHost string } func newHttpClient(serverHost string) *httpClient { return &httpClient{ serverHost: serverHost, } } func (client *httpClient) Connect() bool { resp, err := http.Get(client.serverHost + "/up") if err != nil { return false } resp.Body.Close() return resp.StatusCode == http.StatusOK } func (client *httpClient) Close() error { return nil } func (client *httpClient) post(path string, data interface{}) error { var body io.Reader if data != nil { encoded, err := json.Marshal(data) if err != nil { return err } body = bytes.NewBuffer(encoded) } resp, err := http.Post(client.serverHost+path, "application/json", body) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return fmt.Errorf("received unexpected status code %d", resp.StatusCode) } return nil } func (client *httpClient) poll(path string, data interface{}) error { for { resp, err := http.Get(client.serverHost + path) if err != nil { return err } if resp.StatusCode == http.StatusTooEarly { resp.Body.Close() time.Sleep(POLLING_INTERVAL) continue } defer resp.Body.Close() if resp.StatusCode == http.StatusGone { return ErrorGone } if resp.StatusCode == http.StatusFailedDependency { return ErrorFailed } if resp.StatusCode != http.StatusOK { return fmt.Errorf("received unexpected status code %d", resp.StatusCode) } if data != nil { return json.NewDecoder(resp.Body).Decode(data) } return nil } } func (client *httpClient) PostSuiteWillBegin(report types.Report) error { return client.post("/suite-will-begin", report) } func (client *httpClient) PostDidRun(report types.SpecReport) error { return client.post("/did-run", report) } func (client *httpClient) PostSuiteDidEnd(report types.Report) error { return client.post("/suite-did-end", report) } func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) 
error { return client.post("/progress-report", report) } func (client *httpClient) PostReportBeforeSuiteCompleted(state types.SpecState) error { return client.post("/report-before-suite-completed", state) } func (client *httpClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) { var state types.SpecState err := client.poll("/report-before-suite-state", &state) if err == ErrorGone { return types.SpecStateFailed, nil } return state, err } func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { beforeSuiteState := BeforeSuiteState{ State: state, Data: data, } return client.post("/before-suite-completed", beforeSuiteState) } func (client *httpClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) { var beforeSuiteState BeforeSuiteState err := client.poll("/before-suite-state", &beforeSuiteState) if err == ErrorGone { return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1() } return beforeSuiteState.State, beforeSuiteState.Data, err } func (client *httpClient) BlockUntilNonprimaryProcsHaveFinished() error { return client.poll("/have-nonprimary-procs-finished", nil) } func (client *httpClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) { var report types.Report err := client.poll("/aggregated-nonprimary-procs-report", &report) if err == ErrorGone { return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing() } return report, err } func (client *httpClient) FetchNextCounter() (int, error) { var counter ParallelIndexCounter err := client.poll("/counter", &counter) return counter.Index, err } func (client *httpClient) PostAbort() error { return client.post("/abort", nil) } func (client *httpClient) ShouldAbort() bool { err := client.poll("/abort", nil) if err == ErrorGone { return true } return false } func (client *httpClient) Write(p []byte) (int, error) { resp, err := 
http.Post(client.serverHost+"/emit-output", "text/plain;charset=UTF-8 ", bytes.NewReader(p)) resp.Body.Close() if resp.StatusCode != http.StatusOK { return 0, fmt.Errorf("failed to emit output") } return len(p), err }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go
/* The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser). */ package parallel_support import ( "encoding/json" "io" "net" "net/http" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" ) /* httpServer spins up on an automatically selected port and listens for communication from the forwarding reporter. It then forwards that communication to attached reporters. */ type httpServer struct { listener net.Listener handler *ServerHandler } // Create a new server, automatically selecting a port func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) { listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return nil, err } return &httpServer{ listener: listener, handler: newServerHandler(parallelTotal, reporter), }, nil } // Start the server. 
You don't need to `go s.Start()`, just `s.Start()` func (server *httpServer) Start() { httpServer := &http.Server{} mux := http.NewServeMux() httpServer.Handler = mux //streaming endpoints mux.HandleFunc("/suite-will-begin", server.specSuiteWillBegin) mux.HandleFunc("/did-run", server.didRun) mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd) mux.HandleFunc("/emit-output", server.emitOutput) mux.HandleFunc("/progress-report", server.emitProgressReport) //synchronization endpoints mux.HandleFunc("/report-before-suite-completed", server.handleReportBeforeSuiteCompleted) mux.HandleFunc("/report-before-suite-state", server.handleReportBeforeSuiteState) mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted) mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState) mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished) mux.HandleFunc("/aggregated-nonprimary-procs-report", server.handleAggregatedNonprimaryProcsReport) mux.HandleFunc("/counter", server.handleCounter) mux.HandleFunc("/up", server.handleUp) mux.HandleFunc("/abort", server.handleAbort) go httpServer.Serve(server.listener) } // Stop the server func (server *httpServer) Close() { server.listener.Close() } // The address the server can be reached it. Pass this into the `ForwardingReporter`. 
func (server *httpServer) Address() string { return "http://" + server.listener.Addr().String() } func (server *httpServer) GetSuiteDone() chan interface{} { return server.handler.done } func (server *httpServer) GetOutputDestination() io.Writer { return server.handler.outputDestination } func (server *httpServer) SetOutputDestination(w io.Writer) { server.handler.outputDestination = w } func (server *httpServer) RegisterAlive(node int, alive func() bool) { server.handler.registerAlive(node, alive) } // // Streaming Endpoints // // The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool { defer request.Body.Close() if json.NewDecoder(request.Body).Decode(object) != nil { writer.WriteHeader(http.StatusBadRequest) return false } return true } func (server *httpServer) handleError(err error, writer http.ResponseWriter) bool { if err == nil { return false } switch err { case ErrorEarly: writer.WriteHeader(http.StatusTooEarly) case ErrorGone: writer.WriteHeader(http.StatusGone) case ErrorFailed: writer.WriteHeader(http.StatusFailedDependency) default: writer.WriteHeader(http.StatusInternalServerError) } return true } func (server *httpServer) specSuiteWillBegin(writer http.ResponseWriter, request *http.Request) { var report types.Report if !server.decode(writer, request, &report) { return } server.handleError(server.handler.SpecSuiteWillBegin(report, voidReceiver), writer) } func (server *httpServer) didRun(writer http.ResponseWriter, request *http.Request) { var report types.SpecReport if !server.decode(writer, request, &report) { return } server.handleError(server.handler.DidRun(report, voidReceiver), writer) } func (server *httpServer) specSuiteDidEnd(writer http.ResponseWriter, request *http.Request) { var report types.Report if !server.decode(writer, request, &report) { return } 
server.handleError(server.handler.SpecSuiteDidEnd(report, voidReceiver), writer) } func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.Request) { output, err := io.ReadAll(request.Body) if err != nil { writer.WriteHeader(http.StatusInternalServerError) return } var n int server.handleError(server.handler.EmitOutput(output, &n), writer) } func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request *http.Request) { var report types.ProgressReport if !server.decode(writer, request, &report) { return } server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer) } func (server *httpServer) handleReportBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { var state types.SpecState if !server.decode(writer, request, &state) { return } server.handleError(server.handler.ReportBeforeSuiteCompleted(state, voidReceiver), writer) } func (server *httpServer) handleReportBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { var state types.SpecState if server.handleError(server.handler.ReportBeforeSuiteState(voidSender, &state), writer) { return } json.NewEncoder(writer).Encode(state) } func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { var beforeSuiteState BeforeSuiteState if !server.decode(writer, request, &beforeSuiteState) { return } server.handleError(server.handler.BeforeSuiteCompleted(beforeSuiteState, voidReceiver), writer) } func (server *httpServer) handleBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { var beforeSuiteState BeforeSuiteState if server.handleError(server.handler.BeforeSuiteState(voidSender, &beforeSuiteState), writer) { return } json.NewEncoder(writer).Encode(beforeSuiteState) } func (server *httpServer) handleHaveNonprimaryProcsFinished(writer http.ResponseWriter, request *http.Request) { if server.handleError(server.handler.HaveNonprimaryProcsFinished(voidSender, 
voidReceiver), writer) { return } writer.WriteHeader(http.StatusOK) } func (server *httpServer) handleAggregatedNonprimaryProcsReport(writer http.ResponseWriter, request *http.Request) { var aggregatedReport types.Report if server.handleError(server.handler.AggregatedNonprimaryProcsReport(voidSender, &aggregatedReport), writer) { return } json.NewEncoder(writer).Encode(aggregatedReport) } func (server *httpServer) handleCounter(writer http.ResponseWriter, request *http.Request) { var n int if server.handleError(server.handler.Counter(voidSender, &n), writer) { return } json.NewEncoder(writer).Encode(ParallelIndexCounter{Index: n}) } func (server *httpServer) handleUp(writer http.ResponseWriter, request *http.Request) { writer.WriteHeader(http.StatusOK) } func (server *httpServer) handleAbort(writer http.ResponseWriter, request *http.Request) { if request.Method == "GET" { var shouldAbort bool server.handler.ShouldAbort(voidSender, &shouldAbort) if shouldAbort { writer.WriteHeader(http.StatusGone) } else { writer.WriteHeader(http.StatusOK) } } else { server.handler.Abort(voidSender, voidReceiver) } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go
package parallel_support import ( "net/rpc" "time" "github.com/onsi/ginkgo/v2/types" ) type rpcClient struct { serverHost string client *rpc.Client } func newRPCClient(serverHost string) *rpcClient { return &rpcClient{ serverHost: serverHost, } } func (client *rpcClient) Connect() bool { var err error if client.client != nil { return true } client.client, err = rpc.DialHTTPPath("tcp", client.serverHost, "/") if err != nil { client.client = nil return false } return true } func (client *rpcClient) Close() error { return client.client.Close() } func (client *rpcClient) poll(method string, data interface{}) error { for { err := client.client.Call(method, voidSender, data) if err == nil { return nil } switch err.Error() { case ErrorEarly.Error(): time.Sleep(POLLING_INTERVAL) case ErrorGone.Error(): return ErrorGone case ErrorFailed.Error(): return ErrorFailed default: return err } } } func (client *rpcClient) PostSuiteWillBegin(report types.Report) error { return client.client.Call("Server.SpecSuiteWillBegin", report, voidReceiver) } func (client *rpcClient) PostDidRun(report types.SpecReport) error { return client.client.Call("Server.DidRun", report, voidReceiver) } func (client *rpcClient) PostSuiteDidEnd(report types.Report) error { return client.client.Call("Server.SpecSuiteDidEnd", report, voidReceiver) } func (client *rpcClient) Write(p []byte) (int, error) { var n int err := client.client.Call("Server.EmitOutput", p, &n) return n, err } func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) error { return client.client.Call("Server.EmitProgressReport", report, voidReceiver) } func (client *rpcClient) PostReportBeforeSuiteCompleted(state types.SpecState) error { return client.client.Call("Server.ReportBeforeSuiteCompleted", state, voidReceiver) } func (client *rpcClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) { var state types.SpecState err := client.poll("Server.ReportBeforeSuiteState", &state) if err == ErrorGone { 
return types.SpecStateFailed, nil } return state, err } func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { beforeSuiteState := BeforeSuiteState{ State: state, Data: data, } return client.client.Call("Server.BeforeSuiteCompleted", beforeSuiteState, voidReceiver) } func (client *rpcClient) BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) { var beforeSuiteState BeforeSuiteState err := client.poll("Server.BeforeSuiteState", &beforeSuiteState) if err == ErrorGone { return types.SpecStateInvalid, nil, types.GinkgoErrors.SynchronizedBeforeSuiteDisappearedOnProc1() } return beforeSuiteState.State, beforeSuiteState.Data, err } func (client *rpcClient) BlockUntilNonprimaryProcsHaveFinished() error { return client.poll("Server.HaveNonprimaryProcsFinished", voidReceiver) } func (client *rpcClient) BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) { var report types.Report err := client.poll("Server.AggregatedNonprimaryProcsReport", &report) if err == ErrorGone { return types.Report{}, types.GinkgoErrors.AggregatedReportUnavailableDueToNodeDisappearing() } return report, err } func (client *rpcClient) FetchNextCounter() (int, error) { var counter int err := client.client.Call("Server.Counter", voidSender, &counter) return counter, err } func (client *rpcClient) PostAbort() error { return client.client.Call("Server.Abort", voidSender, voidReceiver) } func (client *rpcClient) ShouldAbort() bool { var shouldAbort bool client.client.Call("Server.ShouldAbort", voidSender, &shouldAbort) return shouldAbort }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go
package parallel_support import ( "fmt" "io" "os" "time" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" ) type BeforeSuiteState struct { Data []byte State types.SpecState } type ParallelIndexCounter struct { Index int } var ErrorGone = fmt.Errorf("gone") var ErrorFailed = fmt.Errorf("failed") var ErrorEarly = fmt.Errorf("early") var POLLING_INTERVAL = 50 * time.Millisecond type Server interface { Start() Close() Address() string RegisterAlive(node int, alive func() bool) GetSuiteDone() chan interface{} GetOutputDestination() io.Writer SetOutputDestination(io.Writer) } type Client interface { Connect() bool Close() error PostSuiteWillBegin(report types.Report) error PostDidRun(report types.SpecReport) error PostSuiteDidEnd(report types.Report) error PostReportBeforeSuiteCompleted(state types.SpecState) error BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) BlockUntilNonprimaryProcsHaveFinished() error BlockUntilAggregatedNonprimaryProcsReport() (types.Report, error) FetchNextCounter() (int, error) PostAbort() error ShouldAbort() bool PostEmitProgressReport(report types.ProgressReport) error Write(p []byte) (int, error) } func NewServer(parallelTotal int, reporter reporters.Reporter) (Server, error) { if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" { return newHttpServer(parallelTotal, reporter) } else { return newRPCServer(parallelTotal, reporter) } } func NewClient(serverHost string) Client { if os.Getenv("GINKGO_PARALLEL_PROTOCOL") == "HTTP" { return newHttpClient(serverHost) } else { return newRPCClient(serverHost) } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go
package parallel_support import ( "io" "os" "sync" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" ) type Void struct{} var voidReceiver *Void = &Void{} var voidSender Void // ServerHandler is an RPC-compatible handler that is shared between the http server and the rpc server. // It handles all the business logic to avoid duplication between the two servers type ServerHandler struct { done chan interface{} outputDestination io.Writer reporter reporters.Reporter alives []func() bool lock *sync.Mutex beforeSuiteState BeforeSuiteState reportBeforeSuiteState types.SpecState parallelTotal int counter int counterLock *sync.Mutex shouldAbort bool numSuiteDidBegins int numSuiteDidEnds int aggregatedReport types.Report reportHoldingArea []types.SpecReport } func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler { return &ServerHandler{ reporter: reporter, lock: &sync.Mutex{}, counterLock: &sync.Mutex{}, alives: make([]func() bool, parallelTotal), beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid}, parallelTotal: parallelTotal, outputDestination: os.Stdout, done: make(chan interface{}), } } func (handler *ServerHandler) SpecSuiteWillBegin(report types.Report, _ *Void) error { handler.lock.Lock() defer handler.lock.Unlock() handler.numSuiteDidBegins += 1 // all summaries are identical, so it's fine to simply emit the last one of these if handler.numSuiteDidBegins == handler.parallelTotal { handler.reporter.SuiteWillBegin(report) for _, summary := range handler.reportHoldingArea { handler.reporter.WillRun(summary) handler.reporter.DidRun(summary) } handler.reportHoldingArea = nil } return nil } func (handler *ServerHandler) DidRun(report types.SpecReport, _ *Void) error { handler.lock.Lock() defer handler.lock.Unlock() if handler.numSuiteDidBegins == handler.parallelTotal { handler.reporter.WillRun(report) handler.reporter.DidRun(report) } else { handler.reportHoldingArea = 
append(handler.reportHoldingArea, report) } return nil } func (handler *ServerHandler) SpecSuiteDidEnd(report types.Report, _ *Void) error { handler.lock.Lock() defer handler.lock.Unlock() handler.numSuiteDidEnds += 1 if handler.numSuiteDidEnds == 1 { handler.aggregatedReport = report } else { handler.aggregatedReport = handler.aggregatedReport.Add(report) } if handler.numSuiteDidEnds == handler.parallelTotal { handler.reporter.SuiteDidEnd(handler.aggregatedReport) close(handler.done) } return nil } func (handler *ServerHandler) EmitOutput(output []byte, n *int) error { var err error *n, err = handler.outputDestination.Write(output) return err } func (handler *ServerHandler) EmitProgressReport(report types.ProgressReport, _ *Void) error { handler.lock.Lock() defer handler.lock.Unlock() handler.reporter.EmitProgressReport(report) return nil } func (handler *ServerHandler) registerAlive(proc int, alive func() bool) { handler.lock.Lock() defer handler.lock.Unlock() handler.alives[proc-1] = alive } func (handler *ServerHandler) procIsAlive(proc int) bool { handler.lock.Lock() defer handler.lock.Unlock() alive := handler.alives[proc-1] if alive == nil { return true } return alive() } func (handler *ServerHandler) haveNonprimaryProcsFinished() bool { for i := 2; i <= handler.parallelTotal; i++ { if handler.procIsAlive(i) { return false } } return true } func (handler *ServerHandler) ReportBeforeSuiteCompleted(reportBeforeSuiteState types.SpecState, _ *Void) error { handler.lock.Lock() defer handler.lock.Unlock() handler.reportBeforeSuiteState = reportBeforeSuiteState return nil } func (handler *ServerHandler) ReportBeforeSuiteState(_ Void, reportBeforeSuiteState *types.SpecState) error { proc1IsAlive := handler.procIsAlive(1) handler.lock.Lock() defer handler.lock.Unlock() if handler.reportBeforeSuiteState == types.SpecStateInvalid { if proc1IsAlive { return ErrorEarly } else { return ErrorGone } } *reportBeforeSuiteState = handler.reportBeforeSuiteState return nil } 
func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error { handler.lock.Lock() defer handler.lock.Unlock() handler.beforeSuiteState = beforeSuiteState return nil } func (handler *ServerHandler) BeforeSuiteState(_ Void, beforeSuiteState *BeforeSuiteState) error { proc1IsAlive := handler.procIsAlive(1) handler.lock.Lock() defer handler.lock.Unlock() if handler.beforeSuiteState.State == types.SpecStateInvalid { if proc1IsAlive { return ErrorEarly } else { return ErrorGone } } *beforeSuiteState = handler.beforeSuiteState return nil } func (handler *ServerHandler) HaveNonprimaryProcsFinished(_ Void, _ *Void) error { if handler.haveNonprimaryProcsFinished() { return nil } else { return ErrorEarly } } func (handler *ServerHandler) AggregatedNonprimaryProcsReport(_ Void, report *types.Report) error { if handler.haveNonprimaryProcsFinished() { handler.lock.Lock() defer handler.lock.Unlock() if handler.numSuiteDidEnds == handler.parallelTotal-1 { *report = handler.aggregatedReport return nil } else { return ErrorGone } } else { return ErrorEarly } } func (handler *ServerHandler) Counter(_ Void, counter *int) error { handler.counterLock.Lock() defer handler.counterLock.Unlock() *counter = handler.counter handler.counter++ return nil } func (handler *ServerHandler) Abort(_ Void, _ *Void) error { handler.lock.Lock() defer handler.lock.Unlock() handler.shouldAbort = true return nil } func (handler *ServerHandler) ShouldAbort(_ Void, shouldAbort *bool) error { handler.lock.Lock() defer handler.lock.Unlock() *shouldAbort = handler.shouldAbort return nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_server.go
/* The remote package provides the pieces to allow Ginkgo test suites to report to remote listeners. This is used, primarily, to enable streaming parallel test output but has, in principal, broader applications (e.g. streaming test output to a browser). */ package parallel_support import ( "io" "net" "net/http" "net/rpc" "github.com/onsi/ginkgo/v2/reporters" ) /* RPCServer spins up on an automatically selected port and listens for communication from the forwarding reporter. It then forwards that communication to attached reporters. */ type RPCServer struct { listener net.Listener handler *ServerHandler } //Create a new server, automatically selecting a port func newRPCServer(parallelTotal int, reporter reporters.Reporter) (*RPCServer, error) { listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { return nil, err } return &RPCServer{ listener: listener, handler: newServerHandler(parallelTotal, reporter), }, nil } //Start the server. You don't need to `go s.Start()`, just `s.Start()` func (server *RPCServer) Start() { rpcServer := rpc.NewServer() rpcServer.RegisterName("Server", server.handler) //register the handler's methods as the server httpServer := &http.Server{} httpServer.Handler = rpcServer go httpServer.Serve(server.listener) } //Stop the server func (server *RPCServer) Close() { server.listener.Close() } //The address the server can be reached it. Pass this into the `ForwardingReporter`. func (server *RPCServer) Address() string { return server.listener.Addr().String() } func (server *RPCServer) GetSuiteDone() chan interface{} { return server.handler.done } func (server *RPCServer) GetOutputDestination() io.Writer { return server.handler.outputDestination } func (server *RPCServer) SetOutputDestination(w io.Writer) { server.handler.outputDestination = w } func (server *RPCServer) RegisterAlive(node int, alive func() bool) { server.handler.registerAlive(node, alive) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go
package testingtproxy import ( "fmt" "io" "os" "github.com/onsi/ginkgo/v2/formatter" "github.com/onsi/ginkgo/v2/internal" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" ) type failFunc func(message string, callerSkip ...int) type skipFunc func(message string, callerSkip ...int) type cleanupFunc func(args ...any) type reportFunc func() types.SpecReport type addReportEntryFunc func(names string, args ...any) type ginkgoWriterInterface interface { io.Writer Print(a ...interface{}) Printf(format string, a ...interface{}) Println(a ...interface{}) } type ginkgoRecoverFunc func() type attachProgressReporterFunc func(func() string) func() func New(writer ginkgoWriterInterface, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, addReportEntry addReportEntryFunc, ginkgoRecover ginkgoRecoverFunc, attachProgressReporter attachProgressReporterFunc, randomSeed int64, parallelProcess int, parallelTotal int, noColor bool, offset int) *ginkgoTestingTProxy { return &ginkgoTestingTProxy{ fail: fail, offset: offset, writer: writer, skip: skip, cleanup: cleanup, report: report, addReportEntry: addReportEntry, ginkgoRecover: ginkgoRecover, attachProgressReporter: attachProgressReporter, randomSeed: randomSeed, parallelProcess: parallelProcess, parallelTotal: parallelTotal, f: formatter.NewWithNoColorBool(noColor), } } type ginkgoTestingTProxy struct { fail failFunc skip skipFunc cleanup cleanupFunc report reportFunc offset int writer ginkgoWriterInterface addReportEntry addReportEntryFunc ginkgoRecover ginkgoRecoverFunc attachProgressReporter attachProgressReporterFunc randomSeed int64 parallelProcess int parallelTotal int f formatter.Formatter } // basic testing.T support func (t *ginkgoTestingTProxy) Cleanup(f func()) { t.cleanup(f, internal.Offset(1)) } func (t *ginkgoTestingTProxy) Setenv(key, value string) { originalValue, exists := os.LookupEnv(key) if exists { t.cleanup(os.Setenv, key, originalValue, internal.Offset(1)) } else { 
t.cleanup(os.Unsetenv, key, internal.Offset(1)) } err := os.Setenv(key, value) if err != nil { t.fail(fmt.Sprintf("Failed to set environment variable: %v", err), 1) } } func (t *ginkgoTestingTProxy) Error(args ...interface{}) { t.fail(fmt.Sprintln(args...), t.offset) } func (t *ginkgoTestingTProxy) Errorf(format string, args ...interface{}) { t.fail(fmt.Sprintf(format, args...), t.offset) } func (t *ginkgoTestingTProxy) Fail() { t.fail("failed", t.offset) } func (t *ginkgoTestingTProxy) FailNow() { t.fail("failed", t.offset) } func (t *ginkgoTestingTProxy) Failed() bool { return t.report().Failed() } func (t *ginkgoTestingTProxy) Fatal(args ...interface{}) { t.fail(fmt.Sprintln(args...), t.offset) } func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) { t.fail(fmt.Sprintf(format, args...), t.offset) } func (t *ginkgoTestingTProxy) Helper() { types.MarkAsHelper(1) } func (t *ginkgoTestingTProxy) Log(args ...interface{}) { fmt.Fprintln(t.writer, args...) } func (t *ginkgoTestingTProxy) Logf(format string, args ...interface{}) { t.Log(fmt.Sprintf(format, args...)) } func (t *ginkgoTestingTProxy) Name() string { return t.report().FullText() } func (t *ginkgoTestingTProxy) Parallel() { // No-op } func (t *ginkgoTestingTProxy) Skip(args ...interface{}) { t.skip(fmt.Sprintln(args...), t.offset) } func (t *ginkgoTestingTProxy) SkipNow() { t.skip("skip", t.offset) } func (t *ginkgoTestingTProxy) Skipf(format string, args ...interface{}) { t.skip(fmt.Sprintf(format, args...), t.offset) } func (t *ginkgoTestingTProxy) Skipped() bool { return t.report().State.Is(types.SpecStateSkipped) } func (t *ginkgoTestingTProxy) TempDir() string { tmpDir, err := os.MkdirTemp("", "ginkgo") if err != nil { t.fail(fmt.Sprintf("Failed to create temporary directory: %v", err), 1) return "" } t.cleanup(os.RemoveAll, tmpDir) return tmpDir } // FullGinkgoTInterface func (t *ginkgoTestingTProxy) AddReportEntryVisibilityAlways(name string, args ...any) { finalArgs := 
[]any{internal.Offset(1), types.ReportEntryVisibilityAlways} t.addReportEntry(name, append(finalArgs, args...)...) } func (t *ginkgoTestingTProxy) AddReportEntryVisibilityFailureOrVerbose(name string, args ...any) { finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityFailureOrVerbose} t.addReportEntry(name, append(finalArgs, args...)...) } func (t *ginkgoTestingTProxy) AddReportEntryVisibilityNever(name string, args ...any) { finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityNever} t.addReportEntry(name, append(finalArgs, args...)...) } func (t *ginkgoTestingTProxy) Print(a ...any) { t.writer.Print(a...) } func (t *ginkgoTestingTProxy) Printf(format string, a ...any) { t.writer.Printf(format, a...) } func (t *ginkgoTestingTProxy) Println(a ...any) { t.writer.Println(a...) } func (t *ginkgoTestingTProxy) F(format string, args ...any) string { return t.f.F(format, args...) } func (t *ginkgoTestingTProxy) Fi(indentation uint, format string, args ...any) string { return t.f.Fi(indentation, format, args...) } func (t *ginkgoTestingTProxy) Fiw(indentation uint, maxWidth uint, format string, args ...any) string { return t.f.Fiw(indentation, maxWidth, format, args...) } func (t *ginkgoTestingTProxy) RenderTimeline() string { return reporters.RenderTimeline(t.report(), false) } func (t *ginkgoTestingTProxy) GinkgoRecover() { t.ginkgoRecover() } func (t *ginkgoTestingTProxy) DeferCleanup(args ...any) { finalArgs := []any{internal.Offset(1)} t.cleanup(append(finalArgs, args...)...) } func (t *ginkgoTestingTProxy) RandomSeed() int64 { return t.randomSeed } func (t *ginkgoTestingTProxy) ParallelProcess() int { return t.parallelProcess } func (t *ginkgoTestingTProxy) ParallelTotal() int { return t.parallelTotal } func (t *ginkgoTestingTProxy) AttachProgressReporter(f func() string) func() { return t.attachProgressReporter(f) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go
vendor/github.com/onsi/ginkgo/v2/config/deprecated.go
package config // GinkgoConfigType has been deprecated and its equivalent now lives in // the types package. You can no longer access Ginkgo configuration from the config // package. Instead use the DSL's GinkgoConfiguration() function to get copies of the // current configuration // // GinkgoConfigType is still here so custom V1 reporters do not result in a compilation error // It will be removed in a future minor release of Ginkgo type GinkgoConfigType = DeprecatedGinkgoConfigType type DeprecatedGinkgoConfigType struct { RandomSeed int64 RandomizeAllSpecs bool RegexScansFilePath bool FocusStrings []string SkipStrings []string SkipMeasurements bool FailOnPending bool FailFast bool FlakeAttempts int EmitSpecProgress bool DryRun bool DebugParallel bool ParallelNode int ParallelTotal int SyncHost string StreamHost string } // DefaultReporterConfigType has been deprecated and its equivalent now lives in // the types package. You can no longer access Ginkgo configuration from the config // package. Instead use the DSL's GinkgoConfiguration() function to get copies of the // current configuration // // DefaultReporterConfigType is still here so custom V1 reporters do not result in a compilation error // It will be removed in a future minor release of Ginkgo type DefaultReporterConfigType = DeprecatedDefaultReporterConfigType type DeprecatedDefaultReporterConfigType struct { NoColor bool SlowSpecThreshold float64 NoisyPendings bool NoisySkippings bool Succinct bool Verbose bool FullTrace bool ReportPassed bool ReportFile string } // Sadly there is no way to gracefully deprecate access to these global config variables. 
// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method // These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails type GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{} // Sadly there is no way to gracefully deprecate access to these global config variables. // Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method // These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails var GinkgoConfig = GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{} // Sadly there is no way to gracefully deprecate access to these global config variables. // Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method // These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails type DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{} // Sadly there is no way to gracefully deprecate access to these global config variables. // Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method // These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails var DefaultReporterConfig = DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/onsi/ginkgo/config/config.go
vendor/github.com/onsi/ginkgo/config/config.go
/* Ginkgo accepts a number of configuration options. These are documented [here](http://onsi.github.io/ginkgo/#the-ginkgo-cli) You can also learn more via ginkgo help or (I kid you not): go test -asdf */ package config import ( "flag" "time" "fmt" ) const VERSION = "1.16.5" type GinkgoConfigType struct { RandomSeed int64 RandomizeAllSpecs bool RegexScansFilePath bool FocusStrings []string SkipStrings []string SkipMeasurements bool FailOnPending bool FailFast bool FlakeAttempts int EmitSpecProgress bool DryRun bool DebugParallel bool ParallelNode int ParallelTotal int SyncHost string StreamHost string } var GinkgoConfig = GinkgoConfigType{} type DefaultReporterConfigType struct { NoColor bool SlowSpecThreshold float64 NoisyPendings bool NoisySkippings bool Succinct bool Verbose bool FullTrace bool ReportPassed bool ReportFile string } var DefaultReporterConfig = DefaultReporterConfigType{} func processPrefix(prefix string) string { if prefix != "" { prefix += "." } return prefix } type flagFunc func(string) func (f flagFunc) String() string { return "" } func (f flagFunc) Set(s string) error { f(s); return nil } func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { prefix = processPrefix(prefix) flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.") flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. 
By default, ginkgo only randomizes the top level Describe, Context and When groups.") flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.") flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.") flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.") flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.") flagSet.Var(flagFunc(flagFocus), prefix+"focus", "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed.") flagSet.Var(flagFunc(flagSkip), prefix+"skip", "If set, ginkgo will only run specs that do not match this regular expression. Can be specified multiple times, values are ORed.") flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).") flagSet.IntVar(&(GinkgoConfig.FlakeAttempts), prefix+"flakeAttempts", 1, "Make up to this many attempts to run each spec. Please note that if any of the attempts succeed, the suite will not be failed. But any failures will still be recorded.") flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.") flagSet.BoolVar(&(GinkgoConfig.DebugParallel), prefix+"debug", false, "If set, ginkgo will emit node output to files when running in parallel.") if includeParallelFlags { flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. 
For running specs in parallel.") flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.") flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.") flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.") } flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.") flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.") flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.") flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.") flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.") flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report") flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs") flagSet.BoolVar(&(DefaultReporterConfig.ReportPassed), prefix+"reportPassed", false, "If set, default reporter prints out captured output of passed tests.") flagSet.StringVar(&(DefaultReporterConfig.ReportFile), prefix+"reportFile", "", "Override the default reporter output file path.") } func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string { prefix = processPrefix(prefix) result := make([]string, 0) if 
ginkgo.RandomSeed > 0 { result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed)) } if ginkgo.RandomizeAllSpecs { result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix)) } if ginkgo.SkipMeasurements { result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix)) } if ginkgo.FailOnPending { result = append(result, fmt.Sprintf("--%sfailOnPending", prefix)) } if ginkgo.FailFast { result = append(result, fmt.Sprintf("--%sfailFast", prefix)) } if ginkgo.DryRun { result = append(result, fmt.Sprintf("--%sdryRun", prefix)) } for _, s := range ginkgo.FocusStrings { result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, s)) } for _, s := range ginkgo.SkipStrings { result = append(result, fmt.Sprintf("--%sskip=%s", prefix, s)) } if ginkgo.FlakeAttempts > 1 { result = append(result, fmt.Sprintf("--%sflakeAttempts=%d", prefix, ginkgo.FlakeAttempts)) } if ginkgo.EmitSpecProgress { result = append(result, fmt.Sprintf("--%sprogress", prefix)) } if ginkgo.DebugParallel { result = append(result, fmt.Sprintf("--%sdebug", prefix)) } if ginkgo.ParallelNode != 0 { result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode)) } if ginkgo.ParallelTotal != 0 { result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal)) } if ginkgo.StreamHost != "" { result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost)) } if ginkgo.SyncHost != "" { result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost)) } if ginkgo.RegexScansFilePath { result = append(result, fmt.Sprintf("--%sregexScansFilePath", prefix)) } if reporter.NoColor { result = append(result, fmt.Sprintf("--%snoColor", prefix)) } if reporter.SlowSpecThreshold > 0 { result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold)) } if !reporter.NoisyPendings { result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix)) } 
if !reporter.NoisySkippings { result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix)) } if reporter.Verbose { result = append(result, fmt.Sprintf("--%sv", prefix)) } if reporter.Succinct { result = append(result, fmt.Sprintf("--%ssuccinct", prefix)) } if reporter.FullTrace { result = append(result, fmt.Sprintf("--%strace", prefix)) } if reporter.ReportPassed { result = append(result, fmt.Sprintf("--%sreportPassed", prefix)) } if reporter.ReportFile != "" { result = append(result, fmt.Sprintf("--%sreportFile=%s", prefix, reporter.ReportFile)) } return result } // flagFocus implements the -focus flag. func flagFocus(arg string) { if arg != "" { GinkgoConfig.FocusStrings = append(GinkgoConfig.FocusStrings, arg) } } // flagSkip implements the -skip flag. func flagSkip(arg string) { if arg != "" { GinkgoConfig.SkipStrings = append(GinkgoConfig.SkipStrings, arg) } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/tree.go
vendor/github.com/gabriel-vasile/mimetype/tree.go
package mimetype import ( "sync" "github.com/gabriel-vasile/mimetype/internal/magic" ) // mimetype stores the list of MIME types in a tree structure with // "application/octet-stream" at the root of the hierarchy. The hierarchy // approach minimizes the number of checks that need to be done on the input // and allows for more precise results once the base type of file has been // identified. // // root is a detector which passes for any slice of bytes. // When a detector passes the check, the children detectors // are tried in order to find a more accurate MIME type. var root = newMIME("application/octet-stream", "", func([]byte, uint32) bool { return true }, xpm, sevenZ, zip, pdf, fdf, ole, ps, psd, p7s, ogg, png, jpg, jxl, jp2, jpx, jpm, jxs, gif, webp, exe, elf, ar, tar, xar, bz2, fits, tiff, bmp, ico, mp3, flac, midi, ape, musePack, amr, wav, aiff, au, mpeg, quickTime, mp4, webM, avi, flv, mkv, asf, aac, voc, m3u, rmvb, gzip, class, swf, crx, ttf, woff, woff2, otf, ttc, eot, wasm, shx, dbf, dcm, rar, djvu, mobi, lit, bpg, cbor, sqlite3, dwg, nes, lnk, macho, qcp, icns, hdr, mrc, mdb, accdb, zstd, cab, rpm, xz, lzip, torrent, cpio, tzif, xcf, pat, gbr, glb, cabIS, jxr, parquet, // Keep text last because it is the slowest check. text, ) // errMIME is returned from Detect functions when err is not nil. // Detect could return root for erroneous cases, but it needs to lock mu in order to do so. // errMIME is same as root but it does not require locking. var errMIME = newMIME("application/octet-stream", "", func([]byte, uint32) bool { return false }) // mu guards access to the root MIME tree. Access to root must be synchronized with this lock. var mu = &sync.RWMutex{} // The list of nodes appended to the root node. 
// Every leaf and inner node of the detection tree. Each entry pairs a MIME
// type string and canonical extension with its magic-number detector; entries
// that list other vars as trailing arguments declare those as children, and
// .alias(...) records alternative names accepted by MIME.Is and Lookup.
var (
	xz   = newMIME("application/x-xz", ".xz", magic.Xz)
	gzip = newMIME("application/gzip", ".gz", magic.Gzip).alias(
		"application/x-gzip", "application/x-gunzip", "application/gzipped",
		"application/gzip-compressed", "application/x-gzip-compressed",
		"gzip/document")
	sevenZ = newMIME("application/x-7z-compressed", ".7z", magic.SevenZ)
	// APK must be checked before JAR because APK is a subset of JAR.
	// This means APK should be a child of JAR detector, but in practice,
	// the decisive signature for JAR might be located at the end of the file
	// and not reachable because of library readLimit.
	zip = newMIME("application/zip", ".zip", magic.Zip,
		xlsx, docx, pptx, epub, apk, jar, odt, ods, odp, odg, odf, odc, sxc).
		alias("application/x-zip", "application/x-zip-compressed")
	tar = newMIME("application/x-tar", ".tar", magic.Tar)
	xar = newMIME("application/x-xar", ".xar", magic.Xar)
	bz2 = newMIME("application/x-bzip2", ".bz2", magic.Bz2)
	pdf = newMIME("application/pdf", ".pdf", magic.Pdf).
		alias("application/x-pdf")
	fdf  = newMIME("application/vnd.fdf", ".fdf", magic.Fdf)
	xlsx = newMIME("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", ".xlsx", magic.Xlsx)
	docx = newMIME("application/vnd.openxmlformats-officedocument.wordprocessingml.document", ".docx", magic.Docx)
	pptx = newMIME("application/vnd.openxmlformats-officedocument.presentationml.presentation", ".pptx", magic.Pptx)
	epub = newMIME("application/epub+zip", ".epub", magic.Epub)
	jar  = newMIME("application/jar", ".jar", magic.Jar)
	apk  = newMIME("application/vnd.android.package-archive", ".apk", magic.APK)
	ole  = newMIME("application/x-ole-storage", "", magic.Ole, msi, aaf, msg, xls, pub, ppt, doc)
	msi  = newMIME("application/x-ms-installer", ".msi", magic.Msi).
		alias("application/x-windows-installer", "application/x-msi")
	aaf = newMIME("application/octet-stream", ".aaf", magic.Aaf)
	doc = newMIME("application/msword", ".doc", magic.Doc).
		alias("application/vnd.ms-word")
	ppt = newMIME("application/vnd.ms-powerpoint", ".ppt", magic.Ppt).
		alias("application/mspowerpoint")
	pub = newMIME("application/vnd.ms-publisher", ".pub", magic.Pub)
	xls = newMIME("application/vnd.ms-excel", ".xls", magic.Xls).
		alias("application/msexcel")
	msg  = newMIME("application/vnd.ms-outlook", ".msg", magic.Msg)
	ps   = newMIME("application/postscript", ".ps", magic.Ps)
	fits = newMIME("application/fits", ".fits", magic.Fits)
	ogg  = newMIME("application/ogg", ".ogg", magic.Ogg, oggAudio, oggVideo).
		alias("application/x-ogg")
	oggAudio = newMIME("audio/ogg", ".oga", magic.OggAudio)
	oggVideo = newMIME("video/ogg", ".ogv", magic.OggVideo)
	// Text formats; children of text are refined by their own detectors.
	text = newMIME("text/plain", ".txt", magic.Text,
		html, svg, xml, php, js, lua, perl, python, json, ndJSON, rtf, srt,
		tcl, csv, tsv, vCard, iCalendar, warc, vtt)
	xml = newMIME("text/xml", ".xml", magic.XML,
		rss, atom, x3d, kml, xliff, collada, gml, gpx, tcx, amf, threemf,
		xfdf, owl2).
		alias("application/xml")
	json    = newMIME("application/json", ".json", magic.JSON, geoJSON, har)
	har     = newMIME("application/json", ".har", magic.HAR)
	csv     = newMIME("text/csv", ".csv", magic.Csv)
	tsv     = newMIME("text/tab-separated-values", ".tsv", magic.Tsv)
	geoJSON = newMIME("application/geo+json", ".geojson", magic.GeoJSON)
	ndJSON  = newMIME("application/x-ndjson", ".ndjson", magic.NdJSON)
	html    = newMIME("text/html", ".html", magic.HTML)
	php     = newMIME("text/x-php", ".php", magic.Php)
	rtf     = newMIME("text/rtf", ".rtf", magic.Rtf).alias("application/rtf")
	js      = newMIME("text/javascript", ".js", magic.Js).
		alias("application/x-javascript", "application/javascript")
	srt = newMIME("application/x-subrip", ".srt", magic.Srt).
		alias("application/x-srt", "text/x-srt")
	vtt    = newMIME("text/vtt", ".vtt", magic.Vtt)
	lua    = newMIME("text/x-lua", ".lua", magic.Lua)
	perl   = newMIME("text/x-perl", ".pl", magic.Perl)
	python = newMIME("text/x-python", ".py", magic.Python).
		alias("text/x-script.python", "application/x-python")
	tcl = newMIME("text/x-tcl", ".tcl", magic.Tcl).
		alias("application/x-tcl")
	vCard     = newMIME("text/vcard", ".vcf", magic.VCard)
	iCalendar = newMIME("text/calendar", ".ics", magic.ICalendar)
	svg       = newMIME("image/svg+xml", ".svg", magic.Svg)
	rss       = newMIME("application/rss+xml", ".rss", magic.Rss).
		alias("text/rss")
	owl2    = newMIME("application/owl+xml", ".owl", magic.Owl2)
	atom    = newMIME("application/atom+xml", ".atom", magic.Atom)
	x3d     = newMIME("model/x3d+xml", ".x3d", magic.X3d)
	kml     = newMIME("application/vnd.google-earth.kml+xml", ".kml", magic.Kml)
	xliff   = newMIME("application/x-xliff+xml", ".xlf", magic.Xliff)
	collada = newMIME("model/vnd.collada+xml", ".dae", magic.Collada)
	gml     = newMIME("application/gml+xml", ".gml", magic.Gml)
	gpx     = newMIME("application/gpx+xml", ".gpx", magic.Gpx)
	tcx     = newMIME("application/vnd.garmin.tcx+xml", ".tcx", magic.Tcx)
	amf     = newMIME("application/x-amf", ".amf", magic.Amf)
	threemf = newMIME("application/vnd.ms-package.3dmanufacturing-3dmodel+xml", ".3mf", magic.Threemf)
	// Image formats.
	png  = newMIME("image/png", ".png", magic.Png, apng)
	apng = newMIME("image/vnd.mozilla.apng", ".png", magic.Apng)
	jpg  = newMIME("image/jpeg", ".jpg", magic.Jpg)
	jxl  = newMIME("image/jxl", ".jxl", magic.Jxl)
	jp2  = newMIME("image/jp2", ".jp2", magic.Jp2)
	jpx  = newMIME("image/jpx", ".jpf", magic.Jpx)
	jpm  = newMIME("image/jpm", ".jpm", magic.Jpm).
		alias("video/jpm")
	jxs  = newMIME("image/jxs", ".jxs", magic.Jxs)
	xpm  = newMIME("image/x-xpixmap", ".xpm", magic.Xpm)
	bpg  = newMIME("image/bpg", ".bpg", magic.Bpg)
	gif  = newMIME("image/gif", ".gif", magic.Gif)
	webp = newMIME("image/webp", ".webp", magic.Webp)
	tiff = newMIME("image/tiff", ".tiff", magic.Tiff)
	bmp  = newMIME("image/bmp", ".bmp", magic.Bmp).
		alias("image/x-bmp", "image/x-ms-bmp")
	ico  = newMIME("image/x-icon", ".ico", magic.Ico)
	icns = newMIME("image/x-icns", ".icns", magic.Icns)
	psd  = newMIME("image/vnd.adobe.photoshop", ".psd", magic.Psd).
		alias("image/x-psd", "application/photoshop")
	heic    = newMIME("image/heic", ".heic", magic.Heic)
	heicSeq = newMIME("image/heic-sequence", ".heic", magic.HeicSequence)
	heif    = newMIME("image/heif", ".heif", magic.Heif)
	heifSeq = newMIME("image/heif-sequence", ".heif", magic.HeifSequence)
	hdr     = newMIME("image/vnd.radiance", ".hdr", magic.Hdr)
	avif    = newMIME("image/avif", ".avif", magic.AVIF)
	// Audio formats.
	mp3 = newMIME("audio/mpeg", ".mp3", magic.Mp3).
		alias("audio/x-mpeg", "audio/mp3")
	flac = newMIME("audio/flac", ".flac", magic.Flac)
	midi = newMIME("audio/midi", ".midi", magic.Midi).
		alias("audio/mid", "audio/sp-midi", "audio/x-mid", "audio/x-midi")
	ape      = newMIME("audio/ape", ".ape", magic.Ape)
	musePack = newMIME("audio/musepack", ".mpc", magic.MusePack)
	wav      = newMIME("audio/wav", ".wav", magic.Wav).
		alias("audio/x-wav", "audio/vnd.wave", "audio/wave")
	aiff = newMIME("audio/aiff", ".aiff", magic.Aiff).alias("audio/x-aiff")
	au   = newMIME("audio/basic", ".au", magic.Au)
	amr  = newMIME("audio/amr", ".amr", magic.Amr).
		alias("audio/amr-nb")
	aac  = newMIME("audio/aac", ".aac", magic.AAC)
	voc  = newMIME("audio/x-unknown", ".voc", magic.Voc)
	aMp4 = newMIME("audio/mp4", ".mp4", magic.AMp4).
		alias("audio/x-mp4a")
	m4a = newMIME("audio/x-m4a", ".m4a", magic.M4a)
	m3u = newMIME("application/vnd.apple.mpegurl", ".m3u", magic.M3u).
		alias("audio/mpegurl")
	// Video formats; mp4 is the container parent for many ISO-BMFF types.
	m4v = newMIME("video/x-m4v", ".m4v", magic.M4v)
	mj2 = newMIME("video/mj2", ".mj2", magic.Mj2)
	dvb = newMIME("video/vnd.dvb.file", ".dvb", magic.Dvb)
	mp4 = newMIME("video/mp4", ".mp4", magic.Mp4,
		avif, threeGP, threeG2, aMp4, mqv, m4a, m4v, heic, heicSeq, heif,
		heifSeq, mj2, dvb)
	webM = newMIME("video/webm", ".webm", magic.WebM).
		alias("audio/webm")
	mpeg      = newMIME("video/mpeg", ".mpeg", magic.Mpeg)
	quickTime = newMIME("video/quicktime", ".mov", magic.QuickTime)
	mqv       = newMIME("video/quicktime", ".mqv", magic.Mqv)
	threeGP   = newMIME("video/3gpp", ".3gp", magic.ThreeGP).
		alias("video/3gp", "audio/3gpp")
	threeG2 = newMIME("video/3gpp2", ".3g2", magic.ThreeG2).
		alias("video/3g2", "audio/3gpp2")
	avi = newMIME("video/x-msvideo", ".avi", magic.Avi).
		alias("video/avi", "video/msvideo")
	flv = newMIME("video/x-flv", ".flv", magic.Flv)
	mkv = newMIME("video/x-matroska", ".mkv", magic.Mkv)
	asf = newMIME("video/x-ms-asf", ".asf", magic.Asf).
		alias("video/asf", "video/x-ms-wmv")
	rmvb  = newMIME("application/vnd.rn-realmedia-vbr", ".rmvb", magic.Rmvb)
	class = newMIME("application/x-java-applet", ".class", magic.Class)
	swf   = newMIME("application/x-shockwave-flash", ".swf", magic.SWF)
	crx   = newMIME("application/x-chrome-extension", ".crx", magic.CRX)
	// Font formats.
	ttf = newMIME("font/ttf", ".ttf", magic.Ttf).
		alias("font/sfnt", "application/x-font-ttf", "application/font-sfnt")
	woff    = newMIME("font/woff", ".woff", magic.Woff)
	woff2   = newMIME("font/woff2", ".woff2", magic.Woff2)
	otf     = newMIME("font/otf", ".otf", magic.Otf)
	ttc     = newMIME("font/collection", ".ttc", magic.Ttc)
	eot     = newMIME("application/vnd.ms-fontobject", ".eot", magic.Eot)
	wasm    = newMIME("application/wasm", ".wasm", magic.Wasm)
	shp     = newMIME("application/vnd.shp", ".shp", magic.Shp)
	shx     = newMIME("application/vnd.shx", ".shx", magic.Shx, shp)
	dbf     = newMIME("application/x-dbf", ".dbf", magic.Dbf)
	exe     = newMIME("application/vnd.microsoft.portable-executable", ".exe", magic.Exe)
	elf     = newMIME("application/x-elf", "", magic.Elf, elfObj, elfExe, elfLib, elfDump)
	elfObj  = newMIME("application/x-object", "", magic.ElfObj)
	elfExe  = newMIME("application/x-executable", "", magic.ElfExe)
	elfLib  = newMIME("application/x-sharedlib", ".so", magic.ElfLib)
	elfDump = newMIME("application/x-coredump", "", magic.ElfDump)
	ar      = newMIME("application/x-archive", ".a", magic.Ar, deb).
		alias("application/x-unix-archive")
	deb = newMIME("application/vnd.debian.binary-package", ".deb", magic.Deb)
	rpm = newMIME("application/x-rpm", ".rpm", magic.RPM)
	dcm = newMIME("application/dicom", ".dcm", magic.Dcm)
	// OpenDocument formats; each document type parents its template variant.
	odt = newMIME("application/vnd.oasis.opendocument.text", ".odt", magic.Odt, ott).
		alias("application/x-vnd.oasis.opendocument.text")
	ott = newMIME("application/vnd.oasis.opendocument.text-template", ".ott", magic.Ott).
		alias("application/x-vnd.oasis.opendocument.text-template")
	ods = newMIME("application/vnd.oasis.opendocument.spreadsheet", ".ods", magic.Ods, ots).
		alias("application/x-vnd.oasis.opendocument.spreadsheet")
	ots = newMIME("application/vnd.oasis.opendocument.spreadsheet-template", ".ots", magic.Ots).
		alias("application/x-vnd.oasis.opendocument.spreadsheet-template")
	odp = newMIME("application/vnd.oasis.opendocument.presentation", ".odp", magic.Odp, otp).
		alias("application/x-vnd.oasis.opendocument.presentation")
	otp = newMIME("application/vnd.oasis.opendocument.presentation-template", ".otp", magic.Otp).
		alias("application/x-vnd.oasis.opendocument.presentation-template")
	odg = newMIME("application/vnd.oasis.opendocument.graphics", ".odg", magic.Odg, otg).
		alias("application/x-vnd.oasis.opendocument.graphics")
	otg = newMIME("application/vnd.oasis.opendocument.graphics-template", ".otg", magic.Otg).
		alias("application/x-vnd.oasis.opendocument.graphics-template")
	odf = newMIME("application/vnd.oasis.opendocument.formula", ".odf", magic.Odf).
		alias("application/x-vnd.oasis.opendocument.formula")
	odc = newMIME("application/vnd.oasis.opendocument.chart", ".odc", magic.Odc).
		alias("application/x-vnd.oasis.opendocument.chart")
	sxc = newMIME("application/vnd.sun.xml.calc", ".sxc", magic.Sxc)
	rar = newMIME("application/x-rar-compressed", ".rar", magic.RAR).
		alias("application/x-rar")
	djvu    = newMIME("image/vnd.djvu", ".djvu", magic.DjVu)
	mobi    = newMIME("application/x-mobipocket-ebook", ".mobi", magic.Mobi)
	lit     = newMIME("application/x-ms-reader", ".lit", magic.Lit)
	sqlite3 = newMIME("application/vnd.sqlite3", ".sqlite", magic.Sqlite).
		alias("application/x-sqlite3")
	dwg = newMIME("image/vnd.dwg", ".dwg", magic.Dwg).
		alias("image/x-dwg", "application/acad", "application/x-acad",
			"application/autocad_dwg", "application/dwg", "application/x-dwg",
			"application/x-autocad", "drawing/dwg")
	warc    = newMIME("application/warc", ".warc", magic.Warc)
	nes     = newMIME("application/vnd.nintendo.snes.rom", ".nes", magic.Nes)
	lnk     = newMIME("application/x-ms-shortcut", ".lnk", magic.Lnk)
	macho   = newMIME("application/x-mach-binary", ".macho", magic.MachO)
	qcp     = newMIME("audio/qcelp", ".qcp", magic.Qcp)
	mrc     = newMIME("application/marc", ".mrc", magic.Marc)
	mdb     = newMIME("application/x-msaccess", ".mdb", magic.MsAccessMdb)
	accdb   = newMIME("application/x-msaccess", ".accdb", magic.MsAccessAce)
	zstd    = newMIME("application/zstd", ".zst", magic.Zstd)
	cab     = newMIME("application/vnd.ms-cab-compressed", ".cab", magic.Cab)
	cabIS   = newMIME("application/x-installshield", ".cab", magic.InstallShieldCab)
	lzip    = newMIME("application/lzip", ".lz", magic.Lzip).alias("application/x-lzip")
	torrent = newMIME("application/x-bittorrent", ".torrent", magic.Torrent)
	cpio    = newMIME("application/x-cpio", ".cpio", magic.Cpio)
	tzif    = newMIME("application/tzif", "", magic.TzIf)
	p7s     = newMIME("application/pkcs7-signature", ".p7s", magic.P7s)
	xcf     = newMIME("image/x-xcf", ".xcf", magic.Xcf)
	pat     = newMIME("image/x-gimp-pat", ".pat", magic.Pat)
	gbr     = newMIME("image/x-gimp-gbr", ".gbr", magic.Gbr)
	xfdf    = newMIME("application/vnd.adobe.xfdf", ".xfdf", magic.Xfdf)
	glb     = newMIME("model/gltf-binary", ".glb", magic.Glb)
	jxr     = newMIME("image/jxr", ".jxr", magic.Jxr).alias("image/vnd.ms-photo")
	parquet = newMIME("application/vnd.apache.parquet", ".parquet", magic.Par1).
		alias("application/x-parquet")
	cbor = newMIME("application/cbor", ".cbor", magic.CBOR)
)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/mime.go
vendor/github.com/gabriel-vasile/mimetype/mime.go
package mimetype

import (
	"mime"

	"github.com/gabriel-vasile/mimetype/internal/charset"
	"github.com/gabriel-vasile/mimetype/internal/magic"
)

// MIME struct holds information about a file format: the string representation
// of the MIME type, the extension and the parent file format.
type MIME struct {
	mime      string   // canonical "type/subtype" representation
	aliases   []string // alternative names accepted by Is and Lookup
	extension string   // canonical extension, including the leading dot
	// detector receives the raw input and a limit for the number of bytes it is
	// allowed to check. It returns whether the input matches a signature or not.
	detector magic.Detector
	children []*MIME // more specific formats tried, in order, after m matches
	parent   *MIME   // nil only for the root of the hierarchy
}

// String returns the string representation of the MIME type, e.g., "application/zip".
func (m *MIME) String() string {
	return m.mime
}

// Extension returns the file extension associated with the MIME type.
// It includes the leading dot, as in ".html". When the file format does not
// have an extension, the empty string is returned.
func (m *MIME) Extension() string {
	return m.extension
}

// Parent returns the parent MIME type from the hierarchy.
// Each MIME type has a non-nil parent, except for the root MIME type.
//
// For example, the application/json and text/html MIME types have text/plain as
// their parent because they are text files who happen to contain JSON or HTML.
// Another example is the ZIP format, which is used as container
// for Microsoft Office files, EPUB files, JAR files, and others.
func (m *MIME) Parent() *MIME {
	return m.parent
}

// Is checks whether this MIME type, or any of its aliases, is equal to the
// expected MIME type. MIME type equality test is done on the "type/subtype"
// section, ignores any optional MIME parameters, ignores any leading and
// trailing whitespace, and is case insensitive.
func (m *MIME) Is(expectedMIME string) bool {
	// Parsing is needed because some detected MIME types contain parameters
	// that need to be stripped for the comparison.
	expectedMIME, _, _ = mime.ParseMediaType(expectedMIME)
	found, _, _ := mime.ParseMediaType(m.mime)

	if expectedMIME == found {
		return true
	}

	for _, alias := range m.aliases {
		if alias == expectedMIME {
			return true
		}
	}

	return false
}

// newMIME builds a MIME node and wires the provided children to it as their
// parent.
func newMIME(
	mime, extension string,
	detector magic.Detector,
	children ...*MIME) *MIME {
	m := &MIME{
		mime:      mime,
		extension: extension,
		detector:  detector,
		children:  children,
	}

	for _, c := range children {
		c.parent = m
	}

	return m
}

// alias records alternative names for m and returns m for chaining.
func (m *MIME) alias(aliases ...string) *MIME {
	m.aliases = aliases
	return m
}

// match does a depth-first search on the signature tree. It returns the deepest
// successful node for which all the children detection functions fail.
func (m *MIME) match(in []byte, readLimit uint32) *MIME {
	for _, c := range m.children {
		if c.detector(in, readLimit) {
			return c.match(in, readLimit)
		}
	}

	needsCharset := map[string]func([]byte) string{
		"text/plain": charset.FromPlain,
		"text/html":  charset.FromHTML,
		"text/xml":   charset.FromXML,
	}
	// ps holds optional MIME parameters.
	ps := map[string]string{}
	if f, ok := needsCharset[m.mime]; ok {
		if cset := f(in); cset != "" {
			ps["charset"] = cset
		}
	}

	// Return a clone so callers never hold references into the shared tree.
	return m.cloneHierarchy(ps)
}

// flatten transforms an hierarchy of MIMEs into a slice of MIMEs.
func (m *MIME) flatten() []*MIME {
	out := []*MIME{m}
	for _, c := range m.children {
		out = append(out, c.flatten()...)
	}

	return out
}

// clone creates a new MIME with the provided optional MIME parameters.
// The clone carries no detector, children, or parent.
func (m *MIME) clone(ps map[string]string) *MIME {
	clonedMIME := m.mime
	if len(ps) > 0 {
		clonedMIME = mime.FormatMediaType(m.mime, ps)
	}

	return &MIME{
		mime:      clonedMIME,
		aliases:   m.aliases,
		extension: m.extension,
	}
}

// cloneHierarchy creates a clone of m and all its ancestors. The optional MIME
// parameters are set on the last child of the hierarchy.
func (m *MIME) cloneHierarchy(ps map[string]string) *MIME { ret := m.clone(ps) lastChild := ret for p := m.Parent(); p != nil; p = p.Parent() { pClone := p.clone(nil) lastChild.parent = pClone lastChild = pClone } return ret } func (m *MIME) lookup(mime string) *MIME { for _, n := range append(m.aliases, m.mime) { if n == mime { return m } } for _, c := range m.children { if m := c.lookup(mime); m != nil { return m } } return nil } // Extend adds detection for a sub-format. The detector is a function // returning true when the raw input file satisfies a signature. // The sub-format will be detected if all the detectors in the parent chain return true. // The extension should include the leading dot, as in ".html". func (m *MIME) Extend(detector func(raw []byte, limit uint32) bool, mime, extension string, aliases ...string) { c := &MIME{ mime: mime, extension: extension, detector: detector, parent: m, aliases: aliases, } mu.Lock() m.children = append([]*MIME{c}, m.children...) mu.Unlock() }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/mimetype.go
vendor/github.com/gabriel-vasile/mimetype/mimetype.go
// Package mimetype uses magic number signatures to detect the MIME type of a file.
//
// File formats are stored in a hierarchy with application/octet-stream at its root.
// For example, the hierarchy for HTML format is application/octet-stream ->
// text/plain -> text/html.
package mimetype

import (
	"io"
	"mime"
	"os"
	"sync/atomic"
)

// defaultLimit is the number of input bytes inspected unless SetLimit is called.
var defaultLimit uint32 = 3072

// readLimit is the maximum number of bytes from the input used when detecting.
var readLimit uint32 = defaultLimit

// Detect returns the MIME type found from the provided byte slice.
//
// The result is always a valid MIME type, with application/octet-stream
// returned when identification failed.
func Detect(in []byte) *MIME {
	// Using atomic because readLimit can be written at the same time in other goroutine.
	l := atomic.LoadUint32(&readLimit)
	if l > 0 && len(in) > int(l) {
		// A limit of 0 means "no limit"; otherwise cap the inspected bytes.
		in = in[:l]
	}
	mu.RLock()
	defer mu.RUnlock()
	return root.match(in, l)
}

// DetectReader returns the MIME type of the provided reader.
//
// The result is always a valid MIME type, with application/octet-stream
// returned when identification failed with or without an error.
// Any error returned is related to the reading from the input reader.
//
// DetectReader assumes the reader offset is at the start. If the input is an
// io.ReadSeeker you previously read from, it should be rewinded before detection:
//
//	reader.Seek(0, io.SeekStart)
func DetectReader(r io.Reader) (*MIME, error) {
	var in []byte
	var err error

	// Using atomic because readLimit can be written at the same time in other goroutine.
	l := atomic.LoadUint32(&readLimit)
	if l == 0 {
		// No limit configured: read the entire input.
		in, err = io.ReadAll(r)
		if err != nil {
			return errMIME, err
		}
	} else {
		var n int
		in = make([]byte, l)
		// io.UnexpectedEOF means len(r) < len(in). It is not an error in this case,
		// it just means the input file is smaller than the allocated bytes slice.
		n, err = io.ReadFull(r, in)
		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
			return errMIME, err
		}
		in = in[:n]
	}

	mu.RLock()
	defer mu.RUnlock()
	return root.match(in, l), nil
}

// DetectFile returns the MIME type of the provided file.
//
// The result is always a valid MIME type, with application/octet-stream
// returned when identification failed with or without an error.
// Any error returned is related to the opening and reading from the input file.
func DetectFile(path string) (*MIME, error) {
	f, err := os.Open(path)
	if err != nil {
		return errMIME, err
	}
	defer f.Close()

	return DetectReader(f)
}

// EqualsAny reports whether s MIME type is equal to any MIME type in mimes.
// MIME type equality test is done on the "type/subtype" section, ignores
// any optional MIME parameters, ignores any leading and trailing whitespace,
// and is case insensitive.
func EqualsAny(s string, mimes ...string) bool {
	s, _, _ = mime.ParseMediaType(s)
	for _, m := range mimes {
		m, _, _ = mime.ParseMediaType(m)
		if s == m {
			return true
		}
	}

	return false
}

// SetLimit sets the maximum number of bytes read from input when detecting the MIME type.
// Increasing the limit provides better detection for file formats which store
// their magical numbers towards the end of the file: docx, pptx, xlsx, etc.
// During detection data is read in a single block of size limit, i.e. it is not buffered.
// A limit of 0 means the whole input file will be used.
func SetLimit(limit uint32) {
	// Using atomic because readLimit can be read at the same time in other goroutine.
	atomic.StoreUint32(&readLimit, limit)
}

// Extend adds detection for other file formats.
// It is equivalent to calling Extend() on the root mime type "application/octet-stream".
func Extend(detector func(raw []byte, limit uint32) bool, mime, extension string, aliases ...string) {
	root.Extend(detector, mime, extension, aliases...)
}

// Lookup finds a MIME object by its string representation.
// The representation can be the main mime type, or any of its aliases.
func Lookup(mime string) *MIME {
	// The read lock is required because Extend can mutate the tree concurrently.
	mu.RLock()
	defer mu.RUnlock()
	return root.lookup(mime)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/internal/charset/charset.go
vendor/github.com/gabriel-vasile/mimetype/internal/charset/charset.go
package charset

import (
	"bytes"
	"encoding/xml"
	"strings"
	"unicode/utf8"

	"golang.org/x/net/html"
)

// Classification of byte values used by the plain-text heuristics below.
const (
	F = 0 /* character never appears in text */
	T = 1 /* character appears in plain ASCII text */
	I = 2 /* character appears in ISO-8859 text */
	X = 3 /* character appears in non-ISO extended ASCII (Mac, IBM PC) */
)

var (
	// boms maps byte-order marks to the charset they declare. Order matters:
	// longer BOMs are listed before their shorter prefixes (utf-32le vs utf-16le).
	boms = []struct {
		bom []byte
		enc string
	}{
		{[]byte{0xEF, 0xBB, 0xBF}, "utf-8"},
		{[]byte{0x00, 0x00, 0xFE, 0xFF}, "utf-32be"},
		{[]byte{0xFF, 0xFE, 0x00, 0x00}, "utf-32le"},
		{[]byte{0xFE, 0xFF}, "utf-16be"},
		{[]byte{0xFF, 0xFE}, "utf-16le"},
	}

	// Per-byte text classification table, taken from file(1):
	// https://github.com/file/file/blob/fa93fb9f7d21935f1c7644c47d2975d31f12b812/src/encoding.c#L241
	textChars = [256]byte{
		/*                  BEL BS HT LF    VT FF CR */
		F, F, F, F, F, F, F, T, T, T, T, T, T, T, F, F, /* 0x0X */
		/*                              ESC          */
		F, F, F, F, F, F, F, F, F, F, F, T, F, F, F, F, /* 0x1X */
		T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x2X */
		T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x3X */
		T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x4X */
		T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x5X */
		T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x6X */
		T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, F, /* 0x7X */
		/*                  NEL                      */
		X, X, X, X, X, T, X, X, X, X, X, X, X, X, X, X, /* 0x8X */
		X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, /* 0x9X */
		I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xaX */
		I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xbX */
		I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xcX */
		I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xdX */
		I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xeX */
		I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xfX */
	}
)

// FromBOM returns the charset declared in the BOM of content.
// It returns the empty string when no known BOM is present.
func FromBOM(content []byte) string {
	for _, b := range boms {
		if bytes.HasPrefix(content, b.bom) {
			return b.enc
		}
	}
	return ""
}

// FromPlain returns the charset of a plain text. It relies on BOM presence
// and it falls back on checking each byte in content.
func FromPlain(content []byte) string {
	if len(content) == 0 {
		return ""
	}
	if cset := FromBOM(content); cset != "" {
		return cset
	}
	origContent := content
	// Try to detect UTF-8.
	// First eliminate any partial rune at the end.
	for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- {
		b := content[i]
		if b < 0x80 {
			break
		}
		if utf8.RuneStart(b) {
			content = content[:i]
			break
		}
	}
	hasHighBit := false
	for _, c := range content {
		if c >= 0x80 {
			hasHighBit = true
			break
		}
	}
	if hasHighBit && utf8.Valid(content) {
		return "utf-8"
	}

	// ASCII is a subset of UTF8. Follow W3C recommendation and replace with UTF8.
	if ascii(origContent) {
		return "utf-8"
	}

	return latin(origContent)
}

// latin classifies content as ISO-8859-1 or Windows-1252, or returns "" when
// a byte that never appears in text is found.
func latin(content []byte) string {
	hasControlBytes := false
	for _, b := range content {
		t := textChars[b]
		if t != T && t != I {
			return ""
		}
		if b >= 0x80 && b <= 0x9F {
			hasControlBytes = true
		}
	}
	// Code range 0x80 to 0x9F is reserved for control characters in ISO-8859-1
	// (so-called C1 Controls). Windows 1252, however, has printable punctuation
	// characters in this range.
	if hasControlBytes {
		return "windows-1252"
	}
	return "iso-8859-1"
}

// ascii reports whether every byte of content is plain printable ASCII.
func ascii(content []byte) bool {
	for _, b := range content {
		if textChars[b] != T {
			return false
		}
	}
	return true
}

// FromXML returns the charset of an XML document. It relies on the XML
// header <?xml version="1.0" encoding="UTF-8"?> and falls back on the plain
// text content.
func FromXML(content []byte) string {
	if cset := fromXML(content); cset != "" {
		return cset
	}
	return FromPlain(content)
}

// fromXML extracts the encoding attribute from the XML declaration, or "".
func fromXML(content []byte) string {
	content = trimLWS(content)

	dec := xml.NewDecoder(bytes.NewReader(content))
	rawT, err := dec.RawToken()
	if err != nil {
		return ""
	}

	t, ok := rawT.(xml.ProcInst)
	if !ok {
		return ""
	}

	return strings.ToLower(xmlEncoding(string(t.Inst)))
}

// FromHTML returns the charset of an HTML document. It first looks if a BOM is
// present and if so uses it to determine the charset. If no BOM is present,
// it relies on the meta tag <meta charset="UTF-8"> and falls back on the
// plain text content.
func FromHTML(content []byte) string {
	if cset := FromBOM(content); cset != "" {
		return cset
	}
	if cset := fromHTML(content); cset != "" {
		return cset
	}
	return FromPlain(content)
}

// fromHTML scans meta tags for a charset declaration, following the HTML
// meta-prescan rules (http-equiv/content pragma or a charset attribute).
func fromHTML(content []byte) string {
	z := html.NewTokenizer(bytes.NewReader(content))
	for {
		switch z.Next() {
		case html.ErrorToken:
			return ""

		case html.StartTagToken, html.SelfClosingTagToken:
			tagName, hasAttr := z.TagName()
			if !bytes.Equal(tagName, []byte("meta")) {
				continue
			}
			attrList := make(map[string]bool)
			gotPragma := false

			const (
				dontKnow = iota
				doNeedPragma
				doNotNeedPragma
			)
			needPragma := dontKnow

			name := ""
			for hasAttr {
				var key, val []byte
				key, val, hasAttr = z.TagAttr()
				ks := string(key)
				if attrList[ks] {
					// Ignore duplicate attributes on the same tag.
					continue
				}
				attrList[ks] = true
				// Lowercase the value in place (ASCII only).
				for i, c := range val {
					if 'A' <= c && c <= 'Z' {
						val[i] = c + 0x20
					}
				}

				switch ks {
				case "http-equiv":
					if bytes.Equal(val, []byte("content-type")) {
						gotPragma = true
					}
				case "content":
					name = fromMetaElement(string(val))
					if name != "" {
						needPragma = doNeedPragma
					}
				case "charset":
					name = string(val)
					needPragma = doNotNeedPragma
				}
			}

			if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma {
				continue
			}

			// UTF-16 in a meta tag is treated as UTF-8 per the HTML standard.
			if strings.HasPrefix(name, "utf-16") {
				name = "utf-8"
			}

			return name
		}
	}
}

// fromMetaElement extracts the charset value from a meta content attribute,
// e.g. "text/html; charset=utf-8" -> "utf-8".
func fromMetaElement(s string) string {
	for s != "" {
		csLoc := strings.Index(s, "charset")
		if csLoc == -1 {
			return ""
		}
		s = s[csLoc+len("charset"):]
		s = strings.TrimLeft(s, " \t\n\f\r")
		if !strings.HasPrefix(s, "=") {
			continue
		}
		s = s[1:]
		s = strings.TrimLeft(s, " \t\n\f\r")
		if s == "" {
			return ""
		}
		if q := s[0]; q == '"' || q == '\'' {
			s = s[1:]
			closeQuote := strings.IndexRune(s, rune(q))
			if closeQuote == -1 {
				return ""
			}
			return s[:closeQuote]
		}

		end := strings.IndexAny(s, "; \t\n\f\r")
		if end == -1 {
			end = len(s)
		}
		return s[:end]
	}
	return ""
}

// xmlEncoding extracts the quoted value of the encoding= parameter from an
// XML processing instruction, or "" when absent or malformed.
func xmlEncoding(s string) string {
	param := "encoding="
	idx := strings.Index(s, param)
	if idx == -1 {
		return ""
	}
	v := s[idx+len(param):]
	if v == "" {
		return ""
	}
	if v[0] != '\'' && v[0] != '"' {
		return ""
	}
	idx = strings.IndexRune(v[1:], rune(v[0]))
	if idx == -1 {
		return ""
	}
	return v[1 : idx+1]
}

// trimLWS trims whitespace from beginning of the input.
// TODO: find a way to call trimLWS once per detection instead of once in each
// detector which needs the trimmed input.
func trimLWS(in []byte) []byte {
	firstNonWS := 0
	for ; firstNonWS < len(in) && isWS(in[firstNonWS]); firstNonWS++ {
	}

	return in[firstNonWS:]
}

// isWS reports whether b is an XML/HTML linear whitespace byte.
func isWS(b byte) bool {
	return b == '\t' || b == '\n' || b == '\x0c' || b == '\r' || b == ' '
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go
vendor/github.com/gabriel-vasile/mimetype/internal/json/json.go
// Copyright (c) 2009 The Go Authors. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Package json provides a JSON value parser state machine. // This package is almost entirely copied from the Go stdlib. // Changes made to it permit users of the package to tell // if some slice of bytes is a valid beginning of a json string. 
package json

import (
	"fmt"
	"sync"
)

type (
	// scanStatus is the result code produced by each scanner step.
	scanStatus int
)

const (
	parseObjectKey   = iota // parsing object key (before colon)
	parseObjectValue        // parsing object value (after colon)
	parseArrayValue         // parsing array value

	scanContinue     scanStatus = iota // uninteresting byte
	scanBeginLiteral                   // end implied by next result != scanContinue
	scanBeginObject                    // begin object
	scanObjectKey                      // just finished object key (string)
	scanObjectValue                    // just finished non-last object value
	scanEndObject                      // end object (implies scanObjectValue if possible)
	scanBeginArray                     // begin array
	scanArrayValue                     // just finished array value
	scanEndArray                       // end array (implies scanArrayValue if possible)
	scanSkipSpace                      // space byte; can skip; known to be last "continue" result
	scanEnd                            // top-level value ended *before* this byte; known to be first "stop" result
	scanError                          // hit an error, scanner.err.

	// This limits the max nesting depth to prevent stack overflow.
	// This is permitted by https://tools.ietf.org/html/rfc7159#section-9
	maxNestingDepth = 10000
)

type (
	// scanner is the JSON scanning state machine. step is the current state
	// function; parseState is the stack of open composites; index counts the
	// bytes consumed so far.
	scanner struct {
		step       func(*scanner, byte) scanStatus
		parseState []int
		endTop     bool
		err        error
		index      int
	}
)

// scannerPool recycles scanner values between Scan calls.
var scannerPool = sync.Pool{
	New: func() any {
		return &scanner{}
	},
}

// newScanner returns a reset scanner taken from the pool.
func newScanner() *scanner {
	s := scannerPool.Get().(*scanner)
	s.reset()
	return s
}

// freeScanner returns s to the pool for reuse.
func freeScanner(s *scanner) {
	// Avoid hanging on to too much memory in extreme cases.
	if len(s.parseState) > 1024 {
		s.parseState = nil
	}
	scannerPool.Put(s)
}

// Scan returns the number of bytes scanned and if there was any error
// in trying to reach the end of data.
func Scan(data []byte) (int, error) {
	s := newScanner()
	defer freeScanner(s)
	_ = checkValid(data, s)
	return s.index, s.err
}

// checkValid verifies that data is valid JSON-encoded data.
// scan is passed in for use by checkValid to avoid an allocation.
func checkValid(data []byte, scan *scanner) error {
	for _, c := range data {
		scan.index++
		if scan.step(scan, c) == scanError {
			return scan.err
		}
	}
	if scan.eof() == scanError {
		return scan.err
	}
	return nil
}

// isSpace reports whether c is a JSON whitespace byte.
func isSpace(c byte) bool {
	return c == ' ' || c == '\t' || c == '\r' || c == '\n'
}

// reset prepares the scanner for a fresh input, keeping parseState capacity.
func (s *scanner) reset() {
	s.step = stateBeginValue
	s.parseState = s.parseState[0:0]
	s.err = nil
	s.endTop = false
	s.index = 0
}

// eof tells the scanner that the end of input has been reached.
// It returns a scan status just as s.step does.
func (s *scanner) eof() scanStatus {
	if s.err != nil {
		return scanError
	}
	if s.endTop {
		return scanEnd
	}
	// Feeding a trailing space lets a pending literal terminate cleanly.
	s.step(s, ' ')
	if s.endTop {
		return scanEnd
	}
	if s.err == nil {
		s.err = fmt.Errorf("unexpected end of JSON input")
	}
	return scanError
}

// pushParseState pushes a new parse state p onto the parse stack.
// an error state is returned if maxNestingDepth was exceeded, otherwise successState is returned.
func (s *scanner) pushParseState(c byte, newParseState int, successState scanStatus) scanStatus {
	s.parseState = append(s.parseState, newParseState)
	if len(s.parseState) <= maxNestingDepth {
		return successState
	}
	return s.error(c, "exceeded max depth")
}

// popParseState pops a parse state (already obtained) off the stack
// and updates s.step accordingly.
func (s *scanner) popParseState() {
	n := len(s.parseState) - 1
	s.parseState = s.parseState[0:n]
	if n == 0 {
		s.step = stateEndTop
		s.endTop = true
	} else {
		s.step = stateEndValue
	}
}

// stateBeginValueOrEmpty is the state after reading `[`.
func stateBeginValueOrEmpty(s *scanner, c byte) scanStatus {
	if c <= ' ' && isSpace(c) {
		return scanSkipSpace
	}
	if c == ']' {
		// Empty array: close immediately.
		return stateEndValue(s, c)
	}
	return stateBeginValue(s, c)
}

// stateBeginValue is the state at the beginning of the input.
func stateBeginValue(s *scanner, c byte) scanStatus {
	// c <= ' ' is a cheap pre-filter before the full whitespace check.
	if c <= ' ' && isSpace(c) {
		return scanSkipSpace
	}
	switch c {
	case '{':
		s.step = stateBeginStringOrEmpty
		return s.pushParseState(c, parseObjectKey, scanBeginObject)
	case '[':
		s.step = stateBeginValueOrEmpty
		return s.pushParseState(c, parseArrayValue, scanBeginArray)
	case '"':
		s.step = stateInString
		return scanBeginLiteral
	case '-':
		s.step = stateNeg
		return scanBeginLiteral
	case '0': // beginning of 0.123
		s.step = state0
		return scanBeginLiteral
	case 't': // beginning of true
		s.step = stateT
		return scanBeginLiteral
	case 'f': // beginning of false
		s.step = stateF
		return scanBeginLiteral
	case 'n': // beginning of null
		s.step = stateN
		return scanBeginLiteral
	}
	if '1' <= c && c <= '9' { // beginning of 1234.5
		s.step = state1
		return scanBeginLiteral
	}
	return s.error(c, "looking for beginning of value")
}

// stateBeginStringOrEmpty is the state after reading `{`.
func stateBeginStringOrEmpty(s *scanner, c byte) scanStatus {
	if c <= ' ' && isSpace(c) {
		return scanSkipSpace
	}
	if c == '}' {
		// Empty object: mark the (just pushed) state as "value finished"
		// so stateEndValue pops it correctly.
		n := len(s.parseState)
		s.parseState[n-1] = parseObjectValue
		return stateEndValue(s, c)
	}
	return stateBeginString(s, c)
}

// stateBeginString is the state after reading `{"key": value,`.
func stateBeginString(s *scanner, c byte) scanStatus {
	if c <= ' ' && isSpace(c) {
		return scanSkipSpace
	}
	if c == '"' {
		s.step = stateInString
		return scanBeginLiteral
	}
	return s.error(c, "looking for beginning of object key string")
}

// stateEndValue is the state after completing a value,
// such as after reading `{}` or `true` or `["x"`.
func stateEndValue(s *scanner, c byte) scanStatus {
	n := len(s.parseState)
	if n == 0 {
		// Completed top-level before the current byte.
		s.step = stateEndTop
		s.endTop = true
		return stateEndTop(s, c)
	}
	if c <= ' ' && isSpace(c) {
		s.step = stateEndValue
		return scanSkipSpace
	}
	ps := s.parseState[n-1]
	switch ps {
	case parseObjectKey:
		if c == ':' {
			s.parseState[n-1] = parseObjectValue
			s.step = stateBeginValue
			return scanObjectKey
		}
		return s.error(c, "after object key")
	case parseObjectValue:
		if c == ',' {
			s.parseState[n-1] = parseObjectKey
			s.step = stateBeginString
			return scanObjectValue
		}
		if c == '}' {
			s.popParseState()
			return scanEndObject
		}
		return s.error(c, "after object key:value pair")
	case parseArrayValue:
		if c == ',' {
			s.step = stateBeginValue
			return scanArrayValue
		}
		if c == ']' {
			s.popParseState()
			return scanEndArray
		}
		return s.error(c, "after array element")
	}
	return s.error(c, "")
}

// stateEndTop is the state after finishing the top-level value,
// such as after reading `{}` or `[1,2,3]`.
// Only space characters should be seen now.
func stateEndTop(s *scanner, c byte) scanStatus {
	if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
		// Complain about non-space byte on next call.
		// s.error records s.err and flips s.step to stateError; scanEnd is
		// still returned for this byte, so the error surfaces on the next step.
		s.error(c, "after top-level value")
	}
	return scanEnd
}

// stateInString is the state after reading `"`.
func stateInString(s *scanner, c byte) scanStatus {
	if c == '"' {
		s.step = stateEndValue
		return scanContinue
	}
	if c == '\\' {
		s.step = stateInStringEsc
		return scanContinue
	}
	if c < 0x20 {
		// Raw control characters are not allowed inside JSON strings.
		return s.error(c, "in string literal")
	}
	return scanContinue
}

// stateInStringEsc is the state after reading `"\` during a quoted string.
func stateInStringEsc(s *scanner, c byte) scanStatus {
	switch c {
	case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
		s.step = stateInString
		return scanContinue
	case 'u':
		s.step = stateInStringEscU
		return scanContinue
	}
	return s.error(c, "in string escape code")
}

// stateInStringEscU is the state after reading `"\u` during a quoted string.
func stateInStringEscU(s *scanner, c byte) scanStatus { if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' { s.step = stateInStringEscU1 return scanContinue } // numbers return s.error(c, "in \\u hexadecimal character escape") } // stateInStringEscU1 is the state after reading `"\u1` during a quoted string. func stateInStringEscU1(s *scanner, c byte) scanStatus { if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' { s.step = stateInStringEscU12 return scanContinue } // numbers return s.error(c, "in \\u hexadecimal character escape") } // stateInStringEscU12 is the state after reading `"\u12` during a quoted string. func stateInStringEscU12(s *scanner, c byte) scanStatus { if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' { s.step = stateInStringEscU123 return scanContinue } // numbers return s.error(c, "in \\u hexadecimal character escape") } // stateInStringEscU123 is the state after reading `"\u123` during a quoted string. func stateInStringEscU123(s *scanner, c byte) scanStatus { if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' { s.step = stateInString return scanContinue } // numbers return s.error(c, "in \\u hexadecimal character escape") } // stateNeg is the state after reading `-` during a number. func stateNeg(s *scanner, c byte) scanStatus { if c == '0' { s.step = state0 return scanContinue } if '1' <= c && c <= '9' { s.step = state1 return scanContinue } return s.error(c, "in numeric literal") } // state1 is the state after reading a non-zero integer during a number, // such as after reading `1` or `100` but not `0`. func state1(s *scanner, c byte) scanStatus { if '0' <= c && c <= '9' { s.step = state1 return scanContinue } return state0(s, c) } // state0 is the state after reading `0` during a number. func state0(s *scanner, c byte) scanStatus { if c == '.' 
{ s.step = stateDot return scanContinue } if c == 'e' || c == 'E' { s.step = stateE return scanContinue } return stateEndValue(s, c) } // stateDot is the state after reading the integer and decimal point in a number, // such as after reading `1.`. func stateDot(s *scanner, c byte) scanStatus { if '0' <= c && c <= '9' { s.step = stateDot0 return scanContinue } return s.error(c, "after decimal point in numeric literal") } // stateDot0 is the state after reading the integer, decimal point, and subsequent // digits of a number, such as after reading `3.14`. func stateDot0(s *scanner, c byte) scanStatus { if '0' <= c && c <= '9' { return scanContinue } if c == 'e' || c == 'E' { s.step = stateE return scanContinue } return stateEndValue(s, c) } // stateE is the state after reading the mantissa and e in a number, // such as after reading `314e` or `0.314e`. func stateE(s *scanner, c byte) scanStatus { if c == '+' || c == '-' { s.step = stateESign return scanContinue } return stateESign(s, c) } // stateESign is the state after reading the mantissa, e, and sign in a number, // such as after reading `314e-` or `0.314e+`. func stateESign(s *scanner, c byte) scanStatus { if '0' <= c && c <= '9' { s.step = stateE0 return scanContinue } return s.error(c, "in exponent of numeric literal") } // stateE0 is the state after reading the mantissa, e, optional sign, // and at least one digit of the exponent in a number, // such as after reading `314e-2` or `0.314e+1` or `3.14e0`. func stateE0(s *scanner, c byte) scanStatus { if '0' <= c && c <= '9' { return scanContinue } return stateEndValue(s, c) } // stateT is the state after reading `t`. func stateT(s *scanner, c byte) scanStatus { if c == 'r' { s.step = stateTr return scanContinue } return s.error(c, "in literal true (expecting 'r')") } // stateTr is the state after reading `tr`. 
func stateTr(s *scanner, c byte) scanStatus { if c == 'u' { s.step = stateTru return scanContinue } return s.error(c, "in literal true (expecting 'u')") } // stateTru is the state after reading `tru`. func stateTru(s *scanner, c byte) scanStatus { if c == 'e' { s.step = stateEndValue return scanContinue } return s.error(c, "in literal true (expecting 'e')") } // stateF is the state after reading `f`. func stateF(s *scanner, c byte) scanStatus { if c == 'a' { s.step = stateFa return scanContinue } return s.error(c, "in literal false (expecting 'a')") } // stateFa is the state after reading `fa`. func stateFa(s *scanner, c byte) scanStatus { if c == 'l' { s.step = stateFal return scanContinue } return s.error(c, "in literal false (expecting 'l')") } // stateFal is the state after reading `fal`. func stateFal(s *scanner, c byte) scanStatus { if c == 's' { s.step = stateFals return scanContinue } return s.error(c, "in literal false (expecting 's')") } // stateFals is the state after reading `fals`. func stateFals(s *scanner, c byte) scanStatus { if c == 'e' { s.step = stateEndValue return scanContinue } return s.error(c, "in literal false (expecting 'e')") } // stateN is the state after reading `n`. func stateN(s *scanner, c byte) scanStatus { if c == 'u' { s.step = stateNu return scanContinue } return s.error(c, "in literal null (expecting 'u')") } // stateNu is the state after reading `nu`. func stateNu(s *scanner, c byte) scanStatus { if c == 'l' { s.step = stateNul return scanContinue } return s.error(c, "in literal null (expecting 'l')") } // stateNul is the state after reading `nul`. func stateNul(s *scanner, c byte) scanStatus { if c == 'l' { s.step = stateEndValue return scanContinue } return s.error(c, "in literal null (expecting 'l')") } // stateError is the state after reaching a syntax error, // such as after reading `[1}` or `5.1.2`. 
func stateError(s *scanner, c byte) scanStatus { return scanError } // error records an error and switches to the error state. func (s *scanner) error(c byte, context string) scanStatus { s.step = stateError s.err = fmt.Errorf("invalid character <<%c>> %s", c, context) return scanError }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go
vendor/github.com/gabriel-vasile/mimetype/internal/magic/text.go
package magic

import (
	"bytes"
	"strings"
	"time"

	"github.com/gabriel-vasile/mimetype/internal/charset"
	"github.com/gabriel-vasile/mimetype/internal/json"
)

var (
	// HTML matches a Hypertext Markup Language file.
	HTML = markup(
		[]byte("<!DOCTYPE HTML"),
		[]byte("<HTML"),
		[]byte("<HEAD"),
		[]byte("<SCRIPT"),
		[]byte("<IFRAME"),
		[]byte("<H1"),
		[]byte("<DIV"),
		[]byte("<FONT"),
		[]byte("<TABLE"),
		[]byte("<A"),
		[]byte("<STYLE"),
		[]byte("<TITLE"),
		[]byte("<B"),
		[]byte("<BODY"),
		[]byte("<BR"),
		[]byte("<P"),
	)
	// XML matches an Extensible Markup Language file.
	XML = markup([]byte("<?XML"))
	// Owl2 matches an Owl ontology file.
	Owl2 = xml(newXMLSig("Ontology", `xmlns="http://www.w3.org/2002/07/owl#"`))
	// Rss matches a Rich Site Summary file.
	Rss = xml(newXMLSig("rss", ""))
	// Atom matches an Atom Syndication Format file.
	Atom = xml(newXMLSig("feed", `xmlns="http://www.w3.org/2005/Atom"`))
	// Kml matches a Keyhole Markup Language file.
	Kml = xml(
		newXMLSig("kml", `xmlns="http://www.opengis.net/kml/2.2"`),
		newXMLSig("kml", `xmlns="http://earth.google.com/kml/2.0"`),
		newXMLSig("kml", `xmlns="http://earth.google.com/kml/2.1"`),
		newXMLSig("kml", `xmlns="http://earth.google.com/kml/2.2"`),
	)
	// Xliff matches a XML Localization Interchange File Format file.
	Xliff = xml(newXMLSig("xliff", `xmlns="urn:oasis:names:tc:xliff:document:1.2"`))
	// Collada matches a COLLAborative Design Activity file.
	Collada = xml(newXMLSig("COLLADA", `xmlns="http://www.collada.org/2005/11/COLLADASchema"`))
	// Gml matches a Geography Markup Language file.
	Gml = xml(
		newXMLSig("", `xmlns:gml="http://www.opengis.net/gml"`),
		newXMLSig("", `xmlns:gml="http://www.opengis.net/gml/3.2"`),
		newXMLSig("", `xmlns:gml="http://www.opengis.net/gml/3.3/exr"`),
	)
	// Gpx matches a GPS Exchange Format file.
	Gpx = xml(newXMLSig("gpx", `xmlns="http://www.topografix.com/GPX/1/1"`))
	// Tcx matches a Training Center XML file.
	Tcx = xml(newXMLSig("TrainingCenterDatabase", `xmlns="http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2"`))
	// X3d matches an Extensible 3D Graphics file.
	X3d = xml(newXMLSig("X3D", `xmlns:xsd="http://www.w3.org/2001/XMLSchema-instance"`))
	// Amf matches an Additive Manufacturing XML file.
	Amf = xml(newXMLSig("amf", ""))
	// Threemf matches a 3D Manufacturing Format file.
	Threemf = xml(newXMLSig("model", `xmlns="http://schemas.microsoft.com/3dmanufacturing/core/2015/02"`))
	// Xfdf matches a XML Forms Data Format file.
	Xfdf = xml(newXMLSig("xfdf", `xmlns="http://ns.adobe.com/xfdf/"`))
	// VCard matches a Virtual Contact File.
	VCard = ciPrefix([]byte("BEGIN:VCARD\n"), []byte("BEGIN:VCARD\r\n"))
	// ICalendar matches a iCalendar file.
	ICalendar = ciPrefix([]byte("BEGIN:VCALENDAR\n"), []byte("BEGIN:VCALENDAR\r\n"))
	// phpPageF matches PHP opening tags at the start of the input (case-insensitive).
	phpPageF = ciPrefix(
		[]byte("<?PHP"),
		[]byte("<?\n"),
		[]byte("<?\r"),
		[]byte("<? "),
	)
	// phpScriptF matches PHP interpreter shebang lines.
	phpScriptF = shebang(
		[]byte("/usr/local/bin/php"),
		[]byte("/usr/bin/php"),
		[]byte("/usr/bin/env php"),
	)
	// Js matches a Javascript file.
	Js = shebang(
		[]byte("/bin/node"),
		[]byte("/usr/bin/node"),
		[]byte("/bin/nodejs"),
		[]byte("/usr/bin/nodejs"),
		[]byte("/usr/bin/env node"),
		[]byte("/usr/bin/env nodejs"),
	)
	// Lua matches a Lua programming language file.
	Lua = shebang(
		[]byte("/usr/bin/lua"),
		[]byte("/usr/local/bin/lua"),
		[]byte("/usr/bin/env lua"),
	)
	// Perl matches a Perl programming language file.
	Perl = shebang(
		[]byte("/usr/bin/perl"),
		[]byte("/usr/bin/env perl"),
	)
	// Python matches a Python programming language file.
	Python = shebang(
		[]byte("/usr/bin/python"),
		[]byte("/usr/local/bin/python"),
		[]byte("/usr/bin/env python"),
	)
	// Tcl matches a Tcl programming language file.
	Tcl = shebang(
		[]byte("/usr/bin/tcl"),
		[]byte("/usr/local/bin/tcl"),
		[]byte("/usr/bin/env tcl"),
		[]byte("/usr/bin/tclsh"),
		[]byte("/usr/local/bin/tclsh"),
		[]byte("/usr/bin/env tclsh"),
		[]byte("/usr/bin/wish"),
		[]byte("/usr/local/bin/wish"),
		[]byte("/usr/bin/env wish"),
	)
	// Rtf matches a Rich Text Format file.
	Rtf = prefix([]byte("{\\rtf"))
)

// Text matches a plain text file.
//
// TODO: This function does not parse BOM-less UTF16 and UTF32 files. Not really
// sure it should. Linux file utility also requires a BOM for UTF16 and UTF32.
func Text(raw []byte, limit uint32) bool {
	// First look for BOM.
	if cset := charset.FromBOM(raw); cset != "" {
		return true
	}
	// Binary data bytes as defined here: https://mimesniff.spec.whatwg.org/#binary-data-byte
	for _, b := range raw {
		if b <= 0x08 ||
			b == 0x0B ||
			0x0E <= b && b <= 0x1A ||
			0x1C <= b && b <= 0x1F {
			return false
		}
	}
	return true
}

// Php matches a PHP: Hypertext Preprocessor file.
// A file is PHP if it begins with a PHP open tag or a PHP shebang line.
func Php(raw []byte, limit uint32) bool {
	if res := phpPageF(raw, limit); res {
		return res
	}
	return phpScriptF(raw, limit)
}

// JSON matches a JavaScript Object Notation file.
func JSON(raw []byte, limit uint32) bool {
	raw = trimLWS(raw)
	// #175 A single JSON string, number or bool is not considered JSON.
	// JSON objects and arrays are reported as JSON.
	if len(raw) < 2 || (raw[0] != '[' && raw[0] != '{') {
		return false
	}
	parsed, err := json.Scan(raw)
	// If the full file content was provided, check there is no error.
	if limit == 0 || len(raw) < int(limit) {
		return err == nil
	}

	// If a section of the file was provided, check if all of it was parsed.
	return parsed == len(raw) && len(raw) > 0
}

// GeoJSON matches a RFC 7946 GeoJSON file.
//
// GeoJSON detection implies searching for key:value pairs like: `"type": "Feature"`
// in the input.
// BUG(gabriel-vasile): The "type" key should be searched for in the root object.
func GeoJSON(raw []byte, limit uint32) bool {
	raw = trimLWS(raw)
	if len(raw) == 0 {
		return false
	}
	// GeoJSON is always a JSON object, not a JSON array or any other JSON value.
	if raw[0] != '{' {
		return false
	}
	s := []byte(`"type"`)
	si, sl := bytes.Index(raw, s), len(s)
	if si == -1 {
		return false
	}
	// If the "type" string is the suffix of the input,
	// there is no need to search for the value of the key.
	if si+sl == len(raw) {
		return false
	}
	// Skip the "type" part.
	raw = raw[si+sl:]
	// Skip any whitespace before the colon.
	raw = trimLWS(raw)
	// Check for colon.
	if len(raw) == 0 || raw[0] != ':' {
		return false
	}
	// Skip any whitespace after the colon.
	raw = trimLWS(raw[1:])
	// The value of "type" must be one of the GeoJSON geometry/feature types.
	geoJSONTypes := [][]byte{
		[]byte(`"Feature"`),
		[]byte(`"FeatureCollection"`),
		[]byte(`"Point"`),
		[]byte(`"LineString"`),
		[]byte(`"Polygon"`),
		[]byte(`"MultiPoint"`),
		[]byte(`"MultiLineString"`),
		[]byte(`"MultiPolygon"`),
		[]byte(`"GeometryCollection"`),
	}
	for _, t := range geoJSONTypes {
		if bytes.HasPrefix(raw, t) {
			return true
		}
	}

	return false
}

// NdJSON matches a Newline delimited JSON file. All complete lines from raw
// must be valid JSON documents meaning they contain one of the valid JSON data
// types.
func NdJSON(raw []byte, limit uint32) bool {
	lCount, hasObjOrArr := 0, false
	// Drop a possibly incomplete final line when only a section was provided.
	raw = dropLastLine(raw, limit)
	var l []byte
	for len(raw) != 0 {
		l, raw = scanLine(raw)
		// Empty lines are allowed in NDJSON.
		if l = trimRWS(trimLWS(l)); len(l) == 0 {
			continue
		}
		_, err := json.Scan(l)
		if err != nil {
			return false
		}
		if l[0] == '[' || l[0] == '{' {
			hasObjOrArr = true
		}
		lCount++
	}

	// Require at least two lines and at least one object/array line.
	return lCount > 1 && hasObjOrArr
}

// HAR matches a HAR Spec file.
// Spec: http://www.softwareishard.com/blog/har-12-spec/
func HAR(raw []byte, limit uint32) bool {
	s := []byte(`"log"`)
	si, sl := bytes.Index(raw, s), len(s)
	if si == -1 {
		return false
	}
	// If the "log" string is the suffix of the input,
	// there is no need to search for the value of the key.
	if si+sl == len(raw) {
		return false
	}
	// Skip the "log" part.
	raw = raw[si+sl:]
	// Skip any whitespace before the colon.
	raw = trimLWS(raw)
	// Check for colon.
	if len(raw) == 0 || raw[0] != ':' {
		return false
	}
	// Skip any whitespace after the colon.
	raw = trimLWS(raw[1:])
	// The "log" object must contain one of the mandatory HAR keys.
	harJSONTypes := [][]byte{
		[]byte(`"version"`),
		[]byte(`"creator"`),
		[]byte(`"entries"`),
	}
	for _, t := range harJSONTypes {
		si := bytes.Index(raw, t)
		if si > -1 {
			return true
		}
	}

	return false
}

// Svg matches a SVG file.
func Svg(raw []byte, limit uint32) bool {
	return bytes.Contains(raw, []byte("<svg"))
}

// Srt matches a SubRip file.
func Srt(raw []byte, _ uint32) bool {
	line, raw := scanLine(raw)
	// First line must be 1.
	if string(line) != "1" {
		return false
	}
	line, raw = scanLine(raw)
	secondLine := string(line)
	// Timestamp format (e.g: 00:02:16,612 --> 00:02:19,376) limits secondLine
	// length to exactly 29 characters.
	if len(secondLine) != 29 {
		return false
	}
	// Decimal separator of fractional seconds in the timestamps must be a
	// comma, not a period.
	if strings.Contains(secondLine, ".") {
		return false
	}
	// Second line must be a time range.
	ts := strings.Split(secondLine, " --> ")
	if len(ts) != 2 {
		return false
	}
	const layout = "15:04:05,000"
	t0, err := time.Parse(layout, ts[0])
	if err != nil {
		return false
	}
	t1, err := time.Parse(layout, ts[1])
	if err != nil {
		return false
	}
	// The end timestamp must not precede the start timestamp.
	if t0.After(t1) {
		return false
	}

	line, _ = scanLine(raw)
	// A third line must exist and not be empty. This is the actual subtitle text.
	return len(line) != 0
}

// Vtt matches a Web Video Text Tracks (WebVTT) file. See
// https://www.iana.org/assignments/media-types/text/vtt.
func Vtt(raw []byte, limit uint32) bool {
	// Prefix match.
	prefixes := [][]byte{
		{0xEF, 0xBB, 0xBF, 0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x0A}, // UTF-8 BOM, "WEBVTT" and a line feed
		{0xEF, 0xBB, 0xBF, 0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x0D}, // UTF-8 BOM, "WEBVTT" and a carriage return
		{0xEF, 0xBB, 0xBF, 0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x20}, // UTF-8 BOM, "WEBVTT" and a space
		{0xEF, 0xBB, 0xBF, 0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x09}, // UTF-8 BOM, "WEBVTT" and a horizontal tab
		{0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x0A},                   // "WEBVTT" and a line feed
		{0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x0D},                   // "WEBVTT" and a carriage return
		{0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x20},                   // "WEBVTT" and a space
		{0x57, 0x45, 0x42, 0x56, 0x54, 0x54, 0x09},                   // "WEBVTT" and a horizontal tab
	}
	for _, p := range prefixes {
		if bytes.HasPrefix(raw, p) {
			return true
		}
	}

	// Exact match.
	return bytes.Equal(raw, []byte{0xEF, 0xBB, 0xBF, 0x57, 0x45, 0x42, 0x56, 0x54, 0x54}) || // UTF-8 BOM and "WEBVTT"
		bytes.Equal(raw, []byte{0x57, 0x45, 0x42, 0x56, 0x54, 0x54}) // "WEBVTT"
}

// dropCR drops a terminal \r from the data.
func dropCR(data []byte) []byte {
	if len(data) > 0 && data[len(data)-1] == '\r' {
		return data[0 : len(data)-1]
	}
	return data
}

// scanLine splits b at the first line feed and returns the line
// (without a trailing \r) and the remaining bytes.
func scanLine(b []byte) (line, remainder []byte) {
	line, remainder, _ = bytes.Cut(b, []byte("\n"))
	return dropCR(line), remainder
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/internal/magic/document.go
vendor/github.com/gabriel-vasile/mimetype/internal/magic/document.go
package magic

import "bytes"

var (
	// Pdf matches a Portable Document Format file.
	// https://github.com/file/file/blob/11010cc805546a3e35597e67e1129a481aed40e8/magic/Magdir/pdf
	Pdf = prefix(
		// usual pdf signature
		[]byte("%PDF-"),
		// new-line prefixed signature
		[]byte("\012%PDF-"),
		// UTF-8 BOM prefixed signature
		[]byte("\xef\xbb\xbf%PDF-"),
	)
	// Fdf matches a Forms Data Format file.
	Fdf = prefix([]byte("%FDF"))
	// Mobi matches a Mobi file.
	Mobi = offset([]byte("BOOKMOBI"), 60)
	// Lit matches a Microsoft Lit file.
	Lit = prefix([]byte("ITOLITLS"))
)

// DjVu matches a DjVu file.
func DjVu(raw []byte, limit uint32) bool {
	if len(raw) < 12 {
		return false
	}
	// The file must start with the 8-byte "AT&TFORM" magic.
	if !bytes.HasPrefix(raw, []byte{0x41, 0x54, 0x26, 0x54, 0x46, 0x4F, 0x52, 0x4D}) {
		return false
	}
	// A secondary chunk identifier is expected at offset 12.
	return bytes.HasPrefix(raw[12:], []byte("DJVM")) ||
		bytes.HasPrefix(raw[12:], []byte("DJVU")) ||
		bytes.HasPrefix(raw[12:], []byte("DJVI")) ||
		bytes.HasPrefix(raw[12:], []byte("THUM"))
}

// P7s matches an .p7s signature File (PEM, Base64).
func P7s(raw []byte, limit uint32) bool {
	// Check for PEM Encoding.
	if bytes.HasPrefix(raw, []byte("-----BEGIN PKCS7")) {
		return true
	}
	// Check if DER Encoding is long enough.
	if len(raw) < 20 {
		return false
	}
	// Magic Bytes for the signedData ASN.1 encoding.
	startHeader := [][]byte{{0x30, 0x80}, {0x30, 0x81}, {0x30, 0x82}, {0x30, 0x83}, {0x30, 0x84}}
	signedDataMatch := []byte{0x06, 0x09, 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x07}
	// Check if Header is correct. There are multiple valid headers.
	for i, match := range startHeader {
		// If first bytes match, then check for ASN.1 Object Type.
		// The loop index i doubles as the number of extra length bytes that
		// follow the matched two-byte header before the object type begins.
		if bytes.HasPrefix(raw, match) {
			if bytes.HasPrefix(raw[i+2:], signedDataMatch) {
				return true
			}
		}
	}

	return false
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ogg.go
vendor/github.com/gabriel-vasile/mimetype/internal/magic/ogg.go
package magic

import (
	"bytes"
)

/*
 NOTE:

 The Ogg bitstream format was published in May 2003 as RFC 3533 (classified
 as 'informative') and its Internet content type, application/ogg, as
 RFC 3534 (a proposed standard protocol as of 2006). In September 2008,
 RFC 5334 obsoleted RFC 3534, adding the content types video/ogg and
 audio/ogg together with the filename extensions .ogx, .ogv, .oga and .spx.

 See:
  https://tools.ietf.org/html/rfc3533
  https://developer.mozilla.org/en-US/docs/Web/HTTP/Configuring_servers_for_Ogg_media#Serve_media_with_the_correct_MIME_type
  https://github.com/file/file/blob/master/magic/Magdir/vorbis
*/

// Ogg matches an Ogg file: the "OggS" capture pattern followed by a 0x00 byte.
func Ogg(raw []byte, limit uint32) bool {
	return bytes.HasPrefix(raw, []byte{'O', 'g', 'g', 'S', 0x00})
}

// OggAudio matches an audio ogg file by inspecting the codec
// identification bytes located at offset 28.
func OggAudio(raw []byte, limit uint32) bool {
	if len(raw) < 37 {
		return false
	}
	codec := raw[28:]
	for _, sig := range [][]byte{
		[]byte("\x7fFLAC"),
		[]byte("\x01vorbis"),
		[]byte("OpusHead"),
		[]byte("Speex\x20\x20\x20"),
	} {
		if bytes.HasPrefix(codec, sig) {
			return true
		}
	}
	return false
}

// OggVideo matches a video ogg file by inspecting the codec
// identification bytes located at offset 28.
func OggVideo(raw []byte, limit uint32) bool {
	if len(raw) < 37 {
		return false
	}
	codec := raw[28:]
	for _, sig := range [][]byte{
		[]byte("\x80theora"),
		[]byte("fishead\x00"),
		[]byte("\x01video\x00\x00\x00"), // OGM video
	} {
		if bytes.HasPrefix(codec, sig) {
			return true
		}
	}
	return false
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go
vendor/github.com/gabriel-vasile/mimetype/internal/magic/ms_office.go
package magic

import (
	"bytes"
	"encoding/binary"
)

// Xlsx matches a Microsoft Excel 2007 file.
// OOXML files are zip archives; an Excel workbook stores its parts under xl/.
func Xlsx(raw []byte, limit uint32) bool {
	return zipContains(raw, []byte("xl/"), true)
}

// Docx matches a Microsoft Word 2007 file.
func Docx(raw []byte, limit uint32) bool {
	return zipContains(raw, []byte("word/"), true)
}

// Pptx matches a Microsoft PowerPoint 2007 file.
func Pptx(raw []byte, limit uint32) bool {
	return zipContains(raw, []byte("ppt/"), true)
}

// Ole matches an Open Linking and Embedding file.
//
// https://en.wikipedia.org/wiki/Object_Linking_and_Embedding
func Ole(raw []byte, limit uint32) bool {
	return bytes.HasPrefix(raw, []byte{0xD0, 0xCF, 0x11, 0xE0, 0xA1, 0xB1, 0x1A, 0xE1})
}

// Aaf matches an Advanced Authoring Format file.
// See: https://pyaaf.readthedocs.io/en/latest/about.html
// See: https://en.wikipedia.org/wiki/Advanced_Authoring_Format
func Aaf(raw []byte, limit uint32) bool {
	if len(raw) < 31 {
		return false
	}
	// "AAFB\r\x00OM" at offset 8, plus a byte-order marker check at offset 30.
	return bytes.HasPrefix(raw[8:], []byte{0x41, 0x41, 0x46, 0x42, 0x0D, 0x00, 0x4F, 0x4D}) &&
		(raw[30] == 0x09 || raw[30] == 0x0C)
}

// Doc matches a Microsoft Word 97-2003 file.
// See: https://github.com/decalage2/oletools/blob/412ee36ae45e70f42123e835871bac956d958461/oletools/common/clsid.py
func Doc(raw []byte, _ uint32) bool {
	clsids := [][]byte{
		// Microsoft Word 97-2003 Document (Word.Document.8)
		{0x06, 0x09, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46},
		// Microsoft Word 6.0-7.0 Document (Word.Document.6)
		{0x00, 0x09, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46},
		// Microsoft Word Picture (Word.Picture.8)
		{0x07, 0x09, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46},
	}

	for _, clsid := range clsids {
		if matchOleClsid(raw, clsid) {
			return true
		}
	}

	return false
}

// Ppt matches a Microsoft PowerPoint 97-2003 file or a PowerPoint 95 presentation.
func Ppt(raw []byte, limit uint32) bool {
	// Root CLSID test is the safest way to detect identify OLE, however, the format
	// often places the root CLSID at the end of the file.
	if matchOleClsid(raw, []byte{
		0x10, 0x8d, 0x81, 0x64, 0x9b, 0x4f, 0xcf, 0x11,
		0x86, 0xea, 0x00, 0xaa, 0x00, 0xb9, 0x29, 0xe8,
	}) || matchOleClsid(raw, []byte{
		0x70, 0xae, 0x7b, 0xea, 0x3b, 0xfb, 0xcd, 0x11,
		0xa9, 0x03, 0x00, 0xaa, 0x00, 0x51, 0x0e, 0xa3,
	}) {
		return true
	}

	lin := len(raw)
	if lin < 520 {
		return false
	}
	// Fallback: look for known PowerPoint stream sub-headers at the start of
	// the second 512-byte sector.
	pptSubHeaders := [][]byte{
		{0xA0, 0x46, 0x1D, 0xF0},
		{0x00, 0x6E, 0x1E, 0xF0},
		{0x0F, 0x00, 0xE8, 0x03},
	}
	for _, h := range pptSubHeaders {
		if bytes.HasPrefix(raw[512:], h) {
			return true
		}
	}

	if bytes.HasPrefix(raw[512:], []byte{0xFD, 0xFF, 0xFF, 0xFF}) &&
		raw[518] == 0x00 && raw[519] == 0x00 {
		return true
	}

	// Last resort: search for the UTF-16 "PowerPoint Document" stream name.
	// NOTE(review): in the literal below the space after "PowerPoint" appears
	// to be missing its UTF-16 NUL byte ("t\x00 D" instead of "t\x00 \x00D");
	// confirm against upstream before changing, as detection relies on the
	// exact byte sequence.
	return lin > 1152 && bytes.Contains(raw[1152:min(4096, lin)],
		[]byte("P\x00o\x00w\x00e\x00r\x00P\x00o\x00i\x00n\x00t\x00 D\x00o\x00c\x00u\x00m\x00e\x00n\x00t"))
}

// Xls matches a Microsoft Excel 97-2003 file.
func Xls(raw []byte, limit uint32) bool {
	// Root CLSID test is the safest way to detect identify OLE, however, the format
	// often places the root CLSID at the end of the file.
	if matchOleClsid(raw, []byte{
		0x10, 0x08, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
	}) || matchOleClsid(raw, []byte{
		0x20, 0x08, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
	}) {
		return true
	}

	lin := len(raw)
	if lin < 520 {
		return false
	}
	// Fallback: look for known Excel stream sub-headers at the start of
	// the second 512-byte sector.
	xlsSubHeaders := [][]byte{
		{0x09, 0x08, 0x10, 0x00, 0x00, 0x06, 0x05, 0x00},
		{0xFD, 0xFF, 0xFF, 0xFF, 0x10},
		{0xFD, 0xFF, 0xFF, 0xFF, 0x1F},
		{0xFD, 0xFF, 0xFF, 0xFF, 0x22},
		{0xFD, 0xFF, 0xFF, 0xFF, 0x23},
		{0xFD, 0xFF, 0xFF, 0xFF, 0x28},
		{0xFD, 0xFF, 0xFF, 0xFF, 0x29},
	}
	for _, h := range xlsSubHeaders {
		if bytes.HasPrefix(raw[512:], h) {
			return true
		}
	}

	// Last resort: search for a UTF-16 workbook stream name.
	return lin > 1152 && bytes.Contains(raw[1152:min(4096, lin)],
		[]byte("W\x00k\x00s\x00S\x00S\x00W\x00o\x00r\x00k\x00B\x00o\x00o\x00k"))
}

// Pub matches a Microsoft Publisher file.
func Pub(raw []byte, limit uint32) bool {
	return matchOleClsid(raw, []byte{
		0x01, 0x12, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46,
	})
}

// Msg matches a Microsoft Outlook email file.
func Msg(raw []byte, limit uint32) bool {
	return matchOleClsid(raw, []byte{
		0x0B, 0x0D, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
		0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46,
	})
}

// Msi matches a Microsoft Windows Installer file.
// http://fileformats.archiveteam.org/wiki/Microsoft_Compound_File
func Msi(raw []byte, limit uint32) bool {
	return matchOleClsid(raw, []byte{
		0x84, 0x10, 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00,
		0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46,
	})
}

// Helper to match by a specific CLSID of a compound file.
//
// http://fileformats.archiveteam.org/wiki/Microsoft_Compound_File
func matchOleClsid(in []byte, clsid []byte) bool {
	// Microsoft Compound files v3 have a sector length of 512, while v4 has 4096.
	// Change sector offset depending on file version.
	// https://www.loc.gov/preservation/digital/formats/fdd/fdd000392.shtml
	sectorLength := 512
	if len(in) < sectorLength {
		return false
	}
	if in[26] == 0x04 && in[27] == 0x00 {
		sectorLength = 4096
	}

	// SecID of first sector of the directory stream.
	firstSecID := int(binary.LittleEndian.Uint32(in[48:52]))

	// Expected offset of CLSID for root storage object.
	// NOTE(review): firstSecID comes from untrusted input; on 32-bit platforms
	// sectorLength*(1+firstSecID) could overflow int and produce a negative
	// clsidOffset that bypasses the bounds check below — confirm and guard
	// upstream if reachable.
	clsidOffset := sectorLength*(1+firstSecID) + 80

	if len(in) <= clsidOffset+16 {
		return false
	}

	return bytes.HasPrefix(in[clsidOffset:], clsid)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go
vendor/github.com/gabriel-vasile/mimetype/internal/magic/archive.go
package magic

import (
	"bytes"
	"encoding/binary"
)

var (
	// SevenZ matches a 7z archive.
	SevenZ = prefix([]byte{0x37, 0x7A, 0xBC, 0xAF, 0x27, 0x1C})
	// Gzip matches gzip files based on http://www.zlib.org/rfc-gzip.html#header-trailer.
	Gzip = prefix([]byte{0x1f, 0x8b})
	// Fits matches an Flexible Image Transport System file.
	// The signature is the ASCII header "SIMPLE  =                    T".
	Fits = prefix([]byte{
		0x53, 0x49, 0x4D, 0x50, 0x4C, 0x45, 0x20, 0x20, 0x3D, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x54,
	})
	// Xar matches an eXtensible ARchive format file.
	Xar = prefix([]byte{0x78, 0x61, 0x72, 0x21})
	// Bz2 matches a bzip2 file.
	Bz2 = prefix([]byte{0x42, 0x5A, 0x68})
	// Ar matches an ar (Unix) archive file.
	Ar = prefix([]byte{0x21, 0x3C, 0x61, 0x72, 0x63, 0x68, 0x3E})
	// Deb matches a Debian package file.
	// The "debian-binary" marker sits at offset 8, after the ar header.
	Deb = offset([]byte{
		0x64, 0x65, 0x62, 0x69, 0x61, 0x6E, 0x2D,
		0x62, 0x69, 0x6E, 0x61, 0x72, 0x79,
	}, 8)
	// Warc matches a Web ARChive file.
	Warc = prefix([]byte("WARC/1.0"), []byte("WARC/1.1"))
	// Cab matches a Microsoft Cabinet archive file.
	Cab = prefix([]byte("MSCF\x00\x00\x00\x00"))
	// Xz matches an xz compressed stream based on https://tukaani.org/xz/xz-file-format.txt.
	Xz = prefix([]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00})
	// Lzip matches an Lzip compressed file.
	Lzip = prefix([]byte{0x4c, 0x5a, 0x49, 0x50})
	// RPM matches an RPM or Delta RPM package file.
	RPM = prefix([]byte{0xed, 0xab, 0xee, 0xdb}, []byte("drpm"))
	// Cpio matches a cpio archive file.
	Cpio = prefix([]byte("070707"), []byte("070701"), []byte("070702"))
	// RAR matches a RAR archive file.
	RAR = prefix([]byte("Rar!\x1A\x07\x00"), []byte("Rar!\x1A\x07\x01\x00"))
)

// InstallShieldCab matches an InstallShield Cabinet archive file.
// Requires the "ISc(" signature, a zero byte at offset 6, and a known
// version byte at offset 7.
func InstallShieldCab(raw []byte, _ uint32) bool {
	return len(raw) > 7 &&
		bytes.Equal(raw[0:4], []byte("ISc(")) &&
		raw[6] == 0 &&
		(raw[7] == 1 || raw[7] == 2 || raw[7] == 4)
}

// Zstd matches a Zstandard archive file.
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md func Zstd(raw []byte, limit uint32) bool { if len(raw) < 4 { return false } sig := binary.LittleEndian.Uint32(raw) // Check for Zstandard frames and skippable frames. return (sig >= 0xFD2FB522 && sig <= 0xFD2FB528) || (sig >= 0x184D2A50 && sig <= 0x184D2A5F) } // CRX matches a Chrome extension file: a zip archive prepended by a package header. func CRX(raw []byte, limit uint32) bool { const minHeaderLen = 16 if len(raw) < minHeaderLen || !bytes.HasPrefix(raw, []byte("Cr24")) { return false } pubkeyLen := binary.LittleEndian.Uint32(raw[8:12]) sigLen := binary.LittleEndian.Uint32(raw[12:16]) zipOffset := minHeaderLen + pubkeyLen + sigLen if uint32(len(raw)) < zipOffset { return false } return Zip(raw[zipOffset:], limit) } // Tar matches a (t)ape (ar)chive file. // Tar files are divided into 512 bytes records. First record contains a 257 // bytes header padded with NUL. func Tar(raw []byte, _ uint32) bool { const sizeRecord = 512 // The structure of a tar header: // type TarHeader struct { // Name [100]byte // Mode [8]byte // Uid [8]byte // Gid [8]byte // Size [12]byte // Mtime [12]byte // Chksum [8]byte // Linkflag byte // Linkname [100]byte // Magic [8]byte // Uname [32]byte // Gname [32]byte // Devmajor [8]byte // Devminor [8]byte // } if len(raw) < sizeRecord { return false } raw = raw[:sizeRecord] // First 100 bytes of the header represent the file name. // Check if file looks like Gentoo GLEP binary package. if bytes.Contains(raw[:100], []byte("/gpkg-1\x00")) { return false } // Get the checksum recorded into the file. recsum := tarParseOctal(raw[148:156]) if recsum == -1 { return false } sum1, sum2 := tarChksum(raw) return recsum == sum1 || recsum == sum2 } // tarParseOctal converts octal string to decimal int. func tarParseOctal(b []byte) int64 { // Because unused fields are filled with NULs, we need to skip leading NULs. // Fields may also be padded with spaces or NULs. 
// So we remove leading and trailing NULs and spaces to be sure. b = bytes.Trim(b, " \x00") if len(b) == 0 { return -1 } ret := int64(0) for _, b := range b { if b == 0 { break } if !(b >= '0' && b <= '7') { return -1 } ret = (ret << 3) | int64(b-'0') } return ret } // tarChksum computes the checksum for the header block b. // The actual checksum is written to same b block after it has been calculated. // Before calculation the bytes from b reserved for checksum have placeholder // value of ASCII space 0x20. // POSIX specifies a sum of the unsigned byte values, but the Sun tar used // signed byte values. We compute and return both. func tarChksum(b []byte) (unsigned, signed int64) { for i, c := range b { if 148 <= i && i < 156 { c = ' ' // Treat the checksum field itself as all spaces. } unsigned += int64(c) signed += int64(int8(c)) } return unsigned, signed }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/internal/magic/image.go
vendor/github.com/gabriel-vasile/mimetype/internal/magic/image.go
package magic import "bytes" var ( // Png matches a Portable Network Graphics file. // https://www.w3.org/TR/PNG/ Png = prefix([]byte{0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A}) // Apng matches an Animated Portable Network Graphics file. // https://wiki.mozilla.org/APNG_Specification Apng = offset([]byte("acTL"), 37) // Jpg matches a Joint Photographic Experts Group file. Jpg = prefix([]byte{0xFF, 0xD8, 0xFF}) // Jp2 matches a JPEG 2000 Image file (ISO 15444-1). Jp2 = jpeg2k([]byte{0x6a, 0x70, 0x32, 0x20}) // Jpx matches a JPEG 2000 Image file (ISO 15444-2). Jpx = jpeg2k([]byte{0x6a, 0x70, 0x78, 0x20}) // Jpm matches a JPEG 2000 Image file (ISO 15444-6). Jpm = jpeg2k([]byte{0x6a, 0x70, 0x6D, 0x20}) // Gif matches a Graphics Interchange Format file. Gif = prefix([]byte("GIF87a"), []byte("GIF89a")) // Bmp matches a bitmap image file. Bmp = prefix([]byte{0x42, 0x4D}) // Ps matches a PostScript file. Ps = prefix([]byte("%!PS-Adobe-")) // Psd matches a Photoshop Document file. Psd = prefix([]byte("8BPS")) // Ico matches an ICO file. Ico = prefix([]byte{0x00, 0x00, 0x01, 0x00}, []byte{0x00, 0x00, 0x02, 0x00}) // Icns matches an ICNS (Apple Icon Image format) file. Icns = prefix([]byte("icns")) // Tiff matches a Tagged Image File Format file. Tiff = prefix([]byte{0x49, 0x49, 0x2A, 0x00}, []byte{0x4D, 0x4D, 0x00, 0x2A}) // Bpg matches a Better Portable Graphics file. Bpg = prefix([]byte{0x42, 0x50, 0x47, 0xFB}) // Xcf matches GIMP image data. Xcf = prefix([]byte("gimp xcf")) // Pat matches GIMP pattern data. Pat = offset([]byte("GPAT"), 20) // Gbr matches GIMP brush data. Gbr = offset([]byte("GIMP"), 20) // Hdr matches Radiance HDR image. // https://web.archive.org/web/20060913152809/http://local.wasp.uwa.edu.au/~pbourke/dataformats/pic/ Hdr = prefix([]byte("#?RADIANCE\n")) // Xpm matches X PixMap image data. Xpm = prefix([]byte{0x2F, 0x2A, 0x20, 0x58, 0x50, 0x4D, 0x20, 0x2A, 0x2F}) // Jxs matches a JPEG XS coded image file (ISO/IEC 21122-3). 
Jxs = prefix([]byte{0x00, 0x00, 0x00, 0x0C, 0x4A, 0x58, 0x53, 0x20, 0x0D, 0x0A, 0x87, 0x0A}) // Jxr matches Microsoft HD JXR photo file. Jxr = prefix([]byte{0x49, 0x49, 0xBC, 0x01}) ) func jpeg2k(sig []byte) Detector { return func(raw []byte, _ uint32) bool { if len(raw) < 24 { return false } if !bytes.Equal(raw[4:8], []byte{0x6A, 0x50, 0x20, 0x20}) && !bytes.Equal(raw[4:8], []byte{0x6A, 0x50, 0x32, 0x20}) { return false } return bytes.Equal(raw[20:24], sig) } } // Webp matches a WebP file. func Webp(raw []byte, _ uint32) bool { return len(raw) > 12 && bytes.Equal(raw[0:4], []byte("RIFF")) && bytes.Equal(raw[8:12], []byte{0x57, 0x45, 0x42, 0x50}) } // Dwg matches a CAD drawing file. func Dwg(raw []byte, _ uint32) bool { if len(raw) < 6 || raw[0] != 0x41 || raw[1] != 0x43 { return false } dwgVersions := [][]byte{ {0x31, 0x2E, 0x34, 0x30}, {0x31, 0x2E, 0x35, 0x30}, {0x32, 0x2E, 0x31, 0x30}, {0x31, 0x30, 0x30, 0x32}, {0x31, 0x30, 0x30, 0x33}, {0x31, 0x30, 0x30, 0x34}, {0x31, 0x30, 0x30, 0x36}, {0x31, 0x30, 0x30, 0x39}, {0x31, 0x30, 0x31, 0x32}, {0x31, 0x30, 0x31, 0x34}, {0x31, 0x30, 0x31, 0x35}, {0x31, 0x30, 0x31, 0x38}, {0x31, 0x30, 0x32, 0x31}, {0x31, 0x30, 0x32, 0x34}, {0x31, 0x30, 0x33, 0x32}, } for _, d := range dwgVersions { if bytes.Equal(raw[2:6], d) { return true } } return false } // Jxl matches JPEG XL image file. func Jxl(raw []byte, _ uint32) bool { return bytes.HasPrefix(raw, []byte{0xFF, 0x0A}) || bytes.HasPrefix(raw, []byte("\x00\x00\x00\x0cJXL\x20\x0d\x0a\x87\x0a")) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/internal/magic/ftyp.go
vendor/github.com/gabriel-vasile/mimetype/internal/magic/ftyp.go
package magic import ( "bytes" ) var ( // AVIF matches an AV1 Image File Format still or animated. // Wikipedia page seems outdated listing image/avif-sequence for animations. // https://github.com/AOMediaCodec/av1-avif/issues/59 AVIF = ftyp([]byte("avif"), []byte("avis")) // ThreeGP matches a 3GPP file. ThreeGP = ftyp( []byte("3gp1"), []byte("3gp2"), []byte("3gp3"), []byte("3gp4"), []byte("3gp5"), []byte("3gp6"), []byte("3gp7"), []byte("3gs7"), []byte("3ge6"), []byte("3ge7"), []byte("3gg6"), ) // ThreeG2 matches a 3GPP2 file. ThreeG2 = ftyp( []byte("3g24"), []byte("3g25"), []byte("3g26"), []byte("3g2a"), []byte("3g2b"), []byte("3g2c"), []byte("KDDI"), ) // AMp4 matches an audio MP4 file. AMp4 = ftyp( // audio for Adobe Flash Player 9+ []byte("F4A "), []byte("F4B "), // Apple iTunes AAC-LC (.M4A) Audio []byte("M4B "), []byte("M4P "), // MPEG-4 (.MP4) for SonyPSP []byte("MSNV"), // Nero Digital AAC Audio []byte("NDAS"), ) // Mqv matches a Sony / Mobile QuickTime file. Mqv = ftyp([]byte("mqt ")) // M4a matches an audio M4A file. M4a = ftyp([]byte("M4A ")) // M4v matches an Appl4 M4V video file. M4v = ftyp([]byte("M4V "), []byte("M4VH"), []byte("M4VP")) // Heic matches a High Efficiency Image Coding (HEIC) file. Heic = ftyp([]byte("heic"), []byte("heix")) // HeicSequence matches a High Efficiency Image Coding (HEIC) file sequence. HeicSequence = ftyp([]byte("hevc"), []byte("hevx")) // Heif matches a High Efficiency Image File Format (HEIF) file. Heif = ftyp([]byte("mif1"), []byte("heim"), []byte("heis"), []byte("avic")) // HeifSequence matches a High Efficiency Image File Format (HEIF) file sequence. HeifSequence = ftyp([]byte("msf1"), []byte("hevm"), []byte("hevs"), []byte("avcs")) // Mj2 matches a Motion JPEG 2000 file: https://en.wikipedia.org/wiki/Motion_JPEG_2000. Mj2 = ftyp([]byte("mj2s"), []byte("mjp2"), []byte("MFSM"), []byte("MGSV")) // Dvb matches a Digital Video Broadcasting file: https://dvb.org. 
// https://cconcolato.github.io/mp4ra/filetype.html // https://github.com/file/file/blob/512840337ead1076519332d24fefcaa8fac36e06/magic/Magdir/animation#L135-L154 Dvb = ftyp( []byte("dby1"), []byte("dsms"), []byte("dts1"), []byte("dts2"), []byte("dts3"), []byte("dxo "), []byte("dmb1"), []byte("dmpf"), []byte("drc1"), []byte("dv1a"), []byte("dv1b"), []byte("dv2a"), []byte("dv2b"), []byte("dv3a"), []byte("dv3b"), []byte("dvr1"), []byte("dvt1"), []byte("emsg")) // TODO: add support for remaining video formats at ftyps.com. ) // QuickTime matches a QuickTime File Format file. // https://www.loc.gov/preservation/digital/formats/fdd/fdd000052.shtml // https://developer.apple.com/library/archive/documentation/QuickTime/QTFF/QTFFChap1/qtff1.html#//apple_ref/doc/uid/TP40000939-CH203-38190 // https://github.com/apache/tika/blob/0f5570691133c75ac4472c3340354a6c4080b104/tika-core/src/main/resources/org/apache/tika/mime/tika-mimetypes.xml#L7758-L7777 func QuickTime(raw []byte, _ uint32) bool { if len(raw) < 12 { return false } // First 4 bytes represent the size of the atom as unsigned int. // Next 4 bytes are the type of the atom. // For `ftyp` atoms check if first byte in size is 0, otherwise, a text file // which happens to contain 'ftypqt ' at index 4 will trigger a false positive. if bytes.Equal(raw[4:12], []byte("ftypqt ")) || bytes.Equal(raw[4:12], []byte("ftypmoov")) { return raw[0] == 0x00 } basicAtomTypes := [][]byte{ []byte("moov\x00"), []byte("mdat\x00"), []byte("free\x00"), []byte("skip\x00"), []byte("pnot\x00"), } for _, a := range basicAtomTypes { if bytes.Equal(raw[4:9], a) { return true } } return bytes.Equal(raw[:8], []byte("\x00\x00\x00\x08wide")) } // Mp4 detects an .mp4 file. Mp4 detections only does a basic ftyp check. // Mp4 has many registered and unregistered code points so it's hard to keep track // of all. Detection will default on video/mp4 for all ftyp files. // ISO_IEC_14496-12 is the specification for the iso container. 
func Mp4(raw []byte, _ uint32) bool { if len(raw) < 12 { return false } // ftyps are made out of boxes. The first 4 bytes of the box represent // its size in big-endian uint32. First box is the ftyp box and it is small // in size. Check most significant byte is 0 to filter out false positive // text files that happen to contain the string "ftyp" at index 4. if raw[0] != 0 { return false } return bytes.Equal(raw[4:8], []byte("ftyp")) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/internal/magic/zip.go
vendor/github.com/gabriel-vasile/mimetype/internal/magic/zip.go
package magic import ( "bytes" "encoding/binary" ) var ( // Odt matches an OpenDocument Text file. Odt = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.text"), 30) // Ott matches an OpenDocument Text Template file. Ott = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.text-template"), 30) // Ods matches an OpenDocument Spreadsheet file. Ods = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.spreadsheet"), 30) // Ots matches an OpenDocument Spreadsheet Template file. Ots = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.spreadsheet-template"), 30) // Odp matches an OpenDocument Presentation file. Odp = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.presentation"), 30) // Otp matches an OpenDocument Presentation Template file. Otp = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.presentation-template"), 30) // Odg matches an OpenDocument Drawing file. Odg = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.graphics"), 30) // Otg matches an OpenDocument Drawing Template file. Otg = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.graphics-template"), 30) // Odf matches an OpenDocument Formula file. Odf = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.formula"), 30) // Odc matches an OpenDocument Chart file. Odc = offset([]byte("mimetypeapplication/vnd.oasis.opendocument.chart"), 30) // Epub matches an EPUB file. Epub = offset([]byte("mimetypeapplication/epub+zip"), 30) // Sxc matches an OpenOffice Spreadsheet file. Sxc = offset([]byte("mimetypeapplication/vnd.sun.xml.calc"), 30) ) // Zip matches a zip archive. func Zip(raw []byte, limit uint32) bool { return len(raw) > 3 && raw[0] == 0x50 && raw[1] == 0x4B && (raw[2] == 0x3 || raw[2] == 0x5 || raw[2] == 0x7) && (raw[3] == 0x4 || raw[3] == 0x6 || raw[3] == 0x8) } // Jar matches a Java archive file. 
func Jar(raw []byte, limit uint32) bool { return zipContains(raw, []byte("META-INF/MANIFEST.MF"), false) } func zipContains(raw, sig []byte, msoCheck bool) bool { b := readBuf(raw) pk := []byte("PK\003\004") if len(b) < 0x1E { return false } if !b.advance(0x1E) { return false } if bytes.HasPrefix(b, sig) { return true } if msoCheck { skipFiles := [][]byte{ []byte("[Content_Types].xml"), []byte("_rels/.rels"), []byte("docProps"), []byte("customXml"), []byte("[trash]"), } hasSkipFile := false for _, sf := range skipFiles { if bytes.HasPrefix(b, sf) { hasSkipFile = true break } } if !hasSkipFile { return false } } searchOffset := binary.LittleEndian.Uint32(raw[18:]) + 49 if !b.advance(int(searchOffset)) { return false } nextHeader := bytes.Index(raw[searchOffset:], pk) if !b.advance(nextHeader) { return false } if bytes.HasPrefix(b, sig) { return true } for i := 0; i < 4; i++ { if !b.advance(0x1A) { return false } nextHeader = bytes.Index(b, pk) if nextHeader == -1 { return false } if !b.advance(nextHeader + 0x1E) { return false } if bytes.HasPrefix(b, sig) { return true } } return false } // APK matches an Android Package Archive. // The source of signatures is https://github.com/file/file/blob/1778642b8ba3d947a779a36fcd81f8e807220a19/magic/Magdir/archive#L1820-L1887 func APK(raw []byte, _ uint32) bool { apkSignatures := [][]byte{ []byte("AndroidManifest.xml"), []byte("META-INF/com/android/build/gradle/app-metadata.properties"), []byte("classes.dex"), []byte("resources.arsc"), []byte("res/drawable"), } for _, sig := range apkSignatures { if zipContains(raw, sig, false) { return true } } return false }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/internal/magic/database.go
vendor/github.com/gabriel-vasile/mimetype/internal/magic/database.go
package magic var ( // Sqlite matches an SQLite database file. Sqlite = prefix([]byte{ 0x53, 0x51, 0x4c, 0x69, 0x74, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x20, 0x33, 0x00, }) // MsAccessAce matches Microsoft Access dababase file. MsAccessAce = offset([]byte("Standard ACE DB"), 4) // MsAccessMdb matches legacy Microsoft Access database file (JET, 2003 and earlier). MsAccessMdb = offset([]byte("Standard Jet DB"), 4) )
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/internal/magic/video.go
vendor/github.com/gabriel-vasile/mimetype/internal/magic/video.go
package magic import ( "bytes" ) var ( // Flv matches a Flash video file. Flv = prefix([]byte("\x46\x4C\x56\x01")) // Asf matches an Advanced Systems Format file. Asf = prefix([]byte{ 0x30, 0x26, 0xB2, 0x75, 0x8E, 0x66, 0xCF, 0x11, 0xA6, 0xD9, 0x00, 0xAA, 0x00, 0x62, 0xCE, 0x6C, }) // Rmvb matches a RealMedia Variable Bitrate file. Rmvb = prefix([]byte{0x2E, 0x52, 0x4D, 0x46}) ) // WebM matches a WebM file. func WebM(raw []byte, limit uint32) bool { return isMatroskaFileTypeMatched(raw, "webm") } // Mkv matches a mkv file. func Mkv(raw []byte, limit uint32) bool { return isMatroskaFileTypeMatched(raw, "matroska") } // isMatroskaFileTypeMatched is used for webm and mkv file matching. // It checks for .Eߣ sequence. If the sequence is found, // then it means it is Matroska media container, including WebM. // Then it verifies which of the file type it is representing by matching the // file specific string. func isMatroskaFileTypeMatched(in []byte, flType string) bool { if bytes.HasPrefix(in, []byte("\x1A\x45\xDF\xA3")) { return isFileTypeNamePresent(in, flType) } return false } // isFileTypeNamePresent accepts the matroska input data stream and searches // for the given file type in the stream. Return whether a match is found. // The logic of search is: find first instance of \x42\x82 and then // search for given string after n bytes of above instance. 
func isFileTypeNamePresent(in []byte, flType string) bool { ind, maxInd, lenIn := 0, 4096, len(in) if lenIn < maxInd { // restricting length to 4096 maxInd = lenIn } ind = bytes.Index(in[:maxInd], []byte("\x42\x82")) if ind > 0 && lenIn > ind+2 { ind += 2 // filetype name will be present exactly // n bytes after the match of the two bytes "\x42\x82" n := vintWidth(int(in[ind])) if lenIn > ind+n { return bytes.HasPrefix(in[ind+n:], []byte(flType)) } } return false } // vintWidth parses the variable-integer width in matroska containers func vintWidth(v int) int { mask, max, num := 128, 8, 1 for num < max && v&mask == 0 { mask = mask >> 1 num++ } return num } // Mpeg matches a Moving Picture Experts Group file. func Mpeg(raw []byte, limit uint32) bool { return len(raw) > 3 && bytes.HasPrefix(raw, []byte{0x00, 0x00, 0x01}) && raw[3] >= 0xB0 && raw[3] <= 0xBF } // Avi matches an Audio Video Interleaved file. func Avi(raw []byte, limit uint32) bool { return len(raw) > 16 && bytes.Equal(raw[:4], []byte("RIFF")) && bytes.Equal(raw[8:16], []byte("AVI LIST")) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text_csv.go
vendor/github.com/gabriel-vasile/mimetype/internal/magic/text_csv.go
package magic import ( "bufio" "bytes" "encoding/csv" "errors" "io" "sync" ) // A bufio.Reader pool to alleviate problems with memory allocations. var readerPool = sync.Pool{ New: func() any { // Initiate with empty source reader. return bufio.NewReader(nil) }, } func newReader(r io.Reader) *bufio.Reader { br := readerPool.Get().(*bufio.Reader) br.Reset(r) return br } // Csv matches a comma-separated values file. func Csv(raw []byte, limit uint32) bool { return sv(raw, ',', limit) } // Tsv matches a tab-separated values file. func Tsv(raw []byte, limit uint32) bool { return sv(raw, '\t', limit) } func sv(in []byte, comma rune, limit uint32) bool { in = dropLastLine(in, limit) br := newReader(bytes.NewReader(in)) defer readerPool.Put(br) r := csv.NewReader(br) r.Comma = comma r.ReuseRecord = true r.LazyQuotes = true r.Comment = '#' lines := 0 for { _, err := r.Read() if errors.Is(err, io.EOF) { break } if err != nil { return false } lines++ } return r.FieldsPerRecord > 1 && lines > 1 } // dropLastLine drops the last incomplete line from b. // // mimetype limits itself to ReadLimit bytes when performing a detection. // This means, for file formats like CSV for NDJSON, the last line of the input // can be an incomplete line. func dropLastLine(b []byte, readLimit uint32) []byte { if readLimit == 0 || uint32(len(b)) < readLimit { return b } for i := len(b) - 1; i > 0; i-- { if b[i] == '\n' { return b[:i] } } return b }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/internal/magic/font.go
vendor/github.com/gabriel-vasile/mimetype/internal/magic/font.go
package magic import ( "bytes" ) var ( // Woff matches a Web Open Font Format file. Woff = prefix([]byte("wOFF")) // Woff2 matches a Web Open Font Format version 2 file. Woff2 = prefix([]byte("wOF2")) // Otf matches an OpenType font file. Otf = prefix([]byte{0x4F, 0x54, 0x54, 0x4F, 0x00}) ) // Ttf matches a TrueType font file. func Ttf(raw []byte, limit uint32) bool { if !bytes.HasPrefix(raw, []byte{0x00, 0x01, 0x00, 0x00}) { return false } return !MsAccessAce(raw, limit) && !MsAccessMdb(raw, limit) } // Eot matches an Embedded OpenType font file. func Eot(raw []byte, limit uint32) bool { return len(raw) > 35 && bytes.Equal(raw[34:36], []byte{0x4C, 0x50}) && (bytes.Equal(raw[8:11], []byte{0x02, 0x00, 0x01}) || bytes.Equal(raw[8:11], []byte{0x01, 0x00, 0x00}) || bytes.Equal(raw[8:11], []byte{0x02, 0x00, 0x02})) } // Ttc matches a TrueType Collection font file. func Ttc(raw []byte, limit uint32) bool { return len(raw) > 7 && bytes.HasPrefix(raw, []byte("ttcf")) && (bytes.Equal(raw[4:8], []byte{0x00, 0x01, 0x00, 0x00}) || bytes.Equal(raw[4:8], []byte{0x00, 0x02, 0x00, 0x00})) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go
vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go
package magic import ( "bytes" "debug/macho" "encoding/binary" ) var ( // Lnk matches Microsoft lnk binary format. Lnk = prefix([]byte{0x4C, 0x00, 0x00, 0x00, 0x01, 0x14, 0x02, 0x00}) // Wasm matches a web assembly File Format file. Wasm = prefix([]byte{0x00, 0x61, 0x73, 0x6D}) // Exe matches a Windows/DOS executable file. Exe = prefix([]byte{0x4D, 0x5A}) // Elf matches an Executable and Linkable Format file. Elf = prefix([]byte{0x7F, 0x45, 0x4C, 0x46}) // Nes matches a Nintendo Entertainment system ROM file. Nes = prefix([]byte{0x4E, 0x45, 0x53, 0x1A}) // SWF matches an Adobe Flash swf file. SWF = prefix([]byte("CWS"), []byte("FWS"), []byte("ZWS")) // Torrent has bencoded text in the beginning. Torrent = prefix([]byte("d8:announce")) // PAR1 matches a parquet file. Par1 = prefix([]byte{0x50, 0x41, 0x52, 0x31}) // CBOR matches a Concise Binary Object Representation https://cbor.io/ CBOR = prefix([]byte{0xD9, 0xD9, 0xF7}) ) // Java bytecode and Mach-O binaries share the same magic number. // More info here https://github.com/threatstack/libmagic/blob/master/magic/Magdir/cafebabe func classOrMachOFat(in []byte) bool { // There should be at least 8 bytes for both of them because the only way to // quickly distinguish them is by comparing byte at position 7 if len(in) < 8 { return false } return binary.BigEndian.Uint32(in) == macho.MagicFat } // Class matches a java class file. func Class(raw []byte, limit uint32) bool { return classOrMachOFat(raw) && raw[7] > 30 } // MachO matches Mach-O binaries format. func MachO(raw []byte, limit uint32) bool { if classOrMachOFat(raw) && raw[7] < 0x14 { return true } if len(raw) < 4 { return false } be := binary.BigEndian.Uint32(raw) le := binary.LittleEndian.Uint32(raw) return be == macho.Magic32 || le == macho.Magic32 || be == macho.Magic64 || le == macho.Magic64 } // Dbf matches a dBase file. 
// https://www.dbase.com/Knowledgebase/INT/db7_file_fmt.htm func Dbf(raw []byte, limit uint32) bool { if len(raw) < 68 { return false } // 3rd and 4th bytes contain the last update month and day of month. if !(0 < raw[2] && raw[2] < 13 && 0 < raw[3] && raw[3] < 32) { return false } // 12, 13, 30, 31 are reserved bytes and always filled with 0x00. if raw[12] != 0x00 || raw[13] != 0x00 || raw[30] != 0x00 || raw[31] != 0x00 { return false } // Production MDX flag; // 0x01 if a production .MDX file exists for this table; // 0x00 if no .MDX file exists. if raw[28] > 0x01 { return false } // dbf type is dictated by the first byte. dbfTypes := []byte{ 0x02, 0x03, 0x04, 0x05, 0x30, 0x31, 0x32, 0x42, 0x62, 0x7B, 0x82, 0x83, 0x87, 0x8A, 0x8B, 0x8E, 0xB3, 0xCB, 0xE5, 0xF5, 0xF4, 0xFB, } for _, b := range dbfTypes { if raw[0] == b { return true } } return false } // ElfObj matches an object file. func ElfObj(raw []byte, limit uint32) bool { return len(raw) > 17 && ((raw[16] == 0x01 && raw[17] == 0x00) || (raw[16] == 0x00 && raw[17] == 0x01)) } // ElfExe matches an executable file. func ElfExe(raw []byte, limit uint32) bool { return len(raw) > 17 && ((raw[16] == 0x02 && raw[17] == 0x00) || (raw[16] == 0x00 && raw[17] == 0x02)) } // ElfLib matches a shared library file. func ElfLib(raw []byte, limit uint32) bool { return len(raw) > 17 && ((raw[16] == 0x03 && raw[17] == 0x00) || (raw[16] == 0x00 && raw[17] == 0x03)) } // ElfDump matches a core dump file. func ElfDump(raw []byte, limit uint32) bool { return len(raw) > 17 && ((raw[16] == 0x04 && raw[17] == 0x00) || (raw[16] == 0x00 && raw[17] == 0x04)) } // Dcm matches a DICOM medical format file. func Dcm(raw []byte, limit uint32) bool { return len(raw) > 131 && bytes.Equal(raw[128:132], []byte{0x44, 0x49, 0x43, 0x4D}) } // Marc matches a MARC21 (MAchine-Readable Cataloging) file. func Marc(raw []byte, limit uint32) bool { // File is at least 24 bytes ("leader" field size). 
if len(raw) < 24 { return false } // Fixed bytes at offset 20. if !bytes.Equal(raw[20:24], []byte("4500")) { return false } // First 5 bytes are ASCII digits. for i := 0; i < 5; i++ { if raw[i] < '0' || raw[i] > '9' { return false } } // Field terminator is present in first 2048 bytes. return bytes.Contains(raw[:min(2048, len(raw))], []byte{0x1E}) } // Glb matches a glTF model format file. // GLB is the binary file format representation of 3D models saved in // the GL transmission Format (glTF). // GLB uses little endian and its header structure is as follows: // // <-- 12-byte header --> // | magic | version | length | // | (uint32) | (uint32) | (uint32) | // | \x67\x6C\x54\x46 | \x01\x00\x00\x00 | ... | // | g l T F | 1 | ... | // // Visit [glTF specification] and [IANA glTF entry] for more details. // // [glTF specification]: https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html // [IANA glTF entry]: https://www.iana.org/assignments/media-types/model/gltf-binary var Glb = prefix([]byte("\x67\x6C\x54\x46\x02\x00\x00\x00"), []byte("\x67\x6C\x54\x46\x01\x00\x00\x00")) // TzIf matches a Time Zone Information Format (TZif) file. // See more: https://tools.ietf.org/id/draft-murchison-tzdist-tzif-00.html#rfc.section.3 // Its header structure is shown below: // // +---------------+---+ // | magic (4) | <-+-- version (1) // +---------------+---+---------------------------------------+ // | [unused - reserved for future use] (15) | // +---------------+---------------+---------------+-----------+ // | isutccnt (4) | isstdcnt (4) | leapcnt (4) | // +---------------+---------------+---------------+ // | timecnt (4) | typecnt (4) | charcnt (4) | func TzIf(raw []byte, limit uint32) bool { // File is at least 44 bytes (header size). if len(raw) < 44 { return false } if !bytes.HasPrefix(raw, []byte("TZif")) { return false } // Field "typecnt" MUST not be zero. 
if binary.BigEndian.Uint32(raw[36:40]) == 0 { return false } // Version has to be NUL (0x00), '2' (0x32) or '3' (0x33). return raw[4] == 0x00 || raw[4] == 0x32 || raw[4] == 0x33 }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go
vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go
// Package magic holds the matching functions used to find MIME types. package magic import ( "bytes" "fmt" ) type ( // Detector receiveѕ the raw data of a file and returns whether the data // meets any conditions. The limit parameter is an upper limit to the number // of bytes received and is used to tell if the byte slice represents the // whole file or is just the header of a file: len(raw) < limit or len(raw)>limit. Detector func(raw []byte, limit uint32) bool xmlSig struct { // the local name of the root tag localName []byte // the namespace of the XML document xmlns []byte } ) // prefix creates a Detector which returns true if any of the provided signatures // is the prefix of the raw input. func prefix(sigs ...[]byte) Detector { return func(raw []byte, limit uint32) bool { for _, s := range sigs { if bytes.HasPrefix(raw, s) { return true } } return false } } // offset creates a Detector which returns true if the provided signature can be // found at offset in the raw input. func offset(sig []byte, offset int) Detector { return func(raw []byte, limit uint32) bool { return len(raw) > offset && bytes.HasPrefix(raw[offset:], sig) } } // ciPrefix is like prefix but the check is case insensitive. func ciPrefix(sigs ...[]byte) Detector { return func(raw []byte, limit uint32) bool { for _, s := range sigs { if ciCheck(s, raw) { return true } } return false } } func ciCheck(sig, raw []byte) bool { if len(raw) < len(sig)+1 { return false } // perform case insensitive check for i, b := range sig { db := raw[i] if 'A' <= b && b <= 'Z' { db &= 0xDF } if b != db { return false } } return true } // xml creates a Detector which returns true if any of the provided XML signatures // matches the raw input. 
func xml(sigs ...xmlSig) Detector { return func(raw []byte, limit uint32) bool { raw = trimLWS(raw) if len(raw) == 0 { return false } for _, s := range sigs { if xmlCheck(s, raw) { return true } } return false } } func xmlCheck(sig xmlSig, raw []byte) bool { raw = raw[:min(len(raw), 512)] if len(sig.localName) == 0 { return bytes.Index(raw, sig.xmlns) > 0 } if len(sig.xmlns) == 0 { return bytes.Index(raw, sig.localName) > 0 } localNameIndex := bytes.Index(raw, sig.localName) return localNameIndex != -1 && localNameIndex < bytes.Index(raw, sig.xmlns) } // markup creates a Detector which returns true is any of the HTML signatures // matches the raw input. func markup(sigs ...[]byte) Detector { return func(raw []byte, limit uint32) bool { if bytes.HasPrefix(raw, []byte{0xEF, 0xBB, 0xBF}) { // We skip the UTF-8 BOM if present to ensure we correctly // process any leading whitespace. The presence of the BOM // is taken into account during charset detection in charset.go. raw = trimLWS(raw[3:]) } else { raw = trimLWS(raw) } if len(raw) == 0 { return false } for _, s := range sigs { if markupCheck(s, raw) { return true } } return false } } func markupCheck(sig, raw []byte) bool { if len(raw) < len(sig)+1 { return false } // perform case insensitive check for i, b := range sig { db := raw[i] if 'A' <= b && b <= 'Z' { db &= 0xDF } if b != db { return false } } // Next byte must be space or right angle bracket. if db := raw[len(sig)]; db != ' ' && db != '>' { return false } return true } // ftyp creates a Detector which returns true if any of the FTYP signatures // matches the raw input. 
// ftyp builds a Detector for ISO-BMFF style files: the brand at bytes [8:12]
// (after the 4-byte box size and the "ftyp" tag) must equal one of sigs.
func ftyp(sigs ...[]byte) Detector {
	return func(raw []byte, limit uint32) bool {
		if len(raw) < 12 {
			return false
		}
		for _, s := range sigs {
			if bytes.Equal(raw[8:12], s) {
				return true
			}
		}
		return false
	}
}

// newXMLSig builds an xmlSig; a non-empty localName is stored with a leading
// '<' so it matches the opening tag exactly.
func newXMLSig(localName, xmlns string) xmlSig {
	ret := xmlSig{xmlns: []byte(xmlns)}
	if localName != "" {
		ret.localName = []byte(fmt.Sprintf("<%s", localName))
	}

	return ret
}

// A valid shebang starts with the "#!" characters,
// followed by any number of spaces,
// followed by the path to the interpreter,
// and, optionally, followed by the arguments for the interpreter.
//
// Ex:
//
//	#! /usr/bin/env php
//
// /usr/bin/env is the interpreter, php is the first and only argument.
func shebang(sigs ...[]byte) Detector {
	return func(raw []byte, limit uint32) bool {
		for _, s := range sigs {
			if shebangCheck(s, firstLine(raw)) {
				return true
			}
		}
		return false
	}
}

// shebangCheck reports whether raw is a "#!" line whose interpreter (after
// trimming surrounding whitespace) equals sig exactly.
func shebangCheck(sig, raw []byte) bool {
	if len(raw) < len(sig)+2 {
		return false
	}
	if raw[0] != '#' || raw[1] != '!' {
		return false
	}

	return bytes.Equal(trimLWS(trimRWS(raw[2:])), sig)
}

// trimLWS trims whitespace from beginning of the input.
func trimLWS(in []byte) []byte {
	firstNonWS := 0
	for ; firstNonWS < len(in) && isWS(in[firstNonWS]); firstNonWS++ {
	}

	return in[firstNonWS:]
}

// trimRWS trims whitespace from the end of the input.
func trimRWS(in []byte) []byte {
	lastNonWS := len(in) - 1
	for ; lastNonWS > 0 && isWS(in[lastNonWS]); lastNonWS-- {
	}

	return in[:lastNonWS+1]
}

// firstLine returns the input up to (but excluding) the first '\n',
// or the whole input when no newline is present.
func firstLine(in []byte) []byte {
	lineEnd := 0
	for ; lineEnd < len(in) && in[lineEnd] != '\n'; lineEnd++ {
	}

	return in[:lineEnd]
}

// isWS reports whether b is an ASCII whitespace byte (tab, LF, FF, CR, space).
func isWS(b byte) bool {
	return b == '\t' || b == '\n' || b == '\x0c' || b == '\r' || b == ' '
}

// min returns the smaller of a and b.
// NOTE(review): shadows the Go 1.21 builtin; kept for compatibility with
// older toolchains used by this vendored module.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// readBuf is a cursor over a byte slice; advance consumes bytes from the front.
type readBuf []byte

// advance moves the buffer forward by n bytes, reporting false (and leaving
// the buffer unchanged) when n is negative or exceeds the remaining length.
func (b *readBuf) advance(n int) bool {
	if n < 0 || len(*b) < n {
		return false
	}
	*b = (*b)[n:]
	return true
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go
vendor/github.com/gabriel-vasile/mimetype/internal/magic/geo.go
package magic

import (
	"bytes"
	"encoding/binary"
)

// Shp matches a shape format file.
// https://www.esri.com/library/whitepapers/pdfs/shapefile.pdf
//
// The header check requires at least 112 bytes: file code 9994 (big-endian)
// at offset 0, five reserved zero words, version 1000 (little-endian) at
// offset 28, and a known shape type (little-endian) at offset 108.
func Shp(raw []byte, limit uint32) bool {
	if len(raw) < 112 {
		return false
	}

	if !(binary.BigEndian.Uint32(raw[0:4]) == 9994 &&
		binary.BigEndian.Uint32(raw[4:8]) == 0 &&
		binary.BigEndian.Uint32(raw[8:12]) == 0 &&
		binary.BigEndian.Uint32(raw[12:16]) == 0 &&
		binary.BigEndian.Uint32(raw[16:20]) == 0 &&
		binary.BigEndian.Uint32(raw[20:24]) == 0 &&
		binary.LittleEndian.Uint32(raw[28:32]) == 1000) {
		return false
	}

	shapeTypes := []int{
		0,  // Null shape
		1,  // Point
		3,  // Polyline
		5,  // Polygon
		8,  // MultiPoint
		11, // PointZ
		13, // PolylineZ
		15, // PolygonZ
		18, // MultiPointZ
		21, // PointM
		23, // PolylineM
		25, // PolygonM
		28, // MultiPointM
		31, // MultiPatch
	}

	for _, st := range shapeTypes {
		if st == int(binary.LittleEndian.Uint32(raw[108:112])) {
			return true
		}
	}

	return false
}

// Shx matches a shape index format file.
// https://www.esri.com/library/whitepapers/pdfs/shapefile.pdf
// NOTE(review): only the 4-byte big-endian file code 9994 (0x0000270A) is
// checked here, unlike Shp which validates the full header.
func Shx(raw []byte, limit uint32) bool {
	return bytes.HasPrefix(raw, []byte{0x00, 0x00, 0x27, 0x0A})
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/gabriel-vasile/mimetype/internal/magic/audio.go
vendor/github.com/gabriel-vasile/mimetype/internal/magic/audio.go
package magic

import (
	"bytes"
	"encoding/binary"
)

var (
	// Flac matches a Free Lossless Audio Codec file.
	Flac = prefix([]byte("\x66\x4C\x61\x43\x00\x00\x00\x22"))
	// Midi matches a Musical Instrument Digital Interface file.
	Midi = prefix([]byte("\x4D\x54\x68\x64"))
	// Ape matches a Monkey's Audio file.
	Ape = prefix([]byte("\x4D\x41\x43\x20\x96\x0F\x00\x00\x34\x00\x00\x00\x18\x00\x00\x00\x90\xE3"))
	// MusePack matches a Musepack file.
	MusePack = prefix([]byte("MPCK"))
	// Au matches a Sun Microsystems au file.
	Au = prefix([]byte("\x2E\x73\x6E\x64"))
	// Amr matches an Adaptive Multi-Rate file.
	Amr = prefix([]byte("\x23\x21\x41\x4D\x52"))
	// Voc matches a Creative Voice file.
	Voc = prefix([]byte("Creative Voice File"))
	// M3u matches a Playlist file.
	M3u = prefix([]byte("#EXTM3U"))
	// AAC matches an Advanced Audio Coding file.
	AAC = prefix([]byte{0xFF, 0xF1}, []byte{0xFF, 0xF9})
)

// Mp3 matches an mp3 file.
func Mp3(raw []byte, limit uint32) bool {
	if len(raw) < 3 {
		return false
	}

	if bytes.HasPrefix(raw, []byte("ID3")) {
		// MP3s with an ID3v2 tag will start with "ID3"
		// ID3v1 tags, however appear at the end of the file.
		return true
	}

	// Match MP3 files without tags.
	// The &0xFFFE mask ignores the lowest header bit so each case covers
	// both values of that bit.
	switch binary.BigEndian.Uint16(raw[:2]) & 0xFFFE {
	case 0xFFFA:
		// MPEG ADTS, layer III, v1
		return true
	case 0xFFF2:
		// MPEG ADTS, layer III, v2
		return true
	case 0xFFE2:
		// MPEG ADTS, layer III, v2.5
		return true
	}

	return false
}

// Wav matches a Waveform Audio File Format file.
// RIFF container with form type "WAVE" at bytes [8:12].
func Wav(raw []byte, limit uint32) bool {
	return len(raw) > 12 &&
		bytes.Equal(raw[:4], []byte("RIFF")) &&
		bytes.Equal(raw[8:12], []byte{0x57, 0x41, 0x56, 0x45})
}

// Aiff matches Audio Interchange File Format file.
// "FORM" container with form type "AIFF" at bytes [8:12].
func Aiff(raw []byte, limit uint32) bool {
	return len(raw) > 12 &&
		bytes.Equal(raw[:4], []byte{0x46, 0x4F, 0x52, 0x4D}) &&
		bytes.Equal(raw[8:12], []byte{0x41, 0x49, 0x46, 0x46})
}

// Qcp matches a Qualcomm Pure Voice file.
func Qcp(raw []byte, limit uint32) bool {
	// A QCP file is a RIFF container whose form type at bytes [8:12] is
	// "QLCM". At least 13 bytes are required: 4 (container tag) +
	// 4 (chunk size) + 4 (form type) + 1.
	if len(raw) < 13 {
		return false
	}
	containerTag := raw[:4]
	formType := raw[8:12]
	return bytes.Equal(containerTag, []byte("RIFF")) &&
		bytes.Equal(formType, []byte("QLCM"))
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/klauspost/cpuid/v2/cpuid.go
vendor/github.com/klauspost/cpuid/v2/cpuid.go
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. // Package cpuid provides information about the CPU running the current program. // // CPU features are detected on startup, and kept for fast access through the life of the application. // Currently x86 / x64 (AMD64) as well as arm64 is supported. // // You can access the CPU information by accessing the shared CPU variable of the cpuid library. // // Package home: https://github.com/klauspost/cpuid package cpuid import ( "flag" "fmt" "math" "math/bits" "os" "runtime" "strings" ) // AMD refererence: https://www.amd.com/system/files/TechDocs/25481.pdf // and Processor Programming Reference (PPR) // Vendor is a representation of a CPU vendor. type Vendor int const ( VendorUnknown Vendor = iota Intel AMD VIA Transmeta NSC KVM // Kernel-based Virtual Machine MSVM // Microsoft Hyper-V or Windows Virtual PC VMware XenHVM Bhyve Hygon SiS RDC Ampere ARM Broadcom Cavium DEC Fujitsu Infineon Motorola NVIDIA AMCC Qualcomm Marvell QEMU QNX ACRN SRE Apple lastVendor ) //go:generate stringer -type=FeatureID,Vendor // FeatureID is the ID of a specific cpu feature. 
type FeatureID int const ( // Keep index -1 as unknown UNKNOWN = -1 // x86 features ADX FeatureID = iota // Intel ADX (Multi-Precision Add-Carry Instruction Extensions) AESNI // Advanced Encryption Standard New Instructions AMD3DNOW // AMD 3DNOW AMD3DNOWEXT // AMD 3DNowExt AMXBF16 // Tile computational operations on BFLOAT16 numbers AMXFP16 // Tile computational operations on FP16 numbers AMXINT8 // Tile computational operations on 8-bit integers AMXFP8 // Tile computational operations on FP8 numbers AMXTILE // Tile architecture AMXTF32 // Tile architecture AMXCOMPLEX // Matrix Multiplication of TF32 Tiles into Packed Single Precision Tile APX_F // Intel APX AVX // AVX functions AVX10 // If set the Intel AVX10 Converged Vector ISA is supported AVX10_128 // If set indicates that AVX10 128-bit vector support is present AVX10_256 // If set indicates that AVX10 256-bit vector support is present AVX10_512 // If set indicates that AVX10 512-bit vector support is present AVX2 // AVX2 functions AVX512BF16 // AVX-512 BFLOAT16 Instructions AVX512BITALG // AVX-512 Bit Algorithms AVX512BW // AVX-512 Byte and Word Instructions AVX512CD // AVX-512 Conflict Detection Instructions AVX512DQ // AVX-512 Doubleword and Quadword Instructions AVX512ER // AVX-512 Exponential and Reciprocal Instructions AVX512F // AVX-512 Foundation AVX512FP16 // AVX-512 FP16 Instructions AVX512IFMA // AVX-512 Integer Fused Multiply-Add Instructions AVX512PF // AVX-512 Prefetch Instructions AVX512VBMI // AVX-512 Vector Bit Manipulation Instructions AVX512VBMI2 // AVX-512 Vector Bit Manipulation Instructions, Version 2 AVX512VL // AVX-512 Vector Length Extensions AVX512VNNI // AVX-512 Vector Neural Network Instructions AVX512VP2INTERSECT // AVX-512 Intersect for D/Q AVX512VPOPCNTDQ // AVX-512 Vector Population Count Doubleword and Quadword AVXIFMA // AVX-IFMA instructions AVXNECONVERT // AVX-NE-CONVERT instructions AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one AVXVNNI // AVX 
(VEX encoded) VNNI neural network instructions AVXVNNIINT8 // AVX-VNNI-INT8 instructions AVXVNNIINT16 // AVX-VNNI-INT16 instructions BHI_CTRL // Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 BMI1 // Bit Manipulation Instruction Set 1 BMI2 // Bit Manipulation Instruction Set 2 CETIBT // Intel CET Indirect Branch Tracking CETSS // Intel CET Shadow Stack CLDEMOTE // Cache Line Demote CLMUL // Carry-less Multiplication CLZERO // CLZERO instruction supported CMOV // i686 CMOV CMPCCXADD // CMPCCXADD instructions CMPSB_SCADBS_SHORT // Fast short CMPSB and SCASB CMPXCHG8 // CMPXCHG8 instruction CPBOOST // Core Performance Boost CPPC // AMD: Collaborative Processor Performance Control CX16 // CMPXCHG16B Instruction EFER_LMSLE_UNS // AMD: =Core::X86::Msr::EFER[LMSLE] is not supported, and MBZ ENQCMD // Enqueue Command ERMS // Enhanced REP MOVSB/STOSB F16C // Half-precision floating-point conversion FLUSH_L1D // Flush L1D cache FMA3 // Intel FMA 3. Does not imply AVX. FMA4 // Bulldozer FMA4 functions FP128 // AMD: When set, the internal FP/SIMD execution datapath is no more than 128-bits wide FP256 // AMD: When set, the internal FP/SIMD execution datapath is no more than 256-bits wide FSRM // Fast Short Rep Mov FXSR // FXSAVE, FXRESTOR instructions, CR4 bit 9 FXSROPT // FXSAVE/FXRSTOR optimizations GFNI // Galois Field New Instructions. May require other features (AVX, AVX512VL,AVX512F) based on usage. HLE // Hardware Lock Elision HRESET // If set CPU supports history reset and the IA32_HRESET_ENABLE MSR HTT // Hyperthreading (enabled) HWA // Hardware assert supported. Indicates support for MSRC001_10 HYBRID_CPU // This part has CPUs of more than one type. 
HYPERVISOR // This bit has been reserved by Intel & AMD for use by hypervisors IA32_ARCH_CAP // IA32_ARCH_CAPABILITIES MSR (Intel) IA32_CORE_CAP // IA32_CORE_CAPABILITIES MSR IBPB // Indirect Branch Restricted Speculation (IBRS) and Indirect Branch Predictor Barrier (IBPB) IBPB_BRTYPE // Indicates that MSR 49h (PRED_CMD) bit 0 (IBPB) flushes all branch type predictions from the CPU branch predictor IBRS // AMD: Indirect Branch Restricted Speculation IBRS_PREFERRED // AMD: IBRS is preferred over software solution IBRS_PROVIDES_SMP // AMD: IBRS provides Same Mode Protection IBS // Instruction Based Sampling (AMD) IBSBRNTRGT // Instruction Based Sampling Feature (AMD) IBSFETCHSAM // Instruction Based Sampling Feature (AMD) IBSFFV // Instruction Based Sampling Feature (AMD) IBSOPCNT // Instruction Based Sampling Feature (AMD) IBSOPCNTEXT // Instruction Based Sampling Feature (AMD) IBSOPSAM // Instruction Based Sampling Feature (AMD) IBSRDWROPCNT // Instruction Based Sampling Feature (AMD) IBSRIPINVALIDCHK // Instruction Based Sampling Feature (AMD) IBS_FETCH_CTLX // AMD: IBS fetch control extended MSR supported IBS_OPDATA4 // AMD: IBS op data 4 MSR supported IBS_OPFUSE // AMD: Indicates support for IbsOpFuse IBS_PREVENTHOST // Disallowing IBS use by the host supported IBS_ZEN4 // AMD: Fetch and Op IBS support IBS extensions added with Zen4 IDPRED_CTRL // IPRED_DIS INT_WBINVD // WBINVD/WBNOINVD are interruptible. INVLPGB // NVLPGB and TLBSYNC instruction supported KEYLOCKER // Key locker KEYLOCKERW // Key locker wide LAHF // LAHF/SAHF in long mode LAM // If set, CPU supports Linear Address Masking LBRVIRT // LBR virtualization LZCNT // LZCNT instruction MCAOVERFLOW // MCA overflow recovery support. MCDT_NO // Processor do not exhibit MXCSR Configuration Dependent Timing behavior and do not need to mitigate it. 
MCOMMIT // MCOMMIT instruction supported MD_CLEAR // VERW clears CPU buffers MMX // standard MMX MMXEXT // SSE integer functions or AMD MMX ext MOVBE // MOVBE instruction (big-endian) MOVDIR64B // Move 64 Bytes as Direct Store MOVDIRI // Move Doubleword as Direct Store MOVSB_ZL // Fast Zero-Length MOVSB MOVU // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD MPX // Intel MPX (Memory Protection Extensions) MSRIRC // Instruction Retired Counter MSR available MSRLIST // Read/Write List of Model Specific Registers MSR_PAGEFLUSH // Page Flush MSR available NRIPS // Indicates support for NRIP save on VMEXIT NX // NX (No-Execute) bit OSXSAVE // XSAVE enabled by OS PCONFIG // PCONFIG for Intel Multi-Key Total Memory Encryption POPCNT // POPCNT instruction PPIN // AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled PREFETCHI // PREFETCHIT0/1 instructions PSFD // Predictive Store Forward Disable RDPRU // RDPRU instruction supported RDRAND // RDRAND instruction is available RDSEED // RDSEED instruction is available RDTSCP // RDTSCP Instruction RRSBA_CTRL // Restricted RSB Alternate RTM // Restricted Transactional Memory RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort. 
SBPB // Indicates support for the Selective Branch Predictor Barrier SERIALIZE // Serialize Instruction Execution SEV // AMD Secure Encrypted Virtualization supported SEV_64BIT // AMD SEV guest execution only allowed from a 64-bit host SEV_ALTERNATIVE // AMD SEV Alternate Injection supported SEV_DEBUGSWAP // Full debug state swap supported for SEV-ES guests SEV_ES // AMD SEV Encrypted State supported SEV_RESTRICTED // AMD SEV Restricted Injection supported SEV_SNP // AMD SEV Secure Nested Paging supported SGX // Software Guard Extensions SGXLC // Software Guard Extensions Launch Control SHA // Intel SHA Extensions SME // AMD Secure Memory Encryption supported SME_COHERENT // AMD Hardware cache coherency across encryption domains enforced SPEC_CTRL_SSBD // Speculative Store Bypass Disable SRBDS_CTRL // SRBDS mitigation MSR available SRSO_MSR_FIX // Indicates that software may use MSR BP_CFG[BpSpecReduce] to mitigate SRSO. SRSO_NO // Indicates the CPU is not subject to the SRSO vulnerability SRSO_USER_KERNEL_NO // Indicates the CPU is not subject to the SRSO vulnerability across user/kernel boundaries SSE // SSE functions SSE2 // P4 SSE functions SSE3 // Prescott SSE3 functions SSE4 // Penryn SSE4.1 functions SSE42 // Nehalem SSE4.2 functions SSE4A // AMD Barcelona microarchitecture SSE4a instructions SSSE3 // Conroe SSSE3 functions STIBP // Single Thread Indirect Branch Predictors STIBP_ALWAYSON // AMD: Single Thread Indirect Branch Prediction Mode has Enhanced Performance and may be left Always On STOSB_SHORT // Fast short STOSB SUCCOR // Software uncorrectable error containment and recovery capability. SVM // AMD Secure Virtual Machine SVMDA // Indicates support for the SVM decode assists. SVMFBASID // SVM, Indicates that TLB flush events, including CR3 writes and CR4.PGE toggles, flush only the current ASID's TLB entries. Also indicates support for the extended VMCBTLB_Control SVML // AMD SVM lock. Indicates support for SVM-Lock. 
SVMNP // AMD SVM nested paging SVMPF // SVM pause intercept filter. Indicates support for the pause intercept filter SVMPFT // SVM PAUSE filter threshold. Indicates support for the PAUSE filter cycle count threshold SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions. SYSEE // SYSENTER and SYSEXIT instructions TBM // AMD Trailing Bit Manipulation TDX_GUEST // Intel Trust Domain Extensions Guest TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. TSCRATEMSR // MSR based TSC rate control. Indicates support for MSR TSC ratio MSRC000_0104 TSXLDTRK // Intel TSX Suspend Load Address Tracking VAES // Vector AES. AVX(512) versions requires additional checks. VMCBCLEAN // VMCB clean bits. Indicates support for VMCB clean bits. VMPL // AMD VM Permission Levels supported VMSA_REGPROT // AMD VMSA Register Protection supported VMX // Virtual Machine Extensions VPCLMULQDQ // Carry-Less Multiplication Quadword. Requires AVX for 3 register versions. VTE // AMD Virtual Transparent Encryption supported WAITPKG // TPAUSE, UMONITOR, UMWAIT WBNOINVD // Write Back and Do Not Invalidate Cache WRMSRNS // Non-Serializing Write to Model Specific Register X87 // FPU XGETBV1 // Supports XGETBV with ECX = 1 XOP // Bulldozer XOP functions XSAVE // XSAVE, XRESTOR, XSETBV, XGETBV XSAVEC // Supports XSAVEC and the compacted form of XRSTOR. 
XSAVEOPT // XSAVEOPT available XSAVES // Supports XSAVES/XRSTORS and IA32_XSS // ARM features: AESARM // AES instructions ARMCPUID // Some CPU ID registers readable at user-level ASIMD // Advanced SIMD ASIMDDP // SIMD Dot Product ASIMDHP // Advanced SIMD half-precision floating point ASIMDRDM // Rounding Double Multiply Accumulate/Subtract (SQRDMLAH/SQRDMLSH) ATOMICS // Large System Extensions (LSE) CRC32 // CRC32/CRC32C instructions DCPOP // Data cache clean to Point of Persistence (DC CVAP) EVTSTRM // Generic timer FCMA // Floatin point complex number addition and multiplication FHM // FMLAL and FMLSL instructions FP // Single-precision and double-precision floating point FPHP // Half-precision floating point GPA // Generic Pointer Authentication JSCVT // Javascript-style double->int convert (FJCVTZS) LRCPC // Weaker release consistency (LDAPR, etc) PMULL // Polynomial Multiply instructions (PMULL/PMULL2) RNDR // Random Number instructions TLB // Outer Shareable and TLB range maintenance instructions TS // Flag manipulation instructions SHA1 // SHA-1 instructions (SHA1C, etc) SHA2 // SHA-2 instructions (SHA256H, etc) SHA3 // SHA-3 instructions (EOR3, RAXI, XAR, BCAX) SHA512 // SHA512 instructions SM3 // SM3 instructions SM4 // SM4 instructions SVE // Scalable Vector Extension // Keep it last. It automatically defines the size of []flagSet lastID firstID FeatureID = UNKNOWN + 1 ) // CPUInfo contains information about the detected system CPU. type CPUInfo struct { BrandName string // Brand name reported by the CPU VendorID Vendor // Comparable CPU vendor ID VendorString string // Raw vendor string. HypervisorVendorID Vendor // Hypervisor vendor HypervisorVendorString string // Raw hypervisor vendor string featureSet flagSet // Features of the CPU PhysicalCores int // Number of physical processor cores in your CPU. Will be 0 if undetectable. ThreadsPerCore int // Number of threads per physical core. Will be 1 if undetectable. 
LogicalCores int // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable. Family int // CPU family number Model int // CPU model number Stepping int // CPU stepping info CacheLine int // Cache line size in bytes. Will be 0 if undetectable. Hz int64 // Clock speed, if known, 0 otherwise. Will attempt to contain base clock speed. BoostFreq int64 // Max clock speed, if known, 0 otherwise Cache struct { L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected L2 int // L2 Cache (per core or shared). Will be -1 if undetected L3 int // L3 Cache (per core, per ccx or shared). Will be -1 if undetected } SGX SGXSupport AMDMemEncryption AMDMemEncryptionSupport AVX10Level uint8 maxFunc uint32 maxExFunc uint32 } var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) var xgetbv func(index uint32) (eax, edx uint32) var rdtscpAsm func() (eax, ebx, ecx, edx uint32) var darwinHasAVX512 = func() bool { return false } // CPU contains information about the CPU as detected on startup, // or when Detect last was called. // // Use this as the primary entry point to you data. var CPU CPUInfo func init() { initCPU() Detect() } // Detect will re-detect current CPU info. // This will replace the content of the exported CPU variable. // // Unless you expect the CPU to change while you are running your program // you should not need to call this function. // If you call this, you must ensure that no other goroutine is accessing the // exported CPU variable. 
func Detect() {
	// Set defaults
	CPU.ThreadsPerCore = 1
	CPU.Cache.L1I = -1
	CPU.Cache.L1D = -1
	CPU.Cache.L2 = -1
	CPU.Cache.L3 = -1
	// ARM detection is opt-in via the -cpu.arm flag because it can crash on
	// some OSes (see DetectARM).
	safe := true
	if detectArmFlag != nil {
		safe = !*detectArmFlag
	}
	addInfo(&CPU, safe)
	if displayFeats != nil && *displayFeats {
		fmt.Println("cpu features:", strings.Join(CPU.FeatureSet(), ","))
		// Exit with non-zero so tests will print value.
		os.Exit(1)
	}
	if disableFlag != nil {
		s := strings.Split(*disableFlag, ",")
		for _, feat := range s {
			feat := ParseFeature(strings.TrimSpace(feat))
			if feat != UNKNOWN {
				CPU.featureSet.unset(feat)
			}
		}
	}
}

// DetectARM will detect ARM64 features.
// This is NOT done automatically since it can potentially crash
// if the OS does not handle the command.
// If in the future this can be done safely this function may not
// do anything.
func DetectARM() {
	addInfo(&CPU, false)
}

// Flag values registered by Flags(); nil until Flags is called.
var detectArmFlag *bool
var displayFeats *bool
var disableFlag *string

// Flags will enable flags.
// This must be called *before* flag.Parse AND
// Detect must be called after the flags have been parsed.
// Note that this means that any detection used in init() functions
// will not contain these flags.
func Flags() {
	disableFlag = flag.String("cpu.disable", "", "disable cpu features; comma separated list")
	displayFeats = flag.Bool("cpu.features", false, "lists cpu features and exits")
	detectArmFlag = flag.Bool("cpu.arm", false, "allow ARM features to be detected; can potentially crash")
}

// Supports returns whether the CPU supports all of the requested features.
func (c CPUInfo) Supports(ids ...FeatureID) bool {
	for _, id := range ids {
		if !c.featureSet.inSet(id) {
			return false
		}
	}
	return true
}

// Has allows for checking a single feature.
// Should be inlined by the compiler.
func (c *CPUInfo) Has(id FeatureID) bool {
	return c.featureSet.inSet(id)
}

// AnyOf returns whether the CPU supports one or more of the requested features.
func (c CPUInfo) AnyOf(ids ...FeatureID) bool {
	for _, id := range ids {
		if c.featureSet.inSet(id) {
			return true
		}
	}
	return false
}

// Features contains several features combined for a fast check using
// CpuInfo.HasAll
type Features *flagSet

// CombineFeatures allows to combine several features for a close to constant time lookup.
func CombineFeatures(ids ...FeatureID) Features {
	var v flagSet
	for _, id := range ids {
		v.set(id)
	}
	return &v
}

// HasAll reports whether every feature in f is present on this CPU.
func (c *CPUInfo) HasAll(f Features) bool {
	return c.featureSet.hasSetP(f)
}

// https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
// oneOfLevel gates level detection: either SYSENTER/SYSEXIT or
// SYSCALL/SYSRET must be available for any x86-64 level to apply.
var oneOfLevel = CombineFeatures(SYSEE, SYSCALL)
var level1Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2)
var level2Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3)
var level3Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE)
var level4Features = CombineFeatures(CMOV, CMPXCHG8, X87, FXSR, MMX, SSE, SSE2, CX16, LAHF, POPCNT, SSE3, SSE4, SSE42, SSSE3, AVX, AVX2, BMI1, BMI2, F16C, FMA3, LZCNT, MOVBE, OSXSAVE, AVX512F, AVX512BW, AVX512CD, AVX512DQ, AVX512VL)

// X64Level returns the microarchitecture level detected on the CPU.
// If features are lacking or non x64 mode, 0 is returned.
// See https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels
func (c CPUInfo) X64Level() int {
	if !c.featureSet.hasOneOf(oneOfLevel) {
		return 0
	}
	// Check from the highest level downwards; levels are cumulative.
	if c.featureSet.hasSetP(level4Features) {
		return 4
	}
	if c.featureSet.hasSetP(level3Features) {
		return 3
	}
	if c.featureSet.hasSetP(level2Features) {
		return 2
	}
	if c.featureSet.hasSetP(level1Features) {
		return 1
	}
	return 0
}

// Disable will disable one or several features.
// Disable clears the given feature bits. Always returns true.
func (c *CPUInfo) Disable(ids ...FeatureID) bool {
	for _, id := range ids {
		c.featureSet.unset(id)
	}
	return true
}

// Enable will disable one or several features even if they were undetected.
// This is of course not recommended for obvious reasons.
func (c *CPUInfo) Enable(ids ...FeatureID) bool {
	for _, id := range ids {
		c.featureSet.set(id)
	}
	return true
}

// IsVendor returns true if the detected vendor ID matches v.
func (c CPUInfo) IsVendor(v Vendor) bool {
	return c.VendorID == v
}

// FeatureSet returns all available features as strings.
func (c CPUInfo) FeatureSet() []string {
	s := make([]string, 0, c.featureSet.nEnabled())
	s = append(s, c.featureSet.Strings()...)
	return s
}

// RTCounter returns the 64-bit time-stamp counter
// Uses the RDTSCP instruction. The value 0 is returned
// if the CPU does not support the instruction.
func (c CPUInfo) RTCounter() uint64 {
	if !c.Has(RDTSCP) {
		return 0
	}
	a, _, _, d := rdtscpAsm()
	return uint64(a) | (uint64(d) << 32)
}

// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP.
// This variable is OS dependent, but on Linux contains information
// about the current cpu/core the code is running on.
// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned.
func (c CPUInfo) Ia32TscAux() uint32 {
	if !c.Has(RDTSCP) {
		return 0
	}
	_, _, ecx, _ := rdtscpAsm()
	return ecx
}

// SveLengths returns arm SVE vector and predicate lengths in bits.
// Will return 0, 0 if SVE is not enabled or otherwise unable to detect.
func (c CPUInfo) SveLengths() (vl, pl uint64) {
	if !c.Has(SVE) {
		return 0, 0
	}
	return getVectorLength()
}

// LogicalCPU will return the Logical CPU the code is currently executing on.
// This is likely to change when the OS re-schedules the running thread
// to another CPU.
// If the current core cannot be detected, -1 will be returned.
func (c CPUInfo) LogicalCPU() int {
	if c.maxFunc < 1 {
		return -1
	}
	// CPUID leaf 1: initial APIC ID lives in EBX bits 31:24.
	_, ebx, _, _ := cpuid(1)
	return int(ebx >> 24)
}

// frequencies tries to compute the clock speed of the CPU. If leaf 15 is
// supported, use it, otherwise parse the brand string. Yes, really.
func (c *CPUInfo) frequencies() {
	c.Hz, c.BoostFreq = 0, 0
	mfi := maxFunctionID()
	if mfi >= 0x15 {
		// Leaf 0x15: TSC/core crystal ratio (EBX/EAX) and crystal Hz (ECX).
		eax, ebx, ecx, _ := cpuid(0x15)
		if eax != 0 && ebx != 0 && ecx != 0 {
			c.Hz = (int64(ecx) * int64(ebx)) / int64(eax)
		}
	}
	if mfi >= 0x16 {
		// Leaf 0x16 reports base (EAX) and max (EBX) frequency in MHz.
		a, b, _, _ := cpuid(0x16)
		// Base...
		if a&0xffff > 0 {
			c.Hz = int64(a&0xffff) * 1_000_000
		}
		// Boost...
		if b&0xffff > 0 {
			c.BoostFreq = int64(b&0xffff) * 1_000_000
		}
	}
	if c.Hz > 0 {
		return
	}

	// computeHz determines the official rated speed of a CPU from its brand
	// string. This insanity is *actually the official documented way to do
	// this according to Intel*, prior to leaf 0x15 existing. The official
	// documentation only shows this working for exactly `x.xx` or `xxxx`
	// cases, e.g., `2.50GHz` or `1300MHz`; this parser will accept other
	// sizes.
	model := c.BrandName
	hz := strings.LastIndex(model, "Hz")
	if hz < 3 {
		return
	}
	var multiplier int64
	// The byte before "Hz" selects the unit: MHz, GHz or THz.
	switch model[hz-1] {
	case 'M':
		multiplier = 1000 * 1000
	case 'G':
		multiplier = 1000 * 1000 * 1000
	case 'T':
		multiplier = 1000 * 1000 * 1000 * 1000
	}
	if multiplier == 0 {
		return
	}
	freq := int64(0)
	divisor := int64(0)
	decimalShift := int64(1)
	var i int
	// Walk backwards from the unit, accumulating digits; a single '.'
	// records the decimal divisor. Any other character aborts parsing.
	for i = hz - 2; i >= 0 && model[i] != ' '; i-- {
		if model[i] >= '0' && model[i] <= '9' {
			freq += int64(model[i]-'0') * decimalShift
			decimalShift *= 10
		} else if model[i] == '.' {
			if divisor != 0 {
				return
			}
			divisor = decimalShift
		} else {
			return
		}
	}
	// we didn't find a space
	if i < 0 {
		return
	}
	if divisor != 0 {
		c.Hz = (freq * multiplier) / divisor
		return
	}
	c.Hz = freq * multiplier
}

// VM Will return true if the cpu id indicates we are in
// a virtual machine.
// NOTE(review): reads the package-level CPU rather than the receiver c —
// upstream behavior; confirm before changing to c.featureSet.
func (c CPUInfo) VM() bool {
	return CPU.featureSet.inSet(HYPERVISOR)
}

// flags contains detected cpu features and characteristics
type flags uint64

// log2(bits_in_uint64)
const flagBitsLog2 = 6
const flagBits = 1 << flagBitsLog2
const flagMask = flagBits - 1

// flagSet contains detected cpu features and characteristics in an array of flags
type flagSet [(lastID + flagMask) / flagBits]flags

// inSet reports whether feat's bit is set.
func (s *flagSet) inSet(feat FeatureID) bool {
	return s[feat>>flagBitsLog2]&(1<<(feat&flagMask)) != 0
}

// set marks feat as present.
func (s *flagSet) set(feat FeatureID) {
	s[feat>>flagBitsLog2] |= 1 << (feat & flagMask)
}

// setIf will set a feature if boolean is true.
func (s *flagSet) setIf(cond bool, features ...FeatureID) {
	if cond {
		for _, offset := range features {
			s[offset>>flagBitsLog2] |= 1 << (offset & flagMask)
		}
	}
}

// unset clears the given feature bit.
func (s *flagSet) unset(offset FeatureID) {
	bit := flags(1 << (offset & flagMask))
	s[offset>>flagBitsLog2] = s[offset>>flagBitsLog2] & ^bit
}

// or with another flagset.
func (s *flagSet) or(other flagSet) {
	for i, v := range other[:] {
		s[i] |= v
	}
}

// hasSet returns whether all features are present.
func (s *flagSet) hasSet(other flagSet) bool {
	for i, v := range other[:] {
		if s[i]&v != v {
			return false
		}
	}
	return true
}

// hasSetP returns whether all features are present (pointer variant,
// avoids copying the array argument).
func (s *flagSet) hasSetP(other *flagSet) bool {
	for i, v := range other[:] {
		if s[i]&v != v {
			return false
		}
	}
	return true
}

// hasOneOf returns whether one or more features are present.
func (s *flagSet) hasOneOf(other *flagSet) bool {
	for i, v := range other[:] {
		if s[i]&v != 0 {
			return true
		}
	}
	return false
}

// nEnabled will return the number of enabled flags.
func (s *flagSet) nEnabled() (n int) {
	for _, v := range s[:] {
		n += bits.OnesCount64(uint64(v))
	}
	return n
}

// flagSetWith builds a flagSet with exactly the given features set.
func flagSetWith(feat ...FeatureID) flagSet {
	var res flagSet
	for _, f := range feat {
		res.set(f)
	}
	return res
}

// ParseFeature will parse the string and return the ID of the matching feature.
// Will return UNKNOWN if not found.
func ParseFeature(s string) FeatureID { s = strings.ToUpper(s) for i := firstID; i < lastID; i++ { if i.String() == s { return i } } return UNKNOWN } // Strings returns an array of the detected features for FlagsSet. func (s flagSet) Strings() []string { if len(s) == 0 { return []string{""} } r := make([]string, 0) for i := firstID; i < lastID; i++ { if s.inSet(i) { r = append(r, i.String()) } } return r } func maxExtendedFunction() uint32 { eax, _, _, _ := cpuid(0x80000000) return eax } func maxFunctionID() uint32 { a, _, _, _ := cpuid(0) return a } func brandName() string { if maxExtendedFunction() >= 0x80000004 { v := make([]uint32, 0, 48) for i := uint32(0); i < 3; i++ { a, b, c, d := cpuid(0x80000002 + i) v = append(v, a, b, c, d) } return strings.Trim(string(valAsString(v...)), " ") } return "unknown" } func threadsPerCore() int { mfi := maxFunctionID() vend, _ := vendorID() if mfi < 0x4 || (vend != Intel && vend != AMD) { return 1 } if mfi < 0xb { if vend != Intel { return 1 } _, b, _, d := cpuid(1) if (d & (1 << 28)) != 0 { // v will contain logical core count v := (b >> 16) & 255 if v > 1 { a4, _, _, _ := cpuid(4) // physical cores v2 := (a4 >> 26) + 1 if v2 > 0 { return int(v) / int(v2) } } }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go
vendor/github.com/klauspost/cpuid/v2/os_other_arm64.go
// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. //go:build arm64 && !linux && !darwin // +build arm64,!linux,!darwin package cpuid import "runtime" func detectOS(c *CPUInfo) bool { c.PhysicalCores = runtime.NumCPU() // For now assuming 1 thread per core... c.ThreadsPerCore = 1 c.LogicalCores = c.PhysicalCores return false }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/klauspost/cpuid/v2/detect_arm64.go
vendor/github.com/klauspost/cpuid/v2/detect_arm64.go
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. //go:build arm64 && !gccgo && !noasm && !appengine // +build arm64,!gccgo,!noasm,!appengine package cpuid import "runtime" func getMidr() (midr uint64) func getProcFeatures() (procFeatures uint64) func getInstAttributes() (instAttrReg0, instAttrReg1 uint64) func getVectorLength() (vl, pl uint64) func initCPU() { cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } xgetbv = func(uint32) (a, b uint32) { return 0, 0 } rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 } } func addInfo(c *CPUInfo, safe bool) { // Seems to be safe to assume on ARM64 c.CacheLine = 64 detectOS(c) // ARM64 disabled since it may crash if interrupt is not intercepted by OS. if safe && !c.Has(ARMCPUID) && runtime.GOOS != "freebsd" { return } midr := getMidr() // MIDR_EL1 - Main ID Register // https://developer.arm.com/docs/ddi0595/h/aarch64-system-registers/midr_el1 // x--------------------------------------------------x // | Name | bits | visible | // |--------------------------------------------------| // | Implementer | [31-24] | y | // |--------------------------------------------------| // | Variant | [23-20] | y | // |--------------------------------------------------| // | Architecture | [19-16] | y | // |--------------------------------------------------| // | PartNum | [15-4] | y | // |--------------------------------------------------| // | Revision | [3-0] | y | // x--------------------------------------------------x switch (midr >> 24) & 0xff { case 0xC0: c.VendorString = "Ampere Computing" c.VendorID = Ampere case 0x41: c.VendorString = "Arm Limited" c.VendorID = ARM case 0x42: c.VendorString = "Broadcom Corporation" c.VendorID = Broadcom case 0x43: c.VendorString = "Cavium Inc" c.VendorID = Cavium case 0x44: c.VendorString = "Digital Equipment Corporation" c.VendorID = DEC case 0x46: c.VendorString = "Fujitsu Ltd" 
c.VendorID = Fujitsu case 0x49: c.VendorString = "Infineon Technologies AG" c.VendorID = Infineon case 0x4D: c.VendorString = "Motorola or Freescale Semiconductor Inc" c.VendorID = Motorola case 0x4E: c.VendorString = "NVIDIA Corporation" c.VendorID = NVIDIA case 0x50: c.VendorString = "Applied Micro Circuits Corporation" c.VendorID = AMCC case 0x51: c.VendorString = "Qualcomm Inc" c.VendorID = Qualcomm case 0x56: c.VendorString = "Marvell International Ltd" c.VendorID = Marvell case 0x69: c.VendorString = "Intel Corporation" c.VendorID = Intel } // Lower 4 bits: Architecture // Architecture Meaning // 0b0001 Armv4. // 0b0010 Armv4T. // 0b0011 Armv5 (obsolete). // 0b0100 Armv5T. // 0b0101 Armv5TE. // 0b0110 Armv5TEJ. // 0b0111 Armv6. // 0b1111 Architectural features are individually identified in the ID_* registers, see 'ID registers'. // Upper 4 bit: Variant // An IMPLEMENTATION DEFINED variant number. // Typically, this field is used to distinguish between different product variants, or major revisions of a product. c.Family = int(midr>>16) & 0xff // PartNum, bits [15:4] // An IMPLEMENTATION DEFINED primary part number for the device. // On processors implemented by Arm, if the top four bits of the primary // part number are 0x0 or 0x7, the variant and architecture are encoded differently. // Revision, bits [3:0] // An IMPLEMENTATION DEFINED revision number for the device. 
c.Model = int(midr) & 0xffff procFeatures := getProcFeatures() // ID_AA64PFR0_EL1 - Processor Feature Register 0 // x--------------------------------------------------x // | Name | bits | visible | // |--------------------------------------------------| // | DIT | [51-48] | y | // |--------------------------------------------------| // | SVE | [35-32] | y | // |--------------------------------------------------| // | GIC | [27-24] | n | // |--------------------------------------------------| // | AdvSIMD | [23-20] | y | // |--------------------------------------------------| // | FP | [19-16] | y | // |--------------------------------------------------| // | EL3 | [15-12] | n | // |--------------------------------------------------| // | EL2 | [11-8] | n | // |--------------------------------------------------| // | EL1 | [7-4] | n | // |--------------------------------------------------| // | EL0 | [3-0] | n | // x--------------------------------------------------x var f flagSet // if procFeatures&(0xf<<48) != 0 { // fmt.Println("DIT") // } f.setIf(procFeatures&(0xf<<32) != 0, SVE) if procFeatures&(0xf<<20) != 15<<20 { f.set(ASIMD) // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64pfr0_el1 // 0b0001 --> As for 0b0000, and also includes support for half-precision floating-point arithmetic. 
f.setIf(procFeatures&(0xf<<20) == 1<<20, FPHP, ASIMDHP) } f.setIf(procFeatures&(0xf<<16) != 0, FP) instAttrReg0, instAttrReg1 := getInstAttributes() // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 // // ID_AA64ISAR0_EL1 - Instruction Set Attribute Register 0 // x--------------------------------------------------x // | Name | bits | visible | // |--------------------------------------------------| // | RNDR | [63-60] | y | // |--------------------------------------------------| // | TLB | [59-56] | y | // |--------------------------------------------------| // | TS | [55-52] | y | // |--------------------------------------------------| // | FHM | [51-48] | y | // |--------------------------------------------------| // | DP | [47-44] | y | // |--------------------------------------------------| // | SM4 | [43-40] | y | // |--------------------------------------------------| // | SM3 | [39-36] | y | // |--------------------------------------------------| // | SHA3 | [35-32] | y | // |--------------------------------------------------| // | RDM | [31-28] | y | // |--------------------------------------------------| // | ATOMICS | [23-20] | y | // |--------------------------------------------------| // | CRC32 | [19-16] | y | // |--------------------------------------------------| // | SHA2 | [15-12] | y | // |--------------------------------------------------| // | SHA1 | [11-8] | y | // |--------------------------------------------------| // | AES | [7-4] | y | // x--------------------------------------------------x f.setIf(instAttrReg0&(0xf<<60) != 0, RNDR) f.setIf(instAttrReg0&(0xf<<56) != 0, TLB) f.setIf(instAttrReg0&(0xf<<52) != 0, TS) f.setIf(instAttrReg0&(0xf<<48) != 0, FHM) f.setIf(instAttrReg0&(0xf<<44) != 0, ASIMDDP) f.setIf(instAttrReg0&(0xf<<40) != 0, SM4) f.setIf(instAttrReg0&(0xf<<36) != 0, SM3) f.setIf(instAttrReg0&(0xf<<32) != 0, SHA3) f.setIf(instAttrReg0&(0xf<<28) != 0, ASIMDRDM) f.setIf(instAttrReg0&(0xf<<20) != 
0, ATOMICS) f.setIf(instAttrReg0&(0xf<<16) != 0, CRC32) f.setIf(instAttrReg0&(0xf<<12) != 0, SHA2) // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 // 0b0010 --> As 0b0001, plus SHA512H, SHA512H2, SHA512SU0, and SHA512SU1 instructions implemented. f.setIf(instAttrReg0&(0xf<<12) == 2<<12, SHA512) f.setIf(instAttrReg0&(0xf<<8) != 0, SHA1) f.setIf(instAttrReg0&(0xf<<4) != 0, AESARM) // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar0_el1 // 0b0010 --> As for 0b0001, plus PMULL/PMULL2 instructions operating on 64-bit data quantities. f.setIf(instAttrReg0&(0xf<<4) == 2<<4, PMULL) // https://developer.arm.com/docs/ddi0595/b/aarch64-system-registers/id_aa64isar1_el1 // // ID_AA64ISAR1_EL1 - Instruction set attribute register 1 // x--------------------------------------------------x // | Name | bits | visible | // |--------------------------------------------------| // | GPI | [31-28] | y | // |--------------------------------------------------| // | GPA | [27-24] | y | // |--------------------------------------------------| // | LRCPC | [23-20] | y | // |--------------------------------------------------| // | FCMA | [19-16] | y | // |--------------------------------------------------| // | JSCVT | [15-12] | y | // |--------------------------------------------------| // | API | [11-8] | y | // |--------------------------------------------------| // | APA | [7-4] | y | // |--------------------------------------------------| // | DPB | [3-0] | y | // x--------------------------------------------------x // if instAttrReg1&(0xf<<28) != 0 { // fmt.Println("GPI") // } f.setIf(instAttrReg1&(0xf<<28) != 24, GPA) f.setIf(instAttrReg1&(0xf<<20) != 0, LRCPC) f.setIf(instAttrReg1&(0xf<<16) != 0, FCMA) f.setIf(instAttrReg1&(0xf<<12) != 0, JSCVT) // if instAttrReg1&(0xf<<8) != 0 { // fmt.Println("API") // } // if instAttrReg1&(0xf<<4) != 0 { // fmt.Println("APA") // } f.setIf(instAttrReg1&(0xf<<0) != 0, DCPOP) // Store 
c.featureSet.or(f) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go
vendor/github.com/klauspost/cpuid/v2/os_safe_linux_arm64.go
// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file. //go:build nounsafe // +build nounsafe package cpuid var hwcap uint
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/klauspost/cpuid/v2/detect_ref.go
vendor/github.com/klauspost/cpuid/v2/detect_ref.go
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. //go:build (!amd64 && !386 && !arm64) || gccgo || noasm || appengine // +build !amd64,!386,!arm64 gccgo noasm appengine package cpuid func initCPU() { cpuid = func(uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } cpuidex = func(x, y uint32) (a, b, c, d uint32) { return 0, 0, 0, 0 } xgetbv = func(uint32) (a, b uint32) { return 0, 0 } rdtscpAsm = func() (a, b, c, d uint32) { return 0, 0, 0, 0 } } func addInfo(info *CPUInfo, safe bool) {} func getVectorLength() (vl, pl uint64) { return 0, 0 }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/klauspost/cpuid/v2/featureid_string.go
vendor/github.com/klauspost/cpuid/v2/featureid_string.go
// Code generated by "stringer -type=FeatureID,Vendor"; DO NOT EDIT. package cpuid import "strconv" func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. var x [1]struct{} _ = x[ADX-1] _ = x[AESNI-2] _ = x[AMD3DNOW-3] _ = x[AMD3DNOWEXT-4] _ = x[AMXBF16-5] _ = x[AMXFP16-6] _ = x[AMXINT8-7] _ = x[AMXFP8-8] _ = x[AMXTILE-9] _ = x[AMXTF32-10] _ = x[AMXCOMPLEX-11] _ = x[APX_F-12] _ = x[AVX-13] _ = x[AVX10-14] _ = x[AVX10_128-15] _ = x[AVX10_256-16] _ = x[AVX10_512-17] _ = x[AVX2-18] _ = x[AVX512BF16-19] _ = x[AVX512BITALG-20] _ = x[AVX512BW-21] _ = x[AVX512CD-22] _ = x[AVX512DQ-23] _ = x[AVX512ER-24] _ = x[AVX512F-25] _ = x[AVX512FP16-26] _ = x[AVX512IFMA-27] _ = x[AVX512PF-28] _ = x[AVX512VBMI-29] _ = x[AVX512VBMI2-30] _ = x[AVX512VL-31] _ = x[AVX512VNNI-32] _ = x[AVX512VP2INTERSECT-33] _ = x[AVX512VPOPCNTDQ-34] _ = x[AVXIFMA-35] _ = x[AVXNECONVERT-36] _ = x[AVXSLOW-37] _ = x[AVXVNNI-38] _ = x[AVXVNNIINT8-39] _ = x[AVXVNNIINT16-40] _ = x[BHI_CTRL-41] _ = x[BMI1-42] _ = x[BMI2-43] _ = x[CETIBT-44] _ = x[CETSS-45] _ = x[CLDEMOTE-46] _ = x[CLMUL-47] _ = x[CLZERO-48] _ = x[CMOV-49] _ = x[CMPCCXADD-50] _ = x[CMPSB_SCADBS_SHORT-51] _ = x[CMPXCHG8-52] _ = x[CPBOOST-53] _ = x[CPPC-54] _ = x[CX16-55] _ = x[EFER_LMSLE_UNS-56] _ = x[ENQCMD-57] _ = x[ERMS-58] _ = x[F16C-59] _ = x[FLUSH_L1D-60] _ = x[FMA3-61] _ = x[FMA4-62] _ = x[FP128-63] _ = x[FP256-64] _ = x[FSRM-65] _ = x[FXSR-66] _ = x[FXSROPT-67] _ = x[GFNI-68] _ = x[HLE-69] _ = x[HRESET-70] _ = x[HTT-71] _ = x[HWA-72] _ = x[HYBRID_CPU-73] _ = x[HYPERVISOR-74] _ = x[IA32_ARCH_CAP-75] _ = x[IA32_CORE_CAP-76] _ = x[IBPB-77] _ = x[IBPB_BRTYPE-78] _ = x[IBRS-79] _ = x[IBRS_PREFERRED-80] _ = x[IBRS_PROVIDES_SMP-81] _ = x[IBS-82] _ = x[IBSBRNTRGT-83] _ = x[IBSFETCHSAM-84] _ = x[IBSFFV-85] _ = x[IBSOPCNT-86] _ = x[IBSOPCNTEXT-87] _ = x[IBSOPSAM-88] _ = x[IBSRDWROPCNT-89] _ = x[IBSRIPINVALIDCHK-90] _ = x[IBS_FETCH_CTLX-91] _ = 
x[IBS_OPDATA4-92] _ = x[IBS_OPFUSE-93] _ = x[IBS_PREVENTHOST-94] _ = x[IBS_ZEN4-95] _ = x[IDPRED_CTRL-96] _ = x[INT_WBINVD-97] _ = x[INVLPGB-98] _ = x[KEYLOCKER-99] _ = x[KEYLOCKERW-100] _ = x[LAHF-101] _ = x[LAM-102] _ = x[LBRVIRT-103] _ = x[LZCNT-104] _ = x[MCAOVERFLOW-105] _ = x[MCDT_NO-106] _ = x[MCOMMIT-107] _ = x[MD_CLEAR-108] _ = x[MMX-109] _ = x[MMXEXT-110] _ = x[MOVBE-111] _ = x[MOVDIR64B-112] _ = x[MOVDIRI-113] _ = x[MOVSB_ZL-114] _ = x[MOVU-115] _ = x[MPX-116] _ = x[MSRIRC-117] _ = x[MSRLIST-118] _ = x[MSR_PAGEFLUSH-119] _ = x[NRIPS-120] _ = x[NX-121] _ = x[OSXSAVE-122] _ = x[PCONFIG-123] _ = x[POPCNT-124] _ = x[PPIN-125] _ = x[PREFETCHI-126] _ = x[PSFD-127] _ = x[RDPRU-128] _ = x[RDRAND-129] _ = x[RDSEED-130] _ = x[RDTSCP-131] _ = x[RRSBA_CTRL-132] _ = x[RTM-133] _ = x[RTM_ALWAYS_ABORT-134] _ = x[SBPB-135] _ = x[SERIALIZE-136] _ = x[SEV-137] _ = x[SEV_64BIT-138] _ = x[SEV_ALTERNATIVE-139] _ = x[SEV_DEBUGSWAP-140] _ = x[SEV_ES-141] _ = x[SEV_RESTRICTED-142] _ = x[SEV_SNP-143] _ = x[SGX-144] _ = x[SGXLC-145] _ = x[SHA-146] _ = x[SME-147] _ = x[SME_COHERENT-148] _ = x[SPEC_CTRL_SSBD-149] _ = x[SRBDS_CTRL-150] _ = x[SRSO_MSR_FIX-151] _ = x[SRSO_NO-152] _ = x[SRSO_USER_KERNEL_NO-153] _ = x[SSE-154] _ = x[SSE2-155] _ = x[SSE3-156] _ = x[SSE4-157] _ = x[SSE42-158] _ = x[SSE4A-159] _ = x[SSSE3-160] _ = x[STIBP-161] _ = x[STIBP_ALWAYSON-162] _ = x[STOSB_SHORT-163] _ = x[SUCCOR-164] _ = x[SVM-165] _ = x[SVMDA-166] _ = x[SVMFBASID-167] _ = x[SVML-168] _ = x[SVMNP-169] _ = x[SVMPF-170] _ = x[SVMPFT-171] _ = x[SYSCALL-172] _ = x[SYSEE-173] _ = x[TBM-174] _ = x[TDX_GUEST-175] _ = x[TLB_FLUSH_NESTED-176] _ = x[TME-177] _ = x[TOPEXT-178] _ = x[TSCRATEMSR-179] _ = x[TSXLDTRK-180] _ = x[VAES-181] _ = x[VMCBCLEAN-182] _ = x[VMPL-183] _ = x[VMSA_REGPROT-184] _ = x[VMX-185] _ = x[VPCLMULQDQ-186] _ = x[VTE-187] _ = x[WAITPKG-188] _ = x[WBNOINVD-189] _ = x[WRMSRNS-190] _ = x[X87-191] _ = x[XGETBV1-192] _ = x[XOP-193] _ = x[XSAVE-194] _ = x[XSAVEC-195] _ = x[XSAVEOPT-196] _ = 
x[XSAVES-197] _ = x[AESARM-198] _ = x[ARMCPUID-199] _ = x[ASIMD-200] _ = x[ASIMDDP-201] _ = x[ASIMDHP-202] _ = x[ASIMDRDM-203] _ = x[ATOMICS-204] _ = x[CRC32-205] _ = x[DCPOP-206] _ = x[EVTSTRM-207] _ = x[FCMA-208] _ = x[FHM-209] _ = x[FP-210] _ = x[FPHP-211] _ = x[GPA-212] _ = x[JSCVT-213] _ = x[LRCPC-214] _ = x[PMULL-215] _ = x[RNDR-216] _ = x[TLB-217] _ = x[TS-218] _ = x[SHA1-219] _ = x[SHA2-220] _ = x[SHA3-221] _ = x[SHA512-222] _ = x[SM3-223] _ = x[SM4-224] _ = x[SVE-225] _ = x[lastID-226] _ = x[firstID-0] } const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXFP8AMXTILEAMXTF32AMXCOMPLEXAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8AVXVNNIINT16BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBPB_BRTYPEIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSBPBSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSRSO_MSR_FIXSRSO_NOSRSO_USER_KERNEL_NOSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1X
OPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFHMFPFPHPGPAJSCVTLRCPCPMULLRNDRTLBTSSHA1SHA2SHA3SHA512SM3SM4SVElastID" var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 61, 68, 75, 85, 90, 93, 98, 107, 116, 125, 129, 139, 151, 159, 167, 175, 183, 190, 200, 210, 218, 228, 239, 247, 257, 275, 290, 297, 309, 316, 323, 334, 346, 354, 358, 362, 368, 373, 381, 386, 392, 396, 405, 423, 431, 438, 442, 446, 460, 466, 470, 474, 483, 487, 491, 496, 501, 505, 509, 516, 520, 523, 529, 532, 535, 545, 555, 568, 581, 585, 596, 600, 614, 631, 634, 644, 655, 661, 669, 680, 688, 700, 716, 730, 741, 751, 766, 774, 785, 795, 802, 811, 821, 825, 828, 835, 840, 851, 858, 865, 873, 876, 882, 887, 896, 903, 911, 915, 918, 924, 931, 944, 949, 951, 958, 965, 971, 975, 984, 988, 993, 999, 1005, 1011, 1021, 1024, 1040, 1044, 1053, 1056, 1065, 1080, 1093, 1099, 1113, 1120, 1123, 1128, 1131, 1134, 1146, 1160, 1170, 1182, 1189, 1208, 1211, 1215, 1219, 1223, 1228, 1233, 1238, 1243, 1257, 1268, 1274, 1277, 1282, 1291, 1295, 1300, 1305, 1311, 1318, 1323, 1326, 1335, 1351, 1354, 1360, 1370, 1378, 1382, 1391, 1395, 1407, 1410, 1420, 1423, 1430, 1438, 1445, 1448, 1455, 1458, 1463, 1469, 1477, 1483, 1489, 1497, 1502, 1509, 1516, 1524, 1531, 1536, 1541, 1548, 1552, 1555, 1557, 1561, 1564, 1569, 1574, 1579, 1583, 1586, 1588, 1592, 1596, 1600, 1606, 1609, 1612, 1615, 1621} func (i FeatureID) String() string { if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { return "FeatureID(" + strconv.FormatInt(int64(i), 10) + ")" } return _FeatureID_name[_FeatureID_index[i]:_FeatureID_index[i+1]] } func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. 
var x [1]struct{} _ = x[VendorUnknown-0] _ = x[Intel-1] _ = x[AMD-2] _ = x[VIA-3] _ = x[Transmeta-4] _ = x[NSC-5] _ = x[KVM-6] _ = x[MSVM-7] _ = x[VMware-8] _ = x[XenHVM-9] _ = x[Bhyve-10] _ = x[Hygon-11] _ = x[SiS-12] _ = x[RDC-13] _ = x[Ampere-14] _ = x[ARM-15] _ = x[Broadcom-16] _ = x[Cavium-17] _ = x[DEC-18] _ = x[Fujitsu-19] _ = x[Infineon-20] _ = x[Motorola-21] _ = x[NVIDIA-22] _ = x[AMCC-23] _ = x[Qualcomm-24] _ = x[Marvell-25] _ = x[QEMU-26] _ = x[QNX-27] _ = x[ACRN-28] _ = x[SRE-29] _ = x[Apple-30] _ = x[lastVendor-31] } const _Vendor_name = "VendorUnknownIntelAMDVIATransmetaNSCKVMMSVMVMwareXenHVMBhyveHygonSiSRDCAmpereARMBroadcomCaviumDECFujitsuInfineonMotorolaNVIDIAAMCCQualcommMarvellQEMUQNXACRNSREApplelastVendor" var _Vendor_index = [...]uint8{0, 13, 18, 21, 24, 33, 36, 39, 43, 49, 55, 60, 65, 68, 71, 77, 80, 88, 94, 97, 104, 112, 120, 126, 130, 138, 145, 149, 152, 156, 159, 164, 174} func (i Vendor) String() string { if i < 0 || i >= Vendor(len(_Vendor_index)-1) { return "Vendor(" + strconv.FormatInt(int64(i), 10) + ")" } return _Vendor_name[_Vendor_index[i]:_Vendor_index[i+1]] }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/klauspost/cpuid/v2/detect_x86.go
vendor/github.com/klauspost/cpuid/v2/detect_x86.go
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. //go:build (386 && !gccgo && !noasm && !appengine) || (amd64 && !gccgo && !noasm && !appengine) // +build 386,!gccgo,!noasm,!appengine amd64,!gccgo,!noasm,!appengine package cpuid func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32) func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32) func asmXgetbv(index uint32) (eax, edx uint32) func asmRdtscpAsm() (eax, ebx, ecx, edx uint32) func asmDarwinHasAVX512() bool func initCPU() { cpuid = asmCpuid cpuidex = asmCpuidex xgetbv = asmXgetbv rdtscpAsm = asmRdtscpAsm darwinHasAVX512 = asmDarwinHasAVX512 } func addInfo(c *CPUInfo, safe bool) { c.maxFunc = maxFunctionID() c.maxExFunc = maxExtendedFunction() c.BrandName = brandName() c.CacheLine = cacheLine() c.Family, c.Model, c.Stepping = familyModel() c.featureSet = support() c.SGX = hasSGX(c.featureSet.inSet(SGX), c.featureSet.inSet(SGXLC)) c.AMDMemEncryption = hasAMDMemEncryption(c.featureSet.inSet(SME) || c.featureSet.inSet(SEV)) c.ThreadsPerCore = threadsPerCore() c.LogicalCores = logicalCores() c.PhysicalCores = physicalCores() c.VendorID, c.VendorString = vendorID() c.HypervisorVendorID, c.HypervisorVendorString = hypervisorVendorID() c.AVX10Level = c.supportAVX10() c.cacheSize() c.frequencies() } func getVectorLength() (vl, pl uint64) { return 0, 0 }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go
vendor/github.com/klauspost/cpuid/v2/os_unsafe_linux_arm64.go
// Copyright (c) 2021 Klaus Post, released under MIT License. See LICENSE file. //go:build !nounsafe // +build !nounsafe package cpuid import _ "unsafe" // needed for go:linkname //go:linkname hwcap internal/cpu.HWCap var hwcap uint
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go
vendor/github.com/klauspost/cpuid/v2/os_darwin_arm64.go
// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file. package cpuid import ( "runtime" "strings" "golang.org/x/sys/unix" ) func detectOS(c *CPUInfo) bool { if runtime.GOOS != "ios" { tryToFillCPUInfoFomSysctl(c) } // There are no hw.optional sysctl values for the below features on Mac OS 11.0 // to detect their supported state dynamically. Assume the CPU features that // Apple Silicon M1 supports to be available as a minimal set of features // to all Go programs running on darwin/arm64. // TODO: Add more if we know them. c.featureSet.setIf(runtime.GOOS != "ios", AESARM, PMULL, SHA1, SHA2) return true } func sysctlGetBool(name string) bool { value, err := unix.SysctlUint32(name) if err != nil { return false } return value != 0 } func sysctlGetString(name string) string { value, err := unix.Sysctl(name) if err != nil { return "" } return value } func sysctlGetInt(unknown int, names ...string) int { for _, name := range names { value, err := unix.SysctlUint32(name) if err != nil { continue } if value != 0 { return int(value) } } return unknown } func sysctlGetInt64(unknown int, names ...string) int { for _, name := range names { value64, err := unix.SysctlUint64(name) if err != nil { continue } if int(value64) != unknown { return int(value64) } } return unknown } func setFeature(c *CPUInfo, name string, feature FeatureID) { c.featureSet.setIf(sysctlGetBool(name), feature) } func tryToFillCPUInfoFomSysctl(c *CPUInfo) { c.BrandName = sysctlGetString("machdep.cpu.brand_string") if len(c.BrandName) != 0 { c.VendorString = strings.Fields(c.BrandName)[0] } c.PhysicalCores = sysctlGetInt(runtime.NumCPU(), "hw.physicalcpu") c.ThreadsPerCore = sysctlGetInt(1, "machdep.cpu.thread_count", "kern.num_threads") / sysctlGetInt(1, "hw.physicalcpu") c.LogicalCores = sysctlGetInt(runtime.NumCPU(), "machdep.cpu.core_count") c.Family = sysctlGetInt(0, "machdep.cpu.family", "hw.cpufamily") c.Model = sysctlGetInt(0, "machdep.cpu.model") c.CacheLine = 
sysctlGetInt64(0, "hw.cachelinesize") c.Cache.L1I = sysctlGetInt64(-1, "hw.l1icachesize") c.Cache.L1D = sysctlGetInt64(-1, "hw.l1dcachesize") c.Cache.L2 = sysctlGetInt64(-1, "hw.l2cachesize") c.Cache.L3 = sysctlGetInt64(-1, "hw.l3cachesize") // from https://developer.arm.com/downloads/-/exploration-tools/feature-names-for-a-profile setFeature(c, "hw.optional.arm.FEAT_AES", AESARM) setFeature(c, "hw.optional.AdvSIMD", ASIMD) setFeature(c, "hw.optional.arm.FEAT_DotProd", ASIMDDP) setFeature(c, "hw.optional.arm.FEAT_RDM", ASIMDRDM) setFeature(c, "hw.optional.FEAT_CRC32", CRC32) setFeature(c, "hw.optional.arm.FEAT_DPB", DCPOP) // setFeature(c, "", EVTSTRM) setFeature(c, "hw.optional.arm.FEAT_FCMA", FCMA) setFeature(c, "hw.optional.arm.FEAT_FHM", FHM) setFeature(c, "hw.optional.arm.FEAT_FP", FP) setFeature(c, "hw.optional.arm.FEAT_FP16", FPHP) setFeature(c, "hw.optional.arm.FEAT_PAuth", GPA) setFeature(c, "hw.optional.arm.FEAT_RNG", RNDR) setFeature(c, "hw.optional.arm.FEAT_JSCVT", JSCVT) setFeature(c, "hw.optional.arm.FEAT_LRCPC", LRCPC) setFeature(c, "hw.optional.arm.FEAT_PMULL", PMULL) setFeature(c, "hw.optional.arm.FEAT_SHA1", SHA1) setFeature(c, "hw.optional.arm.FEAT_SHA256", SHA2) setFeature(c, "hw.optional.arm.FEAT_SHA3", SHA3) setFeature(c, "hw.optional.arm.FEAT_SHA512", SHA512) setFeature(c, "hw.optional.arm.FEAT_TLBIOS", TLB) setFeature(c, "hw.optional.arm.FEAT_TLBIRANGE", TLB) setFeature(c, "hw.optional.arm.FEAT_FlagM", TS) setFeature(c, "hw.optional.arm.FEAT_FlagM2", TS) // setFeature(c, "", SM3) // setFeature(c, "", SM4) setFeature(c, "hw.optional.arm.FEAT_SVE", SVE) // from empirical observation setFeature(c, "hw.optional.AdvSIMD_HPFPCvt", ASIMDHP) setFeature(c, "hw.optional.armv8_1_atomics", ATOMICS) setFeature(c, "hw.optional.floatingpoint", FP) setFeature(c, "hw.optional.armv8_2_sha3", SHA3) setFeature(c, "hw.optional.armv8_2_sha512", SHA512) setFeature(c, "hw.optional.armv8_3_compnum", FCMA) setFeature(c, "hw.optional.armv8_crc32", CRC32) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go
vendor/github.com/klauspost/cpuid/v2/os_linux_arm64.go
// Copyright (c) 2020 Klaus Post, released under MIT License. See LICENSE file.
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file located
// here https://github.com/golang/sys/blob/master/LICENSE

package cpuid

import (
	"encoding/binary"
	"io/ioutil"
	"runtime"
)

// HWCAP bits.
//
// Bit positions of the Linux AT_HWCAP / AT_HWCAP2 auxiliary-vector words for
// arm64, mirroring the kernel's <asm/hwcap.h>. Positions >= 32 (e.g.
// hwcap_GCS, hwcap2_EBF16 and above) are valid here because uint is 64 bits
// wide on GOARCH=arm64.
const (
	hwcap_FP       = 1 << 0
	hwcap_ASIMD    = 1 << 1
	hwcap_EVTSTRM  = 1 << 2
	hwcap_AES      = 1 << 3
	hwcap_PMULL    = 1 << 4
	hwcap_SHA1     = 1 << 5
	hwcap_SHA2     = 1 << 6
	hwcap_CRC32    = 1 << 7
	hwcap_ATOMICS  = 1 << 8
	hwcap_FPHP     = 1 << 9
	hwcap_ASIMDHP  = 1 << 10
	hwcap_CPUID    = 1 << 11
	hwcap_ASIMDRDM = 1 << 12
	hwcap_JSCVT    = 1 << 13
	hwcap_FCMA     = 1 << 14
	hwcap_LRCPC    = 1 << 15
	hwcap_DCPOP    = 1 << 16
	hwcap_SHA3     = 1 << 17
	hwcap_SM3      = 1 << 18
	hwcap_SM4      = 1 << 19
	hwcap_ASIMDDP  = 1 << 20
	hwcap_SHA512   = 1 << 21
	hwcap_SVE      = 1 << 22
	hwcap_ASIMDFHM = 1 << 23
	hwcap_DIT      = 1 << 24
	hwcap_USCAT    = 1 << 25
	hwcap_ILRCPC   = 1 << 26
	hwcap_FLAGM    = 1 << 27
	hwcap_SSBS     = 1 << 28
	hwcap_SB       = 1 << 29
	hwcap_PACA     = 1 << 30
	hwcap_PACG     = 1 << 31
	hwcap_GCS      = 1 << 32

	// AT_HWCAP2 bit positions.
	// NOTE(review): these are defined but the AT_HWCAP2 word read from
	// /proc/self/auxv is discarded in detectOS below ("// Not used"), so
	// only the hwcap word is ever consulted — verify against upstream.
	hwcap2_DCPODP     = 1 << 0
	hwcap2_SVE2       = 1 << 1
	hwcap2_SVEAES     = 1 << 2
	hwcap2_SVEPMULL   = 1 << 3
	hwcap2_SVEBITPERM = 1 << 4
	hwcap2_SVESHA3    = 1 << 5
	hwcap2_SVESM4     = 1 << 6
	hwcap2_FLAGM2     = 1 << 7
	hwcap2_FRINT      = 1 << 8
	hwcap2_SVEI8MM    = 1 << 9
	hwcap2_SVEF32MM   = 1 << 10
	hwcap2_SVEF64MM   = 1 << 11
	hwcap2_SVEBF16    = 1 << 12
	hwcap2_I8MM       = 1 << 13
	hwcap2_BF16       = 1 << 14
	hwcap2_DGH        = 1 << 15
	hwcap2_RNG        = 1 << 16
	hwcap2_BTI        = 1 << 17
	hwcap2_MTE        = 1 << 18
	hwcap2_ECV        = 1 << 19
	hwcap2_AFP        = 1 << 20
	hwcap2_RPRES      = 1 << 21
	hwcap2_MTE3       = 1 << 22
	hwcap2_SME        = 1 << 23
	hwcap2_SME_I16I64 = 1 << 24
	hwcap2_SME_F64F64 = 1 << 25
	hwcap2_SME_I8I32  = 1 << 26
	hwcap2_SME_F16F32 = 1 << 27
	hwcap2_SME_B16F32 = 1 << 28
	hwcap2_SME_F32F32 = 1 << 29
	hwcap2_SME_FA64   = 1 << 30
	hwcap2_WFXT       = 1 << 31
	hwcap2_EBF16      = 1 << 32
	hwcap2_SVE_EBF16  = 1 << 33
	hwcap2_CSSC       = 1 << 34
	hwcap2_RPRFM      = 1 << 35
	hwcap2_SVE2P1     = 1 << 36
	hwcap2_SME2       = 1 << 37
	hwcap2_SME2P1     = 1 << 38
	hwcap2_SME_I16I32 = 1 << 39
	hwcap2_SME_BI32I32 = 1 << 40
	hwcap2_SME_B16B16 = 1 << 41
	hwcap2_SME_F16F16 = 1 << 42
	hwcap2_MOPS       = 1 << 43
	hwcap2_HBC        = 1 << 44
	hwcap2_SVE_B16B16 = 1 << 45
	hwcap2_LRCPC3     = 1 << 46
	hwcap2_LSE128     = 1 << 47
	hwcap2_FPMR       = 1 << 48
	hwcap2_LUT        = 1 << 49
	hwcap2_FAMINMAX   = 1 << 50
	hwcap2_F8CVT      = 1 << 51
	hwcap2_F8FMA      = 1 << 52
	hwcap2_F8DP4      = 1 << 53
	hwcap2_F8DP2      = 1 << 54
	hwcap2_F8E4M3     = 1 << 55
	hwcap2_F8E5M2     = 1 << 56
	hwcap2_SME_LUTV2  = 1 << 57
	hwcap2_SME_F8F16  = 1 << 58
	hwcap2_SME_F8F32  = 1 << 59
	hwcap2_SME_SF8FMA = 1 << 60
	hwcap2_SME_SF8DP4 = 1 << 61
	hwcap2_SME_SF8DP2 = 1 << 62
	hwcap2_POE        = 1 << 63
)

// detectOS performs OS-assisted CPU feature detection for linux/arm64 and
// fills core counts plus the feature set of c. It relies on the HWCAP word
// (normally populated by the Go runtime, with a manual /proc/self/auxv
// fallback) because reading aarch64 system registers is not supported in
// user space on older kernels. It reports whether usable data was obtained.
func detectOS(c *CPUInfo) bool {
	// For now assuming no hyperthreading is reasonable.
	c.LogicalCores = runtime.NumCPU()
	c.PhysicalCores = c.LogicalCores
	c.ThreadsPerCore = 1
	if hwcap == 0 {
		// We did not get values from the runtime.
		// Try reading /proc/self/auxv

		// From https://github.com/golang/sys
		const (
			_AT_HWCAP  = 16
			_AT_HWCAP2 = 26

			// 32 on 32-bit targets, 64 on 64-bit targets.
			uintSize = int(32 << (^uint(0) >> 63))
		)

		buf, err := ioutil.ReadFile("/proc/self/auxv")
		if err != nil {
			// e.g. on android /proc/self/auxv is not accessible, so silently
			// ignore the error and leave Initialized = false. On some
			// architectures (e.g. arm64) doinit() implements a fallback
			// readout and will set Initialized = true again.
			return false
		}
		bo := binary.LittleEndian
		// The auxv is a sequence of (tag, value) machine-word pairs,
		// terminated by an AT_NULL pair; stop when fewer than two words remain.
		for len(buf) >= 2*(uintSize/8) {
			var tag, val uint
			switch uintSize {
			case 32:
				tag = uint(bo.Uint32(buf[0:]))
				val = uint(bo.Uint32(buf[4:]))
				buf = buf[8:]
			case 64:
				tag = uint(bo.Uint64(buf[0:]))
				val = uint(bo.Uint64(buf[8:]))
				buf = buf[16:]
			}
			switch tag {
			case _AT_HWCAP:
				hwcap = val
			case _AT_HWCAP2:
				// Not used
			}
		}
		if hwcap == 0 {
			return false
		}
	}

	// HWCap was populated by the runtime from the auxiliary vector.
	// Use HWCap information since reading aarch64 system registers
	// is not supported in user space on older linux kernels.
	c.featureSet.setIf(isSet(hwcap, hwcap_AES), AESARM)
	c.featureSet.setIf(isSet(hwcap, hwcap_ASIMD), ASIMD)
	c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDDP), ASIMDDP)
	c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDHP), ASIMDHP)
	c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDRDM), ASIMDRDM)
	c.featureSet.setIf(isSet(hwcap, hwcap_CPUID), ARMCPUID)
	c.featureSet.setIf(isSet(hwcap, hwcap_CRC32), CRC32)
	c.featureSet.setIf(isSet(hwcap, hwcap_DCPOP), DCPOP)
	c.featureSet.setIf(isSet(hwcap, hwcap_EVTSTRM), EVTSTRM)
	c.featureSet.setIf(isSet(hwcap, hwcap_FCMA), FCMA)
	c.featureSet.setIf(isSet(hwcap, hwcap_ASIMDFHM), FHM)
	c.featureSet.setIf(isSet(hwcap, hwcap_FP), FP)
	c.featureSet.setIf(isSet(hwcap, hwcap_FPHP), FPHP)
	c.featureSet.setIf(isSet(hwcap, hwcap_JSCVT), JSCVT)
	c.featureSet.setIf(isSet(hwcap, hwcap_LRCPC), LRCPC)
	c.featureSet.setIf(isSet(hwcap, hwcap_PMULL), PMULL)
	// NOTE(review): hwcap2_RNG is an AT_HWCAP2 bit position (1<<16) but is
	// tested against the AT_HWCAP word, where 1<<16 is hwcap_DCPOP — so RNDR
	// would be set whenever DCPOP is present. The AT_HWCAP2 word is parsed
	// above but discarded, so a correct check is not possible as written.
	// Verify against upstream before changing behavior.
	c.featureSet.setIf(isSet(hwcap, hwcap2_RNG), RNDR)
	// c.featureSet.setIf(isSet(hwcap, hwcap_), TLB)
	// c.featureSet.setIf(isSet(hwcap, hwcap_), TS)
	c.featureSet.setIf(isSet(hwcap, hwcap_SHA1), SHA1)
	c.featureSet.setIf(isSet(hwcap, hwcap_SHA2), SHA2)
	c.featureSet.setIf(isSet(hwcap, hwcap_SHA3), SHA3)
	c.featureSet.setIf(isSet(hwcap, hwcap_SHA512), SHA512)
	c.featureSet.setIf(isSet(hwcap, hwcap_SM3), SM3)
	c.featureSet.setIf(isSet(hwcap, hwcap_SM4), SM4)
	c.featureSet.setIf(isSet(hwcap, hwcap_SVE), SVE)

	// The Samsung S9+ kernel reports support for atomics, but not all cores
	// actually support them, resulting in SIGILL. See issue #28431.
	// TODO(elias.naur): Only disable the optimization on bad chipsets on android.
	c.featureSet.setIf(isSet(hwcap, hwcap_ATOMICS) && runtime.GOOS != "android", ATOMICS)

	return true
}

// isSet reports whether any bit of value is set in the hwc mask.
func isSet(hwc uint, value uint) bool {
	return hwc&value != 0
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/json.go
vendor/github.com/goccy/go-json/json.go
package json

import (
	"bytes"
	"context"
	"encoding/json"

	"github.com/goccy/go-json/internal/encoder"
)

// Marshaler is the interface implemented by types that
// can marshal themselves into valid JSON.
type Marshaler interface {
	MarshalJSON() ([]byte, error)
}

// MarshalerContext is the interface implemented by types that
// can marshal themselves into valid JSON with context.Context.
// It is a go-json extension; encoding/json only knows Marshaler.
type MarshalerContext interface {
	MarshalJSON(context.Context) ([]byte, error)
}

// Unmarshaler is the interface implemented by types
// that can unmarshal a JSON description of themselves.
// The input can be assumed to be a valid encoding of
// a JSON value. UnmarshalJSON must copy the JSON data
// if it wishes to retain the data after returning.
//
// By convention, to approximate the behavior of Unmarshal itself,
// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op.
type Unmarshaler interface {
	UnmarshalJSON([]byte) error
}

// UnmarshalerContext is the interface implemented by types
// that can unmarshal with context.Context a JSON description of themselves.
// It is a go-json extension; encoding/json only knows Unmarshaler.
type UnmarshalerContext interface {
	UnmarshalJSON(context.Context, []byte) error
}

// Marshal returns the JSON encoding of v.
//
// Marshal traverses the value v recursively.
// If an encountered value implements the Marshaler interface
// and is not a nil pointer, Marshal calls its MarshalJSON method
// to produce JSON. If no MarshalJSON method is present but the
// value implements encoding.TextMarshaler instead, Marshal calls
// its MarshalText method and encodes the result as a JSON string.
// The nil pointer exception is not strictly necessary
// but mimics a similar, necessary exception in the behavior of
// UnmarshalJSON.
//
// Otherwise, Marshal uses the following type-dependent default encodings:
//
// Boolean values encode as JSON booleans.
//
// Floating point, integer, and Number values encode as JSON numbers.
// // String values encode as JSON strings coerced to valid UTF-8, // replacing invalid bytes with the Unicode replacement rune. // The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" // to keep some browsers from misinterpreting JSON output as HTML. // Ampersand "&" is also escaped to "\u0026" for the same reason. // This escaping can be disabled using an Encoder that had SetEscapeHTML(false) // called on it. // // Array and slice values encode as JSON arrays, except that // []byte encodes as a base64-encoded string, and a nil slice // encodes as the null JSON value. // // Struct values encode as JSON objects. // Each exported struct field becomes a member of the object, using the // field name as the object key, unless the field is omitted for one of the // reasons given below. // // The encoding of each struct field can be customized by the format string // stored under the "json" key in the struct field's tag. // The format string gives the name of the field, possibly followed by a // comma-separated list of options. The name may be empty in order to // specify options without overriding the default field name. // // The "omitempty" option specifies that the field should be omitted // from the encoding if the field has an empty value, defined as // false, 0, a nil pointer, a nil interface value, and any empty array, // slice, map, or string. // // As a special case, if the field tag is "-", the field is always omitted. // Note that a field with name "-" can still be generated using the tag "-,". // // Examples of struct field tags and their meanings: // // // Field appears in JSON as key "myName". // Field int `json:"myName"` // // // Field appears in JSON as key "myName" and // // the field is omitted from the object if its value is empty, // // as defined above. // Field int `json:"myName,omitempty"` // // // Field appears in JSON as key "Field" (the default), but // // the field is skipped if empty. // // Note the leading comma. 
// Field int `json:",omitempty"` // // // Field is ignored by this package. // Field int `json:"-"` // // // Field appears in JSON as key "-". // Field int `json:"-,"` // // The "string" option signals that a field is stored as JSON inside a // JSON-encoded string. It applies only to fields of string, floating point, // integer, or boolean types. This extra level of encoding is sometimes used // when communicating with JavaScript programs: // // Int64String int64 `json:",string"` // // The key name will be used if it's a non-empty string consisting of // only Unicode letters, digits, and ASCII punctuation except quotation // marks, backslash, and comma. // // Anonymous struct fields are usually marshaled as if their inner exported fields // were fields in the outer struct, subject to the usual Go visibility rules amended // as described in the next paragraph. // An anonymous struct field with a name given in its JSON tag is treated as // having that name, rather than being anonymous. // An anonymous struct field of interface type is treated the same as having // that type as its name, rather than being anonymous. // // The Go visibility rules for struct fields are amended for JSON when // deciding which field to marshal or unmarshal. If there are // multiple fields at the same level, and that level is the least // nested (and would therefore be the nesting level selected by the // usual Go rules), the following extra rules apply: // // 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, // even if there are multiple untagged fields that would otherwise conflict. // // 2) If there is exactly one field (tagged or not according to the first rule), that is selected. // // 3) Otherwise there are multiple fields, and all are ignored; no error occurs. // // Handling of anonymous struct fields is new in Go 1.1. // Prior to Go 1.1, anonymous struct fields were ignored. 
To force ignoring of
// an anonymous struct field in both current and earlier versions, give the field
// a JSON tag of "-".
//
// Map values encode as JSON objects. The map's key type must either be a
// string, an integer type, or implement encoding.TextMarshaler. The map keys
// are sorted and used as JSON object keys by applying the following rules,
// subject to the UTF-8 coercion described for string values above:
// - string keys are used directly
// - encoding.TextMarshalers are marshaled
// - integer keys are converted to strings
//
// Pointer values encode as the value pointed to.
// A nil pointer encodes as the null JSON value.
//
// Interface values encode as the value contained in the interface.
// A nil interface value encodes as the null JSON value.
//
// Channel, complex, and function values cannot be encoded in JSON.
// Attempting to encode such a value causes Marshal to return
// an UnsupportedTypeError.
//
// JSON cannot represent cyclic data structures and Marshal does not
// handle them. Passing cyclic structures to Marshal will result in
// an infinite recursion.
func Marshal(v interface{}) ([]byte, error) {
	return MarshalWithOption(v)
}

// MarshalNoEscape returns the JSON encoding of v and doesn't escape v.
// NOTE(review): presumably "doesn't escape" refers to heap escape analysis of
// v rather than character escaping — confirm against marshalNoEscape.
func MarshalNoEscape(v interface{}) ([]byte, error) {
	return marshalNoEscape(v)
}

// MarshalContext returns the JSON encoding of v with context.Context and EncodeOption.
func MarshalContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) {
	return marshalContext(ctx, v, optFuncs...)
}

// MarshalWithOption returns the JSON encoding of v with EncodeOption.
func MarshalWithOption(v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) {
	return marshal(v, optFuncs...)
}

// MarshalIndent is like Marshal but applies Indent to format the output.
// Each JSON element in the output will begin on a new line beginning with prefix
// followed by one or more copies of indent according to the indentation nesting.
func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
	return MarshalIndentWithOption(v, prefix, indent)
}

// MarshalIndentWithOption is like Marshal but applies Indent to format the output with EncodeOption.
func MarshalIndentWithOption(v interface{}, prefix, indent string, optFuncs ...EncodeOptionFunc) ([]byte, error) {
	return marshalIndent(v, prefix, indent, optFuncs...)
}

// Unmarshal parses the JSON-encoded data and stores the result
// in the value pointed to by v. If v is nil or not a pointer,
// Unmarshal returns an InvalidUnmarshalError.
//
// Unmarshal uses the inverse of the encodings that
// Marshal uses, allocating maps, slices, and pointers as necessary,
// with the following additional rules:
//
// To unmarshal JSON into a pointer, Unmarshal first handles the case of
// the JSON being the JSON literal null. In that case, Unmarshal sets
// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
// the value pointed at by the pointer. If the pointer is nil, Unmarshal
// allocates a new value for it to point to.
//
// To unmarshal JSON into a value implementing the Unmarshaler interface,
// Unmarshal calls that value's UnmarshalJSON method, including
// when the input is a JSON null.
// Otherwise, if the value implements encoding.TextUnmarshaler
// and the input is a JSON quoted string, Unmarshal calls that value's
// UnmarshalText method with the unquoted form of the string.
//
// To unmarshal JSON into a struct, Unmarshal matches incoming object
// keys to the keys used by Marshal (either the struct field name or its tag),
// preferring an exact match but also accepting a case-insensitive match. By
// default, object keys which don't have a corresponding struct field are
// ignored (see Decoder.DisallowUnknownFields for an alternative).
// // To unmarshal JSON into an interface value, // Unmarshal stores one of these in the interface value: // // bool, for JSON booleans // float64, for JSON numbers // string, for JSON strings // []interface{}, for JSON arrays // map[string]interface{}, for JSON objects // nil for JSON null // // To unmarshal a JSON array into a slice, Unmarshal resets the slice length // to zero and then appends each element to the slice. // As a special case, to unmarshal an empty JSON array into a slice, // Unmarshal replaces the slice with a new empty slice. // // To unmarshal a JSON array into a Go array, Unmarshal decodes // JSON array elements into corresponding Go array elements. // If the Go array is smaller than the JSON array, // the additional JSON array elements are discarded. // If the JSON array is smaller than the Go array, // the additional Go array elements are set to zero values. // // To unmarshal a JSON object into a map, Unmarshal first establishes a map to // use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal // reuses the existing map, keeping existing entries. Unmarshal then stores // key-value pairs from the JSON object into the map. The map's key type must // either be any string type, an integer, implement json.Unmarshaler, or // implement encoding.TextUnmarshaler. // // If a JSON value is not appropriate for a given target type, // or if a JSON number overflows the target type, Unmarshal // skips that field and completes the unmarshaling as best it can. // If no more serious errors are encountered, Unmarshal returns // an UnmarshalTypeError describing the earliest such error. In any // case, it's not guaranteed that all the remaining fields following // the problematic one will be unmarshaled into the target object. // // The JSON null value unmarshals into an interface, map, pointer, or slice // by setting that Go value to nil. 
Because null is often used in JSON to mean
// “not present,” unmarshaling a JSON null into any other Go type has no effect
// on the value and produces no error.
//
// When unmarshaling quoted strings, invalid UTF-8 or
// invalid UTF-16 surrogate pairs are not treated as an error.
// Instead, they are replaced by the Unicode replacement
// character U+FFFD.
func Unmarshal(data []byte, v interface{}) error {
	return unmarshal(data, v)
}

// UnmarshalContext parses the JSON-encoded data and stores the result
// in the value pointed to by v. If you implement the UnmarshalerContext interface,
// call it with ctx as an argument.
func UnmarshalContext(ctx context.Context, data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error {
	// Fix: forward optFuncs. Previously they were accepted but silently
	// dropped, so options such as DecodeFieldPriorityFirstWin had no effect
	// when decoding through UnmarshalContext (unlike UnmarshalWithOption).
	return unmarshalContext(ctx, data, v, optFuncs...)
}

// UnmarshalWithOption parses the JSON-encoded data with DecodeOption and
// stores the result in the value pointed to by v.
func UnmarshalWithOption(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error {
	return unmarshal(data, v, optFuncs...)
}

// UnmarshalNoEscape is like UnmarshalWithOption but avoids forcing v to
// escape, delegating to unmarshalNoEscape.
func UnmarshalNoEscape(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error {
	return unmarshalNoEscape(data, v, optFuncs...)
}

// A Token holds a value of one of these types:
//
//	Delim, for the four JSON delimiters [ ] { }
//	bool, for JSON booleans
//	float64, for JSON numbers
//	Number, for JSON numbers
//	string, for JSON string literals
//	nil, for JSON null
type Token = json.Token

// A Number represents a JSON number literal.
type Number = json.Number

// RawMessage is a raw encoded JSON value.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage = json.RawMessage

// A Delim is a JSON array or object delimiter, one of [ ] { or }.
type Delim = json.Delim

// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
func Compact(dst *bytes.Buffer, src []byte) error {
	return encoder.Compact(dst, src, false)
}

// Indent appends to dst an indented form of the JSON-encoded src.
// Each element in a JSON object or array begins on a new,
// indented line beginning with prefix followed by one or more
// copies of indent according to the indentation nesting.
// The data appended to dst does not begin with the prefix nor
// any indentation, to make it easier to embed inside other formatted JSON data.
// Although leading space characters (space, tab, carriage return, newline)
// at the beginning of src are dropped, trailing space characters
// at the end of src are preserved and copied to dst.
// For example, if src has no trailing spaces, neither will dst;
// if src ends in a trailing newline, so will dst.
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
	return encoder.Indent(dst, src, prefix, indent)
}

// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
// so that the JSON will be safe to embed inside HTML <script> tags.
// For historical reasons, web browsers don't honor standard HTML
// escaping within <script> tags, so an alternative JSON encoding must
// be used.
//
// Note: the escaping is achieved by decoding src and re-encoding it; if src
// is not valid JSON, or re-encoding fails, this function silently appends
// nothing to dst (both errors below are deliberately ignored).
func HTMLEscape(dst *bytes.Buffer, src []byte) {
	var v interface{}
	dec := NewDecoder(bytes.NewBuffer(src))
	// UseNumber keeps numeric literals as Number so re-encoding round-trips
	// them without float64 precision loss.
	dec.UseNumber()
	if err := dec.Decode(&v); err != nil {
		return
	}
	buf, _ := marshal(v)
	dst.Write(buf)
}

// Valid reports whether data is a valid JSON encoding.
func Valid(data []byte) bool {
	var v interface{}
	decoder := NewDecoder(bytes.NewReader(data))
	err := decoder.Decode(&v)
	if err != nil {
		return false
	}
	// A single complete value must consume all of data: either the decoder
	// reports no further tokens, or its read offset reached the end of input
	// (i.e. only trailing whitespace remained). Otherwise data holds trailing
	// garbage and is not a single valid JSON document.
	if !decoder.More() {
		return true
	}
	return decoder.InputOffset() >= int64(len(data))
}

// init wires this package's Marshal/Unmarshal into the internal encoder so
// nested encoding paths call back into the public implementations.
func init() {
	encoder.Marshal = Marshal
	encoder.Unmarshal = Unmarshal
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/path.go
vendor/github.com/goccy/go-json/path.go
package json

import (
	"reflect"

	"github.com/goccy/go-json/internal/decoder"
)

// CreatePath creates JSON Path.
//
// JSON Path rule
// $  : root object or element. The JSON Path format must start with this operator, which refers to the outermost level of the JSON-formatted string.
// .  : child operator. You can identify child values using dot-notation.
// .. : recursive descent.
// [] : subscript operator. If the JSON object is an array, you can use brackets to specify the array index.
// [*]: all objects/elements for array.
//
// Reserved words must be properly escaped when included in Path.
//
// Escape Rule
// single quote style escape: e.g.) `$['a.b'].c`
// double quote style escape: e.g.) `$."a.b".c`
func CreatePath(p string) (*Path, error) {
	path, err := decoder.PathString(p).Build()
	if err != nil {
		return nil, err
	}
	return &Path{path: path}, nil
}

// Path represents JSON Path.
type Path struct {
	path *decoder.Path
}

// RootSelectorOnly reports whether only the root selector ($) is used.
func (p *Path) RootSelectorOnly() bool {
	return p.path.RootSelectorOnly
}

// UsedSingleQuotePathSelector reports whether single quote-based escaping was done when building the JSON Path.
func (p *Path) UsedSingleQuotePathSelector() bool {
	return p.path.SingleQuotePathSelector
}

// UsedDoubleQuotePathSelector reports whether double quote-based escaping was done when building the JSON Path.
// (Doc fix: the comment previously named UsedSingleQuotePathSelector — copy-paste typo.)
func (p *Path) UsedDoubleQuotePathSelector() bool {
	return p.path.DoubleQuotePathSelector
}

// Extract extracts a specific JSON string.
func (p *Path) Extract(data []byte, optFuncs ...DecodeOptionFunc) ([][]byte, error) {
	return extractFromPath(p, data, optFuncs...)
}

// PathString returns original JSON Path string.
func (p *Path) PathString() string {
	return p.path.String()
}

// Unmarshal extracts and decodes the value of the part corresponding to JSON Path from the input data.
// All matches are decoded into a []interface{} which is then assigned to v.
func (p *Path) Unmarshal(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error {
	contents, err := extractFromPath(p, data, optFuncs...)
	if err != nil {
		return err
	}
	results := make([]interface{}, 0, len(contents))
	for _, content := range contents {
		var result interface{}
		if err := Unmarshal(content, &result); err != nil {
			return err
		}
		results = append(results, result)
	}
	if err := decoder.AssignValue(reflect.ValueOf(results), reflect.ValueOf(v)); err != nil {
		return err
	}
	return nil
}

// Get extracts and substitutes the value of the part corresponding to JSON Path from the input value.
func (p *Path) Get(src, dst interface{}) error {
	return p.path.Get(reflect.ValueOf(src), reflect.ValueOf(dst))
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/error.go
vendor/github.com/goccy/go-json/error.go
package json

import (
	"github.com/goccy/go-json/internal/errors"
)

// This file re-exports the error types of the internal errors package as
// aliases so they match the names exposed by encoding/json.

// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
// attempting to encode a string value with invalid UTF-8 sequences.
// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
// replacing invalid bytes with the Unicode replacement rune U+FFFD.
//
// Deprecated: No longer used; kept for compatibility.
type InvalidUTF8Error = errors.InvalidUTF8Error

// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
// (The argument to Unmarshal must be a non-nil pointer.)
type InvalidUnmarshalError = errors.InvalidUnmarshalError

// A MarshalerError represents an error from calling a MarshalJSON or MarshalText method.
type MarshalerError = errors.MarshalerError

// A SyntaxError is a description of a JSON syntax error.
type SyntaxError = errors.SyntaxError

// An UnmarshalFieldError describes a JSON object key that
// led to an unexported (and therefore unwritable) struct field.
//
// Deprecated: No longer used; kept for compatibility.
type UnmarshalFieldError = errors.UnmarshalFieldError

// An UnmarshalTypeError describes a JSON value that was
// not appropriate for a value of a specific Go type.
type UnmarshalTypeError = errors.UnmarshalTypeError

// An UnsupportedTypeError is returned by Marshal when attempting
// to encode an unsupported value type.
type UnsupportedTypeError = errors.UnsupportedTypeError

// An UnsupportedValueError is the internal errors package's unsupported-value
// error, re-exported for parity with encoding/json.
type UnsupportedValueError = errors.UnsupportedValueError

// A PathError is the internal errors package's JSON Path error type
// (go-json extension; see CreatePath).
type PathError = errors.PathError
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/option.go
vendor/github.com/goccy/go-json/option.go
package json

import (
	"io"

	"github.com/goccy/go-json/internal/decoder"
	"github.com/goccy/go-json/internal/encoder"
)

// EncodeOption is the mutable option record consumed by the encoder.
type EncodeOption = encoder.Option

// EncodeOptionFunc mutates an EncodeOption; values are produced by the
// helpers below and passed to MarshalWithOption and friends.
type EncodeOptionFunc func(*EncodeOption)

// UnorderedMap doesn't sort when encoding map type.
func UnorderedMap() EncodeOptionFunc {
	return func(opt *EncodeOption) {
		opt.Flag |= encoder.UnorderedMapOption
	}
}

// DisableHTMLEscape disables escaping of HTML characters ( '&', '<', '>' ) when encoding string.
func DisableHTMLEscape() EncodeOptionFunc {
	return func(opt *EncodeOption) {
		opt.Flag &= ^encoder.HTMLEscapeOption
	}
}

// DisableNormalizeUTF8 disables UTF-8 normalization when encoding strings.
// By default, characters in the range 0x80 - 0xFF are processed by applying
// \ufffd for invalid code points and escaping \u2028 and \u2029.
// You can expect faster speeds by applying this option, but be careful.
// encoding/json implements this here: https://github.com/golang/go/blob/6178d25fc0b28724b1b5aec2b1b74fc06d9294c7/src/encoding/json/encode.go#L1067-L1093.
func DisableNormalizeUTF8() EncodeOptionFunc {
	return func(opt *EncodeOption) {
		opt.Flag &= ^encoder.NormalizeUTF8Option
	}
}

// Debug outputs debug information when panic occurs during encoding.
func Debug() EncodeOptionFunc {
	return func(opt *EncodeOption) {
		opt.Flag |= encoder.DebugOption
	}
}

// DebugWith sets the destination to write debug messages.
func DebugWith(w io.Writer) EncodeOptionFunc {
	return func(opt *EncodeOption) {
		opt.DebugOut = w
	}
}

// DebugDOT sets the destination to write opcodes graph.
func DebugDOT(w io.WriteCloser) EncodeOptionFunc {
	return func(opt *EncodeOption) {
		opt.DebugDOTOut = w
	}
}

// Colorize adds an identifier for coloring to the string of the encoded result.
func Colorize(scheme *ColorScheme) EncodeOptionFunc {
	return func(opt *EncodeOption) {
		opt.Flag |= encoder.ColorizeOption
		opt.ColorScheme = scheme
	}
}

// DecodeOption is the mutable option record consumed by the decoder.
type DecodeOption = decoder.Option

// DecodeOptionFunc mutates a DecodeOption; values are produced by the
// helpers below and passed to UnmarshalWithOption and friends.
type DecodeOptionFunc func(*DecodeOption)

// DecodeFieldPriorityFirstWin makes the first-seen JSON object key win.
// In the default behavior, go-json, like encoding/json,
// reflects the result of the last evaluation when a field with the same name exists.
// This option allows you to change this behavior:
// it reflects the result of the first evaluation if a field with the same name exists.
// This behavior has a performance advantage, as it allows the subsequent strings to be skipped once all fields have been evaluated.
func DecodeFieldPriorityFirstWin() DecodeOptionFunc {
	return func(opt *DecodeOption) {
		opt.Flags |= decoder.FirstWinOption
	}
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/encode.go
vendor/github.com/goccy/go-json/encode.go
package json

import (
	"context"
	"io"
	"os"
	"unsafe"

	"github.com/goccy/go-json/internal/encoder"
	"github.com/goccy/go-json/internal/encoder/vm"
	"github.com/goccy/go-json/internal/encoder/vm_color"
	"github.com/goccy/go-json/internal/encoder/vm_color_indent"
	"github.com/goccy/go-json/internal/encoder/vm_indent"
)

// An Encoder writes JSON values to an output stream.
type Encoder struct {
	w                 io.Writer
	enabledIndent     bool   // set by SetIndent; selects the indent encoding path
	enabledHTMLEscape bool   // set by SetEscapeHTML; NewEncoder defaults this to true
	prefix            string // indent prefix, see SetIndent
	indentStr         string // per-level indent string, see SetIndent
}

// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
	return &Encoder{w: w, enabledHTMLEscape: true}
}

// Encode writes the JSON encoding of v to the stream, followed by a newline character.
//
// See the documentation for Marshal for details about the conversion of Go values to JSON.
func (e *Encoder) Encode(v interface{}) error {
	return e.EncodeWithOption(v)
}

// EncodeWithOption call Encode with EncodeOption.
func (e *Encoder) EncodeWithOption(v interface{}, optFuncs ...EncodeOptionFunc) error {
	// Runtime contexts are pooled; every path below must release ctx.
	ctx := encoder.TakeRuntimeContext()
	ctx.Option.Flag = 0

	err := e.encodeWithOption(ctx, v, optFuncs...)

	encoder.ReleaseRuntimeContext(ctx)
	return err
}

// EncodeContext call Encode with context.Context and EncodeOption.
func (e *Encoder) EncodeContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) error {
	rctx := encoder.TakeRuntimeContext()
	rctx.Option.Flag = 0
	rctx.Option.Flag |= encoder.ContextOption
	rctx.Option.Context = ctx

	err := e.encodeWithOption(rctx, v, optFuncs...) //nolint: contextcheck

	encoder.ReleaseRuntimeContext(rctx)
	return err
}

// encodeWithOption applies the Encoder's settings plus optFuncs to ctx.Option,
// encodes v, trims the encoder's trailing separator byte(s), appends '\n' and
// writes the result to e.w. The caller owns ctx and releases it.
func (e *Encoder) encodeWithOption(ctx *encoder.RuntimeContext, v interface{}, optFuncs ...EncodeOptionFunc) error {
	if e.enabledHTMLEscape {
		ctx.Option.Flag |= encoder.HTMLEscapeOption
	}
	ctx.Option.Flag |= encoder.NormalizeUTF8Option
	ctx.Option.DebugOut = os.Stdout
	for _, optFunc := range optFuncs {
		optFunc(ctx.Option)
	}
	var (
		buf []byte
		err error
	)
	if e.enabledIndent {
		buf, err = encodeIndent(ctx, v, e.prefix, e.indentStr)
	} else {
		buf, err = encode(ctx, v)
	}
	if err != nil {
		return err
	}
	// The encoding VM leaves a trailing separator on the buffer
	// (two bytes in indent mode, one otherwise); drop it before writing.
	if e.enabledIndent {
		buf = buf[:len(buf)-2]
	} else {
		buf = buf[:len(buf)-1]
	}
	buf = append(buf, '\n')
	if _, err := e.w.Write(buf); err != nil {
		return err
	}
	return nil
}

// SetEscapeHTML specifies whether problematic HTML characters should be escaped inside JSON quoted strings.
// The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e to avoid certain safety problems that can arise when embedding JSON in HTML.
//
// In non-HTML settings where the escaping interferes with the readability of the output, SetEscapeHTML(false) disables this behavior.
func (e *Encoder) SetEscapeHTML(on bool) {
	e.enabledHTMLEscape = on
}

// SetIndent instructs the encoder to format each subsequent encoded value as if indented by the package-level function Indent(dst, src, prefix, indent).
// Calling SetIndent("", "") disables indentation.
func (e *Encoder) SetIndent(prefix, indent string) {
	if prefix == "" && indent == "" {
		e.enabledIndent = false
		return
	}
	e.prefix = prefix
	e.indentStr = indent
	e.enabledIndent = true
}

// marshalContext encodes v with HTML escaping and UTF-8 normalization enabled
// and ctx reachable by context-aware marshalers, returning a detached copy of
// the encoded bytes (the working buffer is pooled and must not leak out).
func marshalContext(ctx context.Context, v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) {
	rctx := encoder.TakeRuntimeContext()
	rctx.Option.Flag = 0
	rctx.Option.Flag = encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option | encoder.ContextOption
	rctx.Option.Context = ctx
	for _, optFunc := range optFuncs {
		optFunc(rctx.Option)
	}
	buf, err := encode(rctx, v) //nolint: contextcheck
	if err != nil {
		encoder.ReleaseRuntimeContext(rctx)
		return nil, err
	}

	// this line exists to escape call of `runtime.makeslicecopy` .
	// if use `make([]byte, len(buf)-1)` and `copy(copied, buf)`,
	// dst buffer size and src buffer size are differrent.
	// in this case, compiler uses `runtime.makeslicecopy`, but it is slow.
	buf = buf[:len(buf)-1]
	copied := make([]byte, len(buf))
	copy(copied, buf)

	encoder.ReleaseRuntimeContext(rctx)
	return copied, nil
}

// marshal encodes v with the default option set (HTML escape + UTF-8
// normalization) and returns a detached copy of the encoded bytes.
func marshal(v interface{}, optFuncs ...EncodeOptionFunc) ([]byte, error) {
	ctx := encoder.TakeRuntimeContext()

	ctx.Option.Flag = 0
	ctx.Option.Flag |= (encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option)
	for _, optFunc := range optFuncs {
		optFunc(ctx.Option)
	}

	buf, err := encode(ctx, v)
	if err != nil {
		encoder.ReleaseRuntimeContext(ctx)
		return nil, err
	}

	// this line exists to escape call of `runtime.makeslicecopy` .
	// if use `make([]byte, len(buf)-1)` and `copy(copied, buf)`,
	// dst buffer size and src buffer size are differrent.
	// in this case, compiler uses `runtime.makeslicecopy`, but it is slow.
	buf = buf[:len(buf)-1]
	copied := make([]byte, len(buf))
	copy(copied, buf)

	encoder.ReleaseRuntimeContext(ctx)
	return copied, nil
}

// marshalNoEscape is marshal without the KeepRefs escape bookkeeping; used
// when the caller guarantees v does not escape (see encodeNoEscape).
func marshalNoEscape(v interface{}) ([]byte, error) {
	ctx := encoder.TakeRuntimeContext()

	ctx.Option.Flag = 0
	ctx.Option.Flag |= (encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option)

	buf, err := encodeNoEscape(ctx, v)
	if err != nil {
		encoder.ReleaseRuntimeContext(ctx)
		return nil, err
	}

	// this line exists to escape call of `runtime.makeslicecopy` .
	// if use `make([]byte, len(buf)-1)` and `copy(copied, buf)`,
	// dst buffer size and src buffer size are differrent.
	// in this case, compiler uses `runtime.makeslicecopy`, but it is slow.
	buf = buf[:len(buf)-1]
	copied := make([]byte, len(buf))
	copy(copied, buf)

	encoder.ReleaseRuntimeContext(ctx)
	return copied, nil
}

// marshalIndent encodes v in indent mode and returns a detached copy of the
// encoded bytes. Indent mode leaves a two-byte trailing separator, hence the
// len(buf)-2 trim here (vs len(buf)-1 in compact mode).
func marshalIndent(v interface{}, prefix, indent string, optFuncs ...EncodeOptionFunc) ([]byte, error) {
	ctx := encoder.TakeRuntimeContext()

	ctx.Option.Flag = 0
	ctx.Option.Flag |= (encoder.HTMLEscapeOption | encoder.NormalizeUTF8Option | encoder.IndentOption)
	for _, optFunc := range optFuncs {
		optFunc(ctx.Option)
	}

	buf, err := encodeIndent(ctx, v, prefix, indent)
	if err != nil {
		encoder.ReleaseRuntimeContext(ctx)
		return nil, err
	}

	buf = buf[:len(buf)-2]
	copied := make([]byte, len(buf))
	copy(copied, buf)

	encoder.ReleaseRuntimeContext(ctx)
	return copied, nil
}

// encode compiles (or fetches from cache) the opcode set for v's dynamic type
// and runs the encoding VM over ctx.Buf. The returned buffer is ctx-owned.
func encode(ctx *encoder.RuntimeContext, v interface{}) ([]byte, error) {
	b := ctx.Buf[:0]
	if v == nil {
		b = encoder.AppendNull(ctx, b)
		b = encoder.AppendComma(ctx, b)
		return b, nil
	}
	// Decompose the interface value into its (type, data) words.
	header := (*emptyInterface)(unsafe.Pointer(&v))
	typ := header.typ

	typeptr := uintptr(unsafe.Pointer(typ))
	codeSet, err := encoder.CompileToGetCodeSet(ctx, typeptr)
	if err != nil {
		return nil, err
	}

	p := uintptr(header.ptr)
	ctx.Init(p, codeSet.CodeLength)
	// Keep the data pointer reachable by the GC while the VM works with
	// it as a uintptr.
	ctx.KeepRefs = append(ctx.KeepRefs, header.ptr)

	buf, err := encodeRunCode(ctx, b, codeSet)
	if err != nil {
		return nil, err
	}
	ctx.Buf = buf
	return buf, nil
}

// encodeNoEscape is encode without the KeepRefs bookkeeping; the caller must
// guarantee v stays reachable for the duration of the call.
func encodeNoEscape(ctx *encoder.RuntimeContext, v interface{}) ([]byte, error) {
	b := ctx.Buf[:0]
	if v == nil {
		b = encoder.AppendNull(ctx, b)
		b = encoder.AppendComma(ctx, b)
		return b, nil
	}
	header := (*emptyInterface)(unsafe.Pointer(&v))
	typ := header.typ

	typeptr := uintptr(unsafe.Pointer(typ))
	codeSet, err := encoder.CompileToGetCodeSet(ctx, typeptr)
	if err != nil {
		return nil, err
	}

	p := uintptr(header.ptr)
	ctx.Init(p, codeSet.CodeLength)
	buf, err := encodeRunCode(ctx, b, codeSet)
	if err != nil {
		return nil, err
	}

	ctx.Buf = buf
	return buf, nil
}

// encodeIndent is the indent-mode counterpart of encode.
func encodeIndent(ctx *encoder.RuntimeContext, v interface{}, prefix, indent string) ([]byte, error) {
	b := ctx.Buf[:0]
	if v == nil {
		b = encoder.AppendNull(ctx, b)
		b = encoder.AppendCommaIndent(ctx, b)
		return b, nil
	}
	header := (*emptyInterface)(unsafe.Pointer(&v))
	typ := header.typ

	typeptr := uintptr(unsafe.Pointer(typ))
	codeSet, err := encoder.CompileToGetCodeSet(ctx, typeptr)
	if err != nil {
		return nil, err
	}

	p := uintptr(header.ptr)
	ctx.Init(p, codeSet.CodeLength)

	buf, err := encodeRunIndentCode(ctx, b, codeSet, prefix, indent)

	// NOTE(review): KeepRefs is appended after the VM run here, unlike
	// encode() which appends before — preserved as-is.
	ctx.KeepRefs = append(ctx.KeepRefs, header.ptr)

	if err != nil {
		return nil, err
	}

	ctx.Buf = buf
	return buf, nil
}

// encodeRunCode dispatches to the compact-mode VM variant selected by the
// Debug/Colorize option flags.
func encodeRunCode(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) ([]byte, error) {
	if (ctx.Option.Flag & encoder.DebugOption) != 0 {
		if (ctx.Option.Flag & encoder.ColorizeOption) != 0 {
			return vm_color.DebugRun(ctx, b, codeSet)
		}
		return vm.DebugRun(ctx, b, codeSet)
	}
	if (ctx.Option.Flag & encoder.ColorizeOption) != 0 {
		return vm_color.Run(ctx, b, codeSet)
	}
	return vm.Run(ctx, b, codeSet)
}

// encodeRunIndentCode stores the prefix/indent strings on ctx and dispatches
// to the indent-mode VM variant selected by the Debug/Colorize option flags.
func encodeRunIndentCode(ctx *encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet, prefix, indent string) ([]byte, error) {
	ctx.Prefix = []byte(prefix)
	ctx.IndentStr = []byte(indent)
	if (ctx.Option.Flag & encoder.DebugOption) != 0 {
		if (ctx.Option.Flag & encoder.ColorizeOption) != 0 {
			return vm_color_indent.DebugRun(ctx, b, codeSet)
		}
		return vm_indent.DebugRun(ctx, b, codeSet)
	}
	if (ctx.Option.Flag & encoder.ColorizeOption) != 0 {
		return vm_color_indent.Run(ctx, b, codeSet)
	}
	return vm_indent.Run(ctx, b, codeSet)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/color.go
vendor/github.com/goccy/go-json/color.go
package json

import (
	"fmt"

	"github.com/goccy/go-json/internal/encoder"
)

// Aliases to the internal color types, so callers can define custom schemes
// without importing the internal encoder package.
type (
	ColorFormat = encoder.ColorFormat
	ColorScheme = encoder.ColorScheme
)

// escape is the ANSI ESC character that introduces every color sequence.
const escape = "\x1b"

// colorAttr is an ANSI SGR attribute code.
type colorAttr int

// Standard foreground colors (SGR 30-37).
//nolint:deadcode,varcheck
const (
	fgBlackColor   colorAttr = 30
	fgRedColor     colorAttr = 31
	fgGreenColor   colorAttr = 32
	fgYellowColor  colorAttr = 33
	fgBlueColor    colorAttr = 34
	fgMagentaColor colorAttr = 35
	fgCyanColor    colorAttr = 36
	fgWhiteColor   colorAttr = 37
)

// High-intensity foreground colors (SGR 90-97).
//nolint:deadcode,varcheck
const (
	fgHiBlackColor   colorAttr = 90
	fgHiRedColor     colorAttr = 91
	fgHiGreenColor   colorAttr = 92
	fgHiYellowColor  colorAttr = 93
	fgHiBlueColor    colorAttr = 94
	fgHiMagentaColor colorAttr = 95
	fgHiCyanColor    colorAttr = 96
	fgHiWhiteColor   colorAttr = 97
)

// wrapColor renders the escape sequence that enables attr.
func wrapColor(attr colorAttr) string {
	return fmt.Sprintf("%s[%dm", escape, attr)
}

// resetColor renders the attribute-reset sequence (SGR 0).
func resetColor() string {
	return wrapColor(colorAttr(0))
}

// createColorFormat pairs the enabling sequence for attr with a trailing reset.
func createColorFormat(attr colorAttr) ColorFormat {
	return ColorFormat{
		Header: wrapColor(attr),
		Footer: resetColor(),
	}
}

// DefaultColorScheme is the color scheme used for colorized encoding when the
// caller does not provide one.
var (
	DefaultColorScheme = &ColorScheme{
		Int:       createColorFormat(fgHiMagentaColor),
		Uint:      createColorFormat(fgHiMagentaColor),
		Float:     createColorFormat(fgHiMagentaColor),
		Bool:      createColorFormat(fgHiYellowColor),
		String:    createColorFormat(fgHiGreenColor),
		Binary:    createColorFormat(fgHiRedColor),
		ObjectKey: createColorFormat(fgHiCyanColor),
		Null:      createColorFormat(fgBlueColor),
	}
)
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/query.go
vendor/github.com/goccy/go-json/query.go
package json import ( "github.com/goccy/go-json/internal/encoder" ) type ( // FieldQuery you can dynamically filter the fields in the structure by creating a FieldQuery, // adding it to context.Context using SetFieldQueryToContext and then passing it to MarshalContext. // This is a type-safe operation, so it is faster than filtering using map[string]interface{}. FieldQuery = encoder.FieldQuery FieldQueryString = encoder.FieldQueryString ) var ( // FieldQueryFromContext get current FieldQuery from context.Context. FieldQueryFromContext = encoder.FieldQueryFromContext // SetFieldQueryToContext set current FieldQuery to context.Context. SetFieldQueryToContext = encoder.SetFieldQueryToContext ) // BuildFieldQuery builds FieldQuery by fieldName or sub field query. // First, specify the field name that you want to keep in structure type. // If the field you want to keep is a structure type, by creating a sub field query using BuildSubFieldQuery, // you can select the fields you want to keep in the structure. // This description can be written recursively. func BuildFieldQuery(fields ...FieldQueryString) (*FieldQuery, error) { query, err := Marshal(fields) if err != nil { return nil, err } return FieldQueryString(query).Build() } // BuildSubFieldQuery builds sub field query. func BuildSubFieldQuery(name string) *SubFieldQuery { return &SubFieldQuery{name: name} } type SubFieldQuery struct { name string } func (q *SubFieldQuery) Fields(fields ...FieldQueryString) FieldQueryString { query, _ := Marshal(map[string][]FieldQueryString{q.name: fields}) return FieldQueryString(query) }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/decode.go
vendor/github.com/goccy/go-json/decode.go
package json

import (
	"context"
	"fmt"
	"io"
	"reflect"
	"unsafe"

	"github.com/goccy/go-json/internal/decoder"
	"github.com/goccy/go-json/internal/errors"
	"github.com/goccy/go-json/internal/runtime"
)

// A Decoder reads and decodes JSON values from an input stream.
type Decoder struct {
	s *decoder.Stream
}

const (
	// nul terminates every decode buffer so the decoder can scan without
	// bounds checks; validateEndBuf treats it as end-of-input.
	nul = '\000'
)

// emptyInterface mirrors the runtime layout of an interface{} value so the
// (type, data) words can be extracted without reflection allocations.
type emptyInterface struct {
	typ *runtime.Type
	ptr unsafe.Pointer
}

// unmarshal decodes data into v using the compiled decoder for v's type.
// The input is copied into a nul-terminated working buffer first.
func unmarshal(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error {
	src := make([]byte, len(data)+1) // append nul byte to the end
	copy(src, data)

	header := (*emptyInterface)(unsafe.Pointer(&v))

	if err := validateType(header.typ, uintptr(header.ptr)); err != nil {
		return err
	}
	dec, err := decoder.CompileToGetDecoder(header.typ)
	if err != nil {
		return err
	}
	// Runtime contexts are pooled; release on every path.
	ctx := decoder.TakeRuntimeContext()
	ctx.Buf = src
	ctx.Option.Flags = 0
	for _, optFunc := range optFuncs {
		optFunc(ctx.Option)
	}
	cursor, err := dec.Decode(ctx, 0, 0, header.ptr)
	if err != nil {
		decoder.ReleaseRuntimeContext(ctx)
		return err
	}
	decoder.ReleaseRuntimeContext(ctx)
	return validateEndBuf(src, cursor)
}

// unmarshalContext is unmarshal with ctx made available to context-aware
// decoders via the ContextOption flag.
func unmarshalContext(ctx context.Context, data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error {
	src := make([]byte, len(data)+1) // append nul byte to the end
	copy(src, data)

	header := (*emptyInterface)(unsafe.Pointer(&v))

	if err := validateType(header.typ, uintptr(header.ptr)); err != nil {
		return err
	}
	dec, err := decoder.CompileToGetDecoder(header.typ)
	if err != nil {
		return err
	}
	rctx := decoder.TakeRuntimeContext()
	rctx.Buf = src
	rctx.Option.Flags = 0
	rctx.Option.Flags |= decoder.ContextOption
	rctx.Option.Context = ctx
	for _, optFunc := range optFuncs {
		optFunc(rctx.Option)
	}
	cursor, err := dec.Decode(rctx, 0, 0, header.ptr)
	if err != nil {
		decoder.ReleaseRuntimeContext(rctx)
		return err
	}
	decoder.ReleaseRuntimeContext(rctx)
	return validateEndBuf(src, cursor)
}

var (
	// pathDecoder is the shared decoder used to evaluate Path expressions.
	pathDecoder = decoder.NewPathDecoder()
)

// extractFromPath returns the raw JSON fragments of data selected by path.
// A root-only path short-circuits to the whole input.
func extractFromPath(path *Path, data []byte, optFuncs ...DecodeOptionFunc) ([][]byte, error) {
	if path.path.RootSelectorOnly {
		return [][]byte{data}, nil
	}
	src := make([]byte, len(data)+1) // append nul byte to the end
	copy(src, data)

	ctx := decoder.TakeRuntimeContext()
	ctx.Buf = src
	ctx.Option.Flags = 0
	ctx.Option.Flags |= decoder.PathOption
	ctx.Option.Path = path.path
	for _, optFunc := range optFuncs {
		optFunc(ctx.Option)
	}
	paths, cursor, err := pathDecoder.DecodePath(ctx, 0, 0)
	if err != nil {
		decoder.ReleaseRuntimeContext(ctx)
		return nil, err
	}
	decoder.ReleaseRuntimeContext(ctx)
	if err := validateEndBuf(src, cursor); err != nil {
		return nil, err
	}
	return paths, nil
}

// unmarshalNoEscape is unmarshal with the destination pointer laundered
// through noescape(); the caller must guarantee v outlives the call.
func unmarshalNoEscape(data []byte, v interface{}, optFuncs ...DecodeOptionFunc) error {
	src := make([]byte, len(data)+1) // append nul byte to the end
	copy(src, data)

	header := (*emptyInterface)(unsafe.Pointer(&v))

	if err := validateType(header.typ, uintptr(header.ptr)); err != nil {
		return err
	}
	dec, err := decoder.CompileToGetDecoder(header.typ)
	if err != nil {
		return err
	}

	ctx := decoder.TakeRuntimeContext()
	ctx.Buf = src
	ctx.Option.Flags = 0
	for _, optFunc := range optFuncs {
		optFunc(ctx.Option)
	}
	cursor, err := dec.Decode(ctx, 0, 0, noescape(header.ptr))
	if err != nil {
		decoder.ReleaseRuntimeContext(ctx)
		return err
	}
	decoder.ReleaseRuntimeContext(ctx)
	return validateEndBuf(src, cursor)
}

// validateEndBuf verifies that only whitespace remains between cursor and the
// terminating nul; anything else is a syntax error after the top-level value.
func validateEndBuf(src []byte, cursor int64) error {
	for {
		switch src[cursor] {
		case ' ', '\t', '\n', '\r':
			cursor++
			continue
		case nul:
			return nil
		}
		return errors.ErrSyntax(
			fmt.Sprintf("invalid character '%c' after top-level value", src[cursor]),
			cursor+1,
		)
	}
}

// noescape hides p from the compiler's escape analysis so the pointed-to
// value is not forced onto the heap.
//
//nolint:staticcheck
//go:nosplit
func noescape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}

// validateType rejects destinations that are not non-nil pointers.
func validateType(typ *runtime.Type, p uintptr) error {
	if typ == nil || typ.Kind() != reflect.Ptr || p == 0 {
		return &InvalidUnmarshalError{Type: runtime.RType2Type(typ)}
	}
	return nil
}

// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
	s := decoder.NewStream(r)
	return &Decoder{
		s: s,
	}
}

// Buffered returns a reader of the data remaining in the Decoder's
// buffer. The reader is valid until the next call to Decode.
func (d *Decoder) Buffered() io.Reader {
	return d.s.Buffered()
}

// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// the conversion of JSON into a Go value.
func (d *Decoder) Decode(v interface{}) error {
	return d.DecodeWithOption(v)
}

// DecodeContext reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v with context.Context.
func (d *Decoder) DecodeContext(ctx context.Context, v interface{}) error {
	d.s.Option.Flags |= decoder.ContextOption
	d.s.Option.Context = ctx
	return d.DecodeWithOption(v)
}

// DecodeWithOption is Decode with additional decode options applied to the
// underlying stream.
func (d *Decoder) DecodeWithOption(v interface{}, optFuncs ...DecodeOptionFunc) error {
	header := (*emptyInterface)(unsafe.Pointer(&v))
	typ := header.typ
	ptr := uintptr(header.ptr)
	typeptr := uintptr(unsafe.Pointer(typ))
	// noescape trick for header.typ ( reflect.*rtype )
	copiedType := *(**runtime.Type)(unsafe.Pointer(&typeptr))

	if err := validateType(copiedType, ptr); err != nil {
		return err
	}

	dec, err := decoder.CompileToGetDecoder(typ)
	if err != nil {
		return err
	}
	if err := d.s.PrepareForDecode(); err != nil {
		return err
	}
	s := d.s
	for _, optFunc := range optFuncs {
		optFunc(s.Option)
	}
	if err := dec.DecodeStream(s, 0, header.ptr); err != nil {
		return err
	}
	s.Reset()
	return nil
}

// More reports whether there is another element in the current array or
// object being parsed.
func (d *Decoder) More() bool {
	return d.s.More()
}

// Token returns the next JSON token in the input stream.
func (d *Decoder) Token() (Token, error) {
	return d.s.Token()
}

// DisallowUnknownFields causes the Decoder to return an error when the destination
// is a struct and the input contains object keys which do not match any
// non-ignored, exported fields in the destination.
func (d *Decoder) DisallowUnknownFields() {
	d.s.DisallowUnknownFields = true
}

// InputOffset returns the input stream byte offset of the current decoder position.
func (d *Decoder) InputOffset() int64 {
	return d.s.TotalOffset()
}

// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (d *Decoder) UseNumber() {
	d.s.UseNumber = true
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/errors/error.go
vendor/github.com/goccy/go-json/internal/errors/error.go
package errors

import (
	"fmt"
	"reflect"
	"strconv"
)

// An InvalidUTF8Error reports a string value containing invalid UTF-8.
type InvalidUTF8Error struct {
	S string // the whole string value that caused the error
}

func (e *InvalidUTF8Error) Error() string {
	return fmt.Sprintf("json: invalid UTF-8 in string: %s", strconv.Quote(e.S))
}

// An InvalidUnmarshalError describes an invalid destination passed to
// Unmarshal: nil, a non-pointer, or a nil pointer.
type InvalidUnmarshalError struct {
	Type reflect.Type
}

func (e *InvalidUnmarshalError) Error() string {
	switch {
	case e.Type == nil:
		return "json: Unmarshal(nil)"
	case e.Type.Kind() != reflect.Ptr:
		return fmt.Sprintf("json: Unmarshal(non-pointer %s)", e.Type)
	default:
		return fmt.Sprintf("json: Unmarshal(nil %s)", e.Type)
	}
}

// A MarshalerError represents an error from calling a MarshalJSON or MarshalText method.
type MarshalerError struct {
	Type       reflect.Type
	Err        error
	sourceFunc string
}

func (e *MarshalerError) Error() string {
	fn := e.sourceFunc
	if fn == "" {
		fn = "MarshalJSON"
	}
	return fmt.Sprintf("json: error calling %s for type %s: %s", fn, e.Type, e.Err.Error())
}

// Unwrap returns the underlying error.
func (e *MarshalerError) Unwrap() error { return e.Err }

// A SyntaxError is a description of a JSON syntax error.
type SyntaxError struct {
	msg    string // description of error
	Offset int64  // error occurred after reading Offset bytes
}

func (e *SyntaxError) Error() string { return e.msg }

// An UnmarshalFieldError describes a JSON object key that
// led to an unexported (and therefore unwritable) struct field.
//
// Deprecated: No longer used; kept for compatibility.
type UnmarshalFieldError struct {
	Key   string
	Type  reflect.Type
	Field reflect.StructField
}

func (e *UnmarshalFieldError) Error() string {
	return fmt.Sprintf("json: cannot unmarshal object key %s into unexported field %s of type %s",
		strconv.Quote(e.Key), e.Field.Name, e.Type.String(),
	)
}

// An UnmarshalTypeError describes a JSON value that was
// not appropriate for a value of a specific Go type.
type UnmarshalTypeError struct {
	Value  string       // description of JSON value - "bool", "array", "number -5"
	Type   reflect.Type // type of Go value it could not be assigned to
	Offset int64        // error occurred after reading Offset bytes
	Struct string       // name of the struct type containing the field
	Field  string       // the full path from root node to the field
}

func (e *UnmarshalTypeError) Error() string {
	if e.Struct == "" && e.Field == "" {
		return fmt.Sprintf("json: cannot unmarshal %s into Go value of type %s", e.Value, e.Type)
	}
	return fmt.Sprintf("json: cannot unmarshal %s into Go struct field %s.%s of type %s",
		e.Value, e.Struct, e.Field, e.Type,
	)
}

// An UnsupportedTypeError is returned by Marshal when attempting
// to encode an unsupported value type.
type UnsupportedTypeError struct {
	Type reflect.Type
}

func (e *UnsupportedTypeError) Error() string {
	return fmt.Sprintf("json: unsupported type: %s", e.Type)
}

// An UnsupportedValueError is returned by Marshal when attempting
// to encode an unsupported value.
type UnsupportedValueError struct {
	Value reflect.Value
	Str   string
}

func (e *UnsupportedValueError) Error() string {
	return fmt.Sprintf("json: unsupported value: %s", e.Str)
}

// ErrSyntax builds a SyntaxError with the given message and offset.
func ErrSyntax(msg string, offset int64) *SyntaxError {
	return &SyntaxError{Offset: offset, msg: msg}
}

// ErrMarshaler builds a MarshalerError for typ wrapping err; msg names the
// marshaler method that failed (empty means MarshalJSON).
func ErrMarshaler(typ reflect.Type, err error, msg string) *MarshalerError {
	return &MarshalerError{
		Type:       typ,
		Err:        err,
		sourceFunc: msg,
	}
}

// ErrExceededMaxDepth reports nesting beyond the decoder's depth limit.
func ErrExceededMaxDepth(c byte, cursor int64) *SyntaxError {
	return &SyntaxError{
		Offset: cursor,
		msg:    fmt.Sprintf(`invalid character "%c" exceeded max depth`, c),
	}
}

// ErrNotAtBeginningOfValue reports a cursor not positioned at a value start.
func ErrNotAtBeginningOfValue(cursor int64) *SyntaxError {
	return &SyntaxError{Offset: cursor, msg: "not at beginning of value"}
}

// ErrUnexpectedEndOfJSON reports truncated input while parsing msg.
func ErrUnexpectedEndOfJSON(msg string, cursor int64) *SyntaxError {
	return &SyntaxError{
		Offset: cursor,
		msg:    fmt.Sprintf("json: %s unexpected end of JSON input", msg),
	}
}

// ErrExpected reports that a required token described by msg was missing.
func ErrExpected(msg string, cursor int64) *SyntaxError {
	return &SyntaxError{Offset: cursor, msg: fmt.Sprintf("expected %s", msg)}
}

// ErrInvalidCharacter reports an unexpected character c in the given context;
// a NUL byte produces a message without the character itself.
func ErrInvalidCharacter(c byte, context string, cursor int64) *SyntaxError {
	if c == 0 {
		return &SyntaxError{
			Offset: cursor,
			msg:    fmt.Sprintf("json: invalid character as %s", context),
		}
	}
	return &SyntaxError{
		Offset: cursor,
		msg:    fmt.Sprintf("json: invalid character %c as %s", c, context),
	}
}

// ErrInvalidBeginningOfValue reports a character that cannot start a value.
func ErrInvalidBeginningOfValue(c byte, cursor int64) *SyntaxError {
	return &SyntaxError{
		Offset: cursor,
		msg:    fmt.Sprintf("invalid character '%c' looking for beginning of value", c),
	}
}

// A PathError describes a malformed Path expression.
type PathError struct {
	msg string
}

func (e *PathError) Error() string {
	return fmt.Sprintf("json: invalid path format: %s", e.msg)
}

// ErrInvalidPath builds a PathError; msg is used as a format string when
// args are supplied, and verbatim otherwise.
func ErrInvalidPath(msg string, args ...interface{}) *PathError {
	if len(args) == 0 {
		return &PathError{msg: msg}
	}
	return &PathError{msg: fmt.Sprintf(msg, args...)}
}

// ErrEmptyPath reports an empty Path expression.
func ErrEmptyPath() *PathError {
	return &PathError{msg: "path is empty"}
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/runtime/struct_field.go
vendor/github.com/goccy/go-json/internal/runtime/struct_field.go
package runtime

import (
	"reflect"
	"strings"
	"unicode"
)

// getTag returns the raw `json` struct tag of field.
func getTag(field reflect.StructField) string {
	return field.Tag.Get("json")
}

// IsIgnoredStructField reports whether field should be skipped entirely:
// unexported fields (except embedded structs, whose promoted exported fields
// remain reachable) and fields tagged `json:"-"`.
func IsIgnoredStructField(field reflect.StructField) bool {
	if field.PkgPath != "" {
		// Unexported field.
		if !field.Anonymous {
			return true
		}
		// Embedded unexported field: keep only struct (or *struct) types.
		typ := field.Type
		if typ.Kind() == reflect.Ptr {
			typ = typ.Elem()
		}
		if typ.Kind() != reflect.Struct {
			return true
		}
	}
	return getTag(field) == "-"
}

// StructTag is the parsed form of one field's `json` tag.
type StructTag struct {
	Key         string // effective JSON key (tag name, or the Go field name)
	IsTaggedKey bool   // Key came from the tag rather than the field name
	IsOmitEmpty bool   // tag listed the "omitempty" option
	IsString    bool   // tag listed the "string" option
	Field       reflect.StructField
}

// StructTags collects the parsed tags of one struct type.
type StructTags []*StructTag

// ExistsKey reports whether any parsed tag already uses the given JSON key.
func (t StructTags) ExistsKey(key string) bool {
	for _, tag := range t {
		if tag.Key == key {
			return true
		}
	}
	return false
}

// isValidTag mirrors encoding/json's tag-name validation: non-empty, and
// every rune is a letter, a digit, or an allowed punctuation character.
func isValidTag(s string) bool {
	if s == "" {
		return false
	}
	for _, r := range s {
		if strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", r) {
			// Backslash and quote chars are reserved, but
			// otherwise any punctuation chars are allowed
			// in a tag name.
			continue
		}
		if !unicode.IsLetter(r) && !unicode.IsDigit(r) {
			return false
		}
	}
	return true
}

// StructTagFromField parses field's `json` tag into a StructTag.
func StructTagFromField(field reflect.StructField) *StructTag {
	parsed := &StructTag{Field: field, Key: field.Name}
	parts := strings.Split(getTag(field), ",")
	if name := parts[0]; name != "" && isValidTag(name) {
		parsed.Key = name
		parsed.IsTaggedKey = true
	}
	for _, opt := range parts[1:] {
		switch opt {
		case "omitempty":
			parsed.IsOmitEmpty = true
		case "string":
			parsed.IsString = true
		}
	}
	return parsed
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/runtime/type.go
vendor/github.com/goccy/go-json/internal/runtime/type.go
package runtime

import (
	"reflect"
	"sync"
	"unsafe"
)

// SliceHeader mirrors the runtime layout of a slice, with the data word held
// as unsafe.Pointer.
type SliceHeader struct {
	Data unsafe.Pointer
	Len  int
	Cap  int
}

const (
	// maxAcceptableTypeAddrRange caps the shifted address range accepted by
	// AnalyzeTypeAddr; larger ranges would make the derived cache too big.
	maxAcceptableTypeAddrRange = 1024 * 1024 * 2 // 2 Mib
)

// TypeAddr describes the address span of the program's reflect type
// descriptors, as measured by AnalyzeTypeAddr.
type TypeAddr struct {
	BaseTypeAddr uintptr // lowest descriptor address seen
	MaxTypeAddr  uintptr // highest descriptor address seen
	AddrRange    uintptr // MaxTypeAddr - BaseTypeAddr
	AddrShift    uintptr // right-shift to apply when indexing by address (from observed alignment)
}

var (
	typeAddr *TypeAddr // cached result; nil when analysis was not usable
	once     sync.Once // guards the one-time analysis in AnalyzeTypeAddr
)

//go:linkname typelinks reflect.typelinks
func typelinks() ([]unsafe.Pointer, [][]int32)

//go:linkname rtypeOff reflect.rtypeOff
func rtypeOff(unsafe.Pointer, int32) unsafe.Pointer

// AnalyzeTypeAddr walks every type descriptor registered with the runtime
// (via reflect.typelinks) and computes the address range they occupy, plus an
// alignment-derived shift. It returns nil when the result is unusable: more
// than one typelink section, a zero range, or a shifted range exceeding
// maxAcceptableTypeAddrRange. The analysis runs once; later calls return the
// cached result.
func AnalyzeTypeAddr() *TypeAddr {
	once.Do(func() {
		sections, offsets := typelinks()
		if len(sections) != 1 {
			return
		}
		if len(offsets) != 1 {
			return
		}
		section := sections[0]
		offset := offsets[0]
		var (
			min         uintptr = uintptr(^uint(0))
			max         uintptr = 0
			isAligned64         = true
			isAligned32         = true
		)
		for i := 0; i < len(offset); i++ {
			typ := (*Type)(rtypeOff(section, offset[i]))
			addr := uintptr(unsafe.Pointer(typ))
			if min > addr {
				min = addr
			}
			if max < addr {
				max = addr
			}
			if typ.Kind() == reflect.Ptr {
				// For pointer types, also include the element type's
				// descriptor in the measured range.
				addr = uintptr(unsafe.Pointer(typ.Elem()))
				if min > addr {
					min = addr
				}
				if max < addr {
					max = addr
				}
			}
			// Track whether every descriptor stays 64-/32-byte aligned
			// relative to the lowest address seen so far.
			isAligned64 = isAligned64 && (addr-min)&63 == 0
			isAligned32 = isAligned32 && (addr-min)&31 == 0
		}
		addrRange := max - min
		if addrRange == 0 {
			return
		}
		var addrShift uintptr
		if isAligned64 {
			addrShift = 6
		} else if isAligned32 {
			addrShift = 5
		}
		cacheSize := addrRange >> addrShift
		if cacheSize > maxAcceptableTypeAddrRange {
			return
		}
		typeAddr = &TypeAddr{
			BaseTypeAddr: min,
			MaxTypeAddr:  max,
			AddrRange:    addrRange,
			AddrShift:    addrShift,
		}
	})
	return typeAddr
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/runtime/rtype.go
vendor/github.com/goccy/go-json/internal/runtime/rtype.go
package runtime

import (
	"reflect"
	"unsafe"
)

// Type representing reflect.rtype for noescape trick
//
// Every method below forwards to the corresponding unexported reflect method
// via //go:linkname, and is marked //go:noescape so that passing *Type does
// not force receivers onto the heap.
type Type struct{}

//go:linkname rtype_Align reflect.(*rtype).Align
//go:noescape
func rtype_Align(*Type) int

func (t *Type) Align() int {
	return rtype_Align(t)
}

//go:linkname rtype_FieldAlign reflect.(*rtype).FieldAlign
//go:noescape
func rtype_FieldAlign(*Type) int

func (t *Type) FieldAlign() int {
	return rtype_FieldAlign(t)
}

//go:linkname rtype_Method reflect.(*rtype).Method
//go:noescape
func rtype_Method(*Type, int) reflect.Method

func (t *Type) Method(a0 int) reflect.Method {
	return rtype_Method(t, a0)
}

//go:linkname rtype_MethodByName reflect.(*rtype).MethodByName
//go:noescape
func rtype_MethodByName(*Type, string) (reflect.Method, bool)

func (t *Type) MethodByName(a0 string) (reflect.Method, bool) {
	return rtype_MethodByName(t, a0)
}

//go:linkname rtype_NumMethod reflect.(*rtype).NumMethod
//go:noescape
func rtype_NumMethod(*Type) int

func (t *Type) NumMethod() int {
	return rtype_NumMethod(t)
}

//go:linkname rtype_Name reflect.(*rtype).Name
//go:noescape
func rtype_Name(*Type) string

func (t *Type) Name() string {
	return rtype_Name(t)
}

//go:linkname rtype_PkgPath reflect.(*rtype).PkgPath
//go:noescape
func rtype_PkgPath(*Type) string

func (t *Type) PkgPath() string {
	return rtype_PkgPath(t)
}

//go:linkname rtype_Size reflect.(*rtype).Size
//go:noescape
func rtype_Size(*Type) uintptr

func (t *Type) Size() uintptr {
	return rtype_Size(t)
}

//go:linkname rtype_String reflect.(*rtype).String
//go:noescape
func rtype_String(*Type) string

func (t *Type) String() string {
	return rtype_String(t)
}

//go:linkname rtype_Kind reflect.(*rtype).Kind
//go:noescape
func rtype_Kind(*Type) reflect.Kind

func (t *Type) Kind() reflect.Kind {
	return rtype_Kind(t)
}

//go:linkname rtype_Implements reflect.(*rtype).Implements
//go:noescape
func rtype_Implements(*Type, reflect.Type) bool

func (t *Type) Implements(u reflect.Type) bool {
	return rtype_Implements(t, u)
}

//go:linkname rtype_AssignableTo reflect.(*rtype).AssignableTo
//go:noescape
func rtype_AssignableTo(*Type, reflect.Type) bool

func (t *Type) AssignableTo(u reflect.Type) bool {
	return rtype_AssignableTo(t, u)
}

//go:linkname rtype_ConvertibleTo reflect.(*rtype).ConvertibleTo
//go:noescape
func rtype_ConvertibleTo(*Type, reflect.Type) bool

func (t *Type) ConvertibleTo(u reflect.Type) bool {
	return rtype_ConvertibleTo(t, u)
}

//go:linkname rtype_Comparable reflect.(*rtype).Comparable
//go:noescape
func rtype_Comparable(*Type) bool

func (t *Type) Comparable() bool {
	return rtype_Comparable(t)
}

//go:linkname rtype_Bits reflect.(*rtype).Bits
//go:noescape
func rtype_Bits(*Type) int

func (t *Type) Bits() int {
	return rtype_Bits(t)
}

//go:linkname rtype_ChanDir reflect.(*rtype).ChanDir
//go:noescape
func rtype_ChanDir(*Type) reflect.ChanDir

func (t *Type) ChanDir() reflect.ChanDir {
	return rtype_ChanDir(t)
}

//go:linkname rtype_IsVariadic reflect.(*rtype).IsVariadic
//go:noescape
func rtype_IsVariadic(*Type) bool

func (t *Type) IsVariadic() bool {
	return rtype_IsVariadic(t)
}

//go:linkname rtype_Elem reflect.(*rtype).Elem
//go:noescape
func rtype_Elem(*Type) reflect.Type

// Elem converts the reflect.Type result back into a *Type so callers stay in
// the noescape representation.
func (t *Type) Elem() *Type {
	return Type2RType(rtype_Elem(t))
}

//go:linkname rtype_Field reflect.(*rtype).Field
//go:noescape
func rtype_Field(*Type, int) reflect.StructField

func (t *Type) Field(i int) reflect.StructField {
	return rtype_Field(t, i)
}

//go:linkname rtype_FieldByIndex reflect.(*rtype).FieldByIndex
//go:noescape
func rtype_FieldByIndex(*Type, []int) reflect.StructField

func (t *Type) FieldByIndex(index []int) reflect.StructField {
	return rtype_FieldByIndex(t, index)
}

//go:linkname rtype_FieldByName reflect.(*rtype).FieldByName
//go:noescape
func rtype_FieldByName(*Type, string) (reflect.StructField, bool)

func (t *Type) FieldByName(name string) (reflect.StructField, bool) {
	return rtype_FieldByName(t, name)
}

//go:linkname rtype_FieldByNameFunc reflect.(*rtype).FieldByNameFunc
//go:noescape
func rtype_FieldByNameFunc(*Type, func(string) bool) (reflect.StructField, bool)

func (t *Type) FieldByNameFunc(match func(string) bool) (reflect.StructField, bool) {
	return rtype_FieldByNameFunc(t, match)
}

//go:linkname rtype_In reflect.(*rtype).In
//go:noescape
func rtype_In(*Type, int) reflect.Type

func (t *Type) In(i int) reflect.Type {
	return rtype_In(t, i)
}

//go:linkname rtype_Key reflect.(*rtype).Key
//go:noescape
func rtype_Key(*Type) reflect.Type

// Key converts the reflect.Type result back into a *Type, like Elem.
func (t *Type) Key() *Type {
	return Type2RType(rtype_Key(t))
}

//go:linkname rtype_Len reflect.(*rtype).Len
//go:noescape
func rtype_Len(*Type) int

func (t *Type) Len() int {
	return rtype_Len(t)
}

//go:linkname rtype_NumField reflect.(*rtype).NumField
//go:noescape
func rtype_NumField(*Type) int

func (t *Type) NumField() int {
	return rtype_NumField(t)
}

//go:linkname rtype_NumIn reflect.(*rtype).NumIn
//go:noescape
func rtype_NumIn(*Type) int

func (t *Type) NumIn() int {
	return rtype_NumIn(t)
}

//go:linkname rtype_NumOut reflect.(*rtype).NumOut
//go:noescape
func rtype_NumOut(*Type) int

func (t *Type) NumOut() int {
	return rtype_NumOut(t)
}

//go:linkname rtype_Out reflect.(*rtype).Out
//go:noescape
func rtype_Out(*Type, int) reflect.Type

//go:linkname PtrTo reflect.(*rtype).ptrTo
//go:noescape
func PtrTo(*Type) *Type

func (t *Type) Out(i int) reflect.Type {
	return rtype_Out(t, i)
}

//go:linkname IfaceIndir reflect.ifaceIndir
//go:noescape
func IfaceIndir(*Type) bool

//go:linkname RType2Type reflect.toType
//go:noescape
func RType2Type(t *Type) reflect.Type

// emptyInterface mirrors the runtime layout of an interface value; only the
// data word is needed by Type2RType.
type emptyInterface struct {
	_   *Type
	ptr unsafe.Pointer
}

// Type2RType extracts the *rtype data word from a reflect.Type interface
// value without allocating.
func Type2RType(t reflect.Type) *Type {
	return (*Type)(((*emptyInterface)(unsafe.Pointer(&t))).ptr)
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/indent.go
vendor/github.com/goccy/go-json/internal/encoder/indent.go
package encoder

import (
	"bytes"
	"fmt"

	"github.com/goccy/go-json/internal/errors"
)

// takeIndentSrcRuntimeContext borrows a pooled RuntimeContext and fills its
// scratch buffer with a NUL-terminated copy of src. The trailing nul byte
// lets the scanners below detect end-of-input without bounds checks.
func takeIndentSrcRuntimeContext(src []byte) (*RuntimeContext, []byte) {
	ctx := TakeRuntimeContext()
	buf := ctx.Buf[:0]
	buf = append(append(buf, src...), nul)
	ctx.Buf = buf
	return ctx, buf
}

// Indent appends an indented form of the JSON document src to buf.
// Each nesting level is prefixed with prefix followed by repeated copies of
// indentStr, mirroring encoding/json.Indent. Both the NUL-terminated source
// copy and the destination scratch buffer come from the RuntimeContext pool
// and are released on every return path.
func Indent(buf *bytes.Buffer, src []byte, prefix, indentStr string) error {
	if len(src) == 0 {
		return errors.ErrUnexpectedEndOfJSON("", 0)
	}

	srcCtx, srcBuf := takeIndentSrcRuntimeContext(src)
	dstCtx := TakeRuntimeContext()
	dst := dstCtx.Buf[:0]

	dst, err := indentAndWrite(buf, dst, srcBuf, prefix, indentStr)
	if err != nil {
		ReleaseRuntimeContext(srcCtx)
		ReleaseRuntimeContext(dstCtx)
		return err
	}
	// Stash the (possibly grown) destination slice back so the pooled
	// context keeps its capacity for the next caller.
	dstCtx.Buf = dst
	ReleaseRuntimeContext(srcCtx)
	ReleaseRuntimeContext(dstCtx)
	return nil
}

// indentAndWrite indents src into dst and flushes the result to buf.
// It returns dst so the caller can return the grown buffer to its pool.
func indentAndWrite(buf *bytes.Buffer, dst []byte, src []byte, prefix, indentStr string) ([]byte, error) {
	dst, err := doIndent(dst, src, prefix, indentStr, false)
	if err != nil {
		return nil, err
	}
	if _, err := buf.Write(dst); err != nil {
		return nil, err
	}
	return dst, nil
}

// doIndent runs the recursive indenter over the whole document and then
// verifies that only whitespace follows the top-level value.
func doIndent(dst, src []byte, prefix, indentStr string, escape bool) ([]byte, error) {
	buf, cursor, err := indentValue(dst, src, 0, 0, []byte(prefix), []byte(indentStr), escape)
	if err != nil {
		return nil, err
	}
	if err := validateEndBuf(src, cursor); err != nil {
		return nil, err
	}
	return buf, nil
}

// indentValue dispatches on the first non-whitespace byte at cursor and
// appends the indented encoding of that single JSON value to dst, returning
// the extended dst and the cursor just past the value. Scalars reuse the
// compact* helpers because indentation never changes their bytes.
func indentValue(
	dst []byte,
	src []byte,
	indentNum int,
	cursor int64,
	prefix []byte,
	indentBytes []byte,
	escape bool) ([]byte, int64, error) {
	for {
		switch src[cursor] {
		case ' ', '\t', '\n', '\r':
			cursor++
			continue
		case '{':
			return indentObject(dst, src, indentNum, cursor, prefix, indentBytes, escape)
		case '}':
			return nil, 0, errors.ErrSyntax("unexpected character '}'", cursor)
		case '[':
			return indentArray(dst, src, indentNum, cursor, prefix, indentBytes, escape)
		case ']':
			return nil, 0, errors.ErrSyntax("unexpected character ']'", cursor)
		case '"':
			return compactString(dst, src, cursor, escape)
		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			return compactNumber(dst, src, cursor)
		case 't':
			return compactTrue(dst, src, cursor)
		case 'f':
			return compactFalse(dst, src, cursor)
		case 'n':
			return compactNull(dst, src, cursor)
		default:
			return nil, 0, errors.ErrSyntax(fmt.Sprintf("unexpected character '%c'", src[cursor]), cursor)
		}
	}
}

// indentObject appends the indented encoding of the object whose '{' is at
// src[cursor]. indentNum is the nesting depth of the object itself; its
// members are written one level deeper, one "key: value" pair per line.
func indentObject(
	dst []byte,
	src []byte,
	indentNum int,
	cursor int64,
	prefix []byte,
	indentBytes []byte,
	escape bool) ([]byte, int64, error) {
	if src[cursor] == '{' {
		dst = append(dst, '{')
	} else {
		return nil, 0, errors.ErrExpected("expected { character for object value", cursor)
	}
	cursor = skipWhiteSpace(src, cursor+1)
	if src[cursor] == '}' {
		// Empty object is emitted inline as {} with no inner newline.
		dst = append(dst, '}')
		return dst, cursor + 1, nil
	}
	indentNum++
	var err error
	for {
		// Each member starts on a fresh line at the member depth.
		dst = append(append(dst, '\n'), prefix...)
		for i := 0; i < indentNum; i++ {
			dst = append(dst, indentBytes...)
		}
		cursor = skipWhiteSpace(src, cursor)
		dst, cursor, err = compactString(dst, src, cursor, escape)
		if err != nil {
			return nil, 0, err
		}
		cursor = skipWhiteSpace(src, cursor)
		if src[cursor] != ':' {
			return nil, 0, errors.ErrSyntax(
				fmt.Sprintf("invalid character '%c' after object key", src[cursor]),
				cursor+1,
			)
		}
		dst = append(dst, ':', ' ')
		dst, cursor, err = indentValue(dst, src, indentNum, cursor+1, prefix, indentBytes, escape)
		if err != nil {
			return nil, 0, err
		}
		cursor = skipWhiteSpace(src, cursor)
		switch src[cursor] {
		case '}':
			// Closing brace sits on its own line, one level shallower.
			dst = append(append(dst, '\n'), prefix...)
			for i := 0; i < indentNum-1; i++ {
				dst = append(dst, indentBytes...)
			}
			dst = append(dst, '}')
			cursor++
			return dst, cursor, nil
		case ',':
			dst = append(dst, ',')
		default:
			return nil, 0, errors.ErrSyntax(
				fmt.Sprintf("invalid character '%c' after object key:value pair", src[cursor]),
				cursor+1,
			)
		}
		cursor++
	}
}

// indentArray appends the indented encoding of the array whose '[' is at
// src[cursor], writing one element per line at depth indentNum+1.
func indentArray(
	dst []byte,
	src []byte,
	indentNum int,
	cursor int64,
	prefix []byte,
	indentBytes []byte,
	escape bool) ([]byte, int64, error) {
	if src[cursor] == '[' {
		dst = append(dst, '[')
	} else {
		return nil, 0, errors.ErrExpected("expected [ character for array value", cursor)
	}
	cursor = skipWhiteSpace(src, cursor+1)
	if src[cursor] == ']' {
		// Empty array is emitted inline as [].
		dst = append(dst, ']')
		return dst, cursor + 1, nil
	}
	indentNum++
	var err error
	for {
		dst = append(append(dst, '\n'), prefix...)
		for i := 0; i < indentNum; i++ {
			dst = append(dst, indentBytes...)
		}
		dst, cursor, err = indentValue(dst, src, indentNum, cursor, prefix, indentBytes, escape)
		if err != nil {
			return nil, 0, err
		}
		cursor = skipWhiteSpace(src, cursor)
		switch src[cursor] {
		case ']':
			// Closing bracket on its own line, one level shallower.
			dst = append(append(dst, '\n'), prefix...)
			for i := 0; i < indentNum-1; i++ {
				dst = append(dst, indentBytes...)
			}
			dst = append(dst, ']')
			cursor++
			return dst, cursor, nil
		case ',':
			dst = append(dst, ',')
		default:
			return nil, 0, errors.ErrSyntax(
				fmt.Sprintf("invalid character '%c' after array value", src[cursor]),
				cursor+1,
			)
		}
		cursor++
	}
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/compact.go
vendor/github.com/goccy/go-json/internal/encoder/compact.go
package encoder

import (
	"bytes"
	"fmt"
	"strconv"
	"unsafe"

	"github.com/goccy/go-json/internal/errors"
)

var (
	// isWhiteSpace marks the four JSON whitespace bytes for O(1) skipping.
	isWhiteSpace = [256]bool{
		' ':  true,
		'\n': true,
		'\t': true,
		'\r': true,
	}
	// isHTMLEscapeChar marks bytes that must be rewritten as \u00XX when
	// HTML escaping is requested.
	isHTMLEscapeChar = [256]bool{
		'<': true,
		'>': true,
		'&': true,
	}
	// nul terminates scratch copies of the input so the scanners can detect
	// end-of-input without per-byte bounds checks.
	nul = byte('\000')
)

// Compact appends a whitespace-free form of the JSON document src to buf,
// optionally HTML-escaping string contents when escape is true. A pooled
// RuntimeContext holds the NUL-terminated working copy of src.
func Compact(buf *bytes.Buffer, src []byte, escape bool) error {
	if len(src) == 0 {
		return errors.ErrUnexpectedEndOfJSON("", 0)
	}
	// Output can never exceed the input size, so reserve it up front.
	buf.Grow(len(src))
	dst := buf.Bytes()

	ctx := TakeRuntimeContext()
	ctxBuf := ctx.Buf[:0]
	ctxBuf = append(append(ctxBuf, src...), nul)
	ctx.Buf = ctxBuf

	if err := compactAndWrite(buf, dst, ctxBuf, escape); err != nil {
		ReleaseRuntimeContext(ctx)
		return err
	}
	ReleaseRuntimeContext(ctx)
	return nil
}

// compactAndWrite compacts src into dst and flushes the result to buf.
func compactAndWrite(buf *bytes.Buffer, dst []byte, src []byte, escape bool) error {
	dst, err := compact(dst, src, escape)
	if err != nil {
		return err
	}
	if _, err := buf.Write(dst); err != nil {
		return err
	}
	return nil
}

// compact runs the recursive compactor over the whole document and then
// verifies that only whitespace follows the top-level value.
func compact(dst, src []byte, escape bool) ([]byte, error) {
	buf, cursor, err := compactValue(dst, src, 0, escape)
	if err != nil {
		return nil, err
	}
	if err := validateEndBuf(src, cursor); err != nil {
		return nil, err
	}
	return buf, nil
}

// validateEndBuf checks that src contains nothing but whitespace between
// cursor and the NUL terminator (i.e. no trailing garbage after the value).
func validateEndBuf(src []byte, cursor int64) error {
	for {
		switch src[cursor] {
		case ' ', '\t', '\n', '\r':
			cursor++
			continue
		case nul:
			return nil
		}
		return errors.ErrSyntax(
			fmt.Sprintf("invalid character '%c' after top-level value", src[cursor]),
			cursor+1,
		)
	}
}

// skipWhiteSpace advances cursor past any run of JSON whitespace in buf.
// Safe without bounds checks because buf is NUL-terminated.
func skipWhiteSpace(buf []byte, cursor int64) int64 {
LOOP:
	if isWhiteSpace[buf[cursor]] {
		cursor++
		goto LOOP
	}
	return cursor
}

// compactValue dispatches on the first non-whitespace byte at cursor and
// appends the compact encoding of that single JSON value to dst, returning
// the extended dst and the cursor just past the value.
func compactValue(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) {
	for {
		switch src[cursor] {
		case ' ', '\t', '\n', '\r':
			cursor++
			continue
		case '{':
			return compactObject(dst, src, cursor, escape)
		case '}':
			return nil, 0, errors.ErrSyntax("unexpected character '}'", cursor)
		case '[':
			return compactArray(dst, src, cursor, escape)
		case ']':
			return nil, 0, errors.ErrSyntax("unexpected character ']'", cursor)
		case '"':
			return compactString(dst, src, cursor, escape)
		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			return compactNumber(dst, src, cursor)
		case 't':
			return compactTrue(dst, src, cursor)
		case 'f':
			return compactFalse(dst, src, cursor)
		case 'n':
			return compactNull(dst, src, cursor)
		default:
			return nil, 0, errors.ErrSyntax(fmt.Sprintf("unexpected character '%c'", src[cursor]), cursor)
		}
	}
}

// compactObject appends the compact encoding of the object whose '{' is at
// src[cursor]: all inter-token whitespace is dropped.
func compactObject(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) {
	if src[cursor] == '{' {
		dst = append(dst, '{')
	} else {
		return nil, 0, errors.ErrExpected("expected { character for object value", cursor)
	}
	cursor = skipWhiteSpace(src, cursor+1)
	if src[cursor] == '}' {
		dst = append(dst, '}')
		return dst, cursor + 1, nil
	}
	var err error
	for {
		cursor = skipWhiteSpace(src, cursor)
		dst, cursor, err = compactString(dst, src, cursor, escape)
		if err != nil {
			return nil, 0, err
		}
		cursor = skipWhiteSpace(src, cursor)
		if src[cursor] != ':' {
			return nil, 0, errors.ErrExpected("colon after object key", cursor)
		}
		dst = append(dst, ':')
		dst, cursor, err = compactValue(dst, src, cursor+1, escape)
		if err != nil {
			return nil, 0, err
		}
		cursor = skipWhiteSpace(src, cursor)
		switch src[cursor] {
		case '}':
			dst = append(dst, '}')
			cursor++
			return dst, cursor, nil
		case ',':
			dst = append(dst, ',')
		default:
			return nil, 0, errors.ErrExpected("comma after object value", cursor)
		}
		cursor++
	}
}

// compactArray appends the compact encoding of the array whose '[' is at
// src[cursor].
func compactArray(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) {
	if src[cursor] == '[' {
		dst = append(dst, '[')
	} else {
		return nil, 0, errors.ErrExpected("expected [ character for array value", cursor)
	}
	cursor = skipWhiteSpace(src, cursor+1)
	if src[cursor] == ']' {
		dst = append(dst, ']')
		return dst, cursor + 1, nil
	}
	var err error
	for {
		dst, cursor, err = compactValue(dst, src, cursor, escape)
		if err != nil {
			return nil, 0, err
		}
		cursor = skipWhiteSpace(src, cursor)
		switch src[cursor] {
		case ']':
			dst = append(dst, ']')
			cursor++
			return dst, cursor, nil
		case ',':
			dst = append(dst, ',')
		default:
			return nil, 0, errors.ErrExpected("comma after array value", cursor)
		}
		cursor++
	}
}

// compactString copies the string literal starting at src[cursor] (the
// opening quote) into dst verbatim, honoring backslash escapes. When escape
// is true, '<', '>' and '&' are rewritten as \u00XX and the raw UTF-8
// encodings of U+2028/U+2029 (E2 80 A8 / E2 80 A9) become \u2028/\u2029.
func compactString(dst, src []byte, cursor int64, escape bool) ([]byte, int64, error) {
	if src[cursor] != '"' {
		return nil, 0, errors.ErrInvalidCharacter(src[cursor], "string", cursor)
	}
	start := cursor
	for {
		cursor++
		c := src[cursor]
		if escape {
			if isHTMLEscapeChar[c] {
				// Flush the pending run, then emit \u00XX for this byte.
				dst = append(dst, src[start:cursor]...)
				dst = append(dst, `\u00`...)
				dst = append(dst, hex[c>>4], hex[c&0xF])
				start = cursor + 1
			} else if c == 0xE2 && cursor+2 < int64(len(src)) && src[cursor+1] == 0x80 && src[cursor+2]&^1 == 0xA8 {
				// 0xA8&^1 == 0xA8 and 0xA9&^1 == 0xA8, so this matches both
				// U+2028 (line sep) and U+2029 (paragraph sep).
				dst = append(dst, src[start:cursor]...)
				dst = append(dst, `\u202`...)
				dst = append(dst, hex[src[cursor+2]&0xF])
				start = cursor + 3
				cursor += 2
			}
		}
		switch c {
		case '\\':
			// Skip the escaped byte so an escaped quote doesn't end the scan.
			cursor++
			if src[cursor] == nul {
				return nil, 0, errors.ErrUnexpectedEndOfJSON("string", int64(len(src)))
			}
		case '"':
			cursor++
			return append(dst, src[start:cursor]...), cursor, nil
		case nul:
			return nil, 0, errors.ErrUnexpectedEndOfJSON("string", int64(len(src)))
		}
	}
}

// compactNumber copies the numeric literal starting at src[cursor] into dst
// after validating it parses as a float64. The unsafe conversion avoids
// allocating a string for the ParseFloat call.
func compactNumber(dst, src []byte, cursor int64) ([]byte, int64, error) {
	start := cursor
	for {
		cursor++
		if floatTable[src[cursor]] {
			continue
		}
		break
	}
	num := src[start:cursor]
	if _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&num)), 64); err != nil {
		return nil, 0, err
	}
	dst = append(dst, num...)
	return dst, cursor, nil
}

// compactTrue copies the literal "true" at src[cursor] into dst.
func compactTrue(dst, src []byte, cursor int64) ([]byte, int64, error) {
	if cursor+3 >= int64(len(src)) {
		return nil, 0, errors.ErrUnexpectedEndOfJSON("true", cursor)
	}
	if !bytes.Equal(src[cursor:cursor+4], []byte(`true`)) {
		return nil, 0, errors.ErrInvalidCharacter(src[cursor], "true", cursor)
	}
	dst = append(dst, "true"...)
	cursor += 4
	return dst, cursor, nil
}

// compactFalse copies the literal "false" at src[cursor] into dst.
func compactFalse(dst, src []byte, cursor int64) ([]byte, int64, error) {
	if cursor+4 >= int64(len(src)) {
		return nil, 0, errors.ErrUnexpectedEndOfJSON("false", cursor)
	}
	if !bytes.Equal(src[cursor:cursor+5], []byte(`false`)) {
		return nil, 0, errors.ErrInvalidCharacter(src[cursor], "false", cursor)
	}
	dst = append(dst, "false"...)
	cursor += 5
	return dst, cursor, nil
}

// compactNull copies the literal "null" at src[cursor] into dst.
func compactNull(dst, src []byte, cursor int64) ([]byte, int64, error) {
	if cursor+3 >= int64(len(src)) {
		return nil, 0, errors.ErrUnexpectedEndOfJSON("null", cursor)
	}
	if !bytes.Equal(src[cursor:cursor+4], []byte(`null`)) {
		return nil, 0, errors.ErrInvalidCharacter(src[cursor], "null", cursor)
	}
	dst = append(dst, "null"...)
	cursor += 4
	return dst, cursor, nil
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go
vendor/github.com/goccy/go-json/internal/encoder/compiler_norace.go
//go:build !race // +build !race package encoder func CompileToGetCodeSet(ctx *RuntimeContext, typeptr uintptr) (*OpcodeSet, error) { initEncoder() if typeptr > typeAddr.MaxTypeAddr || typeptr < typeAddr.BaseTypeAddr { codeSet, err := compileToGetCodeSetSlowPath(typeptr) if err != nil { return nil, err } return getFilteredCodeSetIfNeeded(ctx, codeSet) } index := (typeptr - typeAddr.BaseTypeAddr) >> typeAddr.AddrShift if codeSet := cachedOpcodeSets[index]; codeSet != nil { filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet) if err != nil { return nil, err } return filtered, nil } codeSet, err := newCompiler().compile(typeptr) if err != nil { return nil, err } filtered, err := getFilteredCodeSetIfNeeded(ctx, codeSet) if err != nil { return nil, err } cachedOpcodeSets[index] = codeSet return filtered, nil }
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/option.go
vendor/github.com/goccy/go-json/internal/encoder/option.go
package encoder import ( "context" "io" ) type OptionFlag uint8 const ( HTMLEscapeOption OptionFlag = 1 << iota IndentOption UnorderedMapOption DebugOption ColorizeOption ContextOption NormalizeUTF8Option FieldQueryOption ) type Option struct { Flag OptionFlag ColorScheme *ColorScheme Context context.Context DebugOut io.Writer DebugDOTOut io.WriteCloser } type EncodeFormat struct { Header string Footer string } type EncodeFormatScheme struct { Int EncodeFormat Uint EncodeFormat Float EncodeFormat Bool EncodeFormat String EncodeFormat Binary EncodeFormat ObjectKey EncodeFormat Null EncodeFormat } type ( ColorScheme = EncodeFormatScheme ColorFormat = EncodeFormat )
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go
vendor/github.com/goccy/go-json/internal/encoder/decode_rune.go
package encoder

import "unicode/utf8"

const (
	// The default lowest and highest continuation byte.
	locb = 128 //0b10000000
	hicb = 191 //0b10111111

	// The names of these constants are chosen to give nice alignment in the
	// table below. The first nibble is an index into acceptRanges or F for
	// special one-byte cases. The second nibble is the Rune length or the
	// Status for the special one-byte case.
	xx = 0xF1 // invalid: size 1
	as = 0xF0 // ASCII: size 1
	s1 = 0x02 // accept 0, size 2
	s2 = 0x13 // accept 1, size 3
	s3 = 0x03 // accept 0, size 3
	s4 = 0x23 // accept 2, size 3
	s5 = 0x34 // accept 3, size 4
	s6 = 0x04 // accept 0, size 4
	s7 = 0x44 // accept 4, size 4
)

// first is information about the first byte in a UTF-8 sequence.
var first = [256]uint8{
	//   1  2  3  4  5  6  7  8  9  A  B  C  D  E  F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F
	//   1  2  3  4  5  6  7  8  9  A  B  C  D  E  F
	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F
	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F
	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF
	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF
	xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF
	s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF
	s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF
	s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF
}

// Final bytes of the three-byte UTF-8 encodings E2 80 A8 / E2 80 A9.
const (
	lineSep      = byte(168) //'\u2028'
	paragraphSep = byte(169) //'\u2029'
)

// decodeRuneState classifies the rune at the front of a string: valid
// UTF-8, invalid (decode error), or one of the two JS-hostile separators
// that need special escaping.
type decodeRuneState int

const (
	validUTF8State decodeRuneState = iota
	runeErrorState
	lineSepState
	paragraphSepState
)

// decodeRuneInString inspects the first UTF-8 sequence in s (which must be
// non-empty) and returns its classification plus the byte length consumed.
// Invalid sequences consume 1 byte, matching utf8.DecodeRuneInString.
func decodeRuneInString(s string) (decodeRuneState, int) {
	n := len(s)
	s0 := s[0]
	x := first[s0]
	if x >= as {
		// The following code simulates an additional check for x == xx and
		// handling the ASCII and invalid cases accordingly. This mask-and-or
		// approach prevents an additional branch.
		mask := rune(x) << 31 >> 31 // Create 0x0000 or 0xFFFF.
		if rune(s[0])&^mask|utf8.RuneError&mask == utf8.RuneError {
			return runeErrorState, 1
		}
		return validUTF8State, 1
	}
	// Low three bits of the table entry hold the sequence length.
	sz := int(x & 7)
	if n < sz {
		return runeErrorState, 1
	}
	// Validate the first continuation byte against the accept range encoded
	// in the high nibble of the table entry.
	s1 := s[1]
	switch x >> 4 {
	case 0:
		if s1 < locb || hicb < s1 {
			return runeErrorState, 1
		}
	case 1:
		if s1 < 0xA0 || hicb < s1 {
			return runeErrorState, 1
		}
	case 2:
		if s1 < locb || 0x9F < s1 {
			return runeErrorState, 1
		}
	case 3:
		if s1 < 0x90 || hicb < s1 {
			return runeErrorState, 1
		}
	case 4:
		if s1 < locb || 0x8F < s1 {
			return runeErrorState, 1
		}
	}
	if sz <= 2 {
		return validUTF8State, 2
	}
	s2 := s[2]
	if s2 < locb || hicb < s2 {
		return runeErrorState, 1
	}
	if sz <= 3 {
		// separator character prefixes: [2]byte{226, 128}
		if s0 == 226 && s1 == 128 {
			switch s2 {
			case lineSep:
				return lineSepState, 3
			case paragraphSep:
				return paragraphSepState, 3
			}
		}
		return validUTF8State, 3
	}
	s3 := s[3]
	if s3 < locb || hicb < s3 {
		return runeErrorState, 1
	}
	return validUTF8State, 4
}
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/map112.go
vendor/github.com/goccy/go-json/internal/encoder/map112.go
//go:build !go1.13 // +build !go1.13 package encoder import "unsafe" //go:linkname MapIterValue reflect.mapitervalue func MapIterValue(it *mapIter) unsafe.Pointer
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
false
kubev2v/forklift
https://github.com/kubev2v/forklift/blob/b3b4703e958c25d54c4d48138d9e80ae32fadac3/vendor/github.com/goccy/go-json/internal/encoder/optype.go
vendor/github.com/goccy/go-json/internal/encoder/optype.go
// Code generated by internal/cmd/generator. DO NOT EDIT! package encoder import ( "strings" ) type CodeType int const ( CodeOp CodeType = 0 CodeArrayHead CodeType = 1 CodeArrayElem CodeType = 2 CodeSliceHead CodeType = 3 CodeSliceElem CodeType = 4 CodeMapHead CodeType = 5 CodeMapKey CodeType = 6 CodeMapValue CodeType = 7 CodeMapEnd CodeType = 8 CodeRecursive CodeType = 9 CodeStructField CodeType = 10 CodeStructEnd CodeType = 11 ) var opTypeStrings = [400]string{ "End", "Interface", "Ptr", "SliceElem", "SliceEnd", "ArrayElem", "ArrayEnd", "MapKey", "MapValue", "MapEnd", "Recursive", "RecursivePtr", "RecursiveEnd", "InterfaceEnd", "Int", "Uint", "Float32", "Float64", "Bool", "String", "Bytes", "Number", "Array", "Map", "Slice", "Struct", "MarshalJSON", "MarshalText", "IntString", "UintString", "Float32String", "Float64String", "BoolString", "StringString", "NumberString", "IntPtr", "UintPtr", "Float32Ptr", "Float64Ptr", "BoolPtr", "StringPtr", "BytesPtr", "NumberPtr", "ArrayPtr", "MapPtr", "SlicePtr", "MarshalJSONPtr", "MarshalTextPtr", "InterfacePtr", "IntPtrString", "UintPtrString", "Float32PtrString", "Float64PtrString", "BoolPtrString", "StringPtrString", "NumberPtrString", "StructHeadInt", "StructHeadOmitEmptyInt", "StructPtrHeadInt", "StructPtrHeadOmitEmptyInt", "StructHeadUint", "StructHeadOmitEmptyUint", "StructPtrHeadUint", "StructPtrHeadOmitEmptyUint", "StructHeadFloat32", "StructHeadOmitEmptyFloat32", "StructPtrHeadFloat32", "StructPtrHeadOmitEmptyFloat32", "StructHeadFloat64", "StructHeadOmitEmptyFloat64", "StructPtrHeadFloat64", "StructPtrHeadOmitEmptyFloat64", "StructHeadBool", "StructHeadOmitEmptyBool", "StructPtrHeadBool", "StructPtrHeadOmitEmptyBool", "StructHeadString", "StructHeadOmitEmptyString", "StructPtrHeadString", "StructPtrHeadOmitEmptyString", "StructHeadBytes", "StructHeadOmitEmptyBytes", "StructPtrHeadBytes", "StructPtrHeadOmitEmptyBytes", "StructHeadNumber", "StructHeadOmitEmptyNumber", "StructPtrHeadNumber", 
"StructPtrHeadOmitEmptyNumber", "StructHeadArray", "StructHeadOmitEmptyArray", "StructPtrHeadArray", "StructPtrHeadOmitEmptyArray", "StructHeadMap", "StructHeadOmitEmptyMap", "StructPtrHeadMap", "StructPtrHeadOmitEmptyMap", "StructHeadSlice", "StructHeadOmitEmptySlice", "StructPtrHeadSlice", "StructPtrHeadOmitEmptySlice", "StructHeadStruct", "StructHeadOmitEmptyStruct", "StructPtrHeadStruct", "StructPtrHeadOmitEmptyStruct", "StructHeadMarshalJSON", "StructHeadOmitEmptyMarshalJSON", "StructPtrHeadMarshalJSON", "StructPtrHeadOmitEmptyMarshalJSON", "StructHeadMarshalText", "StructHeadOmitEmptyMarshalText", "StructPtrHeadMarshalText", "StructPtrHeadOmitEmptyMarshalText", "StructHeadIntString", "StructHeadOmitEmptyIntString", "StructPtrHeadIntString", "StructPtrHeadOmitEmptyIntString", "StructHeadUintString", "StructHeadOmitEmptyUintString", "StructPtrHeadUintString", "StructPtrHeadOmitEmptyUintString", "StructHeadFloat32String", "StructHeadOmitEmptyFloat32String", "StructPtrHeadFloat32String", "StructPtrHeadOmitEmptyFloat32String", "StructHeadFloat64String", "StructHeadOmitEmptyFloat64String", "StructPtrHeadFloat64String", "StructPtrHeadOmitEmptyFloat64String", "StructHeadBoolString", "StructHeadOmitEmptyBoolString", "StructPtrHeadBoolString", "StructPtrHeadOmitEmptyBoolString", "StructHeadStringString", "StructHeadOmitEmptyStringString", "StructPtrHeadStringString", "StructPtrHeadOmitEmptyStringString", "StructHeadNumberString", "StructHeadOmitEmptyNumberString", "StructPtrHeadNumberString", "StructPtrHeadOmitEmptyNumberString", "StructHeadIntPtr", "StructHeadOmitEmptyIntPtr", "StructPtrHeadIntPtr", "StructPtrHeadOmitEmptyIntPtr", "StructHeadUintPtr", "StructHeadOmitEmptyUintPtr", "StructPtrHeadUintPtr", "StructPtrHeadOmitEmptyUintPtr", "StructHeadFloat32Ptr", "StructHeadOmitEmptyFloat32Ptr", "StructPtrHeadFloat32Ptr", "StructPtrHeadOmitEmptyFloat32Ptr", "StructHeadFloat64Ptr", "StructHeadOmitEmptyFloat64Ptr", "StructPtrHeadFloat64Ptr", 
"StructPtrHeadOmitEmptyFloat64Ptr", "StructHeadBoolPtr", "StructHeadOmitEmptyBoolPtr", "StructPtrHeadBoolPtr", "StructPtrHeadOmitEmptyBoolPtr", "StructHeadStringPtr", "StructHeadOmitEmptyStringPtr", "StructPtrHeadStringPtr", "StructPtrHeadOmitEmptyStringPtr", "StructHeadBytesPtr", "StructHeadOmitEmptyBytesPtr", "StructPtrHeadBytesPtr", "StructPtrHeadOmitEmptyBytesPtr", "StructHeadNumberPtr", "StructHeadOmitEmptyNumberPtr", "StructPtrHeadNumberPtr", "StructPtrHeadOmitEmptyNumberPtr", "StructHeadArrayPtr", "StructHeadOmitEmptyArrayPtr", "StructPtrHeadArrayPtr", "StructPtrHeadOmitEmptyArrayPtr", "StructHeadMapPtr", "StructHeadOmitEmptyMapPtr", "StructPtrHeadMapPtr", "StructPtrHeadOmitEmptyMapPtr", "StructHeadSlicePtr", "StructHeadOmitEmptySlicePtr", "StructPtrHeadSlicePtr", "StructPtrHeadOmitEmptySlicePtr", "StructHeadMarshalJSONPtr", "StructHeadOmitEmptyMarshalJSONPtr", "StructPtrHeadMarshalJSONPtr", "StructPtrHeadOmitEmptyMarshalJSONPtr", "StructHeadMarshalTextPtr", "StructHeadOmitEmptyMarshalTextPtr", "StructPtrHeadMarshalTextPtr", "StructPtrHeadOmitEmptyMarshalTextPtr", "StructHeadInterfacePtr", "StructHeadOmitEmptyInterfacePtr", "StructPtrHeadInterfacePtr", "StructPtrHeadOmitEmptyInterfacePtr", "StructHeadIntPtrString", "StructHeadOmitEmptyIntPtrString", "StructPtrHeadIntPtrString", "StructPtrHeadOmitEmptyIntPtrString", "StructHeadUintPtrString", "StructHeadOmitEmptyUintPtrString", "StructPtrHeadUintPtrString", "StructPtrHeadOmitEmptyUintPtrString", "StructHeadFloat32PtrString", "StructHeadOmitEmptyFloat32PtrString", "StructPtrHeadFloat32PtrString", "StructPtrHeadOmitEmptyFloat32PtrString", "StructHeadFloat64PtrString", "StructHeadOmitEmptyFloat64PtrString", "StructPtrHeadFloat64PtrString", "StructPtrHeadOmitEmptyFloat64PtrString", "StructHeadBoolPtrString", "StructHeadOmitEmptyBoolPtrString", "StructPtrHeadBoolPtrString", "StructPtrHeadOmitEmptyBoolPtrString", "StructHeadStringPtrString", "StructHeadOmitEmptyStringPtrString", "StructPtrHeadStringPtrString", 
"StructPtrHeadOmitEmptyStringPtrString", "StructHeadNumberPtrString", "StructHeadOmitEmptyNumberPtrString", "StructPtrHeadNumberPtrString", "StructPtrHeadOmitEmptyNumberPtrString", "StructHead", "StructHeadOmitEmpty", "StructPtrHead", "StructPtrHeadOmitEmpty", "StructFieldInt", "StructFieldOmitEmptyInt", "StructEndInt", "StructEndOmitEmptyInt", "StructFieldUint", "StructFieldOmitEmptyUint", "StructEndUint", "StructEndOmitEmptyUint", "StructFieldFloat32", "StructFieldOmitEmptyFloat32", "StructEndFloat32", "StructEndOmitEmptyFloat32", "StructFieldFloat64", "StructFieldOmitEmptyFloat64", "StructEndFloat64", "StructEndOmitEmptyFloat64", "StructFieldBool", "StructFieldOmitEmptyBool", "StructEndBool", "StructEndOmitEmptyBool", "StructFieldString", "StructFieldOmitEmptyString", "StructEndString", "StructEndOmitEmptyString", "StructFieldBytes", "StructFieldOmitEmptyBytes", "StructEndBytes", "StructEndOmitEmptyBytes", "StructFieldNumber", "StructFieldOmitEmptyNumber", "StructEndNumber", "StructEndOmitEmptyNumber", "StructFieldArray", "StructFieldOmitEmptyArray", "StructEndArray", "StructEndOmitEmptyArray", "StructFieldMap", "StructFieldOmitEmptyMap", "StructEndMap", "StructEndOmitEmptyMap", "StructFieldSlice", "StructFieldOmitEmptySlice", "StructEndSlice", "StructEndOmitEmptySlice", "StructFieldStruct", "StructFieldOmitEmptyStruct", "StructEndStruct", "StructEndOmitEmptyStruct", "StructFieldMarshalJSON", "StructFieldOmitEmptyMarshalJSON", "StructEndMarshalJSON", "StructEndOmitEmptyMarshalJSON", "StructFieldMarshalText", "StructFieldOmitEmptyMarshalText", "StructEndMarshalText", "StructEndOmitEmptyMarshalText", "StructFieldIntString", "StructFieldOmitEmptyIntString", "StructEndIntString", "StructEndOmitEmptyIntString", "StructFieldUintString", "StructFieldOmitEmptyUintString", "StructEndUintString", "StructEndOmitEmptyUintString", "StructFieldFloat32String", "StructFieldOmitEmptyFloat32String", "StructEndFloat32String", "StructEndOmitEmptyFloat32String", 
"StructFieldFloat64String", "StructFieldOmitEmptyFloat64String", "StructEndFloat64String", "StructEndOmitEmptyFloat64String", "StructFieldBoolString", "StructFieldOmitEmptyBoolString", "StructEndBoolString", "StructEndOmitEmptyBoolString", "StructFieldStringString", "StructFieldOmitEmptyStringString", "StructEndStringString", "StructEndOmitEmptyStringString", "StructFieldNumberString", "StructFieldOmitEmptyNumberString", "StructEndNumberString", "StructEndOmitEmptyNumberString", "StructFieldIntPtr", "StructFieldOmitEmptyIntPtr", "StructEndIntPtr", "StructEndOmitEmptyIntPtr", "StructFieldUintPtr", "StructFieldOmitEmptyUintPtr", "StructEndUintPtr", "StructEndOmitEmptyUintPtr", "StructFieldFloat32Ptr", "StructFieldOmitEmptyFloat32Ptr", "StructEndFloat32Ptr", "StructEndOmitEmptyFloat32Ptr", "StructFieldFloat64Ptr", "StructFieldOmitEmptyFloat64Ptr", "StructEndFloat64Ptr", "StructEndOmitEmptyFloat64Ptr", "StructFieldBoolPtr", "StructFieldOmitEmptyBoolPtr", "StructEndBoolPtr", "StructEndOmitEmptyBoolPtr", "StructFieldStringPtr", "StructFieldOmitEmptyStringPtr", "StructEndStringPtr", "StructEndOmitEmptyStringPtr", "StructFieldBytesPtr", "StructFieldOmitEmptyBytesPtr", "StructEndBytesPtr", "StructEndOmitEmptyBytesPtr", "StructFieldNumberPtr", "StructFieldOmitEmptyNumberPtr", "StructEndNumberPtr", "StructEndOmitEmptyNumberPtr", "StructFieldArrayPtr", "StructFieldOmitEmptyArrayPtr", "StructEndArrayPtr", "StructEndOmitEmptyArrayPtr", "StructFieldMapPtr", "StructFieldOmitEmptyMapPtr", "StructEndMapPtr", "StructEndOmitEmptyMapPtr", "StructFieldSlicePtr", "StructFieldOmitEmptySlicePtr", "StructEndSlicePtr", "StructEndOmitEmptySlicePtr", "StructFieldMarshalJSONPtr", "StructFieldOmitEmptyMarshalJSONPtr", "StructEndMarshalJSONPtr", "StructEndOmitEmptyMarshalJSONPtr", "StructFieldMarshalTextPtr", "StructFieldOmitEmptyMarshalTextPtr", "StructEndMarshalTextPtr", "StructEndOmitEmptyMarshalTextPtr", "StructFieldInterfacePtr", "StructFieldOmitEmptyInterfacePtr", "StructEndInterfacePtr", 
"StructEndOmitEmptyInterfacePtr", "StructFieldIntPtrString", "StructFieldOmitEmptyIntPtrString", "StructEndIntPtrString", "StructEndOmitEmptyIntPtrString", "StructFieldUintPtrString", "StructFieldOmitEmptyUintPtrString", "StructEndUintPtrString", "StructEndOmitEmptyUintPtrString", "StructFieldFloat32PtrString", "StructFieldOmitEmptyFloat32PtrString", "StructEndFloat32PtrString", "StructEndOmitEmptyFloat32PtrString", "StructFieldFloat64PtrString", "StructFieldOmitEmptyFloat64PtrString", "StructEndFloat64PtrString", "StructEndOmitEmptyFloat64PtrString", "StructFieldBoolPtrString", "StructFieldOmitEmptyBoolPtrString", "StructEndBoolPtrString", "StructEndOmitEmptyBoolPtrString", "StructFieldStringPtrString", "StructFieldOmitEmptyStringPtrString", "StructEndStringPtrString", "StructEndOmitEmptyStringPtrString", "StructFieldNumberPtrString", "StructFieldOmitEmptyNumberPtrString", "StructEndNumberPtrString", "StructEndOmitEmptyNumberPtrString", "StructField", "StructFieldOmitEmpty", "StructEnd", "StructEndOmitEmpty", } type OpType uint16 const ( OpEnd OpType = 0 OpInterface OpType = 1 OpPtr OpType = 2 OpSliceElem OpType = 3 OpSliceEnd OpType = 4 OpArrayElem OpType = 5 OpArrayEnd OpType = 6 OpMapKey OpType = 7 OpMapValue OpType = 8 OpMapEnd OpType = 9 OpRecursive OpType = 10 OpRecursivePtr OpType = 11 OpRecursiveEnd OpType = 12 OpInterfaceEnd OpType = 13 OpInt OpType = 14 OpUint OpType = 15 OpFloat32 OpType = 16 OpFloat64 OpType = 17 OpBool OpType = 18 OpString OpType = 19 OpBytes OpType = 20 OpNumber OpType = 21 OpArray OpType = 22 OpMap OpType = 23 OpSlice OpType = 24 OpStruct OpType = 25 OpMarshalJSON OpType = 26 OpMarshalText OpType = 27 OpIntString OpType = 28 OpUintString OpType = 29 OpFloat32String OpType = 30 OpFloat64String OpType = 31 OpBoolString OpType = 32 OpStringString OpType = 33 OpNumberString OpType = 34 OpIntPtr OpType = 35 OpUintPtr OpType = 36 OpFloat32Ptr OpType = 37 OpFloat64Ptr OpType = 38 OpBoolPtr OpType = 39 OpStringPtr OpType = 40 OpBytesPtr 
OpType = 41 OpNumberPtr OpType = 42 OpArrayPtr OpType = 43 OpMapPtr OpType = 44 OpSlicePtr OpType = 45 OpMarshalJSONPtr OpType = 46 OpMarshalTextPtr OpType = 47 OpInterfacePtr OpType = 48 OpIntPtrString OpType = 49 OpUintPtrString OpType = 50 OpFloat32PtrString OpType = 51 OpFloat64PtrString OpType = 52 OpBoolPtrString OpType = 53 OpStringPtrString OpType = 54 OpNumberPtrString OpType = 55 OpStructHeadInt OpType = 56 OpStructHeadOmitEmptyInt OpType = 57 OpStructPtrHeadInt OpType = 58 OpStructPtrHeadOmitEmptyInt OpType = 59 OpStructHeadUint OpType = 60 OpStructHeadOmitEmptyUint OpType = 61 OpStructPtrHeadUint OpType = 62 OpStructPtrHeadOmitEmptyUint OpType = 63 OpStructHeadFloat32 OpType = 64 OpStructHeadOmitEmptyFloat32 OpType = 65 OpStructPtrHeadFloat32 OpType = 66 OpStructPtrHeadOmitEmptyFloat32 OpType = 67 OpStructHeadFloat64 OpType = 68 OpStructHeadOmitEmptyFloat64 OpType = 69 OpStructPtrHeadFloat64 OpType = 70 OpStructPtrHeadOmitEmptyFloat64 OpType = 71 OpStructHeadBool OpType = 72 OpStructHeadOmitEmptyBool OpType = 73 OpStructPtrHeadBool OpType = 74 OpStructPtrHeadOmitEmptyBool OpType = 75 OpStructHeadString OpType = 76 OpStructHeadOmitEmptyString OpType = 77 OpStructPtrHeadString OpType = 78 OpStructPtrHeadOmitEmptyString OpType = 79 OpStructHeadBytes OpType = 80 OpStructHeadOmitEmptyBytes OpType = 81 OpStructPtrHeadBytes OpType = 82 OpStructPtrHeadOmitEmptyBytes OpType = 83 OpStructHeadNumber OpType = 84 OpStructHeadOmitEmptyNumber OpType = 85 OpStructPtrHeadNumber OpType = 86 OpStructPtrHeadOmitEmptyNumber OpType = 87 OpStructHeadArray OpType = 88 OpStructHeadOmitEmptyArray OpType = 89 OpStructPtrHeadArray OpType = 90 OpStructPtrHeadOmitEmptyArray OpType = 91 OpStructHeadMap OpType = 92 OpStructHeadOmitEmptyMap OpType = 93 OpStructPtrHeadMap OpType = 94 OpStructPtrHeadOmitEmptyMap OpType = 95 OpStructHeadSlice OpType = 96 OpStructHeadOmitEmptySlice OpType = 97 OpStructPtrHeadSlice OpType = 98 OpStructPtrHeadOmitEmptySlice OpType = 99 OpStructHeadStruct 
OpType = 100 OpStructHeadOmitEmptyStruct OpType = 101 OpStructPtrHeadStruct OpType = 102 OpStructPtrHeadOmitEmptyStruct OpType = 103 OpStructHeadMarshalJSON OpType = 104 OpStructHeadOmitEmptyMarshalJSON OpType = 105 OpStructPtrHeadMarshalJSON OpType = 106 OpStructPtrHeadOmitEmptyMarshalJSON OpType = 107 OpStructHeadMarshalText OpType = 108 OpStructHeadOmitEmptyMarshalText OpType = 109 OpStructPtrHeadMarshalText OpType = 110 OpStructPtrHeadOmitEmptyMarshalText OpType = 111 OpStructHeadIntString OpType = 112 OpStructHeadOmitEmptyIntString OpType = 113 OpStructPtrHeadIntString OpType = 114 OpStructPtrHeadOmitEmptyIntString OpType = 115 OpStructHeadUintString OpType = 116 OpStructHeadOmitEmptyUintString OpType = 117 OpStructPtrHeadUintString OpType = 118 OpStructPtrHeadOmitEmptyUintString OpType = 119 OpStructHeadFloat32String OpType = 120 OpStructHeadOmitEmptyFloat32String OpType = 121 OpStructPtrHeadFloat32String OpType = 122 OpStructPtrHeadOmitEmptyFloat32String OpType = 123 OpStructHeadFloat64String OpType = 124 OpStructHeadOmitEmptyFloat64String OpType = 125 OpStructPtrHeadFloat64String OpType = 126 OpStructPtrHeadOmitEmptyFloat64String OpType = 127 OpStructHeadBoolString OpType = 128 OpStructHeadOmitEmptyBoolString OpType = 129 OpStructPtrHeadBoolString OpType = 130 OpStructPtrHeadOmitEmptyBoolString OpType = 131 OpStructHeadStringString OpType = 132 OpStructHeadOmitEmptyStringString OpType = 133 OpStructPtrHeadStringString OpType = 134 OpStructPtrHeadOmitEmptyStringString OpType = 135 OpStructHeadNumberString OpType = 136 OpStructHeadOmitEmptyNumberString OpType = 137 OpStructPtrHeadNumberString OpType = 138 OpStructPtrHeadOmitEmptyNumberString OpType = 139 OpStructHeadIntPtr OpType = 140 OpStructHeadOmitEmptyIntPtr OpType = 141 OpStructPtrHeadIntPtr OpType = 142 OpStructPtrHeadOmitEmptyIntPtr OpType = 143 OpStructHeadUintPtr OpType = 144 OpStructHeadOmitEmptyUintPtr OpType = 145 OpStructPtrHeadUintPtr OpType = 146 OpStructPtrHeadOmitEmptyUintPtr OpType = 147 
OpStructHeadFloat32Ptr OpType = 148 OpStructHeadOmitEmptyFloat32Ptr OpType = 149 OpStructPtrHeadFloat32Ptr OpType = 150 OpStructPtrHeadOmitEmptyFloat32Ptr OpType = 151 OpStructHeadFloat64Ptr OpType = 152 OpStructHeadOmitEmptyFloat64Ptr OpType = 153 OpStructPtrHeadFloat64Ptr OpType = 154 OpStructPtrHeadOmitEmptyFloat64Ptr OpType = 155 OpStructHeadBoolPtr OpType = 156 OpStructHeadOmitEmptyBoolPtr OpType = 157 OpStructPtrHeadBoolPtr OpType = 158 OpStructPtrHeadOmitEmptyBoolPtr OpType = 159 OpStructHeadStringPtr OpType = 160 OpStructHeadOmitEmptyStringPtr OpType = 161 OpStructPtrHeadStringPtr OpType = 162 OpStructPtrHeadOmitEmptyStringPtr OpType = 163 OpStructHeadBytesPtr OpType = 164 OpStructHeadOmitEmptyBytesPtr OpType = 165 OpStructPtrHeadBytesPtr OpType = 166 OpStructPtrHeadOmitEmptyBytesPtr OpType = 167 OpStructHeadNumberPtr OpType = 168 OpStructHeadOmitEmptyNumberPtr OpType = 169 OpStructPtrHeadNumberPtr OpType = 170 OpStructPtrHeadOmitEmptyNumberPtr OpType = 171 OpStructHeadArrayPtr OpType = 172 OpStructHeadOmitEmptyArrayPtr OpType = 173 OpStructPtrHeadArrayPtr OpType = 174 OpStructPtrHeadOmitEmptyArrayPtr OpType = 175 OpStructHeadMapPtr OpType = 176 OpStructHeadOmitEmptyMapPtr OpType = 177 OpStructPtrHeadMapPtr OpType = 178 OpStructPtrHeadOmitEmptyMapPtr OpType = 179 OpStructHeadSlicePtr OpType = 180 OpStructHeadOmitEmptySlicePtr OpType = 181 OpStructPtrHeadSlicePtr OpType = 182 OpStructPtrHeadOmitEmptySlicePtr OpType = 183 OpStructHeadMarshalJSONPtr OpType = 184 OpStructHeadOmitEmptyMarshalJSONPtr OpType = 185 OpStructPtrHeadMarshalJSONPtr OpType = 186 OpStructPtrHeadOmitEmptyMarshalJSONPtr OpType = 187 OpStructHeadMarshalTextPtr OpType = 188 OpStructHeadOmitEmptyMarshalTextPtr OpType = 189 OpStructPtrHeadMarshalTextPtr OpType = 190 OpStructPtrHeadOmitEmptyMarshalTextPtr OpType = 191 OpStructHeadInterfacePtr OpType = 192 OpStructHeadOmitEmptyInterfacePtr OpType = 193 OpStructPtrHeadInterfacePtr OpType = 194 OpStructPtrHeadOmitEmptyInterfacePtr OpType = 195 
OpStructHeadIntPtrString OpType = 196 OpStructHeadOmitEmptyIntPtrString OpType = 197 OpStructPtrHeadIntPtrString OpType = 198 OpStructPtrHeadOmitEmptyIntPtrString OpType = 199 OpStructHeadUintPtrString OpType = 200 OpStructHeadOmitEmptyUintPtrString OpType = 201 OpStructPtrHeadUintPtrString OpType = 202 OpStructPtrHeadOmitEmptyUintPtrString OpType = 203 OpStructHeadFloat32PtrString OpType = 204 OpStructHeadOmitEmptyFloat32PtrString OpType = 205 OpStructPtrHeadFloat32PtrString OpType = 206 OpStructPtrHeadOmitEmptyFloat32PtrString OpType = 207 OpStructHeadFloat64PtrString OpType = 208 OpStructHeadOmitEmptyFloat64PtrString OpType = 209 OpStructPtrHeadFloat64PtrString OpType = 210 OpStructPtrHeadOmitEmptyFloat64PtrString OpType = 211 OpStructHeadBoolPtrString OpType = 212 OpStructHeadOmitEmptyBoolPtrString OpType = 213 OpStructPtrHeadBoolPtrString OpType = 214 OpStructPtrHeadOmitEmptyBoolPtrString OpType = 215 OpStructHeadStringPtrString OpType = 216 OpStructHeadOmitEmptyStringPtrString OpType = 217 OpStructPtrHeadStringPtrString OpType = 218 OpStructPtrHeadOmitEmptyStringPtrString OpType = 219 OpStructHeadNumberPtrString OpType = 220 OpStructHeadOmitEmptyNumberPtrString OpType = 221 OpStructPtrHeadNumberPtrString OpType = 222 OpStructPtrHeadOmitEmptyNumberPtrString OpType = 223 OpStructHead OpType = 224 OpStructHeadOmitEmpty OpType = 225 OpStructPtrHead OpType = 226 OpStructPtrHeadOmitEmpty OpType = 227 OpStructFieldInt OpType = 228 OpStructFieldOmitEmptyInt OpType = 229 OpStructEndInt OpType = 230 OpStructEndOmitEmptyInt OpType = 231 OpStructFieldUint OpType = 232 OpStructFieldOmitEmptyUint OpType = 233 OpStructEndUint OpType = 234 OpStructEndOmitEmptyUint OpType = 235 OpStructFieldFloat32 OpType = 236 OpStructFieldOmitEmptyFloat32 OpType = 237 OpStructEndFloat32 OpType = 238 OpStructEndOmitEmptyFloat32 OpType = 239 OpStructFieldFloat64 OpType = 240 OpStructFieldOmitEmptyFloat64 OpType = 241 OpStructEndFloat64 OpType = 242 OpStructEndOmitEmptyFloat64 OpType = 243 
OpStructFieldBool OpType = 244 OpStructFieldOmitEmptyBool OpType = 245 OpStructEndBool OpType = 246 OpStructEndOmitEmptyBool OpType = 247 OpStructFieldString OpType = 248 OpStructFieldOmitEmptyString OpType = 249 OpStructEndString OpType = 250 OpStructEndOmitEmptyString OpType = 251 OpStructFieldBytes OpType = 252 OpStructFieldOmitEmptyBytes OpType = 253 OpStructEndBytes OpType = 254 OpStructEndOmitEmptyBytes OpType = 255 OpStructFieldNumber OpType = 256 OpStructFieldOmitEmptyNumber OpType = 257 OpStructEndNumber OpType = 258 OpStructEndOmitEmptyNumber OpType = 259 OpStructFieldArray OpType = 260 OpStructFieldOmitEmptyArray OpType = 261 OpStructEndArray OpType = 262 OpStructEndOmitEmptyArray OpType = 263 OpStructFieldMap OpType = 264 OpStructFieldOmitEmptyMap OpType = 265 OpStructEndMap OpType = 266 OpStructEndOmitEmptyMap OpType = 267 OpStructFieldSlice OpType = 268 OpStructFieldOmitEmptySlice OpType = 269 OpStructEndSlice OpType = 270 OpStructEndOmitEmptySlice OpType = 271 OpStructFieldStruct OpType = 272 OpStructFieldOmitEmptyStruct OpType = 273 OpStructEndStruct OpType = 274 OpStructEndOmitEmptyStruct OpType = 275 OpStructFieldMarshalJSON OpType = 276 OpStructFieldOmitEmptyMarshalJSON OpType = 277 OpStructEndMarshalJSON OpType = 278 OpStructEndOmitEmptyMarshalJSON OpType = 279 OpStructFieldMarshalText OpType = 280 OpStructFieldOmitEmptyMarshalText OpType = 281 OpStructEndMarshalText OpType = 282 OpStructEndOmitEmptyMarshalText OpType = 283 OpStructFieldIntString OpType = 284 OpStructFieldOmitEmptyIntString OpType = 285 OpStructEndIntString OpType = 286 OpStructEndOmitEmptyIntString OpType = 287 OpStructFieldUintString OpType = 288 OpStructFieldOmitEmptyUintString OpType = 289 OpStructEndUintString OpType = 290 OpStructEndOmitEmptyUintString OpType = 291 OpStructFieldFloat32String OpType = 292 OpStructFieldOmitEmptyFloat32String OpType = 293 OpStructEndFloat32String OpType = 294 OpStructEndOmitEmptyFloat32String OpType = 295 OpStructFieldFloat64String OpType = 
296 OpStructFieldOmitEmptyFloat64String OpType = 297 OpStructEndFloat64String OpType = 298 OpStructEndOmitEmptyFloat64String OpType = 299 OpStructFieldBoolString OpType = 300 OpStructFieldOmitEmptyBoolString OpType = 301 OpStructEndBoolString OpType = 302 OpStructEndOmitEmptyBoolString OpType = 303 OpStructFieldStringString OpType = 304 OpStructFieldOmitEmptyStringString OpType = 305 OpStructEndStringString OpType = 306 OpStructEndOmitEmptyStringString OpType = 307 OpStructFieldNumberString OpType = 308 OpStructFieldOmitEmptyNumberString OpType = 309 OpStructEndNumberString OpType = 310 OpStructEndOmitEmptyNumberString OpType = 311 OpStructFieldIntPtr OpType = 312 OpStructFieldOmitEmptyIntPtr OpType = 313 OpStructEndIntPtr OpType = 314 OpStructEndOmitEmptyIntPtr OpType = 315 OpStructFieldUintPtr OpType = 316 OpStructFieldOmitEmptyUintPtr OpType = 317 OpStructEndUintPtr OpType = 318 OpStructEndOmitEmptyUintPtr OpType = 319 OpStructFieldFloat32Ptr OpType = 320 OpStructFieldOmitEmptyFloat32Ptr OpType = 321 OpStructEndFloat32Ptr OpType = 322 OpStructEndOmitEmptyFloat32Ptr OpType = 323 OpStructFieldFloat64Ptr OpType = 324 OpStructFieldOmitEmptyFloat64Ptr OpType = 325 OpStructEndFloat64Ptr OpType = 326 OpStructEndOmitEmptyFloat64Ptr OpType = 327 OpStructFieldBoolPtr OpType = 328 OpStructFieldOmitEmptyBoolPtr OpType = 329 OpStructEndBoolPtr OpType = 330 OpStructEndOmitEmptyBoolPtr OpType = 331 OpStructFieldStringPtr OpType = 332 OpStructFieldOmitEmptyStringPtr OpType = 333 OpStructEndStringPtr OpType = 334 OpStructEndOmitEmptyStringPtr OpType = 335 OpStructFieldBytesPtr OpType = 336 OpStructFieldOmitEmptyBytesPtr OpType = 337 OpStructEndBytesPtr OpType = 338 OpStructEndOmitEmptyBytesPtr OpType = 339 OpStructFieldNumberPtr OpType = 340 OpStructFieldOmitEmptyNumberPtr OpType = 341 OpStructEndNumberPtr OpType = 342 OpStructEndOmitEmptyNumberPtr OpType = 343 OpStructFieldArrayPtr OpType = 344 OpStructFieldOmitEmptyArrayPtr OpType = 345 OpStructEndArrayPtr OpType = 346 
OpStructEndOmitEmptyArrayPtr OpType = 347 OpStructFieldMapPtr OpType = 348 OpStructFieldOmitEmptyMapPtr OpType = 349 OpStructEndMapPtr OpType = 350 OpStructEndOmitEmptyMapPtr OpType = 351 OpStructFieldSlicePtr OpType = 352 OpStructFieldOmitEmptySlicePtr OpType = 353 OpStructEndSlicePtr OpType = 354 OpStructEndOmitEmptySlicePtr OpType = 355 OpStructFieldMarshalJSONPtr OpType = 356 OpStructFieldOmitEmptyMarshalJSONPtr OpType = 357 OpStructEndMarshalJSONPtr OpType = 358 OpStructEndOmitEmptyMarshalJSONPtr OpType = 359 OpStructFieldMarshalTextPtr OpType = 360 OpStructFieldOmitEmptyMarshalTextPtr OpType = 361 OpStructEndMarshalTextPtr OpType = 362 OpStructEndOmitEmptyMarshalTextPtr OpType = 363 OpStructFieldInterfacePtr OpType = 364 OpStructFieldOmitEmptyInterfacePtr OpType = 365 OpStructEndInterfacePtr OpType = 366 OpStructEndOmitEmptyInterfacePtr OpType = 367 OpStructFieldIntPtrString OpType = 368 OpStructFieldOmitEmptyIntPtrString OpType = 369 OpStructEndIntPtrString OpType = 370 OpStructEndOmitEmptyIntPtrString OpType = 371 OpStructFieldUintPtrString OpType = 372 OpStructFieldOmitEmptyUintPtrString OpType = 373 OpStructEndUintPtrString OpType = 374 OpStructEndOmitEmptyUintPtrString OpType = 375 OpStructFieldFloat32PtrString OpType = 376 OpStructFieldOmitEmptyFloat32PtrString OpType = 377 OpStructEndFloat32PtrString OpType = 378 OpStructEndOmitEmptyFloat32PtrString OpType = 379 OpStructFieldFloat64PtrString OpType = 380 OpStructFieldOmitEmptyFloat64PtrString OpType = 381 OpStructEndFloat64PtrString OpType = 382 OpStructEndOmitEmptyFloat64PtrString OpType = 383 OpStructFieldBoolPtrString OpType = 384 OpStructFieldOmitEmptyBoolPtrString OpType = 385 OpStructEndBoolPtrString OpType = 386 OpStructEndOmitEmptyBoolPtrString OpType = 387 OpStructFieldStringPtrString OpType = 388 OpStructFieldOmitEmptyStringPtrString OpType = 389
go
Apache-2.0
b3b4703e958c25d54c4d48138d9e80ae32fadac3
2026-01-07T09:44:30.792320Z
true