repo
stringlengths
6
47
file_url
stringlengths
77
269
file_path
stringlengths
5
186
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-07 08:35:43
2026-01-07 08:55:24
truncated
bool
2 classes
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/integrii/flaggy/positionalValue.go
vendor/github.com/integrii/flaggy/positionalValue.go
package flaggy // PositionalValue represents a value which is determined by its position // relative to where a subcommand was detected. type PositionalValue struct { Name string // used in documentation only Description string AssignmentVar *string // the var that will get this variable Position int // the position, not including switches, of this variable Required bool // this subcommand must always be specified Found bool // was this positional found during parsing? Hidden bool // indicates this positional value should be hidden from help defaultValue string // used for help output }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/integrii/flaggy/subCommand.go
vendor/github.com/integrii/flaggy/subCommand.go
package flaggy import ( "fmt" "log" "net" "os" "strconv" "strings" "time" ) // Subcommand represents a subcommand which contains a set of child // subcommands along with a set of flags relevant to it. Parsing // runs until a subcommand is detected by matching its name and // position. Once a matching subcommand is found, the next set // of parsing occurs within that matched subcommand. type Subcommand struct { Name string ShortName string Description string Position int // the position of this subcommand, not including flags Subcommands []*Subcommand Flags []*Flag PositionalFlags []*PositionalValue ParsedValues []parsedValue // a list of values and positionals parsed AdditionalHelpPrepend string // additional prepended message when Help is displayed AdditionalHelpAppend string // additional appended message when Help is displayed Used bool // indicates this subcommand was found and parsed Hidden bool // indicates this subcommand should be hidden from help } // NewSubcommand creates a new subcommand that can have flags or PositionalFlags // added to it. The position starts with 1, not 0 func NewSubcommand(name string) *Subcommand { if len(name) == 0 { fmt.Fprintln(os.Stderr, "Error creating subcommand (NewSubcommand()). 
No subcommand name was specified.") exitOrPanic(2) } newSC := &Subcommand{ Name: name, } return newSC } // parseAllFlagsFromArgs parses the non-positional flags such as -f or -v=value // out of the supplied args and returns the resulting positional items in order, // all the flag names found (without values), a bool to indicate if help was // requested, and any errors found during parsing func (sc *Subcommand) parseAllFlagsFromArgs(p *Parser, args []string) ([]string, bool, error) { var positionalOnlyArguments []string var helpRequested bool // indicates the user has supplied -h and we // should render help if we are the last subcommand // indicates we should skip the next argument, like when parsing a flag // that separates key and value by space var skipNext bool // endArgfound indicates that a -- was found and everything // remaining should be added to the trailing arguments slices var endArgFound bool // find all the normal flags (not positional) and parse them out for i, a := range args { debugPrint("parsing arg:", a) // evaluate if there is a following arg to avoid panics var nextArgExists bool var nextArg string if len(args)-1 >= i+1 { nextArgExists = true nextArg = args[i+1] } // if end arg -- has been found, just add everything to TrailingArguments if endArgFound { if !p.trailingArgumentsExtracted { p.TrailingArguments = append(p.TrailingArguments, a) } continue } // skip this run if specified if skipNext { skipNext = false debugPrint("skipping flag because it is an arg:", a) continue } // parse the flag into its name for consideration without dashes flagName := parseFlagToName(a) // if the flag being passed is version or v and the option to display // version with version flags, then display version if p.ShowVersionWithVersionFlag { if flagName == versionFlagLongName { p.ShowVersionAndExit() } } // if the show Help on h flag option is set, then show Help when h or Help // is passed as an option if p.ShowHelpWithHFlag { if flagName == helpFlagShortName || 
flagName == helpFlagLongName { // Ensure this is the last subcommand passed so we give the correct // help output helpRequested = true continue } } // determine what kind of flag this is argType := determineArgType(a) // strip flags from arg // debugPrint("Parsing flag named", a, "of type", argType) // depending on the flag type, parse the key and value out, then apply it switch argType { case argIsFinal: // debugPrint("Arg", i, "is final:", a) endArgFound = true case argIsPositional: // debugPrint("Arg is positional or subcommand:", a) // this positional argument into a slice of their own, so that // we can determine if its a subcommand or positional value later positionalOnlyArguments = append(positionalOnlyArguments, a) // track this as a parsed value with the subcommand sc.addParsedPositionalValue(a) case argIsFlagWithSpace: // a flag with a space. ex) -k v or --key value a = parseFlagToName(a) // debugPrint("Arg", i, "is flag with space:", a) // parse next arg as value to this flag and apply to subcommand flags // if the flag is a bool flag, then we check for a following positional // and skip it if necessary if flagIsBool(sc, p, a) { debugPrint(sc.Name, "bool flag", a, "next var is:", nextArg) // set the value in this subcommand and its root parser valueSet, err := setValueForParsers(a, "true", p, sc) // if an error occurs, just return it and quit parsing if err != nil { return []string{}, false, err } // log all values parsed by this subcommand. 
We leave the value blank // because the bool value had no explicit true or false supplied if valueSet { sc.addParsedFlag(a, "") } // we've found and set a standalone bool flag, so we move on to the next // argument in the list of arguments continue } skipNext = true // debugPrint(sc.Name, "NOT bool flag", a) // if the next arg was not found, then show a Help message if !nextArgExists { p.ShowHelpWithMessage("Expected a following arg for flag " + a + ", but it did not exist.") exitOrPanic(2) } valueSet, err := setValueForParsers(a, nextArg, p, sc) if err != nil { return []string{}, false, err } // log all parsed values in the subcommand if valueSet { sc.addParsedFlag(a, nextArg) } case argIsFlagWithValue: // a flag with an equals sign. ex) -k=v or --key=value // debugPrint("Arg", i, "is flag with value:", a) a = parseFlagToName(a) // parse flag into key and value and apply to subcommand flags key, val := parseArgWithValue(a) // set the value in this subcommand and its root parser valueSet, err := setValueForParsers(key, val, p, sc) if err != nil { return []string{}, false, err } // log all values parsed by the subcommand if valueSet { sc.addParsedFlag(a, val) } } } return positionalOnlyArguments, helpRequested, nil } // findAllParsedValues finds all values parsed by all subcommands and this // subcommand and its child subcommands func (sc *Subcommand) findAllParsedValues() []parsedValue { parsedValues := sc.ParsedValues for _, sc := range sc.Subcommands { // skip unused subcommands if !sc.Used { continue } parsedValues = append(parsedValues, sc.findAllParsedValues()...) } return parsedValues } // parse causes the argument parser to parse based on the supplied []string. // depth specifies the non-flag subcommand positional depth. 
A slice of flags // and subcommands parsed is returned so that the parser can ultimately decide // if there were any unexpected values supplied by the user func (sc *Subcommand) parse(p *Parser, args []string, depth int) error { debugPrint("- Parsing subcommand", sc.Name, "with depth of", depth, "and args", args) // if a command is parsed, its used sc.Used = true debugPrint("used subcommand", sc.Name, sc.ShortName) if len(sc.Name) > 0 { sc.addParsedPositionalValue(sc.Name) } if len(sc.ShortName) > 0 { sc.addParsedPositionalValue(sc.ShortName) } // as subcommands are used, they become the context of the parser. This helps // us understand how to display help based on which subcommand is being used p.subcommandContext = sc // ensure that help and version flags are not used if the parser has the // built-in help and version flags enabled if p.ShowHelpWithHFlag { sc.ensureNoConflictWithBuiltinHelp() } if p.ShowVersionWithVersionFlag { sc.ensureNoConflictWithBuiltinVersion() } // Parse the normal flags out of the argument list and return the positionals // (subcommands and positional values), along with the flags used. // Then the flag values are applied to the parent parser and the current // subcommand being parsed. positionalOnlyArguments, helpRequested, err := sc.parseAllFlagsFromArgs(p, args) if err != nil { return err } // indicate that trailing arguments have been extracted, so that they aren't // appended a second time p.trailingArgumentsExtracted = true // loop over positional values and look for their matching positional // parameter, or their positional command. If neither are found, then // we throw an error var parsedArgCount int for pos, v := range positionalOnlyArguments { // the first relative positional argument will be human natural at position 1 // but offset for the depth of relative commands being parsed for currently. 
relativeDepth := pos - depth + 1 // debugPrint("Parsing positional only position", relativeDepth, "with value", v) if relativeDepth < 1 { // debugPrint(sc.Name, "skipped value:", v) continue } parsedArgCount++ // determine subcommands and parse them by positional value and name for _, cmd := range sc.Subcommands { // debugPrint("Subcommand being compared", relativeDepth, "==", cmd.Position, "and", v, "==", cmd.Name, "==", cmd.ShortName) if relativeDepth == cmd.Position && (v == cmd.Name || v == cmd.ShortName) { debugPrint("Decending into positional subcommand", cmd.Name, "at relativeDepth", relativeDepth, "and absolute depth", depth+1) return cmd.parse(p, args, depth+parsedArgCount) // continue recursive positional parsing } } // determine positional args and parse them by positional value and name var foundPositional bool for _, val := range sc.PositionalFlags { if relativeDepth == val.Position { debugPrint("Found a positional value at relativePos:", relativeDepth, "value:", v) // set original value for help output val.defaultValue = *val.AssignmentVar // defrerence the struct pointer, then set the pointer property within it *val.AssignmentVar = v // debugPrint("set positional to value", *val.AssignmentVar) foundPositional = true val.Found = true break } } // if there aren't any positional flags but there are subcommands that // were not used, display a useful message with subcommand options. if !foundPositional && p.ShowHelpOnUnexpected { debugPrint("No positional at position", relativeDepth) var foundSubcommandAtDepth bool for _, cmd := range sc.Subcommands { if cmd.Position == relativeDepth { foundSubcommandAtDepth = true } } // if there is a subcommand here but it was not specified, display them all // as a suggestion to the user before exiting. 
if foundSubcommandAtDepth { // determine which name to use in upcoming help output fmt.Fprintln(os.Stderr, sc.Name+":", "No subcommand or positional value found at position", strconv.Itoa(relativeDepth)+".") var output string for _, cmd := range sc.Subcommands { if cmd.Hidden { continue } output = output + " " + cmd.Name } // if there are available subcommands, let the user know if len(output) > 0 { output = strings.TrimLeft(output, " ") fmt.Println("Available subcommands:", output) } exitOrPanic(2) } // if there were not any flags or subcommands at this position at all, then // throw an error (display Help if necessary) p.ShowHelpWithMessage("Unexpected argument: " + v) exitOrPanic(2) } } // if help was requested and we should show help when h is passed, if helpRequested && p.ShowHelpWithHFlag { p.ShowHelp() exitOrPanic(0) } // find any positionals that were not used on subcommands that were // found and throw help (unknown argument) in the global parse or subcommand for _, pv := range p.PositionalFlags { if pv.Required && !pv.Found { p.ShowHelpWithMessage("Required global positional variable " + pv.Name + " not found at position " + strconv.Itoa(pv.Position)) exitOrPanic(2) } } for _, pv := range sc.PositionalFlags { if pv.Required && !pv.Found { p.ShowHelpWithMessage("Required positional of subcommand " + sc.Name + " named " + pv.Name + " not found at position " + strconv.Itoa(pv.Position)) exitOrPanic(2) } } return nil } // addParsedFlag makes it easy to append flag values parsed by the subcommand func (sc *Subcommand) addParsedFlag(key string, value string) { sc.ParsedValues = append(sc.ParsedValues, newParsedValue(key, value, false)) } // addParsedPositionalValue makes it easy to append positionals parsed by the // subcommand func (sc *Subcommand) addParsedPositionalValue(value string) { sc.ParsedValues = append(sc.ParsedValues, newParsedValue("", value, true)) } // FlagExists lets you know if the flag name exists as either a short or long // name in the 
(sub)command func (sc *Subcommand) FlagExists(name string) bool { for _, f := range sc.Flags { if f.HasName(name) { return true } } return false } // AttachSubcommand adds a possible subcommand to the Parser. func (sc *Subcommand) AttachSubcommand(newSC *Subcommand, relativePosition int) { // assign the depth of the subcommand when its attached newSC.Position = relativePosition // ensure no subcommands at this depth with this name for _, other := range sc.Subcommands { if newSC.Position == other.Position { if newSC.Name != "" { if newSC.Name == other.Name { log.Panicln("Unable to add subcommand because one already exists at position" + strconv.Itoa(newSC.Position) + " with name " + other.Name) } } if newSC.ShortName != "" { if newSC.ShortName == other.ShortName { log.Panicln("Unable to add subcommand because one already exists at position" + strconv.Itoa(newSC.Position) + " with name " + other.ShortName) } } } } // ensure no positionals at this depth for _, other := range sc.PositionalFlags { if newSC.Position == other.Position { log.Panicln("Unable to add subcommand because a positional value already exists at position " + strconv.Itoa(newSC.Position) + ": " + other.Name) } } sc.Subcommands = append(sc.Subcommands, newSC) } // add is a "generic" to add flags of any type. Checks the supplied parent // parser to ensure that the user isn't setting version or help flags that // conflict with the built-in help and version flag behavior. 
func (sc *Subcommand) add(assignmentVar interface{}, shortName string, longName string, description string) { // if the flag is already used, throw an error for _, existingFlag := range sc.Flags { if longName != "" && existingFlag.LongName == longName { log.Panicln("Flag " + longName + " added to subcommand " + sc.Name + " but the name is already assigned.") } if shortName != "" && existingFlag.ShortName == shortName { log.Panicln("Flag " + shortName + " added to subcommand " + sc.Name + " but the short name is already assigned.") } } newFlag := Flag{ AssignmentVar: assignmentVar, ShortName: shortName, LongName: longName, Description: description, } sc.Flags = append(sc.Flags, &newFlag) } // String adds a new string flag func (sc *Subcommand) String(assignmentVar *string, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // StringSlice adds a new slice of strings flag // Specify the flag multiple times to fill the slice func (sc *Subcommand) StringSlice(assignmentVar *[]string, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // Bool adds a new bool flag func (sc *Subcommand) Bool(assignmentVar *bool, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // BoolSlice adds a new slice of bools flag // Specify the flag multiple times to fill the slice func (sc *Subcommand) BoolSlice(assignmentVar *[]bool, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // ByteSlice adds a new slice of bytes flag // Specify the flag multiple times to fill the slice. Takes hex as input. func (sc *Subcommand) ByteSlice(assignmentVar *[]byte, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // Duration adds a new time.Duration flag. 
// Input format is described in time.ParseDuration(). // Example values: 1h, 1h50m, 32s func (sc *Subcommand) Duration(assignmentVar *time.Duration, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // DurationSlice adds a new time.Duration flag. // Input format is described in time.ParseDuration(). // Example values: 1h, 1h50m, 32s // Specify the flag multiple times to fill the slice. func (sc *Subcommand) DurationSlice(assignmentVar *[]time.Duration, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // Float32 adds a new float32 flag. func (sc *Subcommand) Float32(assignmentVar *float32, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // Float32Slice adds a new float32 flag. // Specify the flag multiple times to fill the slice. func (sc *Subcommand) Float32Slice(assignmentVar *[]float32, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // Float64 adds a new float64 flag. func (sc *Subcommand) Float64(assignmentVar *float64, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // Float64Slice adds a new float64 flag. // Specify the flag multiple times to fill the slice. func (sc *Subcommand) Float64Slice(assignmentVar *[]float64, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // Int adds a new int flag func (sc *Subcommand) Int(assignmentVar *int, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // IntSlice adds a new int slice flag. // Specify the flag multiple times to fill the slice. 
func (sc *Subcommand) IntSlice(assignmentVar *[]int, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // UInt adds a new uint flag func (sc *Subcommand) UInt(assignmentVar *uint, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // UIntSlice adds a new uint slice flag. // Specify the flag multiple times to fill the slice. func (sc *Subcommand) UIntSlice(assignmentVar *[]uint, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // UInt64 adds a new uint64 flag func (sc *Subcommand) UInt64(assignmentVar *uint64, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // UInt64Slice adds a new uint64 slice flag. // Specify the flag multiple times to fill the slice. func (sc *Subcommand) UInt64Slice(assignmentVar *[]uint64, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // UInt32 adds a new uint32 flag func (sc *Subcommand) UInt32(assignmentVar *uint32, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // UInt32Slice adds a new uint32 slice flag. // Specify the flag multiple times to fill the slice. func (sc *Subcommand) UInt32Slice(assignmentVar *[]uint32, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // UInt16 adds a new uint16 flag func (sc *Subcommand) UInt16(assignmentVar *uint16, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // UInt16Slice adds a new uint16 slice flag. // Specify the flag multiple times to fill the slice. 
func (sc *Subcommand) UInt16Slice(assignmentVar *[]uint16, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // UInt8 adds a new uint8 flag func (sc *Subcommand) UInt8(assignmentVar *uint8, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // UInt8Slice adds a new uint8 slice flag. // Specify the flag multiple times to fill the slice. func (sc *Subcommand) UInt8Slice(assignmentVar *[]uint8, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // Int64 adds a new int64 flag. func (sc *Subcommand) Int64(assignmentVar *int64, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // Int64Slice adds a new int64 slice flag. // Specify the flag multiple times to fill the slice. func (sc *Subcommand) Int64Slice(assignmentVar *[]int64, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // Int32 adds a new int32 flag func (sc *Subcommand) Int32(assignmentVar *int32, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // Int32Slice adds a new int32 slice flag. // Specify the flag multiple times to fill the slice. func (sc *Subcommand) Int32Slice(assignmentVar *[]int32, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // Int16 adds a new int16 flag func (sc *Subcommand) Int16(assignmentVar *int16, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // Int16Slice adds a new int16 slice flag. // Specify the flag multiple times to fill the slice. 
func (sc *Subcommand) Int16Slice(assignmentVar *[]int16, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // Int8 adds a new int8 flag func (sc *Subcommand) Int8(assignmentVar *int8, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // Int8Slice adds a new int8 slice flag. // Specify the flag multiple times to fill the slice. func (sc *Subcommand) Int8Slice(assignmentVar *[]int8, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // IP adds a new net.IP flag. func (sc *Subcommand) IP(assignmentVar *net.IP, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // IPSlice adds a new int8 slice flag. // Specify the flag multiple times to fill the slice. func (sc *Subcommand) IPSlice(assignmentVar *[]net.IP, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // HardwareAddr adds a new net.HardwareAddr flag. func (sc *Subcommand) HardwareAddr(assignmentVar *net.HardwareAddr, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // HardwareAddrSlice adds a new net.HardwareAddr slice flag. // Specify the flag multiple times to fill the slice. func (sc *Subcommand) HardwareAddrSlice(assignmentVar *[]net.HardwareAddr, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // IPMask adds a new net.IPMask flag. IPv4 Only. func (sc *Subcommand) IPMask(assignmentVar *net.IPMask, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // IPMaskSlice adds a new net.HardwareAddr slice flag. IPv4 only. // Specify the flag multiple times to fill the slice. 
func (sc *Subcommand) IPMaskSlice(assignmentVar *[]net.IPMask, shortName string, longName string, description string) { sc.add(assignmentVar, shortName, longName, description) } // AddPositionalValue adds a positional value to the subcommand. the // relativePosition starts at 1 and is relative to the subcommand it belongs to func (sc *Subcommand) AddPositionalValue(assignmentVar *string, name string, relativePosition int, required bool, description string) { // ensure no other positionals are at this depth for _, other := range sc.PositionalFlags { if relativePosition == other.Position { log.Panicln("Unable to add positional value because one already exists at position: " + strconv.Itoa(relativePosition)) } } // ensure no subcommands at this depth for _, other := range sc.Subcommands { if relativePosition == other.Position { log.Panicln("Unable to add positional value a subcommand already exists at position: " + strconv.Itoa(relativePosition)) } } newPositionalValue := PositionalValue{ Name: name, Position: relativePosition, AssignmentVar: assignmentVar, Required: required, Description: description, defaultValue: *assignmentVar, } sc.PositionalFlags = append(sc.PositionalFlags, &newPositionalValue) } // SetValueForKey sets the value for the specified key. If setting a bool // value, then send "true" or "false" as strings. The returned bool indicates // that a value was set. 
func (sc *Subcommand) SetValueForKey(key string, value string) (bool, error) { // debugPrint("Looking to set key", key, "to value", value) // check for and assign flags that match the key for _, f := range sc.Flags { // debugPrint("Evaluating string flag", f.ShortName, "==", key, "||", f.LongName, "==", key) if f.ShortName == key || f.LongName == key { // debugPrint("Setting string value for", key, "to", value) f.identifyAndAssignValue(value) return true, nil } } // debugPrint(sc.Name, "was unable to find a key named", key, "to set to value", value) return false, nil } // ensureNoConflictWithBuiltinHelp ensures that the flags on this subcommand do // not conflict with the builtin help flags (-h or --help). Exits the program // if a conflict is found. func (sc *Subcommand) ensureNoConflictWithBuiltinHelp() { for _, f := range sc.Flags { if f.LongName == helpFlagLongName { sc.exitBecauseOfHelpFlagConflict(f.LongName) } if f.LongName == helpFlagShortName { sc.exitBecauseOfHelpFlagConflict(f.LongName) } if f.ShortName == helpFlagLongName { sc.exitBecauseOfHelpFlagConflict(f.ShortName) } if f.ShortName == helpFlagShortName { sc.exitBecauseOfHelpFlagConflict(f.ShortName) } } } // ensureNoConflictWithBuiltinVersion ensures that the flags on this subcommand do // not conflict with the builtin version flag (--version). Exits the program // if a conflict is found. func (sc *Subcommand) ensureNoConflictWithBuiltinVersion() { for _, f := range sc.Flags { if f.LongName == versionFlagLongName { sc.exitBecauseOfVersionFlagConflict(f.LongName) } if f.ShortName == versionFlagLongName { sc.exitBecauseOfVersionFlagConflict(f.ShortName) } } } // exitBecauseOfVersionFlagConflict exits the program with a message about how to prevent // flags being defined from conflicting with the builtin flags. func (sc *Subcommand) exitBecauseOfVersionFlagConflict(flagName string) { fmt.Println(`Flag with name '` + flagName + `' conflicts with the internal --version flag in flaggy. 
You must either change the flag's name, or disable flaggy's internal version flag with 'flaggy.DefaultParser.ShowVersionWithVersionFlag = false'. If you are using a custom parser, you must instead set '.ShowVersionWithVersionFlag = false' on it.`) exitOrPanic(1) } // exitBecauseOfHelpFlagConflict exits the program with a message about how to prevent // flags being defined from conflicting with the builtin flags. func (sc *Subcommand) exitBecauseOfHelpFlagConflict(flagName string) { fmt.Println(`Flag with name '` + flagName + `' conflicts with the internal --help or -h flag in flaggy. You must either change the flag's name, or disable flaggy's internal help flag with 'flaggy.DefaultParser.ShowHelpWithHFlag = false'. If you are using a custom parser, you must instead set '.ShowHelpWithHFlag = false' on it.`) exitOrPanic(1) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/integrii/flaggy/parsedValue.go
vendor/github.com/integrii/flaggy/parsedValue.go
package flaggy // parsedValue represents a flag or subcommand that was parsed. Primairily used // to account for all parsed values in order to determine if unknown values were // passed to the root parser after all subcommands have been parsed. type parsedValue struct { Key string Value string IsPositional bool // indicates that this value was positional and not a key/value } // newParsedValue creates and returns a new parsedValue struct with the // supplied values set func newParsedValue(key string, value string, isPositional bool) parsedValue { if len(key) == 0 && len(value) == 0 { panic("cant add parsed value with no key or value") } return parsedValue{ Key: key, Value: value, IsPositional: isPositional, } }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/integrii/flaggy/argumentParser.go
vendor/github.com/integrii/flaggy/argumentParser.go
package flaggy // setValueForParsers sets the value for a specified key in the // specified parsers (which normally include a Parser and Subcommand). // The return values represent the key being set, and any errors // returned when setting the key, such as failures to convert the string // into the appropriate flag value. We stop assigning values as soon // as we find a any parser that accepts it. func setValueForParsers(key string, value string, parsers ...ArgumentParser) (bool, error) { for _, p := range parsers { valueWasSet, err := p.SetValueForKey(key, value) if err != nil { return valueWasSet, err } if valueWasSet { return true, nil } } return false, nil } // ArgumentParser represents a parser or subcommand type ArgumentParser interface { SetValueForKey(key string, value string) (bool, error) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/integrii/flaggy/helpValues.go
vendor/github.com/integrii/flaggy/helpValues.go
package flaggy import ( "log" "reflect" "strings" "unicode/utf8" ) // Help represents the values needed to render a Help page type Help struct { Subcommands []HelpSubcommand Positionals []HelpPositional Flags []HelpFlag UsageString string CommandName string PrependMessage string AppendMessage string Message string Description string } // HelpSubcommand is used to template subcommand Help output type HelpSubcommand struct { ShortName string LongName string Description string Position int Spacer string } // HelpPositional is used to template positional Help output type HelpPositional struct { Name string Description string Required bool Position int DefaultValue string Spacer string } // HelpFlag is used to template string flag Help output type HelpFlag struct { ShortName string LongName string Description string DefaultValue string Spacer string } // ExtractValues extracts Help template values from a subcommand and its parent // parser. The parser is required in order to detect default flag settings // for help and version output. 
// ExtractValues populates h from the parser's current subcommand context:
// names, descriptions, subcommands, positionals, flags (including the
// built-in help/version flags when enabled), and a usage string. Hidden
// items are excluded throughout.
func (h *Help) ExtractValues(p *Parser, message string) {
	// accept message string for output
	h.Message = message

	// extract Help values from the current subcommand in context
	// prependMessage string
	h.PrependMessage = p.subcommandContext.AdditionalHelpPrepend
	// appendMessage string
	h.AppendMessage = p.subcommandContext.AdditionalHelpAppend
	// command name
	h.CommandName = p.subcommandContext.Name
	// description
	h.Description = p.subcommandContext.Description

	// width of the widest subcommand name, used to column-align descriptions
	maxLength := getLongestNameLength(p.subcommandContext.Subcommands, 0)

	// subcommands []HelpSubcommand
	for _, cmd := range p.subcommandContext.Subcommands {
		if cmd.Hidden {
			continue
		}
		newHelpSubcommand := HelpSubcommand{
			ShortName:   cmd.ShortName,
			LongName:    cmd.Name,
			Description: cmd.Description,
			Position:    cmd.Position,
			Spacer:      makeSpacer(cmd.Name, maxLength),
		}
		h.Subcommands = append(h.Subcommands, newHelpSubcommand)
	}

	maxLength = getLongestNameLength(p.subcommandContext.PositionalFlags, 0)

	// parse positional flags into help output structs
	for _, pos := range p.subcommandContext.PositionalFlags {
		if pos.Hidden {
			continue
		}
		newHelpPositional := HelpPositional{
			Name:         pos.Name,
			Position:     pos.Position,
			Description:  pos.Description,
			Required:     pos.Required,
			DefaultValue: pos.defaultValue,
			Spacer:       makeSpacer(pos.Name, maxLength),
		}
		h.Positionals = append(h.Positionals, newHelpPositional)
	}

	// flag alignment must also account for the built-in help/version names
	maxLength = len(versionFlagLongName)
	if len(helpFlagLongName) > maxLength {
		maxLength = len(helpFlagLongName)
	}
	maxLength = getLongestNameLength(p.subcommandContext.Flags, maxLength)
	maxLength = getLongestNameLength(p.Flags, maxLength)

	// if the built-in version flag is enabled, then add it as a help flag
	if p.ShowVersionWithVersionFlag {
		defaultVersionFlag := HelpFlag{
			ShortName:    "",
			LongName:     versionFlagLongName,
			Description:  "Displays the program version string.",
			DefaultValue: "",
			Spacer:       makeSpacer(versionFlagLongName, maxLength),
		}
		h.Flags = append(h.Flags, defaultVersionFlag)
	}

	// if the built-in help flag exists, then add it as a help flag
	if p.ShowHelpWithHFlag {
		defaultHelpFlag := HelpFlag{
			ShortName:    helpFlagShortName,
			LongName:     helpFlagLongName,
			Description:  "Displays help with available flag, subcommand, and positional value parameters.",
			DefaultValue: "",
			Spacer:       makeSpacer(helpFlagLongName, maxLength),
		}
		h.Flags = append(h.Flags, defaultHelpFlag)
	}

	// go through every flag in the subcommand and add it to help output
	h.parseFlagsToHelpFlags(p.subcommandContext.Flags, maxLength)

	// go through every flag in the parent parser and add it to help output
	h.parseFlagsToHelpFlags(p.Flags, maxLength)

	// formulate the usage string
	// first, we capture all the command and positional names by position,
	// joining items that share a position with "|"
	commandsByPosition := make(map[int]string)
	for _, pos := range p.subcommandContext.PositionalFlags {
		if pos.Hidden {
			continue
		}
		if len(commandsByPosition[pos.Position]) > 0 {
			commandsByPosition[pos.Position] = commandsByPosition[pos.Position] + "|" + pos.Name
		} else {
			commandsByPosition[pos.Position] = pos.Name
		}
	}
	for _, cmd := range p.subcommandContext.Subcommands {
		if cmd.Hidden {
			continue
		}
		if len(commandsByPosition[cmd.Position]) > 0 {
			commandsByPosition[cmd.Position] = commandsByPosition[cmd.Position] + "|" + cmd.Name
		} else {
			commandsByPosition[cmd.Position] = cmd.Name
		}
	}

	// find the highest position count in the map
	var highestPosition int
	for i := range commandsByPosition {
		if i > highestPosition {
			highestPosition = i
		}
	}

	// only have a usage string if there are positional items
	var usageString string
	if highestPosition > 0 {
		// find each positional value and make our final string
		usageString = p.subcommandContext.Name
		for i := 1; i <= highestPosition; i++ {
			if len(commandsByPosition[i]) > 0 {
				usageString = usageString + " [" + commandsByPosition[i] + "]"
			} else {
				// don't keep listing after the first position without any properties;
				// it will be impossible to reach anything beyond here anyway
				break
			}
		}
	}
	h.UsageString = usageString
}

// parseFlagsToHelpFlags parses the specified
slice of flags into // help flags on the the calling help command func (h *Help) parseFlagsToHelpFlags(flags []*Flag, maxLength int) { for _, f := range flags { if f.Hidden { continue } // parse help values out if the flag hasn't been parsed yet if !f.parsed { f.parsed = true // parse the default value as a string and remember it for help output f.defaultValue, _ = f.returnAssignmentVarValueAsString() } // determine the default value based on the assignment variable defaultValue := f.defaultValue // dont show nils if defaultValue == "<nil>" { defaultValue = "" } // for bools, dont show a default of false _, isBool := f.AssignmentVar.(*bool) if isBool { b := f.AssignmentVar.(*bool) if *b == false { defaultValue = "" } } newHelpFlag := HelpFlag{ ShortName: f.ShortName, LongName: f.LongName, Description: f.Description, DefaultValue: defaultValue, Spacer: makeSpacer(f.LongName, maxLength), } h.AddFlagToHelp(newHelpFlag) } } // AddFlagToHelp adds a flag to help output if it does not exist func (h *Help) AddFlagToHelp(f HelpFlag) { for _, existingFlag := range h.Flags { if len(existingFlag.ShortName) > 0 && existingFlag.ShortName == f.ShortName { return } if len(existingFlag.LongName) > 0 && existingFlag.LongName == f.LongName { return } } h.Flags = append(h.Flags, f) } // getLongestNameLength takes a slice of any supported flag and returns the length of the longest of their names func getLongestNameLength(slice interface{}, min int) int { var maxLength = min s := reflect.ValueOf(slice) if s.Kind() != reflect.Slice { log.Panicf("Paremeter given to getLongestNameLength() is of type %s. Expected slice", s.Kind()) } for i := 0; i < s.Len(); i++ { option := s.Index(i).Interface() var name string switch t := option.(type) { case *Subcommand: name = t.Name case *Flag: name = t.LongName case *PositionalValue: name = t.Name default: log.Panicf("Unexpected type %T found in slice passed to getLongestNameLength(). 
Possible types: *Subcommand, *Flag, *PositionalValue", t) } length := len(name) if length > maxLength { maxLength = length } } return maxLength } // makeSpacer creates a string of whitespaces, with a length of the given // maxLength minus the length of the given name func makeSpacer(name string, maxLength int) string { length := maxLength - utf8.RuneCountInString(name) if length < 0 { length = 0 } return strings.Repeat(" ", length) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/integrii/flaggy/help.go
vendor/github.com/integrii/flaggy/help.go
package flaggy // defaultHelpTemplate is the help template used by default // {{if (or (or (gt (len .StringFlags) 0) (gt (len .IntFlags) 0)) (gt (len .BoolFlags) 0))}} // {{if (or (gt (len .StringFlags) 0) (gt (len .BoolFlags) 0))}} const defaultHelpTemplate = `{{.CommandName}}{{if .Description}} - {{.Description}}{{end}}{{if .PrependMessage}} {{.PrependMessage}}{{end}} {{if .UsageString}} Usage: {{.UsageString}}{{end}}{{if .Positionals}} Positional Variables: {{range .Positionals}} {{.Name}} {{.Spacer}}{{if .Description}} {{.Description}}{{end}}{{if .DefaultValue}} (default: {{.DefaultValue}}){{else}}{{if .Required}} (Required){{end}}{{end}}{{end}}{{end}}{{if .Subcommands}} Subcommands: {{range .Subcommands}} {{.LongName}}{{if .ShortName}} ({{.ShortName}}){{end}}{{if .Position}}{{if gt .Position 1}} (position {{.Position}}){{end}}{{end}}{{if .Description}} {{.Spacer}}{{.Description}}{{end}}{{end}} {{end}}{{if (gt (len .Flags) 0)}} Flags: {{if .Flags}}{{range .Flags}} {{if .ShortName}}-{{.ShortName}} {{else}} {{end}}{{if .LongName}}--{{.LongName}}{{end}}{{if .Description}} {{.Spacer}}{{.Description}}{{if .DefaultValue}} (default: {{.DefaultValue}}){{end}}{{end}}{{end}}{{end}} {{end}}{{if .AppendMessage}}{{.AppendMessage}} {{end}}{{if .Message}} {{.Message}}{{end}} `
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/integrii/flaggy/main.go
vendor/github.com/integrii/flaggy/main.go
// Package flaggy is an input flag parsing package that supports recursive
// subcommands, positional values, and any-position flags without
// unnecessary complexities.
//
// For a getting started tutorial and full feature list, check out the
// readme at https://github.com/integrii/flaggy.
package flaggy // import "github.com/integrii/flaggy"

import (
	"fmt"
	"log"
	"net"
	"os"
	"strconv"
	"strings"
	"time"
)

// strings used for builtin help and version flags both short and long
const versionFlagLongName = "version"
const helpFlagLongName = "help"
const helpFlagShortName = "h"

// defaultVersion is applied to parsers when they are created
const defaultVersion = "0.0.0"

// DebugMode indicates that debug output should be enabled
var DebugMode bool

// DefaultHelpTemplate is the help template that will be used
// for newly created subcommands and commands
var DefaultHelpTemplate = defaultHelpTemplate

// DefaultParser is the default parser that is used with the package-level public
// functions
var DefaultParser *Parser

// TrailingArguments holds trailing arguments in the main parser after parsing
// has been run.
var TrailingArguments []string

func init() {
	// allow usage like flaggy.String by enabling a default Parser
	ResetParser()
}

// ResetParser resets the default parser to a fresh instance. Uses the
// name of the binary executing as the program name by default.
// NOTE(review): splits os.Args[0] on "/" only; on Windows a backslash path
// would be kept whole — confirm this is intended.
func ResetParser() {
	if len(os.Args) > 0 {
		chunks := strings.Split(os.Args[0], "/")
		DefaultParser = NewParser(chunks[len(chunks)-1])
	} else {
		DefaultParser = NewParser("default")
	}
}

// Parse parses flags as requested in the default package parser.
// Panics (via log.Panicln) on parse errors.
func Parse() {
	err := DefaultParser.Parse()
	TrailingArguments = DefaultParser.TrailingArguments
	if err != nil {
		log.Panicln("Error from argument parser:", err)
	}
}

// ParseArgs parses the passed args as if they were the arguments to the
// running binary. Targets the default main parser for the package.
// Panics (via log.Panicln) on parse errors.
func ParseArgs(args []string) {
	err := DefaultParser.ParseArgs(args)
	TrailingArguments = DefaultParser.TrailingArguments
	if err != nil {
		log.Panicln("Error from argument parser:", err)
	}
}

// String adds a new string flag
func String(assignmentVar *string, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// StringSlice adds a new slice of strings flag
// Specify the flag multiple times to fill the slice
func StringSlice(assignmentVar *[]string, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// Bool adds a new bool flag
func Bool(assignmentVar *bool, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// BoolSlice adds a new slice of bools flag
// Specify the flag multiple times to fill the slice
func BoolSlice(assignmentVar *[]bool, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// ByteSlice adds a new slice of bytes flag
// Specify the flag multiple times to fill the slice. Takes hex as input.
func ByteSlice(assignmentVar *[]byte, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// Duration adds a new time.Duration flag.
// Input format is described in time.ParseDuration().
// Example values: 1h, 1h50m, 32s
func Duration(assignmentVar *time.Duration, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// DurationSlice adds a new time.Duration flag.
// Input format is described in time.ParseDuration().
// Example values: 1h, 1h50m, 32s
// Specify the flag multiple times to fill the slice.
func DurationSlice(assignmentVar *[]time.Duration, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// Float32 adds a new float32 flag.
func Float32(assignmentVar *float32, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// Float32Slice adds a new float32 flag.
// Specify the flag multiple times to fill the slice.
func Float32Slice(assignmentVar *[]float32, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// Float64 adds a new float64 flag.
func Float64(assignmentVar *float64, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// Float64Slice adds a new float64 flag.
// Specify the flag multiple times to fill the slice.
func Float64Slice(assignmentVar *[]float64, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// Int adds a new int flag
func Int(assignmentVar *int, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// IntSlice adds a new int slice flag.
// Specify the flag multiple times to fill the slice.
func IntSlice(assignmentVar *[]int, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// UInt adds a new uint flag
func UInt(assignmentVar *uint, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// UIntSlice adds a new uint slice flag.
// Specify the flag multiple times to fill the slice.
func UIntSlice(assignmentVar *[]uint, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// UInt64 adds a new uint64 flag
func UInt64(assignmentVar *uint64, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// UInt64Slice adds a new uint64 slice flag.
// Specify the flag multiple times to fill the slice.
func UInt64Slice(assignmentVar *[]uint64, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// UInt32 adds a new uint32 flag
func UInt32(assignmentVar *uint32, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// UInt32Slice adds a new uint32 slice flag.
// Specify the flag multiple times to fill the slice.
func UInt32Slice(assignmentVar *[]uint32, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// UInt16 adds a new uint16 flag
func UInt16(assignmentVar *uint16, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// UInt16Slice adds a new uint16 slice flag.
// Specify the flag multiple times to fill the slice.
func UInt16Slice(assignmentVar *[]uint16, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// UInt8 adds a new uint8 flag
func UInt8(assignmentVar *uint8, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// UInt8Slice adds a new uint8 slice flag.
// Specify the flag multiple times to fill the slice.
func UInt8Slice(assignmentVar *[]uint8, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// Int64 adds a new int64 flag
func Int64(assignmentVar *int64, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// Int64Slice adds a new int64 slice flag.
// Specify the flag multiple times to fill the slice.
func Int64Slice(assignmentVar *[]int64, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// Int32 adds a new int32 flag
func Int32(assignmentVar *int32, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// Int32Slice adds a new int32 slice flag.
// Specify the flag multiple times to fill the slice.
func Int32Slice(assignmentVar *[]int32, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// Int16 adds a new int16 flag
func Int16(assignmentVar *int16, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// Int16Slice adds a new int16 slice flag.
// Specify the flag multiple times to fill the slice.
func Int16Slice(assignmentVar *[]int16, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// Int8 adds a new int8 flag
func Int8(assignmentVar *int8, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// Int8Slice adds a new int8 slice flag.
// Specify the flag multiple times to fill the slice.
func Int8Slice(assignmentVar *[]int8, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// IP adds a new net.IP flag.
func IP(assignmentVar *net.IP, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// IPSlice adds a new net.IP slice flag.
// Specify the flag multiple times to fill the slice.
func IPSlice(assignmentVar *[]net.IP, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// HardwareAddr adds a new net.HardwareAddr flag.
func HardwareAddr(assignmentVar *net.HardwareAddr, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// HardwareAddrSlice adds a new net.HardwareAddr slice flag.
// Specify the flag multiple times to fill the slice.
func HardwareAddrSlice(assignmentVar *[]net.HardwareAddr, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// IPMask adds a new net.IPMask flag. IPv4 Only.
func IPMask(assignmentVar *net.IPMask, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// IPMaskSlice adds a new net.IPMask slice flag. IPv4 only.
// Specify the flag multiple times to fill the slice.
func IPMaskSlice(assignmentVar *[]net.IPMask, shortName string, longName string, description string) {
	DefaultParser.add(assignmentVar, shortName, longName, description)
}

// AttachSubcommand adds a subcommand for parsing
func AttachSubcommand(subcommand *Subcommand, relativePosition int) {
	DefaultParser.AttachSubcommand(subcommand, relativePosition)
}

// ShowHelp shows parser help
func ShowHelp(message string) {
	DefaultParser.ShowHelpWithMessage(message)
}

// SetDescription sets the description of the default package command parser
func SetDescription(description string) {
	DefaultParser.Description = description
}

// SetVersion sets the version of the default package command parser
func SetVersion(version string) {
	DefaultParser.Version = version
}

// SetName sets the name of the default package command parser
func SetName(name string) {
	DefaultParser.Name = name
}

// ShowHelpAndExit shows parser help and exits with status code 2
func ShowHelpAndExit(message string) {
	ShowHelp(message)
	exitOrPanic(2)
}

// PanicInsteadOfExit is used when running tests
var PanicInsteadOfExit bool

// exitOrPanic panics instead of calling os.Exit so that tests can catch
// more failures
func exitOrPanic(code int) {
	if PanicInsteadOfExit {
		panic("Panic instead of exit with code: " + strconv.Itoa(code))
	}
	os.Exit(code)
}

// ShowHelpOnUnexpectedEnable enables the ShowHelpOnUnexpected behavior on the
// default parser. This causes unknown inputs to error out.
func ShowHelpOnUnexpectedEnable() {
	DefaultParser.ShowHelpOnUnexpected = true
}

// ShowHelpOnUnexpectedDisable disables the ShowHelpOnUnexpected behavior on the
// default parser. This causes unknown inputs to be accepted silently.
func ShowHelpOnUnexpectedDisable() {
	DefaultParser.ShowHelpOnUnexpected = false
}

// AddPositionalValue adds a positional value to the main parser at the global
// context
func AddPositionalValue(assignmentVar *string, name string, relativePosition int, required bool, description string) {
	DefaultParser.AddPositionalValue(assignmentVar, name, relativePosition, required, description)
}

// debugPrint prints if debugging is enabled
func debugPrint(i ...interface{}) {
	if DebugMode {
		fmt.Println(i...)
	}
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/atotto/clipboard/clipboard_darwin.go
vendor/github.com/atotto/clipboard/clipboard_darwin.go
// Copyright 2013 @atotto. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build darwin package clipboard import ( "os/exec" ) var ( pasteCmdArgs = "pbpaste" copyCmdArgs = "pbcopy" ) func getPasteCommand() *exec.Cmd { return exec.Command(pasteCmdArgs) } func getCopyCommand() *exec.Cmd { return exec.Command(copyCmdArgs) } func readAll() (string, error) { pasteCmd := getPasteCommand() out, err := pasteCmd.Output() if err != nil { return "", err } return string(out), nil } func writeAll(text string) error { copyCmd := getCopyCommand() in, err := copyCmd.StdinPipe() if err != nil { return err } if err := copyCmd.Start(); err != nil { return err } if _, err := in.Write([]byte(text)); err != nil { return err } if err := in.Close(); err != nil { return err } return copyCmd.Wait() }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/atotto/clipboard/clipboard_windows.go
vendor/github.com/atotto/clipboard/clipboard_windows.go
// Copyright 2013 @atotto. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build windows

package clipboard

import (
	"runtime"
	"syscall"
	"time"
	"unsafe"
)

const (
	cfUnicodetext = 13     // CF_UNICODETEXT clipboard format
	gmemMoveable  = 0x0002 // GMEM_MOVEABLE allocation flag for GlobalAlloc
)

// Win32 procedures resolved at runtime; MustLoadDLL/MustFindProc panic if
// user32 is unavailable (it always is on Windows).
var (
	user32                     = syscall.MustLoadDLL("user32")
	isClipboardFormatAvailable = user32.MustFindProc("IsClipboardFormatAvailable")
	openClipboard              = user32.MustFindProc("OpenClipboard")
	closeClipboard             = user32.MustFindProc("CloseClipboard")
	emptyClipboard             = user32.MustFindProc("EmptyClipboard")
	getClipboardData           = user32.MustFindProc("GetClipboardData")
	setClipboardData           = user32.MustFindProc("SetClipboardData")

	kernel32     = syscall.NewLazyDLL("kernel32")
	globalAlloc  = kernel32.NewProc("GlobalAlloc")
	globalFree   = kernel32.NewProc("GlobalFree")
	globalLock   = kernel32.NewProc("GlobalLock")
	globalUnlock = kernel32.NewProc("GlobalUnlock")
	lstrcpy      = kernel32.NewProc("lstrcpyW")
)

// waitOpenClipboard opens the clipboard, waiting for up to a second to do so.
// Retries because another process may briefly hold the clipboard open.
func waitOpenClipboard() error {
	started := time.Now()
	limit := started.Add(time.Second)
	var r uintptr
	var err error
	for time.Now().Before(limit) {
		r, _, err = openClipboard.Call(0)
		if r != 0 {
			return nil
		}
		time.Sleep(time.Millisecond)
	}
	return err
}

// readAll returns the clipboard contents as a string (CF_UNICODETEXT).
func readAll() (string, error) {
	// LockOSThread ensure that the whole method will keep executing on the same thread from begin to end (it actually locks the goroutine thread attribution).
	// Otherwise if the goroutine switch thread during execution (which is a common practice), the OpenClipboard and CloseClipboard will happen on two different threads, and it will result in a clipboard deadlock.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	if formatAvailable, _, err := isClipboardFormatAvailable.Call(cfUnicodetext); formatAvailable == 0 {
		return "", err
	}
	err := waitOpenClipboard()
	if err != nil {
		return "", err
	}

	h, _, err := getClipboardData.Call(cfUnicodetext)
	if h == 0 {
		_, _, _ = closeClipboard.Call()
		return "", err
	}

	l, _, err := globalLock.Call(h)
	if l == 0 {
		_, _, _ = closeClipboard.Call()
		return "", err
	}

	// Reinterpret the locked global memory as a NUL-terminated UTF-16 string.
	text := syscall.UTF16ToString((*[1 << 20]uint16)(unsafe.Pointer(l))[:])

	r, _, err := globalUnlock.Call(h)
	if r == 0 {
		_, _, _ = closeClipboard.Call()
		return "", err
	}

	closed, _, err := closeClipboard.Call()
	if closed == 0 {
		return "", err
	}
	return text, nil
}

// writeAll replaces the clipboard contents with text (CF_UNICODETEXT).
func writeAll(text string) error {
	// LockOSThread ensure that the whole method will keep executing on the same thread from begin to end (it actually locks the goroutine thread attribution).
	// Otherwise if the goroutine switch thread during execution (which is a common practice), the OpenClipboard and CloseClipboard will happen on two different threads, and it will result in a clipboard deadlock.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	err := waitOpenClipboard()
	if err != nil {
		return err
	}

	r, _, err := emptyClipboard.Call(0)
	if r == 0 {
		_, _, _ = closeClipboard.Call()
		return err
	}

	data := syscall.StringToUTF16(text)

	// "If the hMem parameter identifies a memory object, the object must have
	// been allocated using the function with the GMEM_MOVEABLE flag."
	h, _, err := globalAlloc.Call(gmemMoveable, uintptr(len(data)*int(unsafe.Sizeof(data[0]))))
	if h == 0 {
		_, _, _ = closeClipboard.Call()
		return err
	}
	// Free the allocation on any error path; ownership passes to the system
	// only after a successful SetClipboardData (h is zeroed below).
	defer func() {
		if h != 0 {
			globalFree.Call(h)
		}
	}()

	l, _, err := globalLock.Call(h)
	if l == 0 {
		_, _, _ = closeClipboard.Call()
		return err
	}

	r, _, err = lstrcpy.Call(l, uintptr(unsafe.Pointer(&data[0])))
	if r == 0 {
		_, _, _ = closeClipboard.Call()
		return err
	}

	r, _, err = globalUnlock.Call(h)
	if r == 0 {
		// GlobalUnlock returning 0 with errno 0 just means the lock count hit
		// zero; only a non-zero errno is a real failure.
		if err.(syscall.Errno) != 0 {
			_, _, _ = closeClipboard.Call()
			return err
		}
	}

	r, _, err = setClipboardData.Call(cfUnicodetext, h)
	if r == 0 {
		_, _, _ = closeClipboard.Call()
		return err
	}
	h = 0 // suppress deferred cleanup

	closed, _, err := closeClipboard.Call()
	if closed == 0 {
		return err
	}
	return nil
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/atotto/clipboard/clipboard.go
vendor/github.com/atotto/clipboard/clipboard.go
// Copyright 2013 @atotto. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package clipboard read/write on clipboard
package clipboard

// ReadAll read string from clipboard.
// Delegates to the platform-specific readAll (darwin/windows/unix/plan9 files).
func ReadAll() (string, error) {
	return readAll()
}

// WriteAll write string to clipboard.
// Delegates to the platform-specific writeAll (darwin/windows/unix/plan9 files).
func WriteAll(text string) error {
	return writeAll(text)
}

// Unsupported might be set true during clipboard init, to help callers decide
// whether or not to offer clipboard options.
var Unsupported bool
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/atotto/clipboard/clipboard_unix.go
vendor/github.com/atotto/clipboard/clipboard_unix.go
// Copyright 2013 @atotto. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build freebsd linux netbsd openbsd solaris dragonfly

package clipboard

import (
	"errors"
	"os"
	"os/exec"
)

// Names of the external clipboard helper binaries probed at init time.
const (
	xsel               = "xsel"
	xclip              = "xclip"
	powershellExe      = "powershell.exe" // WSL interop
	clipExe            = "clip.exe"       // WSL interop
	wlcopy             = "wl-copy"        // Wayland
	wlpaste            = "wl-paste"       // Wayland
	termuxClipboardGet = "termux-clipboard-get"
	termuxClipboardSet = "termux-clipboard-set"
)

var (
	// Primary selects the primary selection instead of the clipboard.
	Primary bool
	// trimDos strips the trailing CRLF that the WSL clip.exe path appends.
	trimDos bool

	pasteCmdArgs []string
	copyCmdArgs  []string

	xselPasteArgs = []string{xsel, "--output", "--clipboard"}
	xselCopyArgs  = []string{xsel, "--input", "--clipboard"}

	xclipPasteArgs = []string{xclip, "-out", "-selection", "clipboard"}
	xclipCopyArgs  = []string{xclip, "-in", "-selection", "clipboard"}

	powershellExePasteArgs = []string{powershellExe, "Get-Clipboard"}
	clipExeCopyArgs        = []string{clipExe}

	wlpasteArgs = []string{wlpaste, "--no-newline"}
	wlcopyArgs  = []string{wlcopy}

	termuxPasteArgs = []string{termuxClipboardGet}
	termuxCopyArgs  = []string{termuxClipboardSet}

	missingCommands = errors.New("No clipboard utilities available. Please install xsel, xclip, wl-clipboard or Termux:API add-on for termux-clipboard-get/set.")
)

// init probes for an available clipboard helper in preference order:
// wl-clipboard (only when WAYLAND_DISPLAY is set), xclip, xsel, termux,
// then WSL's clip.exe/powershell.exe. If none is found, Unsupported is set.
func init() {
	if os.Getenv("WAYLAND_DISPLAY") != "" {
		pasteCmdArgs = wlpasteArgs
		copyCmdArgs = wlcopyArgs

		if _, err := exec.LookPath(wlcopy); err == nil {
			if _, err := exec.LookPath(wlpaste); err == nil {
				return
			}
		}
	}

	// each candidate's args are assigned before probing so that a successful
	// LookPath can simply return with the args already in place
	pasteCmdArgs = xclipPasteArgs
	copyCmdArgs = xclipCopyArgs

	if _, err := exec.LookPath(xclip); err == nil {
		return
	}

	pasteCmdArgs = xselPasteArgs
	copyCmdArgs = xselCopyArgs

	if _, err := exec.LookPath(xsel); err == nil {
		return
	}

	pasteCmdArgs = termuxPasteArgs
	copyCmdArgs = termuxCopyArgs

	if _, err := exec.LookPath(termuxClipboardSet); err == nil {
		if _, err := exec.LookPath(termuxClipboardGet); err == nil {
			return
		}
	}

	pasteCmdArgs = powershellExePasteArgs
	copyCmdArgs = clipExeCopyArgs
	trimDos = true

	if _, err := exec.LookPath(clipExe); err == nil {
		if _, err := exec.LookPath(powershellExe); err == nil {
			return
		}
	}

	Unsupported = true
}

// getPasteCommand builds the paste command.
// NOTE(review): when Primary is set, the global arg slice is truncated in
// place to just the binary name (dropping selection flags) and never
// restored — looks intentional upstream but confirm before relying on it.
func getPasteCommand() *exec.Cmd {
	if Primary {
		pasteCmdArgs = pasteCmdArgs[:1]
	}
	return exec.Command(pasteCmdArgs[0], pasteCmdArgs[1:]...)
}

// getCopyCommand builds the copy command (same Primary caveat as above).
func getCopyCommand() *exec.Cmd {
	if Primary {
		copyCmdArgs = copyCmdArgs[:1]
	}
	return exec.Command(copyCmdArgs[0], copyCmdArgs[1:]...)
}

// readAll returns the clipboard contents via the selected helper.
func readAll() (string, error) {
	if Unsupported {
		return "", missingCommands
	}
	pasteCmd := getPasteCommand()
	out, err := pasteCmd.Output()
	if err != nil {
		return "", err
	}
	result := string(out)
	if trimDos && len(result) > 1 {
		// drop the trailing "\r\n" that the clip.exe round trip appends
		result = result[:len(result)-2]
	}
	return result, nil
}

// writeAll replaces the clipboard contents by piping text into the helper.
func writeAll(text string) error {
	if Unsupported {
		return missingCommands
	}
	copyCmd := getCopyCommand()
	in, err := copyCmd.StdinPipe()
	if err != nil {
		return err
	}
	if err := copyCmd.Start(); err != nil {
		return err
	}
	if _, err := in.Write([]byte(text)); err != nil {
		return err
	}
	if err := in.Close(); err != nil {
		return err
	}
	return copyCmd.Wait()
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/atotto/clipboard/clipboard_plan9.go
vendor/github.com/atotto/clipboard/clipboard_plan9.go
// Copyright 2013 @atotto. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build plan9 package clipboard import ( "os" "io/ioutil" ) func readAll() (string, error) { f, err := os.Open("/dev/snarf") if err != nil { return "", err } defer f.Close() str, err := ioutil.ReadAll(f) if err != nil { return "", err } return string(str), nil } func writeAll(text string) error { f, err := os.OpenFile("/dev/snarf", os.O_WRONLY, 0666) if err != nil { return err } defer f.Close() _, err = f.Write([]byte(text)) if err != nil { return err } return nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/mattn/go-colorable/colorable_windows.go
vendor/github.com/mattn/go-colorable/colorable_windows.go
//go:build windows && !appengine
// +build windows,!appengine

package colorable

import (
	"bytes"
	"io"
	"math"
	"os"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"unsafe"

	"github.com/mattn/go-isatty"
)

// Windows console text attribute bits (see CONSOLE_SCREEN_BUFFER_INFO).
const (
	foregroundBlue      = 0x1
	foregroundGreen     = 0x2
	foregroundRed       = 0x4
	foregroundIntensity = 0x8
	foregroundMask      = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
	backgroundBlue      = 0x10
	backgroundGreen     = 0x20
	backgroundRed       = 0x40
	backgroundIntensity = 0x80
	backgroundMask      = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
	commonLvbUnderscore = 0x8000

	cENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4
)

const (
	genericRead  = 0x80000000
	genericWrite = 0x40000000
)

const (
	consoleTextmodeBuffer = 0x1
)

// Go-side aliases for the Win32 primitive types used by the console API.
type wchar uint16
type short int16
type dword uint32
type word uint16

type coord struct {
	x short
	y short
}

type smallRect struct {
	left   short
	top    short
	right  short
	bottom short
}

type consoleScreenBufferInfo struct {
	size              coord
	cursorPosition    coord
	attributes        word
	window            smallRect
	maximumWindowSize coord
}

type consoleCursorInfo struct {
	size    dword
	visible int32
}

var (
	kernel32                       = syscall.NewLazyDLL("kernel32.dll")
	procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
	procSetConsoleTextAttribute    = kernel32.NewProc("SetConsoleTextAttribute")
	procSetConsoleCursorPosition   = kernel32.NewProc("SetConsoleCursorPosition")
	procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
	procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
	procGetConsoleCursorInfo       = kernel32.NewProc("GetConsoleCursorInfo")
	procSetConsoleCursorInfo       = kernel32.NewProc("SetConsoleCursorInfo")
	procSetConsoleTitle            = kernel32.NewProc("SetConsoleTitleW")
	procGetConsoleMode             = kernel32.NewProc("GetConsoleMode")
	procSetConsoleMode             = kernel32.NewProc("SetConsoleMode")
	procCreateConsoleScreenBuffer  = kernel32.NewProc("CreateConsoleScreenBuffer")
)

// Writer provides colorable Writer to the console.
type Writer struct {
	out       io.Writer
	handle    syscall.Handle
	althandle syscall.Handle
	oldattr   word // attributes captured at construction, used to restore defaults
	oldpos    coord
	rest      bytes.Buffer // carries an incomplete escape sequence between writes
	mutex     sync.Mutex
}

// NewColorable returns new instance of Writer which handles escape sequence from File.
// If the file is not a terminal, or the console already has native VT
// processing enabled, the file is returned unchanged.
func NewColorable(file *os.File) io.Writer {
	if file == nil {
		panic("nil passed instead of *os.File to NewColorable()")
	}

	if isatty.IsTerminal(file.Fd()) {
		var mode uint32
		if r, _, _ := procGetConsoleMode.Call(file.Fd(), uintptr(unsafe.Pointer(&mode))); r != 0 && mode&cENABLE_VIRTUAL_TERMINAL_PROCESSING != 0 {
			// console interprets ANSI sequences natively; no wrapping needed
			return file
		}
		var csbi consoleScreenBufferInfo
		handle := syscall.Handle(file.Fd())
		procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
		return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}}
	}
	return file
}

// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
func NewColorableStdout() io.Writer {
	return NewColorable(os.Stdout)
}

// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
func NewColorableStderr() io.Writer { return NewColorable(os.Stderr) } var color256 = map[int]int{ 0: 0x000000, 1: 0x800000, 2: 0x008000, 3: 0x808000, 4: 0x000080, 5: 0x800080, 6: 0x008080, 7: 0xc0c0c0, 8: 0x808080, 9: 0xff0000, 10: 0x00ff00, 11: 0xffff00, 12: 0x0000ff, 13: 0xff00ff, 14: 0x00ffff, 15: 0xffffff, 16: 0x000000, 17: 0x00005f, 18: 0x000087, 19: 0x0000af, 20: 0x0000d7, 21: 0x0000ff, 22: 0x005f00, 23: 0x005f5f, 24: 0x005f87, 25: 0x005faf, 26: 0x005fd7, 27: 0x005fff, 28: 0x008700, 29: 0x00875f, 30: 0x008787, 31: 0x0087af, 32: 0x0087d7, 33: 0x0087ff, 34: 0x00af00, 35: 0x00af5f, 36: 0x00af87, 37: 0x00afaf, 38: 0x00afd7, 39: 0x00afff, 40: 0x00d700, 41: 0x00d75f, 42: 0x00d787, 43: 0x00d7af, 44: 0x00d7d7, 45: 0x00d7ff, 46: 0x00ff00, 47: 0x00ff5f, 48: 0x00ff87, 49: 0x00ffaf, 50: 0x00ffd7, 51: 0x00ffff, 52: 0x5f0000, 53: 0x5f005f, 54: 0x5f0087, 55: 0x5f00af, 56: 0x5f00d7, 57: 0x5f00ff, 58: 0x5f5f00, 59: 0x5f5f5f, 60: 0x5f5f87, 61: 0x5f5faf, 62: 0x5f5fd7, 63: 0x5f5fff, 64: 0x5f8700, 65: 0x5f875f, 66: 0x5f8787, 67: 0x5f87af, 68: 0x5f87d7, 69: 0x5f87ff, 70: 0x5faf00, 71: 0x5faf5f, 72: 0x5faf87, 73: 0x5fafaf, 74: 0x5fafd7, 75: 0x5fafff, 76: 0x5fd700, 77: 0x5fd75f, 78: 0x5fd787, 79: 0x5fd7af, 80: 0x5fd7d7, 81: 0x5fd7ff, 82: 0x5fff00, 83: 0x5fff5f, 84: 0x5fff87, 85: 0x5fffaf, 86: 0x5fffd7, 87: 0x5fffff, 88: 0x870000, 89: 0x87005f, 90: 0x870087, 91: 0x8700af, 92: 0x8700d7, 93: 0x8700ff, 94: 0x875f00, 95: 0x875f5f, 96: 0x875f87, 97: 0x875faf, 98: 0x875fd7, 99: 0x875fff, 100: 0x878700, 101: 0x87875f, 102: 0x878787, 103: 0x8787af, 104: 0x8787d7, 105: 0x8787ff, 106: 0x87af00, 107: 0x87af5f, 108: 0x87af87, 109: 0x87afaf, 110: 0x87afd7, 111: 0x87afff, 112: 0x87d700, 113: 0x87d75f, 114: 0x87d787, 115: 0x87d7af, 116: 0x87d7d7, 117: 0x87d7ff, 118: 0x87ff00, 119: 0x87ff5f, 120: 0x87ff87, 121: 0x87ffaf, 122: 0x87ffd7, 123: 0x87ffff, 124: 0xaf0000, 125: 0xaf005f, 126: 0xaf0087, 127: 0xaf00af, 128: 0xaf00d7, 129: 0xaf00ff, 130: 0xaf5f00, 131: 0xaf5f5f, 132: 0xaf5f87, 133: 0xaf5faf, 
134: 0xaf5fd7, 135: 0xaf5fff, 136: 0xaf8700, 137: 0xaf875f, 138: 0xaf8787, 139: 0xaf87af, 140: 0xaf87d7, 141: 0xaf87ff, 142: 0xafaf00, 143: 0xafaf5f, 144: 0xafaf87, 145: 0xafafaf, 146: 0xafafd7, 147: 0xafafff, 148: 0xafd700, 149: 0xafd75f, 150: 0xafd787, 151: 0xafd7af, 152: 0xafd7d7, 153: 0xafd7ff, 154: 0xafff00, 155: 0xafff5f, 156: 0xafff87, 157: 0xafffaf, 158: 0xafffd7, 159: 0xafffff, 160: 0xd70000, 161: 0xd7005f, 162: 0xd70087, 163: 0xd700af, 164: 0xd700d7, 165: 0xd700ff, 166: 0xd75f00, 167: 0xd75f5f, 168: 0xd75f87, 169: 0xd75faf, 170: 0xd75fd7, 171: 0xd75fff, 172: 0xd78700, 173: 0xd7875f, 174: 0xd78787, 175: 0xd787af, 176: 0xd787d7, 177: 0xd787ff, 178: 0xd7af00, 179: 0xd7af5f, 180: 0xd7af87, 181: 0xd7afaf, 182: 0xd7afd7, 183: 0xd7afff, 184: 0xd7d700, 185: 0xd7d75f, 186: 0xd7d787, 187: 0xd7d7af, 188: 0xd7d7d7, 189: 0xd7d7ff, 190: 0xd7ff00, 191: 0xd7ff5f, 192: 0xd7ff87, 193: 0xd7ffaf, 194: 0xd7ffd7, 195: 0xd7ffff, 196: 0xff0000, 197: 0xff005f, 198: 0xff0087, 199: 0xff00af, 200: 0xff00d7, 201: 0xff00ff, 202: 0xff5f00, 203: 0xff5f5f, 204: 0xff5f87, 205: 0xff5faf, 206: 0xff5fd7, 207: 0xff5fff, 208: 0xff8700, 209: 0xff875f, 210: 0xff8787, 211: 0xff87af, 212: 0xff87d7, 213: 0xff87ff, 214: 0xffaf00, 215: 0xffaf5f, 216: 0xffaf87, 217: 0xffafaf, 218: 0xffafd7, 219: 0xffafff, 220: 0xffd700, 221: 0xffd75f, 222: 0xffd787, 223: 0xffd7af, 224: 0xffd7d7, 225: 0xffd7ff, 226: 0xffff00, 227: 0xffff5f, 228: 0xffff87, 229: 0xffffaf, 230: 0xffffd7, 231: 0xffffff, 232: 0x080808, 233: 0x121212, 234: 0x1c1c1c, 235: 0x262626, 236: 0x303030, 237: 0x3a3a3a, 238: 0x444444, 239: 0x4e4e4e, 240: 0x585858, 241: 0x626262, 242: 0x6c6c6c, 243: 0x767676, 244: 0x808080, 245: 0x8a8a8a, 246: 0x949494, 247: 0x9e9e9e, 248: 0xa8a8a8, 249: 0xb2b2b2, 250: 0xbcbcbc, 251: 0xc6c6c6, 252: 0xd0d0d0, 253: 0xdadada, 254: 0xe4e4e4, 255: 0xeeeeee, } // `\033]0;TITLESTR\007` func doTitleSequence(er *bytes.Reader) error { var c byte var err error c, err = er.ReadByte() if err != nil { return err } if c != '0' && c 
!= '2' { return nil } c, err = er.ReadByte() if err != nil { return err } if c != ';' { return nil } title := make([]byte, 0, 80) for { c, err = er.ReadByte() if err != nil { return err } if c == 0x07 || c == '\n' { break } title = append(title, c) } if len(title) > 0 { title8, err := syscall.UTF16PtrFromString(string(title)) if err == nil { procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) } } return nil } // returns Atoi(s) unless s == "" in which case it returns def func atoiWithDefault(s string, def int) (int, error) { if s == "" { return def, nil } return strconv.Atoi(s) } // Write writes data on console func (w *Writer) Write(data []byte) (n int, err error) { w.mutex.Lock() defer w.mutex.Unlock() var csbi consoleScreenBufferInfo procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) handle := w.handle var er *bytes.Reader if w.rest.Len() > 0 { var rest bytes.Buffer w.rest.WriteTo(&rest) w.rest.Reset() rest.Write(data) er = bytes.NewReader(rest.Bytes()) } else { er = bytes.NewReader(data) } var plaintext bytes.Buffer loop: for { c1, err := er.ReadByte() if err != nil { plaintext.WriteTo(w.out) break loop } if c1 != 0x1b { plaintext.WriteByte(c1) continue } _, err = plaintext.WriteTo(w.out) if err != nil { break loop } c2, err := er.ReadByte() if err != nil { break loop } switch c2 { case '>': continue case ']': w.rest.WriteByte(c1) w.rest.WriteByte(c2) er.WriteTo(&w.rest) if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { break loop } er = bytes.NewReader(w.rest.Bytes()[2:]) err := doTitleSequence(er) if err != nil { break loop } w.rest.Reset() continue // https://github.com/mattn/go-colorable/issues/27 case '7': procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) w.oldpos = csbi.cursorPosition continue case '8': procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) continue case 0x5b: // execute part after switch default: continue } w.rest.WriteByte(c1) 
w.rest.WriteByte(c2) er.WriteTo(&w.rest) var buf bytes.Buffer var m byte for i, c := range w.rest.Bytes()[2:] { if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { m = c er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) w.rest.Reset() break } buf.Write([]byte(string(c))) } if m == 0 { break loop } switch m { case 'A': n, err = atoiWithDefault(buf.String(), 1) if err != nil { continue } procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.y -= short(n) procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'B': n, err = atoiWithDefault(buf.String(), 1) if err != nil { continue } procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.y += short(n) procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'C': n, err = atoiWithDefault(buf.String(), 1) if err != nil { continue } procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x += short(n) procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'D': n, err = atoiWithDefault(buf.String(), 1) if err != nil { continue } procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x -= short(n) if csbi.cursorPosition.x < 0 { csbi.cursorPosition.x = 0 } procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'E': n, err = strconv.Atoi(buf.String()) if err != nil { continue } procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x = 0 csbi.cursorPosition.y += short(n) procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'F': n, err = strconv.Atoi(buf.String()) if err != nil { continue } 
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x = 0 csbi.cursorPosition.y -= short(n) procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'G': n, err = strconv.Atoi(buf.String()) if err != nil { continue } if n < 1 { n = 1 } procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) csbi.cursorPosition.x = short(n - 1) procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'H', 'f': procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) if buf.Len() > 0 { token := strings.Split(buf.String(), ";") switch len(token) { case 1: n1, err := strconv.Atoi(token[0]) if err != nil { continue } csbi.cursorPosition.y = short(n1 - 1) case 2: n1, err := strconv.Atoi(token[0]) if err != nil { continue } n2, err := strconv.Atoi(token[1]) if err != nil { continue } csbi.cursorPosition.x = short(n2 - 1) csbi.cursorPosition.y = short(n1 - 1) } } else { csbi.cursorPosition.y = 0 } procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) case 'J': n := 0 if buf.Len() > 0 { n, err = strconv.Atoi(buf.String()) if err != nil { continue } } var count, written dword var cursor coord procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) switch n { case 0: cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) case 1: cursor = coord{x: csbi.window.left, y: csbi.window.top} count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) case 2: cursor = coord{x: csbi.window.left, y: csbi.window.top} count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + 
dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) } procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) case 'K': n := 0 if buf.Len() > 0 { n, err = strconv.Atoi(buf.String()) if err != nil { continue } } procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) var cursor coord var count, written dword switch n { case 0: cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} count = dword(csbi.size.x - csbi.cursorPosition.x) case 1: cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} count = dword(csbi.size.x - csbi.cursorPosition.x) case 2: cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} count = dword(csbi.size.x) } procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) case 'X': n := 0 if buf.Len() > 0 { n, err = strconv.Atoi(buf.String()) if err != nil { continue } } procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) var cursor coord var written dword cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(n), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) case 'm': procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) attr 
:= csbi.attributes cs := buf.String() if cs == "" { procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) continue } token := strings.Split(cs, ";") for i := 0; i < len(token); i++ { ns := token[i] if n, err = strconv.Atoi(ns); err == nil { switch { case n == 0 || n == 100: attr = w.oldattr case n == 4: attr |= commonLvbUnderscore case (1 <= n && n <= 3) || n == 5: attr |= foregroundIntensity case n == 7 || n == 27: attr = (attr &^ (foregroundMask | backgroundMask)) | ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) case n == 22: attr &^= foregroundIntensity case n == 24: attr &^= commonLvbUnderscore case 30 <= n && n <= 37: attr &= backgroundMask if (n-30)&1 != 0 { attr |= foregroundRed } if (n-30)&2 != 0 { attr |= foregroundGreen } if (n-30)&4 != 0 { attr |= foregroundBlue } case n == 38: // set foreground color. if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { if n256, err := strconv.Atoi(token[i+2]); err == nil { if n256foreAttr == nil { n256setup() } attr &= backgroundMask attr |= n256foreAttr[n256%len(n256foreAttr)] i += 2 } } else if len(token) == 5 && token[i+1] == "2" { var r, g, b int r, _ = strconv.Atoi(token[i+2]) g, _ = strconv.Atoi(token[i+3]) b, _ = strconv.Atoi(token[i+4]) i += 4 if r > 127 { attr |= foregroundRed } if g > 127 { attr |= foregroundGreen } if b > 127 { attr |= foregroundBlue } } else { attr = attr & (w.oldattr & backgroundMask) } case n == 39: // reset foreground color. attr &= backgroundMask attr |= w.oldattr & foregroundMask case 40 <= n && n <= 47: attr &= foregroundMask if (n-40)&1 != 0 { attr |= backgroundRed } if (n-40)&2 != 0 { attr |= backgroundGreen } if (n-40)&4 != 0 { attr |= backgroundBlue } case n == 48: // set background color. 
if i < len(token)-2 && token[i+1] == "5" { if n256, err := strconv.Atoi(token[i+2]); err == nil { if n256backAttr == nil { n256setup() } attr &= foregroundMask attr |= n256backAttr[n256%len(n256backAttr)] i += 2 } } else if len(token) == 5 && token[i+1] == "2" { var r, g, b int r, _ = strconv.Atoi(token[i+2]) g, _ = strconv.Atoi(token[i+3]) b, _ = strconv.Atoi(token[i+4]) i += 4 if r > 127 { attr |= backgroundRed } if g > 127 { attr |= backgroundGreen } if b > 127 { attr |= backgroundBlue } } else { attr = attr & (w.oldattr & foregroundMask) } case n == 49: // reset foreground color. attr &= foregroundMask attr |= w.oldattr & backgroundMask case 90 <= n && n <= 97: attr = (attr & backgroundMask) attr |= foregroundIntensity if (n-90)&1 != 0 { attr |= foregroundRed } if (n-90)&2 != 0 { attr |= foregroundGreen } if (n-90)&4 != 0 { attr |= foregroundBlue } case 100 <= n && n <= 107: attr = (attr & foregroundMask) attr |= backgroundIntensity if (n-100)&1 != 0 { attr |= backgroundRed } if (n-100)&2 != 0 { attr |= backgroundGreen } if (n-100)&4 != 0 { attr |= backgroundBlue } } procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) } } case 'h': var ci consoleCursorInfo cs := buf.String() if cs == "5>" { procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) ci.visible = 0 procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) } else if cs == "?25" { procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) ci.visible = 1 procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) } else if cs == "?1049" { if w.althandle == 0 { h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) w.althandle = syscall.Handle(h) if w.althandle != 0 { handle = w.althandle } } } case 'l': var ci consoleCursorInfo cs := buf.String() if cs == "5>" { procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) 
ci.visible = 1 procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) } else if cs == "?25" { procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) ci.visible = 0 procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) } else if cs == "?1049" { if w.althandle != 0 { syscall.CloseHandle(w.althandle) w.althandle = 0 handle = w.handle } } case 's': procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) w.oldpos = csbi.cursorPosition case 'u': procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) } } return len(data), nil } type consoleColor struct { rgb int red bool green bool blue bool intensity bool } func (c consoleColor) foregroundAttr() (attr word) { if c.red { attr |= foregroundRed } if c.green { attr |= foregroundGreen } if c.blue { attr |= foregroundBlue } if c.intensity { attr |= foregroundIntensity } return } func (c consoleColor) backgroundAttr() (attr word) { if c.red { attr |= backgroundRed } if c.green { attr |= backgroundGreen } if c.blue { attr |= backgroundBlue } if c.intensity { attr |= backgroundIntensity } return } var color16 = []consoleColor{ {0x000000, false, false, false, false}, {0x000080, false, false, true, false}, {0x008000, false, true, false, false}, {0x008080, false, true, true, false}, {0x800000, true, false, false, false}, {0x800080, true, false, true, false}, {0x808000, true, true, false, false}, {0xc0c0c0, true, true, true, false}, {0x808080, false, false, false, true}, {0x0000ff, false, false, true, true}, {0x00ff00, false, true, false, true}, {0x00ffff, false, true, true, true}, {0xff0000, true, false, false, true}, {0xff00ff, true, false, true, true}, {0xffff00, true, true, false, true}, {0xffffff, true, true, true, true}, } type hsv struct { h, s, v float32 } func (a hsv) dist(b hsv) float32 { dh := a.h - b.h switch { case dh > 0.5: dh = 1 - dh case dh < -0.5: dh = -1 - dh } ds := a.s - b.s dv := 
a.v - b.v return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) } func toHSV(rgb int) hsv { r, g, b := float32((rgb&0xFF0000)>>16)/256.0, float32((rgb&0x00FF00)>>8)/256.0, float32(rgb&0x0000FF)/256.0 min, max := minmax3f(r, g, b) h := max - min if h > 0 { if max == r { h = (g - b) / h if h < 0 { h += 6 } } else if max == g { h = 2 + (b-r)/h } else { h = 4 + (r-g)/h } } h /= 6.0 s := max - min if max != 0 { s /= max } v := max return hsv{h: h, s: s, v: v} } type hsvTable []hsv func toHSVTable(rgbTable []consoleColor) hsvTable { t := make(hsvTable, len(rgbTable)) for i, c := range rgbTable { t[i] = toHSV(c.rgb) } return t } func (t hsvTable) find(rgb int) consoleColor { hsv := toHSV(rgb) n := 7 l := float32(5.0) for i, p := range t { d := hsv.dist(p) if d < l { l, n = d, i } } return color16[n] } func minmax3f(a, b, c float32) (min, max float32) { if a < b { if b < c { return a, c } else if a < c { return a, b } else { return c, b } } else { if a < c { return b, c } else if b < c { return b, a } else { return c, a } } } var n256foreAttr []word var n256backAttr []word func n256setup() { n256foreAttr = make([]word, 256) n256backAttr = make([]word, 256) t := toHSVTable(color16) for i, rgb := range color256 { c := t.find(rgb) n256foreAttr[i] = c.foregroundAttr() n256backAttr[i] = c.backgroundAttr() } } // EnableColorsStdout enable colors if possible. func EnableColorsStdout(enabled *bool) func() { var mode uint32 h := os.Stdout.Fd() if r, _, _ := procGetConsoleMode.Call(h, uintptr(unsafe.Pointer(&mode))); r != 0 { if r, _, _ = procSetConsoleMode.Call(h, uintptr(mode|cENABLE_VIRTUAL_TERMINAL_PROCESSING)); r != 0 { if enabled != nil { *enabled = true } return func() { procSetConsoleMode.Call(h, uintptr(mode)) } } } if enabled != nil { *enabled = true } return func() {} }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/mattn/go-colorable/noncolorable.go
vendor/github.com/mattn/go-colorable/noncolorable.go
package colorable

import (
	"bytes"
	"io"
)

// NonColorable holds a writer and removes any ANSI escape sequence from the
// data written through it before forwarding to the underlying writer.
type NonColorable struct {
	out io.Writer
}

// NewNonColorable returns new instance of Writer which removes escape
// sequence from Writer.
func NewNonColorable(w io.Writer) io.Writer {
	return &NonColorable{out: w}
}

// Write writes data on console, stripping CSI escape sequences
// (ESC '[' parameters final-byte). A lone ESC followed by any other byte is
// dropped along with that byte.
//
// Note on the contract (kept identical to the original implementation): the
// returned count is always len(data) and the returned error is always nil;
// a write error on the underlying writer silently truncates the output.
func (w *NonColorable) Write(data []byte) (n int, err error) {
	er := bytes.NewReader(data)
	var plaintext bytes.Buffer
loop:
	for {
		c1, err := er.ReadByte()
		if err != nil {
			// End of input: flush whatever plain text is pending.
			plaintext.WriteTo(w.out)
			break loop
		}
		if c1 != 0x1b {
			plaintext.WriteByte(c1)
			continue
		}
		// ESC seen: flush pending plain text before consuming the sequence.
		_, err = plaintext.WriteTo(w.out)
		if err != nil {
			break loop
		}
		c2, err := er.ReadByte()
		if err != nil {
			break loop
		}
		if c2 != 0x5b { // not a CSI ('[') sequence; drop ESC and this byte
			continue
		}
		// Skip parameter/intermediate bytes until the final byte
		// (an ASCII letter or '@') terminates the sequence.
		// (The original buffered these bytes into an unused bytes.Buffer;
		// that dead accumulation and its per-byte allocation are removed.)
		for {
			c, err := er.ReadByte()
			if err != nil {
				break loop
			}
			if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
				break
			}
		}
	}

	return len(data), nil
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/mattn/go-colorable/colorable_others.go
vendor/github.com/mattn/go-colorable/colorable_others.go
//go:build !windows && !appengine
// +build !windows,!appengine

// Non-Windows (non-AppEngine) implementation: these terminals interpret ANSI
// escape sequences natively, so no translation layer is required.
package colorable

import (
	"io"
	"os"

	_ "github.com/mattn/go-isatty"
)

// NewColorable returns new instance of Writer which handles escape sequence.
// The file is returned unmodified because the platform console already
// understands ANSI escape sequences.
func NewColorable(file *os.File) io.Writer {
	if file == nil {
		panic("nil passed instead of *os.File to NewColorable()")
	}
	return file
}

// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
func NewColorableStdout() io.Writer {
	return os.Stdout
}

// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
func NewColorableStderr() io.Writer {
	return os.Stderr
}

// EnableColorsStdout enable colors if possible.
// Colors are always available on this platform, so the flag is simply set
// and the returned restore function is a no-op.
func EnableColorsStdout(enabled *bool) func() {
	if enabled != nil {
		*enabled = true
	}
	return func() {}
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/mattn/go-colorable/colorable_appengine.go
vendor/github.com/mattn/go-colorable/colorable_appengine.go
//go:build appengine
// +build appengine

// App Engine implementation: the sandbox provides no console handle to
// manipulate, so writers are passed through untouched.
package colorable

import (
	"io"
	"os"

	_ "github.com/mattn/go-isatty"
)

// NewColorable returns new instance of Writer which handles escape sequence.
// The file is returned unmodified; no escape-sequence translation is done
// in the App Engine sandbox.
func NewColorable(file *os.File) io.Writer {
	if file == nil {
		panic("nil passed instead of *os.File to NewColorable()")
	}
	return file
}

// NewColorableStdout returns new instance of Writer which handles escape sequence for stdout.
func NewColorableStdout() io.Writer {
	return os.Stdout
}

// NewColorableStderr returns new instance of Writer which handles escape sequence for stderr.
func NewColorableStderr() io.Writer {
	return os.Stderr
}

// EnableColorsStdout enable colors if possible.
// Always reports colors as enabled; the returned restore function is a no-op.
func EnableColorsStdout(enabled *bool) func() {
	if enabled != nil {
		*enabled = true
	}
	return func() {}
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/mattn/go-isatty/isatty_bsd.go
vendor/github.com/mattn/go-isatty/isatty_bsd.go
//go:build (darwin || freebsd || openbsd || netbsd || dragonfly) && !appengine
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine

package isatty

import "golang.org/x/sys/unix"

// IsTerminal return true if the file descriptor is terminal.
// BSD-derived systems (including macOS) expose terminal status via the
// TIOCGETA ioctl, which succeeds only when fd refers to a tty.
func IsTerminal(fd uintptr) bool {
	_, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA)
	return err == nil
}

// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/mattn/go-isatty/isatty_windows.go
vendor/github.com/mattn/go-isatty/isatty_windows.go
//go:build windows && !appengine // +build windows,!appengine package isatty import ( "errors" "strings" "syscall" "unicode/utf16" "unsafe" ) const ( objectNameInfo uintptr = 1 fileNameInfo = 2 fileTypePipe = 3 ) var ( kernel32 = syscall.NewLazyDLL("kernel32.dll") ntdll = syscall.NewLazyDLL("ntdll.dll") procGetConsoleMode = kernel32.NewProc("GetConsoleMode") procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") procGetFileType = kernel32.NewProc("GetFileType") procNtQueryObject = ntdll.NewProc("NtQueryObject") ) func init() { // Check if GetFileInformationByHandleEx is available. if procGetFileInformationByHandleEx.Find() != nil { procGetFileInformationByHandleEx = nil } } // IsTerminal return true if the file descriptor is terminal. func IsTerminal(fd uintptr) bool { var st uint32 r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) return r != 0 && e == 0 } // Check pipe name is used for cygwin/msys2 pty. // Cygwin/MSYS2 PTY has a name like: // \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master func isCygwinPipeName(name string) bool { token := strings.Split(name, "-") if len(token) < 5 { return false } if token[0] != `\msys` && token[0] != `\cygwin` && token[0] != `\Device\NamedPipe\msys` && token[0] != `\Device\NamedPipe\cygwin` { return false } if token[1] == "" { return false } if !strings.HasPrefix(token[2], "pty") { return false } if token[3] != `from` && token[3] != `to` { return false } if token[4] != "master" { return false } return true } // getFileNameByHandle use the undocomented ntdll NtQueryObject to get file full name from file handler // since GetFileInformationByHandleEx is not available under windows Vista and still some old fashion // guys are using Windows XP, this is a workaround for those guys, it will also work on system from // Windows vista to 10 // see https://stackoverflow.com/a/18792477 for details func getFileNameByHandle(fd uintptr) (string, error) { if 
procNtQueryObject == nil { return "", errors.New("ntdll.dll: NtQueryObject not supported") } var buf [4 + syscall.MAX_PATH]uint16 var result int r, _, e := syscall.Syscall6(procNtQueryObject.Addr(), 5, fd, objectNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(2*len(buf)), uintptr(unsafe.Pointer(&result)), 0) if r != 0 { return "", e } return string(utf16.Decode(buf[4 : 4+buf[0]/2])), nil } // IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 // terminal. func IsCygwinTerminal(fd uintptr) bool { if procGetFileInformationByHandleEx == nil { name, err := getFileNameByHandle(fd) if err != nil { return false } return isCygwinPipeName(name) } // Cygwin/msys's pty is a pipe. ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) if ft != fileTypePipe || e != 0 { return false } var buf [2 + syscall.MAX_PATH]uint16 r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), uintptr(len(buf)*2), 0, 0) if r == 0 || e != 0 { return false } l := *(*uint32)(unsafe.Pointer(&buf)) return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/mattn/go-isatty/isatty_solaris.go
vendor/github.com/mattn/go-isatty/isatty_solaris.go
//go:build solaris && !appengine
// +build solaris,!appengine

package isatty

import (
	"golang.org/x/sys/unix"
)

// IsTerminal returns true if the given file descriptor is a terminal.
// Solaris/illumos report terminal status via the TCGETA ioctl (note the
// termio, not termios, variant), which succeeds only for tty descriptors.
// see: https://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libc/port/gen/isatty.c
func IsTerminal(fd uintptr) bool {
	_, err := unix.IoctlGetTermio(int(fd), unix.TCGETA)
	return err == nil
}

// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/mattn/go-isatty/isatty_plan9.go
vendor/github.com/mattn/go-isatty/isatty_plan9.go
//go:build plan9 // +build plan9 package isatty import ( "syscall" ) // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd uintptr) bool { path, err := syscall.Fd2path(int(fd)) if err != nil { return false } return path == "/dev/cons" || path == "/mnt/term/dev/cons" } // IsCygwinTerminal return true if the file descriptor is a cygwin or msys2 // terminal. This is also always false on this environment. func IsCygwinTerminal(fd uintptr) bool { return false }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/mattn/go-isatty/isatty_others.go
vendor/github.com/mattn/go-isatty/isatty_others.go
//go:build appengine || js || nacl || wasm
// +build appengine js nacl wasm

package isatty

// IsTerminal returns true if the file descriptor is terminal which
// is always false on js and appengine classic which is a sandboxed PaaS.
// None of these sandboxed targets expose a tty, so the answer is constant.
func IsTerminal(fd uintptr) bool {
	return false
}

// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/mattn/go-isatty/doc.go
vendor/github.com/mattn/go-isatty/doc.go
// Package isatty reports whether a file descriptor refers to a terminal,
// providing IsTerminal and IsCygwinTerminal across the supported platforms.
package isatty
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/mattn/go-isatty/isatty_tcgets.go
vendor/github.com/mattn/go-isatty/isatty_tcgets.go
//go:build (linux || aix || zos) && !appengine
// +build linux aix zos
// +build !appengine

package isatty

import "golang.org/x/sys/unix"

// IsTerminal return true if the file descriptor is terminal.
// Linux, AIX and z/OS report terminal status via the TCGETS ioctl, which
// succeeds only when fd refers to a tty.
func IsTerminal(fd uintptr) bool {
	_, err := unix.IoctlGetTermios(int(fd), unix.TCGETS)
	return err == nil
}

// IsCygwinTerminal return true if the file descriptor is a cygwin or msys2
// terminal. This is also always false on this environment.
func IsCygwinTerminal(fd uintptr) bool {
	return false
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
package logrus import ( "golang.org/x/sys/unix" ) // IsTerminal returns true if the given file descriptor is a terminal. func isTerminal(fd int) bool { _, err := unix.IoctlGetTermio(fd, unix.TCGETA) return err == nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/terminal_check_js.go
vendor/github.com/sirupsen/logrus/terminal_check_js.go
// +build js package logrus func isTerminal(fd int) bool { return false }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
// +build appengine package logrus import ( "io" ) func checkIfTerminal(w io.Writer) bool { return true }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/entry.go
vendor/github.com/sirupsen/logrus/entry.go
package logrus import ( "bytes" "context" "fmt" "os" "reflect" "runtime" "strings" "sync" "time" ) var ( // qualified package name, cached at first use logrusPackage string // Positions in the call stack when tracing to report the calling method minimumCallerDepth int // Used for caller information initialisation callerInitOnce sync.Once ) const ( maximumCallerDepth int = 25 knownLogrusFrames int = 4 ) func init() { // start at the bottom of the stack before the package-name cache is primed minimumCallerDepth = 1 } // Defines the key when adding errors using WithError. var ErrorKey = "error" // An entry is the final or intermediate Logrus logging entry. It contains all // the fields passed with WithField{,s}. It's finally logged when Trace, Debug, // Info, Warn, Error, Fatal or Panic is called on it. These objects can be // reused and passed around as much as you wish to avoid field duplication. type Entry struct { Logger *Logger // Contains all the fields set by the user. Data Fields // Time at which the log entry was created Time time.Time // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic // This field will be set on entry firing and the value will be equal to the one in Logger struct field. Level Level // Calling method, with package name Caller *runtime.Frame // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic Message string // When formatter is called in entry.log(), a Buffer may be set to entry Buffer *bytes.Buffer // Contains the context set by the user. Useful for hook processing etc. Context context.Context // err may contain a field formatting error err string } func NewEntry(logger *Logger) *Entry { return &Entry{ Logger: logger, // Default is three fields, plus one optional. Give a little extra room. 
Data: make(Fields, 6), } } func (entry *Entry) Dup() *Entry { data := make(Fields, len(entry.Data)) for k, v := range entry.Data { data[k] = v } return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} } // Returns the bytes representation of this entry from the formatter. func (entry *Entry) Bytes() ([]byte, error) { return entry.Logger.Formatter.Format(entry) } // Returns the string representation from the reader and ultimately the // formatter. func (entry *Entry) String() (string, error) { serialized, err := entry.Bytes() if err != nil { return "", err } str := string(serialized) return str, nil } // Add an error as single field (using the key defined in ErrorKey) to the Entry. func (entry *Entry) WithError(err error) *Entry { return entry.WithField(ErrorKey, err) } // Add a context to the Entry. func (entry *Entry) WithContext(ctx context.Context) *Entry { dataCopy := make(Fields, len(entry.Data)) for k, v := range entry.Data { dataCopy[k] = v } return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} } // Add a single field to the Entry. func (entry *Entry) WithField(key string, value interface{}) *Entry { return entry.WithFields(Fields{key: value}) } // Add a map of fields to the Entry. func (entry *Entry) WithFields(fields Fields) *Entry { data := make(Fields, len(entry.Data)+len(fields)) for k, v := range entry.Data { data[k] = v } fieldErr := entry.err for k, v := range fields { isErrField := false if t := reflect.TypeOf(v); t != nil { switch { case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func: isErrField = true } } if isErrField { tmp := fmt.Sprintf("can not add field %q", k) if fieldErr != "" { fieldErr = entry.err + ", " + tmp } else { fieldErr = tmp } } else { data[k] = v } } return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} } // Overrides the time of the Entry. 
func (entry *Entry) WithTime(t time.Time) *Entry { dataCopy := make(Fields, len(entry.Data)) for k, v := range entry.Data { dataCopy[k] = v } return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context} } // getPackageName reduces a fully qualified function name to the package name // There really ought to be to be a better way... func getPackageName(f string) string { for { lastPeriod := strings.LastIndex(f, ".") lastSlash := strings.LastIndex(f, "/") if lastPeriod > lastSlash { f = f[:lastPeriod] } else { break } } return f } // getCaller retrieves the name of the first non-logrus calling function func getCaller() *runtime.Frame { // cache this package's fully-qualified name callerInitOnce.Do(func() { pcs := make([]uintptr, maximumCallerDepth) _ = runtime.Callers(0, pcs) // dynamic get the package name and the minimum caller depth for i := 0; i < maximumCallerDepth; i++ { funcName := runtime.FuncForPC(pcs[i]).Name() if strings.Contains(funcName, "getCaller") { logrusPackage = getPackageName(funcName) break } } minimumCallerDepth = knownLogrusFrames }) // Restrict the lookback frames to avoid runaway lookups pcs := make([]uintptr, maximumCallerDepth) depth := runtime.Callers(minimumCallerDepth, pcs) frames := runtime.CallersFrames(pcs[:depth]) for f, again := frames.Next(); again; f, again = frames.Next() { pkg := getPackageName(f.Function) // If the caller isn't part of this package, we're done if pkg != logrusPackage { return &f //nolint:scopelint } } // if we got here, we failed to find the caller's context return nil } func (entry Entry) HasCaller() (has bool) { return entry.Logger != nil && entry.Logger.ReportCaller && entry.Caller != nil } func (entry *Entry) log(level Level, msg string) { var buffer *bytes.Buffer newEntry := entry.Dup() if newEntry.Time.IsZero() { newEntry.Time = time.Now() } newEntry.Level = level newEntry.Message = msg newEntry.Logger.mu.Lock() reportCaller := newEntry.Logger.ReportCaller bufPool := 
newEntry.getBufferPool() newEntry.Logger.mu.Unlock() if reportCaller { newEntry.Caller = getCaller() } newEntry.fireHooks() buffer = bufPool.Get() defer func() { newEntry.Buffer = nil buffer.Reset() bufPool.Put(buffer) }() buffer.Reset() newEntry.Buffer = buffer newEntry.write() newEntry.Buffer = nil // To avoid Entry#log() returning a value that only would make sense for // panic() to use in Entry#Panic(), we avoid the allocation by checking // directly here. if level <= PanicLevel { panic(newEntry) } } func (entry *Entry) getBufferPool() (pool BufferPool) { if entry.Logger.BufferPool != nil { return entry.Logger.BufferPool } return bufferPool } func (entry *Entry) fireHooks() { var tmpHooks LevelHooks entry.Logger.mu.Lock() tmpHooks = make(LevelHooks, len(entry.Logger.Hooks)) for k, v := range entry.Logger.Hooks { tmpHooks[k] = v } entry.Logger.mu.Unlock() err := tmpHooks.Fire(entry.Level, entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) } } func (entry *Entry) write() { entry.Logger.mu.Lock() defer entry.Logger.mu.Unlock() serialized, err := entry.Logger.Formatter.Format(entry) if err != nil { fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) return } if _, err := entry.Logger.Out.Write(serialized); err != nil { fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) } } // Log will log a message at the level given as parameter. // Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. // For this behaviour Entry.Panic or Entry.Fatal should be used instead. func (entry *Entry) Log(level Level, args ...interface{}) { if entry.Logger.IsLevelEnabled(level) { entry.log(level, fmt.Sprint(args...)) } } func (entry *Entry) Trace(args ...interface{}) { entry.Log(TraceLevel, args...) } func (entry *Entry) Debug(args ...interface{}) { entry.Log(DebugLevel, args...) } func (entry *Entry) Print(args ...interface{}) { entry.Info(args...) 
} func (entry *Entry) Info(args ...interface{}) { entry.Log(InfoLevel, args...) } func (entry *Entry) Warn(args ...interface{}) { entry.Log(WarnLevel, args...) } func (entry *Entry) Warning(args ...interface{}) { entry.Warn(args...) } func (entry *Entry) Error(args ...interface{}) { entry.Log(ErrorLevel, args...) } func (entry *Entry) Fatal(args ...interface{}) { entry.Log(FatalLevel, args...) entry.Logger.Exit(1) } func (entry *Entry) Panic(args ...interface{}) { entry.Log(PanicLevel, args...) } // Entry Printf family functions func (entry *Entry) Logf(level Level, format string, args ...interface{}) { if entry.Logger.IsLevelEnabled(level) { entry.Log(level, fmt.Sprintf(format, args...)) } } func (entry *Entry) Tracef(format string, args ...interface{}) { entry.Logf(TraceLevel, format, args...) } func (entry *Entry) Debugf(format string, args ...interface{}) { entry.Logf(DebugLevel, format, args...) } func (entry *Entry) Infof(format string, args ...interface{}) { entry.Logf(InfoLevel, format, args...) } func (entry *Entry) Printf(format string, args ...interface{}) { entry.Infof(format, args...) } func (entry *Entry) Warnf(format string, args ...interface{}) { entry.Logf(WarnLevel, format, args...) } func (entry *Entry) Warningf(format string, args ...interface{}) { entry.Warnf(format, args...) } func (entry *Entry) Errorf(format string, args ...interface{}) { entry.Logf(ErrorLevel, format, args...) } func (entry *Entry) Fatalf(format string, args ...interface{}) { entry.Logf(FatalLevel, format, args...) entry.Logger.Exit(1) } func (entry *Entry) Panicf(format string, args ...interface{}) { entry.Logf(PanicLevel, format, args...) } // Entry Println family functions func (entry *Entry) Logln(level Level, args ...interface{}) { if entry.Logger.IsLevelEnabled(level) { entry.Log(level, entry.sprintlnn(args...)) } } func (entry *Entry) Traceln(args ...interface{}) { entry.Logln(TraceLevel, args...) 
} func (entry *Entry) Debugln(args ...interface{}) { entry.Logln(DebugLevel, args...) } func (entry *Entry) Infoln(args ...interface{}) { entry.Logln(InfoLevel, args...) } func (entry *Entry) Println(args ...interface{}) { entry.Infoln(args...) } func (entry *Entry) Warnln(args ...interface{}) { entry.Logln(WarnLevel, args...) } func (entry *Entry) Warningln(args ...interface{}) { entry.Warnln(args...) } func (entry *Entry) Errorln(args ...interface{}) { entry.Logln(ErrorLevel, args...) } func (entry *Entry) Fatalln(args ...interface{}) { entry.Logln(FatalLevel, args...) entry.Logger.Exit(1) } func (entry *Entry) Panicln(args ...interface{}) { entry.Logln(PanicLevel, args...) } // Sprintlnn => Sprint no newline. This is to get the behavior of how // fmt.Sprintln where spaces are always added between operands, regardless of // their type. Instead of vendoring the Sprintln implementation to spare a // string allocation, we do the simplest thing. func (entry *Entry) sprintlnn(args ...interface{}) string { msg := fmt.Sprintln(args...) return msg[:len(msg)-1] }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/logrus.go
vendor/github.com/sirupsen/logrus/logrus.go
package logrus import ( "fmt" "log" "strings" ) // Fields type, used to pass to `WithFields`. type Fields map[string]interface{} // Level type type Level uint32 // Convert the Level to a string. E.g. PanicLevel becomes "panic". func (level Level) String() string { if b, err := level.MarshalText(); err == nil { return string(b) } else { return "unknown" } } // ParseLevel takes a string level and returns the Logrus log level constant. func ParseLevel(lvl string) (Level, error) { switch strings.ToLower(lvl) { case "panic": return PanicLevel, nil case "fatal": return FatalLevel, nil case "error": return ErrorLevel, nil case "warn", "warning": return WarnLevel, nil case "info": return InfoLevel, nil case "debug": return DebugLevel, nil case "trace": return TraceLevel, nil } var l Level return l, fmt.Errorf("not a valid logrus Level: %q", lvl) } // UnmarshalText implements encoding.TextUnmarshaler. func (level *Level) UnmarshalText(text []byte) error { l, err := ParseLevel(string(text)) if err != nil { return err } *level = l return nil } func (level Level) MarshalText() ([]byte, error) { switch level { case TraceLevel: return []byte("trace"), nil case DebugLevel: return []byte("debug"), nil case InfoLevel: return []byte("info"), nil case WarnLevel: return []byte("warning"), nil case ErrorLevel: return []byte("error"), nil case FatalLevel: return []byte("fatal"), nil case PanicLevel: return []byte("panic"), nil } return nil, fmt.Errorf("not a valid logrus level %d", level) } // A constant exposing all logging levels var AllLevels = []Level{ PanicLevel, FatalLevel, ErrorLevel, WarnLevel, InfoLevel, DebugLevel, TraceLevel, } // These are the different logging levels. You can set the logging level to log // on your instance of logger, obtained with `logrus.New()`. const ( // PanicLevel level, highest level of severity. Logs and then calls panic with the // message passed to Debug, Info, ... PanicLevel Level = iota // FatalLevel level. Logs and then calls `logger.Exit(1)`. 
It will exit even if the // logging level is set to Panic. FatalLevel // ErrorLevel level. Logs. Used for errors that should definitely be noted. // Commonly used for hooks to send errors to an error tracking service. ErrorLevel // WarnLevel level. Non-critical entries that deserve eyes. WarnLevel // InfoLevel level. General operational entries about what's going on inside the // application. InfoLevel // DebugLevel level. Usually only enabled when debugging. Very verbose logging. DebugLevel // TraceLevel level. Designates finer-grained informational events than the Debug. TraceLevel ) // Won't compile if StdLogger can't be realized by a log.Logger var ( _ StdLogger = &log.Logger{} _ StdLogger = &Entry{} _ StdLogger = &Logger{} ) // StdLogger is what your logrus-enabled library should take, that way // it'll accept a stdlib logger and a logrus logger. There's no standard // interface, this is the closest we get, unfortunately. type StdLogger interface { Print(...interface{}) Printf(string, ...interface{}) Println(...interface{}) Fatal(...interface{}) Fatalf(string, ...interface{}) Fatalln(...interface{}) Panic(...interface{}) Panicf(string, ...interface{}) Panicln(...interface{}) } // The FieldLogger interface generalizes the Entry and Logger types type FieldLogger interface { WithField(key string, value interface{}) *Entry WithFields(fields Fields) *Entry WithError(err error) *Entry Debugf(format string, args ...interface{}) Infof(format string, args ...interface{}) Printf(format string, args ...interface{}) Warnf(format string, args ...interface{}) Warningf(format string, args ...interface{}) Errorf(format string, args ...interface{}) Fatalf(format string, args ...interface{}) Panicf(format string, args ...interface{}) Debug(args ...interface{}) Info(args ...interface{}) Print(args ...interface{}) Warn(args ...interface{}) Warning(args ...interface{}) Error(args ...interface{}) Fatal(args ...interface{}) Panic(args ...interface{}) Debugln(args ...interface{}) 
Infoln(args ...interface{}) Println(args ...interface{}) Warnln(args ...interface{}) Warningln(args ...interface{}) Errorln(args ...interface{}) Fatalln(args ...interface{}) Panicln(args ...interface{}) // IsDebugEnabled() bool // IsInfoEnabled() bool // IsWarnEnabled() bool // IsErrorEnabled() bool // IsFatalEnabled() bool // IsPanicEnabled() bool } // Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is // here for consistancy. Do not use. Use Logger or Entry instead. type Ext1FieldLogger interface { FieldLogger Tracef(format string, args ...interface{}) Trace(args ...interface{}) Traceln(args ...interface{}) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
// +build js nacl plan9 package logrus import ( "io" ) func checkIfTerminal(w io.Writer) bool { return false }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/writer.go
vendor/github.com/sirupsen/logrus/writer.go
package logrus import ( "bufio" "io" "runtime" "strings" ) // Writer at INFO level. See WriterLevel for details. func (logger *Logger) Writer() *io.PipeWriter { return logger.WriterLevel(InfoLevel) } // WriterLevel returns an io.Writer that can be used to write arbitrary text to // the logger at the given log level. Each line written to the writer will be // printed in the usual way using formatters and hooks. The writer is part of an // io.Pipe and it is the callers responsibility to close the writer when done. // This can be used to override the standard library logger easily. func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { return NewEntry(logger).WriterLevel(level) } // Writer returns an io.Writer that writes to the logger at the info log level func (entry *Entry) Writer() *io.PipeWriter { return entry.WriterLevel(InfoLevel) } // WriterLevel returns an io.Writer that writes to the logger at the given log level func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { reader, writer := io.Pipe() var printFunc func(args ...interface{}) // Determine which log function to use based on the specified log level switch level { case TraceLevel: printFunc = entry.Trace case DebugLevel: printFunc = entry.Debug case InfoLevel: printFunc = entry.Info case WarnLevel: printFunc = entry.Warn case ErrorLevel: printFunc = entry.Error case FatalLevel: printFunc = entry.Fatal case PanicLevel: printFunc = entry.Panic default: printFunc = entry.Print } // Start a new goroutine to scan the input and write it to the logger using the specified print function. // It splits the input into chunks of up to 64KB to avoid buffer overflows. 
go entry.writerScanner(reader, printFunc) // Set a finalizer function to close the writer when it is garbage collected runtime.SetFinalizer(writer, writerFinalizer) return writer } // writerScanner scans the input from the reader and writes it to the logger func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { scanner := bufio.NewScanner(reader) // Set the buffer size to the maximum token size to avoid buffer overflows scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize) // Define a split function to split the input into chunks of up to 64KB chunkSize := bufio.MaxScanTokenSize // 64KB splitFunc := func(data []byte, atEOF bool) (int, []byte, error) { if len(data) >= chunkSize { return chunkSize, data[:chunkSize], nil } return bufio.ScanLines(data, atEOF) } // Use the custom split function to split the input scanner.Split(splitFunc) // Scan the input and write it to the logger using the specified print function for scanner.Scan() { printFunc(strings.TrimRight(scanner.Text(), "\r\n")) } // If there was an error while scanning the input, log an error if err := scanner.Err(); err != nil { entry.Errorf("Error while reading from Writer: %s", err) } // Close the reader when we are done reader.Close() } // WriterFinalizer is a finalizer function that closes then given writer when it is garbage collected func writerFinalizer(writer *io.PipeWriter) { writer.Close() }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
vendor/github.com/sirupsen/logrus/terminal_check_windows.go
// +build !appengine,!js,windows package logrus import ( "io" "os" "golang.org/x/sys/windows" ) func checkIfTerminal(w io.Writer) bool { switch v := w.(type) { case *os.File: handle := windows.Handle(v.Fd()) var mode uint32 if err := windows.GetConsoleMode(handle, &mode); err != nil { return false } mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING if err := windows.SetConsoleMode(handle, mode); err != nil { return false } return true } return false }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/formatter.go
vendor/github.com/sirupsen/logrus/formatter.go
package logrus import "time" // Default key names for the default fields const ( defaultTimestampFormat = time.RFC3339 FieldKeyMsg = "msg" FieldKeyLevel = "level" FieldKeyTime = "time" FieldKeyLogrusError = "logrus_error" FieldKeyFunc = "func" FieldKeyFile = "file" ) // The Formatter interface is used to implement a custom Formatter. It takes an // `Entry`. It exposes all the fields, including the default ones: // // * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. // * `entry.Data["time"]`. The timestamp. // * `entry.Data["level"]. The level the entry was logged at. // // Any additional fields added with `WithField` or `WithFields` are also in // `entry.Data`. Format is expected to return an array of bytes which are then // logged to `logger.Out`. type Formatter interface { Format(*Entry) ([]byte, error) } // This is to not silently overwrite `time`, `msg`, `func` and `level` fields when // dumping it. If this code wasn't there doing: // // logrus.WithField("level", 1).Info("hello") // // Would just silently drop the user provided level. Instead with this code // it'll logged as: // // {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} // // It's not exported because it's still using Data in an opinionated way. It's to // avoid code duplication between the two default formatters. 
func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { timeKey := fieldMap.resolve(FieldKeyTime) if t, ok := data[timeKey]; ok { data["fields."+timeKey] = t delete(data, timeKey) } msgKey := fieldMap.resolve(FieldKeyMsg) if m, ok := data[msgKey]; ok { data["fields."+msgKey] = m delete(data, msgKey) } levelKey := fieldMap.resolve(FieldKeyLevel) if l, ok := data[levelKey]; ok { data["fields."+levelKey] = l delete(data, levelKey) } logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) if l, ok := data[logrusErrKey]; ok { data["fields."+logrusErrKey] = l delete(data, logrusErrKey) } // If reportCaller is not set, 'func' will not conflict. if reportCaller { funcKey := fieldMap.resolve(FieldKeyFunc) if l, ok := data[funcKey]; ok { data["fields."+funcKey] = l } fileKey := fieldMap.resolve(FieldKeyFile) if l, ok := data[fileKey]; ok { data["fields."+fileKey] = l } } }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/buffer_pool.go
vendor/github.com/sirupsen/logrus/buffer_pool.go
package logrus import ( "bytes" "sync" ) var ( bufferPool BufferPool ) type BufferPool interface { Put(*bytes.Buffer) Get() *bytes.Buffer } type defaultPool struct { pool *sync.Pool } func (p *defaultPool) Put(buf *bytes.Buffer) { p.pool.Put(buf) } func (p *defaultPool) Get() *bytes.Buffer { return p.pool.Get().(*bytes.Buffer) } // SetBufferPool allows to replace the default logrus buffer pool // to better meets the specific needs of an application. func SetBufferPool(bp BufferPool) { bufferPool = bp } func init() { SetBufferPool(&defaultPool{ pool: &sync.Pool{ New: func() interface{} { return new(bytes.Buffer) }, }, }) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/alt_exit.go
vendor/github.com/sirupsen/logrus/alt_exit.go
package logrus // The following code was sourced and modified from the // https://github.com/tebeka/atexit package governed by the following license: // // Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>. // // Permission is hereby granted, free of charge, to any person obtaining a copy of // this software and associated documentation files (the "Software"), to deal in // the Software without restriction, including without limitation the rights to // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of // the Software, and to permit persons to whom the Software is furnished to do so, // subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import ( "fmt" "os" ) var handlers = []func(){} func runHandler(handler func()) { defer func() { if err := recover(); err != nil { fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) } }() handler() } func runHandlers() { for _, handler := range handlers { runHandler(handler) } } // Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) func Exit(code int) { runHandlers() os.Exit(code) } // RegisterExitHandler appends a Logrus Exit handler to the list of handlers, // call logrus.Exit to invoke all handlers. The handlers will also be invoked when // any Fatal log entry is made. 
// // This method is useful when a caller wishes to use logrus to log a fatal // message but also needs to gracefully shutdown. An example usecase could be // closing database connections, or sending a alert that the application is // closing. func RegisterExitHandler(handler func()) { handlers = append(handlers, handler) } // DeferExitHandler prepends a Logrus Exit handler to the list of handlers, // call logrus.Exit to invoke all handlers. The handlers will also be invoked when // any Fatal log entry is made. // // This method is useful when a caller wishes to use logrus to log a fatal // message but also needs to gracefully shutdown. An example usecase could be // closing database connections, or sending a alert that the application is // closing. func DeferExitHandler(handler func()) { handlers = append([]func(){handler}, handlers...) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/hooks.go
vendor/github.com/sirupsen/logrus/hooks.go
package logrus // A hook to be fired when logging on the logging levels returned from // `Levels()` on your implementation of the interface. Note that this is not // fired in a goroutine or a channel with workers, you should handle such // functionality yourself if your call is non-blocking and you don't wish for // the logging calls for levels returned from `Levels()` to block. type Hook interface { Levels() []Level Fire(*Entry) error } // Internal type for storing the hooks on a logger instance. type LevelHooks map[Level][]Hook // Add a hook to an instance of logger. This is called with // `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. func (hooks LevelHooks) Add(hook Hook) { for _, level := range hook.Levels() { hooks[level] = append(hooks[level], hook) } } // Fire all the hooks for the passed level. Used by `entry.log` to fire // appropriate hooks for a log entry. func (hooks LevelHooks) Fire(level Level, entry *Entry) error { for _, hook := range hooks[level] { if err := hook.Fire(entry); err != nil { return err } } return nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
// +build darwin dragonfly freebsd netbsd openbsd // +build !js package logrus import "golang.org/x/sys/unix" const ioctlReadTermios = unix.TIOCGETA func isTerminal(fd int) bool { _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) return err == nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
vendor/github.com/sirupsen/logrus/terminal_check_unix.go
// +build linux aix zos // +build !js package logrus import "golang.org/x/sys/unix" const ioctlReadTermios = unix.TCGETS func isTerminal(fd int) bool { _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) return err == nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/exported.go
vendor/github.com/sirupsen/logrus/exported.go
package logrus import ( "context" "io" "time" ) var ( // std is the name of the standard logger in stdlib `log` std = New() ) func StandardLogger() *Logger { return std } // SetOutput sets the standard logger output. func SetOutput(out io.Writer) { std.SetOutput(out) } // SetFormatter sets the standard logger formatter. func SetFormatter(formatter Formatter) { std.SetFormatter(formatter) } // SetReportCaller sets whether the standard logger will include the calling // method as a field. func SetReportCaller(include bool) { std.SetReportCaller(include) } // SetLevel sets the standard logger level. func SetLevel(level Level) { std.SetLevel(level) } // GetLevel returns the standard logger level. func GetLevel() Level { return std.GetLevel() } // IsLevelEnabled checks if the log level of the standard logger is greater than the level param func IsLevelEnabled(level Level) bool { return std.IsLevelEnabled(level) } // AddHook adds a hook to the standard logger hooks. func AddHook(hook Hook) { std.AddHook(hook) } // WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. func WithError(err error) *Entry { return std.WithField(ErrorKey, err) } // WithContext creates an entry from the standard logger and adds a context to it. func WithContext(ctx context.Context) *Entry { return std.WithContext(ctx) } // WithField creates an entry from the standard logger and adds a field to // it. If you want multiple fields, use `WithFields`. // // Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal // or Panic on the Entry it returns. func WithField(key string, value interface{}) *Entry { return std.WithField(key, value) } // WithFields creates an entry from the standard logger and adds multiple // fields to it. This is simply a helper for `WithField`, invoking it // once for each field. // // Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal // or Panic on the Entry it returns. 
func WithFields(fields Fields) *Entry { return std.WithFields(fields) } // WithTime creates an entry from the standard logger and overrides the time of // logs generated with it. // // Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal // or Panic on the Entry it returns. func WithTime(t time.Time) *Entry { return std.WithTime(t) } // Trace logs a message at level Trace on the standard logger. func Trace(args ...interface{}) { std.Trace(args...) } // Debug logs a message at level Debug on the standard logger. func Debug(args ...interface{}) { std.Debug(args...) } // Print logs a message at level Info on the standard logger. func Print(args ...interface{}) { std.Print(args...) } // Info logs a message at level Info on the standard logger. func Info(args ...interface{}) { std.Info(args...) } // Warn logs a message at level Warn on the standard logger. func Warn(args ...interface{}) { std.Warn(args...) } // Warning logs a message at level Warn on the standard logger. func Warning(args ...interface{}) { std.Warning(args...) } // Error logs a message at level Error on the standard logger. func Error(args ...interface{}) { std.Error(args...) } // Panic logs a message at level Panic on the standard logger. func Panic(args ...interface{}) { std.Panic(args...) } // Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatal(args ...interface{}) { std.Fatal(args...) } // TraceFn logs a message from a func at level Trace on the standard logger. func TraceFn(fn LogFunction) { std.TraceFn(fn) } // DebugFn logs a message from a func at level Debug on the standard logger. func DebugFn(fn LogFunction) { std.DebugFn(fn) } // PrintFn logs a message from a func at level Info on the standard logger. func PrintFn(fn LogFunction) { std.PrintFn(fn) } // InfoFn logs a message from a func at level Info on the standard logger. 
func InfoFn(fn LogFunction) { std.InfoFn(fn) } // WarnFn logs a message from a func at level Warn on the standard logger. func WarnFn(fn LogFunction) { std.WarnFn(fn) } // WarningFn logs a message from a func at level Warn on the standard logger. func WarningFn(fn LogFunction) { std.WarningFn(fn) } // ErrorFn logs a message from a func at level Error on the standard logger. func ErrorFn(fn LogFunction) { std.ErrorFn(fn) } // PanicFn logs a message from a func at level Panic on the standard logger. func PanicFn(fn LogFunction) { std.PanicFn(fn) } // FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1. func FatalFn(fn LogFunction) { std.FatalFn(fn) } // Tracef logs a message at level Trace on the standard logger. func Tracef(format string, args ...interface{}) { std.Tracef(format, args...) } // Debugf logs a message at level Debug on the standard logger. func Debugf(format string, args ...interface{}) { std.Debugf(format, args...) } // Printf logs a message at level Info on the standard logger. func Printf(format string, args ...interface{}) { std.Printf(format, args...) } // Infof logs a message at level Info on the standard logger. func Infof(format string, args ...interface{}) { std.Infof(format, args...) } // Warnf logs a message at level Warn on the standard logger. func Warnf(format string, args ...interface{}) { std.Warnf(format, args...) } // Warningf logs a message at level Warn on the standard logger. func Warningf(format string, args ...interface{}) { std.Warningf(format, args...) } // Errorf logs a message at level Error on the standard logger. func Errorf(format string, args ...interface{}) { std.Errorf(format, args...) } // Panicf logs a message at level Panic on the standard logger. func Panicf(format string, args ...interface{}) { std.Panicf(format, args...) } // Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. 
func Fatalf(format string, args ...interface{}) { std.Fatalf(format, args...) } // Traceln logs a message at level Trace on the standard logger. func Traceln(args ...interface{}) { std.Traceln(args...) } // Debugln logs a message at level Debug on the standard logger. func Debugln(args ...interface{}) { std.Debugln(args...) } // Println logs a message at level Info on the standard logger. func Println(args ...interface{}) { std.Println(args...) } // Infoln logs a message at level Info on the standard logger. func Infoln(args ...interface{}) { std.Infoln(args...) } // Warnln logs a message at level Warn on the standard logger. func Warnln(args ...interface{}) { std.Warnln(args...) } // Warningln logs a message at level Warn on the standard logger. func Warningln(args ...interface{}) { std.Warningln(args...) } // Errorln logs a message at level Error on the standard logger. func Errorln(args ...interface{}) { std.Errorln(args...) } // Panicln logs a message at level Panic on the standard logger. func Panicln(args ...interface{}) { std.Panicln(args...) } // Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatalln(args ...interface{}) { std.Fatalln(args...) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/doc.go
vendor/github.com/sirupsen/logrus/doc.go
/* Package logrus is a structured logger for Go, completely API compatible with the standard library logger. The simplest way to use Logrus is simply the package-level exported logger: package main import ( log "github.com/sirupsen/logrus" ) func main() { log.WithFields(log.Fields{ "animal": "walrus", "number": 1, "size": 10, }).Info("A walrus appears") } Output: time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 For a full guide visit https://github.com/sirupsen/logrus */ package logrus
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/logger.go
vendor/github.com/sirupsen/logrus/logger.go
package logrus import ( "context" "io" "os" "sync" "sync/atomic" "time" ) // LogFunction For big messages, it can be more efficient to pass a function // and only call it if the log level is actually enables rather than // generating the log message and then checking if the level is enabled type LogFunction func() []interface{} type Logger struct { // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a // file, or leave it default which is `os.Stderr`. You can also set this to // something more adventurous, such as logging to Kafka. Out io.Writer // Hooks for the logger instance. These allow firing events based on logging // levels and log entries. For example, to send errors to an error tracking // service, log to StatsD or dump the core on fatal errors. Hooks LevelHooks // All log entries pass through the formatter before logged to Out. The // included formatters are `TextFormatter` and `JSONFormatter` for which // TextFormatter is the default. In development (when a TTY is attached) it // logs with colors, but to a file it wouldn't. You can easily implement your // own that implements the `Formatter` interface, see the `README` or included // formatters for examples. Formatter Formatter // Flag for whether to log caller info (off by default) ReportCaller bool // The logging level the logger should log at. This is typically (and defaults // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be // logged. Level Level // Used to sync writing to the log. Locking is enabled by Default mu MutexWrap // Reusable empty entry entryPool sync.Pool // Function to exit the application, defaults to `os.Exit()` ExitFunc exitFunc // The buffer pool used to format the log. If it is nil, the default global // buffer pool will be used. 
BufferPool BufferPool } type exitFunc func(int) type MutexWrap struct { lock sync.Mutex disabled bool } func (mw *MutexWrap) Lock() { if !mw.disabled { mw.lock.Lock() } } func (mw *MutexWrap) Unlock() { if !mw.disabled { mw.lock.Unlock() } } func (mw *MutexWrap) Disable() { mw.disabled = true } // Creates a new logger. Configuration should be set by changing `Formatter`, // `Out` and `Hooks` directly on the default logger instance. You can also just // instantiate your own: // // var log = &logrus.Logger{ // Out: os.Stderr, // Formatter: new(logrus.TextFormatter), // Hooks: make(logrus.LevelHooks), // Level: logrus.DebugLevel, // } // // It's recommended to make this a global instance called `log`. func New() *Logger { return &Logger{ Out: os.Stderr, Formatter: new(TextFormatter), Hooks: make(LevelHooks), Level: InfoLevel, ExitFunc: os.Exit, ReportCaller: false, } } func (logger *Logger) newEntry() *Entry { entry, ok := logger.entryPool.Get().(*Entry) if ok { return entry } return NewEntry(logger) } func (logger *Logger) releaseEntry(entry *Entry) { entry.Data = map[string]interface{}{} logger.entryPool.Put(entry) } // WithField allocates a new entry and adds a field to it. // Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to // this new returned entry. // If you want multiple fields, use `WithFields`. func (logger *Logger) WithField(key string, value interface{}) *Entry { entry := logger.newEntry() defer logger.releaseEntry(entry) return entry.WithField(key, value) } // Adds a struct of fields to the log entry. All it does is call `WithField` for // each `Field`. func (logger *Logger) WithFields(fields Fields) *Entry { entry := logger.newEntry() defer logger.releaseEntry(entry) return entry.WithFields(fields) } // Add an error as single field to the log entry. All it does is call // `WithError` for the given `error`. 
func (logger *Logger) WithError(err error) *Entry { entry := logger.newEntry() defer logger.releaseEntry(entry) return entry.WithError(err) } // Add a context to the log entry. func (logger *Logger) WithContext(ctx context.Context) *Entry { entry := logger.newEntry() defer logger.releaseEntry(entry) return entry.WithContext(ctx) } // Overrides the time of the log entry. func (logger *Logger) WithTime(t time.Time) *Entry { entry := logger.newEntry() defer logger.releaseEntry(entry) return entry.WithTime(t) } func (logger *Logger) Logf(level Level, format string, args ...interface{}) { if logger.IsLevelEnabled(level) { entry := logger.newEntry() entry.Logf(level, format, args...) logger.releaseEntry(entry) } } func (logger *Logger) Tracef(format string, args ...interface{}) { logger.Logf(TraceLevel, format, args...) } func (logger *Logger) Debugf(format string, args ...interface{}) { logger.Logf(DebugLevel, format, args...) } func (logger *Logger) Infof(format string, args ...interface{}) { logger.Logf(InfoLevel, format, args...) } func (logger *Logger) Printf(format string, args ...interface{}) { entry := logger.newEntry() entry.Printf(format, args...) logger.releaseEntry(entry) } func (logger *Logger) Warnf(format string, args ...interface{}) { logger.Logf(WarnLevel, format, args...) } func (logger *Logger) Warningf(format string, args ...interface{}) { logger.Warnf(format, args...) } func (logger *Logger) Errorf(format string, args ...interface{}) { logger.Logf(ErrorLevel, format, args...) } func (logger *Logger) Fatalf(format string, args ...interface{}) { logger.Logf(FatalLevel, format, args...) logger.Exit(1) } func (logger *Logger) Panicf(format string, args ...interface{}) { logger.Logf(PanicLevel, format, args...) } // Log will log a message at the level given as parameter. // Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. // For this behaviour Logger.Panic or Logger.Fatal should be used instead. 
func (logger *Logger) Log(level Level, args ...interface{}) { if logger.IsLevelEnabled(level) { entry := logger.newEntry() entry.Log(level, args...) logger.releaseEntry(entry) } } func (logger *Logger) LogFn(level Level, fn LogFunction) { if logger.IsLevelEnabled(level) { entry := logger.newEntry() entry.Log(level, fn()...) logger.releaseEntry(entry) } } func (logger *Logger) Trace(args ...interface{}) { logger.Log(TraceLevel, args...) } func (logger *Logger) Debug(args ...interface{}) { logger.Log(DebugLevel, args...) } func (logger *Logger) Info(args ...interface{}) { logger.Log(InfoLevel, args...) } func (logger *Logger) Print(args ...interface{}) { entry := logger.newEntry() entry.Print(args...) logger.releaseEntry(entry) } func (logger *Logger) Warn(args ...interface{}) { logger.Log(WarnLevel, args...) } func (logger *Logger) Warning(args ...interface{}) { logger.Warn(args...) } func (logger *Logger) Error(args ...interface{}) { logger.Log(ErrorLevel, args...) } func (logger *Logger) Fatal(args ...interface{}) { logger.Log(FatalLevel, args...) logger.Exit(1) } func (logger *Logger) Panic(args ...interface{}) { logger.Log(PanicLevel, args...) } func (logger *Logger) TraceFn(fn LogFunction) { logger.LogFn(TraceLevel, fn) } func (logger *Logger) DebugFn(fn LogFunction) { logger.LogFn(DebugLevel, fn) } func (logger *Logger) InfoFn(fn LogFunction) { logger.LogFn(InfoLevel, fn) } func (logger *Logger) PrintFn(fn LogFunction) { entry := logger.newEntry() entry.Print(fn()...) 
logger.releaseEntry(entry) } func (logger *Logger) WarnFn(fn LogFunction) { logger.LogFn(WarnLevel, fn) } func (logger *Logger) WarningFn(fn LogFunction) { logger.WarnFn(fn) } func (logger *Logger) ErrorFn(fn LogFunction) { logger.LogFn(ErrorLevel, fn) } func (logger *Logger) FatalFn(fn LogFunction) { logger.LogFn(FatalLevel, fn) logger.Exit(1) } func (logger *Logger) PanicFn(fn LogFunction) { logger.LogFn(PanicLevel, fn) } func (logger *Logger) Logln(level Level, args ...interface{}) { if logger.IsLevelEnabled(level) { entry := logger.newEntry() entry.Logln(level, args...) logger.releaseEntry(entry) } } func (logger *Logger) Traceln(args ...interface{}) { logger.Logln(TraceLevel, args...) } func (logger *Logger) Debugln(args ...interface{}) { logger.Logln(DebugLevel, args...) } func (logger *Logger) Infoln(args ...interface{}) { logger.Logln(InfoLevel, args...) } func (logger *Logger) Println(args ...interface{}) { entry := logger.newEntry() entry.Println(args...) logger.releaseEntry(entry) } func (logger *Logger) Warnln(args ...interface{}) { logger.Logln(WarnLevel, args...) } func (logger *Logger) Warningln(args ...interface{}) { logger.Warnln(args...) } func (logger *Logger) Errorln(args ...interface{}) { logger.Logln(ErrorLevel, args...) } func (logger *Logger) Fatalln(args ...interface{}) { logger.Logln(FatalLevel, args...) logger.Exit(1) } func (logger *Logger) Panicln(args ...interface{}) { logger.Logln(PanicLevel, args...) } func (logger *Logger) Exit(code int) { runHandlers() if logger.ExitFunc == nil { logger.ExitFunc = os.Exit } logger.ExitFunc(code) } //When file is opened with appending mode, it's safe to //write concurrently to a file (within 4k message on Linux). //In these cases user can choose to disable the lock. func (logger *Logger) SetNoLock() { logger.mu.Disable() } func (logger *Logger) level() Level { return Level(atomic.LoadUint32((*uint32)(&logger.Level))) } // SetLevel sets the logger level. 
func (logger *Logger) SetLevel(level Level) { atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) } // GetLevel returns the logger level. func (logger *Logger) GetLevel() Level { return logger.level() } // AddHook adds a hook to the logger hooks. func (logger *Logger) AddHook(hook Hook) { logger.mu.Lock() defer logger.mu.Unlock() logger.Hooks.Add(hook) } // IsLevelEnabled checks if the log level of the logger is greater than the level param func (logger *Logger) IsLevelEnabled(level Level) bool { return logger.level() >= level } // SetFormatter sets the logger formatter. func (logger *Logger) SetFormatter(formatter Formatter) { logger.mu.Lock() defer logger.mu.Unlock() logger.Formatter = formatter } // SetOutput sets the logger output. func (logger *Logger) SetOutput(output io.Writer) { logger.mu.Lock() defer logger.mu.Unlock() logger.Out = output } func (logger *Logger) SetReportCaller(reportCaller bool) { logger.mu.Lock() defer logger.mu.Unlock() logger.ReportCaller = reportCaller } // ReplaceHooks replaces the logger hooks and returns the old ones func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { logger.mu.Lock() oldHooks := logger.Hooks logger.Hooks = hooks logger.mu.Unlock() return oldHooks } // SetBufferPool sets the logger buffer pool. func (logger *Logger) SetBufferPool(pool BufferPool) { logger.mu.Lock() defer logger.mu.Unlock() logger.BufferPool = pool }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/json_formatter.go
vendor/github.com/sirupsen/logrus/json_formatter.go
package logrus import ( "bytes" "encoding/json" "fmt" "runtime" ) type fieldKey string // FieldMap allows customization of the key names for default fields. type FieldMap map[fieldKey]string func (f FieldMap) resolve(key fieldKey) string { if k, ok := f[key]; ok { return k } return string(key) } // JSONFormatter formats logs into parsable json type JSONFormatter struct { // TimestampFormat sets the format used for marshaling timestamps. // The format to use is the same than for time.Format or time.Parse from the standard // library. // The standard Library already provides a set of predefined format. TimestampFormat string // DisableTimestamp allows disabling automatic timestamps in output DisableTimestamp bool // DisableHTMLEscape allows disabling html escaping in output DisableHTMLEscape bool // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. DataKey string // FieldMap allows users to customize the names of keys for default fields. // As an example: // formatter := &JSONFormatter{ // FieldMap: FieldMap{ // FieldKeyTime: "@timestamp", // FieldKeyLevel: "@level", // FieldKeyMsg: "@message", // FieldKeyFunc: "@caller", // }, // } FieldMap FieldMap // CallerPrettyfier can be set by the user to modify the content // of the function and file keys in the json data when ReportCaller is // activated. If any of the returned value is the empty string the // corresponding key will be removed from json fields. 
CallerPrettyfier func(*runtime.Frame) (function string, file string) // PrettyPrint will indent all json logs PrettyPrint bool } // Format renders a single log entry func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { data := make(Fields, len(entry.Data)+4) for k, v := range entry.Data { switch v := v.(type) { case error: // Otherwise errors are ignored by `encoding/json` // https://github.com/sirupsen/logrus/issues/137 data[k] = v.Error() default: data[k] = v } } if f.DataKey != "" { newData := make(Fields, 4) newData[f.DataKey] = data data = newData } prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) timestampFormat := f.TimestampFormat if timestampFormat == "" { timestampFormat = defaultTimestampFormat } if entry.err != "" { data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err } if !f.DisableTimestamp { data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) } data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() if entry.HasCaller() { funcVal := entry.Caller.Function fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) if f.CallerPrettyfier != nil { funcVal, fileVal = f.CallerPrettyfier(entry.Caller) } if funcVal != "" { data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal } if fileVal != "" { data[f.FieldMap.resolve(FieldKeyFile)] = fileVal } } var b *bytes.Buffer if entry.Buffer != nil { b = entry.Buffer } else { b = &bytes.Buffer{} } encoder := json.NewEncoder(b) encoder.SetEscapeHTML(!f.DisableHTMLEscape) if f.PrettyPrint { encoder.SetIndent("", " ") } if err := encoder.Encode(data); err != nil { return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) } return b.Bytes(), nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/text_formatter.go
vendor/github.com/sirupsen/logrus/text_formatter.go
package logrus import ( "bytes" "fmt" "os" "runtime" "sort" "strconv" "strings" "sync" "time" "unicode/utf8" ) const ( red = 31 yellow = 33 blue = 36 gray = 37 ) var baseTimestamp time.Time func init() { baseTimestamp = time.Now() } // TextFormatter formats logs into text type TextFormatter struct { // Set to true to bypass checking for a TTY before outputting colors. ForceColors bool // Force disabling colors. DisableColors bool // Force quoting of all values ForceQuote bool // DisableQuote disables quoting for all values. // DisableQuote will have a lower priority than ForceQuote. // If both of them are set to true, quote will be forced on all values. DisableQuote bool // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ EnvironmentOverrideColors bool // Disable timestamp logging. useful when output is redirected to logging // system that already adds timestamps. DisableTimestamp bool // Enable logging the full timestamp when a TTY is attached instead of just // the time passed since beginning of execution. FullTimestamp bool // TimestampFormat to use for display when a full timestamp is printed. // The format to use is the same than for time.Format or time.Parse from the standard // library. // The standard Library already provides a set of predefined format. TimestampFormat string // The fields are sorted by default for a consistent output. For applications // that log extremely frequently and don't use the JSON formatter this may not // be desired. DisableSorting bool // The keys sorting function, when uninitialized it uses sort.Strings. SortingFunc func([]string) // Disables the truncation of the level text to 4 characters. 
DisableLevelTruncation bool // PadLevelText Adds padding the level text so that all the levels output at the same length // PadLevelText is a superset of the DisableLevelTruncation option PadLevelText bool // QuoteEmptyFields will wrap empty fields in quotes if true QuoteEmptyFields bool // Whether the logger's out is to a terminal isTerminal bool // FieldMap allows users to customize the names of keys for default fields. // As an example: // formatter := &TextFormatter{ // FieldMap: FieldMap{ // FieldKeyTime: "@timestamp", // FieldKeyLevel: "@level", // FieldKeyMsg: "@message"}} FieldMap FieldMap // CallerPrettyfier can be set by the user to modify the content // of the function and file keys in the data when ReportCaller is // activated. If any of the returned value is the empty string the // corresponding key will be removed from fields. CallerPrettyfier func(*runtime.Frame) (function string, file string) terminalInitOnce sync.Once // The max length of the level text, generated dynamically on init levelTextMaxLength int } func (f *TextFormatter) init(entry *Entry) { if entry.Logger != nil { f.isTerminal = checkIfTerminal(entry.Logger.Out) } // Get the max length of the level text for _, level := range AllLevels { levelTextLength := utf8.RuneCount([]byte(level.String())) if levelTextLength > f.levelTextMaxLength { f.levelTextMaxLength = levelTextLength } } } func (f *TextFormatter) isColored() bool { isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) if f.EnvironmentOverrideColors { switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); { case ok && force != "0": isColored = true case ok && force == "0", os.Getenv("CLICOLOR") == "0": isColored = false } } return isColored && !f.DisableColors } // Format renders a single log entry func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { data := make(Fields) for k, v := range entry.Data { data[k] = v } prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) keys := make([]string, 0, 
len(data)) for k := range data { keys = append(keys, k) } var funcVal, fileVal string fixedKeys := make([]string, 0, 4+len(data)) if !f.DisableTimestamp { fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) } fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) if entry.Message != "" { fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) } if entry.err != "" { fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) } if entry.HasCaller() { if f.CallerPrettyfier != nil { funcVal, fileVal = f.CallerPrettyfier(entry.Caller) } else { funcVal = entry.Caller.Function fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) } if funcVal != "" { fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc)) } if fileVal != "" { fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile)) } } if !f.DisableSorting { if f.SortingFunc == nil { sort.Strings(keys) fixedKeys = append(fixedKeys, keys...) } else { if !f.isColored() { fixedKeys = append(fixedKeys, keys...) f.SortingFunc(fixedKeys) } else { f.SortingFunc(keys) } } } else { fixedKeys = append(fixedKeys, keys...) 
} var b *bytes.Buffer if entry.Buffer != nil { b = entry.Buffer } else { b = &bytes.Buffer{} } f.terminalInitOnce.Do(func() { f.init(entry) }) timestampFormat := f.TimestampFormat if timestampFormat == "" { timestampFormat = defaultTimestampFormat } if f.isColored() { f.printColored(b, entry, keys, data, timestampFormat) } else { for _, key := range fixedKeys { var value interface{} switch { case key == f.FieldMap.resolve(FieldKeyTime): value = entry.Time.Format(timestampFormat) case key == f.FieldMap.resolve(FieldKeyLevel): value = entry.Level.String() case key == f.FieldMap.resolve(FieldKeyMsg): value = entry.Message case key == f.FieldMap.resolve(FieldKeyLogrusError): value = entry.err case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): value = funcVal case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): value = fileVal default: value = data[key] } f.appendKeyValue(b, key, value) } } b.WriteByte('\n') return b.Bytes(), nil } func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { var levelColor int switch entry.Level { case DebugLevel, TraceLevel: levelColor = gray case WarnLevel: levelColor = yellow case ErrorLevel, FatalLevel, PanicLevel: levelColor = red case InfoLevel: levelColor = blue default: levelColor = blue } levelText := strings.ToUpper(entry.Level.String()) if !f.DisableLevelTruncation && !f.PadLevelText { levelText = levelText[0:4] } if f.PadLevelText { // Generates the format string used in the next line, for example "%-6s" or "%-7s". // Based on the max level text length. 
formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s" // Formats the level text by appending spaces up to the max length, for example: // - "INFO " // - "WARNING" levelText = fmt.Sprintf(formatString, levelText) } // Remove a single newline if it already exists in the message to keep // the behavior of logrus text_formatter the same as the stdlib log package entry.Message = strings.TrimSuffix(entry.Message, "\n") caller := "" if entry.HasCaller() { funcVal := fmt.Sprintf("%s()", entry.Caller.Function) fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) if f.CallerPrettyfier != nil { funcVal, fileVal = f.CallerPrettyfier(entry.Caller) } if fileVal == "" { caller = funcVal } else if funcVal == "" { caller = fileVal } else { caller = fileVal + " " + funcVal } } switch { case f.DisableTimestamp: fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) case !f.FullTimestamp: fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) default: fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) } for _, k := range keys { v := data[k] fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) f.appendValue(b, v) } } func (f *TextFormatter) needsQuoting(text string) bool { if f.ForceQuote { return true } if f.QuoteEmptyFields && len(text) == 0 { return true } if f.DisableQuote { return false } for _, ch := range text { if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') || ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { return true } } return false } func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { if b.Len() > 0 { b.WriteByte(' ') } b.WriteString(key) b.WriteByte('=') f.appendValue(b, value) } func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { stringVal, ok := value.(string) if !ok { stringVal = fmt.Sprint(value) } if !f.needsQuoting(stringVal) { b.WriteString(stringVal) } else { b.WriteString(fmt.Sprintf("%q", stringVal)) } }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
// +build !appengine,!js,!windows,!nacl,!plan9 package logrus import ( "io" "os" ) func checkIfTerminal(w io.Writer) bool { switch v := w.(type) { case *os.File: return isTerminal(int(v.Fd())) default: return false } }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/generics/orderedset/orderedset.go
vendor/github.com/jesseduffield/generics/orderedset/orderedset.go
package orderedset import ( orderedmap "github.com/wk8/go-ordered-map/v2" ) type OrderedSet[T comparable] struct { om *orderedmap.OrderedMap[T, bool] } func New[T comparable]() *OrderedSet[T] { return &OrderedSet[T]{om: orderedmap.New[T, bool]()} } func NewFromSlice[T comparable](slice []T) *OrderedSet[T] { result := &OrderedSet[T]{om: orderedmap.New[T, bool](len(slice))} result.Add(slice...) return result } func (os *OrderedSet[T]) Add(values ...T) { for _, value := range values { os.om.Set(value, true) } } func (os *OrderedSet[T]) Remove(value T) { os.om.Delete(value) } func (os *OrderedSet[T]) RemoveSlice(slice []T) { for _, value := range slice { os.Remove(value) } } func (os *OrderedSet[T]) Includes(value T) bool { return os.om.Value(value) } func (os *OrderedSet[T]) Len() int { return os.om.Len() } func (os *OrderedSet[T]) ToSliceFromOldest() []T { // TODO: can be simplified to // return os.om.KeysFromOldest() // when we update to a newer version of go-ordered-map result := make([]T, 0, os.Len()) for pair := os.om.Oldest(); pair != nil; pair = pair.Next() { result = append(result, pair.Key) } return result } func (os *OrderedSet[T]) ToSliceFromNewest() []T { // TODO: can be simplified to // return os.om.KeysFromNewest() // when we update to a newer version of go-ordered-map result := make([]T, 0, os.Len()) for pair := os.om.Newest(); pair != nil; pair = pair.Prev() { result = append(result, pair.Key) } return result }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/generics/maps/maps.go
vendor/github.com/jesseduffield/generics/maps/maps.go
package maps func Keys[Key comparable, Value any](m map[Key]Value) []Key { keys := make([]Key, 0, len(m)) for key := range m { keys = append(keys, key) } return keys } func Values[Key comparable, Value any](m map[Key]Value) []Value { values := make([]Value, 0, len(m)) for _, value := range m { values = append(values, value) } return values } func TransformValues[Key comparable, Value any, NewValue any]( m map[Key]Value, fn func(Value) NewValue, ) map[Key]NewValue { output := make(map[Key]NewValue, len(m)) for key, value := range m { output[key] = fn(value) } return output } func TransformKeys[Key comparable, Value any, NewKey comparable](m map[Key]Value, fn func(Key) NewKey) map[NewKey]Value { output := make(map[NewKey]Value, len(m)) for key, value := range m { output[fn(key)] = value } return output } func MapToSlice[Key comparable, Value any, Mapped any](m map[Key]Value, f func(Key, Value) Mapped) []Mapped { output := make([]Mapped, 0, len(m)) for key, value := range m { output = append(output, f(key, value)) } return output } func Filter[Key comparable, Value any](m map[Key]Value, f func(Key, Value) bool) map[Key]Value { output := map[Key]Value{} for key, value := range m { if f(key, value) { output[key] = value } } return output }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/generics/set/set.go
vendor/github.com/jesseduffield/generics/set/set.go
package set import "github.com/jesseduffield/generics/maps" type Set[T comparable] struct { hashMap map[T]bool } func New[T comparable]() *Set[T] { return &Set[T]{hashMap: make(map[T]bool)} } func NewFromSlice[T comparable](slice []T) *Set[T] { result := &Set[T]{hashMap: make(map[T]bool, len(slice))} result.Add(slice...) return result } func (s *Set[T]) Add(values ...T) { for _, value := range values { s.hashMap[value] = true } } func (s *Set[T]) Remove(value T) { delete(s.hashMap, value) } func (s *Set[T]) RemoveSlice(slice []T) { for _, value := range slice { s.Remove(value) } } func (s *Set[T]) Includes(value T) bool { return s.hashMap[value] } func (s *Set[T]) Len() int { return len(s.hashMap) } // output slice is not necessarily in the same order that items were added func (s *Set[T]) ToSlice() []T { return maps.Keys(s.hashMap) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/gocui/loader.go
vendor/github.com/jesseduffield/gocui/loader.go
package gocui import "time" func (v *View) loaderLines() [][]cell { duplicate := make([][]cell, len(v.lines)) for i := range v.lines { if i < len(v.lines)-1 { duplicate[i] = make([]cell, len(v.lines[i])) copy(duplicate[i], v.lines[i]) } else { duplicate[i] = make([]cell, len(v.lines[i])+2) copy(duplicate[i], v.lines[i]) duplicate[i][len(duplicate[i])-2] = cell{chr: " "} duplicate[i][len(duplicate[i])-1] = Loader() } } return duplicate } // Loader can show a loading animation func Loader() cell { frames := []string{"|", "/", "-", "\\"} now := time.Now() nanos := now.UnixNano() index := nanos / 50000000 % int64(len(frames)) return cell{ chr: frames[index], } }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/gocui/view.go
vendor/github.com/jesseduffield/gocui/view.go
// Copyright 2014 The gocui Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gocui import ( "fmt" "io" "strings" "sync" "unicode" "unicode/utf8" "github.com/gdamore/tcell/v2" "github.com/rivo/uniseg" ) // Constants for overlapping edges const ( TOP = 1 // view is overlapping at top edge BOTTOM = 2 // view is overlapping at bottom edge LEFT = 4 // view is overlapping at left edge RIGHT = 8 // view is overlapping at right edge ) // A View is a window. It maintains its own internal buffer and cursor // position. type View struct { name string x0, y0, x1, y1 int // left top right bottom ox, oy int // view offsets cx, cy int // cursor position rx, ry int // Read() offsets wx, wy int // Write() offsets lines [][]cell // All the data outMode OutputMode // The y position of the first line of a range selection. // This is not relative to the view's origin: it is relative to the first line // of the view's content, so you can scroll the view and this value will remain // the same, unlike the view's cy value. // A value of -1 means that there is no range selection. // This value can be greater than the selected line index, in the event that // a user starts a range select and then moves the cursor up. rangeSelectStartY int // readBuffer is used for storing unread bytes readBuffer []byte // tained is true if the viewLines must be updated tainted bool // the last position that the mouse was hovering over; nil if the mouse is outside of // this view, or not hovering over a cell lastHoverPosition *pos // the location of the hyperlink that the mouse is currently hovering over; nil if none hoveredHyperlink *SearchPosition // internal representation of the view's buffer. We will keep viewLines around // from a previous render until we explicitly set them to nil, allowing us to // render the same content twice without flicker. 
Wherever we want to render // something without any chance of old content appearing (e.g. when actually // rendering new content or if the view is resized) we should set tainted to // true and viewLines to nil viewLines []viewLine // If the last character written was a newline, we don't write it but // instead set pendingNewline to true. If more text is written, we write the // newline then. This is to avoid having an extra blank at the end of the view. pendingNewline bool // writeMutex protects locks the write process writeMutex sync.Mutex // ei is used to decode ESC sequences on Write ei *escapeInterpreter // Visible specifies whether the view is visible. Visible bool // BgColor and FgColor allow to configure the background and foreground // colors of the View. BgColor, FgColor Attribute // SelBgColor and SelFgColor are used to configure the background and // foreground colors of the selected line, when it is highlighted. SelBgColor, SelFgColor Attribute // InactiveViewSelBgColor is used to configure the background color of the // selected line, when it is highlighted but the view doesn't have the // focus. InactiveViewSelBgColor Attribute // If Editable is true, keystrokes will be added to the view's internal // buffer at the cursor position. Editable bool // Editor allows to define the editor that manages the editing mode, // including keybindings or cursor behaviour. DefaultEditor is used by // default. Editor Editor // Overwrite enables or disables the overwrite mode of the view. Overwrite bool // If Highlight is true, Sel{Bg,Fg}Colors will be used // for the line under the cursor position. Highlight bool // If HighlightInactive is true, InavtiveViewSel{Bg,Fg}Colors will be used // instead of Sel{Bg,Fg}Colors for highlighting selected lines. HighlightInactive bool // If Frame is true, a border will be drawn around the view. Frame bool // FrameColor allow to configure the color of the Frame when it is not highlighted. 
FrameColor Attribute // FrameRunes allows to define custom runes for the frame edges. // The rune slice can be defined with 3 different lengths. // If slice doesn't match these lengths, default runes will be used instead of missing one. // // 2 runes with only horizontal and vertical edges. // []rune{'─', '│'} // []rune{'═','║'} // 6 runes with horizontal, vertical edges and top-left, top-right, bottom-left, bottom-right cornes. // []rune{'─', '│', '┌', '┐', '└', '┘'} // []rune{'═','║','╔','╗','╚','╝'} // 11 runes which can be used with `gocui.Gui.SupportOverlaps` property. // []rune{'─', '│', '┌', '┐', '└', '┘', '├', '┤', '┬', '┴', '┼'} // []rune{'═','║','╔','╗','╚','╝','╠','╣','╦','╩','╬'} FrameRunes []rune // If Wrap is true, the content that is written to this View is // automatically wrapped when it is longer than its width. If true the // view's x-origin will be ignored. Wrap bool // If Autoscroll is true, the View will automatically scroll down when the // text overflows. If true the view's y-origin will be ignored. Autoscroll bool // If Frame is true, Title allows to configure a title for the view. Title string // If non-empty, TitlePrefix is prepended to the title of a view regardless on // the the currently selected tab (if any.) TitlePrefix string Tabs []string TabIndex int // TitleColor allow to configure the color of title and subtitle for the view. TitleColor Attribute // If Frame is true, Subtitle allows to configure a subtitle for the view. 
Subtitle string // If Mask is true, the View will display the mask instead of the real // content Mask string // Overlaps describes which edges are overlapping with another view's edges Overlaps byte // If HasLoader is true, the message will be appended with a spinning loader animation HasLoader bool // IgnoreCarriageReturns tells us whether to ignore '\r' characters IgnoreCarriageReturns bool // ParentView is the view which catches events bubbled up from the given view if there's no matching handler ParentView *View searcher *searcher // KeybindOnEdit should be set to true when you want to execute keybindings even when the view is editable // (this is usually not the case) KeybindOnEdit bool TextArea *TextArea // something like '1 of 20' for a list view Footer string // if true, the user can scroll all the way past the last item until it appears at the top of the view CanScrollPastBottom bool // if true, the view will automatically recognize https: URLs in the content written to it and render // them as hyperlinks AutoRenderHyperLinks bool // if true, the view will underline hyperlinks only when the cursor is on // them; otherwise, they will always be underlined UnderlineHyperLinksOnlyOnHover bool // number of spaces per \t character, defaults to 4 TabWidth int } type pos struct { x, y int } // call this in the event of a view resize, or if you want to render new content // without the chance of old content still appearing, or if you want to remove // a line from the existing content func (v *View) clearViewLines() { v.tainted = true v.viewLines = nil v.clearHover() } type searcher struct { searchString string searchPositions []SearchPosition modelSearchResults []SearchPosition currentSearchIndex int onSelectItem func(int) renderSearchStatus func(int, int) } func (v *View) SetRenderSearchStatus(renderSearchStatus func(int, int)) { v.searcher.renderSearchStatus = renderSearchStatus } func (v *View) SetOnSelectItem(onSelectItem func(int)) { v.searcher.onSelectItem = 
onSelectItem } func (v *View) renderSearchStatus(index int, itemCount int) { if v.searcher.renderSearchStatus != nil { v.searcher.renderSearchStatus(index, itemCount) } } func (v *View) gotoNextMatch() error { if len(v.searcher.searchPositions) == 0 { return nil } if v.Highlight && v.oy+v.cy < v.searcher.searchPositions[v.searcher.currentSearchIndex].Y { // If the selection is before the current match, just jump to the current match and return. // This can only happen if the user has moved the cursor to before the first match. v.SelectSearchResult(v.searcher.currentSearchIndex) return nil } if v.searcher.currentSearchIndex >= len(v.searcher.searchPositions)-1 { v.searcher.currentSearchIndex = 0 } else { v.searcher.currentSearchIndex++ } v.SelectSearchResult(v.searcher.currentSearchIndex) return nil } func (v *View) gotoPreviousMatch() error { if len(v.searcher.searchPositions) == 0 { return nil } if v.Highlight && v.oy+v.cy > v.searcher.searchPositions[v.searcher.currentSearchIndex].Y { // If the selection is after the current match, just jump to the current match and return. // This happens if the user has moved the cursor down from the current match. 
v.SelectSearchResult(v.searcher.currentSearchIndex) return nil } if v.searcher.currentSearchIndex == 0 { if len(v.searcher.searchPositions) > 0 { v.searcher.currentSearchIndex = len(v.searcher.searchPositions) - 1 } } else { v.searcher.currentSearchIndex-- } v.SelectSearchResult(v.searcher.currentSearchIndex) return nil } func (v *View) SelectSearchResult(index int) { itemCount := len(v.searcher.searchPositions) if itemCount == 0 { return } if index > itemCount-1 { index = itemCount - 1 } y := v.searcher.searchPositions[index].Y v.FocusPoint(v.ox, y, true) v.renderSearchStatus(index, itemCount) if v.searcher.onSelectItem != nil { v.searcher.onSelectItem(y) } } // Returns <current match index>, <total matches> func (v *View) GetSearchStatus() (int, int) { return v.searcher.currentSearchIndex, len(v.searcher.searchPositions) } // modelSearchResults is optional; pass nil to search the view. If non-nil, // these positions will be used for highlighting search results. Even in this // case the view will still be searched on a per-line basis, so that the caller // doesn't have to make assumptions where in the rendered line the search result // is. The XStart and XEnd values in the modelSearchResults are only used in // case the search string is not found in the given line, which can happen if // the view renders an abbreviated version of some of the model data. // // Mind the difference between nil and empty slice: nil means we're not // searching the model, empty slice means we *are* searching the model but we // didn't find any matches. 
func (v *View) UpdateSearchResults(str string, modelSearchResults []SearchPosition) { v.writeMutex.Lock() defer v.writeMutex.Unlock() v.searcher.search(str, modelSearchResults) v.updateSearchPositions() if len(v.searcher.searchPositions) > 0 { // get the first result past the current cursor currentIndex := 0 if v.Highlight { // ...but only if we're showing the highlighted line adjustedY := v.oy + v.cy adjustedX := v.ox + v.cx for i, pos := range v.searcher.searchPositions { if pos.Y > adjustedY || (pos.Y == adjustedY && pos.XStart > adjustedX) { currentIndex = i break } } } v.searcher.currentSearchIndex = currentIndex } } func (v *View) Search(str string, modelSearchResults []SearchPosition) { v.UpdateSearchResults(str, modelSearchResults) if len(v.searcher.searchPositions) > 0 { v.SelectSearchResult(v.searcher.currentSearchIndex) } else { v.renderSearchStatus(0, 0) } } func (v *View) ClearSearch() { v.searcher.clearSearch() } func (v *View) IsSearching() bool { return v.searcher.searchString != "" } func (v *View) nearestSearchPosition() int { currentLineIndex := v.cy + v.oy lastSearchPos := 0 for i, pos := range v.searcher.searchPositions { if pos.Y == currentLineIndex { return i } if pos.Y > currentLineIndex { break } lastSearchPos = i } return lastSearchPos } func (v *View) SetNearestSearchPosition() { if len(v.searcher.searchPositions) > 0 { newPos := v.nearestSearchPosition() if newPos != v.searcher.currentSearchIndex { v.searcher.currentSearchIndex = newPos v.renderSearchStatus(newPos, len(v.searcher.searchPositions)) } } } func (v *View) FocusPoint(cx int, cy int, scrollIntoView bool) { v.writeMutex.Lock() defer v.writeMutex.Unlock() v.refreshViewLinesIfNeeded() lineCount := len(v.viewLines) if cy < 0 || cy > lineCount { return } if scrollIntoView { height := v.InnerHeight() v.oy = calculateNewOrigin(cy, v.oy, lineCount, height) } v.cx = cx v.cy = cy - v.oy } func (v *View) SetRangeSelectStart(rangeSelectStartY int) { v.rangeSelectStartY = rangeSelectStartY 
} func (v *View) CancelRangeSelect() { v.rangeSelectStartY = -1 } func calculateNewOrigin(selectedLine int, oldOrigin int, lineCount int, viewHeight int) int { if viewHeight >= lineCount { return 0 } else if selectedLine < oldOrigin || selectedLine >= oldOrigin+viewHeight { // If the selected line is outside the visible area, scroll the view so // that the selected line is in the middle. newOrigin := selectedLine - viewHeight/2 // However, take care not to overflow if the total line count is less // than the view height. maxOrigin := lineCount - viewHeight if newOrigin > maxOrigin { newOrigin = maxOrigin } if newOrigin < 0 { newOrigin = 0 } return newOrigin } return oldOrigin } func (s *searcher) search(str string, modelSearchResults []SearchPosition) { s.searchString = str s.searchPositions = []SearchPosition{} s.modelSearchResults = modelSearchResults s.currentSearchIndex = 0 } func (s *searcher) clearSearch() { s.searchString = "" s.searchPositions = []SearchPosition{} s.currentSearchIndex = 0 } type SearchPosition struct { XStart int XEnd int Y int } type viewLine struct { linesX, linesY int // coordinates relative to v.lines line []cell } type cell struct { chr string // a grapheme cluster width int // number of terminal cells occupied by chr (always 1 or 2) bgColor, fgColor Attribute hyperlink string } type lineType []cell func characterEquals(chr []byte, b byte) bool { return len(chr) == 1 && chr[0] == b } // String returns a string from a given cell slice. func (l lineType) String() string { var str strings.Builder for _, c := range l { str.WriteString(c.chr) } return str.String() } // NewView returns a new View object. 
func NewView(name string, x0, y0, x1, y1 int, mode OutputMode) *View { v := &View{ name: name, x0: x0, y0: y0, x1: x1, y1: y1, Visible: true, Frame: true, Editor: DefaultEditor, tainted: true, outMode: mode, ei: newEscapeInterpreter(mode), searcher: &searcher{}, TextArea: &TextArea{}, rangeSelectStartY: -1, TabWidth: 4, } v.FgColor, v.BgColor = ColorDefault, ColorDefault v.SelFgColor, v.SelBgColor = ColorDefault, ColorDefault v.InactiveViewSelBgColor = ColorDefault v.TitleColor, v.FrameColor = ColorDefault, ColorDefault return v } // Dimensions returns the dimensions of the View func (v *View) Dimensions() (int, int, int, int) { return v.x0, v.y0, v.x1, v.y1 } // Size returns the number of visible columns and rows in the View, including // the frame if any func (v *View) Size() (x, y int) { return v.Width(), v.Height() } // InnerSize returns the number of usable columns and rows in the View, excluding // the frame if any func (v *View) InnerSize() (x, y int) { return v.InnerWidth(), v.InnerHeight() } func (v *View) Width() int { return v.x1 - v.x0 + 1 } func (v *View) Height() int { return v.y1 - v.y0 + 1 } // The writeable area of the view is always two less then the view's size, // because if it has a frame, we need to subtract that, but if it doesn't, the // view is made 1 larger on all sides. I'd like to clean this up at some point, // but for now we live with this weirdness. func (v *View) InnerWidth() int { innerWidth := v.Width() - 2 if innerWidth < 0 { return 0 } return innerWidth } func (v *View) InnerHeight() int { innerHeight := v.Height() - 2 if innerHeight < 0 { return 0 } return innerHeight } // Name returns the name of the view. func (v *View) Name() string { return v.name } // setCharacter sets a character (grapheme cluster) at the given point relative to the view. It applies // the specified colors, taking into account if the cell must be highlighted. Also, it checks if the // position is valid. 
func (v *View) setCharacter(x, y int, ch string, fgColor, bgColor Attribute) { maxX, maxY := v.Size() if x < 0 || x >= maxX || y < 0 || y >= maxY { return } if v.Mask != "" { fgColor = v.FgColor bgColor = v.BgColor ch = v.Mask } else if v.Highlight { rangeSelectStart := v.cy rangeSelectEnd := v.cy if v.rangeSelectStartY != -1 { relativeRangeSelectStart := v.rangeSelectStartY - v.oy rangeSelectStart = min(relativeRangeSelectStart, v.cy) rangeSelectEnd = max(relativeRangeSelectStart, v.cy) } if y >= rangeSelectStart && y <= rangeSelectEnd { // this ensures we use the bright variant of a colour upon highlight fgColorComponent := fgColor & ^AttrAll if fgColorComponent >= AttrIsValidColor && fgColorComponent < AttrIsValidColor+8 { fgColor += 8 } fgColor = fgColor | AttrBold if v.HighlightInactive { bgColor = (bgColor & AttrStyleBits) | v.InactiveViewSelBgColor } else { bgColor = (bgColor & AttrStyleBits) | v.SelBgColor } } } if matched, selected := v.isPatternMatchedRune(x, y); matched { fgColor = ColorBlack if selected { bgColor = ColorCyan } else { bgColor = ColorYellow } } if v.isHoveredHyperlink(x, y) { fgColor |= AttrUnderline } // Don't display empty characters if ch == "" { ch = " " } tcellSetCell(v.x0+x+1, v.y0+y+1, ch, fgColor, bgColor, v.outMode) } // SetCursor sets the cursor position of the view at the given point, // relative to the view. It is allowed to set the position to a point outside // the visible portion of the view, or even outside the content of the view. // Clients are responsible for clamping to valid positions. func (v *View) SetCursor(x, y int) { v.cx = x v.cy = y } func (v *View) SetCursorX(x int) { v.cx = x } func (v *View) SetCursorY(y int) { v.cy = y } // Cursor returns the cursor position of the view. 
func (v *View) Cursor() (x, y int) { return v.cx, v.cy } func (v *View) CursorX() int { return v.cx } func (v *View) CursorY() int { return v.cy } // SetOrigin sets the origin position of the view's internal buffer, // so the buffer starts to be printed from this point, which means that // it is linked with the origin point of view. It can be used to // implement Horizontal and Vertical scrolling with just incrementing // or decrementing ox and oy. func (v *View) SetOrigin(x, y int) { if x < 0 { x = 0 } if y < 0 { y = 0 } v.ox = x v.oy = y } func (v *View) SetOriginX(x int) { if x < 0 { x = 0 } v.ox = x } func (v *View) SetOriginY(y int) { if y < 0 { y = 0 } v.oy = y } // Origin returns the origin position of the view. func (v *View) Origin() (x, y int) { return v.OriginX(), v.OriginY() } func (v *View) OriginX() int { return v.ox } func (v *View) OriginY() int { return v.oy } // SetWritePos sets the write position of the view's internal buffer. // So the next Write call would write directly to the specified position. func (v *View) SetWritePos(x, y int) { if x < 0 { x = 0 } if y < 0 { y = 0 } v.wx = x v.wy = y // Changing the write position makes a pending newline obsolete v.pendingNewline = false } // WritePos returns the current write position of the view's internal buffer. func (v *View) WritePos() (x, y int) { return v.wx, v.wy } // SetReadPos sets the read position of the view's internal buffer. // So the next Read call would read from the specified position. func (v *View) SetReadPos(x, y int) { if x < 0 { x = 0 } if y < 0 { y = 0 } v.readBuffer = nil v.rx = x v.ry = y } // ReadPos returns the current read position of the view's internal buffer. func (v *View) ReadPos() (x, y int) { return v.rx, v.ry } // makeWriteable creates empty cells if required to make position (x, y) writeable. 
func (v *View) makeWriteable(x, y int) { // TODO: make this more efficient // line `y` must be index-able (that's why `<=`) for len(v.lines) <= y { if cap(v.lines) > len(v.lines) { newLen := cap(v.lines) if newLen > y { newLen = y + 1 } v.lines = v.lines[:newLen] } else { v.lines = append(v.lines, nil) } } // cell `x` need not be index-able (that's why `<`) // append should be used by `lines[y]` user if he wants to write beyond `x` for len(v.lines[y]) < x { if cap(v.lines[y]) > len(v.lines[y]) { newLen := cap(v.lines[y]) if newLen > x { newLen = x } v.lines[y] = v.lines[y][:newLen] } else { v.lines[y] = append(v.lines[y], cell{}) } } } // writeCells copies []cell to (v.wx, v.wy), and advances v.wx accordingly. // !!! caller MUST ensure that specified location (x, y) is writeable by calling makeWriteable func (v *View) writeCells(cells []cell) { var newLen int // use maximum len available line := v.lines[v.wy][:cap(v.lines[v.wy])] maxCopy := len(line) - v.wx if maxCopy < len(cells) { copy(line[v.wx:], cells[:maxCopy]) line = append(line, cells[maxCopy:]...) newLen = len(line) } else { // maxCopy >= len(cells) copy(line[v.wx:], cells) newLen = v.wx + len(cells) if newLen < len(v.lines[v.wy]) { newLen = len(v.lines[v.wy]) } } v.lines[v.wy] = line[:newLen] v.wx += len(cells) } // Write appends a byte slice into the view's internal buffer. Because // View implements the io.Writer interface, it can be passed as parameter // of functions like fmt.Fprintf, fmt.Fprintln, io.Copy, etc. Clear must // be called to clear the view's buffer. 
func (v *View) Write(p []byte) (n int, err error) { v.writeMutex.Lock() defer v.writeMutex.Unlock() v.write(p) return len(p), nil } func (v *View) write(p []byte) { v.tainted = true v.clearHover() // Fill with empty cells, if writing outside current view buffer v.makeWriteable(v.wx, v.wy) finishLine := func() { v.autoRenderHyperlinksInCurrentLine() if v.wx >= len(v.lines[v.wy]) { v.writeCells([]cell{{ chr: "", width: 0, fgColor: 0, bgColor: 0, }}) } } advanceToNextLine := func() { v.wx = 0 v.wy++ if v.wy >= len(v.lines) { v.lines = append(v.lines, nil) } } if v.pendingNewline { advanceToNextLine() v.pendingNewline = false } until := len(p) if !v.Editable && until > 0 && p[until-1] == '\n' { v.pendingNewline = true until-- } state := -1 var chr []byte var width int remaining := p[:until] for len(remaining) > 0 { chr, remaining, width, state = uniseg.FirstGraphemeCluster(remaining, state) switch { case characterEquals(chr, '\n'): finishLine() advanceToNextLine() case characterEquals(chr, '\r'): finishLine() v.wx = 0 default: truncateLine, cells := v.parseInput(chr, width, v.wx, v.wy) if cells == nil { continue } v.writeCells(cells) if truncateLine { v.lines[v.wy] = v.lines[v.wy][:v.wx] } } } if v.pendingNewline { finishLine() } else { v.autoRenderHyperlinksInCurrentLine() } v.updateSearchPositions() } // exported functions use the mutex. Non-exported functions are for internal use // and a calling function should use a mutex func (v *View) WriteString(s string) { _, _ = v.Write([]byte(s)) } func (v *View) writeString(s string) { v.write([]byte(s)) } var linkStartChars = []string{"h", "t", "t", "p", "s", ":", "/", "/"} func findLinkStart(line []cell) int { for i := 0; i < len(line)-len(linkStartChars); i++ { for j := range linkStartChars { if line[i+j].chr != string(linkStartChars[j]) { break } if j == len(linkStartChars)-1 { return i } } } return -1 } // We need a heuristic to find the end of a hyperlink. 
Searching for the // first character that is not a valid URI character is not quite good // enough, because in markdown it's common to have a hyperlink followed by a // ')', so we want to stop there. Hopefully URLs containing ')' are uncommon // enough that this is not a problem. var lineEndCharacters map[string]bool = map[string]bool{ "": true, " ": true, "\n": true, ">": true, "\"": true, ")": true, } func (v *View) autoRenderHyperlinksInCurrentLine() { if !v.AutoRenderHyperLinks { return } line := v.lines[v.wy] start := 0 for { linkStart := findLinkStart(line[start:]) if linkStart == -1 { break } linkStart += start var link strings.Builder linkEnd := linkStart for ; linkEnd < len(line); linkEnd++ { if _, ok := lineEndCharacters[line[linkEnd].chr]; ok { break } link.WriteString(string(line[linkEnd].chr)) } for i := linkStart; i < linkEnd; i++ { v.lines[v.wy][i].hyperlink = link.String() } start = linkEnd } } // parseInput parses char by char the input written to the View. It returns nil // while processing ESC sequences. Otherwise, it returns a cell slice that // contains the processed data. 
func (v *View) parseInput(ch []byte, width int, x int, _ int) (bool, []cell) { cells := []cell{} truncateLine := false isEscape, err := v.ei.parseOne(ch) if err != nil { for _, chr := range v.ei.characters() { c := cell{ fgColor: v.FgColor, bgColor: v.BgColor, chr: chr, width: uniseg.StringWidth(chr), } cells = append(cells, c) } v.ei.reset() } else { repeatCount := 1 if _, ok := v.ei.instruction.(eraseInLineFromCursor); ok { // fill rest of line v.ei.instructionRead() cx := 0 for _, cell := range v.lines[v.wy][0:v.wx] { cx += cell.width } repeatCount = v.InnerWidth() - cx ch = []byte{' '} width = 1 truncateLine = true } else if isEscape { // do not output anything return truncateLine, nil } else if characterEquals(ch, '\t') { // fill tab-sized space tabWidth := v.TabWidth if tabWidth < 1 { tabWidth = 4 } ch = []byte{' '} width = 1 repeatCount = tabWidth - (x % tabWidth) } c := cell{ fgColor: v.ei.curFgColor, bgColor: v.ei.curBgColor, hyperlink: v.ei.hyperlink.String(), chr: string(ch), width: width, } for i := 0; i < repeatCount; i++ { cells = append(cells, c) } } return truncateLine, cells } // Read reads data into p from the current reading position set by SetReadPos. // It returns the number of bytes read into p. // At EOF, err will be io.EOF. 
func (v *View) Read(p []byte) (n int, err error) { buffer := make([]byte, utf8.UTFMax) offset := 0 if v.readBuffer != nil { copy(p, v.readBuffer) if len(v.readBuffer) >= len(p) { if len(v.readBuffer) > len(p) { v.readBuffer = v.readBuffer[len(p):] } return len(p), nil } v.readBuffer = nil } for v.ry < len(v.lines) { for v.rx < len(v.lines[v.ry]) { s := v.lines[v.ry][v.rx].chr count := len(s) copy(p[offset:], s) v.rx++ newOffset := offset + count if newOffset >= len(p) { if newOffset > len(p) { v.readBuffer = buffer[newOffset-len(p):] } return len(p), nil } offset += count } v.rx = 0 v.ry++ } return offset, io.EOF } // only use this if the calling function has a lock on writeMutex func (v *View) clear() { v.rewind() v.lines = nil v.clearViewLines() } // Clear empties the view's internal buffer. // And resets reading and writing offsets. func (v *View) Clear() { v.writeMutex.Lock() defer v.writeMutex.Unlock() v.clear() } func (v *View) SetContent(str string) { v.writeMutex.Lock() defer v.writeMutex.Unlock() v.clear() v.writeString(str) } func (v *View) CopyContent(from *View) { v.writeMutex.Lock() defer v.writeMutex.Unlock() v.clear() v.lines = from.lines v.viewLines = from.viewLines v.ox = from.ox v.oy = from.oy v.cx = from.cx v.cy = from.cy } // Rewind sets read and write pos to (0, 0). func (v *View) Rewind() { v.writeMutex.Lock() defer v.writeMutex.Unlock() v.rewind() } // similar to Rewind but clears lines. Also similar to Clear but doesn't reset // viewLines func (v *View) Reset() { v.writeMutex.Lock() defer v.writeMutex.Unlock() v.rewind() v.lines = nil } // This is for when we've done a restart for the sake of avoiding a flicker and // we've reached the end of the new content to display: we need to clear the remaining // content from the previous round. 
We do this by setting v.viewLines to nil so that // we just render the new content from v.lines directly func (v *View) FlushStaleCells() { v.writeMutex.Lock() defer v.writeMutex.Unlock() v.clearViewLines() } func (v *View) rewind() { v.ei.reset() v.SetReadPos(0, 0) v.SetWritePos(0, 0) } func containsUpcaseChar(str string) bool { for _, ch := range str { if unicode.IsUpper(ch) { return true } } return false } func stringToGraphemes(s string) []string { var graphemes []string state := -1 for s != "" { var chr string chr, s, _, state = uniseg.FirstGraphemeClusterInString(s, state) graphemes = append(graphemes, chr) } return graphemes } func (v *View) updateSearchPositions() { if v.searcher.searchString != "" { var normalizeRune func(s string) string var normalizedSearchStr string // if we have any uppercase characters we'll do a case-sensitive search if containsUpcaseChar(v.searcher.searchString) { normalizeRune = func(s string) string { return s } normalizedSearchStr = v.searcher.searchString } else { normalizeRune = strings.ToLower normalizedSearchStr = strings.ToLower(v.searcher.searchString) } searchStrGraphemes := stringToGraphemes(normalizedSearchStr) v.searcher.searchPositions = []SearchPosition{} searchPositionsForLine := func(line []cell, y int) []SearchPosition { var result []SearchPosition searchStringWidth := uniseg.StringWidth(v.searcher.searchString) x := 0 for startIdx, cell := range line { found := true for i, c := range searchStrGraphemes { if len(line)-1 < startIdx+i { found = false break } if normalizeRune(line[startIdx+i].chr) != c { found = false break } } if found { result = append(result, SearchPosition{XStart: x, XEnd: x + searchStringWidth, Y: y}) } x += cell.width } return result } if v.searcher.modelSearchResults != nil { for _, result := range v.searcher.modelSearchResults { // This code only works when v.Wrap is false. 
if result.Y >= len(v.lines) { break } // If a view line exists for this line index: if v.lines[result.Y] != nil { // search this view line for the search string positions := searchPositionsForLine(v.lines[result.Y], result.Y) if len(positions) > 0 { // If we found any occurrences, add them v.searcher.searchPositions = append(v.searcher.searchPositions, positions...) } else { // Otherwise, the search string was found in the model // but not in the view line; this can happen if the view // renders only truncated versions of the model strings. // In this case, add one search position with what the // model search function returned. v.searcher.searchPositions = append(v.searcher.searchPositions, result) } } else { // We don't have a view line for this line index. Add a // searchPosition anyway, just for the sake of being able to // show the "n of m" search status. The X positions don't // matter in this case. v.searcher.searchPositions = append(v.searcher.searchPositions, SearchPosition{XStart: -1, XEnd: -1, Y: result.Y}) } } } else { v.refreshViewLinesIfNeeded() for y, line := range v.viewLines { v.searcher.searchPositions = append(v.searcher.searchPositions, searchPositionsForLine(line.line, y)...) } } } } // IsTainted tells us if the view is tainted func (v *View) IsTainted() bool { return v.tainted } // draw re-draws the view's contents. func (v *View) draw() { v.writeMutex.Lock() defer v.writeMutex.Unlock() if !v.Visible { return } v.clearRunes() maxX, maxY := v.InnerSize() if v.Wrap { if maxX == 0 { return } v.ox = 0 } v.refreshViewLinesIfNeeded() visibleViewLinesHeight := v.viewLineLengthIgnoringTrailingBlankLines() if v.Autoscroll && visibleViewLinesHeight > maxY { v.oy = visibleViewLinesHeight - maxY } if len(v.viewLines) == 0 { return } start := v.oy if start > len(v.viewLines)-1 {
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
true
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/gocui/keybinding.go
vendor/github.com/jesseduffield/gocui/keybinding.go
// Copyright 2014 The gocui Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gocui import ( "strings" "github.com/gdamore/tcell/v2" ) // Key represents special keys or keys combinations. type Key tcell.Key // Modifier allows to define special keys combinations. They can be used // in combination with Keys or Runes when a new keybinding is defined. type Modifier tcell.ModMask // Keybidings are used to link a given key-press event with a handler. type keybinding struct { viewName string key Key ch rune mod Modifier handler func(*Gui, *View) error } // Parse takes the input string and extracts the keybinding. // Returns a Key / rune, a Modifier and an error. func Parse(input string) (any, Modifier, error) { if len(input) == 1 { _, r, err := getKey(rune(input[0])) if err != nil { return nil, ModNone, err } return r, ModNone, nil } var modifier Modifier cleaned := make([]string, 0) tokens := strings.SplitSeq(input, "+") for t := range tokens { normalized := strings.Title(strings.ToLower(t)) if t == "Alt" { modifier = ModAlt continue } cleaned = append(cleaned, normalized) } key, exist := translate[strings.Join(cleaned, "")] if !exist { return nil, ModNone, ErrNoSuchKeybind } return key, modifier, nil } // ParseAll takes an array of strings and returns a map of all keybindings. func ParseAll(input []string) (map[any]Modifier, error) { ret := make(map[any]Modifier) for _, i := range input { k, m, err := Parse(i) if err != nil { return ret, err } ret[k] = m } return ret, nil } // MustParse takes the input string and returns a Key / rune and a Modifier. // It will panic if any error occured. func MustParse(input string) (any, Modifier) { k, m, err := Parse(input) if err != nil { panic(err) } return k, m } // MustParseAll takes an array of strings and returns a map of all keybindings. // It will panic if any error occured. 
func MustParseAll(input []string) map[any]Modifier { result, err := ParseAll(input) if err != nil { panic(err) } return result } // newKeybinding returns a new Keybinding object. func newKeybinding(viewname string, key Key, ch rune, mod Modifier, handler func(*Gui, *View) error) (kb *keybinding) { kb = &keybinding{ viewName: viewname, key: key, ch: ch, mod: mod, handler: handler, } return kb } func eventMatchesKey(ev *GocuiEvent, key any) bool { // assuming ModNone for now if ev.Mod != ModNone { return false } k, ch, err := getKey(key) if err != nil { return false } return k == ev.Key && ch == ev.Ch } // matchKeypress returns if the keybinding matches the keypress. func (kb *keybinding) matchKeypress(key Key, ch rune, mod Modifier) bool { return kb.key == key && kb.ch == ch && kb.mod == mod } // translations for strings to keys var translate = map[string]Key{ "F1": KeyF1, "F2": KeyF2, "F3": KeyF3, "F4": KeyF4, "F5": KeyF5, "F6": KeyF6, "F7": KeyF7, "F8": KeyF8, "F9": KeyF9, "F10": KeyF10, "F11": KeyF11, "F12": KeyF12, "Insert": KeyInsert, "Delete": KeyDelete, "Home": KeyHome, "End": KeyEnd, "Pgup": KeyPgup, "Pgdn": KeyPgdn, "ArrowUp": KeyArrowUp, "ShiftArrowUp": KeyShiftArrowUp, "ArrowDown": KeyArrowDown, "ShiftArrowDown": KeyShiftArrowDown, "ArrowLeft": KeyArrowLeft, "ArrowRight": KeyArrowRight, "CtrlTilde": KeyCtrlTilde, "Ctrl2": KeyCtrl2, "CtrlSpace": KeyCtrlSpace, "CtrlA": KeyCtrlA, "CtrlB": KeyCtrlB, "CtrlC": KeyCtrlC, "CtrlD": KeyCtrlD, "CtrlE": KeyCtrlE, "CtrlF": KeyCtrlF, "CtrlG": KeyCtrlG, "Backspace": KeyBackspace, "CtrlH": KeyCtrlH, "Tab": KeyTab, "BackTab": KeyBacktab, "CtrlI": KeyCtrlI, "CtrlJ": KeyCtrlJ, "CtrlK": KeyCtrlK, "CtrlL": KeyCtrlL, "Enter": KeyEnter, "CtrlM": KeyCtrlM, "CtrlN": KeyCtrlN, "CtrlO": KeyCtrlO, "CtrlP": KeyCtrlP, "CtrlQ": KeyCtrlQ, "CtrlR": KeyCtrlR, "CtrlS": KeyCtrlS, "CtrlT": KeyCtrlT, "CtrlU": KeyCtrlU, "CtrlV": KeyCtrlV, "CtrlW": KeyCtrlW, "CtrlX": KeyCtrlX, "CtrlY": KeyCtrlY, "CtrlZ": KeyCtrlZ, "Esc": KeyEsc, 
"CtrlLsqBracket": KeyCtrlLsqBracket, "Ctrl3": KeyCtrl3, "Ctrl4": KeyCtrl4, "CtrlBackslash": KeyCtrlBackslash, "Ctrl5": KeyCtrl5, "CtrlRsqBracket": KeyCtrlRsqBracket, "Ctrl6": KeyCtrl6, "Ctrl7": KeyCtrl7, "CtrlSlash": KeyCtrlSlash, "CtrlUnderscore": KeyCtrlUnderscore, "Space": KeySpace, "Backspace2": KeyBackspace2, "Ctrl8": KeyCtrl8, "Mouseleft": MouseLeft, "Mousemiddle": MouseMiddle, "Mouseright": MouseRight, "Mouserelease": MouseRelease, "MousewheelUp": MouseWheelUp, "MousewheelDown": MouseWheelDown, } // Special keys. const ( KeyF1 Key = Key(tcell.KeyF1) KeyF2 = Key(tcell.KeyF2) KeyF3 = Key(tcell.KeyF3) KeyF4 = Key(tcell.KeyF4) KeyF5 = Key(tcell.KeyF5) KeyF6 = Key(tcell.KeyF6) KeyF7 = Key(tcell.KeyF7) KeyF8 = Key(tcell.KeyF8) KeyF9 = Key(tcell.KeyF9) KeyF10 = Key(tcell.KeyF10) KeyF11 = Key(tcell.KeyF11) KeyF12 = Key(tcell.KeyF12) KeyInsert = Key(tcell.KeyInsert) KeyDelete = Key(tcell.KeyDelete) KeyHome = Key(tcell.KeyHome) KeyEnd = Key(tcell.KeyEnd) KeyPgdn = Key(tcell.KeyPgDn) KeyPgup = Key(tcell.KeyPgUp) KeyArrowUp = Key(tcell.KeyUp) KeyShiftArrowUp = Key(tcell.KeyF62) KeyArrowDown = Key(tcell.KeyDown) KeyShiftArrowDown = Key(tcell.KeyF63) KeyArrowLeft = Key(tcell.KeyLeft) KeyArrowRight = Key(tcell.KeyRight) ) // Keys combinations. 
const ( KeyCtrlTilde = Key(tcell.KeyF64) // arbitrary assignment KeyCtrlSpace = Key(tcell.KeyCtrlSpace) KeyCtrlA = Key(tcell.KeyCtrlA) KeyCtrlB = Key(tcell.KeyCtrlB) KeyCtrlC = Key(tcell.KeyCtrlC) KeyCtrlD = Key(tcell.KeyCtrlD) KeyCtrlE = Key(tcell.KeyCtrlE) KeyCtrlF = Key(tcell.KeyCtrlF) KeyCtrlG = Key(tcell.KeyCtrlG) KeyBackspace = Key(tcell.KeyBackspace) KeyCtrlH = Key(tcell.KeyCtrlH) KeyTab = Key(tcell.KeyTab) KeyBacktab = Key(tcell.KeyBacktab) KeyCtrlI = Key(tcell.KeyCtrlI) KeyCtrlJ = Key(tcell.KeyCtrlJ) KeyCtrlK = Key(tcell.KeyCtrlK) KeyCtrlL = Key(tcell.KeyCtrlL) KeyEnter = Key(tcell.KeyEnter) KeyCtrlM = Key(tcell.KeyCtrlM) KeyCtrlN = Key(tcell.KeyCtrlN) KeyCtrlO = Key(tcell.KeyCtrlO) KeyCtrlP = Key(tcell.KeyCtrlP) KeyCtrlQ = Key(tcell.KeyCtrlQ) KeyCtrlR = Key(tcell.KeyCtrlR) KeyCtrlS = Key(tcell.KeyCtrlS) KeyCtrlT = Key(tcell.KeyCtrlT) KeyCtrlU = Key(tcell.KeyCtrlU) KeyCtrlV = Key(tcell.KeyCtrlV) KeyCtrlW = Key(tcell.KeyCtrlW) KeyCtrlX = Key(tcell.KeyCtrlX) KeyCtrlY = Key(tcell.KeyCtrlY) KeyCtrlZ = Key(tcell.KeyCtrlZ) KeyEsc = Key(tcell.KeyEscape) KeyCtrlUnderscore = Key(tcell.KeyCtrlUnderscore) KeySpace = Key(32) KeyBackspace2 = Key(tcell.KeyBackspace2) KeyCtrl8 = Key(tcell.KeyBackspace2) // same key as in termbox-go // The following assignments were used in termbox implementation. // In tcell, these are not keys per se. But in gocui we have them // mapped to the keys so we have to use placeholder keys. 
KeyAltEnter = Key(tcell.KeyF64) // arbitrary assignments MouseLeft = Key(tcell.KeyF63) MouseRight = Key(tcell.KeyF62) MouseMiddle = Key(tcell.KeyF61) MouseRelease = Key(tcell.KeyF60) MouseWheelUp = Key(tcell.KeyF59) MouseWheelDown = Key(tcell.KeyF58) MouseWheelLeft = Key(tcell.KeyF57) MouseWheelRight = Key(tcell.KeyF56) KeyCtrl2 = Key(tcell.KeyNUL) // termbox defines theses KeyCtrl3 = Key(tcell.KeyEscape) KeyCtrl4 = Key(tcell.KeyCtrlBackslash) KeyCtrl5 = Key(tcell.KeyCtrlRightSq) KeyCtrl6 = Key(tcell.KeyCtrlCarat) KeyCtrl7 = Key(tcell.KeyCtrlUnderscore) KeyCtrlSlash = Key(tcell.KeyCtrlUnderscore) KeyCtrlRsqBracket = Key(tcell.KeyCtrlRightSq) KeyCtrlBackslash = Key(tcell.KeyCtrlBackslash) KeyCtrlLsqBracket = Key(tcell.KeyCtrlLeftSq) ) // Modifiers. const ( ModNone Modifier = Modifier(0) ModAlt = Modifier(tcell.ModAlt) ModMotion = Modifier(2) // just picking an arbitrary number here that doesn't clash with tcell.ModAlt // ModCtrl doesn't work with keyboard keys. Use CtrlKey in Key and ModNone. This is was for mouse clicks only (tcell.v1) // ModCtrl = Modifier(tcell.ModCtrl) )
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/gocui/attribute.go
vendor/github.com/jesseduffield/gocui/attribute.go
// Copyright 2020 The gocui Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gocui

import "github.com/gdamore/tcell/v2"

// Attribute affects the presentation of characters, such as color, boldness, etc.
type Attribute uint64

const (
	// ColorDefault is used to leave the Color unchanged from whatever system or terminal default may exist.
	ColorDefault = Attribute(tcell.ColorDefault)

	// AttrIsValidColor is used to indicate the color value is actually
	// valid (initialized). This is useful to permit the zero value
	// to be treated as the default.
	AttrIsValidColor = Attribute(tcell.ColorValid)

	// AttrIsRGBColor is used to indicate that the Attribute value is RGB value of color.
	// The lower order 3 bytes are RGB.
	// (It's not a color in basic ANSI range 256).
	AttrIsRGBColor = Attribute(tcell.ColorIsRGB)

	// AttrColorBits is a mask where color is located in Attribute
	AttrColorBits = 0xffffffffff // roughly 5 bytes, tcell uses 4 bytes and half-byte as a special flags for color (rest is reserved for future)

	// AttrStyleBits is a mask where character attributes (e.g.: bold, italic, underline) are located in Attribute
	AttrStyleBits = 0xffffff0000000000 // remaining 3 bytes in the 8 bytes Attribute (tcell is not using it, so we should be fine)
)

// Color attributes. These colors are compatible with tcell.Color type and can be expanded like:
//
//	g.FgColor := gocui.Attribute(tcell.ColorLime)
const (
	ColorBlack Attribute = AttrIsValidColor + iota
	ColorRed
	ColorGreen
	ColorYellow
	ColorBlue
	ColorMagenta
	ColorCyan
	ColorWhite
)

// grayscale indexes (for backward compatibility with termbox-go original grayscale)
var grayscale = []tcell.Color{
	16, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243,
	244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 231,
}

// Attributes are not colors, but effects (e.g.: bold, dim) which affect the display of text.
// They can be combined. Effects occupy the high bits (40+) of an Attribute,
// disjoint from the color bits masked by AttrColorBits.
const (
	AttrBold Attribute = 1 << (40 + iota)
	AttrBlink
	AttrReverse
	AttrUnderline
	AttrDim
	AttrItalic
	AttrStrikeThrough
	AttrNone Attribute = 0 // Just normal text.
)

// AttrAll represents all the text effect attributes turned on.
// NOTE(review): AttrStrikeThrough is not included here — confirm whether
// that is intentional.
const AttrAll = AttrBold | AttrBlink | AttrReverse | AttrUnderline | AttrDim | AttrItalic

// IsValidColor indicates if the Attribute is a valid color value (has been set).
func (a Attribute) IsValidColor() bool {
	return a&AttrIsValidColor != 0
}

// Hex returns the color's hexadecimal RGB 24-bit value with each component
// consisting of a single byte, ala R << 16 | G << 8 | B. If the color
// is unknown or unset, -1 is returned.
//
// This function produce the same output as `tcell.Hex()` with additional
// support for `termbox-go` colors (to 256).
func (a Attribute) Hex() int32 {
	if !a.IsValidColor() {
		return -1
	}
	tc := getTcellColor(a, OutputTrue)
	return tc.Hex()
}

// RGB returns the red, green, and blue components of the color, with
// each component represented as a value 0-255. If the color
// is unknown or unset, -1 is returned for each component.
//
// This function produce the same output as `tcell.RGB()` with additional
// support for `termbox-go` colors (to 256).
func (a Attribute) RGB() (int32, int32, int32) {
	v := a.Hex()
	if v < 0 {
		return -1, -1, -1
	}
	return (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff
}

// GetColor creates a Color from a color name (W3C name). A hex value may
// be supplied as a string in the format "#ffffff".
func GetColor(color string) Attribute {
	return Attribute(tcell.GetColor(color))
}

// Get256Color creates Attribute which stores ANSI color (0-255)
func Get256Color(color int32) Attribute {
	return Attribute(color) | AttrIsValidColor
}

// GetRGBColor creates Attribute which stores RGB color.
// Color is passed as 24bit RGB value, where R << 16 | G << 8 | B
func GetRGBColor(color int32) Attribute {
	return Attribute(color) | AttrIsValidColor | AttrIsRGBColor
}

// NewRGBColor creates Attribute which stores RGB color.
func NewRGBColor(r, g, b int32) Attribute {
	return Attribute(tcell.NewRGBColor(r, g, b))
}

// getTcellColor transform Attribute into tcell.Color
func getTcellColor(c Attribute, omode OutputMode) tcell.Color {
	// Strip the style bits; only the color portion is converted.
	c = c & AttrColorBits

	// Default color is 0 in tcell/v2 and was 0 in termbox-go, so we are good here
	if c == ColorDefault {
		return tcell.ColorDefault
	}

	tc := tcell.ColorDefault
	// Check if we have valid color
	if c.IsValidColor() {
		tc = tcell.Color(c)
	} else if c > 0 && c <= 256 {
		// It's not valid color, but it has value in range 1-256
		// This is old Attribute style of color from termbox-go (black=1, etc.)
		// convert to tcell color (black=0|ColorValid)
		tc = tcell.Color(c-1) | tcell.ColorValid
	}

	// Degrade the color to whatever the requested output mode can represent.
	switch omode {
	case OutputTrue:
		return tc
	case OutputNormal:
		tc &= tcell.Color(0xf) | tcell.ColorValid
	case Output256:
		tc &= tcell.Color(0xff) | tcell.ColorValid
	case Output216:
		tc &= tcell.Color(0xff)
		if tc > 215 {
			return tcell.ColorDefault
		}
		tc += tcell.Color(16) | tcell.ColorValid
	case OutputGrayscale:
		tc &= tcell.Color(0x1f)
		if tc > 26 {
			return tcell.ColorDefault
		}
		tc = grayscale[tc] | tcell.ColorValid
	default:
		return tcell.ColorDefault
	}

	return tc
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/gocui/edit.go
vendor/github.com/jesseduffield/gocui/edit.go
// Copyright 2014 The gocui Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gocui // Editor interface must be satisfied by gocui editors. type Editor interface { Edit(v *View, key Key, ch rune, mod Modifier) bool } // The EditorFunc type is an adapter to allow the use of ordinary functions as // Editors. If f is a function with the appropriate signature, EditorFunc(f) // is an Editor object that calls f. type EditorFunc func(v *View, key Key, ch rune, mod Modifier) bool // Edit calls f(v, key, ch, mod) func (f EditorFunc) Edit(v *View, key Key, ch rune, mod Modifier) bool { return f(v, key, ch, mod) } // DefaultEditor is the default editor. var DefaultEditor Editor = EditorFunc(SimpleEditor) // SimpleEditor is used as the default gocui editor. func SimpleEditor(v *View, key Key, ch rune, mod Modifier) bool { switch { case (key == KeyBackspace || key == KeyBackspace2) && (mod&ModAlt) != 0, key == KeyCtrlW: v.TextArea.BackSpaceWord() case key == KeyBackspace || key == KeyBackspace2 || key == KeyCtrlH: v.TextArea.BackSpaceChar() case key == KeyCtrlD || key == KeyDelete: v.TextArea.DeleteChar() case key == KeyArrowDown: v.TextArea.MoveCursorDown() case key == KeyArrowUp: v.TextArea.MoveCursorUp() case (key == KeyArrowLeft || ch == 'b') && (mod&ModAlt) != 0: v.TextArea.MoveLeftWord() case key == KeyArrowLeft || key == KeyCtrlB: v.TextArea.MoveCursorLeft() case (key == KeyArrowRight || ch == 'f') && (mod&ModAlt) != 0: v.TextArea.MoveRightWord() case key == KeyArrowRight || key == KeyCtrlF: v.TextArea.MoveCursorRight() case key == KeyEnter: v.TextArea.TypeCharacter("\n") case key == KeySpace: v.TextArea.TypeCharacter(" ") case key == KeyInsert: v.TextArea.ToggleOverwrite() case key == KeyCtrlU: v.TextArea.DeleteToStartOfLine() case key == KeyCtrlK: v.TextArea.DeleteToEndOfLine() case key == KeyCtrlA || key == KeyHome: v.TextArea.GoToStartOfLine() case key == KeyCtrlE || key == 
KeyEnd: v.TextArea.GoToEndOfLine() case key == KeyCtrlW: v.TextArea.BackSpaceWord() case key == KeyCtrlY: v.TextArea.Yank() case ch != 0: v.TextArea.TypeCharacter(string(ch)) default: return false } v.RenderTextArea() return true }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/gocui/gui_others.go
vendor/github.com/jesseduffield/gocui/gui_others.go
// Copyright 2014 The gocui Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. //go:build !windows package gocui import ( "os" "os/signal" "syscall" "unsafe" "github.com/go-errors/errors" ) // getTermWindowSize is get terminal window size on linux or unix. // When gocui run inside the docker contaienr need to check and get the window size. func (g *Gui) getTermWindowSize() (int, int, error) { var sz struct { rows uint16 cols uint16 _ [2]uint16 // to match underlying syscall; see https://github.com/awesome-gocui/gocui/issues/33 } var termw, termh int out, err := os.OpenFile("/dev/tty", os.O_RDWR, 0) if err != nil { return 0, 0, err } defer out.Close() signalCh := make(chan os.Signal, 1) signal.Notify(signalCh, syscall.SIGWINCH, syscall.SIGINT) for { _, _, _ = syscall.Syscall(syscall.SYS_IOCTL, out.Fd(), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&sz))) // check terminal window size termw, termh = int(sz.cols), int(sz.rows) if termw > 0 && termh > 0 { return termw, termh, nil } signal := <-signalCh switch signal { // when the terminal window size is changed case syscall.SIGWINCH: continue // ctrl + c to cancel case syscall.SIGINT: return 0, 0, errors.New("stop to get term window size") } } }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/gocui/tcell_driver.go
vendor/github.com/jesseduffield/gocui/tcell_driver.go
// Copyright 2020 The gocui Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gocui import ( "github.com/gdamore/tcell/v2" ) // We probably don't want this being a global variable for YOLO for now var Screen tcell.Screen // oldStyle is a representation of how a cell would be styled when we were using termbox type oldStyle struct { fg Attribute bg Attribute outputMode OutputMode } var runeReplacements = map[rune]string{ '┌': "+", '┐': "+", '└': "+", '┘': "+", '╭': "+", '╮': "+", '╰': "+", '╯': "+", '─': "-", '═': "-", '║': "|", '╔': "+", '╗': "+", '╚': "+", '╝': "+", // using a hyphen here actually looks weird. // We see these characters when in portrait mode '╶': " ", '╴': " ", '┴': "+", '┬': "+", '╷': "|", '├': "+", '│': "|", '▼': "v", '►': ">", '▲': "^", '◄': "<", } // tcellInit initializes tcell screen for use. func (g *Gui) tcellInit(runeReplacements map[rune]string) error { tcell.SetEncodingFallback(tcell.EncodingFallbackASCII) if s, e := tcell.NewScreen(); e != nil { return e } else if e = s.Init(); e != nil { return e } else { registerRuneFallbacks(s, runeReplacements) g.screen = s Screen = s return nil } } func registerRuneFallbacks(s tcell.Screen, additional map[rune]string) { for before, after := range runeReplacements { s.RegisterRuneFallback(before, after) } for before, after := range additional { s.RegisterRuneFallback(before, after) } } // tcellInitSimulation initializes tcell screen for use. func (g *Gui) tcellInitSimulation(width int, height int) error { s := tcell.NewSimulationScreen("") if e := s.Init(); e != nil { return e } else { g.screen = s Screen = s // setting to a larger value than the typical terminal size // so that during a test we're more likely to see an item to select in a view. 
s.SetSize(width, height) s.Sync() return nil } } // tcellSetCell sets the character cell at a given location to the given // content (grapheme cluster) and attributes using provided OutputMode func tcellSetCell(x, y int, ch string, fg, bg Attribute, outputMode OutputMode) { st := getTcellStyle(oldStyle{fg: fg, bg: bg, outputMode: outputMode}) Screen.Put(x, y, ch, st) } // getTcellStyle creates tcell.Style from Attributes func getTcellStyle(input oldStyle) tcell.Style { st := tcell.StyleDefault // extract colors and attributes if input.fg != ColorDefault { st = st.Foreground(getTcellColor(input.fg, input.outputMode)) st = setTcellFontEffectStyle(st, input.fg) } if input.bg != ColorDefault { st = st.Background(getTcellColor(input.bg, input.outputMode)) st = setTcellFontEffectStyle(st, input.bg) } return st } // setTcellFontEffectStyle add additional attributes to tcell.Style func setTcellFontEffectStyle(st tcell.Style, attr Attribute) tcell.Style { if attr&AttrBold != 0 { st = st.Bold(true) } if attr&AttrUnderline != 0 { st = st.Underline(true) } if attr&AttrReverse != 0 { st = st.Reverse(true) } if attr&AttrBlink != 0 { st = st.Blink(true) } if attr&AttrDim != 0 { st = st.Dim(true) } if attr&AttrItalic != 0 { st = st.Italic(true) } if attr&AttrStrikeThrough != 0 { st = st.StrikeThrough(true) } return st } // gocuiEventType represents the type of event. type gocuiEventType uint8 // GocuiEvent represents events like a keys, mouse actions, or window resize. // // The 'Mod', 'Key' and 'Ch' fields are valid if 'Type' is 'eventKey'. // The 'MouseX' and 'MouseY' fields are valid if 'Type' is 'eventMouse'. // The 'Width' and 'Height' fields are valid if 'Type' is 'eventResize'. // The 'Focused' field is valid if 'Type' is 'eventFocus'. // The 'Start' field is valid if 'Type' is 'eventPaste'. It is true for the // beginning of a paste operation, false for the end. // The 'Err' field is valid if 'Type' is 'eventError'. 
type GocuiEvent struct { Type gocuiEventType Mod Modifier Key Key Ch rune Width int Height int Err error MouseX int MouseY int Focused bool Start bool N int } // Event types. const ( eventNone gocuiEventType = iota eventKey eventResize eventMouse eventMouseMove // only used when no button is down, otherwise it's eventMouse eventFocus eventPaste eventInterrupt eventError eventRaw ) const ( NOT_DRAGGING int = iota MAYBE_DRAGGING DRAGGING ) var ( lastMouseKey tcell.ButtonMask = tcell.ButtonNone lastMouseMod tcell.ModMask = tcell.ModNone dragState int = NOT_DRAGGING lastX int = 0 lastY int = 0 ) // this wrapper struct has public keys so we can easily serialize/deserialize to JSON type TcellKeyEventWrapper struct { Timestamp int64 Mod tcell.ModMask Key tcell.Key Ch rune } func NewTcellKeyEventWrapper(event *tcell.EventKey, timestamp int64) *TcellKeyEventWrapper { return &TcellKeyEventWrapper{ Timestamp: timestamp, Mod: event.Modifiers(), Key: event.Key(), Ch: event.Rune(), } } func (wrapper TcellKeyEventWrapper) toTcellEvent() tcell.Event { return tcell.NewEventKey(wrapper.Key, wrapper.Ch, wrapper.Mod) } type TcellMouseEventWrapper struct { Timestamp int64 X int Y int ButtonMask tcell.ButtonMask ModMask tcell.ModMask } func NewTcellMouseEventWrapper(event *tcell.EventMouse, timestamp int64) *TcellMouseEventWrapper { x, y := event.Position() return &TcellMouseEventWrapper{ Timestamp: timestamp, X: x, Y: y, ButtonMask: event.Buttons(), ModMask: event.Modifiers(), } } func (wrapper TcellMouseEventWrapper) toTcellEvent() tcell.Event { return tcell.NewEventMouse(wrapper.X, wrapper.Y, wrapper.ButtonMask, wrapper.ModMask) } type TcellResizeEventWrapper struct { Timestamp int64 Width int Height int } func NewTcellResizeEventWrapper(event *tcell.EventResize, timestamp int64) *TcellResizeEventWrapper { w, h := event.Size() return &TcellResizeEventWrapper{ Timestamp: timestamp, Width: w, Height: h, } } func (wrapper TcellResizeEventWrapper) toTcellEvent() tcell.Event { return 
tcell.NewEventResize(wrapper.Width, wrapper.Height) } // pollEvent get tcell.Event and transform it into gocuiEvent func (g *Gui) pollEvent() GocuiEvent { var tev tcell.Event if g.playRecording { select { case ev := <-g.ReplayedEvents.Keys: tev = (ev).toTcellEvent() case ev := <-g.ReplayedEvents.Resizes: tev = (ev).toTcellEvent() case ev := <-g.ReplayedEvents.MouseEvents: tev = (ev).toTcellEvent() } } else { tev = Screen.PollEvent() } switch tev := tev.(type) { case *tcell.EventInterrupt: return GocuiEvent{Type: eventInterrupt} case *tcell.EventResize: w, h := tev.Size() return GocuiEvent{Type: eventResize, Width: w, Height: h} case *tcell.EventKey: k := tev.Key() ch := rune(0) if k == tcell.KeyRune { k = 0 // if rune remove key (so it can match rune instead of key) ch = tev.Rune() if ch == ' ' { // special handling for spacebar k = 32 // tcell keys ends at 31 or starts at 256 ch = rune(0) } } mod := tev.Modifiers() // remove control modifier and setup special handling of ctrl+spacebar, etc. if mod == tcell.ModCtrl && k == 32 { mod = 0 ch = rune(0) k = tcell.KeyCtrlSpace } else if mod == tcell.ModShift && k == tcell.KeyUp { mod = 0 ch = rune(0) k = tcell.KeyF62 } else if mod == tcell.ModShift && k == tcell.KeyDown { mod = 0 ch = rune(0) k = tcell.KeyF63 } else if mod == tcell.ModCtrl || mod == tcell.ModShift { // remove Ctrl or Shift if specified // - shift - will be translated to the final code of rune // - ctrl - is translated in the key mod = 0 } else if mod == tcell.ModAlt && k == tcell.KeyEnter { // for the sake of convenience I'm having a KeyAltEnter key. I will likely // regret this laziness in the future. We're arbitrarily mapping that to tcell's // KeyF64. 
mod = 0 k = tcell.KeyF64 } return GocuiEvent{ Type: eventKey, Key: Key(k), Ch: ch, Mod: Modifier(mod), } case *tcell.EventMouse: x, y := tev.Position() button := tev.Buttons() mouseKey := MouseRelease mouseMod := ModNone // process mouse wheel if button&tcell.WheelUp != 0 { mouseKey = MouseWheelUp } if button&tcell.WheelDown != 0 { mouseKey = MouseWheelDown } if button&tcell.WheelLeft != 0 { mouseKey = MouseWheelLeft } if button&tcell.WheelRight != 0 { mouseKey = MouseWheelRight } wheeling := mouseKey == MouseWheelUp || mouseKey == MouseWheelDown || mouseKey == MouseWheelLeft || mouseKey == MouseWheelRight // process button events (not wheel events) button &= tcell.ButtonMask(0xff) if button != tcell.ButtonNone && lastMouseKey == tcell.ButtonNone { lastMouseKey = button lastMouseMod = tev.Modifiers() switch button { case tcell.ButtonPrimary: mouseKey = MouseLeft dragState = MAYBE_DRAGGING lastX = x lastY = y case tcell.ButtonSecondary: mouseKey = MouseRight case tcell.ButtonMiddle: mouseKey = MouseMiddle default: } } switch tev.Buttons() { case tcell.ButtonNone: if lastMouseKey != tcell.ButtonNone { switch lastMouseKey { case tcell.ButtonPrimary: dragState = NOT_DRAGGING case tcell.ButtonSecondary: case tcell.ButtonMiddle: default: } mouseMod = Modifier(lastMouseMod) lastMouseMod = tcell.ModNone lastMouseKey = tcell.ButtonNone } default: } if !wheeling { switch dragState { case NOT_DRAGGING: return GocuiEvent{ Type: eventMouseMove, MouseX: x, MouseY: y, } // if we haven't released the left mouse button and we've moved the cursor then we're dragging case MAYBE_DRAGGING: if x != lastX || y != lastY { dragState = DRAGGING } case DRAGGING: mouseMod = ModMotion mouseKey = MouseLeft } } return GocuiEvent{ Type: eventMouse, MouseX: x, MouseY: y, Key: mouseKey, Ch: 0, Mod: mouseMod, } case *tcell.EventFocus: return GocuiEvent{ Type: eventFocus, Focused: tev.Focused, } case *tcell.EventPaste: return GocuiEvent{ Type: eventPaste, Start: tev.Start(), } default: return 
GocuiEvent{Type: eventNone} } }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/gocui/text_area.go
vendor/github.com/jesseduffield/gocui/text_area.go
package gocui

import (
	"regexp"
	"slices"
	"strings"

	"github.com/rivo/uniseg"
)

const (
	// Characters treated as whitespace for word-wise cursor movement.
	WHITESPACES = " \t"
	// Characters that delimit words (in addition to whitespace) for word-wise movement.
	WORD_SEPARATORS = "*?_+-.[]~=/&;!#$%^(){}<>"
)

// TextAreaCell is one rendered cell of the text area.
type TextAreaCell struct {
	char         string // string because it could be a multi-rune grapheme cluster
	width        int    // display width of the cluster in terminal columns
	x, y         int    // cell coordinates
	contentIndex int    // byte index into the original content
}

// returns the cursor x,y position after this cell
func (c *TextAreaCell) nextCursorXY() (int, int) {
	if c.char == "\n" {
		// a newline moves the cursor to the start of the next row
		return 0, c.y + 1
	}

	return c.x + c.width, c.y
}

// TextArea is an editable text buffer, kept both as the raw content string and
// as a derived slice of cells (which includes soft line breaks when auto-wrap
// is enabled).
type TextArea struct {
	content   string         // the raw, unwrapped text
	cells     []TextAreaCell // derived cell representation of content
	cursor    int            // position in content, as an index into the byte array
	overwrite bool           // when true, typing replaces the cluster under the cursor
	clipboard string         // text captured by delete operations, re-inserted by Yank

	AutoWrap      bool // whether to soft-wrap lines at AutoWrapWidth
	AutoWrapWidth int  // column at which to soft-wrap when AutoWrap is on
}

// stringToTextAreaCells splits str into grapheme clusters, producing one cell
// per cluster.
func stringToTextAreaCells(str string) []TextAreaCell {
	result := make([]TextAreaCell, 0, len(str))
	contentIndex := 0
	state := -1
	for len(str) > 0 {
		var c string
		var w int
		c, str, w, state = uniseg.FirstGraphemeClusterInString(str, state)
		// only set char, width, and contentIndex; x and y will be set later
		result = append(result, TextAreaCell{char: c, width: w, contentIndex: contentIndex})
		contentIndex += len(c)
	}
	return result
}

// Returns the indices in content where soft line breaks occur due to auto-wrapping to the given width.
func AutoWrapContent(content string, autoWrapWidth int) []int { _, softLineBreakIndices := contentToCells(content, autoWrapWidth) return softLineBreakIndices } func contentToCells(content string, autoWrapWidth int) ([]TextAreaCell, []int) { estimatedNumberOfSoftLineBreaks := 0 if autoWrapWidth > 0 { estimatedNumberOfSoftLineBreaks = len(content) / autoWrapWidth } softLineBreakIndices := make([]int, 0, estimatedNumberOfSoftLineBreaks) result := make([]TextAreaCell, 0, len(content)+estimatedNumberOfSoftLineBreaks) startOfLine := 0 currentLineWidth := 0 indexOfLastWhitespace := -1 var footNoteMatcher footNoteMatcher cells := stringToTextAreaCells(content) y := 0 appendCellsSinceLineStart := func(to int) { x := 0 for i := startOfLine; i < to; i++ { cells[i].x = x cells[i].y = y x += cells[i].width } result = append(result, cells[startOfLine:to]...) } for currentPos, c := range cells { if c.char == "\n" { appendCellsSinceLineStart(currentPos + 1) y++ startOfLine = currentPos + 1 indexOfLastWhitespace = -1 currentLineWidth = 0 footNoteMatcher.reset() } else { currentLineWidth += c.width if c.char == " " && !footNoteMatcher.isFootNote() { indexOfLastWhitespace = currentPos + 1 } else if autoWrapWidth > 0 && currentLineWidth > autoWrapWidth && indexOfLastWhitespace >= 0 { wrapAt := indexOfLastWhitespace appendCellsSinceLineStart(wrapAt) contentIndex := cells[wrapAt].contentIndex y++ result = append(result, TextAreaCell{char: "\n", width: 1, contentIndex: contentIndex, x: 0, y: y}) softLineBreakIndices = append(softLineBreakIndices, contentIndex) startOfLine = wrapAt indexOfLastWhitespace = -1 currentLineWidth = 0 for _, c1 := range cells[startOfLine : currentPos+1] { currentLineWidth += c1.width } footNoteMatcher.reset() } footNoteMatcher.addCharacter(c.char) } } appendCellsSinceLineStart(len(cells)) return result, softLineBreakIndices } var footNoteRe = regexp.MustCompile(`^\[\d+\]:\s*$`) type footNoteMatcher struct { lineStr strings.Builder didFailToMatch bool } func 
(self *footNoteMatcher) addCharacter(chr string) { if self.didFailToMatch { // don't bother tracking the rune if we know it can't possibly match any more return } if self.lineStr.Len() == 0 && chr != "[" { // fail early if the first rune of a line isn't a '['; this is mainly to avoid a (possibly // expensive) regex match self.didFailToMatch = true return } self.lineStr.WriteString(chr) } func (self *footNoteMatcher) isFootNote() bool { if self.didFailToMatch { return false } if footNoteRe.MatchString(self.lineStr.String()) { // it's a footnote, so treat spaces as non-breaking. It's important not to reset the matcher // here, because there could be multiple spaces after a footnote. return true } // no need to check again for this line self.didFailToMatch = true return false } func (self *footNoteMatcher) reset() { self.lineStr.Reset() self.didFailToMatch = false } func (self *TextArea) updateCells() { width := self.AutoWrapWidth if !self.AutoWrap { width = -1 } self.cells, _ = contentToCells(self.content, width) } func (self *TextArea) typeCharacter(ch string) { widthToDelete := 0 if self.overwrite && !self.atEnd() { s, _, _, _ := uniseg.FirstGraphemeClusterInString(self.content[self.cursor:], -1) widthToDelete = len(s) } self.content = self.content[:self.cursor] + ch + self.content[self.cursor+widthToDelete:] self.cursor += len(ch) } func (self *TextArea) TypeCharacter(ch string) { self.typeCharacter(ch) self.updateCells() } func (self *TextArea) BackSpaceChar() { if self.cursor == 0 { return } cellCursor := self.contentCursorToCellCursor(self.cursor) widthToDelete := len(self.cells[cellCursor-1].char) oldCursor := self.cursor self.cursor -= widthToDelete self.content = self.content[:self.cursor] + self.content[oldCursor:] self.updateCells() } func (self *TextArea) DeleteChar() { if self.atEnd() { return } s, _, _, _ := uniseg.FirstGraphemeClusterInString(self.content[self.cursor:], -1) widthToDelete := len(s) self.content = self.content[:self.cursor] + 
self.content[self.cursor+widthToDelete:] self.updateCells() } func (self *TextArea) MoveCursorLeft() { if self.cursor == 0 { return } cellCursor := self.contentCursorToCellCursor(self.cursor) self.cursor -= len(self.cells[cellCursor-1].char) } func (self *TextArea) MoveCursorRight() { if self.cursor == len(self.content) { return } s, _, _, _ := uniseg.FirstGraphemeClusterInString(self.content[self.cursor:], -1) self.cursor += len(s) } func (self *TextArea) newCursorForMoveLeftWord() int { if self.cursor == 0 { return 0 } if self.atLineStart() { return self.cursor - 1 } cellCursor := self.contentCursorToCellCursor(self.cursor) for cellCursor > 0 && (self.isSoftLineBreak(cellCursor-1) || strings.Contains(WHITESPACES, self.cells[cellCursor-1].char)) { cellCursor-- } separators := false for cellCursor > 0 && strings.Contains(WORD_SEPARATORS, self.cells[cellCursor-1].char) { cellCursor-- separators = true } if !separators { for cellCursor > 0 && self.cells[cellCursor-1].char != "\n" && !strings.Contains(WHITESPACES+WORD_SEPARATORS, self.cells[cellCursor-1].char) { cellCursor-- } } return self.cellCursorToContentCursor(cellCursor) } func (self *TextArea) MoveLeftWord() { self.cursor = self.newCursorForMoveLeftWord() } func (self *TextArea) MoveRightWord() { if self.atEnd() { return } if self.atLineEnd() { self.cursor++ return } cellCursor := self.contentCursorToCellCursor(self.cursor) for cellCursor < len(self.cells) && (self.isSoftLineBreak(cellCursor) || strings.Contains(WHITESPACES, self.cells[cellCursor].char)) { cellCursor++ } separators := false for cellCursor < len(self.cells) && strings.Contains(WORD_SEPARATORS, self.cells[cellCursor].char) { cellCursor++ separators = true } if !separators { for cellCursor < len(self.cells) && self.cells[cellCursor].char != "\n" && !strings.Contains(WHITESPACES+WORD_SEPARATORS, self.cells[cellCursor].char) { cellCursor++ } } self.cursor = self.cellCursorToContentCursor(cellCursor) } func (self *TextArea) MoveCursorUp() { x, y := 
self.GetCursorXY() self.SetCursor2D(x, y-1) } func (self *TextArea) MoveCursorDown() { x, y := self.GetCursorXY() self.SetCursor2D(x, y+1) } func (self *TextArea) GetContent() string { var b strings.Builder for _, c := range self.cells { b.WriteString(c.char) } return b.String() } func (self *TextArea) GetUnwrappedContent() string { return self.content } func (self *TextArea) ToggleOverwrite() { self.overwrite = !self.overwrite } func (self *TextArea) atEnd() bool { return self.cursor == len(self.content) } func (self *TextArea) DeleteToStartOfLine() { // copying vim's logic: if you're at the start of the line, you delete the newline // character and go to the end of the previous line if self.atLineStart() { if self.cursor == 0 { return } self.content = self.content[:self.cursor-1] + self.content[self.cursor:] self.cursor-- self.updateCells() return } // otherwise, if we're at a soft line start, skip left past the soft line // break, so we'll end up deleting the previous line. This seems like the // only reasonable behavior in this case, as you can't delete just the soft // line break. if self.atSoftLineStart() { self.cursor-- } // otherwise, you delete everything up to the start of the current line, without // deleting the newline character newlineIndex := self.closestNewlineOnLeft() self.clipboard = self.content[newlineIndex+1 : self.cursor] self.content = self.content[:newlineIndex+1] + self.content[self.cursor:] self.updateCells() self.cursor = newlineIndex + 1 } func (self *TextArea) DeleteToEndOfLine() { if self.atEnd() { return } // if we're at the end of the line, delete just the newline character if self.atLineEnd() { self.content = self.content[:self.cursor] + self.content[self.cursor+1:] self.updateCells() return } // otherwise, if we're at a soft line end, skip right past the soft line // break, so we'll end up deleting the next line. This seems like the // only reasonable behavior in this case, as you can't delete just the soft // line break. 
if self.atSoftLineEnd() { self.cursor++ } lineEndIndex := self.closestNewlineOnRight() self.clipboard = self.content[self.cursor:lineEndIndex] self.content = self.content[:self.cursor] + self.content[lineEndIndex:] self.updateCells() } func (self *TextArea) GoToStartOfLine() { if self.atSoftLineStart() { return } newlineIndex := self.closestNewlineOnLeft() self.cursor = newlineIndex + 1 } func (self *TextArea) closestNewlineOnLeft() int { cellCursor := self.contentCursorToCellCursor(self.cursor) newlineCellIndex := -1 for i, c := range self.cells[0:cellCursor] { if c.char == "\n" { newlineCellIndex = i } } if newlineCellIndex == -1 { return -1 } newlineContentIndex := self.cells[newlineCellIndex].contentIndex if self.content[newlineContentIndex] != '\n' { newlineContentIndex-- } return newlineContentIndex } func (self *TextArea) GoToEndOfLine() { if self.atEnd() { return } self.cursor = self.closestNewlineOnRight() self.moveLeftFromSoftLineBreak() } func (self *TextArea) closestNewlineOnRight() int { cellCursor := self.contentCursorToCellCursor(self.cursor) for i, c := range self.cells[cellCursor:] { if c.char == "\n" { return self.cellCursorToContentCursor(cellCursor + i) } } return len(self.content) } func (self *TextArea) moveLeftFromSoftLineBreak() { // If the end of line is a soft line break, we need to move left by one so // that we end up at the last whitespace before the line break. Otherwise // we'd be at the start of the next line, since the newline character // doesn't really exist in the real content. 
if self.cursor < len(self.content) && self.content[self.cursor] != '\n' { self.cursor-- } } func (self *TextArea) atLineStart() bool { return self.cursor == 0 || (len(self.content) > self.cursor-1 && self.content[self.cursor-1] == '\n') } func (self *TextArea) isSoftLineBreak(cellCursor int) bool { cell := self.cells[cellCursor] return cell.char == "\n" && self.content[cell.contentIndex] != '\n' } func (self *TextArea) atSoftLineStart() bool { cellCursor := self.contentCursorToCellCursor(self.cursor) return cellCursor == 0 || (len(self.cells) > cellCursor-1 && self.cells[cellCursor-1].char == "\n") } func (self *TextArea) atLineEnd() bool { return self.atEnd() || (len(self.content) > self.cursor && self.content[self.cursor] == '\n') } func (self *TextArea) atSoftLineEnd() bool { cellCursor := self.contentCursorToCellCursor(self.cursor) return cellCursor == len(self.cells) || (len(self.cells) > cellCursor+1 && self.cells[cellCursor+1].char == "\n") } func (self *TextArea) BackSpaceWord() { newCursor := self.newCursorForMoveLeftWord() if newCursor == self.cursor { return } clipboard := self.content[newCursor:self.cursor] if clipboard != "\n" { self.clipboard = clipboard } self.content = self.content[:newCursor] + self.content[self.cursor:] self.cursor = newCursor self.updateCells() } func (self *TextArea) Yank() { self.TypeString(self.clipboard) } func (self *TextArea) contentCursorToCellCursor(origCursor int) int { idx, _ := slices.BinarySearchFunc(self.cells, origCursor, func(cell TextAreaCell, cursor int) int { return cell.contentIndex - cursor }) for idx < len(self.cells)-1 && self.cells[idx+1].contentIndex == origCursor { idx++ } return idx } func (self *TextArea) cellCursorToContentCursor(cellCursor int) int { if cellCursor >= len(self.cells) { return len(self.content) } return self.cells[cellCursor].contentIndex } func (self *TextArea) GetCursorXY() (int, int) { if len(self.cells) == 0 { return 0, 0 } cellCursor := self.contentCursorToCellCursor(self.cursor) 
if cellCursor >= len(self.cells) { return self.cells[len(self.cells)-1].nextCursorXY() } if cellCursor > 0 && self.cells[cellCursor].char == "\n" { return self.cells[cellCursor-1].nextCursorXY() } cell := self.cells[cellCursor] return cell.x, cell.y } // takes an x,y position and maps it to a 1D cursor position func (self *TextArea) SetCursor2D(x int, y int) { if y < 0 { y = 0 } if x < 0 { x = 0 } newCursor := 0 for _, c := range self.cells { if x <= 0 && y == 0 { self.cursor = self.cellCursorToContentCursor(newCursor) if self.cells[newCursor].char == "\n" { self.moveLeftFromSoftLineBreak() } return } if c.char == "\n" { if y == 0 { self.cursor = self.cellCursorToContentCursor(newCursor) self.moveLeftFromSoftLineBreak() return } y-- } else if y == 0 { x -= c.width } newCursor++ } // if we weren't able to run-down our arg, the user is trying to move out of // bounds so we'll just return if y > 0 { return } self.cursor = self.cellCursorToContentCursor(newCursor) } func (self *TextArea) Clear() { self.content = "" self.cells = nil self.cursor = 0 } func (self *TextArea) TypeString(str string) { state := -1 for str != "" { var chr string chr, str, _, state = uniseg.FirstGraphemeClusterInString(str, state) self.typeCharacter(chr) } self.updateCells() }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/gocui/scrollbar.go
vendor/github.com/jesseduffield/gocui/scrollbar.go
package gocui import "math" // returns start and height of scrollbar // `max` is the maximum possible value of `position` func calcScrollbar(listSize int, pageSize int, position int, scrollAreaSize int) (int, int) { height := calcScrollbarHeight(listSize, pageSize, scrollAreaSize) // assume we can't scroll past the last item maxPosition := listSize - pageSize if maxPosition <= 0 { return 0, height } if position == maxPosition { return scrollAreaSize - height, height } // we only want to show the scrollbar at the top or bottom positions if we're at the end. Hence the .Ceil (for moving the scrollbar once we scroll down) and the -1 (for pretending there's a smaller range than we actually have, with the above condition ensuring we snap to the bottom once we're at the end of the list) start := int(math.Ceil(((float64(position) / float64(maxPosition)) * float64(scrollAreaSize-height-1)))) return start, height } func calcScrollbarHeight(listSize int, pageSize int, scrollAreaSize int) int { if pageSize >= listSize { return scrollAreaSize } return int((float64(pageSize) / float64(listSize)) * float64(scrollAreaSize)) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/gocui/task_manager.go
vendor/github.com/jesseduffield/gocui/task_manager.go
package gocui

import "sync"

// TaskManager tracks whether the program is busy (i.e. either something is
// happening on the main goroutine or a worker goroutine). Used by integration
// tests to wait until the program is idle before progressing.
type TaskManager struct {
	// each of these listeners will be notified when the program goes from busy to idle
	idleListeners []chan struct{}
	tasks         map[int]Task
	// auto-incrementing id for new tasks
	nextId int

	// guards idleListeners, tasks and nextId
	mutex sync.Mutex
}

// newTaskManager returns a TaskManager with no tasks and no listeners.
func newTaskManager() *TaskManager {
	return &TaskManager{
		tasks:         make(map[int]Task),
		idleListeners: []chan struct{}{},
	}
}

// NewTask registers a new busy task and returns it. The task removes itself
// from the manager when its Done method is called.
func (self *TaskManager) NewTask() *TaskImpl {
	self.mutex.Lock()
	defer self.mutex.Unlock()

	self.nextId++
	taskId := self.nextId

	onDone := func() { self.delete(taskId) }
	task := &TaskImpl{id: taskId, busy: true, onDone: onDone, withMutex: self.withMutex}
	self.tasks[taskId] = task

	return task
}

// addIdleListener registers a channel that receives a value each time the
// program transitions from busy to idle.
func (self *TaskManager) addIdleListener(c chan struct{}) {
	// Take the mutex: withMutex iterates over idleListeners, so an unguarded
	// append here would be a data race with concurrently finishing tasks.
	self.mutex.Lock()
	defer self.mutex.Unlock()

	self.idleListeners = append(self.idleListeners, c)
}

// withMutex runs f under the manager's mutex and, if no task is busy
// afterwards, notifies the idle listeners.
func (self *TaskManager) withMutex(f func()) {
	self.mutex.Lock()
	defer self.mutex.Unlock()

	f()

	// Check if all tasks are done
	for _, task := range self.tasks {
		if task.isBusy() {
			return
		}
	}

	// If we get here, all tasks are done, so
	// notify listeners that the program is idle.
	// NOTE(review): these sends happen while holding the mutex, so listeners
	// must be ready to receive promptly or the manager blocks — confirm that
	// all listeners drain their channels.
	for _, listener := range self.idleListeners {
		listener <- struct{}{}
	}
}

// delete removes a finished task; if it was the last busy one, listeners are
// notified via withMutex.
func (self *TaskManager) delete(taskId int) {
	self.withMutex(func() {
		delete(self.tasks, taskId)
	})
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/gocui/task.go
vendor/github.com/jesseduffield/gocui/task.go
package gocui // A task represents the fact that the program is busy doing something, which // is useful for integration tests which only want to proceed when the program // is idle. type Task interface { Done() Pause() Continue() // not exporting because we don't need to isBusy() bool } type TaskImpl struct { id int busy bool onDone func() withMutex func(func()) } func (self *TaskImpl) Done() { self.onDone() } func (self *TaskImpl) Pause() { self.withMutex(func() { self.busy = false }) } func (self *TaskImpl) Continue() { self.withMutex(func() { self.busy = true }) } func (self *TaskImpl) isBusy() bool { return self.busy } type TaskStatus int const ( TaskStatusBusy TaskStatus = iota TaskStatusPaused TaskStatusDone ) type FakeTask struct { status TaskStatus } func NewFakeTask() *FakeTask { return &FakeTask{ status: TaskStatusBusy, } } func (self *FakeTask) Done() { self.status = TaskStatusDone } func (self *FakeTask) Pause() { self.status = TaskStatusPaused } func (self *FakeTask) Continue() { self.status = TaskStatusBusy } func (self *FakeTask) isBusy() bool { return self.status == TaskStatusBusy } func (self *FakeTask) Status() TaskStatus { return self.status } func (self *FakeTask) FormatStatus() string { return formatTaskStatus(self.status) } func formatTaskStatus(status TaskStatus) string { switch status { case TaskStatusBusy: return "busy" case TaskStatusPaused: return "paused" case TaskStatusDone: return "done" } return "unknown" }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/gocui/gui_windows.go
vendor/github.com/jesseduffield/gocui/gui_windows.go
// Copyright 2014 The gocui Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build windows

package gocui

import (
	"os"
	"syscall"
	"unsafe"
)

// Scalar aliases mirroring the Win32 WCHAR/SHORT/DWORD/WORD types.
type (
	wchar uint16
	short int16
	dword uint32
	word  uint16
)

// coord mirrors the Win32 COORD struct.
type coord struct {
	x short
	y short
}

// smallRect mirrors the Win32 SMALL_RECT struct.
type smallRect struct {
	left   short
	top    short
	right  short
	bottom short
}

// consoleScreenBufferInfo mirrors the Win32 CONSOLE_SCREEN_BUFFER_INFO struct.
// Field order and types must match the Win32 layout exactly, because a pointer
// to this struct is handed straight to GetConsoleScreenBufferInfo below.
type consoleScreenBufferInfo struct {
	size              coord
	cursorPosition    coord
	attributes        word
	window            smallRect
	maximumWindowSize coord
}

var (
	kernel32                       = syscall.NewLazyDLL("kernel32.dll")
	procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
)

// getTermWindowSize returns the terminal window size on Windows. It queries
// GetConsoleScreenBufferInfo for stdout and derives the visible window's
// width and height (the coordinates are inclusive, hence the +1).
func (g *Gui) getTermWindowSize() (int, int, error) {
	var csbi consoleScreenBufferInfo
	r1, _, err := procGetConsoleScreenBufferInfo.Call(os.Stdout.Fd(), uintptr(unsafe.Pointer(&csbi)))
	if r1 == 0 {
		// a zero return value signals failure; err carries the Win32 error
		return 0, 0, err
	}
	return int(csbi.window.right - csbi.window.left + 1), int(csbi.window.bottom - csbi.window.top + 1), nil
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/gocui/doc.go
vendor/github.com/jesseduffield/gocui/doc.go
// Copyright 2014 The gocui Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. /* Package gocui allows to create console user interfaces. Create a new GUI: g, err := gocui.NewGui(gocui.OutputNormal, false) if err != nil { // handle error } defer g.Close() // Set GUI managers and key bindings // ... if err := g.MainLoop(); err != nil && !gocui.IsQuit(err) { // handle error } Set GUI managers: g.SetManager(mgr1, mgr2) Managers are in charge of GUI's layout and can be used to build widgets. On each iteration of the GUI's main loop, the Layout function of each configured manager is executed. Managers are used to set-up and update the application's main views, being possible to freely change them during execution. Also, it is important to mention that a main loop iteration is executed on each reported event (key-press, mouse event, window resize, etc). GUIs are composed by Views, you can think of it as buffers. Views implement the io.ReadWriter interface, so you can just write to them if you want to modify their content. The same is valid for reading. Create and initialize a view with absolute coordinates: if v, err := g.SetView("viewname", 2, 2, 22, 7, 0); err != nil { if !gocui.IsUnknownView(err) { // handle error } fmt.Fprintln(v, "This is a new view") // ... } Views can also be created using relative coordinates: maxX, maxY := g.Size() if v, err := g.SetView("viewname", maxX/2-30, maxY/2, maxX/2+30, maxY/2+2, 0); err != nil { // ... 
} Configure keybindings: if err := g.SetKeybinding("viewname", gocui.KeyEnter, gocui.ModNone, fcn); err != nil { // handle error } gocui implements full mouse support that can be enabled with: g.Mouse = true Mouse events are handled like any other keybinding: if err := g.SetKeybinding("viewname", gocui.MouseLeft, gocui.ModNone, fcn); err != nil { // handle error } IMPORTANT: Views can only be created, destroyed or updated in three ways: from the Layout function within managers, from keybinding callbacks or via *Gui.Update(). The reason for this is that it allows gocui to be concurrent-safe. So, if you want to update your GUI from a goroutine, you must use *Gui.Update(). For example: g.Update(func(g *gocui.Gui) error { v, err := g.View("viewname") if err != nil { // handle error } v.Clear() fmt.Fprintln(v, "Writing from different goroutines") return nil }) By default, gocui provides a basic editing mode. This mode can be extended and customized creating a new Editor and assigning it to *View.Editor: type Editor interface { Edit(v *View, key Key, ch rune, mod Modifier) } DefaultEditor can be taken as example to create your own custom Editor: var DefaultEditor Editor = EditorFunc(simpleEditor) func simpleEditor(v *View, key Key, ch rune, mod Modifier) { switch { case ch != 0 && mod == 0: v.EditWrite(ch) case key == KeySpace: v.EditWrite(' ') case key == KeyBackspace || key == KeyBackspace2: v.EditDelete(true) // ... } } Colored text: Views allow to add colored text using ANSI colors. For example: fmt.Fprintln(v, "\x1b[0;31mHello world") For more information, see the examples in folder "_examples/". */ package gocui
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/gocui/escape.go
vendor/github.com/jesseduffield/gocui/escape.go
// Copyright 2014 The gocui Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gocui

import (
	"strconv"
	"strings"

	"github.com/go-errors/errors"
)

// escapeInterpreter is a state machine fed one character at a time; it
// recognizes CSI escape sequences (SGR colors/styles, erase-in-line) and
// OSC sequences beginning with '8' (hyperlinks) in text written to a view.
type escapeInterpreter struct {
	state                  escapeState
	curch                  string   // the character currently being examined
	csiParam               []string // accumulated CSI parameters, one string per ';'-separated value
	curFgColor, curBgColor Attribute
	mode                   OutputMode
	instruction            instruction     // pending instruction (e.g. erase-in-line) for the caller to act on
	hyperlink              strings.Builder // URL accumulated from an OSC '8' sequence
}

type (
	escapeState int
	fontEffect  int
)

// instruction is a marker interface for actions the interpreter asks the
// caller to perform.
type instruction interface{ isInstruction() }

// eraseInLineFromCursor asks the caller to clear from the cursor to the end
// of the line (CSI 'K' with parameter 0).
type eraseInLineFromCursor struct{}

func (self eraseInLineFromCursor) isInstruction() {}

// noInstruction means nothing is pending.
type noInstruction struct{}

func (self noInstruction) isInstruction() {}

const (
	// Parser states (see parseOne for the transitions).
	stateNone escapeState = iota
	stateEscape
	stateCSI
	stateParams
	stateOSC
	stateOSCWaitForParams
	stateOSCParams
	stateOSCHyperlink
	stateOSCEndEscape
	stateOSCSkipUnknown

	// SGR font-effect parameter codes.
	bold      fontEffect = 1
	faint     fontEffect = 2
	italic    fontEffect = 3
	underline fontEffect = 4
	blink     fontEffect = 5
	reverse   fontEffect = 7
	strike    fontEffect = 9

	// SGR color parameter codes.
	setForegroundColor     int = 38
	defaultForegroundColor int = 39
	setBackgroundColor     int = 48
	defaultBackgroundColor int = 49
)

var (
	errNotCSI        = errors.New("Not a CSI escape sequence")
	errCSIParseError = errors.New("CSI escape sequence parsing error")
	errCSITooLong    = errors.New("CSI escape sequence is too long")
	errOSCParseError = errors.New("OSC escape sequence parsing error")
)

// characters in case of error will output the non-parsed characters as a string.
// It reconstructs the raw bytes consumed so far for the current (failed) sequence.
func (ei *escapeInterpreter) characters() []string {
	switch ei.state {
	case stateNone:
		return []string{"\x1b"}
	case stateEscape:
		return []string{"\x1b", ei.curch}
	case stateCSI:
		return []string{"\x1b", "[", ei.curch}
	case stateParams:
		ret := []string{"\x1b", "["}
		for _, s := range ei.csiParam {
			ret = append(ret, s)
			ret = append(ret, ";")
		}
		return append(ret, ei.curch)
	default:
	}
	return nil
}

// newEscapeInterpreter returns an escapeInterpreter that will be able to parse
// terminal escape sequences.
func newEscapeInterpreter(mode OutputMode) *escapeInterpreter {
	ei := &escapeInterpreter{
		state:       stateNone,
		curFgColor:  ColorDefault,
		curBgColor:  ColorDefault,
		mode:        mode,
		instruction: noInstruction{},
	}
	return ei
}

// reset sets the escapeInterpreter in initial state.
func (ei *escapeInterpreter) reset() {
	ei.state = stateNone
	ei.curFgColor = ColorDefault
	ei.curBgColor = ColorDefault
	ei.csiParam = nil
}

// instructionRead marks the pending instruction as consumed.
func (ei *escapeInterpreter) instructionRead() {
	ei.instruction = noInstruction{}
}

// parseOne parses a character (grapheme cluster). If isEscape is true, it means that the character
// is part of an escape sequence, and as such should not be printed verbatim. Otherwise, it's not an
// escape sequence.
func (ei *escapeInterpreter) parseOne(ch []byte) (isEscape bool, err error) {
	// Sanity checks
	if len(ei.csiParam) > 20 {
		return false, errCSITooLong
	}
	if len(ei.csiParam) > 0 && len(ei.csiParam[len(ei.csiParam)-1]) > 255 {
		return false, errCSITooLong
	}

	ei.curch = string(ch)

	switch ei.state {
	case stateNone:
		if characterEquals(ch, 0x1b) {
			ei.state = stateEscape
			return true, nil
		}
		return false, nil
	case stateEscape:
		switch {
		case characterEquals(ch, '['):
			ei.state = stateCSI
			return true, nil
		case characterEquals(ch, ']'):
			ei.state = stateOSC
			return true, nil
		default:
			return false, errNotCSI
		}
	case stateCSI:
		switch {
		case len(ch) == 1 && ch[0] >= '0' && ch[0] <= '9':
			ei.csiParam = append(ei.csiParam, "")
		case characterEquals(ch, 'm'):
			// "ESC[m" with no parameters is equivalent to "ESC[0m" (reset).
			ei.csiParam = append(ei.csiParam, "0")
		case characterEquals(ch, 'K'):
			// fall through
		default:
			return false, errCSIParseError
		}
		ei.state = stateParams
		fallthrough
	case stateParams:
		switch {
		case len(ch) == 1 && ch[0] >= '0' && ch[0] <= '9':
			ei.csiParam[len(ei.csiParam)-1] += string(ch)
			return true, nil
		case characterEquals(ch, ';'):
			ei.csiParam = append(ei.csiParam, "")
			return true, nil
		case characterEquals(ch, 'm'):
			// SGR terminator: apply the accumulated style/color parameters.
			if err := ei.outputCSI(); err != nil {
				return false, errCSIParseError
			}
			ei.state = stateNone
			ei.csiParam = nil
			return true, nil
		case characterEquals(ch, 'K'):
			// EL (erase in line); only Ps == 0 (cursor to end) is supported.
			p := 0
			if len(ei.csiParam) != 0 && ei.csiParam[0] != "" {
				p, err = strconv.Atoi(ei.csiParam[0])
				if err != nil {
					return false, errCSIParseError
				}
			}
			if p == 0 {
				ei.instruction = eraseInLineFromCursor{}
			} else {
				// non-zero values of P not supported
				ei.instruction = noInstruction{}
			}
			ei.state = stateNone
			ei.csiParam = nil
			return true, nil
		default:
			return false, errCSIParseError
		}
	case stateOSC:
		if characterEquals(ch, '8') {
			// OSC 8: hyperlink.
			ei.state = stateOSCWaitForParams
			ei.hyperlink.Reset()
			return true, nil
		}
		ei.state = stateOSCSkipUnknown
		return true, nil
	case stateOSCWaitForParams:
		if !characterEquals(ch, ';') {
			return true, errOSCParseError
		}
		ei.state = stateOSCParams
		return true, nil
	case stateOSCParams:
		if characterEquals(ch, ';') {
			ei.state = stateOSCHyperlink
		}
		return true, nil
	case stateOSCHyperlink:
		// Hyperlink target runs until BEL (0x07) or ESC-backslash.
		switch {
		case characterEquals(ch, 0x07):
			ei.state = stateNone
		case characterEquals(ch, 0x1b):
			ei.state = stateOSCEndEscape
		default:
			ei.hyperlink.Write(ch)
		}
		return true, nil
	case stateOSCEndEscape:
		ei.state = stateNone
		return true, nil
	case stateOSCSkipUnknown:
		switch {
		case characterEquals(ch, 0x07):
			ei.state = stateNone
		case characterEquals(ch, 0x1b):
			ei.state = stateOSCEndEscape
		}
		return true, nil
	}

	return false, nil
}

// outputCSI applies the accumulated SGR parameters to the current foreground
// and background attributes. Extended color parameters (38/48) consume extra
// parameters via csiColor, hence the manual index advance by `skip`.
func (ei *escapeInterpreter) outputCSI() error {
	n := len(ei.csiParam)
	for i := 0; i < n; {
		p, err := strconv.Atoi(ei.csiParam[i])
		if err != nil {
			return errCSIParseError
		}
		skip := 1
		switch {
		case p == 0: // reset style and color
			ei.curFgColor = ColorDefault
			ei.curBgColor = ColorDefault
		case p >= 1 && p <= 9: // set style
			ei.curFgColor |= getFontEffect(p)
		case p >= 21 && p <= 29: // reset style
			ei.curFgColor &= ^getFontEffect(p - 20)
		case p >= 30 && p <= 37: // set foreground color
			ei.curFgColor &= AttrStyleBits
			ei.curFgColor |= Get256Color(int32(p) - 30)
		case p == setForegroundColor: // set foreground color (256-color or true color)
			var color Attribute
			var err error
			color, skip, err = ei.csiColor(ei.csiParam[i:])
			if err != nil {
				return err
			}
			ei.curFgColor &= AttrStyleBits
			ei.curFgColor |= color
		case p == defaultForegroundColor: // reset foreground color
			ei.curFgColor &= AttrStyleBits
			ei.curFgColor |= ColorDefault
		case p >= 40 && p <= 47: // set background color
			ei.curBgColor &= AttrStyleBits
			ei.curBgColor |= Get256Color(int32(p) - 40)
		case p == setBackgroundColor: // set background color (256-color or true color)
			var color Attribute
			var err error
			color, skip, err = ei.csiColor(ei.csiParam[i:])
			if err != nil {
				return err
			}
			ei.curBgColor &= AttrStyleBits
			ei.curBgColor |= color
		case p == defaultBackgroundColor: // reset background color
			ei.curBgColor &= AttrStyleBits
			ei.curBgColor |= ColorDefault
		case p >= 90 && p <= 97: // set bright foreground color
			ei.curFgColor &= AttrStyleBits
			ei.curFgColor |= Get256Color(int32(p) - 90 + 8)
		case p >= 100 && p <= 107: // set bright background color
			ei.curBgColor &= AttrStyleBits
			ei.curBgColor |= Get256Color(int32(p) - 100 + 8)
		default:
		}
		i += skip
	}
	return nil
}

// csiColor decodes an extended color specification starting at param[0]
// (which is "38" or "48"). It returns the decoded color and how many
// parameters were consumed: 5 for "2;r;g;b" (true color), 3 for "5;n"
// (256-color). Modes below the required output mode are rejected.
func (ei *escapeInterpreter) csiColor(param []string) (color Attribute, skip int, err error) {
	if len(param) < 2 {
		return 0, 0, errCSIParseError
	}

	switch param[1] {
	case "2":
		// 24-bit color
		if ei.mode < OutputTrue {
			return 0, 0, errCSIParseError
		}
		if len(param) < 5 {
			return 0, 0, errCSIParseError
		}
		var red, green, blue int
		red, err = strconv.Atoi(param[2])
		if err != nil {
			return 0, 0, errCSIParseError
		}
		green, err = strconv.Atoi(param[3])
		if err != nil {
			return 0, 0, errCSIParseError
		}
		blue, err = strconv.Atoi(param[4])
		if err != nil {
			return 0, 0, errCSIParseError
		}
		return NewRGBColor(int32(red), int32(green), int32(blue)), 5, nil
	case "5":
		// 8-bit color
		if ei.mode < Output256 {
			return 0, 0, errCSIParseError
		}
		if len(param) < 3 {
			return 0, 0, errCSIParseError
		}
		var hex int
		hex, err = strconv.Atoi(param[2])
		if err != nil {
			return 0, 0, errCSIParseError
		}
		return Get256Color(int32(hex)), 3, nil
	default:
		return 0, 0, errCSIParseError
	}
}

// getFontEffect maps an SGR font-effect parameter to the corresponding
// Attribute bit; unrecognized values map to AttrNone.
func getFontEffect(f int) Attribute {
	switch fontEffect(f) {
	case bold:
		return AttrBold
	case faint:
		return AttrDim
	case italic:
		return AttrItalic
	case underline:
		return AttrUnderline
	case blink:
		return AttrBlink
	case reverse:
		return AttrReverse
	case strike:
		return AttrStrikeThrough
	}
	return AttrNone
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/gocui/gui.go
vendor/github.com/jesseduffield/gocui/gui.go
// Copyright 2014 The gocui Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gocui

import (
	"context"
	standardErrors "errors"
	"runtime"
	"slices"
	"strings"
	"sync"
	"time"

	"github.com/gdamore/tcell/v2"
	"github.com/go-errors/errors"
	"github.com/rivo/uniseg"
)

// OutputMode represents an output mode, which determines how colors
// are used.
type OutputMode int

// DOUBLE_CLICK_THRESHOLD is the maximum delay between two clicks for them to
// be treated as a double click.
const DOUBLE_CLICK_THRESHOLD = 500 * time.Millisecond

var (
	// ErrAlreadyBlacklisted is returned when the keybinding is already blacklisted.
	ErrAlreadyBlacklisted = standardErrors.New("keybind already blacklisted")

	// ErrBlacklisted is returned when the keybinding being parsed / used is blacklisted.
	ErrBlacklisted = standardErrors.New("keybind blacklisted")

	// ErrNotBlacklisted is returned when a keybinding being whitelisted is not blacklisted.
	ErrNotBlacklisted = standardErrors.New("keybind not blacklisted")

	// ErrNoSuchKeybind is returned when the keybinding being parsed does not exist.
	ErrNoSuchKeybind = standardErrors.New("no such keybind")

	// ErrUnknownView allows to assert if a View must be initialized.
	ErrUnknownView = standardErrors.New("unknown view")

	// ErrQuit is used to decide if the MainLoop finished successfully.
	ErrQuit = standardErrors.New("quit")

	// ErrKeybindingNotHandled is returned when a keybinding is not handled, so that the key can be dispatched further
	ErrKeybindingNotHandled = standardErrors.New("keybinding not handled")
)

const (
	// OutputNormal provides 8-colors terminal mode.
	OutputNormal OutputMode = iota

	// Output256 provides 256-colors terminal mode.
	Output256

	// Output216 provides 216 ansi color terminal mode.
	Output216

	// OutputGrayscale provides greyscale terminal mode.
	OutputGrayscale

	// OutputTrue provides 24bit color terminal mode.
	// This mode is recommended even if your terminal doesn't support
	// such mode. The colors are represented exactly as you
	// write them (no clamping or truncating). `tcell` should take care
	// of what your terminal can do.
	OutputTrue
)

type tabClickHandler func(int) error

type tabClickBinding struct {
	viewName string
	handler  tabClickHandler
}

// TODO: would be good to define inbound and outbound click handlers e.g.
// clicking on a file is an inbound thing where we don't care what context you're
// in when it happens, whereas clicking on the main view from the files view is an
// outbound click with a specific handler. But this requires more thinking about
// where handlers should live.
type ViewMouseBinding struct {
	// the view that is clicked
	ViewName string

	// the view that has focus when the click occurs.
	FocusedView string

	Handler func(ViewMouseBindingOpts) error

	Modifier Modifier

	// must be a mouse key
	Key Key
}

type ViewMouseBindingOpts struct {
	X int // i.e. origin x + cursor x

	Y int // i.e. origin y + cursor y

	Key Key // which button was clicked (will be one of the Mouse* constants)

	IsDoubleClick bool // true if this is a double click
}

type GuiMutexes struct {
	// tickingMutex ensures we don't have two loops ticking. The point of 'ticking'
	// is to refresh the gui rapidly so that loader characters can be animated.
	tickingMutex sync.Mutex

	ViewsMutex sync.Mutex
}

// replayedEvents carries channels of pre-recorded input events, used when
// playing back a recording (integration tests).
type replayedEvents struct {
	Keys        chan *TcellKeyEventWrapper
	Resizes     chan *TcellResizeEventWrapper
	MouseEvents chan *TcellMouseEventWrapper
}

type RecordingConfig struct {
	Speed  float64
	Leeway int
}

// clickInfo remembers the last click so a second click can be classified as a
// double click.
type clickInfo struct {
	x        int
	y        int
	key      Key
	viewName string
	time     time.Time
}

// Gui represents the whole User Interface, including the views, layouts
// and keybindings.
type Gui struct { RecordingConfig // ReplayedEvents is for passing pre-recorded input events, for the purposes of testing ReplayedEvents replayedEvents playRecording bool tabClickBindings []*tabClickBinding viewMouseBindings []*ViewMouseBinding lastClick *clickInfo gEvents chan GocuiEvent userEvents chan userEvent views []*View currentView *View managers []Manager keybindings []*keybinding focusHandler func(bool) error openHyperlink func(string, string) error maxX, maxY int outputMode OutputMode stop chan struct{} blacklist []Key // BgColor and FgColor allow to configure the background and foreground // colors of the GUI. BgColor, FgColor, FrameColor Attribute // SelBgColor and SelFgColor allow to configure the background and // foreground colors of the frame of the current view. SelBgColor, SelFgColor, SelFrameColor Attribute // If Highlight is true, Sel{Bg,Fg}Colors will be used to draw the // frame of the current view. Highlight bool // If ShowListFooter is true then show list footer (i.e. the part that says we're at item 5 out of 10) ShowListFooter bool // If Cursor is true then the cursor is enabled. Cursor bool // If Mouse is true then mouse events will be enabled. Mouse bool IsPasting bool // If InputEsc is true, when ESC sequence is in the buffer and it doesn't // match any known sequence, ESC means KeyEsc. 
InputEsc bool // SupportOverlaps is true when we allow for view edges to overlap with other // view edges SupportOverlaps bool Mutexes GuiMutexes OnSearchEscape func() error // these keys must either be of type Key of rune SearchEscapeKey any NextSearchMatchKey any PrevSearchMatchKey any ErrorHandler func(error) error screen tcell.Screen suspendedMutex sync.Mutex suspended bool taskManager *TaskManager lastHoverView *View } type NewGuiOpts struct { OutputMode OutputMode SupportOverlaps bool PlayRecording bool Headless bool // only applicable when Headless is true Width int // only applicable when Headless is true Height int RuneReplacements map[rune]string } // NewGui returns a new Gui object with a given output mode. func NewGui(opts NewGuiOpts) (*Gui, error) { g := &Gui{} var err error if opts.Headless { err = g.tcellInitSimulation(opts.Width, opts.Height) } else { err = g.tcellInit(runeReplacements) } if err != nil { return nil, err } if opts.Headless || runtime.GOOS == "windows" { g.maxX, g.maxY = g.screen.Size() } else { // TODO: find out if we actually need this bespoke logic for linux g.maxX, g.maxY, err = g.getTermWindowSize() if err != nil { return nil, err } } g.outputMode = opts.OutputMode g.stop = make(chan struct{}) g.gEvents = make(chan GocuiEvent, 20) g.userEvents = make(chan userEvent, 20) g.taskManager = newTaskManager() if opts.PlayRecording { g.ReplayedEvents = replayedEvents{ Keys: make(chan *TcellKeyEventWrapper), Resizes: make(chan *TcellResizeEventWrapper), MouseEvents: make(chan *TcellMouseEventWrapper), } } g.BgColor, g.FgColor, g.FrameColor = ColorDefault, ColorDefault, ColorDefault g.SelBgColor, g.SelFgColor, g.SelFrameColor = ColorDefault, ColorDefault, ColorDefault // SupportOverlaps is true when we allow for view edges to overlap with other // view edges g.SupportOverlaps = opts.SupportOverlaps // default keys for when searching strings in a view g.SearchEscapeKey = KeyEsc g.NextSearchMatchKey = 'n' g.PrevSearchMatchKey = 'N' 
g.playRecording = opts.PlayRecording return g, nil } func (g *Gui) NewTask() *TaskImpl { return g.taskManager.NewTask() } // An idle listener listens for when the program is idle. This is useful for // integration tests which can wait for the program to be idle before taking // the next step in the test. func (g *Gui) AddIdleListener(c chan struct{}) { g.taskManager.addIdleListener(c) } // Close finalizes the library. It should be called after a successful // initialization and when gocui is not needed anymore. func (g *Gui) Close() { close(g.stop) Screen.Fini() } // Size returns the terminal's size. func (g *Gui) Size() (x, y int) { return g.maxX, g.maxY } // SetRune writes a rune at the given point, relative to the top-left // corner of the terminal. It checks if the position is valid and applies // the given colors. // Should only be used if you know that the given rune is not part of a grapheme cluster. func (g *Gui) SetRune(x, y int, ch rune, fgColor, bgColor Attribute) error { if x < 0 || y < 0 || x >= g.maxX || y >= g.maxY { // swallowing error because it's not that big of a deal return nil } tcellSetCell(x, y, string(ch), fgColor, bgColor, g.outputMode) return nil } // SetView creates a new view with its top-left corner at (x0, y0) // and the bottom-right one at (x1, y1). If a view with the same name // already exists, its dimensions are updated; otherwise, the error // ErrUnknownView is returned, which allows to assert if the View must // be initialized. It checks if the position is valid. 
func (g *Gui) SetView(name string, x0, y0, x1, y1 int, overlaps byte) (*View, error) {
	if name == "" {
		return nil, errors.New("invalid name")
	}

	if v, err := g.View(name); err == nil {
		sizeChanged := v.x0 != x0 || v.x1 != x1 || v.y0 != y0 || v.y1 != y1
		v.x0 = x0
		v.y0 = y0
		v.x1 = x1
		v.y1 = y1
		if sizeChanged {
			// Cached wrapped lines are invalid for the new size; for editable
			// views, also clamp the cursor/origin back into the visible area.
			v.clearViewLines()
			if v.Editable {
				cursorX, cursorY := v.TextArea.GetCursorXY()
				newViewCursorX, newOriginX := updatedCursorAndOrigin(0, v.InnerWidth(), cursorX)
				newViewCursorY, newOriginY := updatedCursorAndOrigin(0, v.InnerHeight(), cursorY)
				v.SetCursor(newViewCursorX, newViewCursorY)
				v.SetOrigin(newOriginX, newOriginY)
			}
		}
		return v, nil
	}

	g.Mutexes.ViewsMutex.Lock()

	v := NewView(name, x0, y0, x1, y1, g.outputMode)
	v.BgColor, v.FgColor = g.BgColor, g.FgColor
	v.SelBgColor, v.SelFgColor = g.SelBgColor, g.SelFgColor
	v.Overlaps = overlaps
	g.views = append(g.views, v)

	g.Mutexes.ViewsMutex.Unlock()

	// New views deliberately return ErrUnknownView so callers can detect
	// first-time initialization.
	return v, errors.Wrap(ErrUnknownView, 0)
}

// SetViewBeneath sets a view stacked beneath another view
func (g *Gui) SetViewBeneath(name string, aboveViewName string, height int) (*View, error) {
	aboveView, err := g.View(aboveViewName)
	if err != nil {
		return nil, err
	}

	viewTop := aboveView.y1 + 1
	return g.SetView(name, aboveView.x0, viewTop, aboveView.x1, viewTop+height-1, 0)
}

// SetViewOnTop sets the given view on top of the existing ones.
func (g *Gui) SetViewOnTop(name string) (*View, error) {
	g.Mutexes.ViewsMutex.Lock()
	defer g.Mutexes.ViewsMutex.Unlock()

	for i, v := range g.views {
		if v.name == name {
			// Remove the view from its slot and re-append it; last in the
			// slice draws last, i.e. on top.
			s := append(g.views[:i], g.views[i+1:]...)
			g.views = append(s, v)
			return v, nil
		}
	}
	return nil, errors.Wrap(ErrUnknownView, 0)
}

// SetViewOnBottom sets the given view on bottom of the existing ones.
func (g *Gui) SetViewOnBottom(name string) (*View, error) {
	g.Mutexes.ViewsMutex.Lock()
	defer g.Mutexes.ViewsMutex.Unlock()

	for i, v := range g.views {
		if v.name == name {
			s := append(g.views[:i], g.views[i+1:]...)
			g.views = append([]*View{v}, s...)
			return v, nil
		}
	}
	return nil, errors.Wrap(ErrUnknownView, 0)
}

// SetViewOnTopOf moves the view named toMove so that it draws above the view
// named other (no-op if it is already above it).
func (g *Gui) SetViewOnTopOf(toMove string, other string) error {
	g.Mutexes.ViewsMutex.Lock()
	defer g.Mutexes.ViewsMutex.Unlock()

	if toMove == other {
		return nil
	}

	// need to find the two current positions and then move toMove before other in the list.
	toMoveIndex := -1
	otherIndex := -1

	for i, v := range g.views {
		if v.name == toMove {
			toMoveIndex = i
		}

		if v.name == other {
			otherIndex = i
		}
	}

	if toMoveIndex == -1 || otherIndex == -1 {
		return errors.Wrap(ErrUnknownView, 0)
	}

	// already on top
	if toMoveIndex > otherIndex {
		return nil
	}

	// need to actually do it the other way around. Last is highest
	viewToMove := g.views[toMoveIndex]

	g.views = append(g.views[:toMoveIndex], g.views[toMoveIndex+1:]...)
	g.views = append(g.views[:otherIndex], append([]*View{viewToMove}, g.views[otherIndex:]...)...)

	return nil
}

// replaces the content in toView with the content in fromView
func (g *Gui) CopyContent(fromView *View, toView *View) {
	g.Mutexes.ViewsMutex.Lock()
	defer g.Mutexes.ViewsMutex.Unlock()

	toView.CopyContent(fromView)
}

// Views returns all the views in the GUI.
func (g *Gui) Views() []*View {
	return g.views
}

// View returns a pointer to the view with the given name, or error
// ErrUnknownView if a view with that name does not exist.
func (g *Gui) View(name string) (*View, error) {
	g.Mutexes.ViewsMutex.Lock()
	defer g.Mutexes.ViewsMutex.Unlock()

	for _, v := range g.views {
		if v.name == name {
			return v, nil
		}
	}
	return nil, errors.Wrap(ErrUnknownView, 0)
}

// VisibleViewByPosition returns a pointer to a view matching the given position, or
// error ErrUnknownView if a view in that position does not exist.
func (g *Gui) VisibleViewByPosition(x, y int) (*View, error) {
	g.Mutexes.ViewsMutex.Lock()
	defer g.Mutexes.ViewsMutex.Unlock()

	// traverse views in reverse order checking top views first
	for i := len(g.views); i > 0; i-- {
		v := g.views[i-1]
		if !v.Visible {
			continue
		}
		frameOffset := 0
		if v.Frame {
			frameOffset = 1
		}
		if x > v.x0-frameOffset && x < v.x1+frameOffset && y > v.y0-frameOffset && y < v.y1+frameOffset {
			return v, nil
		}
	}
	return nil, errors.Wrap(ErrUnknownView, 0)
}

// ViewPosition returns the coordinates of the view with the given name, or
// error ErrUnknownView if a view with that name does not exist.
func (g *Gui) ViewPosition(name string) (x0, y0, x1, y1 int, err error) {
	g.Mutexes.ViewsMutex.Lock()
	defer g.Mutexes.ViewsMutex.Unlock()

	for _, v := range g.views {
		if v.name == name {
			return v.x0, v.y0, v.x1, v.y1, nil
		}
	}
	return 0, 0, 0, 0, errors.Wrap(ErrUnknownView, 0)
}

// DeleteView deletes a view by name.
func (g *Gui) DeleteView(name string) error {
	g.Mutexes.ViewsMutex.Lock()
	defer g.Mutexes.ViewsMutex.Unlock()

	for i, v := range g.views {
		if v.name == name {
			g.views = append(g.views[:i], g.views[i+1:]...)
			return nil
		}
	}
	return errors.Wrap(ErrUnknownView, 0)
}

// SetCurrentView gives the focus to a given view.
func (g *Gui) SetCurrentView(name string) (*View, error) {
	g.Mutexes.ViewsMutex.Lock()
	defer g.Mutexes.ViewsMutex.Unlock()

	for _, v := range g.views {
		if v.name == name {
			g.currentView = v
			return v, nil
		}
	}

	return nil, errors.Wrap(ErrUnknownView, 0)
}

// CurrentView returns the currently focused view, or nil if no view
// owns the focus.
func (g *Gui) CurrentView() *View {
	return g.currentView
}

// SetKeybinding creates a new keybinding. If viewname equals to ""
// (empty string) then the keybinding will apply to all views. key must
// be a rune or a Key.
//
// When mouse keys are used (MouseLeft, MouseRight, ...), modifier might not work correctly.
// It behaves differently on different platforms. Somewhere it doesn't register Alt key press,
// on others it might report Ctrl as Alt. It's not consistent and therefore it's not recommended
// to use with mouse keys.
func (g *Gui) SetKeybinding(viewname string, key any, mod Modifier, handler func(*Gui, *View) error) error {
	var kb *keybinding

	k, ch, err := getKey(key)
	if err != nil {
		return err
	}

	if g.isBlacklisted(k) {
		return ErrBlacklisted
	}

	kb = newKeybinding(viewname, k, ch, mod, handler)
	g.keybindings = append(g.keybindings, kb)
	return nil
}

// DeleteKeybinding deletes a keybinding.
func (g *Gui) DeleteKeybinding(viewname string, key any, mod Modifier) error {
	k, ch, err := getKey(key)
	if err != nil {
		return err
	}

	for i, kb := range g.keybindings {
		if kb.viewName == viewname && kb.ch == ch && kb.key == k && kb.mod == mod {
			g.keybindings = append(g.keybindings[:i], g.keybindings[i+1:]...)
			return nil
		}
	}
	return errors.New("keybinding not found")
}

// DeleteAllKeybindings deletes all keybindings, tab click bindings and view
// mouse bindings.
func (g *Gui) DeleteAllKeybindings() {
	g.keybindings = []*keybinding{}
	g.tabClickBindings = []*tabClickBinding{}
	g.viewMouseBindings = []*ViewMouseBinding{}
}

// DeleteViewKeybindings deletes all keybindings of view.
func (g *Gui) DeleteViewKeybindings(viewname string) {
	var s []*keybinding
	for _, kb := range g.keybindings {
		if kb.viewName != viewname {
			s = append(s, kb)
		}
	}
	g.keybindings = s
}

// SetTabClickBinding sets a binding for a tab click event
func (g *Gui) SetTabClickBinding(viewName string, handler tabClickHandler) error {
	g.tabClickBindings = append(g.tabClickBindings, &tabClickBinding{
		viewName: viewName,
		handler:  handler,
	})

	return nil
}

// SetViewClickBinding registers a mouse binding scoped to a view.
func (g *Gui) SetViewClickBinding(binding *ViewMouseBinding) error {
	g.viewMouseBindings = append(g.viewMouseBindings, binding)

	return nil
}

// BlackListKeybinding adds a keybinding to the blacklist
func (g *Gui) BlacklistKeybinding(k Key) error {
	if slices.Contains(g.blacklist, k) {
		return ErrAlreadyBlacklisted
	}
	g.blacklist = append(g.blacklist, k)
	return nil
}

// WhiteListKeybinding removes a keybinding from the blacklist
func (g *Gui) WhitelistKeybinding(k Key) error {
	for i, j := range g.blacklist {
		if j == k {
			g.blacklist = append(g.blacklist[:i], g.blacklist[i+1:]...)
			return nil
		}
	}

	return ErrNotBlacklisted
}

// SetFocusHandler registers a callback invoked on terminal focus gain/loss.
func (g *Gui) SetFocusHandler(handler func(bool) error) {
	g.focusHandler = handler
}

// SetOpenHyperlinkFunc registers the function used to open a clicked hyperlink.
func (g *Gui) SetOpenHyperlinkFunc(openHyperlinkFunc func(string, string) error) {
	g.openHyperlink = openHyperlinkFunc
}

// getKey takes an empty interface with a key and returns the corresponding
// typed Key or rune.
func getKey(key any) (Key, rune, error) {
	switch t := key.(type) {
	case nil: // Ignore keybinding if `nil`
		return 0, 0, nil
	case Key:
		return t, 0, nil
	case rune:
		return 0, t, nil
	default:
		return 0, 0, errors.New("unknown type")
	}
}

// userEvent represents an event triggered by the user.
type userEvent struct {
	f    func(*Gui) error
	task Task
}

// Update executes the passed function. This method can be called safely from a
// goroutine in order to update the GUI. It is important to note that the
// passed function won't be executed immediately, instead it will be added to
// the user events queue.
// Given that Update spawns a goroutine, the order in
// which the user events will be handled is not guaranteed.
func (g *Gui) Update(f func(*Gui) error) {
	task := g.NewTask()

	go g.updateAsyncAux(f, task)
}

// UpdateAsync is a version of Update that does not spawn a go routine, it can
// be a bit more efficient in cases where Update is called many times like when
// tailing a file. In general you should use Update()
func (g *Gui) UpdateAsync(f func(*Gui) error) {
	task := g.NewTask()

	g.updateAsyncAux(f, task)
}

func (g *Gui) updateAsyncAux(f func(*Gui) error, task Task) {
	g.userEvents <- userEvent{f: f, task: task}
}

// Calls a function in a goroutine. Handles panics gracefully and tracks
// number of background tasks.
// Always use this when you want to spawn a goroutine and you want lazygit to
// consider itself 'busy` as it runs the code. Don't use for long-running
// background goroutines where you wouldn't want lazygit to be considered busy
// (i.e. when you wouldn't want a loader to be shown to the user)
func (g *Gui) OnWorker(f func(Task) error) {
	task := g.NewTask()
	go func() {
		g.onWorkerAux(f, task)
		task.Done()
	}()
}

func (g *Gui) onWorkerAux(f func(Task) error, task Task) {
	panicking := true
	defer func() {
		// Restore the terminal before the panic propagates, so the stack
		// trace is readable.
		if panicking && Screen != nil {
			Screen.Fini()
		}
	}()

	err := f(task)

	panicking = false

	if err != nil {
		g.Update(func(g *Gui) error { return err })
	}
}

// A Manager is in charge of GUI's layout and can be used to build widgets.
type Manager interface {
	// Layout is called every time the GUI is redrawn, it must contain the
	// base views and its initializations.
	Layout(*Gui) error
}

// The ManagerFunc type is an adapter to allow the use of ordinary functions as
// Managers. If f is a function with the appropriate signature, ManagerFunc(f)
// is an Manager object that calls f.
type ManagerFunc func(*Gui) error

// Layout calls f(g)
func (f ManagerFunc) Layout(g *Gui) error {
	return f(g)
}

// SetManager sets the given GUI managers. It deletes all views and
// keybindings.
func (g *Gui) SetManager(managers ...Manager) {
	g.managers = managers
	g.currentView = nil
	g.views = nil
	g.keybindings = nil
	g.tabClickBindings = nil

	go func() { g.gEvents <- GocuiEvent{Type: eventResize} }()
}

// SetManagerFunc sets the given manager function. It deletes all views and
// keybindings.
func (g *Gui) SetManagerFunc(manager func(*Gui) error) {
	g.SetManager(ManagerFunc(manager))
}

// MainLoop runs the main loop until an error is returned. A successful
// finish should return ErrQuit.
func (g *Gui) MainLoop() error {
	// Poll terminal events into gEvents until Close() closes g.stop.
	go func() {
		for {
			select {
			case <-g.stop:
				return
			default:
				g.gEvents <- g.pollEvent()
			}
		}
	}()

	Screen.EnableFocus()
	Screen.EnablePaste()

	previousEnableMouse := false

	for {
		// Mouse support can be toggled at runtime; sync tcell with g.Mouse.
		if g.Mouse != previousEnableMouse {
			if g.Mouse {
				Screen.EnableMouse()
			} else {
				Screen.DisableMouse()
			}
			previousEnableMouse = g.Mouse
		}

		err := g.processEvent()
		if err != nil {
			return err
		}
	}
}

func (g *Gui) handleError(err error) error {
	if err != nil && !standardErrors.Is(err, ErrQuit) && g.ErrorHandler != nil {
		return g.ErrorHandler(err)
	}

	return err
}

func (g *Gui) processEvent() error {
	select {
	case ev := <-g.gEvents:
		task := g.NewTask()
		defer func() { task.Done() }()

		if err := g.handleError(g.handleEvent(&ev)); err != nil {
			return err
		}
	case ev := <-g.userEvents:
		defer func() { ev.task.Done() }()

		if err := g.handleError(ev.f(g)); err != nil {
			return err
		}
	}

	// Drain any queued events before redrawing, so we only flush once.
	if err := g.processRemainingEvents(); err != nil {
		return err
	}

	if err := g.flush(); err != nil {
		return err
	}

	return nil
}

// processRemainingEvents handles the remaining events in the events pool.
func (g *Gui) processRemainingEvents() error {
	for {
		select {
		case ev := <-g.gEvents:
			if err := g.handleError(g.handleEvent(&ev)); err != nil {
				return err
			}
		case ev := <-g.userEvents:
			err := g.handleError(ev.f(g))
			ev.task.Done()
			if err != nil {
				return err
			}
		default:
			return nil
		}
	}
}

// handleEvent handles an event, based on its type (key-press, error,
// etc.)
func (g *Gui) handleEvent(ev *GocuiEvent) error {
	switch ev.Type {
	case eventKey, eventMouse, eventMouseMove:
		return g.onKey(ev)
	case eventError:
		return ev.Err
	case eventResize:
		g.onResize()
		return nil
	case eventFocus:
		return g.onFocus(ev)
	case eventPaste:
		g.IsPasting = ev.Start
		return nil
	default:
		return nil
	}
}

func (g *Gui) onResize() {
	// not sure if we actually need this
	// g.screen.Sync()
}

// drawFrameEdges draws the horizontal and vertical edges of a view.
func (g *Gui) drawFrameEdges(v *View, fgColor, bgColor Attribute) error {
	runeH, runeV := '─', '│'
	if len(v.FrameRunes) >= 2 {
		runeH, runeV = v.FrameRunes[0], v.FrameRunes[1]
	}

	for x := v.x0 + 1; x < v.x1 && x < g.maxX; x++ {
		if x < 0 {
			continue
		}
		if v.y0 > -1 && v.y0 < g.maxY {
			if err := g.SetRune(x, v.y0, runeH, fgColor, bgColor); err != nil {
				return err
			}
		}
		if v.y1 > -1 && v.y1 < g.maxY {
			if err := g.SetRune(x, v.y1, runeH, fgColor, bgColor); err != nil {
				return err
			}
		}
	}

	showScrollbar, realScrollbarStart, realScrollbarEnd := calcRealScrollbarStartEnd(v)
	for y := v.y0 + 1; y < v.y1 && y < g.maxY; y++ {
		if y < 0 {
			continue
		}
		if v.x0 > -1 && v.x0 < g.maxX {
			if err := g.SetRune(v.x0, y, runeV, fgColor, bgColor); err != nil {
				return err
			}
		}
		if v.x1 > -1 && v.x1 < g.maxX {
			// The right edge doubles as the scrollbar track.
			runeToPrint := calcScrollbarRune(showScrollbar, realScrollbarStart, realScrollbarEnd, y, runeV)

			if err := g.SetRune(v.x1, y, runeToPrint, fgColor, bgColor); err != nil {
				return err
			}
		}
	}

	return nil
}

func calcScrollbarRune(
	showScrollbar bool, scrollbarStart int, scrollbarEnd int, position int, runeV rune,
) rune {
	if showScrollbar && (position >= scrollbarStart && position <= scrollbarEnd) {
		return '▐'
	} else {
		return runeV
	}
}

// calcRealScrollbarStartEnd returns whether a scrollbar should be shown for
// the view, and if so, its start/end rows in screen coordinates.
func calcRealScrollbarStartEnd(v *View) (bool, int, int) {
	height := v.InnerHeight()
	fullHeight := v.ViewLinesHeight() - v.scrollMargin()
	if v.CanScrollPastBottom {
		fullHeight += height
	}
	if height < 2 || height >= fullHeight {
		return false, 0, 0
	}

	originY := v.OriginY()
	scrollbarStart, scrollbarHeight := calcScrollbar(fullHeight, height, originY, height-1)
	top := v.y0 + 1
	realScrollbarStart := top + scrollbarStart
	realScrollbarEnd := realScrollbarStart + scrollbarHeight

	return true, realScrollbarStart, realScrollbarEnd
}

func cornerRune(index byte) rune {
	return []rune{' ', '│', '│', '│', '─', '┘', '┐', '┤', '─', '└', '┌', '├', '├', '┴', '┬', '┼'}[index]
}

// cornerCustomRune returns rune from `v.FrameRunes` slice. If the length of slice is less than 11
// all the missing runes will be translated to the default `cornerRune()`
func cornerCustomRune(v *View, index byte) rune {
	// Translate `cornerRune()` index
	//  0    1    2    3    4    5    6    7    8    9    10   11   12   13   14   15
	// ' ', '│', '│', '│', '─', '┘', '┐', '┤', '─', '└', '┌', '├', '├', '┴', '┬', '┼'
	// into `FrameRunes` index
	//  0    1    2    3    4    5    6    7    8    9    10
	// '─', '│', '┌', '┐', '└', '┘', '├', '┤', '┬', '┴', '┼'
	switch index {
	case 1, 2, 3:
		return v.FrameRunes[1]
	case 4, 8:
		return v.FrameRunes[0]
	case 5:
		return v.FrameRunes[5]
	case 6:
		return v.FrameRunes[3]
	case 7:
		if len(v.FrameRunes) < 8 {
			break
		}
		return v.FrameRunes[7]
	case 9:
		return v.FrameRunes[4]
	case 10:
		return v.FrameRunes[2]
	case 11, 12:
		if len(v.FrameRunes) < 7 {
			break
		}
		return v.FrameRunes[6]
	case 13:
		if len(v.FrameRunes) < 10 {
			break
		}
		return v.FrameRunes[9]
	case 14:
		if len(v.FrameRunes) < 9 {
			break
		}
		return v.FrameRunes[8]
	case 15:
		if len(v.FrameRunes) < 11 {
			break
		}
		return v.FrameRunes[10]
	default:
		return ' ' // cornerRune(0)
	}
	return cornerRune(index)
}

func corner(v *View, directions byte) rune {
	index := v.Overlaps | directions
	if len(v.FrameRunes) >= 6 {
		return cornerCustomRune(v, index)
	}
	return cornerRune(index)
}

// drawFrameCorners draws the corners of the view.
func (g *Gui) drawFrameCorners(v *View, fgColor, bgColor Attribute) error {
	if v.y0 == v.y1 {
		// A one-row view has no corners; draw half-line caps instead.
		if !g.SupportOverlaps && v.x0 >= 0 && v.x1 >= 0 && v.y0 >= 0 && v.x0 < g.maxX && v.x1 < g.maxX && v.y0 < g.maxY {
			if err := g.SetRune(v.x0, v.y0, '╶', fgColor, bgColor); err != nil {
				return err
			}
			if err := g.SetRune(v.x1, v.y0, '╴', fgColor, bgColor); err != nil {
				return err
			}
		}
		return nil
	}

	runeTL, runeTR, runeBL, runeBR := '┌', '┐', '└', '┘'
	if len(v.FrameRunes) >= 6 {
		runeTL, runeTR, runeBL, runeBR = v.FrameRunes[2], v.FrameRunes[3], v.FrameRunes[4], v.FrameRunes[5]
	}
	if g.SupportOverlaps {
		runeTL = corner(v, BOTTOM|RIGHT)
		runeTR = corner(v, BOTTOM|LEFT)
		runeBL = corner(v, TOP|RIGHT)
		runeBR = corner(v, TOP|LEFT)
	}

	corners := []struct {
		x, y int
		ch   rune
	}{{v.x0, v.y0, runeTL}, {v.x1, v.y0, runeTR}, {v.x0, v.y1, runeBL}, {v.x1, v.y1, runeBR}}

	for _, c := range corners {
		if c.x >= 0 && c.y >= 0 && c.x < g.maxX && c.y < g.maxY {
			if err := g.SetRune(c.x, c.y, c.ch, fgColor, bgColor); err != nil {
				return err
			}
		}
	}
	return nil
}

// drawTitle draws the title of the view.
func (g *Gui) drawTitle(v *View, fgColor, bgColor Attribute) error {
	if v.y0 < 0 || v.y0 >= g.maxY {
		return nil
	}

	tabs := v.Tabs
	prefix := v.TitlePrefix
	if prefix != "" {
		if len(v.FrameRunes) > 0 {
			prefix += string(v.FrameRunes[0])
		} else {
			prefix += "─"
		}
	}
	separator := " - "
	charIndex := 0
	currentTabStart := -1
	currentTabEnd := -1
	if len(tabs) == 0 {
		tabs = []string{v.Title}
	} else {
		// Locate the selected tab's character range within the joined string
		// so it can be highlighted below.
		for i, tab := range tabs {
			if i == v.TabIndex {
				currentTabStart = charIndex
				currentTabEnd = charIndex + len(tab)
				break
			}
			charIndex += len(tab)
			if i < len(tabs)-1 {
				charIndex += len(separator)
			}
		}
	}

	str := strings.Join(tabs, separator)

	x := v.x0 + 2
	for _, ch := range prefix {
		if err := g.SetRune(x, v.y0, ch, fgColor, bgColor); err != nil {
			return err
		}
		x += uniseg.StringWidth(string(ch))
	}
	for i, ch := range str {
		if x < 0 {
			continue
		} else if x > v.x1-2 || x >= g.maxX {
			break
		}
		currentFgColor := fgColor
		currentBgColor := bgColor
		// if you are the current view and you have multiple tabs, de-highlight the non-selected tabs
		if v == g.currentView && len(v.Tabs) > 0 {
			currentFgColor = v.FgColor
			currentBgColor = v.BgColor
		}

		if i >= currentTabStart && i <= currentTabEnd {
			currentFgColor = v.SelFgColor
			if v != g.currentView {
				currentFgColor &= ^AttrBold
			}
		}
		if err := g.SetRune(x, v.y0, ch, currentFgColor, currentBgColor); err != nil {
			return err
		}
		x += uniseg.StringWidth(string(ch))
	}
	return nil
}

// drawSubtitle draws the subtitle of the view.
func (g *Gui) drawSubtitle(v *View, fgColor, bgColor Attribute) error {
	if v.y0 < 0 || v.y0 >= g.maxY {
		return nil
	}

	start := v.x1 - 5 - uniseg.StringWidth(v.Subtitle)
	if start < v.x0 {
		return nil
	}
	x := start
	for _, ch := range v.Subtitle {
		if x >= v.x1 {
			break
		}
		if err := g.SetRune(x, v.y0, ch, fgColor, bgColor); err != nil {
			return err
		}
		x += uniseg.StringWidth(string(ch))
	}
	return nil
}

// drawListFooter draws the footer of a list view, showing something like '1 of 10'
func (g *Gui) drawListFooter(v *View, fgColor, bgColor Attribute) error {
	if len(v.lines) == 0 {
		return nil
	}

	message := v.Footer

	if v.y1 < 0 || v.y1 >= g.maxY {
		return nil
	}

	start := v.x1 - 1 - uniseg.StringWidth(message)
	if start < v.x0 {
		return nil
	}
	x := start
	for _, ch := range message {
		if x >= v.x1 {
			break
		}
		if err := g.SetRune(x, v.y1, ch, fgColor, bgColor); err != nil {
			return err
		}
		x += uniseg.StringWidth(string(ch))
	}
	return nil
}

// flush updates the gui, re-drawing frames and buffers.
func (g *Gui) flush() error {
	// pretty sure we don't need this, but keeping it here in case we get weird visual artifacts
	// g.clear(g.FgColor, g.BgColor)

	maxX, maxY := Screen.Size()
	// if GUI's size has changed, we need to redraw all views
	if maxX != g.maxX || maxY != g.maxY {
		for _, v := range g.views {
			v.clearViewLines()
		}
	}
	g.maxX, g.maxY = maxX, maxY

	for _, m := range g.managers {
		if err := m.Layout(g); err != nil {
			return err
		}
	}
	for _, v := range g.views {
		if err := g.draw(v); err != nil {
			return err
		}
	}

	Screen.Show()
	return nil
}

// ForceLayoutAndRedraw runs a full layout/draw cycle immediately.
func (g *Gui) ForceLayoutAndRedraw() error {
	return g.flush()
}

// force redrawing one or more views outside of the normal main loop. Useful during longer
// operations that block the main thread, to update a spinner in a status view.
func (g *Gui) ForceRedrawViews(views ...*View) error { for _, m := range g.managers { if err := m.Layout(g); err != nil { return err } } for _, v := range views { v.draw() } Screen.Show() return nil } // draw manages the cursor and calls the draw function of a view. func (g *Gui) draw(v *View) error { if g.suspended { return nil } if !v.Visible || v.y1 < v.y0 || v.x1 < v.x0 { return nil } if g.Cursor { if curview := g.currentView; curview != nil { vMaxX, vMaxY := curview.InnerSize() if curview.cx >= 0 && curview.cx < vMaxX && curview.cy >= 0 && curview.cy < vMaxY { cx, cy := curview.x0+curview.cx+1, curview.y0+curview.cy+1 Screen.ShowCursor(cx, cy) } else { Screen.HideCursor() } } } else { Screen.HideCursor() } v.draw() if v.Frame { var fgColor, bgColor, frameColor Attribute if g.Highlight && v == g.currentView { fgColor = g.SelFgColor bgColor = g.SelBgColor frameColor = g.SelFrameColor } else { bgColor = g.BgColor if v.TitleColor != ColorDefault { fgColor = v.TitleColor } else { fgColor = g.FgColor } if v.FrameColor != ColorDefault { frameColor = v.FrameColor } else { frameColor = g.FrameColor } } if err := g.drawFrameEdges(v, frameColor, bgColor); err != nil { return err } if err := g.drawFrameCorners(v, frameColor, bgColor); err != nil { return err } if v.Title != "" || len(v.Tabs) > 0 { if err := g.drawTitle(v, fgColor, bgColor); err != nil { return err } } if v.Subtitle != "" { if err := g.drawSubtitle(v, fgColor, bgColor); err != nil { return err } } if v.Footer != "" && g.ShowListFooter { if err := g.drawListFooter(v, fgColor, bgColor); err != nil { return err } } } return nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
true
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/blame.go
vendor/github.com/jesseduffield/go-git/v5/blame.go
package git import ( "bytes" "container/heap" "errors" "fmt" "io" "strconv" "time" "unicode/utf8" "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/plumbing/object" "github.com/jesseduffield/go-git/v5/utils/diff" "github.com/sergi/go-diff/diffmatchpatch" ) // BlameResult represents the result of a Blame operation. type BlameResult struct { // Path is the path of the File that we're blaming. Path string // Rev (Revision) is the hash of the specified Commit used to generate this result. Rev plumbing.Hash // Lines contains every line with its authorship. Lines []*Line } // Blame returns a BlameResult with the information about the last author of // each line from file `path` at commit `c`. func Blame(c *object.Commit, path string) (*BlameResult, error) { // The file to blame is identified by the input arguments: // commit and path. commit is a Commit object obtained from a Repository. Path // represents a path to a specific file contained in the repository. // // Blaming a file is done by walking the tree in reverse order trying to find where each line was last modified. // // When a diff is found it cannot immediately assume it came from that commit, as it may have come from 1 of its // parents, so it will first try to resolve those diffs from its parents, if it couldn't find the change in its // parents then it will assign the change to itself. // // When encountering 2 parents that have made the same change to a file it will choose the parent that was merged // into the current branch first (this is determined by the order of the parents inside the commit). // // This currently works on a line by line basis, if performance becomes an issue it could be changed to work with // hunks rather than lines. Then when encountering diff hunks it would need to split them where necessary. 
b := new(blame) b.fRev = c b.path = path b.q = new(priorityQueue) file, err := b.fRev.File(path) if err != nil { return nil, err } finalLines, err := file.Lines() if err != nil { return nil, err } finalLength := len(finalLines) needsMap := make([]lineMap, finalLength) for i := range needsMap { needsMap[i] = lineMap{i, i, nil, -1} } contents, err := file.Contents() if err != nil { return nil, err } b.q.Push(&queueItem{ nil, nil, c, path, contents, needsMap, 0, false, 0, }) items := make([]*queueItem, 0) for { items = items[:0] for { if b.q.Len() == 0 { return nil, errors.New("invalid state: no items left on the blame queue") } item := b.q.Pop() items = append(items, item) next := b.q.Peek() if next == nil || next.Hash != item.Commit.Hash { break } } finished, err := b.addBlames(items) if err != nil { return nil, err } if finished { break } } b.lineToCommit = make([]*object.Commit, finalLength) for i := range needsMap { b.lineToCommit[i] = needsMap[i].Commit } lines, err := newLines(finalLines, b.lineToCommit) if err != nil { return nil, err } return &BlameResult{ Path: path, Rev: c.Hash, Lines: lines, }, nil } // Line values represent the contents and author of a line in BlamedResult values. type Line struct { // Author is the email address of the last author that modified the line. Author string // AuthorName is the name of the last author that modified the line. AuthorName string // Text is the original text of the line. 
Text string // Date is when the original text of the line was introduced Date time.Time // Hash is the commit hash that introduced the original line Hash plumbing.Hash } func newLine(author, authorName, text string, date time.Time, hash plumbing.Hash) *Line { return &Line{ Author: author, AuthorName: authorName, Text: text, Hash: hash, Date: date, } } func newLines(contents []string, commits []*object.Commit) ([]*Line, error) { result := make([]*Line, 0, len(contents)) for i := range contents { result = append(result, newLine( commits[i].Author.Email, commits[i].Author.Name, contents[i], commits[i].Author.When, commits[i].Hash, )) } return result, nil } // this struct is internally used by the blame function to hold its // inputs, outputs and state. type blame struct { // the path of the file to blame path string // the commit of the final revision of the file to blame fRev *object.Commit // resolved lines lineToCommit []*object.Commit // queue of commits that need resolving q *priorityQueue } type lineMap struct { Orig, Cur int Commit *object.Commit FromParentNo int } func (b *blame) addBlames(curItems []*queueItem) (bool, error) { curItem := curItems[0] // Simple optimisation to merge paths, there is potential to go a bit further here and check for any duplicates // not only if they are all the same. 
if len(curItems) == 1 { curItems = nil } else if curItem.IdenticalToChild { allSame := true lenCurItems := len(curItems) lowestParentNo := curItem.ParentNo for i := 1; i < lenCurItems; i++ { if !curItems[i].IdenticalToChild || curItem.Child != curItems[i].Child { allSame = false break } lowestParentNo = min(lowestParentNo, curItems[i].ParentNo) } if allSame { curItem.Child.numParentsNeedResolving = curItem.Child.numParentsNeedResolving - lenCurItems + 1 curItems = nil // free the memory curItem.ParentNo = lowestParentNo // Now check if we can remove the parent completely for curItem.Child.IdenticalToChild && curItem.Child.MergedChildren == nil && curItem.Child.numParentsNeedResolving == 1 { oldChild := curItem.Child curItem.Child = oldChild.Child curItem.ParentNo = oldChild.ParentNo } } } // if we have more than 1 item for this commit, create a single needsMap if len(curItems) > 1 { curItem.MergedChildren = make([]childToNeedsMap, len(curItems)) for i, c := range curItems { curItem.MergedChildren[i] = childToNeedsMap{c.Child, c.NeedsMap, c.IdenticalToChild, c.ParentNo} } newNeedsMap := make([]lineMap, 0, len(curItem.NeedsMap)) newNeedsMap = append(newNeedsMap, curItems[0].NeedsMap...) for i := 1; i < len(curItems); i++ { cur := curItems[i].NeedsMap n := 0 // position in newNeedsMap c := 0 // position in current list for c < len(cur) { if n == len(newNeedsMap) { newNeedsMap = append(newNeedsMap, cur[c:]...) 
break } else if newNeedsMap[n].Cur == cur[c].Cur { n++ c++ } else if newNeedsMap[n].Cur < cur[c].Cur { n++ } else { newNeedsMap = append(newNeedsMap, cur[c]) newPos := len(newNeedsMap) - 1 for newPos > n { newNeedsMap[newPos-1], newNeedsMap[newPos] = newNeedsMap[newPos], newNeedsMap[newPos-1] newPos-- } } } } curItem.NeedsMap = newNeedsMap curItem.IdenticalToChild = false curItem.Child = nil curItems = nil // free the memory } parents, err := parentsContainingPath(curItem.path, curItem.Commit) if err != nil { return false, err } anyPushed := false for parnetNo, prev := range parents { currentHash, err := blobHash(curItem.path, curItem.Commit) if err != nil { return false, err } prevHash, err := blobHash(prev.Path, prev.Commit) if err != nil { return false, err } if currentHash == prevHash { if len(parents) == 1 && curItem.MergedChildren == nil && curItem.IdenticalToChild { // commit that has 1 parent and 1 child and is the same as both, bypass it completely b.q.Push(&queueItem{ Child: curItem.Child, Commit: prev.Commit, path: prev.Path, Contents: curItem.Contents, NeedsMap: curItem.NeedsMap, // reuse the NeedsMap as we are throwing away this item IdenticalToChild: true, ParentNo: curItem.ParentNo, }) } else { b.q.Push(&queueItem{ Child: curItem, Commit: prev.Commit, path: prev.Path, Contents: curItem.Contents, NeedsMap: append([]lineMap(nil), curItem.NeedsMap...), // create new slice and copy IdenticalToChild: true, ParentNo: parnetNo, }) curItem.numParentsNeedResolving++ } anyPushed = true continue } // get the contents of the file file, err := prev.Commit.File(prev.Path) if err != nil { return false, err } prevContents, err := file.Contents() if err != nil { return false, err } hunks := diff.Do(prevContents, curItem.Contents) prevl := -1 curl := -1 need := 0 getFromParent := make([]lineMap, 0) out: for h := range hunks { hLines := countLines(hunks[h].Text) for hl := 0; hl < hLines; hl++ { switch hunks[h].Type { case diffmatchpatch.DiffEqual: prevl++ curl++ if 
curl == curItem.NeedsMap[need].Cur { // add to needs getFromParent = append(getFromParent, lineMap{curl, prevl, nil, -1}) // move to next need need++ if need >= len(curItem.NeedsMap) { break out } } case diffmatchpatch.DiffInsert: curl++ if curl == curItem.NeedsMap[need].Cur { // the line we want is added, it may have been added here (or by another parent), skip it for now need++ if need >= len(curItem.NeedsMap) { break out } } case diffmatchpatch.DiffDelete: prevl += hLines continue out default: return false, errors.New("invalid state: invalid hunk Type") } } } if len(getFromParent) > 0 { b.q.Push(&queueItem{ curItem, nil, prev.Commit, prev.Path, prevContents, getFromParent, 0, false, parnetNo, }) curItem.numParentsNeedResolving++ anyPushed = true } } curItem.Contents = "" // no longer need, free the memory if !anyPushed { return finishNeeds(curItem) } return false, nil } func finishNeeds(curItem *queueItem) (bool, error) { // any needs left in the needsMap must have come from this revision for i := range curItem.NeedsMap { if curItem.NeedsMap[i].Commit == nil { curItem.NeedsMap[i].Commit = curItem.Commit curItem.NeedsMap[i].FromParentNo = -1 } } if curItem.Child == nil && curItem.MergedChildren == nil { return true, nil } if curItem.MergedChildren == nil { return applyNeeds(curItem.Child, curItem.NeedsMap, curItem.IdenticalToChild, curItem.ParentNo) } for _, ctn := range curItem.MergedChildren { m := 0 // position in merged needs map p := 0 // position in parent needs map for p < len(ctn.NeedsMap) { if ctn.NeedsMap[p].Cur == curItem.NeedsMap[m].Cur { ctn.NeedsMap[p].Commit = curItem.NeedsMap[m].Commit m++ p++ } else if ctn.NeedsMap[p].Cur < curItem.NeedsMap[m].Cur { p++ } else { m++ } } finished, err := applyNeeds(ctn.Child, ctn.NeedsMap, ctn.IdenticalToChild, ctn.ParentNo) if finished || err != nil { return finished, err } } return false, nil } func applyNeeds(child *queueItem, needsMap []lineMap, identicalToChild bool, parentNo int) (bool, error) { if 
identicalToChild { for i := range child.NeedsMap { l := &child.NeedsMap[i] if l.Cur != needsMap[i].Cur || l.Orig != needsMap[i].Orig { return false, errors.New("needsMap isn't the same? Why not??") } if l.Commit == nil || parentNo < l.FromParentNo { l.Commit = needsMap[i].Commit l.FromParentNo = parentNo } } } else { i := 0 out: for j := range child.NeedsMap { l := &child.NeedsMap[j] for needsMap[i].Orig < l.Cur { i++ if i == len(needsMap) { break out } } if l.Cur == needsMap[i].Orig { if l.Commit == nil || parentNo < l.FromParentNo { l.Commit = needsMap[i].Commit l.FromParentNo = parentNo } } } } child.numParentsNeedResolving-- if child.numParentsNeedResolving == 0 { finished, err := finishNeeds(child) if finished || err != nil { return finished, err } } return false, nil } // String prints the results of a Blame using git-blame's style. func (b BlameResult) String() string { var buf bytes.Buffer // max line number length mlnl := len(strconv.Itoa(len(b.Lines))) // max author length mal := b.maxAuthorLength() format := fmt.Sprintf("%%s (%%-%ds %%s %%%dd) %%s\n", mal, mlnl) for ln := range b.Lines { _, _ = fmt.Fprintf(&buf, format, b.Lines[ln].Hash.String()[:8], b.Lines[ln].AuthorName, b.Lines[ln].Date.Format("2006-01-02 15:04:05 -0700"), ln+1, b.Lines[ln].Text) } return buf.String() } // utility function to calculate the number of runes needed // to print the longest author name in the blame of a file. 
func (b BlameResult) maxAuthorLength() int { m := 0 for ln := range b.Lines { m = max(m, utf8.RuneCountInString(b.Lines[ln].AuthorName)) } return m } func min(a, b int) int { if a < b { return a } return b } func max(a, b int) int { if a > b { return a } return b } type childToNeedsMap struct { Child *queueItem NeedsMap []lineMap IdenticalToChild bool ParentNo int } type queueItem struct { Child *queueItem MergedChildren []childToNeedsMap Commit *object.Commit path string Contents string NeedsMap []lineMap numParentsNeedResolving int IdenticalToChild bool ParentNo int } type priorityQueueImp []*queueItem func (pq *priorityQueueImp) Len() int { return len(*pq) } func (pq *priorityQueueImp) Less(i, j int) bool { return !(*pq)[i].Commit.Less((*pq)[j].Commit) } func (pq *priorityQueueImp) Swap(i, j int) { (*pq)[i], (*pq)[j] = (*pq)[j], (*pq)[i] } func (pq *priorityQueueImp) Push(x any) { *pq = append(*pq, x.(*queueItem)) } func (pq *priorityQueueImp) Pop() any { n := len(*pq) ret := (*pq)[n-1] (*pq)[n-1] = nil // ovoid memory leak *pq = (*pq)[0 : n-1] return ret } func (pq *priorityQueueImp) Peek() *object.Commit { if len(*pq) == 0 { return nil } return (*pq)[0].Commit } type priorityQueue priorityQueueImp func (pq *priorityQueue) Init() { heap.Init((*priorityQueueImp)(pq)) } func (pq *priorityQueue) Len() int { return (*priorityQueueImp)(pq).Len() } func (pq *priorityQueue) Push(c *queueItem) { heap.Push((*priorityQueueImp)(pq), c) } func (pq *priorityQueue) Pop() *queueItem { return heap.Pop((*priorityQueueImp)(pq)).(*queueItem) } func (pq *priorityQueue) Peek() *object.Commit { return (*priorityQueueImp)(pq).Peek() } type parentCommit struct { Commit *object.Commit Path string } func parentsContainingPath(path string, c *object.Commit) ([]parentCommit, error) { // TODO: benchmark this method making git.object.Commit.parent public instead of using // an iterator var result []parentCommit iter := c.Parents() for { parent, err := iter.Next() if err == io.EOF { return 
result, nil } if err != nil { return nil, err } if _, err := parent.File(path); err == nil { result = append(result, parentCommit{parent, path}) } else { // look for renames patch, err := parent.Patch(c) if err != nil { return nil, err } else if patch != nil { for _, fp := range patch.FilePatches() { from, to := fp.Files() if from != nil && to != nil && to.Path() == path { result = append(result, parentCommit{parent, from.Path()}) break } } } } } } func blobHash(path string, commit *object.Commit) (plumbing.Hash, error) { file, err := commit.File(path) if err != nil { return plumbing.ZeroHash, err } return file.Hash, nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/worktree_js.go
vendor/github.com/jesseduffield/go-git/v5/worktree_js.go
// +build js package git import ( "syscall" "time" "github.com/jesseduffield/go-git/v5/plumbing/format/index" ) func init() { fillSystemInfo = func(e *index.Entry, sys interface{}) { if os, ok := sys.(*syscall.Stat_t); ok { e.CreatedAt = time.Unix(int64(os.Ctime), int64(os.CtimeNsec)) e.Dev = uint32(os.Dev) e.Inode = uint32(os.Ino) e.GID = os.Gid e.UID = os.Uid } } } func isSymlinkWindowsNonAdmin(err error) bool { return false }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/worktree_unix_other.go
vendor/github.com/jesseduffield/go-git/v5/worktree_unix_other.go
// +build openbsd dragonfly solaris package git import ( "syscall" "time" "github.com/jesseduffield/go-git/v5/plumbing/format/index" ) func init() { fillSystemInfo = func(e *index.Entry, sys interface{}) { if os, ok := sys.(*syscall.Stat_t); ok { e.CreatedAt = time.Unix(os.Atim.Unix()) e.Dev = uint32(os.Dev) e.Inode = uint32(os.Ino) e.GID = os.Gid e.UID = os.Uid } } } func isSymlinkWindowsNonAdmin(err error) bool { return false }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/remote.go
vendor/github.com/jesseduffield/go-git/v5/remote.go
package git import ( "context" "errors" "fmt" "io" "strings" "time" "github.com/go-git/go-billy/v5/osfs" "github.com/jesseduffield/go-git/v5/config" "github.com/jesseduffield/go-git/v5/internal/url" "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/plumbing/cache" "github.com/jesseduffield/go-git/v5/plumbing/format/packfile" "github.com/jesseduffield/go-git/v5/plumbing/object" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/capability" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/sideband" "github.com/jesseduffield/go-git/v5/plumbing/revlist" "github.com/jesseduffield/go-git/v5/plumbing/storer" "github.com/jesseduffield/go-git/v5/plumbing/transport" "github.com/jesseduffield/go-git/v5/plumbing/transport/client" "github.com/jesseduffield/go-git/v5/storage" "github.com/jesseduffield/go-git/v5/storage/filesystem" "github.com/jesseduffield/go-git/v5/storage/memory" "github.com/jesseduffield/go-git/v5/utils/ioutil" ) var ( NoErrAlreadyUpToDate = errors.New("already up-to-date") ErrDeleteRefNotSupported = errors.New("server does not support delete-refs") ErrForceNeeded = errors.New("some refs were not updated") ErrExactSHA1NotSupported = errors.New("server does not support exact SHA1 refspec") ErrEmptyUrls = errors.New("URLs cannot be empty") ) type NoMatchingRefSpecError struct { refSpec config.RefSpec } func (e NoMatchingRefSpecError) Error() string { return fmt.Sprintf("couldn't find remote ref %q", e.refSpec.Src()) } func (e NoMatchingRefSpecError) Is(target error) bool { _, ok := target.(NoMatchingRefSpecError) return ok } const ( // This describes the maximum number of commits to walk when // computing the haves to send to a server, for each ref in the // repo containing this remote, when not using the multi-ack // protocol. Setting this to 0 means there is no limit. 
maxHavesToVisitPerRef = 100 // peeledSuffix is the suffix used to build peeled reference names. peeledSuffix = "^{}" ) // Remote represents a connection to a remote repository. type Remote struct { c *config.RemoteConfig s storage.Storer } // NewRemote creates a new Remote. // The intended purpose is to use the Remote for tasks such as listing remote references (like using git ls-remote). // Otherwise Remotes should be created via the use of a Repository. func NewRemote(s storage.Storer, c *config.RemoteConfig) *Remote { return &Remote{s: s, c: c} } // Config returns the RemoteConfig object used to instantiate this Remote. func (r *Remote) Config() *config.RemoteConfig { return r.c } func (r *Remote) String() string { var fetch, push string if len(r.c.URLs) > 0 { fetch = r.c.URLs[0] push = r.c.URLs[len(r.c.URLs)-1] } return fmt.Sprintf("%s\t%s (fetch)\n%[1]s\t%[3]s (push)", r.c.Name, fetch, push) } // Push performs a push to the remote. Returns NoErrAlreadyUpToDate if the // remote was already up-to-date. func (r *Remote) Push(o *PushOptions) error { return r.PushContext(context.Background(), o) } // PushContext performs a push to the remote. Returns NoErrAlreadyUpToDate if // the remote was already up-to-date. // // The provided Context must be non-nil. If the context expires before the // operation is complete, an error is returned. The context only affects the // transport operations. 
func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) { if err := o.Validate(); err != nil { return err } if o.RemoteName != r.c.Name { return fmt.Errorf("remote names don't match: %s != %s", o.RemoteName, r.c.Name) } if o.RemoteURL == "" && len(r.c.URLs) > 0 { o.RemoteURL = r.c.URLs[len(r.c.URLs)-1] } s, err := newSendPackSession(o.RemoteURL, o.Auth, o.InsecureSkipTLS, o.CABundle, o.ProxyOptions) if err != nil { return err } defer ioutil.CheckClose(s, &err) ar, err := s.AdvertisedReferencesContext(ctx) if err != nil { return err } remoteRefs, err := ar.AllReferences() if err != nil { return err } if err := r.checkRequireRemoteRefs(o.RequireRemoteRefs, remoteRefs); err != nil { return err } isDelete := false allDelete := true for _, rs := range o.RefSpecs { if rs.IsDelete() { isDelete = true } else { allDelete = false } if isDelete && !allDelete { break } } if isDelete && !ar.Capabilities.Supports(capability.DeleteRefs) { return ErrDeleteRefNotSupported } if o.Force { for i := 0; i < len(o.RefSpecs); i++ { rs := &o.RefSpecs[i] if !rs.IsForceUpdate() && !rs.IsDelete() { o.RefSpecs[i] = config.RefSpec("+" + rs.String()) } } } localRefs, err := r.references() if err != nil { return err } req, err := r.newReferenceUpdateRequest(o, localRefs, remoteRefs, ar) if err != nil { return err } if len(req.Commands) == 0 { return NoErrAlreadyUpToDate } objects := objectsToPush(req.Commands) haves, err := referencesToHashes(remoteRefs) if err != nil { return err } stop, err := r.s.Shallow() if err != nil { return err } // if we have shallow we should include this as part of the objects that // we are aware. haves = append(haves, stop...) var hashesToPush []plumbing.Hash // Avoid the expensive revlist operation if we're only doing deletes. if !allDelete { if url.IsLocalEndpoint(o.RemoteURL) { // If we're are pushing to a local repo, it might be much // faster to use a local storage layer to get the commits // to ignore, when calculating the object revlist. 
localStorer := filesystem.NewStorage( osfs.New(o.RemoteURL), cache.NewObjectLRUDefault()) hashesToPush, err = revlist.ObjectsWithStorageForIgnores( r.s, localStorer, objects, haves) } else { hashesToPush, err = revlist.Objects(r.s, objects, haves) } if err != nil { return err } } if len(hashesToPush) == 0 { allDelete = true for _, command := range req.Commands { if command.Action() != packp.Delete { allDelete = false break } } } rs, err := pushHashes(ctx, s, r.s, req, hashesToPush, r.useRefDeltas(ar), allDelete) if err != nil { return err } if rs != nil { if err = rs.Error(); err != nil { return err } } return r.updateRemoteReferenceStorage(req) } func (r *Remote) useRefDeltas(ar *packp.AdvRefs) bool { return !ar.Capabilities.Supports(capability.OFSDelta) } func (r *Remote) addReachableTags(localRefs []*plumbing.Reference, remoteRefs storer.ReferenceStorer, req *packp.ReferenceUpdateRequest) error { tags := make(map[plumbing.Reference]struct{}) // get a list of all tags locally for _, ref := range localRefs { if strings.HasPrefix(string(ref.Name()), "refs/tags") { tags[*ref] = struct{}{} } } remoteRefIter, err := remoteRefs.IterReferences() if err != nil { return err } // remove any that are already on the remote if err := remoteRefIter.ForEach(func(reference *plumbing.Reference) error { delete(tags, *reference) return nil }); err != nil { return err } for tag := range tags { tagObject, err := object.GetObject(r.s, tag.Hash()) var tagCommit *object.Commit if err != nil { return fmt.Errorf("get tag object: %w", err) } if tagObject.Type() != plumbing.TagObject { continue } annotatedTag, ok := tagObject.(*object.Tag) if !ok { return errors.New("could not get annotated tag object") } tagCommit, err = object.GetCommit(r.s, annotatedTag.Target) if err != nil { return fmt.Errorf("get annotated tag commit: %w", err) } // only include tags that are reachable from one of the refs // already being pushed for _, cmd := range req.Commands { if tag.Name() == cmd.Name { continue 
} if strings.HasPrefix(cmd.Name.String(), "refs/tags") { continue } c, err := object.GetCommit(r.s, cmd.New) if err != nil { return fmt.Errorf("get commit %v: %w", cmd.Name, err) } if isAncestor, err := tagCommit.IsAncestor(c); err == nil && isAncestor { req.Commands = append(req.Commands, &packp.Command{Name: tag.Name(), New: tag.Hash()}) } } } return nil } func (r *Remote) newReferenceUpdateRequest( o *PushOptions, localRefs []*plumbing.Reference, remoteRefs storer.ReferenceStorer, ar *packp.AdvRefs, ) (*packp.ReferenceUpdateRequest, error) { req := packp.NewReferenceUpdateRequestFromCapabilities(ar.Capabilities) if o.Progress != nil { req.Progress = o.Progress if ar.Capabilities.Supports(capability.Sideband64k) { _ = req.Capabilities.Set(capability.Sideband64k) } else if ar.Capabilities.Supports(capability.Sideband) { _ = req.Capabilities.Set(capability.Sideband) } } if ar.Capabilities.Supports(capability.PushOptions) { _ = req.Capabilities.Set(capability.PushOptions) for k, v := range o.Options { req.Options = append(req.Options, &packp.Option{Key: k, Value: v}) } } if o.Atomic && ar.Capabilities.Supports(capability.Atomic) { _ = req.Capabilities.Set(capability.Atomic) } if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req, o.Prune, o.ForceWithLease); err != nil { return nil, err } if o.FollowTags { if err := r.addReachableTags(localRefs, remoteRefs, req); err != nil { return nil, err } } return req, nil } func (r *Remote) updateRemoteReferenceStorage( req *packp.ReferenceUpdateRequest, ) error { for _, spec := range r.c.Fetch { for _, c := range req.Commands { if !spec.Match(c.Name) { continue } local := spec.Dst(c.Name) ref := plumbing.NewHashReference(local, c.New) switch c.Action() { case packp.Create, packp.Update: if err := r.s.SetReference(ref); err != nil { return err } case packp.Delete: if err := r.s.RemoveReference(local); err != nil { return err } } } } return nil } // FetchContext fetches references along with the objects 
necessary to complete // their histories. // // Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are // no changes to be fetched, or an error. // // The provided Context must be non-nil. If the context expires before the // operation is complete, an error is returned. The context only affects the // transport operations. func (r *Remote) FetchContext(ctx context.Context, o *FetchOptions) error { _, err := r.fetch(ctx, o) return err } // Fetch fetches references along with the objects necessary to complete their // histories. // // Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are // no changes to be fetched, or an error. func (r *Remote) Fetch(o *FetchOptions) error { return r.FetchContext(context.Background(), o) } func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.ReferenceStorer, err error) { if o.RemoteName == "" { o.RemoteName = r.c.Name } if err = o.Validate(); err != nil { return nil, err } if len(o.RefSpecs) == 0 { o.RefSpecs = r.c.Fetch } if o.RemoteURL == "" { o.RemoteURL = r.c.URLs[0] } s, err := newUploadPackSession(o.RemoteURL, o.Auth, o.InsecureSkipTLS, o.CABundle, o.ProxyOptions) if err != nil { return nil, err } defer ioutil.CheckClose(s, &err) ar, err := s.AdvertisedReferencesContext(ctx) if err != nil { return nil, err } req, err := r.newUploadPackRequest(o, ar) if err != nil { return nil, err } if err := r.isSupportedRefSpec(o.RefSpecs, ar); err != nil { return nil, err } remoteRefs, err := ar.AllReferences() if err != nil { return nil, err } localRefs, err := r.references() if err != nil { return nil, err } refs, specToRefs, err := calculateRefs(o.RefSpecs, remoteRefs, o.Tags) if err != nil { return nil, err } if !req.Depth.IsZero() { req.Shallows, err = r.s.Shallow() if err != nil { return nil, fmt.Errorf("existing checkout is not shallow") } } req.Wants, err = getWants(r.s, refs, o.Depth) if len(req.Wants) > 0 { req.Haves, err = getHaves(localRefs, remoteRefs, r.s, 
o.Depth) if err != nil { return nil, err } if err = r.fetchPack(ctx, o, s, req); err != nil { return nil, err } } var updatedPrune bool if o.Prune { updatedPrune, err = r.pruneRemotes(o.RefSpecs, localRefs, remoteRefs) if err != nil { return nil, err } } updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, specToRefs, o.Tags, o.Force) if err != nil { return nil, err } if !updated { updated, err = depthChanged(req.Shallows, r.s) if err != nil { return nil, fmt.Errorf("error checking depth change: %v", err) } } if !updated && !updatedPrune { // No references updated, but may have fetched new objects, check if we now have any of our wants for _, hash := range req.Wants { exists, _ := objectExists(r.s, hash) if exists { updated = true break } } if !updated { return remoteRefs, NoErrAlreadyUpToDate } } return remoteRefs, nil } func depthChanged(before []plumbing.Hash, s storage.Storer) (bool, error) { after, err := s.Shallow() if err != nil { return false, err } if len(before) != len(after) { return true, nil } bm := make(map[plumbing.Hash]bool, len(before)) for _, b := range before { bm[b] = true } for _, a := range after { if _, ok := bm[a]; !ok { return true, nil } } return false, nil } func newUploadPackSession(url string, auth transport.AuthMethod, insecure bool, cabundle []byte, proxyOpts transport.ProxyOptions) (transport.UploadPackSession, error) { c, ep, err := newClient(url, insecure, cabundle, proxyOpts) if err != nil { return nil, err } return c.NewUploadPackSession(ep, auth) } func newSendPackSession(url string, auth transport.AuthMethod, insecure bool, cabundle []byte, proxyOpts transport.ProxyOptions) (transport.ReceivePackSession, error) { c, ep, err := newClient(url, insecure, cabundle, proxyOpts) if err != nil { return nil, err } return c.NewReceivePackSession(ep, auth) } func newClient(url string, insecure bool, cabundle []byte, proxyOpts transport.ProxyOptions) (transport.Transport, *transport.Endpoint, error) { ep, err := 
transport.NewEndpoint(url) if err != nil { return nil, nil, err } ep.InsecureSkipTLS = insecure ep.CaBundle = cabundle ep.Proxy = proxyOpts c, err := client.NewClient(ep) if err != nil { return nil, nil, err } return c, ep, err } func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.UploadPackSession, req *packp.UploadPackRequest) (err error) { reader, err := s.UploadPack(ctx, req) if err != nil { if errors.Is(err, transport.ErrEmptyUploadPackRequest) { // XXX: no packfile provided, everything is up-to-date. return nil } return err } defer ioutil.CheckClose(reader, &err) if err = r.updateShallow(o, reader); err != nil { return err } if err = packfile.UpdateObjectStorage(r.s, buildSidebandIfSupported(req.Capabilities, reader, o.Progress), ); err != nil { return err } return err } func (r *Remote) pruneRemotes(specs []config.RefSpec, localRefs []*plumbing.Reference, remoteRefs memory.ReferenceStorage) (bool, error) { var updatedPrune bool for _, spec := range specs { rev := spec.Reverse() for _, ref := range localRefs { if !rev.Match(ref.Name()) { continue } _, err := remoteRefs.Reference(rev.Dst(ref.Name())) if errors.Is(err, plumbing.ErrReferenceNotFound) { updatedPrune = true err := r.s.RemoveReference(ref.Name()) if err != nil { return false, err } } } } return updatedPrune, nil } func (r *Remote) addReferencesToUpdate( refspecs []config.RefSpec, localRefs []*plumbing.Reference, remoteRefs storer.ReferenceStorer, req *packp.ReferenceUpdateRequest, prune bool, forceWithLease *ForceWithLease, ) error { // This references dictionary will be used to search references by name. 
refsDict := make(map[string]*plumbing.Reference) for _, ref := range localRefs { refsDict[ref.Name().String()] = ref } for _, rs := range refspecs { if rs.IsDelete() { if err := r.deleteReferences(rs, remoteRefs, refsDict, req, false); err != nil { return err } } else { err := r.addOrUpdateReferences(rs, localRefs, refsDict, remoteRefs, req, forceWithLease) if err != nil { return err } if prune { if err := r.deleteReferences(rs, remoteRefs, refsDict, req, true); err != nil { return err } } } } return nil } func (r *Remote) addOrUpdateReferences( rs config.RefSpec, localRefs []*plumbing.Reference, refsDict map[string]*plumbing.Reference, remoteRefs storer.ReferenceStorer, req *packp.ReferenceUpdateRequest, forceWithLease *ForceWithLease, ) error { // If it is not a wildcard refspec we can directly search for the reference // in the references dictionary. if !rs.IsWildcard() { ref, ok := refsDict[rs.Src()] if !ok { commit, err := object.GetCommit(r.s, plumbing.NewHash(rs.Src())) if err == nil { return r.addCommit(rs, remoteRefs, commit.Hash, req) } return nil } return r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req, forceWithLease) } for _, ref := range localRefs { err := r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req, forceWithLease) if err != nil { return err } } return nil } func (r *Remote) deleteReferences(rs config.RefSpec, remoteRefs storer.ReferenceStorer, refsDict map[string]*plumbing.Reference, req *packp.ReferenceUpdateRequest, prune bool) error { iter, err := remoteRefs.IterReferences() if err != nil { return err } return iter.ForEach(func(ref *plumbing.Reference) error { if ref.Type() != plumbing.HashReference { return nil } if prune { rs := rs.Reverse() if !rs.Match(ref.Name()) { return nil } if _, ok := refsDict[rs.Dst(ref.Name()).String()]; ok { return nil } } else if rs.Dst("") != ref.Name() { return nil } cmd := &packp.Command{ Name: ref.Name(), Old: ref.Hash(), New: plumbing.ZeroHash, } req.Commands = append(req.Commands, cmd) 
return nil }) } func (r *Remote) addCommit(rs config.RefSpec, remoteRefs storer.ReferenceStorer, localCommit plumbing.Hash, req *packp.ReferenceUpdateRequest) error { if rs.IsWildcard() { return errors.New("can't use wildcard together with hash refspecs") } cmd := &packp.Command{ Name: rs.Dst(""), Old: plumbing.ZeroHash, New: localCommit, } remoteRef, err := remoteRefs.Reference(cmd.Name) if err == nil { if remoteRef.Type() != plumbing.HashReference { // TODO: check actual git behavior here return nil } cmd.Old = remoteRef.Hash() } else if err != plumbing.ErrReferenceNotFound { return err } if cmd.Old == cmd.New { return nil } if !rs.IsForceUpdate() { if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil { return err } } req.Commands = append(req.Commands, cmd) return nil } func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec, remoteRefs storer.ReferenceStorer, localRef *plumbing.Reference, req *packp.ReferenceUpdateRequest, forceWithLease *ForceWithLease) error { if localRef.Type() != plumbing.HashReference { return nil } if !rs.Match(localRef.Name()) { return nil } cmd := &packp.Command{ Name: rs.Dst(localRef.Name()), Old: plumbing.ZeroHash, New: localRef.Hash(), } remoteRef, err := remoteRefs.Reference(cmd.Name) if err == nil { if remoteRef.Type() != plumbing.HashReference { // TODO: check actual git behavior here return nil } cmd.Old = remoteRef.Hash() } else if err != plumbing.ErrReferenceNotFound { return err } if cmd.Old == cmd.New { return nil } if forceWithLease != nil { if err = r.checkForceWithLease(localRef, cmd, forceWithLease); err != nil { return err } } else if !rs.IsForceUpdate() { if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil { return err } } req.Commands = append(req.Commands, cmd) return nil } func (r *Remote) checkForceWithLease(localRef *plumbing.Reference, cmd *packp.Command, forceWithLease *ForceWithLease) error { remotePrefix := fmt.Sprintf("refs/remotes/%s/", r.Config().Name) ref, err := 
storer.ResolveReference( r.s, plumbing.ReferenceName(remotePrefix+strings.Replace(localRef.Name().String(), "refs/heads/", "", -1))) if err != nil { return err } if forceWithLease.RefName.String() == "" || (forceWithLease.RefName == cmd.Name) { expectedOID := ref.Hash() if !forceWithLease.Hash.IsZero() { expectedOID = forceWithLease.Hash } if cmd.Old != expectedOID { return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String()) } } return nil } func (r *Remote) references() ([]*plumbing.Reference, error) { var localRefs []*plumbing.Reference iter, err := r.s.IterReferences() if err != nil { return nil, err } for { ref, err := iter.Next() if err == io.EOF { break } if err != nil { return nil, err } localRefs = append(localRefs, ref) } return localRefs, nil } func getRemoteRefsFromStorer(remoteRefStorer storer.ReferenceStorer) ( map[plumbing.Hash]bool, error) { remoteRefs := map[plumbing.Hash]bool{} iter, err := remoteRefStorer.IterReferences() if err != nil { return nil, err } err = iter.ForEach(func(ref *plumbing.Reference) error { if ref.Type() != plumbing.HashReference { return nil } remoteRefs[ref.Hash()] = true return nil }) if err != nil { return nil, err } return remoteRefs, nil } // getHavesFromRef populates the given `haves` map with the given // reference, and up to `maxHavesToVisitPerRef` ancestor commits. func getHavesFromRef( ref *plumbing.Reference, remoteRefs map[plumbing.Hash]bool, s storage.Storer, haves map[plumbing.Hash]bool, depth int, ) error { h := ref.Hash() if haves[h] { return nil } commit, err := object.GetCommit(s, h) if err != nil { if !errors.Is(err, plumbing.ErrObjectNotFound) { // Ignore the error if this isn't a commit. haves[ref.Hash()] = true } return nil } // Until go-git supports proper commit negotiation during an // upload pack request, include up to `maxHavesToVisitPerRef` // commits from the history of each ref. 
walker := object.NewCommitPreorderIter(commit, haves, nil) toVisit := maxHavesToVisitPerRef // But only need up to the requested depth if depth > 0 && depth < maxHavesToVisitPerRef { toVisit = depth } // It is safe to ignore any error here as we are just trying to find the references that we already have // An example of a legitimate failure is we have a shallow clone and don't have the previous commit(s) _ = walker.ForEach(func(c *object.Commit) error { haves[c.Hash] = true toVisit-- // If toVisit starts out at 0 (indicating there is no // max), then it will be negative here and we won't stop // early. if toVisit == 0 || remoteRefs[c.Hash] { return storer.ErrStop } return nil }) return nil } func getHaves( localRefs []*plumbing.Reference, remoteRefStorer storer.ReferenceStorer, s storage.Storer, depth int, ) ([]plumbing.Hash, error) { haves := map[plumbing.Hash]bool{} // Build a map of all the remote references, to avoid loading too // many parent commits for references we know don't need to be // transferred. 
remoteRefs, err := getRemoteRefsFromStorer(remoteRefStorer) if err != nil { return nil, err } for _, ref := range localRefs { if haves[ref.Hash()] { continue } if ref.Type() != plumbing.HashReference { continue } err = getHavesFromRef(ref, remoteRefs, s, haves, depth) if err != nil { return nil, err } } var result []plumbing.Hash for h := range haves { result = append(result, h) } return result, nil } const refspecAllTags = "+refs/tags/*:refs/tags/*" func calculateRefs( spec []config.RefSpec, remoteRefs storer.ReferenceStorer, tagMode TagMode, ) (memory.ReferenceStorage, [][]*plumbing.Reference, error) { if tagMode == AllTags { spec = append(spec, refspecAllTags) } refs := make(memory.ReferenceStorage) // list of references matched for each spec specToRefs := make([][]*plumbing.Reference, len(spec)) for i := range spec { var err error specToRefs[i], err = doCalculateRefs(spec[i], remoteRefs, refs) if err != nil { return nil, nil, err } } return refs, specToRefs, nil } func doCalculateRefs( s config.RefSpec, remoteRefs storer.ReferenceStorer, refs memory.ReferenceStorage, ) ([]*plumbing.Reference, error) { var refList []*plumbing.Reference if s.IsExactSHA1() { ref := plumbing.NewHashReference(s.Dst(""), plumbing.NewHash(s.Src())) refList = append(refList, ref) return refList, refs.SetReference(ref) } var matched bool onMatched := func(ref *plumbing.Reference) error { if ref.Type() == plumbing.SymbolicReference { target, err := storer.ResolveReference(remoteRefs, ref.Name()) if err != nil { return err } ref = plumbing.NewHashReference(ref.Name(), target.Hash()) } if ref.Type() != plumbing.HashReference { return nil } matched = true refList = append(refList, ref) return refs.SetReference(ref) } var ret error if s.IsWildcard() { iter, err := remoteRefs.IterReferences() if err != nil { return nil, err } ret = iter.ForEach(func(ref *plumbing.Reference) error { if !s.Match(ref.Name()) { return nil } return onMatched(ref) }) } else { var resolvedRef *plumbing.Reference src 
:= s.Src() resolvedRef, ret = expand_ref(remoteRefs, plumbing.ReferenceName(src)) if ret == nil { ret = onMatched(resolvedRef) } } if !matched && !s.IsWildcard() { return nil, NoMatchingRefSpecError{refSpec: s} } return refList, ret } func getWants(localStorer storage.Storer, refs memory.ReferenceStorage, depth int) ([]plumbing.Hash, error) { // If depth is anything other than 1 and the repo has shallow commits then just because we have the commit // at the reference doesn't mean that we don't still need to fetch the parents shallow := false if depth != 1 { if s, _ := localStorer.Shallow(); len(s) > 0 { shallow = true } } wants := map[plumbing.Hash]bool{} for _, ref := range refs { hash := ref.Hash() exists, err := objectExists(localStorer, ref.Hash()) if err != nil { return nil, err } if !exists || shallow { wants[hash] = true } } var result []plumbing.Hash for h := range wants { result = append(result, h) } return result, nil } func objectExists(s storer.EncodedObjectStorer, h plumbing.Hash) (bool, error) { _, err := s.EncodedObject(plumbing.AnyObject, h) if err == plumbing.ErrObjectNotFound { return false, nil } return true, err } func checkFastForwardUpdate(s storer.EncodedObjectStorer, remoteRefs storer.ReferenceStorer, cmd *packp.Command) error { if cmd.Old == plumbing.ZeroHash { _, err := remoteRefs.Reference(cmd.Name) if err == plumbing.ErrReferenceNotFound { return nil } if err != nil { return err } return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String()) } ff, err := isFastForward(s, cmd.Old, cmd.New, nil) if err != nil { return err } if !ff { return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String()) } return nil } func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash, earliestShallow *plumbing.Hash) (bool, error) { c, err := object.GetCommit(s, new) if err != nil { return false, err } parentsToIgnore := []plumbing.Hash{} if earliestShallow != nil { earliestCommit, err := object.GetCommit(s, *earliestShallow) if err 
!= nil { return false, err } parentsToIgnore = earliestCommit.ParentHashes } found := false // stop iterating at the earliest shallow commit, ignoring its parents // note: when pull depth is smaller than the number of new changes on the remote, this fails due to missing parents. // as far as i can tell, without the commits in-between the shallow pull and the earliest shallow, there's no // real way of telling whether it will be a fast-forward merge. iter := object.NewCommitPreorderIter(c, nil, parentsToIgnore) err = iter.ForEach(func(c *object.Commit) error { if c.Hash != old { return nil } found = true return storer.ErrStop }) return found, err } func (r *Remote) newUploadPackRequest(o *FetchOptions, ar *packp.AdvRefs) (*packp.UploadPackRequest, error) { req := packp.NewUploadPackRequestFromCapabilities(ar.Capabilities) if o.Depth != 0 { req.Depth = packp.DepthCommits(o.Depth) if err := req.Capabilities.Set(capability.Shallow); err != nil { return nil, err } } if o.Progress == nil && ar.Capabilities.Supports(capability.NoProgress) { if err := req.Capabilities.Set(capability.NoProgress); err != nil { return nil, err } } isWildcard := true for _, s := range o.RefSpecs { if !s.IsWildcard() { isWildcard = false break } } if isWildcard && o.Tags == TagFollowing && ar.Capabilities.Supports(capability.IncludeTag) { if err := req.Capabilities.Set(capability.IncludeTag); err != nil { return nil, err } } return req, nil } func (r *Remote) isSupportedRefSpec(refs []config.RefSpec, ar *packp.AdvRefs) error { var containsIsExact bool for _, ref := range refs { if ref.IsExactSHA1() { containsIsExact = true } } if !containsIsExact { return nil } if ar.Capabilities.Supports(capability.AllowReachableSHA1InWant) || ar.Capabilities.Supports(capability.AllowTipSHA1InWant) { return nil } return ErrExactSHA1NotSupported } func buildSidebandIfSupported(l *capability.List, reader io.Reader, p sideband.Progress) io.Reader { var t sideband.Type switch { case 
l.Supports(capability.Sideband): t = sideband.Sideband case l.Supports(capability.Sideband64k): t = sideband.Sideband64k default: return reader } d := sideband.NewDemuxer(t, reader) d.Progress = p return d } func (r *Remote) updateLocalReferenceStorage( specs []config.RefSpec, fetchedRefs, remoteRefs memory.ReferenceStorage, specToRefs [][]*plumbing.Reference, tagMode TagMode, force bool, ) (updated bool, err error) { isWildcard := true forceNeeded := false for i, spec := range specs { if !spec.IsWildcard() { isWildcard = false } for _, ref := range specToRefs[i] { if ref.Type() != plumbing.HashReference { continue } localName := spec.Dst(ref.Name()) // If localName doesn't start with "refs/" then treat as a branch. if !strings.HasPrefix(localName.String(), "refs/") { localName = plumbing.NewBranchReferenceName(localName.String()) } old, _ := storer.ResolveReference(r.s, localName) new := plumbing.NewHashReference(localName, ref.Hash()) // If the ref exists locally as a non-tag and force is not // specified, only update if the new ref is an ancestor of the old if old != nil && !old.Name().IsTag() && !force && !spec.IsForceUpdate() { ff, err := isFastForward(r.s, old.Hash(), new.Hash(), nil) if err != nil { return updated, err } if !ff { forceNeeded = true continue } } refUpdated, err := checkAndUpdateReferenceStorerIfNeeded(r.s, new, old) if err != nil { return updated, err } if refUpdated { updated = true } } } if tagMode == NoTags { return updated, nil } tags := fetchedRefs if isWildcard { tags = remoteRefs } tagUpdated, err := r.buildFetchedTags(tags) if err != nil { return updated, err } if tagUpdated { updated = true } if forceNeeded { err = ErrForceNeeded } return } func (r *Remote) buildFetchedTags(refs memory.ReferenceStorage) (updated bool, err error) { for _, ref := range refs { if !ref.Name().IsTag() { continue } _, err := r.s.EncodedObject(plumbing.AnyObject, ref.Hash()) if err == plumbing.ErrObjectNotFound { continue } if err != nil { return false, err 
} refUpdated, err := updateReferenceStorerIfNeeded(r.s, ref) if err != nil { return updated, err } if refUpdated { updated = true } } return } // List the references on the remote repository. // The provided Context must be non-nil. If the context expires before the // operation is complete, an error is returned. The context only affects to the // transport operations. func (r *Remote) ListContext(ctx context.Context, o *ListOptions) (rfs []*plumbing.Reference, err error) { return r.list(ctx, o) } func (r *Remote) List(o *ListOptions) (rfs []*plumbing.Reference, err error) { timeout := o.Timeout // Default to the old hardcoded 10s value if a timeout is not explicitly set. if timeout == 0 { timeout = 10 } if timeout < 0 { return nil, fmt.Errorf("invalid timeout: %d", timeout) } ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second) defer cancel() return r.ListContext(ctx, o) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
true
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/worktree.go
vendor/github.com/jesseduffield/go-git/v5/worktree.go
package git import ( "context" "errors" "fmt" "io" "os" "path/filepath" "runtime" "strings" "github.com/go-git/go-billy/v5" "github.com/go-git/go-billy/v5/util" "github.com/jesseduffield/go-git/v5/config" "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/plumbing/filemode" "github.com/jesseduffield/go-git/v5/plumbing/format/gitignore" "github.com/jesseduffield/go-git/v5/plumbing/format/index" "github.com/jesseduffield/go-git/v5/plumbing/object" "github.com/jesseduffield/go-git/v5/plumbing/storer" "github.com/jesseduffield/go-git/v5/utils/ioutil" "github.com/jesseduffield/go-git/v5/utils/merkletrie" "github.com/jesseduffield/go-git/v5/utils/sync" ) var ( ErrWorktreeNotClean = errors.New("worktree is not clean") ErrSubmoduleNotFound = errors.New("submodule not found") ErrUnstagedChanges = errors.New("worktree contains unstaged changes") ErrGitModulesSymlink = errors.New(gitmodulesFile + " is a symlink") ErrNonFastForwardUpdate = errors.New("non-fast-forward update") ErrRestoreWorktreeOnlyNotSupported = errors.New("worktree only is not supported") ) // Worktree represents a git worktree. type Worktree struct { // Filesystem underlying filesystem. Filesystem billy.Filesystem // External excludes not found in the repository .gitignore Excludes []gitignore.Pattern r *Repository } // Pull incorporates changes from a remote repository into the current branch. // Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are // no changes to be fetched, or an error. // // Pull only supports merges where the can be resolved as a fast-forward. func (w *Worktree) Pull(o *PullOptions) error { return w.PullContext(context.Background(), o) } // PullContext incorporates changes from a remote repository into the current // branch. Returns nil if the operation is successful, NoErrAlreadyUpToDate if // there are no changes to be fetched, or an error. // // Pull only supports merges where the can be resolved as a fast-forward. 
// // The provided Context must be non-nil. If the context expires before the // operation is complete, an error is returned. The context only affects the // transport operations. func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error { if err := o.Validate(); err != nil { return err } remote, err := w.r.Remote(o.RemoteName) if err != nil { return err } fetchHead, err := remote.fetch(ctx, &FetchOptions{ RemoteName: o.RemoteName, RemoteURL: o.RemoteURL, Depth: o.Depth, Auth: o.Auth, Progress: o.Progress, Force: o.Force, InsecureSkipTLS: o.InsecureSkipTLS, CABundle: o.CABundle, ProxyOptions: o.ProxyOptions, }) updated := true if err == NoErrAlreadyUpToDate { updated = false } else if err != nil { return err } ref, err := storer.ResolveReference(fetchHead, o.ReferenceName) if err != nil { return err } head, err := w.r.Head() if err == nil { // if we don't have a shallows list, just ignore it shallowList, _ := w.r.Storer.Shallow() var earliestShallow *plumbing.Hash if len(shallowList) > 0 { earliestShallow = &shallowList[0] } headAheadOfRef, err := isFastForward(w.r.Storer, ref.Hash(), head.Hash(), earliestShallow) if err != nil { return err } if !updated && headAheadOfRef { return NoErrAlreadyUpToDate } ff, err := isFastForward(w.r.Storer, head.Hash(), ref.Hash(), earliestShallow) if err != nil { return err } if !ff { return ErrNonFastForwardUpdate } } if err != nil && err != plumbing.ErrReferenceNotFound { return err } if err := w.updateHEAD(ref.Hash()); err != nil { return err } if err := w.Reset(&ResetOptions{ Mode: MergeReset, Commit: ref.Hash(), }); err != nil { return err } if o.RecurseSubmodules != NoRecurseSubmodules { return w.updateSubmodules(ctx, &SubmoduleUpdateOptions{ RecurseSubmodules: o.RecurseSubmodules, Auth: o.Auth, }) } return nil } func (w *Worktree) updateSubmodules(ctx context.Context, o *SubmoduleUpdateOptions) error { s, err := w.Submodules() if err != nil { return err } o.Init = true return s.UpdateContext(ctx, o) } // 
Checkout switch branches or restore working tree files. func (w *Worktree) Checkout(opts *CheckoutOptions) error { if err := opts.Validate(); err != nil { return err } if opts.Create { if err := w.createBranch(opts); err != nil { return err } } c, err := w.getCommitFromCheckoutOptions(opts) if err != nil { return err } ro := &ResetOptions{Commit: c, Mode: MergeReset} if opts.Force { ro.Mode = HardReset } else if opts.Keep { ro.Mode = SoftReset } if !opts.Hash.IsZero() && !opts.Create { err = w.setHEADToCommit(opts.Hash) } else { err = w.setHEADToBranch(opts.Branch, c) } if err != nil { return err } if len(opts.SparseCheckoutDirectories) > 0 { return w.ResetSparsely(ro, opts.SparseCheckoutDirectories) } return w.Reset(ro) } func (w *Worktree) createBranch(opts *CheckoutOptions) error { if err := opts.Branch.Validate(); err != nil { return err } _, err := w.r.Storer.Reference(opts.Branch) if err == nil { return fmt.Errorf("a branch named %q already exists", opts.Branch) } if err != plumbing.ErrReferenceNotFound { return err } if opts.Hash.IsZero() { ref, err := w.r.Head() if err != nil { return err } opts.Hash = ref.Hash() } return w.r.Storer.SetReference( plumbing.NewHashReference(opts.Branch, opts.Hash), ) } func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing.Hash, error) { hash := opts.Hash if hash.IsZero() { b, err := w.r.Reference(opts.Branch, true) if err != nil { return plumbing.ZeroHash, err } hash = b.Hash() } o, err := w.r.Object(plumbing.AnyObject, hash) if err != nil { return plumbing.ZeroHash, err } switch o := o.(type) { case *object.Tag: if o.TargetType != plumbing.CommitObject { return plumbing.ZeroHash, fmt.Errorf("%w: tag target %q", object.ErrUnsupportedObject, o.TargetType) } return o.Target, nil case *object.Commit: return o.Hash, nil } return plumbing.ZeroHash, fmt.Errorf("%w: %q", object.ErrUnsupportedObject, o.Type()) } func (w *Worktree) setHEADToCommit(commit plumbing.Hash) error { head := 
plumbing.NewHashReference(plumbing.HEAD, commit) return w.r.Storer.SetReference(head) } func (w *Worktree) setHEADToBranch(branch plumbing.ReferenceName, commit plumbing.Hash) error { target, err := w.r.Storer.Reference(branch) if err != nil { return err } var head *plumbing.Reference if target.Name().IsBranch() { head = plumbing.NewSymbolicReference(plumbing.HEAD, target.Name()) } else { head = plumbing.NewHashReference(plumbing.HEAD, commit) } return w.r.Storer.SetReference(head) } func (w *Worktree) ResetSparsely(opts *ResetOptions, dirs []string) error { if err := opts.Validate(w.r); err != nil { return err } if opts.Mode == MergeReset { unstaged, err := w.containsUnstagedChanges() if err != nil { return err } if unstaged { return ErrUnstagedChanges } } if err := w.setHEADCommit(opts.Commit); err != nil { return err } if opts.Mode == SoftReset { return nil } t, err := w.r.getTreeFromCommitHash(opts.Commit) if err != nil { return err } if opts.Mode == MixedReset || opts.Mode == MergeReset || opts.Mode == HardReset { if err := w.resetIndex(t, dirs, opts.Files); err != nil { return err } } if opts.Mode == MergeReset || opts.Mode == HardReset { if err := w.resetWorktree(t, opts.Files); err != nil { return err } } return nil } // Restore restores specified files in the working tree or stage with contents from // a restore source. If a path is tracked but does not exist in the restore, // source, it will be removed to match the source. // // If Staged and Worktree are true, then the restore source will be the index. // If only Staged is true, then the restore source will be HEAD. // If only Worktree is true or neither Staged nor Worktree are true, will // result in ErrRestoreWorktreeOnlyNotSupported because restoring the working // tree while leaving the stage untouched is not currently supported. // // Restore with no files specified will return ErrNoRestorePaths. 
func (w *Worktree) Restore(o *RestoreOptions) error { if err := o.Validate(); err != nil { return err } if o.Staged { opts := &ResetOptions{ Files: o.Files, } if o.Worktree { // If we are doing both Worktree and Staging then it is a hard reset opts.Mode = HardReset } else { // If we are doing just staging then it is a mixed reset opts.Mode = MixedReset } return w.Reset(opts) } return ErrRestoreWorktreeOnlyNotSupported } // Reset the worktree to a specified state. func (w *Worktree) Reset(opts *ResetOptions) error { return w.ResetSparsely(opts, nil) } func (w *Worktree) resetIndex(t *object.Tree, dirs []string, files []string) error { idx, err := w.r.Storer.Index() if err != nil { return err } b := newIndexBuilder(idx) changes, err := w.diffTreeWithStaging(t, true) if err != nil { return err } for _, ch := range changes { a, err := ch.Action() if err != nil { return err } var name string var e *object.TreeEntry switch a { case merkletrie.Modify, merkletrie.Insert: name = ch.To.String() e, err = t.FindEntry(name) if err != nil { return err } case merkletrie.Delete: name = ch.From.String() } if len(files) > 0 { contains := inFiles(files, name) if !contains { continue } } b.Remove(name) if e == nil { continue } b.Add(&index.Entry{ Name: name, Hash: e.Hash, Mode: e.Mode, }) } b.Write(idx) if len(dirs) > 0 { idx.SkipUnless(dirs) } return w.r.Storer.SetIndex(idx) } func inFiles(files []string, v string) bool { v = filepath.Clean(v) for _, s := range files { if filepath.Clean(s) == v { return true } } return false } func (w *Worktree) resetWorktree(t *object.Tree, files []string) error { changes, err := w.diffStagingWithWorktree(true, false) if err != nil { return err } idx, err := w.r.Storer.Index() if err != nil { return err } b := newIndexBuilder(idx) for _, ch := range changes { if err := w.validChange(ch); err != nil { return err } if len(files) > 0 { file := "" if ch.From != nil { file = ch.From.String() } else if ch.To != nil { file = ch.To.String() } if file == "" 
{ continue } contains := inFiles(files, file) if !contains { continue } } if err := w.checkoutChange(ch, t, b); err != nil { return err } } b.Write(idx) return w.r.Storer.SetIndex(idx) } // worktreeDeny is a list of paths that are not allowed // to be used when resetting the worktree. var worktreeDeny = map[string]struct{}{ // .git GitDirName: {}, // For other historical reasons, file names that do not conform to the 8.3 // format (up to eight characters for the basename, three for the file // extension, certain characters not allowed such as `+`, etc) are associated // with a so-called "short name", at least on the `C:` drive by default. // Which means that `git~1/` is a valid way to refer to `.git/`. "git~1": {}, } // validPath checks whether paths are valid. // The rules around invalid paths could differ from upstream based on how // filesystems are managed within go-git, but they are largely the same. // // For upstream rules: // https://github.com/git/git/blob/564d0252ca632e0264ed670534a51d18a689ef5d/read-cache.c#L946 // https://github.com/git/git/blob/564d0252ca632e0264ed670534a51d18a689ef5d/path.c#L1383 func validPath(paths ...string) error { for _, p := range paths { parts := strings.FieldsFunc(p, func(r rune) bool { return (r == '\\' || r == '/') }) if len(parts) == 0 { return fmt.Errorf("invalid path: %q", p) } if _, denied := worktreeDeny[strings.ToLower(parts[0])]; denied { return fmt.Errorf("invalid path prefix: %q", p) } if runtime.GOOS == "windows" { // Volume names are not supported, in both formats: \\ and <DRIVE_LETTER>:. if vol := filepath.VolumeName(p); vol != "" { return fmt.Errorf("invalid path: %q", p) } if !windowsValidPath(parts[0]) { return fmt.Errorf("invalid path: %q", p) } } for _, part := range parts { if part == ".." { return fmt.Errorf("invalid path %q: cannot use '..'", p) } } } return nil } // windowsPathReplacer defines the chars that need to be replaced // as part of windowsValidPath. 
var windowsPathReplacer *strings.Replacer func init() { windowsPathReplacer = strings.NewReplacer(" ", "", ".", "") } func windowsValidPath(part string) bool { if len(part) > 3 && strings.EqualFold(part[:4], GitDirName) { // For historical reasons, file names that end in spaces or periods are // automatically trimmed. Therefore, `.git . . ./` is a valid way to refer // to `.git/`. if windowsPathReplacer.Replace(part[4:]) == "" { return false } // For yet other historical reasons, NTFS supports so-called "Alternate Data // Streams", i.e. metadata associated with a given file, referred to via // `<filename>:<stream-name>:<stream-type>`. There exists a default stream // type for directories, allowing `.git/` to be accessed via // `.git::$INDEX_ALLOCATION/`. // // For performance reasons, _all_ Alternate Data Streams of `.git/` are // forbidden, not just `::$INDEX_ALLOCATION`. if len(part) > 4 && part[4:5] == ":" { return false } } return true } func (w *Worktree) validChange(ch merkletrie.Change) error { action, err := ch.Action() if err != nil { return nil } switch action { case merkletrie.Delete: return validPath(ch.From.String()) case merkletrie.Insert: return validPath(ch.To.String()) case merkletrie.Modify: return validPath(ch.From.String(), ch.To.String()) } return nil } func (w *Worktree) checkoutChange(ch merkletrie.Change, t *object.Tree, idx *indexBuilder) error { a, err := ch.Action() if err != nil { return err } var e *object.TreeEntry var name string var isSubmodule bool switch a { case merkletrie.Modify, merkletrie.Insert: name = ch.To.String() e, err = t.FindEntry(name) if err != nil { return err } isSubmodule = e.Mode == filemode.Submodule case merkletrie.Delete: return rmFileAndDirsIfEmpty(w.Filesystem, ch.From.String()) } if isSubmodule { return w.checkoutChangeSubmodule(name, a, e, idx) } return w.checkoutChangeRegularFile(name, a, t, e, idx) } func (w *Worktree) containsUnstagedChanges() (bool, error) { ch, err := w.diffStagingWithWorktree(false, 
true) if err != nil { return false, err } for _, c := range ch { a, err := c.Action() if err != nil { return false, err } if a == merkletrie.Insert { continue } return true, nil } return false, nil } func (w *Worktree) setHEADCommit(commit plumbing.Hash) error { head, err := w.r.Reference(plumbing.HEAD, false) if err != nil { return err } if head.Type() == plumbing.HashReference { head = plumbing.NewHashReference(plumbing.HEAD, commit) return w.r.Storer.SetReference(head) } branch, err := w.r.Reference(head.Target(), false) if err != nil { return err } if !branch.Name().IsBranch() { return fmt.Errorf("invalid HEAD target should be a branch, found %s", branch.Type()) } branch = plumbing.NewHashReference(branch.Name(), commit) return w.r.Storer.SetReference(branch) } func (w *Worktree) checkoutChangeSubmodule(name string, a merkletrie.Action, e *object.TreeEntry, idx *indexBuilder, ) error { switch a { case merkletrie.Modify: sub, err := w.Submodule(name) if err != nil { return err } if !sub.initialized { return nil } return w.addIndexFromTreeEntry(name, e, idx) case merkletrie.Insert: mode, err := e.Mode.ToOSFileMode() if err != nil { return err } if err := w.Filesystem.MkdirAll(name, mode); err != nil { return err } return w.addIndexFromTreeEntry(name, e, idx) } return nil } func (w *Worktree) checkoutChangeRegularFile(name string, a merkletrie.Action, t *object.Tree, e *object.TreeEntry, idx *indexBuilder, ) error { switch a { case merkletrie.Modify: idx.Remove(name) // to apply perm changes the file is deleted, billy doesn't implement // chmod if err := w.Filesystem.Remove(name); err != nil { return err } fallthrough case merkletrie.Insert: f, err := t.File(name) if err != nil { return err } if err := w.checkoutFile(f); err != nil { return err } return w.addIndexFromFile(name, e.Hash, f.Mode, idx) } return nil } func (w *Worktree) checkoutFile(f *object.File) (err error) { mode, err := f.Mode.ToOSFileMode() if err != nil { return } if mode&os.ModeSymlink != 0 { 
return w.checkoutFileSymlink(f) } from, err := f.Reader() if err != nil { return } defer ioutil.CheckClose(from, &err) to, err := w.Filesystem.OpenFile(f.Name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode.Perm()) if err != nil { return } defer ioutil.CheckClose(to, &err) buf := sync.GetByteSlice() _, err = io.CopyBuffer(to, from, *buf) sync.PutByteSlice(buf) return } func (w *Worktree) checkoutFileSymlink(f *object.File) (err error) { // https://github.com/git/git/commit/10ecfa76491e4923988337b2e2243b05376b40de if strings.EqualFold(f.Name, gitmodulesFile) { return ErrGitModulesSymlink } from, err := f.Reader() if err != nil { return } defer ioutil.CheckClose(from, &err) bytes, err := io.ReadAll(from) if err != nil { return } err = w.Filesystem.Symlink(string(bytes), f.Name) // On windows, this might fail. // Follow Git on Windows behavior by writing the link as it is. if err != nil && isSymlinkWindowsNonAdmin(err) { mode, _ := f.Mode.ToOSFileMode() to, err := w.Filesystem.OpenFile(f.Name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode.Perm()) if err != nil { return err } defer ioutil.CheckClose(to, &err) _, err = to.Write(bytes) return err } return } func (w *Worktree) addIndexFromTreeEntry(name string, f *object.TreeEntry, idx *indexBuilder) error { idx.Remove(name) idx.Add(&index.Entry{ Hash: f.Hash, Name: name, Mode: filemode.Submodule, }) return nil } func (w *Worktree) addIndexFromFile(name string, h plumbing.Hash, mode filemode.FileMode, idx *indexBuilder) error { idx.Remove(name) fi, err := w.Filesystem.Lstat(name) if err != nil { return err } e := &index.Entry{ Hash: h, Name: name, Mode: mode, ModifiedAt: fi.ModTime(), Size: uint32(fi.Size()), } // if the FileInfo.Sys() comes from os the ctime, dev, inode, uid and gid // can be retrieved, otherwise this doesn't apply if fillSystemInfo != nil { fillSystemInfo(e, fi.Sys()) } idx.Add(e) return nil } func (r *Repository) getTreeFromCommitHash(commit plumbing.Hash) (*object.Tree, error) { c, err := r.CommitObject(commit) 
if err != nil { return nil, err } return c.Tree() } var fillSystemInfo func(e *index.Entry, sys interface{}) const gitmodulesFile = ".gitmodules" // Submodule returns the submodule with the given name func (w *Worktree) Submodule(name string) (*Submodule, error) { l, err := w.Submodules() if err != nil { return nil, err } for _, m := range l { if m.Config().Name == name { return m, nil } } return nil, ErrSubmoduleNotFound } // Submodules returns all the available submodules func (w *Worktree) Submodules() (Submodules, error) { l := make(Submodules, 0) m, err := w.readGitmodulesFile() if err != nil || m == nil { return l, err } c, err := w.r.Config() if err != nil { return nil, err } for _, s := range m.Submodules { l = append(l, w.newSubmodule(s, c.Submodules[s.Name])) } return l, nil } func (w *Worktree) newSubmodule(fromModules, fromConfig *config.Submodule) *Submodule { m := &Submodule{w: w} m.initialized = fromConfig != nil if !m.initialized { m.c = fromModules return m } m.c = fromConfig m.c.Path = fromModules.Path return m } func (w *Worktree) isSymlink(path string) bool { if s, err := w.Filesystem.Lstat(path); err == nil { return s.Mode()&os.ModeSymlink != 0 } return false } func (w *Worktree) readGitmodulesFile() (*config.Modules, error) { if w.isSymlink(gitmodulesFile) { return nil, ErrGitModulesSymlink } f, err := w.Filesystem.Open(gitmodulesFile) if err != nil { if os.IsNotExist(err) { return nil, nil } return nil, err } defer f.Close() input, err := io.ReadAll(f) if err != nil { return nil, err } m := config.NewModules() if err := m.Unmarshal(input); err != nil { return m, err } return m, nil } // Clean the worktree by removing untracked files. // An empty dir could be removed - this is what `git clean -f -d .` does. 
func (w *Worktree) Clean(opts *CleanOptions) error { s, err := w.Status() if err != nil { return err } root := "" files, err := w.Filesystem.ReadDir(root) if err != nil { return err } return w.doClean(s, opts, root, files) } func (w *Worktree) doClean(status Status, opts *CleanOptions, dir string, files []os.FileInfo) error { for _, fi := range files { if fi.Name() == GitDirName { continue } // relative path under the root path := filepath.Join(dir, fi.Name()) if fi.IsDir() { if !opts.Dir { continue } subfiles, err := w.Filesystem.ReadDir(path) if err != nil { return err } err = w.doClean(status, opts, path, subfiles) if err != nil { return err } } else { if status.IsUntracked(path) { if err := w.Filesystem.Remove(path); err != nil { return err } } } } if opts.Dir && dir != "" { _, err := removeDirIfEmpty(w.Filesystem, dir) return err } return nil } // GrepResult is structure of a grep result. type GrepResult struct { // FileName is the name of file which contains match. FileName string // LineNumber is the line number of a file at which a match was found. LineNumber int // Content is the content of the file at the matching line. Content string // TreeName is the name of the tree (reference name/commit hash) at // which the match was performed. TreeName string } func (gr GrepResult) String() string { return fmt.Sprintf("%s:%s:%d:%s", gr.TreeName, gr.FileName, gr.LineNumber, gr.Content) } // Grep performs grep on a repository. func (r *Repository) Grep(opts *GrepOptions) ([]GrepResult, error) { if err := opts.validate(r); err != nil { return nil, err } // Obtain commit hash from options (CommitHash or ReferenceName). var commitHash plumbing.Hash // treeName contains the value of TreeName in GrepResult. 
var treeName string if opts.ReferenceName != "" { ref, err := r.Reference(opts.ReferenceName, true) if err != nil { return nil, err } commitHash = ref.Hash() treeName = opts.ReferenceName.String() } else if !opts.CommitHash.IsZero() { commitHash = opts.CommitHash treeName = opts.CommitHash.String() } // Obtain a tree from the commit hash and get a tracked files iterator from // the tree. tree, err := r.getTreeFromCommitHash(commitHash) if err != nil { return nil, err } fileiter := tree.Files() return findMatchInFiles(fileiter, treeName, opts) } // Grep performs grep on a worktree. func (w *Worktree) Grep(opts *GrepOptions) ([]GrepResult, error) { return w.r.Grep(opts) } // findMatchInFiles takes a FileIter, worktree name and GrepOptions, and // returns a slice of GrepResult containing the result of regex pattern matching // in content of all the files. func findMatchInFiles(fileiter *object.FileIter, treeName string, opts *GrepOptions) ([]GrepResult, error) { var results []GrepResult err := fileiter.ForEach(func(file *object.File) error { var fileInPathSpec bool // When no pathspecs are provided, search all the files. if len(opts.PathSpecs) == 0 { fileInPathSpec = true } // Check if the file name matches with the pathspec. Break out of the // loop once a match is found. for _, pathSpec := range opts.PathSpecs { if pathSpec != nil && pathSpec.MatchString(file.Name) { fileInPathSpec = true break } } // If the file does not match with any of the pathspec, skip it. if !fileInPathSpec { return nil } grepResults, err := findMatchInFile(file, treeName, opts) if err != nil { return err } results = append(results, grepResults...) return nil }) return results, err } // findMatchInFile takes a single File, worktree name and GrepOptions, // and returns a slice of GrepResult containing the result of regex pattern // matching in the given file. 
func findMatchInFile(file *object.File, treeName string, opts *GrepOptions) ([]GrepResult, error) {
	var grepResults []GrepResult

	content, err := file.Contents()
	if err != nil {
		return grepResults, err
	}

	// Split the file content and parse line-by-line.
	contentByLine := strings.Split(content, "\n")
	for lineNum, cnt := range contentByLine {
		addToResult := false

		// Match the patterns and content. Break out of the loop once a
		// match is found.
		for _, pattern := range opts.Patterns {
			if pattern != nil && pattern.MatchString(cnt) {
				// Add to result only if invert match is not enabled.
				if !opts.InvertMatch {
					addToResult = true
					break
				}
			} else if opts.InvertMatch {
				// If matching fails, and invert match is enabled, add to
				// results.
				addToResult = true
				break
			}
		}

		if addToResult {
			grepResults = append(grepResults, GrepResult{
				FileName: file.Name,
				// LineNumber is 1-based, while the loop index is 0-based.
				LineNumber: lineNum + 1,
				Content:    cnt,
				TreeName:   treeName,
			})
		}
	}

	return grepResults, nil
}

// rmFileAndDirsIfEmpty removes name from fs, then walks up the directory
// tree removing all encountered empty directories, not just the one
// containing this file. The walk stops at the first non-empty parent.
func rmFileAndDirsIfEmpty(fs billy.Filesystem, name string) error {
	if err := util.RemoveAll(fs, name); err != nil {
		return err
	}

	dir := filepath.Dir(name)
	for {
		removed, err := removeDirIfEmpty(fs, dir)
		// A non-existent directory is treated the same as a non-removed
		// one: it simply stops the upward walk below.
		if err != nil && !os.IsNotExist(err) {
			return err
		}

		if !removed {
			// directory was not empty and not removed,
			// stop checking parents
			break
		}

		// move to parent directory
		dir = filepath.Dir(dir)
	}

	return nil
}

// removeDirIfEmpty will remove the supplied directory `dir` if
// `dir` is empty
// returns true if the directory was removed
func removeDirIfEmpty(fs billy.Filesystem, dir string) (bool, error) {
	files, err := fs.ReadDir(dir)
	if err != nil {
		return false, err
	}

	if len(files) > 0 {
		return false, nil
	}

	err = fs.Remove(dir)
	if err != nil {
		return false, err
	}

	return true, nil
}

// indexBuilder accumulates index entries keyed by entry name so that
// repeated Add/Remove operations are O(1) before being flushed back into
// an index.Index via Write.
type indexBuilder struct {
	entries map[string]*index.Entry
}

// newIndexBuilder seeds the builder with every entry already present in idx.
func newIndexBuilder(idx *index.Index) *indexBuilder {
	entries := make(map[string]*index.Entry, len(idx.Entries))
	for _, e := range idx.Entries {
		entries[e.Name] = e
	}
	return &indexBuilder{
		entries: entries,
	}
}

// Write replaces idx.Entries with the builder's current entry set, reusing
// the existing backing array. Note: map iteration order is random, so the
// resulting entry order is unspecified.
func (b *indexBuilder) Write(idx *index.Index) {
	idx.Entries = idx.Entries[:0]
	for _, e := range b.entries {
		idx.Entries = append(idx.Entries, e)
	}
}

// Add inserts or overwrites the entry under e.Name.
func (b *indexBuilder) Add(e *index.Entry) {
	b.entries[e.Name] = e
}

// Remove deletes the entry for name, normalizing path separators first.
// NOTE(review): Remove applies filepath.ToSlash but Add keys on e.Name
// as-is — presumably entry names are always slash-separated already;
// confirm on Windows callers.
func (b *indexBuilder) Remove(name string) {
	delete(b.entries, filepath.ToSlash(name))
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/status.go
vendor/github.com/jesseduffield/go-git/v5/status.go
package git

import (
	"bytes"
	"fmt"
	"path/filepath"

	mindex "github.com/jesseduffield/go-git/v5/utils/merkletrie/index"
	"github.com/jesseduffield/go-git/v5/utils/merkletrie/noder"
)

// Status represents the current status of a Worktree.
// The key of the map is the path of the file.
type Status map[string]*FileStatus

// File returns the FileStatus for a given path. If the FileStatus doesn't
// exist, a new FileStatus (Untracked in both stage and worktree) is added
// to the map using the path as key.
func (s Status) File(path string) *FileStatus {
	if _, ok := (s)[path]; !ok {
		s[path] = &FileStatus{Worktree: Untracked, Staging: Untracked}
	}

	return s[path]
}

// IsUntracked checks if file for given path is 'Untracked'.
// The path is normalized to forward slashes before the lookup.
func (s Status) IsUntracked(path string) bool {
	stat, ok := (s)[filepath.ToSlash(path)]
	return ok && stat.Worktree == Untracked
}

// IsClean returns true if all the files are in Unmodified status.
func (s Status) IsClean() bool {
	for _, status := range s {
		if status.Worktree != Unmodified || status.Staging != Unmodified {
			return false
		}
	}

	return true
}

// String renders the status in a short, `git status -s`-like form:
// one "<staging><worktree> <path>" line per file that is not fully
// unmodified. Renamed files show "old -> new" using Extra.
func (s Status) String() string {
	buf := bytes.NewBuffer(nil)
	for path, status := range s {
		if status.Staging == Unmodified && status.Worktree == Unmodified {
			continue
		}

		if status.Staging == Renamed {
			path = fmt.Sprintf("%s -> %s", path, status.Extra)
		}

		fmt.Fprintf(buf, "%c%c %s\n", status.Staging, status.Worktree, path)
	}

	return buf.String()
}

// FileStatus contains the status of a file in the worktree
type FileStatus struct {
	// Staging is the status of a file in the staging area
	Staging StatusCode
	// Worktree is the status of a file in the worktree
	Worktree StatusCode
	// Extra contains extra information, such as the previous name in a rename
	Extra string
}

// StatusCode status code of a file in the Worktree
type StatusCode byte

const (
	Unmodified         StatusCode = ' '
	Untracked          StatusCode = '?'
	Modified           StatusCode = 'M'
	Added              StatusCode = 'A'
	Deleted            StatusCode = 'D'
	Renamed            StatusCode = 'R'
	Copied             StatusCode = 'C'
	UpdatedButUnmerged StatusCode = 'U'
)

// StatusStrategy defines the different types of strategies when processing
// the worktree status.
type StatusStrategy int

const (
	// TODO: (V6) Review the default status strategy.
	// TODO: (V6) Review the type used to represent Status, to enable lazy
	// processing of statuses going direct to the backing filesystem.
	defaultStatusStrategy = Empty

	// Empty starts its status map from empty. Missing entries for a given
	// path means that the file is untracked. This causes a known issue (#119)
	// whereby unmodified files can be incorrectly reported as untracked.
	//
	// This can be used when returning the changed state within a modified Worktree.
	// For example, to check whether the current worktree is clean.
	Empty StatusStrategy = 0
	// Preload goes through all existing nodes from the index and add them to the
	// status map as unmodified. This is currently the most reliable strategy
	// although it comes at a performance cost in large repositories.
	//
	// This method is recommended when fetching the status of unmodified files.
	// For example, to confirm the status of a specific file that is either
	// untracked or unmodified.
	Preload StatusStrategy = 1
)

// new builds the initial Status map for the given strategy, returning
// ErrUnsupportedStatusStrategy for any unknown value.
func (s StatusStrategy) new(w *Worktree) (Status, error) {
	switch s {
	case Preload:
		return preloadStatus(w)
	case Empty:
		return make(Status), nil
	}
	return nil, fmt.Errorf("%w: %+v", ErrUnsupportedStatusStrategy, s)
}

// preloadStatus walks every file node in the index (iteratively, via an
// explicit work list rather than recursion) and records it as Unmodified
// in both the staging area and the worktree.
func preloadStatus(w *Worktree) (Status, error) {
	idx, err := w.r.Storer.Index()
	if err != nil {
		return nil, err
	}

	idxRoot := mindex.NewRootNode(idx)
	nodes := []noder.Noder{idxRoot}

	status := make(Status)

	for len(nodes) > 0 {
		var node noder.Noder
		node, nodes = nodes[0], nodes[1:]
		if node.IsDir() {
			children, err := node.Children()
			if err != nil {
				return nil, err
			}
			nodes = append(nodes, children...)
			continue
		}
		fs := status.File(node.Name())
		fs.Worktree = Unmodified
		fs.Staging = Unmodified
	}

	return status, nil
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/worktree_linux.go
vendor/github.com/jesseduffield/go-git/v5/worktree_linux.go
//go:build linux
// +build linux

package git

import (
	"syscall"
	"time"

	"github.com/jesseduffield/go-git/v5/plumbing/format/index"
)

// init installs the Linux implementation of fillSystemInfo, which copies
// ctime, device, inode, uid and gid from the raw syscall.Stat_t into an
// index entry when the FileInfo.Sys() value comes from the os package.
func init() {
	fillSystemInfo = func(e *index.Entry, sys interface{}) {
		if os, ok := sys.(*syscall.Stat_t); ok {
			e.CreatedAt = time.Unix(os.Ctim.Unix())
			e.Dev = uint32(os.Dev)
			e.Inode = uint32(os.Ino)
			e.GID = os.Gid
			e.UID = os.Uid
		}
	}
}

// isSymlinkWindowsNonAdmin always reports false on Linux; the
// symlink-permission fallback only applies to Windows.
func isSymlinkWindowsNonAdmin(_ error) bool {
	return false
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/worktree_bsd.go
vendor/github.com/jesseduffield/go-git/v5/worktree_bsd.go
// +build darwin freebsd netbsd package git import ( "syscall" "time" "github.com/jesseduffield/go-git/v5/plumbing/format/index" ) func init() { fillSystemInfo = func(e *index.Entry, sys interface{}) { if os, ok := sys.(*syscall.Stat_t); ok { e.CreatedAt = time.Unix(os.Atimespec.Unix()) e.Dev = uint32(os.Dev) e.Inode = uint32(os.Ino) e.GID = os.Gid e.UID = os.Uid } } } func isSymlinkWindowsNonAdmin(err error) bool { return false }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/object_walker.go
vendor/github.com/jesseduffield/go-git/v5/object_walker.go
package git

import (
	"fmt"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/filemode"
	"github.com/jesseduffield/go-git/v5/plumbing/object"
	"github.com/jesseduffield/go-git/v5/storage"
)

// objectWalker traverses the object graph of a repository, remembering
// every hash it has visited.
type objectWalker struct {
	Storer storage.Storer
	// seen is the set of objects seen in the repo.
	// seen map can become huge if walking over large
	// repos. Thus using struct{} as the value type.
	seen map[plumbing.Hash]struct{}
}

// newObjectWalker returns a walker backed by s with an empty seen set.
func newObjectWalker(s storage.Storer) *objectWalker {
	return &objectWalker{s, map[plumbing.Hash]struct{}{}}
}

// walkAllRefs walks all (hash) references from the repo.
func (p *objectWalker) walkAllRefs() error {
	// Walk over all the references in the repo.
	it, err := p.Storer.IterReferences()
	if err != nil {
		return err
	}
	defer it.Close()
	return it.ForEach(func(ref *plumbing.Reference) error {
		// Exit this iteration early for non-hash references.
		if ref.Type() != plumbing.HashReference {
			return nil
		}
		return p.walkObjectTree(ref.Hash())
	})
}

// isSeen reports whether hash has already been visited.
func (p *objectWalker) isSeen(hash plumbing.Hash) bool {
	_, seen := p.seen[hash]
	return seen
}

// add marks hash as visited.
func (p *objectWalker) add(hash plumbing.Hash) {
	p.seen[hash] = struct{}{}
}

// walkObjectTree walks over all objects and remembers references
// to them in the objectWalker. This is used instead of the revlist
// walks because memory usage is tight with huge repos.
func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error {
	// Check if we have already seen, and mark this object
	if p.isSeen(hash) {
		return nil
	}
	p.add(hash)
	// Fetch the object.
	obj, err := object.GetObject(p.Storer, hash)
	if err != nil {
		// Wrap with %w so callers can still use errors.Is/errors.As on
		// the underlying storage error.
		return fmt.Errorf("getting object %s failed: %w", hash, err)
	}
	// Walk all children depending on object type.
	switch obj := obj.(type) {
	case *object.Commit:
		err = p.walkObjectTree(obj.TreeHash)
		if err != nil {
			return err
		}
		for _, h := range obj.ParentHashes {
			err = p.walkObjectTree(h)
			if err != nil {
				return err
			}
		}
	case *object.Tree:
		for i := range obj.Entries {
			// Shortcut for blob objects:
			// 'or' the lower bits of a mode and check that it
			// matches a filemode.Executable. The type information
			// is in the higher bits, but this is the cleanest way
			// to handle plain files with different modes.
			// Other non-tree objects are somewhat rare, so they
			// are not special-cased.
			if obj.Entries[i].Mode|0755 == filemode.Executable {
				p.add(obj.Entries[i].Hash)
				continue
			}
			// Normal walk for sub-trees (and symlinks etc).
			err = p.walkObjectTree(obj.Entries[i].Hash)
			if err != nil {
				return err
			}
		}
	case *object.Tag:
		return p.walkObjectTree(obj.Target)
	default:
		// Error out on unhandled object types.
		return fmt.Errorf("unknown object %X %s %T", obj.ID(), obj.Type(), obj)
	}
	return nil
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/signer.go
vendor/github.com/jesseduffield/go-git/v5/signer.go
package git

import (
	"io"

	"github.com/jesseduffield/go-git/v5/plumbing"
)

// signableObject is an object which can be signed.
type signableObject interface {
	EncodeWithoutSignature(o plumbing.EncodedObject) error
}

// Signer is an interface for signing git objects.
// message is a reader containing the encoded object to be signed.
// Implementors should return the encoded signature and an error if any.
// See https://git-scm.com/docs/gitformat-signature for more information.
type Signer interface {
	Sign(message io.Reader) ([]byte, error)
}

// signObject encodes obj without its signature field into an in-memory
// object and feeds the resulting stream to signer, returning the raw
// signature bytes.
func signObject(signer Signer, obj signableObject) ([]byte, error) {
	encoded := &plumbing.MemoryObject{}
	if err := obj.EncodeWithoutSignature(encoded); err != nil {
		return nil, err
	}
	r, err := encoded.Reader()
	if err != nil {
		return nil, err
	}

	return signer.Sign(r)
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/worktree_status.go
vendor/github.com/jesseduffield/go-git/v5/worktree_status.go
package git import ( "bytes" "errors" "io" "os" "path" "path/filepath" "strings" "github.com/go-git/go-billy/v5/util" "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/plumbing/filemode" "github.com/jesseduffield/go-git/v5/plumbing/format/gitignore" "github.com/jesseduffield/go-git/v5/plumbing/format/index" "github.com/jesseduffield/go-git/v5/plumbing/object" "github.com/jesseduffield/go-git/v5/utils/ioutil" "github.com/jesseduffield/go-git/v5/utils/merkletrie" "github.com/jesseduffield/go-git/v5/utils/merkletrie/filesystem" mindex "github.com/jesseduffield/go-git/v5/utils/merkletrie/index" "github.com/jesseduffield/go-git/v5/utils/merkletrie/noder" ) var ( // ErrDestinationExists in an Move operation means that the target exists on // the worktree. ErrDestinationExists = errors.New("destination exists") // ErrGlobNoMatches in an AddGlob if the glob pattern does not match any // files in the worktree. ErrGlobNoMatches = errors.New("glob pattern did not match any files") // ErrUnsupportedStatusStrategy occurs when an invalid StatusStrategy is used // when processing the Worktree status. ErrUnsupportedStatusStrategy = errors.New("unsupported status strategy") ) // Status returns the working tree status. func (w *Worktree) Status() (Status, error) { return w.StatusWithOptions(StatusOptions{Strategy: defaultStatusStrategy}) } // StatusOptions defines the options for Worktree.StatusWithOptions(). type StatusOptions struct { Strategy StatusStrategy } // StatusWithOptions returns the working tree status. 
func (w *Worktree) StatusWithOptions(o StatusOptions) (Status, error) { var hash plumbing.Hash ref, err := w.r.Head() if err != nil && err != plumbing.ErrReferenceNotFound { return nil, err } if err == nil { hash = ref.Hash() } return w.status(o.Strategy, hash) } func (w *Worktree) status(ss StatusStrategy, commit plumbing.Hash) (Status, error) { s, err := ss.new(w) if err != nil { return nil, err } left, err := w.diffCommitWithStaging(commit, false) if err != nil { return nil, err } for _, ch := range left { a, err := ch.Action() if err != nil { return nil, err } fs := s.File(nameFromAction(&ch)) fs.Worktree = Unmodified switch a { case merkletrie.Delete: s.File(ch.From.String()).Staging = Deleted case merkletrie.Insert: s.File(ch.To.String()).Staging = Added case merkletrie.Modify: s.File(ch.To.String()).Staging = Modified } } right, err := w.diffStagingWithWorktree(false, true) if err != nil { return nil, err } for _, ch := range right { a, err := ch.Action() if err != nil { return nil, err } fs := s.File(nameFromAction(&ch)) if fs.Staging == Untracked { fs.Staging = Unmodified } switch a { case merkletrie.Delete: fs.Worktree = Deleted case merkletrie.Insert: fs.Worktree = Untracked fs.Staging = Untracked case merkletrie.Modify: fs.Worktree = Modified } } return s, nil } func nameFromAction(ch *merkletrie.Change) string { name := ch.To.String() if name == "" { return ch.From.String() } return name } func (w *Worktree) diffStagingWithWorktree(reverse, excludeIgnoredChanges bool) (merkletrie.Changes, error) { idx, err := w.r.Storer.Index() if err != nil { return nil, err } from := mindex.NewRootNode(idx) submodules, err := w.getSubmodulesStatus() if err != nil { return nil, err } to := filesystem.NewRootNode(w.Filesystem, submodules) var c merkletrie.Changes if reverse { c, err = merkletrie.DiffTree(to, from, diffTreeIsEquals) } else { c, err = merkletrie.DiffTree(from, to, diffTreeIsEquals) } if err != nil { return nil, err } if excludeIgnoredChanges { return 
w.excludeIgnoredChanges(c), nil } return c, nil } func (w *Worktree) excludeIgnoredChanges(changes merkletrie.Changes) merkletrie.Changes { patterns, err := gitignore.ReadPatterns(w.Filesystem, nil) if err != nil { return changes } patterns = append(patterns, w.Excludes...) if len(patterns) == 0 { return changes } m := gitignore.NewMatcher(patterns) var res merkletrie.Changes for _, ch := range changes { var path []string for _, n := range ch.To { path = append(path, n.Name()) } if len(path) == 0 { for _, n := range ch.From { path = append(path, n.Name()) } } if len(path) != 0 { isDir := (len(ch.To) > 0 && ch.To.IsDir()) || (len(ch.From) > 0 && ch.From.IsDir()) if m.Match(path, isDir) { if len(ch.From) == 0 { continue } } } res = append(res, ch) } return res } func (w *Worktree) getSubmodulesStatus() (map[string]plumbing.Hash, error) { o := map[string]plumbing.Hash{} sub, err := w.Submodules() if err != nil { return nil, err } status, err := sub.Status() if err != nil { return nil, err } for _, s := range status { if s.Current.IsZero() { o[s.Path] = s.Expected continue } o[s.Path] = s.Current } return o, nil } func (w *Worktree) diffCommitWithStaging(commit plumbing.Hash, reverse bool) (merkletrie.Changes, error) { var t *object.Tree if !commit.IsZero() { c, err := w.r.CommitObject(commit) if err != nil { return nil, err } t, err = c.Tree() if err != nil { return nil, err } } return w.diffTreeWithStaging(t, reverse) } func (w *Worktree) diffTreeWithStaging(t *object.Tree, reverse bool) (merkletrie.Changes, error) { var from noder.Noder if t != nil { from = object.NewTreeRootNode(t) } idx, err := w.r.Storer.Index() if err != nil { return nil, err } to := mindex.NewRootNode(idx) if reverse { return merkletrie.DiffTree(to, from, diffTreeIsEquals) } return merkletrie.DiffTree(from, to, diffTreeIsEquals) } var emptyNoderHash = make([]byte, 24) // diffTreeIsEquals is a implementation of noder.Equals, used to compare // noder.Noder, it compare the content and the length 
of the hashes. // // Since some of the noder.Noder implementations doesn't compute a hash for // some directories, if any of the hashes is a 24-byte slice of zero values // the comparison is not done and the hashes are take as different. func diffTreeIsEquals(a, b noder.Hasher) bool { hashA := a.Hash() hashB := b.Hash() if bytes.Equal(hashA, emptyNoderHash) || bytes.Equal(hashB, emptyNoderHash) { return false } return bytes.Equal(hashA, hashB) } // Add adds the file contents of a file in the worktree to the index. if the // file is already staged in the index no error is returned. If a file deleted // from the Workspace is given, the file is removed from the index. If a // directory given, adds the files and all his sub-directories recursively in // the worktree to the index. If any of the files is already staged in the index // no error is returned. When path is a file, the blob.Hash is returned. func (w *Worktree) Add(path string) (plumbing.Hash, error) { // TODO(mcuadros): deprecate in favor of AddWithOption in v6. return w.doAdd(path, make([]gitignore.Pattern, 0), false) } func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string, ignorePattern []gitignore.Pattern) (added bool, err error) { if len(ignorePattern) > 0 { m := gitignore.NewMatcher(ignorePattern) matchPath := strings.Split(directory, string(os.PathSeparator)) if m.Match(matchPath, true) { // ignore return false, nil } } directory = filepath.ToSlash(filepath.Clean(directory)) for name := range s { if !isPathInDirectory(name, directory) { continue } var a bool a, _, err = w.doAddFile(idx, s, name, ignorePattern) if err != nil { return } added = added || a } return } func isPathInDirectory(path, directory string) bool { return directory == "." || strings.HasPrefix(path, directory+"/") } // AddWithOptions file contents to the index, updates the index using the // current content found in the working tree, to prepare the content staged for // the next commit. 
// // It typically adds the current content of existing paths as a whole, but with // some options it can also be used to add content with only part of the changes // made to the working tree files applied, or remove paths that do not exist in // the working tree anymore. func (w *Worktree) AddWithOptions(opts *AddOptions) error { if err := opts.Validate(w.r); err != nil { return err } if opts.All { _, err := w.doAdd(".", w.Excludes, false) return err } if opts.Glob != "" { return w.AddGlob(opts.Glob) } _, err := w.doAdd(opts.Path, make([]gitignore.Pattern, 0), opts.SkipStatus) return err } func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern, skipStatus bool) (plumbing.Hash, error) { idx, err := w.r.Storer.Index() if err != nil { return plumbing.ZeroHash, err } var h plumbing.Hash var added bool fi, err := w.Filesystem.Lstat(path) // status is required for doAddDirectory var s Status var err2 error if !skipStatus || fi == nil || fi.IsDir() { s, err2 = w.Status() if err2 != nil { return plumbing.ZeroHash, err2 } } path = filepath.Clean(path) if err != nil || !fi.IsDir() { added, h, err = w.doAddFile(idx, s, path, ignorePattern) } else { added, err = w.doAddDirectory(idx, s, path, ignorePattern) } if err != nil { return h, err } if !added { return h, nil } return h, w.r.Storer.SetIndex(idx) } // AddGlob adds all paths, matching pattern, to the index. If pattern matches a // directory path, all directory contents are added to the index recursively. No // error is returned if all matching paths are already staged in index. func (w *Worktree) AddGlob(pattern string) error { // TODO(mcuadros): deprecate in favor of AddWithOption in v6. 
files, err := util.Glob(w.Filesystem, pattern) if err != nil { return err } if len(files) == 0 { return ErrGlobNoMatches } s, err := w.Status() if err != nil { return err } idx, err := w.r.Storer.Index() if err != nil { return err } var saveIndex bool for _, file := range files { fi, err := w.Filesystem.Lstat(file) if err != nil { return err } var added bool if fi.IsDir() { added, err = w.doAddDirectory(idx, s, file, make([]gitignore.Pattern, 0)) } else { added, _, err = w.doAddFile(idx, s, file, make([]gitignore.Pattern, 0)) } if err != nil { return err } if !saveIndex && added { saveIndex = true } } if saveIndex { return w.r.Storer.SetIndex(idx) } return nil } // doAddFile create a new blob from path and update the index, added is true if // the file added is different from the index. // if s status is nil will skip the status check and update the index anyway func (w *Worktree) doAddFile(idx *index.Index, s Status, path string, ignorePattern []gitignore.Pattern) (added bool, h plumbing.Hash, err error) { if s != nil && s.File(path).Worktree == Unmodified { return false, h, nil } if len(ignorePattern) > 0 { m := gitignore.NewMatcher(ignorePattern) matchPath := strings.Split(path, string(os.PathSeparator)) if m.Match(matchPath, true) { // ignore return false, h, nil } } h, err = w.copyFileToStorage(path) if err != nil { if os.IsNotExist(err) { added = true h, err = w.deleteFromIndex(idx, path) } return } if err := w.addOrUpdateFileToIndex(idx, path, h); err != nil { return false, h, err } return true, h, err } func (w *Worktree) copyFileToStorage(path string) (hash plumbing.Hash, err error) { fi, err := w.Filesystem.Lstat(path) if err != nil { return plumbing.ZeroHash, err } obj := w.r.Storer.NewEncodedObject() obj.SetType(plumbing.BlobObject) obj.SetSize(fi.Size()) writer, err := obj.Writer() if err != nil { return plumbing.ZeroHash, err } defer ioutil.CheckClose(writer, &err) if fi.Mode()&os.ModeSymlink != 0 { err = w.fillEncodedObjectFromSymlink(writer, path, 
fi) } else { err = w.fillEncodedObjectFromFile(writer, path, fi) } if err != nil { return plumbing.ZeroHash, err } return w.r.Storer.SetEncodedObject(obj) } func (w *Worktree) fillEncodedObjectFromFile(dst io.Writer, path string, _ os.FileInfo) (err error) { src, err := w.Filesystem.Open(path) if err != nil { return err } defer ioutil.CheckClose(src, &err) if _, err := io.Copy(dst, src); err != nil { return err } return err } func (w *Worktree) fillEncodedObjectFromSymlink(dst io.Writer, path string, _ os.FileInfo) error { target, err := w.Filesystem.Readlink(path) if err != nil { return err } _, err = dst.Write([]byte(target)) return err } func (w *Worktree) addOrUpdateFileToIndex(idx *index.Index, filename string, h plumbing.Hash) error { e, err := idx.Entry(filename) if err != nil && err != index.ErrEntryNotFound { return err } if err == index.ErrEntryNotFound { return w.doAddFileToIndex(idx, filename, h) } return w.doUpdateFileToIndex(e, filename, h) } func (w *Worktree) doAddFileToIndex(idx *index.Index, filename string, h plumbing.Hash) error { return w.doUpdateFileToIndex(idx.Add(filename), filename, h) } func (w *Worktree) doUpdateFileToIndex(e *index.Entry, filename string, h plumbing.Hash) error { info, err := w.Filesystem.Lstat(filename) if err != nil { return err } e.Hash = h e.ModifiedAt = info.ModTime() e.Mode, err = filemode.NewFromOSFileMode(info.Mode()) if err != nil { return err } // The entry size must always reflect the current state, otherwise // it will cause go-git's Worktree.Status() to divert from "git status". // The size of a symlink is the length of the path to the target. // The size of Regular and Executable files is the size of the files. e.Size = uint32(info.Size()) fillSystemInfo(e, info.Sys()) return nil } // Remove removes files from the working tree and from the index. func (w *Worktree) Remove(path string) (plumbing.Hash, error) { // TODO(mcuadros): remove plumbing.Hash from signature at v5. 
idx, err := w.r.Storer.Index() if err != nil { return plumbing.ZeroHash, err } var h plumbing.Hash fi, err := w.Filesystem.Lstat(path) if err != nil || !fi.IsDir() { h, err = w.doRemoveFile(idx, path) } else { _, err = w.doRemoveDirectory(idx, path) } if err != nil { return h, err } return h, w.r.Storer.SetIndex(idx) } func (w *Worktree) doRemoveDirectory(idx *index.Index, directory string) (removed bool, err error) { files, err := w.Filesystem.ReadDir(directory) if err != nil { return false, err } for _, file := range files { name := path.Join(directory, file.Name()) var r bool if file.IsDir() { r, err = w.doRemoveDirectory(idx, name) } else { _, err = w.doRemoveFile(idx, name) if err == index.ErrEntryNotFound { err = nil } } if err != nil { return } if !removed && r { removed = true } } err = w.removeEmptyDirectory(directory) return } func (w *Worktree) removeEmptyDirectory(path string) error { files, err := w.Filesystem.ReadDir(path) if err != nil { return err } if len(files) != 0 { return nil } return w.Filesystem.Remove(path) } func (w *Worktree) doRemoveFile(idx *index.Index, path string) (plumbing.Hash, error) { hash, err := w.deleteFromIndex(idx, path) if err != nil { return plumbing.ZeroHash, err } return hash, w.deleteFromFilesystem(path) } func (w *Worktree) deleteFromIndex(idx *index.Index, path string) (plumbing.Hash, error) { e, err := idx.Remove(path) if err != nil { return plumbing.ZeroHash, err } return e.Hash, nil } func (w *Worktree) deleteFromFilesystem(path string) error { err := w.Filesystem.Remove(path) if os.IsNotExist(err) { return nil } return err } // RemoveGlob removes all paths, matching pattern, from the index. If pattern // matches a directory path, all directory contents are removed from the index // recursively. 
func (w *Worktree) RemoveGlob(pattern string) error { idx, err := w.r.Storer.Index() if err != nil { return err } entries, err := idx.Glob(pattern) if err != nil { return err } for _, e := range entries { file := filepath.FromSlash(e.Name) if _, err := w.Filesystem.Lstat(file); err != nil && !os.IsNotExist(err) { return err } if _, err := w.doRemoveFile(idx, file); err != nil { return err } dir, _ := filepath.Split(file) if err := w.removeEmptyDirectory(dir); err != nil { return err } } return w.r.Storer.SetIndex(idx) } // Move moves or rename a file in the worktree and the index, directories are // not supported. func (w *Worktree) Move(from, to string) (plumbing.Hash, error) { // TODO(mcuadros): support directories and/or implement support for glob if _, err := w.Filesystem.Lstat(from); err != nil { return plumbing.ZeroHash, err } if _, err := w.Filesystem.Lstat(to); err == nil { return plumbing.ZeroHash, ErrDestinationExists } idx, err := w.r.Storer.Index() if err != nil { return plumbing.ZeroHash, err } hash, err := w.deleteFromIndex(idx, from) if err != nil { return plumbing.ZeroHash, err } if err := w.Filesystem.Rename(from, to); err != nil { return hash, err } if err := w.addOrUpdateFileToIndex(idx, to, hash); err != nil { return hash, err } return hash, w.r.Storer.SetIndex(idx) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/options.go
vendor/github.com/jesseduffield/go-git/v5/options.go
package git import ( "errors" "fmt" "regexp" "strings" "time" "github.com/ProtonMail/go-crypto/openpgp" "github.com/jesseduffield/go-git/v5/config" "github.com/jesseduffield/go-git/v5/plumbing" formatcfg "github.com/jesseduffield/go-git/v5/plumbing/format/config" "github.com/jesseduffield/go-git/v5/plumbing/object" "github.com/jesseduffield/go-git/v5/plumbing/protocol/packp/sideband" "github.com/jesseduffield/go-git/v5/plumbing/transport" ) // SubmoduleRescursivity defines how depth will affect any submodule recursive // operation. type SubmoduleRescursivity uint const ( // DefaultRemoteName name of the default Remote, just like git command. DefaultRemoteName = "origin" // NoRecurseSubmodules disables the recursion for a submodule operation. NoRecurseSubmodules SubmoduleRescursivity = 0 // DefaultSubmoduleRecursionDepth allow recursion in a submodule operation. DefaultSubmoduleRecursionDepth SubmoduleRescursivity = 10 ) var ( ErrMissingURL = errors.New("URL field is required") ) // CloneOptions describes how a clone should be performed. type CloneOptions struct { // The (possibly remote) repository URL to clone from. URL string // Auth credentials, if required, to use with the remote repository. Auth transport.AuthMethod // Name of the remote to be added, by default `origin`. RemoteName string // Remote branch to clone. ReferenceName plumbing.ReferenceName // Fetch only ReferenceName if true. SingleBranch bool // Mirror clones the repository as a mirror. // // Compared to a bare clone, mirror not only maps local branches of the // source to local branches of the target, it maps all refs (including // remote-tracking branches, notes etc.) and sets up a refspec configuration // such that all these refs are overwritten by a git remote update in the // target repository. Mirror bool // No checkout of HEAD after clone if true. NoCheckout bool // Limit fetching to the specified number of commits. 
Depth int // RecurseSubmodules after the clone is created, initialize all submodules // within, using their default settings. This option is ignored if the // cloned repository does not have a worktree. RecurseSubmodules SubmoduleRescursivity // ShallowSubmodules limit cloning submodules to the 1 level of depth. // It matches the git command --shallow-submodules. ShallowSubmodules bool // Progress is where the human readable information sent by the server is // stored, if nil nothing is stored and the capability (if supported) // no-progress, is sent to the server to avoid send this information. Progress sideband.Progress // Tags describe how the tags will be fetched from the remote repository, // by default is AllTags. Tags TagMode // InsecureSkipTLS skips ssl verify if protocol is https InsecureSkipTLS bool // CABundle specify additional ca bundle with system cert pool CABundle []byte // ProxyOptions provides info required for connecting to a proxy. ProxyOptions transport.ProxyOptions // When the repository to clone is on the local machine, instead of // using hard links, automatically setup .git/objects/info/alternates // to share the objects with the source repository. // The resulting repository starts out without any object of its own. // NOTE: this is a possibly dangerous operation; do not use it unless // you understand what it does. // // [Reference]: https://git-scm.com/docs/git-clone#Documentation/git-clone.txt---shared Shared bool } // MergeOptions describes how a merge should be performed. type MergeOptions struct { // Strategy defines the merge strategy to be used. Strategy MergeStrategy } // MergeStrategy represents the different types of merge strategies. type MergeStrategy int8 const ( // FastForwardMerge represents a Git merge strategy where the current // branch can be simply updated to point to the HEAD of the branch being // merged. 
This is only possible if the history of the branch being merged // is a linear descendant of the current branch, with no conflicting commits. // // This is the default option. FastForwardMerge MergeStrategy = iota ) // Validate validates the fields and sets the default values. func (o *CloneOptions) Validate() error { if o.URL == "" { return ErrMissingURL } if o.RemoteName == "" { o.RemoteName = DefaultRemoteName } if o.ReferenceName == "" { o.ReferenceName = plumbing.HEAD } if o.Tags == InvalidTagMode { o.Tags = AllTags } return nil } // PullOptions describes how a pull should be performed. type PullOptions struct { // Name of the remote to be pulled. If empty, uses the default. RemoteName string // RemoteURL overrides the remote repo address with a custom URL RemoteURL string // Remote branch to clone. If empty, uses HEAD. ReferenceName plumbing.ReferenceName // Fetch only ReferenceName if true. SingleBranch bool // Limit fetching to the specified number of commits. Depth int // Auth credentials, if required, to use with the remote repository. Auth transport.AuthMethod // RecurseSubmodules controls if new commits of all populated submodules // should be fetched too. RecurseSubmodules SubmoduleRescursivity // Progress is where the human readable information sent by the server is // stored, if nil nothing is stored and the capability (if supported) // no-progress, is sent to the server to avoid send this information. Progress sideband.Progress // Force allows the pull to update a local branch even when the remote // branch does not descend from it. Force bool // InsecureSkipTLS skips ssl verify if protocol is https InsecureSkipTLS bool // CABundle specify additional ca bundle with system cert pool CABundle []byte // ProxyOptions provides info required for connecting to a proxy. ProxyOptions transport.ProxyOptions } // Validate validates the fields and sets the default values. 
func (o *PullOptions) Validate() error { if o.RemoteName == "" { o.RemoteName = DefaultRemoteName } if o.ReferenceName == "" { o.ReferenceName = plumbing.HEAD } return nil } type TagMode int const ( InvalidTagMode TagMode = iota // TagFollowing any tag that points into the histories being fetched is also // fetched. TagFollowing requires a server with `include-tag` capability // in order to fetch the annotated tags objects. TagFollowing // AllTags fetch all tags from the remote (i.e., fetch remote tags // refs/tags/* into local tags with the same name) AllTags // NoTags fetch no tags from the remote at all NoTags ) // FetchOptions describes how a fetch should be performed type FetchOptions struct { // Name of the remote to fetch from. Defaults to origin. RemoteName string // RemoteURL overrides the remote repo address with a custom URL RemoteURL string RefSpecs []config.RefSpec // Depth limit fetching to the specified number of commits from the tip of // each remote branch history. Depth int // Auth credentials, if required, to use with the remote repository. Auth transport.AuthMethod // Progress is where the human readable information sent by the server is // stored, if nil nothing is stored and the capability (if supported) // no-progress, is sent to the server to avoid send this information. Progress sideband.Progress // Tags describe how the tags will be fetched from the remote repository, // by default is TagFollowing. Tags TagMode // Force allows the fetch to update a local branch even when the remote // branch does not descend from it. Force bool // InsecureSkipTLS skips ssl verify if protocol is https InsecureSkipTLS bool // CABundle specify additional ca bundle with system cert pool CABundle []byte // ProxyOptions provides info required for connecting to a proxy. ProxyOptions transport.ProxyOptions // Prune specify that local refs that match given RefSpecs and that do // not exist remotely will be removed. 
Prune bool } // Validate validates the fields and sets the default values. func (o *FetchOptions) Validate() error { if o.RemoteName == "" { o.RemoteName = DefaultRemoteName } if o.Tags == InvalidTagMode { o.Tags = TagFollowing } for _, r := range o.RefSpecs { if err := r.Validate(); err != nil { return err } } return nil } // PushOptions describes how a push should be performed. type PushOptions struct { // RemoteName is the name of the remote to be pushed to. RemoteName string // RemoteURL overrides the remote repo address with a custom URL RemoteURL string // RefSpecs specify what destination ref to update with what source object. // // The format of a <refspec> parameter is an optional plus +, followed by // the source object <src>, followed by a colon :, followed by the destination ref <dst>. // The <src> is often the name of the branch you would want to push, but it can be a SHA-1. // The <dst> tells which ref on the remote side is updated with this push. // // A refspec with empty src can be used to delete a reference. RefSpecs []config.RefSpec // Auth credentials, if required, to use with the remote repository. Auth transport.AuthMethod // Progress is where the human readable information sent by the server is // stored, if nil nothing is stored. Progress sideband.Progress // Prune specify that remote refs that match given RefSpecs and that do // not exist locally will be removed. Prune bool // Force allows the push to update a remote branch even when the local // branch does not descend from it. Force bool // InsecureSkipTLS skips ssl verify if protocol is https InsecureSkipTLS bool // CABundle specify additional ca bundle with system cert pool CABundle []byte // RequireRemoteRefs only allows a remote ref to be updated if its current // value is the one specified here. 
RequireRemoteRefs []config.RefSpec // FollowTags will send any annotated tags with a commit target reachable from // the refs already being pushed FollowTags bool // ForceWithLease allows a force push as long as the remote ref adheres to a "lease" ForceWithLease *ForceWithLease // PushOptions sets options to be transferred to the server during push. Options map[string]string // Atomic sets option to be an atomic push Atomic bool // ProxyOptions provides info required for connecting to a proxy. ProxyOptions transport.ProxyOptions } // ForceWithLease sets fields on the lease // If neither RefName nor Hash are set, ForceWithLease protects // all refs in the refspec by ensuring the ref of the remote in the local repsitory // matches the one in the ref advertisement. type ForceWithLease struct { // RefName, when set will protect the ref by ensuring it matches the // hash in the ref advertisement. RefName plumbing.ReferenceName // Hash is the expected object id of RefName. The push will be rejected unless this // matches the corresponding object id of RefName in the refs advertisement. Hash plumbing.Hash } // Validate validates the fields and sets the default values. func (o *PushOptions) Validate() error { if o.RemoteName == "" { o.RemoteName = DefaultRemoteName } if len(o.RefSpecs) == 0 { o.RefSpecs = []config.RefSpec{ config.RefSpec(config.DefaultPushRefSpec), } } for _, r := range o.RefSpecs { if err := r.Validate(); err != nil { return err } } return nil } // SubmoduleUpdateOptions describes how a submodule update should be performed. type SubmoduleUpdateOptions struct { // Init, if true initializes the submodules recorded in the index. Init bool // NoFetch tell to the update command to not fetch new objects from the // remote site. NoFetch bool // RecurseSubmodules the update is performed not only in the submodules of // the current repository but also in any nested submodules inside those // submodules (and so on). Until the SubmoduleRescursivity is reached. 
RecurseSubmodules SubmoduleRescursivity // Auth credentials, if required, to use with the remote repository. Auth transport.AuthMethod // Depth limit fetching to the specified number of commits from the tip of // each remote branch history. Depth int } var ( ErrBranchHashExclusive = errors.New("Branch and Hash are mutually exclusive") ErrCreateRequiresBranch = errors.New("Branch is mandatory when Create is used") ) // CheckoutOptions describes how a checkout operation should be performed. type CheckoutOptions struct { // Hash is the hash of a commit or tag to be checked out. If used, HEAD // will be in detached mode. If Create is not used, Branch and Hash are // mutually exclusive. Hash plumbing.Hash // Branch to be checked out, if Branch and Hash are empty is set to `master`. Branch plumbing.ReferenceName // Create a new branch named Branch and start it at Hash. Create bool // Force, if true when switching branches, proceed even if the index or the // working tree differs from HEAD. This is used to throw away local changes Force bool // Keep, if true when switching branches, local changes (the index or the // working tree changes) will be kept so that they can be committed to the // target branch. Force and Keep are mutually exclusive, should not be both // set to true. Keep bool // SparseCheckoutDirectories SparseCheckoutDirectories []string } // Validate validates the fields and sets the default values. func (o *CheckoutOptions) Validate() error { if !o.Create && !o.Hash.IsZero() && o.Branch != "" { return ErrBranchHashExclusive } if o.Create && o.Branch == "" { return ErrCreateRequiresBranch } if o.Branch == "" { o.Branch = plumbing.Master } return nil } // ResetMode defines the mode of a reset operation. type ResetMode int8 const ( // MixedReset resets the index but not the working tree (i.e., the changed // files are preserved but not marked for commit) and reports what has not // been updated. This is the default action. 
MixedReset ResetMode = iota // HardReset resets the index and working tree. Any changes to tracked files // in the working tree are discarded. HardReset // MergeReset resets the index and updates the files in the working tree // that are different between Commit and HEAD, but keeps those which are // different between the index and working tree (i.e. which have changes // which have not been added). // // If a file that is different between Commit and the index has unstaged // changes, reset is aborted. MergeReset // SoftReset does not touch the index file or the working tree at all (but // resets the head to <commit>, just like all modes do). This leaves all // your changed files "Changes to be committed", as git status would put it. SoftReset ) // ResetOptions describes how a reset operation should be performed. type ResetOptions struct { // Commit, if commit is present set the current branch head (HEAD) to it. Commit plumbing.Hash // Mode, form resets the current branch head to Commit and possibly updates // the index (resetting it to the tree of Commit) and the working tree // depending on Mode. If empty MixedReset is used. Mode ResetMode // Files, if not empty will constrain the reseting the index to only files // specified in this list. Files []string } // Validate validates the fields and sets the default values. func (o *ResetOptions) Validate(r *Repository) error { if o.Commit == plumbing.ZeroHash { ref, err := r.Head() if err != nil { return err } o.Commit = ref.Hash() } else { _, err := r.CommitObject(o.Commit) if err != nil { return fmt.Errorf("invalid reset option: %w", err) } } return nil } type LogOrder int8 const ( LogOrderDefault LogOrder = iota LogOrderDFS LogOrderDFSPost LogOrderBSF LogOrderCommitterTime ) // LogOptions describes how a log action should be performed. type LogOptions struct { // When the From option is set the log will only contain commits // reachable from it. If this option is not set, HEAD will be used as // the default From. 
From plumbing.Hash // The default traversal algorithm is Depth-first search // set Order=LogOrderCommitterTime for ordering by committer time (more compatible with `git log`) // set Order=LogOrderBSF for Breadth-first search Order LogOrder // Show only those commits in which the specified file was inserted/updated. // It is equivalent to running `git log -- <file-name>`. // this field is kept for compatibility, it can be replaced with PathFilter FileName *string // Filter commits based on the path of files that are updated // takes file path as argument and should return true if the file is desired // It can be used to implement `git log -- <path>` // either <path> is a file path, or directory path, or a regexp of file/directory path PathFilter func(string) bool // Pretend as if all the refs in refs/, along with HEAD, are listed on the command line as <commit>. // It is equivalent to running `git log --all`. // If set on true, the From option will be ignored. All bool // Show commits more recent than a specific date. // It is equivalent to running `git log --since <date>` or `git log --after <date>`. Since *time.Time // Show commits older than a specific date. // It is equivalent to running `git log --until <date>` or `git log --before <date>`. Until *time.Time } var ( ErrMissingAuthor = errors.New("author field is required") ) // AddOptions describes how an `add` operation should be performed type AddOptions struct { // All equivalent to `git add -A`, update the index not only where the // working tree has a file matching `Path` but also where the index already // has an entry. This adds, modifies, and removes index entries to match the // working tree. If no `Path` nor `Glob` is given when `All` option is // used, all files in the entire working tree are updated. All bool // Path is the exact filepath to the file or directory to be added. Path string // Glob adds all paths, matching pattern, to the index. 
If pattern matches a // directory path, all directory contents are added to the index recursively. Glob string // SkipStatus adds the path with no status check. This option is relevant only // when the `Path` option is specified and does not apply when the `All` option is used. // Notice that when passing an ignored path it will be added anyway. // When true it can speed up adding files to the worktree in very large repositories. SkipStatus bool } // Validate validates the fields and sets the default values. func (o *AddOptions) Validate(r *Repository) error { if o.Path != "" && o.Glob != "" { return fmt.Errorf("fields Path and Glob are mutual exclusive") } return nil } // CommitOptions describes how a commit operation should be performed. type CommitOptions struct { // All automatically stage files that have been modified and deleted, but // new files you have not told Git about are not affected. All bool // AllowEmptyCommits enable empty commits to be created. An empty commit // is when no changes to the tree were made, but a new commit message is // provided. The default behavior is false, which results in ErrEmptyCommit. AllowEmptyCommits bool // Author is the author's signature of the commit. If Author is empty the // Name and Email is read from the config, and time.Now it's used as When. Author *object.Signature // Committer is the committer's signature of the commit. If Committer is // nil the Author signature is used. Committer *object.Signature // Parents are the parents commits for the new commit, by default when // len(Parents) is zero, the hash of HEAD reference is used. Parents []plumbing.Hash // SignKey denotes a key to sign the commit with. A nil value here means the // commit will not be signed. The private key must be present and already // decrypted. SignKey *openpgp.Entity // Signer denotes a cryptographic signer to sign the commit with. // A nil value here means the commit will not be signed. // Takes precedence over SignKey. 
Signer Signer // Amend will create a new commit object and replace the commit that HEAD currently // points to. Cannot be used with All nor Parents. Amend bool } // Validate validates the fields and sets the default values. func (o *CommitOptions) Validate(r *Repository) error { if o.All && o.Amend { return errors.New("all and amend cannot be used together") } if o.Amend && len(o.Parents) > 0 { return errors.New("parents cannot be used with amend") } if o.Author == nil { if err := o.loadConfigAuthorAndCommitter(r); err != nil { return err } } if o.Committer == nil { o.Committer = o.Author } if len(o.Parents) == 0 { head, err := r.Head() if err != nil && err != plumbing.ErrReferenceNotFound { return err } if head != nil { o.Parents = []plumbing.Hash{head.Hash()} } } return nil } func (o *CommitOptions) loadConfigAuthorAndCommitter(r *Repository) error { cfg, err := r.ConfigScoped(config.SystemScope) if err != nil { return err } if o.Author == nil && cfg.Author.Email != "" && cfg.Author.Name != "" { o.Author = &object.Signature{ Name: cfg.Author.Name, Email: cfg.Author.Email, When: time.Now(), } } if o.Committer == nil && cfg.Committer.Email != "" && cfg.Committer.Name != "" { o.Committer = &object.Signature{ Name: cfg.Committer.Name, Email: cfg.Committer.Email, When: time.Now(), } } if o.Author == nil && cfg.User.Email != "" && cfg.User.Name != "" { o.Author = &object.Signature{ Name: cfg.User.Name, Email: cfg.User.Email, When: time.Now(), } } if o.Author == nil { return ErrMissingAuthor } return nil } var ( ErrMissingName = errors.New("name field is required") ErrMissingTagger = errors.New("tagger field is required") ErrMissingMessage = errors.New("message field is required") ) // CreateTagOptions describes how a tag object should be created. type CreateTagOptions struct { // Tagger defines the signature of the tag creator. If Tagger is empty the // Name and Email is read from the config, and time.Now it's used as When. 
Tagger *object.Signature // Message defines the annotation of the tag. It is canonicalized during // validation into the format expected by git - no leading whitespace and // ending in a newline. Message string // SignKey denotes a key to sign the tag with. A nil value here means the tag // will not be signed. The private key must be present and already decrypted. SignKey *openpgp.Entity } // Validate validates the fields and sets the default values. func (o *CreateTagOptions) Validate(r *Repository, hash plumbing.Hash) error { if o.Tagger == nil { if err := o.loadConfigTagger(r); err != nil { return err } } if o.Message == "" { return ErrMissingMessage } // Canonicalize the message into the expected message format. o.Message = strings.TrimSpace(o.Message) + "\n" return nil } func (o *CreateTagOptions) loadConfigTagger(r *Repository) error { cfg, err := r.ConfigScoped(config.SystemScope) if err != nil { return err } if o.Tagger == nil && cfg.Author.Email != "" && cfg.Author.Name != "" { o.Tagger = &object.Signature{ Name: cfg.Author.Name, Email: cfg.Author.Email, When: time.Now(), } } if o.Tagger == nil && cfg.User.Email != "" && cfg.User.Name != "" { o.Tagger = &object.Signature{ Name: cfg.User.Name, Email: cfg.User.Email, When: time.Now(), } } if o.Tagger == nil { return ErrMissingTagger } return nil } // ListOptions describes how a remote list should be performed. type ListOptions struct { // Auth credentials, if required, to use with the remote repository. Auth transport.AuthMethod // InsecureSkipTLS skips ssl verify if protocol is https InsecureSkipTLS bool // CABundle specify additional ca bundle with system cert pool CABundle []byte // PeelingOption defines how peeled objects are handled during a // remote list. PeelingOption PeelingOption // ProxyOptions provides info required for connecting to a proxy. 
ProxyOptions transport.ProxyOptions // Timeout specifies the timeout in seconds for list operations Timeout int } // PeelingOption represents the different ways to handle peeled references. // // Peeled references represent the underlying object of an annotated // (or signed) tag. Refer to upstream documentation for more info: // https://github.com/git/git/blob/master/Documentation/technical/reftable.txt type PeelingOption uint8 const ( // IgnorePeeled ignores all peeled reference names. This is the default behavior. IgnorePeeled PeelingOption = 0 // OnlyPeeled returns only peeled reference names. OnlyPeeled PeelingOption = 1 // AppendPeeled appends peeled reference names to the reference list. AppendPeeled PeelingOption = 2 ) // CleanOptions describes how a clean should be performed. type CleanOptions struct { Dir bool } // GrepOptions describes how a grep should be performed. type GrepOptions struct { // Patterns are compiled Regexp objects to be matched. Patterns []*regexp.Regexp // InvertMatch selects non-matching lines. InvertMatch bool // CommitHash is the hash of the commit from which worktree should be derived. CommitHash plumbing.Hash // ReferenceName is the branch or tag name from which worktree should be derived. ReferenceName plumbing.ReferenceName // PathSpecs are compiled Regexp objects of pathspec to use in the matching. PathSpecs []*regexp.Regexp } var ( ErrHashOrReference = errors.New("ambiguous options, only one of CommitHash or ReferenceName can be passed") ) // Validate validates the fields and sets the default values. // // TODO: deprecate in favor of Validate(r *Repository) in v6. func (o *GrepOptions) Validate(w *Worktree) error { return o.validate(w.r) } func (o *GrepOptions) validate(r *Repository) error { if !o.CommitHash.IsZero() && o.ReferenceName != "" { return ErrHashOrReference } // If none of CommitHash and ReferenceName are provided, set commit hash of // the repository's head. 
if o.CommitHash.IsZero() && o.ReferenceName == "" { ref, err := r.Head() if err != nil { return err } o.CommitHash = ref.Hash() } return nil } // PlainOpenOptions describes how opening a plain repository should be // performed. type PlainOpenOptions struct { // DetectDotGit defines whether parent directories should be // walked until a .git directory or file is found. DetectDotGit bool // Enable .git/commondir support (see https://git-scm.com/docs/gitrepository-layout#Documentation/gitrepository-layout.txt). // NOTE: This option will only work with the filesystem storage. EnableDotGitCommonDir bool } // Validate validates the fields and sets the default values. func (o *PlainOpenOptions) Validate() error { return nil } type PlainInitOptions struct { InitOptions // Determines if the repository will have a worktree (non-bare) or not (bare). Bare bool ObjectFormat formatcfg.ObjectFormat } // Validate validates the fields and sets the default values. func (o *PlainInitOptions) Validate() error { return nil } var ( ErrNoRestorePaths = errors.New("you must specify path(s) to restore") ) // RestoreOptions describes how a restore should be performed. type RestoreOptions struct { // Marks to restore the content in the index Staged bool // Marks to restore the content of the working tree Worktree bool // List of file paths that will be restored Files []string } // Validate validates the fields and sets the default values. func (o *RestoreOptions) Validate() error { if len(o.Files) == 0 { return ErrNoRestorePaths } return nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/worktree_plan9.go
vendor/github.com/jesseduffield/go-git/v5/worktree_plan9.go
package git import ( "syscall" "time" "github.com/jesseduffield/go-git/v5/plumbing/format/index" ) func init() { fillSystemInfo = func(e *index.Entry, sys interface{}) { if os, ok := sys.(*syscall.Dir); ok { // Plan 9 doesn't have a CreatedAt field. e.CreatedAt = time.Unix(int64(os.Mtime), 0) e.Dev = uint32(os.Dev) // Plan 9 has no Inode. // ext2srv(4) appears to store Inode in Qid.Path. e.Inode = uint32(os.Qid.Path) // Plan 9 has string UID/GID e.GID = 0 e.UID = 0 } } } func isSymlinkWindowsNonAdmin(err error) bool { return true }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/repository.go
vendor/github.com/jesseduffield/go-git/v5/repository.go
package git import ( "bytes" "context" "crypto" "encoding/hex" "errors" "fmt" "io" "os" "path" "path/filepath" "strings" "time" "dario.cat/mergo" "github.com/ProtonMail/go-crypto/openpgp" "github.com/go-git/go-billy/v5" "github.com/go-git/go-billy/v5/osfs" "github.com/go-git/go-billy/v5/util" "github.com/jesseduffield/go-git/v5/config" "github.com/jesseduffield/go-git/v5/internal/path_util" "github.com/jesseduffield/go-git/v5/internal/revision" "github.com/jesseduffield/go-git/v5/internal/url" "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/plumbing/cache" formatcfg "github.com/jesseduffield/go-git/v5/plumbing/format/config" "github.com/jesseduffield/go-git/v5/plumbing/format/packfile" "github.com/jesseduffield/go-git/v5/plumbing/hash" "github.com/jesseduffield/go-git/v5/plumbing/object" "github.com/jesseduffield/go-git/v5/plumbing/storer" "github.com/jesseduffield/go-git/v5/storage" "github.com/jesseduffield/go-git/v5/storage/filesystem" "github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit" "github.com/jesseduffield/go-git/v5/utils/ioutil" ) // GitDirName this is a special folder where all the git stuff is. 
const GitDirName = ".git" var ( // ErrBranchExists an error stating the specified branch already exists ErrBranchExists = errors.New("branch already exists") // ErrBranchNotFound an error stating the specified branch does not exist ErrBranchNotFound = errors.New("branch not found") // ErrTagExists an error stating the specified tag already exists ErrTagExists = errors.New("tag already exists") // ErrTagNotFound an error stating the specified tag does not exist ErrTagNotFound = errors.New("tag not found") // ErrFetching is returned when the packfile could not be downloaded ErrFetching = errors.New("unable to fetch packfile") ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch") ErrRepositoryNotExists = errors.New("repository does not exist") ErrRepositoryIncomplete = errors.New("repository's commondir path does not exist") ErrRepositoryAlreadyExists = errors.New("repository already exists") ErrRemoteNotFound = errors.New("remote not found") ErrRemoteExists = errors.New("remote already exists") ErrAnonymousRemoteName = errors.New("anonymous remote name must be 'anonymous'") ErrWorktreeNotProvided = errors.New("worktree should be provided") ErrIsBareRepository = errors.New("worktree not available in a bare repository") ErrUnableToResolveCommit = errors.New("unable to resolve commit") ErrPackedObjectsNotSupported = errors.New("packed objects not supported") ErrSHA256NotSupported = errors.New("go-git was not compiled with SHA256 support") ErrAlternatePathNotSupported = errors.New("alternate path must use the file scheme") ErrUnsupportedMergeStrategy = errors.New("unsupported merge strategy") ErrFastForwardMergeNotPossible = errors.New("not possible to fast-forward merge changes") ) // Repository represents a git repository type Repository struct { Storer storage.Storer r map[string]*Remote wt billy.Filesystem } type InitOptions struct { // The default branch (e.g. 
"refs/heads/master") DefaultBranch plumbing.ReferenceName } // Init creates an empty git repository, based on the given Storer and worktree. // The worktree Filesystem is optional, if nil a bare repository is created. If // the given storer is not empty ErrRepositoryAlreadyExists is returned func Init(s storage.Storer, worktree billy.Filesystem) (*Repository, error) { options := InitOptions{ DefaultBranch: plumbing.Master, } return InitWithOptions(s, worktree, options) } func InitWithOptions(s storage.Storer, worktree billy.Filesystem, options InitOptions) (*Repository, error) { if err := initStorer(s); err != nil { return nil, err } if options.DefaultBranch == "" { options.DefaultBranch = plumbing.Master } if err := options.DefaultBranch.Validate(); err != nil { return nil, err } r := newRepository(s, worktree) _, err := r.Reference(plumbing.HEAD, false) switch err { case plumbing.ErrReferenceNotFound: case nil: return nil, ErrRepositoryAlreadyExists default: return nil, err } h := plumbing.NewSymbolicReference(plumbing.HEAD, options.DefaultBranch) if err := s.SetReference(h); err != nil { return nil, err } if worktree == nil { _ = r.setIsBare(true) return r, nil } return r, setWorktreeAndStoragePaths(r, worktree) } func initStorer(s storer.Storer) error { i, ok := s.(storer.Initializer) if !ok { return nil } return i.Init() } func setWorktreeAndStoragePaths(r *Repository, worktree billy.Filesystem) error { type fsBased interface { Filesystem() billy.Filesystem } // .git file is only created if the storage is file based and the file // system is osfs.OS fs, isFSBased := r.Storer.(fsBased) if !isFSBased { return nil } if err := createDotGitFile(worktree, fs.Filesystem()); err != nil { return err } return setConfigWorktree(r, worktree, fs.Filesystem()) } func createDotGitFile(worktree, storage billy.Filesystem) error { path, err := filepath.Rel(worktree.Root(), storage.Root()) if err != nil { path = storage.Root() } if path == GitDirName { // not needed, since the 
folder is the default place return nil } f, err := worktree.Create(GitDirName) if err != nil { return err } defer f.Close() _, err = fmt.Fprintf(f, "gitdir: %s\n", path) return err } func setConfigWorktree(r *Repository, worktree, storage billy.Filesystem) error { path, err := filepath.Rel(storage.Root(), worktree.Root()) if err != nil { path = worktree.Root() } if path == ".." { // not needed, since the folder is the default place return nil } cfg, err := r.Config() if err != nil { return err } cfg.Core.Worktree = path return r.Storer.SetConfig(cfg) } // Open opens a git repository using the given Storer and worktree filesystem, // if the given storer is complete empty ErrRepositoryNotExists is returned. // The worktree can be nil when the repository being opened is bare, if the // repository is a normal one (not bare) and worktree is nil the err // ErrWorktreeNotProvided is returned func Open(s storage.Storer, worktree billy.Filesystem) (*Repository, error) { _, err := s.Reference(plumbing.HEAD) if err == plumbing.ErrReferenceNotFound { return nil, ErrRepositoryNotExists } if err != nil { return nil, err } return newRepository(s, worktree), nil } // Clone a repository into the given Storer and worktree Filesystem with the // given options, if worktree is nil a bare repository is created. If the given // storer is not empty ErrRepositoryAlreadyExists is returned. func Clone(s storage.Storer, worktree billy.Filesystem, o *CloneOptions) (*Repository, error) { return CloneContext(context.Background(), s, worktree, o) } // CloneContext a repository into the given Storer and worktree Filesystem with // the given options, if worktree is nil a bare repository is created. If the // given storer is not empty ErrRepositoryAlreadyExists is returned. // // The provided Context must be non-nil. If the context expires before the // operation is complete, an error is returned. The context only affects the // transport operations. 
func CloneContext( ctx context.Context, s storage.Storer, worktree billy.Filesystem, o *CloneOptions, ) (*Repository, error) { r, err := Init(s, worktree) if err != nil { return nil, err } return r, r.clone(ctx, o) } // PlainInit create an empty git repository at the given path. isBare defines // if the repository will have worktree (non-bare) or not (bare), if the path // is not empty ErrRepositoryAlreadyExists is returned. func PlainInit(path string, isBare bool) (*Repository, error) { return PlainInitWithOptions(path, &PlainInitOptions{ Bare: isBare, }) } func PlainInitWithOptions(path string, opts *PlainInitOptions) (*Repository, error) { if opts == nil { opts = &PlainInitOptions{} } var wt, dot billy.Filesystem if opts.Bare { dot = osfs.New(path) } else { wt = osfs.New(path) dot, _ = wt.Chroot(GitDirName) } s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault()) r, err := InitWithOptions(s, wt, opts.InitOptions) if err != nil { return nil, err } cfg, err := r.Config() if err != nil { return nil, err } if opts.ObjectFormat != "" { if opts.ObjectFormat == formatcfg.SHA256 && hash.CryptoType != crypto.SHA256 { return nil, ErrSHA256NotSupported } cfg.Core.RepositoryFormatVersion = formatcfg.Version_1 cfg.Extensions.ObjectFormat = opts.ObjectFormat } err = r.Storer.SetConfig(cfg) if err != nil { return nil, err } return r, err } // PlainOpen opens a git repository from the given path. It detects if the // repository is bare or a normal one. If the path doesn't contain a valid // repository ErrRepositoryNotExists is returned func PlainOpen(path string) (*Repository, error) { return PlainOpenWithOptions(path, &PlainOpenOptions{}) } // PlainOpenWithOptions opens a git repository from the given path with specific // options. See PlainOpen for more info. 
func PlainOpenWithOptions(path string, o *PlainOpenOptions) (*Repository, error) { dot, wt, err := dotGitToOSFilesystems(path, o.DetectDotGit) if err != nil { return nil, err } if _, err := dot.Stat(""); err != nil { if os.IsNotExist(err) { return nil, ErrRepositoryNotExists } return nil, err } var repositoryFs billy.Filesystem if o.EnableDotGitCommonDir { dotGitCommon, err := dotGitCommonDirectory(dot) if err != nil { return nil, err } repositoryFs = dotgit.NewRepositoryFilesystem(dot, dotGitCommon) } else { repositoryFs = dot } s := filesystem.NewStorage(repositoryFs, cache.NewObjectLRUDefault()) return Open(s, wt) } func dotGitToOSFilesystems(path string, detect bool) (dot, wt billy.Filesystem, err error) { path, err = path_util.ReplaceTildeWithHome(path) if err != nil { return nil, nil, err } if path, err = filepath.Abs(path); err != nil { return nil, nil, err } var fs billy.Filesystem var fi os.FileInfo for { fs = osfs.New(path) pathinfo, err := fs.Stat("/") if !os.IsNotExist(err) { if pathinfo == nil { return nil, nil, err } if !pathinfo.IsDir() && detect { fs = osfs.New(filepath.Dir(path)) } } fi, err = fs.Stat(GitDirName) if err == nil { // no error; stop break } if !os.IsNotExist(err) { // unknown error; stop return nil, nil, err } if detect { // try its parent as long as we haven't reached // the root dir if dir := filepath.Dir(path); dir != path { path = dir continue } } // not detecting via parent dirs and the dir does not exist; // stop return fs, nil, nil } if fi.IsDir() { dot, err = fs.Chroot(GitDirName) return dot, fs, err } dot, err = dotGitFileToOSFilesystem(path, fs) if err != nil { return nil, nil, err } return dot, fs, nil } func dotGitFileToOSFilesystem(path string, fs billy.Filesystem) (bfs billy.Filesystem, err error) { f, err := fs.Open(GitDirName) if err != nil { return nil, err } defer ioutil.CheckClose(f, &err) b, err := io.ReadAll(f) if err != nil { return nil, err } line := string(b) const prefix = "gitdir: " if 
!strings.HasPrefix(line, prefix) { return nil, fmt.Errorf(".git file has no %s prefix", prefix) } gitdir := strings.Split(line[len(prefix):], "\n")[0] gitdir = strings.TrimSpace(gitdir) if filepath.IsAbs(gitdir) { return osfs.New(gitdir), nil } return osfs.New(fs.Join(path, gitdir)), nil } func dotGitCommonDirectory(fs billy.Filesystem) (commonDir billy.Filesystem, err error) { f, err := fs.Open("commondir") if os.IsNotExist(err) { return nil, nil } if err != nil { return nil, err } b, err := io.ReadAll(f) if err != nil { return nil, err } if len(b) > 0 { path := strings.TrimSpace(string(b)) if filepath.IsAbs(path) { commonDir = osfs.New(path) } else { commonDir = osfs.New(filepath.Join(fs.Root(), path)) } if _, err := commonDir.Stat(""); err != nil { if os.IsNotExist(err) { return nil, ErrRepositoryIncomplete } return nil, err } } return commonDir, nil } // PlainClone a repository into the path with the given options, isBare defines // if the new repository will be bare or normal. If the path is not empty // ErrRepositoryAlreadyExists is returned. // // TODO(mcuadros): move isBare to CloneOptions in v5 func PlainClone(path string, isBare bool, o *CloneOptions) (*Repository, error) { return PlainCloneContext(context.Background(), path, isBare, o) } // PlainCloneContext a repository into the path with the given options, isBare // defines if the new repository will be bare or normal. If the path is not empty // ErrRepositoryAlreadyExists is returned. // // The provided Context must be non-nil. If the context expires before the // operation is complete, an error is returned. The context only affects the // transport operations. 
// // TODO(mcuadros): move isBare to CloneOptions in v5 // TODO(smola): refuse upfront to clone on a non-empty directory in v5, see #1027 func PlainCloneContext(ctx context.Context, path string, isBare bool, o *CloneOptions) (*Repository, error) { cleanup, cleanupParent, err := checkIfCleanupIsNeeded(path) if err != nil { return nil, err } if o.Mirror { isBare = true } r, err := PlainInit(path, isBare) if err != nil { return nil, err } err = r.clone(ctx, o) if err != nil && err != ErrRepositoryAlreadyExists { if cleanup { _ = cleanUpDir(path, cleanupParent) } } return r, err } func newRepository(s storage.Storer, worktree billy.Filesystem) *Repository { return &Repository{ Storer: s, wt: worktree, r: make(map[string]*Remote), } } func checkIfCleanupIsNeeded(path string) (cleanup bool, cleanParent bool, err error) { fi, err := osfs.Default.Stat(path) if err != nil { if os.IsNotExist(err) { return true, true, nil } return false, false, err } if !fi.IsDir() { return false, false, fmt.Errorf("path is not a directory: %s", path) } files, err := osfs.Default.ReadDir(path) if err != nil { return false, false, err } if len(files) == 0 { return true, false, nil } return false, false, nil } func cleanUpDir(path string, all bool) error { if all { return util.RemoveAll(osfs.Default, path) } files, err := osfs.Default.ReadDir(path) if err != nil { return err } for _, fi := range files { if err := util.RemoveAll(osfs.Default, osfs.Default.Join(path, fi.Name())); err != nil { return err } } return err } // Config return the repository config. In a filesystem backed repository this // means read the `.git/config`. func (r *Repository) Config() (*config.Config, error) { return r.Storer.Config() } // SetConfig marshall and writes the repository config. In a filesystem backed // repository this means write the `.git/config`. This function should be called // with the result of `Repository.Config` and never with the output of // `Repository.ConfigScoped`. 
func (r *Repository) SetConfig(cfg *config.Config) error { return r.Storer.SetConfig(cfg) } // ConfigScoped returns the repository config, merged with requested scope and // lower. For example if, config.GlobalScope is given the local and global config // are returned merged in one config value. func (r *Repository) ConfigScoped(scope config.Scope) (*config.Config, error) { // TODO(mcuadros): v6, add this as ConfigOptions.Scoped var err error system := config.NewConfig() if scope >= config.SystemScope { system, err = config.LoadConfig(config.SystemScope) if err != nil { return nil, err } } global := config.NewConfig() if scope >= config.GlobalScope { global, err = config.LoadConfig(config.GlobalScope) if err != nil { return nil, err } } local, err := r.Storer.Config() if err != nil { return nil, err } _ = mergo.Merge(global, system) _ = mergo.Merge(local, global) return local, nil } // Remote return a remote if exists func (r *Repository) Remote(name string) (*Remote, error) { cfg, err := r.Config() if err != nil { return nil, err } c, ok := cfg.Remotes[name] if !ok { return nil, ErrRemoteNotFound } return NewRemote(r.Storer, c), nil } // Remotes returns a list with all the remotes func (r *Repository) Remotes() ([]*Remote, error) { cfg, err := r.Config() if err != nil { return nil, err } remotes := make([]*Remote, len(cfg.Remotes)) var i int for _, c := range cfg.Remotes { remotes[i] = NewRemote(r.Storer, c) i++ } return remotes, nil } // CreateRemote creates a new remote func (r *Repository) CreateRemote(c *config.RemoteConfig) (*Remote, error) { if err := c.Validate(); err != nil { return nil, err } remote := NewRemote(r.Storer, c) cfg, err := r.Config() if err != nil { return nil, err } if _, ok := cfg.Remotes[c.Name]; ok { return nil, ErrRemoteExists } cfg.Remotes[c.Name] = c return remote, r.Storer.SetConfig(cfg) } // CreateRemoteAnonymous creates a new anonymous remote. c.Name must be "anonymous". 
// It's used like 'git fetch git@github.com:src-d/go-git.git master:master'. func (r *Repository) CreateRemoteAnonymous(c *config.RemoteConfig) (*Remote, error) { if err := c.Validate(); err != nil { return nil, err } if c.Name != "anonymous" { return nil, ErrAnonymousRemoteName } remote := NewRemote(r.Storer, c) return remote, nil } // DeleteRemote delete a remote from the repository and delete the config func (r *Repository) DeleteRemote(name string) error { cfg, err := r.Config() if err != nil { return err } if _, ok := cfg.Remotes[name]; !ok { return ErrRemoteNotFound } delete(cfg.Remotes, name) return r.Storer.SetConfig(cfg) } // Branch return a Branch if exists func (r *Repository) Branch(name string) (*config.Branch, error) { cfg, err := r.Config() if err != nil { return nil, err } b, ok := cfg.Branches[name] if !ok { return nil, ErrBranchNotFound } return b, nil } // CreateBranch creates a new Branch func (r *Repository) CreateBranch(c *config.Branch) error { if err := c.Validate(); err != nil { return err } cfg, err := r.Config() if err != nil { return err } if _, ok := cfg.Branches[c.Name]; ok { return ErrBranchExists } cfg.Branches[c.Name] = c return r.Storer.SetConfig(cfg) } // DeleteBranch delete a Branch from the repository and delete the config func (r *Repository) DeleteBranch(name string) error { cfg, err := r.Config() if err != nil { return err } if _, ok := cfg.Branches[name]; !ok { return ErrBranchNotFound } delete(cfg.Branches, name) return r.Storer.SetConfig(cfg) } // CreateTag creates a tag. If opts is included, the tag is an annotated tag, // otherwise a lightweight tag is created. 
func (r *Repository) CreateTag(name string, hash plumbing.Hash, opts *CreateTagOptions) (*plumbing.Reference, error) { rname := plumbing.NewTagReferenceName(name) if err := rname.Validate(); err != nil { return nil, err } _, err := r.Storer.Reference(rname) switch err { case nil: // Tag exists, this is an error return nil, ErrTagExists case plumbing.ErrReferenceNotFound: // Tag missing, available for creation, pass this default: // Some other error return nil, err } var target plumbing.Hash if opts != nil { target, err = r.createTagObject(name, hash, opts) if err != nil { return nil, err } } else { target = hash } ref := plumbing.NewHashReference(rname, target) if err = r.Storer.SetReference(ref); err != nil { return nil, err } return ref, nil } func (r *Repository) createTagObject(name string, hash plumbing.Hash, opts *CreateTagOptions) (plumbing.Hash, error) { if err := opts.Validate(r, hash); err != nil { return plumbing.ZeroHash, err } rawobj, err := object.GetObject(r.Storer, hash) if err != nil { return plumbing.ZeroHash, err } tag := &object.Tag{ Name: name, Tagger: *opts.Tagger, Message: opts.Message, TargetType: rawobj.Type(), Target: hash, } if opts.SignKey != nil { sig, err := r.buildTagSignature(tag, opts.SignKey) if err != nil { return plumbing.ZeroHash, err } tag.PGPSignature = sig } obj := r.Storer.NewEncodedObject() if err := tag.Encode(obj); err != nil { return plumbing.ZeroHash, err } return r.Storer.SetEncodedObject(obj) } func (r *Repository) buildTagSignature(tag *object.Tag, signKey *openpgp.Entity) (string, error) { encoded := &plumbing.MemoryObject{} if err := tag.Encode(encoded); err != nil { return "", err } rdr, err := encoded.Reader() if err != nil { return "", err } var b bytes.Buffer if err := openpgp.ArmoredDetachSign(&b, signKey, rdr, nil); err != nil { return "", err } return b.String(), nil } // Tag returns a tag from the repository. 
// // If you want to check to see if the tag is an annotated tag, you can call // TagObject on the hash of the reference in ForEach: // // ref, err := r.Tag("v0.1.0") // if err != nil { // // Handle error // } // // obj, err := r.TagObject(ref.Hash()) // switch err { // case nil: // // Tag object present // case plumbing.ErrObjectNotFound: // // Not a tag object // default: // // Some other error // } func (r *Repository) Tag(name string) (*plumbing.Reference, error) { ref, err := r.Reference(plumbing.ReferenceName(path.Join("refs", "tags", name)), false) if err != nil { if err == plumbing.ErrReferenceNotFound { // Return a friendly error for this one, versus just ReferenceNotFound. return nil, ErrTagNotFound } return nil, err } return ref, nil } // DeleteTag deletes a tag from the repository. func (r *Repository) DeleteTag(name string) error { _, err := r.Tag(name) if err != nil { return err } return r.Storer.RemoveReference(plumbing.ReferenceName(path.Join("refs", "tags", name))) } func (r *Repository) resolveToCommitHash(h plumbing.Hash) (plumbing.Hash, error) { obj, err := r.Storer.EncodedObject(plumbing.AnyObject, h) if err != nil { return plumbing.ZeroHash, err } switch obj.Type() { case plumbing.TagObject: t, err := object.DecodeTag(r.Storer, obj) if err != nil { return plumbing.ZeroHash, err } return r.resolveToCommitHash(t.Target) case plumbing.CommitObject: return h, nil default: return plumbing.ZeroHash, ErrUnableToResolveCommit } } // Clone clones a remote repository func (r *Repository) clone(ctx context.Context, o *CloneOptions) error { if err := o.Validate(); err != nil { return err } c := &config.RemoteConfig{ Name: o.RemoteName, URLs: []string{o.URL}, Fetch: r.cloneRefSpec(o), Mirror: o.Mirror, } if _, err := r.CreateRemote(c); err != nil { return err } // When the repository to clone is on the local machine, // instead of using hard links, automatically setup .git/objects/info/alternates // to share the objects with the source repository if 
o.Shared { if !url.IsLocalEndpoint(o.URL) { return ErrAlternatePathNotSupported } altpath := o.URL remoteRepo, err := PlainOpen(o.URL) if err != nil { return fmt.Errorf("failed to open remote repository: %w", err) } conf, err := remoteRepo.Config() if err != nil { return fmt.Errorf("failed to read remote repository configuration: %w", err) } if !conf.Core.IsBare { altpath = path.Join(altpath, GitDirName) } if err := r.Storer.AddAlternate(altpath); err != nil { return fmt.Errorf("failed to add alternate file to git objects dir: %w", err) } } ref, err := r.fetchAndUpdateReferences(ctx, &FetchOptions{ RefSpecs: c.Fetch, Depth: o.Depth, Auth: o.Auth, Progress: o.Progress, Tags: o.Tags, RemoteName: o.RemoteName, InsecureSkipTLS: o.InsecureSkipTLS, CABundle: o.CABundle, ProxyOptions: o.ProxyOptions, }, o.ReferenceName) if err != nil { return err } if r.wt != nil && !o.NoCheckout { w, err := r.Worktree() if err != nil { return err } head, err := r.Head() if err != nil { return err } if err := w.Reset(&ResetOptions{ Mode: MergeReset, Commit: head.Hash(), }); err != nil { return err } if o.RecurseSubmodules != NoRecurseSubmodules { if err := w.updateSubmodules(ctx, &SubmoduleUpdateOptions{ RecurseSubmodules: o.RecurseSubmodules, Depth: func() int { if o.ShallowSubmodules { return 1 } return 0 }(), Auth: o.Auth, }); err != nil { return err } } } if err := r.updateRemoteConfigIfNeeded(o, c, ref); err != nil { return err } if !o.Mirror && ref.Name().IsBranch() { branchRef := ref.Name() branchName := strings.Split(string(branchRef), "refs/heads/")[1] b := &config.Branch{ Name: branchName, Merge: branchRef, } if o.RemoteName == "" { b.Remote = "origin" } else { b.Remote = o.RemoteName } if err := r.CreateBranch(b); err != nil { return err } } return nil } const ( refspecTag = "+refs/tags/%s:refs/tags/%[1]s" refspecSingleBranch = "+refs/heads/%s:refs/remotes/%s/%[1]s" refspecSingleBranchHEAD = "+HEAD:refs/remotes/%s/HEAD" ) func (r *Repository) cloneRefSpec(o *CloneOptions) 
[]config.RefSpec { switch { case o.Mirror: return []config.RefSpec{"+refs/*:refs/*"} case o.ReferenceName.IsTag(): return []config.RefSpec{ config.RefSpec(fmt.Sprintf(refspecTag, o.ReferenceName.Short())), } case o.SingleBranch && o.ReferenceName == plumbing.HEAD: return []config.RefSpec{ config.RefSpec(fmt.Sprintf(refspecSingleBranchHEAD, o.RemoteName)), } case o.SingleBranch: return []config.RefSpec{ config.RefSpec(fmt.Sprintf(refspecSingleBranch, o.ReferenceName.Short(), o.RemoteName)), } default: return []config.RefSpec{ config.RefSpec(fmt.Sprintf(config.DefaultFetchRefSpec, o.RemoteName)), } } } func (r *Repository) setIsBare(isBare bool) error { cfg, err := r.Config() if err != nil { return err } cfg.Core.IsBare = isBare return r.Storer.SetConfig(cfg) } func (r *Repository) updateRemoteConfigIfNeeded(o *CloneOptions, c *config.RemoteConfig, _ *plumbing.Reference) error { if !o.SingleBranch { return nil } c.Fetch = r.cloneRefSpec(o) cfg, err := r.Config() if err != nil { return err } cfg.Remotes[c.Name] = c return r.Storer.SetConfig(cfg) } func (r *Repository) fetchAndUpdateReferences( ctx context.Context, o *FetchOptions, ref plumbing.ReferenceName, ) (*plumbing.Reference, error) { if err := o.Validate(); err != nil { return nil, err } remote, err := r.Remote(o.RemoteName) if err != nil { return nil, err } objsUpdated := true remoteRefs, err := remote.fetch(ctx, o) if err == NoErrAlreadyUpToDate { objsUpdated = false } else if err == packfile.ErrEmptyPackfile { return nil, ErrFetching } else if err != nil { return nil, err } resolvedRef, err := expand_ref(remoteRefs, ref) if err != nil { return nil, err } refsUpdated, err := r.updateReferences(remote.c.Fetch, resolvedRef) if err != nil { return nil, err } if !objsUpdated && !refsUpdated { return nil, NoErrAlreadyUpToDate } return resolvedRef, nil } func (r *Repository) updateReferences(spec []config.RefSpec, resolvedRef *plumbing.Reference) (updated bool, err error) { if !resolvedRef.Name().IsBranch() { // 
Detached HEAD mode h, err := r.resolveToCommitHash(resolvedRef.Hash()) if err != nil { return false, err } head := plumbing.NewHashReference(plumbing.HEAD, h) return updateReferenceStorerIfNeeded(r.Storer, head) } refs := []*plumbing.Reference{ // Create local reference for the resolved ref resolvedRef, // Create local symbolic HEAD plumbing.NewSymbolicReference(plumbing.HEAD, resolvedRef.Name()), } refs = append(refs, r.calculateRemoteHeadReference(spec, resolvedRef)...) for _, ref := range refs { u, err := updateReferenceStorerIfNeeded(r.Storer, ref) if err != nil { return updated, err } if u { updated = true } } return } func (r *Repository) calculateRemoteHeadReference(spec []config.RefSpec, resolvedHead *plumbing.Reference) []*plumbing.Reference { var refs []*plumbing.Reference // Create resolved HEAD reference with remote prefix if it does not // exist. This is needed when using single branch and HEAD. for _, rs := range spec { name := resolvedHead.Name() if !rs.Match(name) { continue } name = rs.Dst(name) _, err := r.Storer.Reference(name) if err == plumbing.ErrReferenceNotFound { refs = append(refs, plumbing.NewHashReference(name, resolvedHead.Hash())) } } return refs } func checkAndUpdateReferenceStorerIfNeeded( s storer.ReferenceStorer, r, old *plumbing.Reference) ( updated bool, err error) { p, err := s.Reference(r.Name()) if err != nil && err != plumbing.ErrReferenceNotFound { return false, err } // we use the string method to compare references, is the easiest way if err == plumbing.ErrReferenceNotFound || r.String() != p.String() { if err := s.CheckAndSetReference(r, old); err != nil { return false, err } return true, nil } return false, nil } func updateReferenceStorerIfNeeded( s storer.ReferenceStorer, r *plumbing.Reference) (updated bool, err error) { return checkAndUpdateReferenceStorerIfNeeded(s, r, nil) } // Fetch fetches references along with the objects necessary to complete // their histories, from the remote named as FetchOptions.RemoteName. 
// // Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are // no changes to be fetched, or an error. func (r *Repository) Fetch(o *FetchOptions) error { return r.FetchContext(context.Background(), o) } // FetchContext fetches references along with the objects necessary to complete // their histories, from the remote named as FetchOptions.RemoteName. // // Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are // no changes to be fetched, or an error. // // The provided Context must be non-nil. If the context expires before the // operation is complete, an error is returned. The context only affects the // transport operations. func (r *Repository) FetchContext(ctx context.Context, o *FetchOptions) error { if err := o.Validate(); err != nil { return err } remote, err := r.Remote(o.RemoteName) if err != nil { return err } return remote.FetchContext(ctx, o) } // Push performs a push to the remote. Returns NoErrAlreadyUpToDate if // the remote was already up-to-date, from the remote named as // FetchOptions.RemoteName. func (r *Repository) Push(o *PushOptions) error { return r.PushContext(context.Background(), o) } // PushContext performs a push to the remote. Returns NoErrAlreadyUpToDate if // the remote was already up-to-date, from the remote named as // FetchOptions.RemoteName. // // The provided Context must be non-nil. If the context expires before the // operation is complete, an error is returned. The context only affects the // transport operations. func (r *Repository) PushContext(ctx context.Context, o *PushOptions) error { if err := o.Validate(); err != nil { return err } remote, err := r.Remote(o.RemoteName) if err != nil { return err } return remote.PushContext(ctx, o) } // Log returns the commit history from the given LogOptions. 
func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) { fn := commitIterFunc(o.Order) if fn == nil { return nil, fmt.Errorf("invalid Order=%v", o.Order) } var ( it object.CommitIter err error ) if o.All { it, err = r.logAll(fn) } else { it, err = r.log(o.From, fn) } if err != nil { return nil, err } if o.FileName != nil { // for `git log --all` also check parent (if the next commit comes from the real parent) it = r.logWithFile(*o.FileName, it, o.All) } if o.PathFilter != nil { it = r.logWithPathFilter(o.PathFilter, it, o.All) } if o.Since != nil || o.Until != nil { limitOptions := object.LogLimitOptions{Since: o.Since, Until: o.Until} it = r.logWithLimit(it, limitOptions) } return it, nil } func (r *Repository) log(from plumbing.Hash, commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) { h := from if from == plumbing.ZeroHash { head, err := r.Head() if err != nil { return nil, err } h = head.Hash() } commit, err := r.CommitObject(h) if err != nil { return nil, err } return commitIterFunc(commit), nil } func (r *Repository) logAll(commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) { return object.NewCommitAllIter(r.Storer, commitIterFunc) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
true
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/doc.go
vendor/github.com/jesseduffield/go-git/v5/doc.go
// A highly extensible git implementation in pure Go. // // go-git aims to reach the completeness of libgit2 or jgit, nowadays covers the // majority of the plumbing read operations and some of the main write // operations, but lacks the main porcelain operations such as merges. // // It is highly extensible, we have been following the open/close principle in // its design to facilitate extensions, mainly focusing the efforts on the // persistence of the objects. package git
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/worktree_commit.go
vendor/github.com/jesseduffield/go-git/v5/worktree_commit.go
package git import ( "bytes" "errors" "io" "path" "regexp" "sort" "strings" "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/plumbing/filemode" "github.com/jesseduffield/go-git/v5/plumbing/format/index" "github.com/jesseduffield/go-git/v5/plumbing/object" "github.com/jesseduffield/go-git/v5/storage" "github.com/ProtonMail/go-crypto/openpgp" "github.com/ProtonMail/go-crypto/openpgp/packet" "github.com/go-git/go-billy/v5" ) var ( // ErrEmptyCommit occurs when a commit is attempted using a clean // working tree, with no changes to be committed. ErrEmptyCommit = errors.New("cannot create empty commit: clean working tree") // characters to be removed from user name and/or email before using them to build a commit object // See https://git-scm.com/docs/git-commit#_commit_information invalidCharactersRe = regexp.MustCompile(`[<>\n]`) ) // Commit stores the current contents of the index in a new commit along with // a log message from the user describing the changes. func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error) { if err := opts.Validate(w.r); err != nil { return plumbing.ZeroHash, err } if opts.All { if err := w.autoAddModifiedAndDeleted(); err != nil { return plumbing.ZeroHash, err } } if opts.Amend { head, err := w.r.Head() if err != nil { return plumbing.ZeroHash, err } headCommit, err := w.r.CommitObject(head.Hash()) if err != nil { return plumbing.ZeroHash, err } opts.Parents = nil if len(headCommit.ParentHashes) != 0 { opts.Parents = []plumbing.Hash{headCommit.ParentHashes[0]} } } idx, err := w.r.Storer.Index() if err != nil { return plumbing.ZeroHash, err } // First handle the case of the first commit in the repository being empty. 
if len(opts.Parents) == 0 && len(idx.Entries) == 0 && !opts.AllowEmptyCommits { return plumbing.ZeroHash, ErrEmptyCommit } h := &buildTreeHelper{ fs: w.Filesystem, s: w.r.Storer, } treeHash, err := h.BuildTree(idx, opts) if err != nil { return plumbing.ZeroHash, err } previousTree := plumbing.ZeroHash if len(opts.Parents) > 0 { parentCommit, err := w.r.CommitObject(opts.Parents[0]) if err != nil { return plumbing.ZeroHash, err } previousTree = parentCommit.TreeHash } if treeHash == previousTree && !opts.AllowEmptyCommits { return plumbing.ZeroHash, ErrEmptyCommit } commit, err := w.buildCommitObject(msg, opts, treeHash) if err != nil { return plumbing.ZeroHash, err } return commit, w.updateHEAD(commit) } func (w *Worktree) autoAddModifiedAndDeleted() error { s, err := w.Status() if err != nil { return err } idx, err := w.r.Storer.Index() if err != nil { return err } for path, fs := range s { if fs.Worktree != Modified && fs.Worktree != Deleted { continue } if _, _, err := w.doAddFile(idx, s, path, nil); err != nil { return err } } return w.r.Storer.SetIndex(idx) } func (w *Worktree) updateHEAD(commit plumbing.Hash) error { head, err := w.r.Storer.Reference(plumbing.HEAD) if err != nil { return err } name := plumbing.HEAD if head.Type() != plumbing.HashReference { name = head.Target() } ref := plumbing.NewHashReference(name, commit) return w.r.Storer.SetReference(ref) } func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumbing.Hash) (plumbing.Hash, error) { commit := &object.Commit{ Author: w.sanitize(*opts.Author), Committer: w.sanitize(*opts.Committer), Message: msg, TreeHash: tree, ParentHashes: opts.Parents, } // Convert SignKey into a Signer if set. Existing Signer should take priority. 
signer := opts.Signer if signer == nil && opts.SignKey != nil { signer = &gpgSigner{key: opts.SignKey} } if signer != nil { sig, err := signObject(signer, commit) if err != nil { return plumbing.ZeroHash, err } commit.PGPSignature = string(sig) } obj := w.r.Storer.NewEncodedObject() if err := commit.Encode(obj); err != nil { return plumbing.ZeroHash, err } return w.r.Storer.SetEncodedObject(obj) } func (w *Worktree) sanitize(signature object.Signature) object.Signature { return object.Signature{ Name: invalidCharactersRe.ReplaceAllString(signature.Name, ""), Email: invalidCharactersRe.ReplaceAllString(signature.Email, ""), When: signature.When, } } type gpgSigner struct { key *openpgp.Entity cfg *packet.Config } func (s *gpgSigner) Sign(message io.Reader) ([]byte, error) { var b bytes.Buffer if err := openpgp.ArmoredDetachSign(&b, s.key, message, s.cfg); err != nil { return nil, err } return b.Bytes(), nil } // buildTreeHelper converts a given index.Index file into multiple git objects // reading the blobs from the given filesystem and creating the trees from the // index structure. The created objects are pushed to a given Storer. type buildTreeHelper struct { fs billy.Filesystem s storage.Storer trees map[string]*object.Tree entries map[string]*object.TreeEntry } // BuildTree builds the tree objects and push its to the storer, the hash // of the root tree is returned. 
func (h *buildTreeHelper) BuildTree(idx *index.Index, opts *CommitOptions) (plumbing.Hash, error) { const rootNode = "" h.trees = map[string]*object.Tree{rootNode: {}} h.entries = map[string]*object.TreeEntry{} for _, e := range idx.Entries { if err := h.commitIndexEntry(e); err != nil { return plumbing.ZeroHash, err } } return h.copyTreeToStorageRecursive(rootNode, h.trees[rootNode]) } func (h *buildTreeHelper) commitIndexEntry(e *index.Entry) error { parts := strings.Split(e.Name, "/") var fullpath string for _, part := range parts { parent := fullpath fullpath = path.Join(fullpath, part) h.doBuildTree(e, parent, fullpath) } return nil } func (h *buildTreeHelper) doBuildTree(e *index.Entry, parent, fullpath string) { if _, ok := h.trees[fullpath]; ok { return } if _, ok := h.entries[fullpath]; ok { return } te := object.TreeEntry{Name: path.Base(fullpath)} if fullpath == e.Name { te.Mode = e.Mode te.Hash = e.Hash } else { te.Mode = filemode.Dir h.trees[fullpath] = &object.Tree{} } h.trees[parent].Entries = append(h.trees[parent].Entries, te) } type sortableEntries []object.TreeEntry func (sortableEntries) sortName(te object.TreeEntry) string { if te.Mode == filemode.Dir { return te.Name + "/" } return te.Name } func (se sortableEntries) Len() int { return len(se) } func (se sortableEntries) Less(i int, j int) bool { return se.sortName(se[i]) < se.sortName(se[j]) } func (se sortableEntries) Swap(i int, j int) { se[i], se[j] = se[j], se[i] } func (h *buildTreeHelper) copyTreeToStorageRecursive(parent string, t *object.Tree) (plumbing.Hash, error) { sort.Sort(sortableEntries(t.Entries)) for i, e := range t.Entries { if e.Mode != filemode.Dir && !e.Hash.IsZero() { continue } path := path.Join(parent, e.Name) var err error e.Hash, err = h.copyTreeToStorageRecursive(path, h.trees[path]) if err != nil { return plumbing.ZeroHash, err } t.Entries[i] = e } o := h.s.NewEncodedObject() if err := t.Encode(o); err != nil { return plumbing.ZeroHash, err } hash := o.Hash() if 
h.s.HasEncodedObject(hash) == nil { return hash, nil } return h.s.SetEncodedObject(o) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/prune.go
vendor/github.com/jesseduffield/go-git/v5/prune.go
package git import ( "errors" "time" "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/plumbing/storer" ) type PruneHandler func(unreferencedObjectHash plumbing.Hash) error type PruneOptions struct { // OnlyObjectsOlderThan if set to non-zero value // selects only objects older than the time provided. OnlyObjectsOlderThan time.Time // Handler is called on matching objects Handler PruneHandler } var ErrLooseObjectsNotSupported = errors.New("loose objects not supported") // DeleteObject deletes an object from a repository. // The type conveniently matches PruneHandler. func (r *Repository) DeleteObject(hash plumbing.Hash) error { los, ok := r.Storer.(storer.LooseObjectStorer) if !ok { return ErrLooseObjectsNotSupported } return los.DeleteLooseObject(hash) } func (r *Repository) Prune(opt PruneOptions) error { los, ok := r.Storer.(storer.LooseObjectStorer) if !ok { return ErrLooseObjectsNotSupported } pw := newObjectWalker(r.Storer) err := pw.walkAllRefs() if err != nil { return err } // Now walk all (loose) objects in storage. return los.ForEachObjectHash(func(hash plumbing.Hash) error { // Get out if we have seen this object. if pw.isSeen(hash) { return nil } // Otherwise it is a candidate for pruning. // Check out for too new objects next. if !opt.OnlyObjectsOlderThan.IsZero() { // Errors here are non-fatal. The object may be e.g. packed. // Or concurrently deleted. Skip such objects. t, err := los.LooseObjectTime(hash) if err != nil { return nil } // Skip too new objects. if !t.Before(opt.OnlyObjectsOlderThan) { return nil } } return opt.Handler(hash) }) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/worktree_windows.go
vendor/github.com/jesseduffield/go-git/v5/worktree_windows.go
// +build windows package git import ( "os" "syscall" "time" "github.com/jesseduffield/go-git/v5/plumbing/format/index" ) func init() { fillSystemInfo = func(e *index.Entry, sys interface{}) { if os, ok := sys.(*syscall.Win32FileAttributeData); ok { seconds := os.CreationTime.Nanoseconds() / 1000000000 nanoseconds := os.CreationTime.Nanoseconds() - seconds*1000000000 e.CreatedAt = time.Unix(seconds, nanoseconds) } } } func isSymlinkWindowsNonAdmin(err error) bool { const ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314 if err != nil { if errLink, ok := err.(*os.LinkError); ok { if errNo, ok := errLink.Err.(syscall.Errno); ok { return errNo == ERROR_PRIVILEGE_NOT_HELD } } } return false }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/submodule.go
vendor/github.com/jesseduffield/go-git/v5/submodule.go
package git

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"path"

	"github.com/go-git/go-billy/v5"
	"github.com/jesseduffield/go-git/v5/config"
	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/format/index"
	"github.com/jesseduffield/go-git/v5/plumbing/transport"
)

var (
	ErrSubmoduleAlreadyInitialized = errors.New("submodule already initialized")
	ErrSubmoduleNotInitialized     = errors.New("submodule not initialized")
)

// Submodule allows you to keep another Git repository in a
// subdirectory of your repository.
type Submodule struct {
	// initialized defines if a submodule was already initialized.
	initialized bool

	c *config.Submodule
	w *Worktree
}

// Config returns the submodule config.
func (s *Submodule) Config() *config.Submodule {
	return s.c
}

// Init initializes the submodule by copying its recorded configuration into
// the repository config. Returns ErrSubmoduleAlreadyInitialized if an entry
// for this submodule already exists there.
func (s *Submodule) Init() error {
	cfg, err := s.w.r.Config()
	if err != nil {
		return err
	}

	_, ok := cfg.Submodules[s.c.Name]
	if ok {
		return ErrSubmoduleAlreadyInitialized
	}

	s.initialized = true

	cfg.Submodules[s.c.Name] = s.c
	return s.w.r.Storer.SetConfig(cfg)
}

// Status returns the status of the submodule.
func (s *Submodule) Status() (*SubmoduleStatus, error) {
	idx, err := s.w.r.Storer.Index()
	if err != nil {
		return nil, err
	}

	return s.status(idx)
}

// status computes the submodule status against an already-loaded index:
// Expected comes from the superproject index entry, Current from the
// submodule's HEAD (only when the submodule is initialized).
func (s *Submodule) status(idx *index.Index) (*SubmoduleStatus, error) {
	status := &SubmoduleStatus{
		Path: s.c.Path,
	}

	e, err := idx.Entry(s.c.Path)
	if err != nil && err != index.ErrEntryNotFound {
		return nil, err
	}

	if e != nil {
		status.Expected = e.Hash
	}

	if !s.initialized {
		return status, nil
	}

	r, err := s.Repository()
	if err != nil {
		return nil, err
	}

	head, err := r.Head()
	if err == nil {
		status.Current = head.Hash()
	}

	// A missing HEAD just means the submodule has no checkout yet; it is
	// not an error for status purposes.
	if err != nil && err == plumbing.ErrReferenceNotFound {
		err = nil
	}

	return status, err
}

// Repository returns the Repository represented by this submodule,
// initializing its storage (and its default remote) on first use.
func (s *Submodule) Repository() (*Repository, error) {
	if !s.initialized {
		return nil, ErrSubmoduleNotInitialized
	}

	storer, err := s.w.r.Storer.Module(s.c.Name)
	if err != nil {
		return nil, err
	}

	// A readable HEAD reference indicates the module storage was already
	// populated by a previous run.
	_, err = storer.Reference(plumbing.HEAD)
	if err != nil && err != plumbing.ErrReferenceNotFound {
		return nil, err
	}

	var exists bool
	if err == nil {
		exists = true
	}

	var worktree billy.Filesystem
	if worktree, err = s.w.Filesystem.Chroot(s.c.Path); err != nil {
		return nil, err
	}

	if exists {
		return Open(storer, worktree)
	}

	r, err := Init(storer, worktree)
	if err != nil {
		return nil, err
	}

	moduleEndpoint, err := transport.NewEndpoint(s.c.URL)
	if err != nil {
		return nil, err
	}

	// Relative file:// submodule URLs are resolved against the
	// superproject's first remote.
	if !path.IsAbs(moduleEndpoint.Path) && moduleEndpoint.Protocol == "file" {
		remotes, err := s.w.r.Remotes()
		if err != nil {
			return nil, err
		}

		rootEndpoint, err := transport.NewEndpoint(remotes[0].c.URLs[0])
		if err != nil {
			return nil, err
		}

		rootEndpoint.Path = path.Join(rootEndpoint.Path, moduleEndpoint.Path)
		*moduleEndpoint = *rootEndpoint
	}

	_, err = r.CreateRemote(&config.RemoteConfig{
		Name: DefaultRemoteName,
		URLs: []string{moduleEndpoint.String()},
	})

	return r, err
}

// Update the registered submodule to match what the superproject expects, the
// submodule should be initialized first calling the Init method or setting in
// the options SubmoduleUpdateOptions.Init equals true.
func (s *Submodule) Update(o *SubmoduleUpdateOptions) error {
	return s.UpdateContext(context.Background(), o)
}

// UpdateContext the registered submodule to match what the superproject
// expects, the submodule should be initialized first calling the Init method or
// setting in the options SubmoduleUpdateOptions.Init equals true.
//
// The provided Context must be non-nil. If the context expires before the
// operation is complete, an error is returned. The context only affects the
// transport operations.
func (s *Submodule) UpdateContext(ctx context.Context, o *SubmoduleUpdateOptions) error {
	return s.update(ctx, o, plumbing.ZeroHash)
}

// update fetches and checks out the submodule at forceHash, or — when
// forceHash is zero — at the commit recorded for it in the superproject's
// index, then recurses into nested submodules if requested.
func (s *Submodule) update(ctx context.Context, o *SubmoduleUpdateOptions, forceHash plumbing.Hash) error {
	if !s.initialized && !o.Init {
		return ErrSubmoduleNotInitialized
	}

	if !s.initialized && o.Init {
		if err := s.Init(); err != nil {
			return err
		}
	}

	idx, err := s.w.r.Storer.Index()
	if err != nil {
		return err
	}

	hash := forceHash
	if hash.IsZero() {
		e, err := idx.Entry(s.c.Path)
		if err != nil {
			return err
		}

		hash = e.Hash
	}

	r, err := s.Repository()
	if err != nil {
		return err
	}

	if err := s.fetchAndCheckout(ctx, r, o, hash); err != nil {
		return err
	}

	return s.doRecursiveUpdate(ctx, r, o)
}

// doRecursiveUpdate updates the submodules nested inside r, decrementing
// the recursion budget carried in the options.
func (s *Submodule) doRecursiveUpdate(ctx context.Context, r *Repository, o *SubmoduleUpdateOptions) error {
	if o.RecurseSubmodules == NoRecurseSubmodules {
		return nil
	}

	w, err := r.Worktree()
	if err != nil {
		return err
	}

	l, err := w.Submodules()
	if err != nil {
		return err
	}

	// Copy the options so the caller's value is not mutated.
	new := &SubmoduleUpdateOptions{}
	*new = *o

	new.RecurseSubmodules--
	return l.UpdateContext(ctx, new)
}

// fetchAndCheckout fetches the submodule remote (unless disabled), makes
// sure the wanted commit is present locally, checks it out, and points the
// submodule's HEAD at it.
func (s *Submodule) fetchAndCheckout(
	ctx context.Context, r *Repository, o *SubmoduleUpdateOptions, hash plumbing.Hash,
) error {
	if !o.NoFetch {
		err := r.FetchContext(ctx, &FetchOptions{Auth: o.Auth, Depth: o.Depth})
		if err != nil && err != NoErrAlreadyUpToDate {
			return err
		}
	}

	w, err := r.Worktree()
	if err != nil {
		return err
	}

	// Handle a case when submodule refers to an orphaned commit that's still reachable
	// through Git server using a special protocol capability[1].
	//
	// [1]: https://git-scm.com/docs/protocol-capabilities#_allow_reachable_sha1_in_want
	if !o.NoFetch {
		if _, err := w.r.Object(plumbing.AnyObject, hash); err != nil {
			refSpec := config.RefSpec("+" + hash.String() + ":" + hash.String())

			err := r.FetchContext(ctx, &FetchOptions{
				Auth:     o.Auth,
				RefSpecs: []config.RefSpec{refSpec},
				Depth:    o.Depth,
			})
			if err != nil && err != NoErrAlreadyUpToDate && err != ErrExactSHA1NotSupported {
				return err
			}
		}
	}

	if err := w.Checkout(&CheckoutOptions{Hash: hash}); err != nil {
		return err
	}

	head := plumbing.NewHashReference(plumbing.HEAD, hash)
	return r.Storer.SetReference(head)
}

// Submodules list of several submodules from the same repository.
type Submodules []*Submodule

// Init initializes the submodules in this list.
func (s Submodules) Init() error {
	for _, sub := range s {
		if err := sub.Init(); err != nil {
			return err
		}
	}

	return nil
}

// Update updates all the submodules in this list.
func (s Submodules) Update(o *SubmoduleUpdateOptions) error {
	return s.UpdateContext(context.Background(), o)
}

// UpdateContext updates all the submodules in this list.
//
// The provided Context must be non-nil. If the context expires before the
// operation is complete, an error is returned. The context only affects the
// transport operations.
func (s Submodules) UpdateContext(ctx context.Context, o *SubmoduleUpdateOptions) error {
	for _, sub := range s {
		if err := sub.UpdateContext(ctx, o); err != nil {
			return err
		}
	}

	return nil
}

// Status returns the status of the submodules.
func (s Submodules) Status() (SubmodulesStatus, error) {
	var list SubmodulesStatus

	// All submodules in the list share the same superproject repository;
	// it is captured from the first element.
	var r *Repository
	for _, sub := range s {
		if r == nil {
			r = sub.w.r
		}

		idx, err := r.Storer.Index()
		if err != nil {
			return nil, err
		}

		status, err := sub.status(idx)
		if err != nil {
			return nil, err
		}

		list = append(list, status)
	}

	return list, nil
}

// SubmodulesStatus contains the status for all submodules in the worktree.
type SubmodulesStatus []*SubmoduleStatus

// String is equivalent to `git submodule status`.
func (s SubmodulesStatus) String() string {
	buf := bytes.NewBuffer(nil)
	for _, sub := range s {
		fmt.Fprintln(buf, sub)
	}

	return buf.String()
}

// SubmoduleStatus contains the status for a submodule in the worktree.
type SubmoduleStatus struct {
	Path     string
	Current  plumbing.Hash
	Expected plumbing.Hash
	Branch   plumbing.ReferenceName
}

// IsClean reports whether the HEAD of the submodule equals the expected
// commit recorded in the superproject index.
func (s *SubmoduleStatus) IsClean() bool {
	return s.Current == s.Expected
}

// String is equivalent to `git submodule status <submodule>`
//
// This will print the SHA-1 of the currently checked out commit for a
// submodule, along with the submodule path and the output of git describe fo
// the SHA-1. Each SHA-1 will be prefixed with - if the submodule is not
// initialized, + if the currently checked out submodule commit does not match
// the SHA-1 found in the index of the containing repository.
func (s *SubmoduleStatus) String() string {
	var extra string
	var status = ' '

	if s.Current.IsZero() {
		status = '-'
	} else if !s.IsClean() {
		status = '+'
	}

	if len(s.Branch) != 0 {
		// Strip the "refs/" prefix from the branch reference name.
		extra = string(s.Branch[5:])
	} else if !s.Current.IsZero() {
		extra = s.Current.String()[:7]
	}

	if extra != "" {
		extra = fmt.Sprintf(" (%s)", extra)
	}

	return fmt.Sprintf("%c%s %s%s", status, s.Expected, s.Path, extra)
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/common.go
vendor/github.com/jesseduffield/go-git/v5/common.go
package git import "strings" // countLines returns the number of lines in a string à la git, this is // The newline character is assumed to be '\n'. The empty string // contains 0 lines. If the last line of the string doesn't end with a // newline, it will still be considered a line. func countLines(s string) int { if s == "" { return 0 } nEOL := strings.Count(s, "\n") if strings.HasSuffix(s, "\n") { return nEOL } return nEOL + 1 }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/storage/storer.go
vendor/github.com/jesseduffield/go-git/v5/storage/storer.go
package storage import ( "errors" "github.com/jesseduffield/go-git/v5/config" "github.com/jesseduffield/go-git/v5/plumbing/storer" ) var ErrReferenceHasChanged = errors.New("reference has changed concurrently") // Storer is a generic storage of objects, references and any information // related to a particular repository. The package github.com/jesseduffield/go-git/v5/storage // contains two implementation a filesystem base implementation (such as `.git`) // and a memory implementations being ephemeral type Storer interface { storer.EncodedObjectStorer storer.ReferenceStorer storer.ShallowStorer storer.IndexStorer config.ConfigStorer ModuleStorer } // ModuleStorer allows interact with the modules' Storers type ModuleStorer interface { // Module returns a Storer representing a submodule, if not exists returns a // new empty Storer is returned Module(name string) (Storer, error) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/storage/memory/storage.go
vendor/github.com/jesseduffield/go-git/v5/storage/memory/storage.go
// Package memory is a storage backend base on memory
package memory

import (
	"fmt"
	"time"

	"github.com/jesseduffield/go-git/v5/config"
	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/format/index"
	"github.com/jesseduffield/go-git/v5/plumbing/storer"
	"github.com/jesseduffield/go-git/v5/storage"
)

var ErrUnsupportedObjectType = fmt.Errorf("unsupported object type")

// Storage is an implementation of git.Storer that stores data on memory, being
// ephemeral. The use of this storage should be done in controlled environments,
// since the representation in memory of some repository can fill the machine
// memory. in the other hand this storage has the best performance.
type Storage struct {
	ConfigStorage
	ObjectStorage
	ShallowStorage
	IndexStorage
	ReferenceStorage
	ModuleStorage
}

// NewStorage returns a new Storage base on memory
func NewStorage() *Storage {
	return &Storage{
		ReferenceStorage: make(ReferenceStorage),
		ConfigStorage:    ConfigStorage{},
		ShallowStorage:   ShallowStorage{},
		ObjectStorage: ObjectStorage{
			Objects: make(map[plumbing.Hash]plumbing.EncodedObject),
			Commits: make(map[plumbing.Hash]plumbing.EncodedObject),
			Trees:   make(map[plumbing.Hash]plumbing.EncodedObject),
			Blobs:   make(map[plumbing.Hash]plumbing.EncodedObject),
			Tags:    make(map[plumbing.Hash]plumbing.EncodedObject),
		},
		ModuleStorage: make(ModuleStorage),
	}
}

// ConfigStorage holds the repository configuration in memory.
type ConfigStorage struct {
	config *config.Config
}

// SetConfig validates and stores cfg.
func (c *ConfigStorage) SetConfig(cfg *config.Config) error {
	if err := cfg.Validate(); err != nil {
		return err
	}

	c.config = cfg
	return nil
}

// Config returns the stored configuration, lazily creating a default one.
func (c *ConfigStorage) Config() (*config.Config, error) {
	if c.config == nil {
		c.config = config.NewConfig()
	}

	return c.config, nil
}

// IndexStorage holds the index in memory.
type IndexStorage struct {
	index *index.Index
}

// SetIndex stores idx.
func (c *IndexStorage) SetIndex(idx *index.Index) error {
	c.index = idx
	return nil
}

// Index returns the stored index, lazily creating an empty v2 index.
func (c *IndexStorage) Index() (*index.Index, error) {
	if c.index == nil {
		c.index = &index.Index{Version: 2}
	}

	return c.index, nil
}

// ObjectStorage keeps every encoded object in Objects, plus one map per
// object type for typed iteration.
type ObjectStorage struct {
	Objects map[plumbing.Hash]plumbing.EncodedObject
	Commits map[plumbing.Hash]plumbing.EncodedObject
	Trees   map[plumbing.Hash]plumbing.EncodedObject
	Blobs   map[plumbing.Hash]plumbing.EncodedObject
	Tags    map[plumbing.Hash]plumbing.EncodedObject
}

func (o *ObjectStorage) NewEncodedObject() plumbing.EncodedObject {
	return &plumbing.MemoryObject{}
}

// SetEncodedObject stores obj under its hash and indexes it by type.
func (o *ObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) {
	h := obj.Hash()
	o.Objects[h] = obj

	switch obj.Type() {
	case plumbing.CommitObject:
		o.Commits[h] = o.Objects[h]
	case plumbing.TreeObject:
		o.Trees[h] = o.Objects[h]
	case plumbing.BlobObject:
		o.Blobs[h] = o.Objects[h]
	case plumbing.TagObject:
		o.Tags[h] = o.Objects[h]
	default:
		return h, ErrUnsupportedObjectType
	}

	return h, nil
}

// HasEncodedObject returns nil if the object exists, otherwise
// plumbing.ErrObjectNotFound.
func (o *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) {
	if _, ok := o.Objects[h]; !ok {
		return plumbing.ErrObjectNotFound
	}
	return nil
}

// EncodedObjectSize returns the size of the stored object.
func (o *ObjectStorage) EncodedObjectSize(h plumbing.Hash) (
	size int64, err error) {
	obj, ok := o.Objects[h]
	if !ok {
		return 0, plumbing.ErrObjectNotFound
	}

	return obj.Size(), nil
}

// EncodedObject returns the object with hash h, optionally filtered by
// type t (plumbing.AnyObject matches all).
func (o *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
	obj, ok := o.Objects[h]
	if !ok || (plumbing.AnyObject != t && obj.Type() != t) {
		return nil, plumbing.ErrObjectNotFound
	}

	return obj, nil
}

// IterEncodedObjects iterates over all stored objects of type t.
func (o *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) {
	var series []plumbing.EncodedObject
	switch t {
	case plumbing.AnyObject:
		series = flattenObjectMap(o.Objects)
	case plumbing.CommitObject:
		series = flattenObjectMap(o.Commits)
	case plumbing.TreeObject:
		series = flattenObjectMap(o.Trees)
	case plumbing.BlobObject:
		series = flattenObjectMap(o.Blobs)
	case plumbing.TagObject:
		series = flattenObjectMap(o.Tags)
	}

	return storer.NewEncodedObjectSliceIter(series), nil
}

// flattenObjectMap collects map values into a slice (iteration order is
// unspecified, per Go map semantics).
func flattenObjectMap(m map[plumbing.Hash]plumbing.EncodedObject) []plumbing.EncodedObject {
	objects := make([]plumbing.EncodedObject, 0, len(m))
	for _, obj := range m {
		objects = append(objects, obj)
	}
	return objects
}

// Begin starts a transaction whose writes are buffered until Commit.
func (o *ObjectStorage) Begin() storer.Transaction {
	return &TxObjectStorage{
		Storage: o,
		Objects: make(map[plumbing.Hash]plumbing.EncodedObject),
	}
}

// ForEachObjectHash calls fun for every stored hash; storer.ErrStop aborts
// the iteration without error.
func (o *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error {
	for h := range o.Objects {
		err := fun(h)
		if err != nil {
			if err == storer.ErrStop {
				return nil
			}
			return err
		}
	}
	return nil
}

// ObjectPacks reports no packfiles: memory storage keeps objects loose.
func (o *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) {
	return nil, nil
}

func (o *ObjectStorage) DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error {
	return nil
}

var errNotSupported = fmt.Errorf("not supported")

// LooseObjectTime is not supported by the memory backend.
func (o *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) {
	return time.Time{}, errNotSupported
}

// DeleteLooseObject is not supported by the memory backend.
func (o *ObjectStorage) DeleteLooseObject(plumbing.Hash) error {
	return errNotSupported
}

// AddAlternate is not supported by the memory backend.
func (o *ObjectStorage) AddAlternate(remote string) error {
	return errNotSupported
}

// TxObjectStorage buffers object writes; Commit flushes them to the
// underlying Storage, Rollback discards them.
type TxObjectStorage struct {
	Storage *ObjectStorage
	Objects map[plumbing.Hash]plumbing.EncodedObject
}

func (tx *TxObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) {
	h := obj.Hash()
	tx.Objects[h] = obj

	return h, nil
}

func (tx *TxObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
	obj, ok := tx.Objects[h]
	if !ok || (plumbing.AnyObject != t && obj.Type() != t) {
		return nil, plumbing.ErrObjectNotFound
	}

	return obj, nil
}

// Commit moves every buffered object into the backing Storage.
func (tx *TxObjectStorage) Commit() error {
	for h, obj := range tx.Objects {
		delete(tx.Objects, h)
		if _, err := tx.Storage.SetEncodedObject(obj); err != nil {
			return err
		}
	}

	return nil
}

// Rollback discards all buffered objects.
func (tx *TxObjectStorage) Rollback() error {
	tx.Objects = make(map[plumbing.Hash]plumbing.EncodedObject)
	return nil
}

// ReferenceStorage keeps references in a plain map keyed by name.
type ReferenceStorage map[plumbing.ReferenceName]*plumbing.Reference

func (r ReferenceStorage) SetReference(ref *plumbing.Reference) error {
	if ref != nil {
		r[ref.Name()] = ref
	}

	return nil
}

// CheckAndSetReference sets ref only if the stored reference still matches
// old (compare-and-set); otherwise storage.ErrReferenceHasChanged.
func (r ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error {
	if ref == nil {
		return nil
	}

	if old != nil {
		tmp := r[ref.Name()]
		if tmp != nil && tmp.Hash() != old.Hash() {
			return storage.ErrReferenceHasChanged
		}
	}
	r[ref.Name()] = ref
	return nil
}

func (r ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) {
	ref, ok := r[n]
	if !ok {
		return nil, plumbing.ErrReferenceNotFound
	}

	return ref, nil
}

func (r ReferenceStorage) IterReferences() (storer.ReferenceIter, error) {
	var refs []*plumbing.Reference
	for _, ref := range r {
		refs = append(refs, ref)
	}

	return storer.NewReferenceSliceIter(refs), nil
}

// CountLooseRefs reports every reference as loose (nothing is packed here).
func (r ReferenceStorage) CountLooseRefs() (int, error) {
	return len(r), nil
}

// PackRefs is a no-op for memory storage.
func (r ReferenceStorage) PackRefs() error {
	return nil
}

func (r ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error {
	delete(r, n)
	return nil
}

// ShallowStorage keeps the list of shallow commit roots.
type ShallowStorage []plumbing.Hash

func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error {
	*s = commits
	return nil
}

func (s ShallowStorage) Shallow() ([]plumbing.Hash, error) {
	return s, nil
}

// ModuleStorage maps submodule names to their own memory Storage.
type ModuleStorage map[string]*Storage

// Module returns the Storage for name, creating an empty one on first use.
func (s ModuleStorage) Module(name string) (storage.Storer, error) {
	if m, ok := s[name]; ok {
		return m, nil
	}

	m := NewStorage()
	s[name] = m
	return m, nil
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/storage.go
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/storage.go
// Package filesystem is a storage backend base on filesystems package filesystem import ( "github.com/jesseduffield/go-git/v5/plumbing/cache" "github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit" "github.com/go-git/go-billy/v5" ) // Storage is an implementation of git.Storer that stores data on disk in the // standard git format (this is, the .git directory). Zero values of this type // are not safe to use, see the NewStorage function below. type Storage struct { fs billy.Filesystem dir *dotgit.DotGit ObjectStorage ReferenceStorage IndexStorage ShallowStorage ConfigStorage ModuleStorage } // Options holds configuration for the storage. type Options struct { // ExclusiveAccess means that the filesystem is not modified externally // while the repo is open. ExclusiveAccess bool // KeepDescriptors makes the file descriptors to be reused but they will // need to be manually closed calling Close(). KeepDescriptors bool // MaxOpenDescriptors is the max number of file descriptors to keep // open. If KeepDescriptors is true, all file descriptors will remain open. MaxOpenDescriptors int // LargeObjectThreshold maximum object size (in bytes) that will be read in to memory. // If left unset or set to 0 there is no limit LargeObjectThreshold int64 // AlternatesFS provides the billy filesystem to be used for Git Alternates. // If none is provided, it falls back to using the underlying instance used for // DotGit. AlternatesFS billy.Filesystem } // NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache. func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage { return NewStorageWithOptions(fs, cache, Options{}) } // NewStorageWithOptions returns a new Storage with extra options, // backed by a given `fs.Filesystem` and cache. 
func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage { dirOps := dotgit.Options{ ExclusiveAccess: ops.ExclusiveAccess, AlternatesFS: ops.AlternatesFS, } dir := dotgit.NewWithOptions(fs, dirOps) return &Storage{ fs: fs, dir: dir, ObjectStorage: *NewObjectStorageWithOptions(dir, cache, ops), ReferenceStorage: ReferenceStorage{dir: dir}, IndexStorage: IndexStorage{dir: dir}, ShallowStorage: ShallowStorage{dir: dir}, ConfigStorage: ConfigStorage{dir: dir}, ModuleStorage: ModuleStorage{dir: dir}, } } // Filesystem returns the underlying filesystem func (s *Storage) Filesystem() billy.Filesystem { return s.fs } // Init initializes .git directory func (s *Storage) Init() error { return s.dir.Initialize() } func (s *Storage) AddAlternate(remote string) error { return s.dir.AddAlternate(remote) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/index.go
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/index.go
package filesystem import ( "bufio" "os" "github.com/jesseduffield/go-git/v5/plumbing/format/index" "github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit" "github.com/jesseduffield/go-git/v5/utils/ioutil" ) type IndexStorage struct { dir *dotgit.DotGit } func (s *IndexStorage) SetIndex(idx *index.Index) (err error) { f, err := s.dir.IndexWriter() if err != nil { return err } defer ioutil.CheckClose(f, &err) bw := bufio.NewWriter(f) defer func() { if e := bw.Flush(); err == nil && e != nil { err = e } }() e := index.NewEncoder(bw) err = e.Encode(idx) return err } func (s *IndexStorage) Index() (i *index.Index, err error) { idx := &index.Index{ Version: 2, } f, err := s.dir.Index() if err != nil { if os.IsNotExist(err) { return idx, nil } return nil, err } defer ioutil.CheckClose(f, &err) d := index.NewDecoder(f) err = d.Decode(idx) return idx, err }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/config.go
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/config.go
package filesystem import ( "os" "github.com/jesseduffield/go-git/v5/config" "github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit" "github.com/jesseduffield/go-git/v5/utils/ioutil" ) type ConfigStorage struct { dir *dotgit.DotGit } func (c *ConfigStorage) Config() (conf *config.Config, err error) { f, err := c.dir.Config() if err != nil { if os.IsNotExist(err) { return config.NewConfig(), nil } return nil, err } defer ioutil.CheckClose(f, &err) return config.ReadConfig(f) } func (c *ConfigStorage) SetConfig(cfg *config.Config) (err error) { if err = cfg.Validate(); err != nil { return err } f, err := c.dir.ConfigWriter() if err != nil { return err } defer ioutil.CheckClose(f, &err) b, err := cfg.Marshal() if err != nil { return err } _, err = f.Write(b) return err }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/deltaobject.go
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/deltaobject.go
package filesystem import ( "github.com/jesseduffield/go-git/v5/plumbing" ) type deltaObject struct { plumbing.EncodedObject base plumbing.Hash hash plumbing.Hash size int64 } func newDeltaObject( obj plumbing.EncodedObject, hash plumbing.Hash, base plumbing.Hash, size int64) plumbing.DeltaObject { return &deltaObject{ EncodedObject: obj, hash: hash, base: base, size: size, } } func (o *deltaObject) BaseHash() plumbing.Hash { return o.base } func (o *deltaObject) ActualSize() int64 { return o.size } func (o *deltaObject) ActualHash() plumbing.Hash { return o.hash }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/object.go
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/object.go
package filesystem

import (
	"bytes"
	"io"
	"os"
	"sync"
	"time"

	"github.com/jesseduffield/go-git/v5/plumbing"
	"github.com/jesseduffield/go-git/v5/plumbing/cache"
	"github.com/jesseduffield/go-git/v5/plumbing/format/idxfile"
	"github.com/jesseduffield/go-git/v5/plumbing/format/objfile"
	"github.com/jesseduffield/go-git/v5/plumbing/format/packfile"
	"github.com/jesseduffield/go-git/v5/plumbing/storer"
	"github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit"
	"github.com/jesseduffield/go-git/v5/utils/ioutil"

	"github.com/go-git/go-billy/v5"
)

// ObjectStorage reads and writes git objects (loose and packed) inside a
// .git directory.
type ObjectStorage struct {
	options Options

	// objectCache is an object cache uses to cache delta's bases and also recently
	// loaded loose objects
	objectCache cache.Object

	dir   *dotgit.DotGit
	index map[plumbing.Hash]idxfile.Index

	// packList/packListIdx implement a simple round-robin eviction ring of
	// open packfiles when MaxOpenDescriptors is set; packfiles holds the
	// currently open ones.
	packList    []plumbing.Hash
	packListIdx int
	packfiles   map[plumbing.Hash]*packfile.Packfile
}

// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache.
func NewObjectStorage(dir *dotgit.DotGit, objectCache cache.Object) *ObjectStorage {
	return NewObjectStorageWithOptions(dir, objectCache, Options{})
}

// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git directory, cache and extra options
func NewObjectStorageWithOptions(dir *dotgit.DotGit, objectCache cache.Object, ops Options) *ObjectStorage {
	return &ObjectStorage{
		options:     ops,
		objectCache: objectCache,
		dir:         dir,
	}
}

// requireIndex lazily loads the .idx file of every packfile on first use.
func (s *ObjectStorage) requireIndex() error {
	if s.index != nil {
		return nil
	}

	s.index = make(map[plumbing.Hash]idxfile.Index)
	packs, err := s.dir.ObjectPacks()
	if err != nil {
		return err
	}

	for _, h := range packs {
		if err := s.loadIdxFile(h); err != nil {
			return err
		}
	}

	return nil
}

// Reindex indexes again all packfiles. Useful if git changed packfiles externally
func (s *ObjectStorage) Reindex() {
	// Dropping the map forces requireIndex to reload on next access.
	s.index = nil
}

// loadIdxFile decodes the .idx file for pack h into memory.
func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) {
	f, err := s.dir.ObjectPackIdx(h)
	if err != nil {
		return err
	}

	defer ioutil.CheckClose(f, &err)

	idxf := idxfile.NewMemoryIndex()
	d := idxfile.NewDecoder(f)
	if err = d.Decode(idxf); err != nil {
		return err
	}

	s.index[h] = idxf
	return err
}

func (s *ObjectStorage) NewEncodedObject() plumbing.EncodedObject {
	return &plumbing.MemoryObject{}
}

// PackfileWriter returns a writer for a new packfile; once written, its
// index is registered via the Notify callback.
func (s *ObjectStorage) PackfileWriter() (io.WriteCloser, error) {
	if err := s.requireIndex(); err != nil {
		return nil, err
	}

	w, err := s.dir.NewObjectPack()
	if err != nil {
		return nil, err
	}

	w.Notify = func(h plumbing.Hash, writer *idxfile.Writer) {
		index, err := writer.Index()
		if err == nil {
			s.index[h] = index
		}
	}

	return w, nil
}

// SetEncodedObject adds a new object to the storage.
func (s *ObjectStorage) SetEncodedObject(o plumbing.EncodedObject) (h plumbing.Hash, err error) {
	// Delta objects cannot be stored loose; they only exist inside packs.
	if o.Type() == plumbing.OFSDeltaObject || o.Type() == plumbing.REFDeltaObject {
		return plumbing.ZeroHash, plumbing.ErrInvalidType
	}

	ow, err := s.dir.NewObject()
	if err != nil {
		return plumbing.ZeroHash, err
	}

	defer ioutil.CheckClose(ow, &err)

	or, err := o.Reader()
	if err != nil {
		return plumbing.ZeroHash, err
	}

	defer ioutil.CheckClose(or, &err)

	if err = ow.WriteHeader(o.Type(), o.Size()); err != nil {
		return plumbing.ZeroHash, err
	}

	if _, err = io.Copy(ow, or); err != nil {
		return plumbing.ZeroHash, err
	}

	return o.Hash(), err
}

// LazyWriter returns a lazy ObjectWriter that is bound to a DotGit file.
// It first write the header passing on the object type and size, so
// that the object contents can be written later, without the need to
// create a MemoryObject and buffering its entire contents into memory.
func (s *ObjectStorage) LazyWriter() (w io.WriteCloser, wh func(typ plumbing.ObjectType, sz int64) error, err error) {
	ow, err := s.dir.NewObject()
	if err != nil {
		return nil, nil, err
	}

	return ow, ow.WriteHeader, nil
}

// HasEncodedObject returns nil if the object exists, without actually
// reading the object data from storage.
func (s *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) {
	// Check unpacked objects
	f, err := s.dir.Object(h)
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		// Fall through to check packed objects.
	} else {
		defer ioutil.CheckClose(f, &err)
		return nil
	}

	// Check packed objects.
	if err := s.requireIndex(); err != nil {
		return err
	}

	_, _, offset := s.findObjectInPackfile(h)
	if offset == -1 {
		return plumbing.ErrObjectNotFound
	}

	return nil
}

// encodedObjectSizeFromUnpacked reads only the loose-object header to get
// the object size, without decoding the body.
func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) (
	size int64, err error) {
	f, err := s.dir.Object(h)
	if err != nil {
		if os.IsNotExist(err) {
			return 0, plumbing.ErrObjectNotFound
		}

		return 0, err
	}

	r, err := objfile.NewReader(f)
	if err != nil {
		return 0, err
	}

	defer ioutil.CheckClose(r, &err)

	_, size, err = r.Header()
	return size, err
}

// packfile returns an open Packfile for pack, reusing a cached handle when
// one exists.
func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfile.Packfile, error) {
	if p := s.packfileFromCache(pack); p != nil {
		return p, nil
	}

	f, err := s.dir.ObjectPack(pack)
	if err != nil {
		return nil, err
	}

	var p *packfile.Packfile
	if s.objectCache != nil {
		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache, s.options.LargeObjectThreshold)
	} else {
		p = packfile.NewPackfile(idx, s.dir.Fs(), f, s.options.LargeObjectThreshold)
	}

	return p, s.storePackfileInCache(pack, p)
}

// packfileFromCache looks up an already-open packfile, lazily initializing
// the cache structures according to the descriptor options.
func (s *ObjectStorage) packfileFromCache(hash plumbing.Hash) *packfile.Packfile {
	if s.packfiles == nil {
		if s.options.KeepDescriptors {
			s.packfiles = make(map[plumbing.Hash]*packfile.Packfile)
		} else if s.options.MaxOpenDescriptors > 0 {
			s.packList = make([]plumbing.Hash, s.options.MaxOpenDescriptors)
			s.packfiles = make(map[plumbing.Hash]*packfile.Packfile, s.options.MaxOpenDescriptors)
		}
	}

	return s.packfiles[hash]
}

// storePackfileInCache records p in the cache, evicting (and closing) the
// oldest open packfile when the MaxOpenDescriptors ring is full.
func (s *ObjectStorage) storePackfileInCache(hash plumbing.Hash, p *packfile.Packfile) error {
	if s.options.KeepDescriptors {
		s.packfiles[hash] = p
		return nil
	}

	if s.options.MaxOpenDescriptors <= 0 {
		return nil
	}

	// start over as the limit of packList is hit
	if s.packListIdx >= len(s.packList) {
		s.packListIdx = 0
	}

	// close the existing packfile if open
	if next := s.packList[s.packListIdx]; !next.IsZero() {
		open := s.packfiles[next]
		delete(s.packfiles, next)
		if open != nil {
			if err := open.Close(); err != nil {
				return err
			}
		}
	}

	// cache newly open packfile
	s.packList[s.packListIdx] = hash
	s.packfiles[hash] = p
	s.packListIdx++

	return nil
}

// encodedObjectSizeFromPackfile resolves the object's size from its
// packfile, consulting the object cache first.
func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) (
	size int64, err error) {
	if err := s.requireIndex(); err != nil {
		return 0, err
	}

	pack, _, offset := s.findObjectInPackfile(h)
	if offset == -1 {
		return 0, plumbing.ErrObjectNotFound
	}

	idx := s.index[pack]
	hash, err := idx.FindHash(offset)
	if err == nil {
		obj, ok := s.objectCache.Get(hash)
		if ok {
			return obj.Size(), nil
		}
	} else if err != nil && err != plumbing.ErrObjectNotFound {
		return 0, err
	}

	p, err := s.packfile(idx, pack)
	if err != nil {
		return 0, err
	}

	// When no descriptor-keeping option is active, close the packfile as
	// soon as the size is read.
	if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 {
		defer ioutil.CheckClose(p, &err)
	}

	return p.GetSizeByOffset(offset)
}

// EncodedObjectSize returns the plaintext size of the given object,
// without actually reading the full object data from storage.
func (s *ObjectStorage) EncodedObjectSize(h plumbing.Hash) (
	size int64, err error) {
	size, err = s.encodedObjectSizeFromUnpacked(h)
	if err != nil && err != plumbing.ErrObjectNotFound {
		return 0, err
	} else if err == nil {
		return size, nil
	}

	return s.encodedObjectSizeFromPackfile(h)
}

// EncodedObject returns the object with the given hash, by searching for it in
// the packfile and the git object directories.
func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { var obj plumbing.EncodedObject var err error if s.index != nil { obj, err = s.getFromPackfile(h, false) if err == plumbing.ErrObjectNotFound { obj, err = s.getFromUnpacked(h) } } else { obj, err = s.getFromUnpacked(h) if err == plumbing.ErrObjectNotFound { obj, err = s.getFromPackfile(h, false) } } // If the error is still object not found, check if it's a shared object // repository. if err == plumbing.ErrObjectNotFound { dotgits, e := s.dir.Alternates() if e == nil { // Create a new object storage with the DotGit(s) and check for the // required hash object. Skip when not found. for _, dg := range dotgits { o := NewObjectStorage(dg, s.objectCache) enobj, enerr := o.EncodedObject(t, h) if enerr != nil { continue } return enobj, nil } } } if err != nil { return nil, err } if plumbing.AnyObject != t && obj.Type() != t { return nil, plumbing.ErrObjectNotFound } return obj, nil } // DeltaObject returns the object with the given hash, by searching for // it in the packfile and the git object directories. 
func (s *ObjectStorage) DeltaObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { obj, err := s.getFromUnpacked(h) if err == plumbing.ErrObjectNotFound { obj, err = s.getFromPackfile(h, true) } if err != nil { return nil, err } if plumbing.AnyObject != t && obj.Type() != t { return nil, plumbing.ErrObjectNotFound } return obj, nil } func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedObject, err error) { f, err := s.dir.Object(h) if err != nil { if os.IsNotExist(err) { return nil, plumbing.ErrObjectNotFound } return nil, err } defer ioutil.CheckClose(f, &err) if cacheObj, found := s.objectCache.Get(h); found { return cacheObj, nil } r, err := objfile.NewReader(f) if err != nil { return nil, err } defer ioutil.CheckClose(r, &err) t, size, err := r.Header() if err != nil { return nil, err } if s.options.LargeObjectThreshold > 0 && size > s.options.LargeObjectThreshold { obj = dotgit.NewEncodedObject(s.dir, h, t, size) return obj, nil } obj = s.NewEncodedObject() obj.SetType(t) obj.SetSize(size) w, err := obj.Writer() if err != nil { return nil, err } defer ioutil.CheckClose(w, &err) bufp := copyBufferPool.Get().(*[]byte) buf := *bufp _, err = io.CopyBuffer(w, r, buf) copyBufferPool.Put(bufp) s.objectCache.Put(obj) return obj, err } var copyBufferPool = sync.Pool{ New: func() interface{} { b := make([]byte, 32*1024) return &b }, } // Get returns the object with the given hash, by searching for it in // the packfile. 
func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) ( plumbing.EncodedObject, error) { if err := s.requireIndex(); err != nil { return nil, err } pack, hash, offset := s.findObjectInPackfile(h) if offset == -1 { return nil, plumbing.ErrObjectNotFound } idx := s.index[pack] p, err := s.packfile(idx, pack) if err != nil { return nil, err } if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 { defer ioutil.CheckClose(p, &err) } if canBeDelta { return s.decodeDeltaObjectAt(p, offset, hash) } return s.decodeObjectAt(p, offset) } func (s *ObjectStorage) decodeObjectAt( p *packfile.Packfile, offset int64, ) (plumbing.EncodedObject, error) { hash, err := p.FindHash(offset) if err == nil { obj, ok := s.objectCache.Get(hash) if ok { return obj, nil } } if err != nil && err != plumbing.ErrObjectNotFound { return nil, err } return p.GetByOffset(offset) } func (s *ObjectStorage) decodeDeltaObjectAt( p *packfile.Packfile, offset int64, hash plumbing.Hash, ) (plumbing.EncodedObject, error) { scan := p.Scanner() header, err := scan.SeekObjectHeader(offset) if err != nil { return nil, err } var ( base plumbing.Hash ) switch header.Type { case plumbing.REFDeltaObject: base = header.Reference case plumbing.OFSDeltaObject: base, err = p.FindHash(header.OffsetReference) if err != nil { return nil, err } default: return s.decodeObjectAt(p, offset) } obj := &plumbing.MemoryObject{} obj.SetType(header.Type) w, err := obj.Writer() if err != nil { return nil, err } if _, _, err := scan.NextObject(w); err != nil { return nil, err } return newDeltaObject(obj, hash, base, header.Length), nil } func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, plumbing.Hash, int64) { for packfile, index := range s.index { offset, err := index.FindOffset(h) if err == nil { return packfile, h, offset } } return plumbing.ZeroHash, plumbing.ZeroHash, -1 } // HashesWithPrefix returns all objects with a hash that starts with a prefix by searching for // 
them in the packfile and the git object directories. func (s *ObjectStorage) HashesWithPrefix(prefix []byte) ([]plumbing.Hash, error) { hashes, err := s.dir.ObjectsWithPrefix(prefix) if err != nil { return nil, err } seen := hashListAsMap(hashes) // TODO: This could be faster with some idxfile changes, // or diving into the packfile. if err := s.requireIndex(); err != nil { return nil, err } for _, index := range s.index { ei, err := index.Entries() if err != nil { return nil, err } for { e, err := ei.Next() if err == io.EOF { break } else if err != nil { return nil, err } if bytes.HasPrefix(e.Hash[:], prefix) { if _, ok := seen[e.Hash]; ok { continue } hashes = append(hashes, e.Hash) } } ei.Close() } return hashes, nil } // IterEncodedObjects returns an iterator for all the objects in the packfile // with the given type. func (s *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) { objects, err := s.dir.Objects() if err != nil { return nil, err } seen := make(map[plumbing.Hash]struct{}) var iters []storer.EncodedObjectIter if len(objects) != 0 { iters = append(iters, &objectsIter{s: s, t: t, h: objects}) seen = hashListAsMap(objects) } packi, err := s.buildPackfileIters(t, seen) if err != nil { return nil, err } iters = append(iters, packi) return storer.NewMultiEncodedObjectIter(iters), nil } func (s *ObjectStorage) buildPackfileIters( t plumbing.ObjectType, seen map[plumbing.Hash]struct{}, ) (storer.EncodedObjectIter, error) { if err := s.requireIndex(); err != nil { return nil, err } packs, err := s.dir.ObjectPacks() if err != nil { return nil, err } return &lazyPackfilesIter{ hashes: packs, open: func(h plumbing.Hash) (storer.EncodedObjectIter, error) { pack, err := s.dir.ObjectPack(h) if err != nil { return nil, err } return newPackfileIter( s.dir.Fs(), pack, t, seen, s.index[h], s.objectCache, s.options.KeepDescriptors, s.options.LargeObjectThreshold, ) }, }, nil } // Close closes all opened files. 
func (s *ObjectStorage) Close() error { var firstError error if s.options.KeepDescriptors || s.options.MaxOpenDescriptors > 0 { for _, packfile := range s.packfiles { err := packfile.Close() if firstError == nil && err != nil { firstError = err } } } s.packfiles = nil s.dir.Close() return firstError } type lazyPackfilesIter struct { hashes []plumbing.Hash open func(h plumbing.Hash) (storer.EncodedObjectIter, error) cur storer.EncodedObjectIter } func (it *lazyPackfilesIter) Next() (plumbing.EncodedObject, error) { for { if it.cur == nil { if len(it.hashes) == 0 { return nil, io.EOF } h := it.hashes[0] it.hashes = it.hashes[1:] sub, err := it.open(h) if err == io.EOF { continue } else if err != nil { return nil, err } it.cur = sub } ob, err := it.cur.Next() if err == io.EOF { it.cur.Close() it.cur = nil continue } else if err != nil { return nil, err } return ob, nil } } func (it *lazyPackfilesIter) ForEach(cb func(plumbing.EncodedObject) error) error { return storer.ForEachIterator(it, cb) } func (it *lazyPackfilesIter) Close() { if it.cur != nil { it.cur.Close() it.cur = nil } it.hashes = nil } type packfileIter struct { pack billy.File iter storer.EncodedObjectIter seen map[plumbing.Hash]struct{} // tells whether the pack file should be left open after iteration or not keepPack bool } // NewPackfileIter returns a new EncodedObjectIter for the provided packfile // and object type. Packfile and index file will be closed after they're // used. If keepPack is true the packfile won't be closed after the iteration // finished. 
func NewPackfileIter( fs billy.Filesystem, f billy.File, idxFile billy.File, t plumbing.ObjectType, keepPack bool, largeObjectThreshold int64, ) (storer.EncodedObjectIter, error) { idx := idxfile.NewMemoryIndex() if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil { return nil, err } if err := idxFile.Close(); err != nil { return nil, err } seen := make(map[plumbing.Hash]struct{}) return newPackfileIter(fs, f, t, seen, idx, nil, keepPack, largeObjectThreshold) } func newPackfileIter( fs billy.Filesystem, f billy.File, t plumbing.ObjectType, seen map[plumbing.Hash]struct{}, index idxfile.Index, cache cache.Object, keepPack bool, largeObjectThreshold int64, ) (storer.EncodedObjectIter, error) { var p *packfile.Packfile if cache != nil { p = packfile.NewPackfileWithCache(index, fs, f, cache, largeObjectThreshold) } else { p = packfile.NewPackfile(index, fs, f, largeObjectThreshold) } iter, err := p.GetByType(t) if err != nil { return nil, err } return &packfileIter{ pack: f, iter: iter, seen: seen, keepPack: keepPack, }, nil } func (iter *packfileIter) Next() (plumbing.EncodedObject, error) { for { obj, err := iter.iter.Next() if err != nil { return nil, err } if _, ok := iter.seen[obj.Hash()]; ok { continue } return obj, nil } } func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error { for { o, err := iter.Next() if err != nil { if err == io.EOF { iter.Close() return nil } return err } if err := cb(o); err != nil { return err } } } func (iter *packfileIter) Close() { iter.iter.Close() if !iter.keepPack { _ = iter.pack.Close() } } type objectsIter struct { s *ObjectStorage t plumbing.ObjectType h []plumbing.Hash } func (iter *objectsIter) Next() (plumbing.EncodedObject, error) { if len(iter.h) == 0 { return nil, io.EOF } obj, err := iter.s.getFromUnpacked(iter.h[0]) iter.h = iter.h[1:] if err != nil { return nil, err } if iter.t != plumbing.AnyObject && iter.t != obj.Type() { return iter.Next() } return obj, err } func (iter 
*objectsIter) ForEach(cb func(plumbing.EncodedObject) error) error { for { o, err := iter.Next() if err != nil { if err == io.EOF { return nil } return err } if err := cb(o); err != nil { return err } } } func (iter *objectsIter) Close() { iter.h = []plumbing.Hash{} } func hashListAsMap(l []plumbing.Hash) map[plumbing.Hash]struct{} { m := make(map[plumbing.Hash]struct{}, len(l)) for _, h := range l { m[h] = struct{}{} } return m } func (s *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error { err := s.dir.ForEachObjectHash(fun) if err == storer.ErrStop { return nil } return err } func (s *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) { fi, err := s.dir.ObjectStat(hash) if err != nil { return time.Time{}, err } return fi.ModTime(), nil } func (s *ObjectStorage) DeleteLooseObject(hash plumbing.Hash) error { return s.dir.ObjectDelete(hash) } func (s *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) { return s.dir.ObjectPacks() } func (s *ObjectStorage) DeleteOldObjectPackAndIndex(h plumbing.Hash, t time.Time) error { return s.dir.DeleteOldObjectPackAndIndex(h, t) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/module.go
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/module.go
package filesystem import ( "github.com/jesseduffield/go-git/v5/plumbing/cache" "github.com/jesseduffield/go-git/v5/storage" "github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit" ) type ModuleStorage struct { dir *dotgit.DotGit } func (s *ModuleStorage) Module(name string) (storage.Storer, error) { fs, err := s.dir.Module(name) if err != nil { return nil, err } return NewStorage(fs, cache.NewObjectLRUDefault()), nil }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/shallow.go
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/shallow.go
package filesystem import ( "bufio" "fmt" "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit" "github.com/jesseduffield/go-git/v5/utils/ioutil" ) // ShallowStorage where the shallow commits are stored, an internal to // manipulate the shallow file type ShallowStorage struct { dir *dotgit.DotGit } // SetShallow save the shallows in the shallow file in the .git folder as one // commit per line represented by 40-byte hexadecimal object terminated by a // newline. func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error { f, err := s.dir.ShallowWriter() if err != nil { return err } defer ioutil.CheckClose(f, &err) for _, h := range commits { if _, err := fmt.Fprintf(f, "%s\n", h); err != nil { return err } } return err } // Shallow returns the shallow commits reading from shallo file from .git func (s *ShallowStorage) Shallow() ([]plumbing.Hash, error) { f, err := s.dir.Shallow() if f == nil || err != nil { return nil, err } defer ioutil.CheckClose(f, &err) var hash []plumbing.Hash scn := bufio.NewScanner(f) for scn.Scan() { hash = append(hash, plumbing.NewHash(scn.Text())) } return hash, scn.Err() }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/reference.go
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/reference.go
package filesystem import ( "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/plumbing/storer" "github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit" ) type ReferenceStorage struct { dir *dotgit.DotGit } func (r *ReferenceStorage) SetReference(ref *plumbing.Reference) error { return r.dir.SetRef(ref, nil) } func (r *ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error { return r.dir.SetRef(ref, old) } func (r *ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) { return r.dir.Ref(n) } func (r *ReferenceStorage) IterReferences() (storer.ReferenceIter, error) { refs, err := r.dir.Refs() if err != nil { return nil, err } return storer.NewReferenceSliceIter(refs), nil } func (r *ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error { return r.dir.RemoveRef(n) } func (r *ReferenceStorage) CountLooseRefs() (int, error) { return r.dir.CountLooseRefs() } func (r *ReferenceStorage) PackRefs() error { return r.dir.PackRefs() }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit/reader.go
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit/reader.go
package dotgit import ( "fmt" "io" "os" "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/plumbing/format/objfile" "github.com/jesseduffield/go-git/v5/utils/ioutil" ) var _ (plumbing.EncodedObject) = &EncodedObject{} type EncodedObject struct { dir *DotGit h plumbing.Hash t plumbing.ObjectType sz int64 } func (e *EncodedObject) Hash() plumbing.Hash { return e.h } func (e *EncodedObject) Reader() (io.ReadCloser, error) { f, err := e.dir.Object(e.h) if err != nil { if os.IsNotExist(err) { return nil, plumbing.ErrObjectNotFound } return nil, err } r, err := objfile.NewReader(f) if err != nil { return nil, err } t, size, err := r.Header() if err != nil { _ = r.Close() return nil, err } if t != e.t { _ = r.Close() return nil, objfile.ErrHeader } if size != e.sz { _ = r.Close() return nil, objfile.ErrHeader } return ioutil.NewReadCloserWithCloser(r, f.Close), nil } func (e *EncodedObject) SetType(plumbing.ObjectType) {} func (e *EncodedObject) Type() plumbing.ObjectType { return e.t } func (e *EncodedObject) Size() int64 { return e.sz } func (e *EncodedObject) SetSize(int64) {} func (e *EncodedObject) Writer() (io.WriteCloser, error) { return nil, fmt.Errorf("not supported") } func NewEncodedObject(dir *DotGit, h plumbing.Hash, t plumbing.ObjectType, size int64) *EncodedObject { return &EncodedObject{ dir: dir, h: h, t: t, sz: size, } }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go
package dotgit import ( "io" "os" "runtime" "github.com/go-git/go-billy/v5" "github.com/jesseduffield/go-git/v5/utils/ioutil" ) func (d *DotGit) openAndLockPackedRefsMode() int { if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) { return os.O_RDWR } return os.O_RDONLY } func (d *DotGit) rewritePackedRefsWhileLocked( tmp billy.File, pr billy.File) error { // Try plain rename. If we aren't using the bare Windows filesystem as the // storage layer, we might be able to get away with a rename over a locked // file. err := d.fs.Rename(tmp.Name(), pr.Name()) if err == nil { return nil } // If we are in a filesystem that does not support rename (e.g. sivafs) // a full copy is done. if err == billy.ErrNotSupported { return d.copyNewFile(tmp, pr) } if runtime.GOOS != "windows" { return err } // Otherwise, Windows doesn't let us rename over a locked file, so // we have to do a straight copy. Unfortunately this could result // in a partially-written file if the process fails before the // copy completes. return d.copyToExistingFile(tmp, pr) } func (d *DotGit) copyToExistingFile(tmp, pr billy.File) error { _, err := pr.Seek(0, io.SeekStart) if err != nil { return err } err = pr.Truncate(0) if err != nil { return err } _, err = tmp.Seek(0, io.SeekStart) if err != nil { return err } _, err = io.Copy(pr, tmp) return err } func (d *DotGit) copyNewFile(tmp billy.File, pr billy.File) (err error) { prWrite, err := d.fs.Create(pr.Name()) if err != nil { return err } defer ioutil.CheckClose(prWrite, &err) _, err = tmp.Seek(0, io.SeekStart) if err != nil { return err } _, err = io.Copy(prWrite, tmp) return err }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit/writers.go
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit/writers.go
package dotgit import ( "fmt" "io" "sync/atomic" "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/plumbing/format/idxfile" "github.com/jesseduffield/go-git/v5/plumbing/format/objfile" "github.com/jesseduffield/go-git/v5/plumbing/format/packfile" "github.com/jesseduffield/go-git/v5/plumbing/hash" "github.com/go-git/go-billy/v5" ) // PackWriter is a io.Writer that generates the packfile index simultaneously, // a packfile.Decoder is used with a file reader to read the file being written // this operation is synchronized with the write operations. // The packfile is written in a temp file, when Close is called this file // is renamed/moved (depends on the Filesystem implementation) to the final // location, if the PackWriter is not used, nothing is written type PackWriter struct { Notify func(plumbing.Hash, *idxfile.Writer) fs billy.Filesystem fr, fw billy.File synced *syncedReader checksum plumbing.Hash parser *packfile.Parser writer *idxfile.Writer result chan error } func newPackWrite(fs billy.Filesystem) (*PackWriter, error) { fw, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_pack_") if err != nil { return nil, err } fr, err := fs.Open(fw.Name()) if err != nil { return nil, err } writer := &PackWriter{ fs: fs, fw: fw, fr: fr, synced: newSyncedReader(fw, fr), result: make(chan error), } go writer.buildIndex() return writer, nil } func (w *PackWriter) buildIndex() { s := packfile.NewScanner(w.synced) w.writer = new(idxfile.Writer) var err error w.parser, err = packfile.NewParser(s, w.writer) if err != nil { w.result <- err return } checksum, err := w.parser.Parse() if err != nil { w.result <- err return } w.checksum = checksum w.result <- err } // waitBuildIndex waits until buildIndex function finishes, this can terminate // with a packfile.ErrEmptyPackfile, this means that nothing was written so we // ignore the error func (w *PackWriter) waitBuildIndex() error { err := <-w.result if err == packfile.ErrEmptyPackfile { 
return nil } return err } func (w *PackWriter) Write(p []byte) (int, error) { return w.synced.Write(p) } // Close closes all the file descriptors and save the final packfile, if nothing // was written, the tempfiles are deleted without writing a packfile. func (w *PackWriter) Close() error { defer func() { if w.Notify != nil && w.writer != nil && w.writer.Finished() { w.Notify(w.checksum, w.writer) } close(w.result) }() if err := w.synced.Close(); err != nil { return err } if err := w.waitBuildIndex(); err != nil { return err } if err := w.fr.Close(); err != nil { return err } if err := w.fw.Close(); err != nil { return err } if w.writer == nil || !w.writer.Finished() { return w.clean() } return w.save() } func (w *PackWriter) clean() error { return w.fs.Remove(w.fw.Name()) } func (w *PackWriter) save() error { base := w.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s", w.checksum)) idx, err := w.fs.Create(fmt.Sprintf("%s.idx", base)) if err != nil { return err } if err := w.encodeIdx(idx); err != nil { return err } if err := idx.Close(); err != nil { return err } return w.fs.Rename(w.fw.Name(), fmt.Sprintf("%s.pack", base)) } func (w *PackWriter) encodeIdx(writer io.Writer) error { idx, err := w.writer.Index() if err != nil { return err } e := idxfile.NewEncoder(writer) _, err = e.Encode(idx) return err } type syncedReader struct { w io.Writer r io.ReadSeeker blocked, done uint32 written, read uint64 news chan bool } func newSyncedReader(w io.Writer, r io.ReadSeeker) *syncedReader { return &syncedReader{ w: w, r: r, news: make(chan bool), } } func (s *syncedReader) Write(p []byte) (n int, err error) { defer func() { written := atomic.AddUint64(&s.written, uint64(n)) read := atomic.LoadUint64(&s.read) if written > read { s.wake() } }() n, err = s.w.Write(p) return } func (s *syncedReader) Read(p []byte) (n int, err error) { defer func() { atomic.AddUint64(&s.read, uint64(n)) }() for { s.sleep() n, err = s.r.Read(p) if err == io.EOF && !s.isDone() && n == 0 { 
continue } break } return } func (s *syncedReader) isDone() bool { return atomic.LoadUint32(&s.done) == 1 } func (s *syncedReader) isBlocked() bool { return atomic.LoadUint32(&s.blocked) == 1 } func (s *syncedReader) wake() { if s.isBlocked() { atomic.StoreUint32(&s.blocked, 0) s.news <- true } } func (s *syncedReader) sleep() { read := atomic.LoadUint64(&s.read) written := atomic.LoadUint64(&s.written) if read >= written { atomic.StoreUint32(&s.blocked, 1) <-s.news } } func (s *syncedReader) Seek(offset int64, whence int) (int64, error) { if whence == io.SeekCurrent { return s.r.Seek(offset, whence) } p, err := s.r.Seek(offset, whence) atomic.StoreUint64(&s.read, uint64(p)) return p, err } func (s *syncedReader) Close() error { atomic.StoreUint32(&s.done, 1) close(s.news) return nil } type ObjectWriter struct { objfile.Writer fs billy.Filesystem f billy.File } func newObjectWriter(fs billy.Filesystem) (*ObjectWriter, error) { f, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_obj_") if err != nil { return nil, err } return &ObjectWriter{ Writer: (*objfile.NewWriter(f)), fs: fs, f: f, }, nil } func (w *ObjectWriter) Close() error { if err := w.Writer.Close(); err != nil { return err } if err := w.f.Close(); err != nil { return err } return w.save() } func (w *ObjectWriter) save() error { hex := w.Hash().String() file := w.fs.Join(objectsPath, hex[0:2], hex[2:hash.HexSize]) return w.fs.Rename(w.f.Name(), file) }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit/dotgit.go
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit/dotgit.go
// https://github.com/git/git/blob/master/Documentation/gitrepository-layout.txt package dotgit import ( "bufio" "bytes" "errors" "fmt" "io" "os" "path" "path/filepath" "reflect" "runtime" "sort" "strings" "time" "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/plumbing/hash" "github.com/jesseduffield/go-git/v5/storage" "github.com/jesseduffield/go-git/v5/utils/ioutil" "github.com/go-git/go-billy/v5" "github.com/go-git/go-billy/v5/helper/chroot" ) const ( suffix = ".git" packedRefsPath = "packed-refs" configPath = "config" indexPath = "index" shallowPath = "shallow" modulePath = "modules" objectsPath = "objects" packPath = "pack" refsPath = "refs" branchesPath = "branches" hooksPath = "hooks" infoPath = "info" remotesPath = "remotes" logsPath = "logs" worktreesPath = "worktrees" alternatesPath = "alternates" tmpPackedRefsPrefix = "._packed-refs" packPrefix = "pack-" packExt = ".pack" idxExt = ".idx" ) var ( // ErrNotFound is returned by New when the path is not found. ErrNotFound = errors.New("path not found") // ErrIdxNotFound is returned by Idxfile when the idx file is not found ErrIdxNotFound = errors.New("idx file not found") // ErrPackfileNotFound is returned by Packfile when the packfile is not found ErrPackfileNotFound = errors.New("packfile not found") // ErrConfigNotFound is returned by Config when the config is not found ErrConfigNotFound = errors.New("config file not found") // ErrPackedRefsDuplicatedRef is returned when a duplicated reference is // found in the packed-ref file. This is usually the case for corrupted git // repositories. ErrPackedRefsDuplicatedRef = errors.New("duplicated ref found in packed-ref file") // ErrPackedRefsBadFormat is returned when the packed-ref file corrupt. ErrPackedRefsBadFormat = errors.New("malformed packed-ref") // ErrSymRefTargetNotFound is returned when a symbolic reference is // targeting a non-existing object. This usually means the repository // is corrupt. 
ErrSymRefTargetNotFound = errors.New("symbolic reference target not found") // ErrIsDir is returned when a reference file is attempting to be read, // but the path specified is a directory. ErrIsDir = errors.New("reference path is a directory") // ErrEmptyRefFile is returned when a reference file is attempted to be read, // but the file is empty ErrEmptyRefFile = errors.New("ref file is empty") ) // Options holds configuration for the storage. type Options struct { // ExclusiveAccess means that the filesystem is not modified externally // while the repo is open. ExclusiveAccess bool // KeepDescriptors makes the file descriptors to be reused but they will // need to be manually closed calling Close(). KeepDescriptors bool // AlternatesFS provides the billy filesystem to be used for Git Alternates. // If none is provided, it falls back to using the underlying instance used for // DotGit. AlternatesFS billy.Filesystem } // The DotGit type represents a local git repository on disk. This // type is not zero-value-safe, use the New function to initialize it. type DotGit struct { options Options fs billy.Filesystem // incoming object directory information incomingChecked bool incomingDirName string objectList []plumbing.Hash // sorted objectMap map[plumbing.Hash]struct{} packList []plumbing.Hash packMap map[plumbing.Hash]struct{} files map[plumbing.Hash]billy.File } // New returns a DotGit value ready to be used. The path argument must // be the absolute path of a git repository directory (e.g. // "/foo/bar/.git"). func New(fs billy.Filesystem) *DotGit { return NewWithOptions(fs, Options{}) } // NewWithOptions sets non default configuration options. // See New for complete help. func NewWithOptions(fs billy.Filesystem, o Options) *DotGit { return &DotGit{ options: o, fs: fs, } } // Initialize creates all the folder scaffolding. 
func (d *DotGit) Initialize() error { mustExists := []string{ d.fs.Join("objects", "info"), d.fs.Join("objects", "pack"), d.fs.Join("refs", "heads"), d.fs.Join("refs", "tags"), } for _, path := range mustExists { _, err := d.fs.Stat(path) if err == nil { continue } if !os.IsNotExist(err) { return err } if err := d.fs.MkdirAll(path, os.ModeDir|os.ModePerm); err != nil { return err } } return nil } // Close closes all opened files. func (d *DotGit) Close() error { var firstError error if d.files != nil { for _, f := range d.files { err := f.Close() if err != nil && firstError == nil { firstError = err continue } } d.files = nil } if firstError != nil { return firstError } return nil } // ConfigWriter returns a file pointer for write to the config file func (d *DotGit) ConfigWriter() (billy.File, error) { return d.fs.Create(configPath) } // Config returns a file pointer for read to the config file func (d *DotGit) Config() (billy.File, error) { return d.fs.Open(configPath) } // IndexWriter returns a file pointer for write to the index file func (d *DotGit) IndexWriter() (billy.File, error) { return d.fs.Create(indexPath) } // Index returns a file pointer for read to the index file func (d *DotGit) Index() (billy.File, error) { return d.fs.Open(indexPath) } // ShallowWriter returns a file pointer for write to the shallow file func (d *DotGit) ShallowWriter() (billy.File, error) { return d.fs.Create(shallowPath) } // Shallow returns a file pointer for read to the shallow file func (d *DotGit) Shallow() (billy.File, error) { f, err := d.fs.Open(shallowPath) if err != nil { if os.IsNotExist(err) { return nil, nil } return nil, err } return f, nil } // NewObjectPack return a writer for a new packfile, it saves the packfile to // disk and also generates and save the index for the given packfile. 
func (d *DotGit) NewObjectPack() (*PackWriter, error) {
	// A new pack invalidates the cached pack list.
	d.cleanPackList()
	return newPackWrite(d.fs)
}

// ObjectPacks returns the list of availables packfiles
func (d *DotGit) ObjectPacks() ([]plumbing.Hash, error) {
	if !d.options.ExclusiveAccess {
		// Filesystem may change underneath us: always re-scan.
		return d.objectPacks()
	}

	err := d.genPackList()
	if err != nil {
		return nil, err
	}

	return d.packList, nil
}

// objectPacks scans objects/pack for files named pack-<hash>.pack and
// returns their hashes.
func (d *DotGit) objectPacks() ([]plumbing.Hash, error) {
	packDir := d.fs.Join(objectsPath, packPath)
	files, err := d.fs.ReadDir(packDir)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil
		}

		return nil, err
	}

	var packs []plumbing.Hash
	for _, f := range files {
		n := f.Name()
		if !strings.HasSuffix(n, packExt) || !strings.HasPrefix(n, packPrefix) {
			continue
		}

		h := plumbing.NewHash(n[5 : len(n)-5]) // pack-(hash).pack
		if h.IsZero() {
			// Ignore files with badly-formatted names.
			continue
		}
		packs = append(packs, h)
	}

	return packs, nil
}

// objectPackPath builds objects/pack/pack-<hash>.<extension>.
func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string {
	return d.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s.%s", hash.String(), extension))
}

// objectPackOpen opens the .pack or .idx file for the given pack hash,
// reusing a cached descriptor for .pack files when KeepDescriptors is set.
func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) {
	if d.options.KeepDescriptors && extension == "pack" {
		if d.files == nil {
			d.files = make(map[plumbing.Hash]billy.File)
		}

		f, ok := d.files[hash]
		if ok {
			return f, nil
		}
	}

	err := d.hasPack(hash)
	if err != nil {
		return nil, err
	}

	path := d.objectPackPath(hash, extension)
	pack, err := d.fs.Open(path)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, ErrPackfileNotFound
		}

		return nil, err
	}

	if d.options.KeepDescriptors && extension == "pack" {
		d.files[hash] = pack
	}

	return pack, nil
}

// ObjectPack returns a fs.File of the given packfile
func (d *DotGit) ObjectPack(hash plumbing.Hash) (billy.File, error) {
	err := d.hasPack(hash)
	if err != nil {
		return nil, err
	}

	return d.objectPackOpen(hash, `pack`)
}

// ObjectPackIdx returns a fs.File of the index file for a given packfile
func (d *DotGit) ObjectPackIdx(hash plumbing.Hash) (billy.File, error) {
	err := d.hasPack(hash)
	if err != nil {
		return nil, err
	}

	return d.objectPackOpen(hash, `idx`)
}

// DeleteOldObjectPackAndIndex removes the .pack and .idx files for the given
// pack hash. If t is non-zero, packs modified at or after t are kept.
func (d *DotGit) DeleteOldObjectPackAndIndex(hash plumbing.Hash, t time.Time) error {
	d.cleanPackList()

	path := d.objectPackPath(hash, `pack`)
	if !t.IsZero() {
		fi, err := d.fs.Stat(path)
		if err != nil {
			return err
		}
		// too new, skip deletion.
		if !fi.ModTime().Before(t) {
			return nil
		}
	}
	err := d.fs.Remove(path)
	if err != nil {
		return err
	}
	return d.fs.Remove(d.objectPackPath(hash, `idx`))
}

// NewObject return a writer for a new object file.
func (d *DotGit) NewObject() (*ObjectWriter, error) {
	// A new loose object invalidates the cached object list.
	d.cleanObjectList()

	return newObjectWriter(d.fs)
}

// ObjectsWithPrefix returns the hashes of objects that have the given prefix.
func (d *DotGit) ObjectsWithPrefix(prefix []byte) ([]plumbing.Hash, error) {
	// Handle edge cases.
	if len(prefix) < 1 {
		// Empty prefix matches everything.
		return d.Objects()
	} else if len(prefix) > len(plumbing.ZeroHash) {
		// Longer than a full hash: nothing can match.
		return nil, nil
	}

	if d.options.ExclusiveAccess {
		err := d.genObjectList()
		if err != nil {
			return nil, err
		}

		// Rely on d.objectList being sorted.
		// Figure out the half-open interval defined by the prefix.
		first := sort.Search(len(d.objectList), func(i int) bool {
			// Same as plumbing.HashSlice.Less.
			return bytes.Compare(d.objectList[i][:], prefix) >= 0
		})
		lim := len(d.objectList)
		if limPrefix, overflow := incBytes(prefix); !overflow {
			lim = sort.Search(len(d.objectList), func(i int) bool {
				// Same as plumbing.HashSlice.Less.
				return bytes.Compare(d.objectList[i][:], limPrefix) >= 0
			})
		}
		return d.objectList[first:lim], nil
	}

	// This is the slow path.
	var objects []plumbing.Hash
	var n int
	err := d.ForEachObjectHash(func(hash plumbing.Hash) error {
		n++
		if bytes.HasPrefix(hash[:], prefix) {
			objects = append(objects, hash)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return objects, nil
}

// Objects returns a slice with the hashes of objects found under the
// .git/objects/ directory.
func (d *DotGit) Objects() ([]plumbing.Hash, error) {
	if d.options.ExclusiveAccess {
		err := d.genObjectList()
		if err != nil {
			return nil, err
		}

		return d.objectList, nil
	}

	var objects []plumbing.Hash
	err := d.ForEachObjectHash(func(hash plumbing.Hash) error {
		objects = append(objects, hash)
		return nil
	})
	if err != nil {
		return nil, err
	}
	return objects, nil
}

// ForEachObjectHash iterates over the hashes of objects found under the
// .git/objects/ directory and executes the provided function.
func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error {
	if !d.options.ExclusiveAccess {
		return d.forEachObjectHash(fun)
	}

	err := d.genObjectList()
	if err != nil {
		return err
	}

	for _, h := range d.objectList {
		err := fun(h)
		if err != nil {
			return err
		}
	}

	return nil
}

// forEachObjectHash walks the two-level objects/ fan-out directories
// (objects/ab/cdef...) and invokes fun for every well-formed object hash.
func (d *DotGit) forEachObjectHash(fun func(plumbing.Hash) error) error {
	files, err := d.fs.ReadDir(objectsPath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}

		return err
	}

	for _, f := range files {
		// Fan-out dirs are exactly two hex chars (first byte of the hash).
		if f.IsDir() && len(f.Name()) == 2 && isHex(f.Name()) {
			base := f.Name()
			// NOTE: d here shadows the receiver with the dir listing.
			d, err := d.fs.ReadDir(d.fs.Join(objectsPath, base))
			if err != nil {
				return err
			}

			for _, o := range d {
				h := plumbing.NewHash(base + o.Name())
				if h.IsZero() {
					// Ignore files with badly-formatted names.
					continue
				}
				err = fun(h)
				if err != nil {
					return err
				}
			}
		}
	}

	return nil
}

// cleanObjectList drops the cached loose-object list and map.
func (d *DotGit) cleanObjectList() {
	d.objectMap = nil
	d.objectList = nil
}

// genObjectList populates the cached (sorted) object list and membership
// map; it is a no-op if the cache is already built.
func (d *DotGit) genObjectList() error {
	if d.objectMap != nil {
		return nil
	}

	d.objectMap = make(map[plumbing.Hash]struct{})
	populate := func(h plumbing.Hash) error {
		d.objectList = append(d.objectList, h)
		d.objectMap[h] = struct{}{}

		return nil
	}
	if err := d.forEachObjectHash(populate); err != nil {
		return err
	}
	plumbing.HashesSort(d.objectList)
	return nil
}

// hasObject checks the cached object map when ExclusiveAccess is enabled;
// otherwise it reports success without touching the filesystem.
func (d *DotGit) hasObject(h plumbing.Hash) error {
	if !d.options.ExclusiveAccess {
		return nil
	}

	err := d.genObjectList()
	if err != nil {
		return err
	}

	_, ok := d.objectMap[h]
	if !ok {
		return plumbing.ErrObjectNotFound
	}

	return nil
}

// cleanPackList drops the cached packfile list and map.
func (d *DotGit) cleanPackList() {
	d.packMap = nil
	d.packList = nil
}

// genPackList populates the cached packfile list and membership map; it is
// a no-op if the cache is already built.
func (d *DotGit) genPackList() error {
	if d.packMap != nil {
		return nil
	}

	op, err := d.objectPacks()
	if err != nil {
		return err
	}

	d.packMap = make(map[plumbing.Hash]struct{})
	d.packList = nil

	for _, h := range op {
		d.packList = append(d.packList, h)
		d.packMap[h] = struct{}{}
	}

	return nil
}

// hasPack checks the cached pack map when ExclusiveAccess is enabled;
// otherwise it reports success without touching the filesystem.
func (d *DotGit) hasPack(h plumbing.Hash) error {
	if !d.options.ExclusiveAccess {
		return nil
	}

	err := d.genPackList()
	if err != nil {
		return err
	}

	_, ok := d.packMap[h]
	if !ok {
		return ErrPackfileNotFound
	}

	return nil
}

// objectPath builds the loose-object path objects/<2-char>/<rest-of-hash>.
func (d *DotGit) objectPath(h plumbing.Hash) string {
	hex := h.String()
	return d.fs.Join(objectsPath, hex[0:2], hex[2:hash.HexSize])
}

// incomingObjectPath is intended to add support for a git pre-receive hook
// to be written it adds support for go-git to find objects in an "incoming"
// directory, so that the library can be used to write a pre-receive hook
// that deals with the incoming objects.
//
// More on git hooks found here : https://git-scm.com/docs/githooks
// More on 'quarantine'/incoming directory here:
//
//	https://git-scm.com/docs/git-receive-pack
func (d *DotGit) incomingObjectPath(h plumbing.Hash) string {
	hString := h.String()

	if d.incomingDirName == "" {
		return d.fs.Join(objectsPath, hString[0:2], hString[2:hash.HexSize])
	}

	return d.fs.Join(objectsPath, d.incomingDirName, hString[0:2], hString[2:hash.HexSize])
}

// hasIncomingObjects searches for an incoming directory and keeps its name
// so it doesn't have to be found each time an object is accessed.
func (d *DotGit) hasIncomingObjects() bool {
	if !d.incomingChecked {
		directoryContents, err := d.fs.ReadDir(objectsPath)
		if err == nil {
			for _, file := range directoryContents {
				if file.IsDir() && (strings.HasPrefix(file.Name(), "tmp_objdir-incoming-") ||
					// Before Git 2.35 incoming commits directory had another prefix
					strings.HasPrefix(file.Name(), "incoming-")) {
					d.incomingDirName = file.Name()
				}
			}
		}

		d.incomingChecked = true
	}

	return d.incomingDirName != ""
}

// Object returns a fs.File pointing the object file, if exists
func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) {
	err := d.hasObject(h)
	if err != nil {
		return nil, err
	}

	obj1, err1 := d.fs.Open(d.objectPath(h))
	// Fall back to the incoming (quarantine) directory when the object is
	// not in the regular location.
	if os.IsNotExist(err1) && d.hasIncomingObjects() {
		obj2, err2 := d.fs.Open(d.incomingObjectPath(h))
		if err2 != nil {
			return obj1, err1
		}
		return obj2, err2
	}
	return obj1, err1
}

// ObjectStat returns a os.FileInfo pointing the object file, if exists
func (d *DotGit) ObjectStat(h plumbing.Hash) (os.FileInfo, error) {
	err := d.hasObject(h)
	if err != nil {
		return nil, err
	}

	obj1, err1 := d.fs.Stat(d.objectPath(h))
	// Fall back to the incoming (quarantine) directory, as in Object.
	if os.IsNotExist(err1) && d.hasIncomingObjects() {
		obj2, err2 := d.fs.Stat(d.incomingObjectPath(h))
		if err2 != nil {
			return obj1, err1
		}
		return obj2, err2
	}
	return obj1, err1
}

// ObjectDelete removes the object file, if exists
func (d *DotGit) ObjectDelete(h plumbing.Hash) error {
	d.cleanObjectList()

	err1 := d.fs.Remove(d.objectPath(h))
	// Also try the incoming (quarantine) directory when the object is not
	// in the regular location.
	if os.IsNotExist(err1) && d.hasIncomingObjects() {
		err2 := d.fs.Remove(d.incomingObjectPath(h))
		if err2 != nil {
			return err1
		}
		return err2
	}
	return err1
}

// readReferenceFrom reads a single reference named name from rd.
// Returns ErrEmptyRefFile when rd yields no bytes.
func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Reference, err error) {
	b, err := io.ReadAll(rd)
	if err != nil {
		return nil, err
	}

	if len(b) == 0 {
		return nil, ErrEmptyRefFile
	}

	line := strings.TrimSpace(string(b))
	return plumbing.NewReferenceFromStrings(name, line), nil
}

// checkReferenceAndTruncate reads the reference from the given file, or the `pack-refs` file if
// the file was empty. Then it checks that the old reference matches the stored reference and
// truncates the file.
func (d *DotGit) checkReferenceAndTruncate(f billy.File, old *plumbing.Reference) error {
	if old == nil {
		return nil
	}

	ref, err := d.readReferenceFrom(f, old.Name().String())
	if errors.Is(err, ErrEmptyRefFile) {
		// This may happen if the reference is being read from a newly created file.
		// In that case, try getting the reference from the packed refs file.
		ref, err = d.packedRef(old.Name())
	}

	if err != nil {
		return err
	}

	if ref.Hash() != old.Hash() {
		return storage.ErrReferenceHasChanged
	}

	// Rewind and truncate so the caller can write the new content in place.
	_, err = f.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}
	return f.Truncate(0)
}

// SetRef writes reference r to disk, optionally verifying (via old) that the
// stored reference has not changed concurrently.
func (d *DotGit) SetRef(r, old *plumbing.Reference) error {
	var content string
	switch r.Type() {
	case plumbing.SymbolicReference:
		content = fmt.Sprintf("ref: %s\n", r.Target())
	case plumbing.HashReference:
		content = fmt.Sprintln(r.Hash().String())
	}

	fileName := r.Name().String()

	return d.setRef(fileName, content, old)
}

// Refs scans the git directory collecting references, which it returns.
// Symbolic references are resolved and included in the output.
func (d *DotGit) Refs() ([]*plumbing.Reference, error) {
	var refs []*plumbing.Reference
	seen := make(map[plumbing.ReferenceName]bool)
	// Loose refs take precedence over packed refs; HEAD is handled first.
	if err := d.addRefFromHEAD(&refs); err != nil {
		return nil, err
	}

	if err := d.addRefsFromRefDir(&refs, seen); err != nil {
		return nil, err
	}

	if err := d.addRefsFromPackedRefs(&refs, seen); err != nil {
		return nil, err
	}

	return refs, nil
}

// Ref returns the reference for a given reference name.
func (d *DotGit) Ref(name plumbing.ReferenceName) (*plumbing.Reference, error) {
	// Loose ref file first, then fall back to packed-refs.
	ref, err := d.readReferenceFile(".", name.String())
	if err == nil {
		return ref, nil
	}

	return d.packedRef(name)
}

// findPackedRefsInFile scans f line by line, passing each parsed reference
// to recv; recv returning false stops the scan early.
func (d *DotGit) findPackedRefsInFile(f billy.File, recv refsRecv) error {
	s := bufio.NewScanner(f)
	for s.Scan() {
		ref, err := d.processLine(s.Text())
		if err != nil {
			return err
		}
		if !recv(ref) {
			// skip parse
			return nil
		}
	}
	if err := s.Err(); err != nil {
		return err
	}
	return nil
}

// refsRecv: returning true means that the reference continues to be resolved, otherwise it is stopped, which will speed up the lookup of a single reference.
type refsRecv func(*plumbing.Reference) bool

// findPackedRefs opens the packed-refs file (absence is not an error) and
// feeds its references to recv.
func (d *DotGit) findPackedRefs(recv refsRecv) error {
	f, err := d.fs.Open(packedRefsPath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}

	defer ioutil.CheckClose(f, &err)
	return d.findPackedRefsInFile(f, recv)
}

// packedRef looks up a single reference by name in the packed-refs file,
// stopping the scan as soon as it is found.
func (d *DotGit) packedRef(name plumbing.ReferenceName) (*plumbing.Reference, error) {
	var ref *plumbing.Reference
	if err := d.findPackedRefs(func(r *plumbing.Reference) bool {
		if r != nil && r.Name() == name {
			ref = r
			// ref found
			return false
		}
		return true
	}); err != nil {
		return nil, err
	}
	if ref != nil {
		return ref, nil
	}
	return nil, plumbing.ErrReferenceNotFound
}

// RemoveRef removes a reference by name.
func (d *DotGit) RemoveRef(name plumbing.ReferenceName) error {
	path := d.fs.Join(".", name.String())
	_, err := d.fs.Stat(path)
	if err == nil {
		err = d.fs.Remove(path)
		// Drop down to remove it from the packed refs file, too.
	}

	if err != nil && !os.IsNotExist(err) {
		return err
	}

	return d.rewritePackedRefsWithoutRef(name)
}

// refsRecvFunc builds a refsRecv that appends unseen references to refs and
// records them in seen.
func refsRecvFunc(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) refsRecv {
	return func(r *plumbing.Reference) bool {
		if r != nil && !seen[r.Name()] {
			*refs = append(*refs, r)
			seen[r.Name()] = true
		}
		return true
	}
}

// addRefsFromPackedRefs appends references from the packed-refs file,
// skipping names already present in seen.
func (d *DotGit) addRefsFromPackedRefs(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) (err error) {
	return d.findPackedRefs(refsRecvFunc(refs, seen))
}

// addRefsFromPackedRefsFile is like addRefsFromPackedRefs but reads from an
// already-open file handle.
func (d *DotGit) addRefsFromPackedRefsFile(refs *[]*plumbing.Reference, f billy.File, seen map[plumbing.ReferenceName]bool) (err error) {
	return d.findPackedRefsInFile(f, refsRecvFunc(refs, seen))
}

// openAndLockPackedRefs opens packed-refs and acquires its lock, retrying
// until the file did not change between open and lock. Returns (nil, nil)
// when the file does not exist and doCreate is false.
func (d *DotGit) openAndLockPackedRefs(doCreate bool) (
	pr billy.File, err error,
) {
	var f billy.File
	defer func() {
		if err != nil && f != nil {
			ioutil.CheckClose(f, &err)
		}
	}()

	// File mode is retrieved from a constant defined in the target specific
	// files (dotgit_rewrite_packed_refs_*). Some modes are not available
	// in all filesystems.
	openFlags := d.openAndLockPackedRefsMode()
	if doCreate {
		openFlags |= os.O_CREATE
	}

	// Keep trying to open and lock the file until we're sure the file
	// didn't change between the open and the lock.
	for {
		f, err = d.fs.OpenFile(packedRefsPath, openFlags, 0600)
		if err != nil {
			if os.IsNotExist(err) && !doCreate {
				return nil, nil
			}

			return nil, err
		}
		fi, err := d.fs.Stat(packedRefsPath)
		if err != nil {
			return nil, err
		}
		mtime := fi.ModTime()

		err = f.Lock()
		if err != nil {
			return nil, err
		}

		fi, err = d.fs.Stat(packedRefsPath)
		if err != nil {
			return nil, err
		}
		if mtime.Equal(fi.ModTime()) {
			break
		}

		// The file has changed since we opened it. Close and retry.
		err = f.Close()
		if err != nil {
			return nil, err
		}
	}
	return f, nil
}

// rewritePackedRefsWithoutRef rewrites the packed-refs file with the named
// reference removed, using a temp file + rename while holding the lock.
func (d *DotGit) rewritePackedRefsWithoutRef(name plumbing.ReferenceName) (err error) {
	pr, err := d.openAndLockPackedRefs(false)
	if err != nil {
		return err
	}
	if pr == nil {
		// No packed-refs file: nothing to rewrite.
		return nil
	}
	defer ioutil.CheckClose(pr, &err)

	// Creating the temp file in the same directory as the target file
	// improves our chances for rename operation to be atomic.
	tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix)
	if err != nil {
		return err
	}
	tmpName := tmp.Name()
	defer func() {
		ioutil.CheckClose(tmp, &err)
		_ = d.fs.Remove(tmpName) // don't check err, we might have renamed it
	}()

	s := bufio.NewScanner(pr)
	found := false
	for s.Scan() {
		line := s.Text()
		ref, err := d.processLine(line)
		if err != nil {
			return err
		}

		if ref != nil && ref.Name() == name {
			found = true
			continue
		}

		if _, err := fmt.Fprintln(tmp, line); err != nil {
			return err
		}
	}

	if err := s.Err(); err != nil {
		return err
	}

	if !found {
		// Ref was not packed: leave the file untouched.
		return nil
	}

	return d.rewritePackedRefsWhileLocked(tmp, pr)
}

// process lines from a packed-refs file
func (d *DotGit) processLine(line string) (*plumbing.Reference, error) {
	if len(line) == 0 {
		return nil, nil
	}

	switch line[0] {
	case '#': // comment - ignore
		return nil, nil
	case '^': // annotated tag commit of the previous line - ignore
		return nil, nil
	default:
		ws := strings.Split(line, " ") // hash then ref
		if len(ws) != 2 {
			return nil, ErrPackedRefsBadFormat
		}

		return plumbing.NewReferenceFromStrings(ws[1], ws[0]), nil
	}
}

// addRefsFromRefDir walks the refs/ directory tree, appending unseen
// references.
func (d *DotGit) addRefsFromRefDir(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) error {
	return d.walkReferencesTree(refs, []string{refsPath}, seen)
}

// walkReferencesTree recursively collects loose references under relPath,
// tolerating files/directories that disappear mid-walk.
func (d *DotGit) walkReferencesTree(refs *[]*plumbing.Reference, relPath []string, seen map[plumbing.ReferenceName]bool) error {
	files, err := d.fs.ReadDir(d.fs.Join(relPath...))
	if err != nil {
		if os.IsNotExist(err) {
			// a race happened, and our directory is gone now
			return nil
		}

		return err
	}

	for _, f := range files {
		newRelPath := append(append([]string(nil), relPath...), f.Name())
		if f.IsDir() {
			if err = d.walkReferencesTree(refs, newRelPath, seen); err != nil {
				return err
			}

			continue
		}

		ref, err := d.readReferenceFile(".", strings.Join(newRelPath, "/"))
		if os.IsNotExist(err) {
			// a race happened, and our file is gone now
			continue
		}
		if err != nil {
			return err
		}

		if ref != nil && !seen[ref.Name()] {
			*refs = append(*refs, ref)
			seen[ref.Name()] = true
		}
	}

	return nil
}

// addRefFromHEAD appends the HEAD reference; a missing HEAD is not an error.
func (d *DotGit) addRefFromHEAD(refs *[]*plumbing.Reference) error {
	ref, err := d.readReferenceFile(".", "HEAD")
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}

		return err
	}

	*refs = append(*refs, ref)
	return nil
}

// readReferenceFile reads the loose reference file at path/name, rejecting
// directories with ErrIsDir.
func (d *DotGit) readReferenceFile(path, name string) (ref *plumbing.Reference, err error) {
	path = d.fs.Join(path, d.fs.Join(strings.Split(name, "/")...))
	st, err := d.fs.Stat(path)
	if err != nil {
		return nil, err
	}
	if st.IsDir() {
		return nil, ErrIsDir
	}

	f, err := d.fs.Open(path)
	if err != nil {
		return nil, err
	}
	defer ioutil.CheckClose(f, &err)

	return d.readReferenceFrom(f, name)
}

// CountLooseRefs returns the number of loose references under refs/.
func (d *DotGit) CountLooseRefs() (int, error) {
	var refs []*plumbing.Reference
	seen := make(map[plumbing.ReferenceName]bool)
	if err := d.addRefsFromRefDir(&refs, seen); err != nil {
		return 0, err
	}

	return len(refs), nil
}

// PackRefs packs all loose refs into the packed-refs file.
//
// This implementation only works under the assumption that the view
// of the file system won't be updated during this operation. This
// strategy would not work on a general file system though, without
// locking each loose reference and checking it again before deleting
// the file, because otherwise an updated reference could sneak in and
// then be deleted by the packed-refs process. Alternatively, every
// ref update could also lock packed-refs, so only one lock is
// required during ref-packing. But that would worsen performance in
// the common case.
//
// TODO: add an "all" boolean like the `git pack-refs --all` flag.
// When `all` is false, it would only pack refs that have already been
// packed, plus all tags.
func (d *DotGit) PackRefs() (err error) {
	// Lock packed-refs, and create it if it doesn't exist yet.
	f, err := d.openAndLockPackedRefs(true)
	if err != nil {
		return err
	}
	defer ioutil.CheckClose(f, &err)

	// Gather all refs using addRefsFromRefDir and addRefsFromPackedRefs.
	var refs []*plumbing.Reference
	seen := make(map[plumbing.ReferenceName]bool)
	if err = d.addRefsFromRefDir(&refs, seen); err != nil {
		return err
	}
	if len(refs) == 0 {
		// Nothing to do!
		return nil
	}
	numLooseRefs := len(refs)
	if err = d.addRefsFromPackedRefsFile(&refs, f, seen); err != nil {
		return err
	}

	// Write them all to a new temp packed-refs file.
	tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix)
	if err != nil {
		return err
	}
	tmpName := tmp.Name()
	defer func() {
		ioutil.CheckClose(tmp, &err)
		_ = d.fs.Remove(tmpName) // don't check err, we might have renamed it
	}()

	w := bufio.NewWriter(tmp)
	for _, ref := range refs {
		_, err = w.WriteString(ref.String() + "\n")
		if err != nil {
			return err
		}
	}
	err = w.Flush()
	if err != nil {
		return err
	}

	// Rename the temp packed-refs file.
	err = d.rewritePackedRefsWhileLocked(tmp, f)
	if err != nil {
		return err
	}

	// Delete all the loose refs, while still holding the packed-refs
	// lock.
	for _, ref := range refs[:numLooseRefs] {
		path := d.fs.Join(".", ref.Name().String())
		err = d.fs.Remove(path)
		if err != nil && !os.IsNotExist(err) {
			return err
		}
	}

	return nil
}

// Module return a billy.Filesystem pointing to the module folder
func (d *DotGit) Module(name string) (billy.Filesystem, error) {
	return d.fs.Chroot(d.fs.Join(modulePath, name))
}

// AddAlternate appends the given remote's objects directory to
// objects/info/alternates, creating the file if needed.
func (d *DotGit) AddAlternate(remote string) error {
	altpath := d.fs.Join(objectsPath, infoPath, alternatesPath)

	f, err := d.fs.OpenFile(altpath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0640)
	if err != nil {
		return fmt.Errorf("cannot open file: %w", err)
	}
	defer f.Close()

	// locking in windows throws an error, based on comments
	// https://github.com/go-git/go-git/pull/860#issuecomment-1751823044
	// do not lock on windows platform.
	if runtime.GOOS != "windows" {
		if err = f.Lock(); err != nil {
			return fmt.Errorf("cannot lock file: %w", err)
		}
		defer f.Unlock()
	}

	line := path.Join(remote, objectsPath) + "\n"
	_, err = io.WriteString(f, line)
	if err != nil {
		return fmt.Errorf("error writing 'alternates' file: %w", err)
	}

	return nil
}

// Alternates returns DotGit(s) based off paths in objects/info/alternates if
// available. This can be used to checks if it's a shared repository.
func (d *DotGit) Alternates() ([]*DotGit, error) {
	altpath := d.fs.Join(objectsPath, infoPath, alternatesPath)
	f, err := d.fs.Open(altpath)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	fs := d.options.AlternatesFS
	if fs == nil {
		fs = d.fs
	}

	var alternates []*DotGit
	seen := make(map[string]struct{})

	// Read alternate paths line-by-line and create DotGit objects.
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		path := scanner.Text()

		// Avoid creating multiple dotgits for the same alternative path.
		if _, ok := seen[path]; ok {
			continue
		}

		seen[path] = struct{}{}

		if filepath.IsAbs(path) {
			// Handling absolute paths should be straight-forward. However, the default osfs (Chroot)
			// tries to concatenate an abs path with the root path in some operations (e.g. Stat),
			// which leads to unexpected errors. Therefore, make the path relative to the current FS instead.
			if reflect.TypeOf(fs) == reflect.TypeOf(&chroot.ChrootHelper{}) {
				path, err = filepath.Rel(fs.Root(), path)
				if err != nil {
					return nil, fmt.Errorf("cannot make path %q relative: %w", path, err)
				}
			}
		} else {
			// By Git conventions, relative paths should be based on the object database (.git/objects/info)
			// location as per: https://www.kernel.org/pub/software/scm/git/docs/gitrepository-layout.html
			// However, due to the nature of go-git and its filesystem handling via Billy, paths cannot
			// cross its "chroot boundaries". Therefore, ignore any "../" and treat the path from the
			// fs root. If this is not correct based on the dotgit fs, set a different one via AlternatesFS.
			abs := filepath.Join(string(filepath.Separator), filepath.ToSlash(path))
			path = filepath.FromSlash(abs)
		}

		// Aligns with upstream behavior: exit if target path is not a valid directory.
		if fi, err := fs.Stat(path); err != nil || !fi.IsDir() {
			return nil, fmt.Errorf("invalid object directory %q: %w", path, err)
		}
		afs, err := fs.Chroot(filepath.Dir(path))
		if err != nil {
			return nil, fmt.Errorf("cannot chroot %q: %w", path, err)
		}
		alternates = append(alternates, New(afs))
	}

	if err = scanner.Err(); err != nil {
		return nil, err
	}

	return alternates, nil
}

// Fs returns the underlying filesystem of the DotGit folder.
func (d *DotGit) Fs() billy.Filesystem {
	return d.fs
}

// isHex reports whether s consists only of hexadecimal digits.
func isHex(s string) bool {
	for _, b := range []byte(s) {
		if isNum(b) {
			continue
		}
		if isHexAlpha(b) {
			continue
		}

		return false
	}

	return true
}

// isNum reports whether b is an ASCII decimal digit.
func isNum(b byte) bool {
	return b >= '0' && b <= '9'
}

// isHexAlpha reports whether b is a hexadecimal letter (a-f, A-F).
func isHexAlpha(b byte) bool {
	return b >= 'a' && b <= 'f' || b >= 'A' && b <= 'F'
}

// incBytes increments a byte slice, which involves incrementing the
// right-most byte, and following carry leftward.
// It makes a copy so that the provided slice's underlying array is not modified.
// If the overall operation overflows (e.g. incBytes(0xff, 0xff)), the second return parameter indicates that.
func incBytes(in []byte) (out []byte, overflow bool) {
	out = make([]byte, len(in))
	copy(out, in)
	for i := len(out) - 1; i >= 0; i-- {
		out[i]++
		if out[i] != 0 {
			return // Didn't overflow.
		}
	}
	overflow = true
	return
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit/dotgit_setref.go
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit/dotgit_setref.go
package dotgit import ( "fmt" "os" "github.com/jesseduffield/go-git/v5/plumbing" "github.com/jesseduffield/go-git/v5/utils/ioutil" "github.com/go-git/go-billy/v5" ) func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err error) { if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) { return d.setRefRwfs(fileName, content, old) } return d.setRefNorwfs(fileName, content, old) } func (d *DotGit) setRefRwfs(fileName, content string, old *plumbing.Reference) (err error) { // If we are not checking an old ref, just truncate the file. mode := os.O_RDWR | os.O_CREATE if old == nil { mode |= os.O_TRUNC } f, err := d.fs.OpenFile(fileName, mode, 0666) if err != nil { return err } defer ioutil.CheckClose(f, &err) // Lock is unlocked by the deferred Close above. This is because Unlock // does not imply a fsync and thus there would be a race between // Unlock+Close and other concurrent writers. Adding Sync to go-billy // could work, but this is better (and avoids superfluous syncs). err = f.Lock() if err != nil { return err } // this is a no-op to call even when old is nil. err = d.checkReferenceAndTruncate(f, old) if err != nil { return err } _, err = f.Write([]byte(content)) return err } // There are some filesystems that don't support opening files in RDWD mode. // In these filesystems the standard SetRef function can not be used as it // reads the reference file to check that it's not modified before updating it. // // This version of the function writes the reference without extra checks // making it compatible with these simple filesystems. This is usually not // a problem as they should be accessed by only one process at a time. 
func (d *DotGit) setRefNorwfs(fileName, content string, old *plumbing.Reference) error { _, err := d.fs.Stat(fileName) if err == nil && old != nil { fRead, err := d.fs.Open(fileName) if err != nil { return err } ref, err := d.readReferenceFrom(fRead, old.Name().String()) fRead.Close() if err != nil { return err } if ref.Hash() != old.Hash() { return fmt.Errorf("reference has changed concurrently") } } f, err := d.fs.Create(fileName) if err != nil { return err } defer f.Close() _, err = f.Write([]byte(content)) return err }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit/repository_filesystem.go
vendor/github.com/jesseduffield/go-git/v5/storage/filesystem/dotgit/repository_filesystem.go
package dotgit

import (
	"os"
	"path/filepath"
	"strings"

	"github.com/go-git/go-billy/v5"
)

// RepositoryFilesystem is a billy.Filesystem compatible object wrapper
// which handles dot-git filesystem operations and supports commondir according to git scm layout:
// https://github.com/git/git/blob/master/Documentation/gitrepository-layout.txt
type RepositoryFilesystem struct {
	dotGitFs       billy.Filesystem // worktree-specific .git directory
	commonDotGitFs billy.Filesystem // shared commondir filesystem; nil when commondir is not in use
}

// NewRepositoryFilesystem wraps the worktree dot-git filesystem and, when
// non-nil, the shared commondir filesystem into a single billy.Filesystem.
func NewRepositoryFilesystem(dotGitFs, commonDotGitFs billy.Filesystem) *RepositoryFilesystem {
	return &RepositoryFilesystem{
		dotGitFs:       dotGitFs,
		commonDotGitFs: commonDotGitFs,
	}
}

// mapToRepositoryFsByPath selects which underlying filesystem (worktree
// dot-git or commondir) a given repository-relative path belongs to,
// following git's documented commondir rules.
func (fs *RepositoryFilesystem) mapToRepositoryFsByPath(path string) billy.Filesystem {
	// Nothing to decide if commondir not defined
	if fs.commonDotGitFs == nil {
		return fs.dotGitFs
	}

	cleanPath := filepath.Clean(path)

	// Check exceptions for commondir (https://git-scm.com/docs/gitrepository-layout#Documentation/gitrepository-layout.txt)
	switch cleanPath {
	case fs.dotGitFs.Join(logsPath, "HEAD"):
		return fs.dotGitFs
	case fs.dotGitFs.Join(refsPath, "bisect"), fs.dotGitFs.Join(refsPath, "rewritten"), fs.dotGitFs.Join(refsPath, "worktree"):
		return fs.dotGitFs
	}

	// Determine dot-git root by first path element.
	// There are some elements which should always use commondir when commondir defined.
	// Usual dot-git root will be used for the rest of files.
	switch strings.Split(cleanPath, string(filepath.Separator))[0] {
	case objectsPath, refsPath, packedRefsPath, configPath, branchesPath, hooksPath, infoPath, remotesPath, logsPath, shallowPath, worktreesPath:
		return fs.commonDotGitFs
	default:
		return fs.dotGitFs
	}
}

// Create delegates to the filesystem selected for filename.
func (fs *RepositoryFilesystem) Create(filename string) (billy.File, error) {
	return fs.mapToRepositoryFsByPath(filename).Create(filename)
}

// Open delegates to the filesystem selected for filename.
func (fs *RepositoryFilesystem) Open(filename string) (billy.File, error) {
	return fs.mapToRepositoryFsByPath(filename).Open(filename)
}

// OpenFile delegates to the filesystem selected for filename.
func (fs *RepositoryFilesystem) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
	return fs.mapToRepositoryFsByPath(filename).OpenFile(filename, flag, perm)
}

// Stat delegates to the filesystem selected for filename.
func (fs *RepositoryFilesystem) Stat(filename string) (os.FileInfo, error) {
	return fs.mapToRepositoryFsByPath(filename).Stat(filename)
}

// Rename delegates to the filesystem selected for oldpath.
// NOTE(review): routing is keyed on oldpath only, so a rename across the
// dot-git/commondir boundary would stay on oldpath's filesystem — presumably
// callers never cross that boundary; confirm against call sites.
func (fs *RepositoryFilesystem) Rename(oldpath, newpath string) error {
	return fs.mapToRepositoryFsByPath(oldpath).Rename(oldpath, newpath)
}

// Remove delegates to the filesystem selected for filename.
func (fs *RepositoryFilesystem) Remove(filename string) error {
	return fs.mapToRepositoryFsByPath(filename).Remove(filename)
}

// Join always uses the worktree dot-git filesystem's join semantics.
func (fs *RepositoryFilesystem) Join(elem ...string) string {
	return fs.dotGitFs.Join(elem...)
}

// TempFile delegates to the filesystem selected for dir.
func (fs *RepositoryFilesystem) TempFile(dir, prefix string) (billy.File, error) {
	return fs.mapToRepositoryFsByPath(dir).TempFile(dir, prefix)
}

// ReadDir delegates to the filesystem selected for path.
func (fs *RepositoryFilesystem) ReadDir(path string) ([]os.FileInfo, error) {
	return fs.mapToRepositoryFsByPath(path).ReadDir(path)
}

// MkdirAll delegates to the filesystem selected for filename.
func (fs *RepositoryFilesystem) MkdirAll(filename string, perm os.FileMode) error {
	return fs.mapToRepositoryFsByPath(filename).MkdirAll(filename, perm)
}

// Lstat delegates to the filesystem selected for filename.
func (fs *RepositoryFilesystem) Lstat(filename string) (os.FileInfo, error) {
	return fs.mapToRepositoryFsByPath(filename).Lstat(filename)
}

// Symlink delegates to the filesystem selected for target.
// NOTE(review): routing is keyed on target rather than link — confirm this
// matches the intended layout before relying on it.
func (fs *RepositoryFilesystem) Symlink(target, link string) error {
	return fs.mapToRepositoryFsByPath(target).Symlink(target, link)
}

// Readlink delegates to the filesystem selected for link.
func (fs *RepositoryFilesystem) Readlink(link string) (string, error) {
	return fs.mapToRepositoryFsByPath(link).Readlink(link)
}

// Chroot delegates to the filesystem selected for path.
func (fs *RepositoryFilesystem) Chroot(path string) (billy.Filesystem, error) {
	return fs.mapToRepositoryFsByPath(path).Chroot(path)
}

// Root reports the root of the worktree dot-git filesystem.
func (fs *RepositoryFilesystem) Root() string {
	return fs.dotGitFs.Root()
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/utils/diff/diff.go
vendor/github.com/jesseduffield/go-git/v5/utils/diff/diff.go
// Package diff implements line oriented diffs, similar to the ancient // Unix diff command. // // The current implementation is just a wrapper around Sergi's // go-diff/diffmatchpatch library, which is a go port of Neil // Fraser's google-diff-match-patch code package diff import ( "bytes" "time" "github.com/sergi/go-diff/diffmatchpatch" ) // Do computes the (line oriented) modifications needed to turn the src // string into the dst string. The underlying algorithm is Meyers, // its complexity is O(N*d) where N is min(lines(src), lines(dst)) and d // is the size of the diff. func Do(src, dst string) (diffs []diffmatchpatch.Diff) { // the default timeout is time.Second which may be too small under heavy load return DoWithTimeout(src, dst, time.Hour) } // DoWithTimeout computes the (line oriented) modifications needed to turn the src // string into the dst string. The `timeout` argument specifies the maximum // amount of time it is allowed to spend in this function. If the timeout // is exceeded, the parts of the strings which were not considered are turned into // a bulk delete+insert and the half-baked suboptimal result is returned at once. // The underlying algorithm is Meyers, its complexity is O(N*d) where N is // min(lines(src), lines(dst)) and d is the size of the diff. func DoWithTimeout(src, dst string, timeout time.Duration) (diffs []diffmatchpatch.Diff) { dmp := diffmatchpatch.New() dmp.DiffTimeout = timeout wSrc, wDst, warray := dmp.DiffLinesToRunes(src, dst) diffs = dmp.DiffMainRunes(wSrc, wDst, false) diffs = dmp.DiffCharsToLines(diffs, warray) return diffs } // Dst computes and returns the destination text. 
func Dst(diffs []diffmatchpatch.Diff) string { var text bytes.Buffer for _, d := range diffs { if d.Type != diffmatchpatch.DiffDelete { text.WriteString(d.Text) } } return text.String() } // Src computes and returns the source text func Src(diffs []diffmatchpatch.Diff) string { var text bytes.Buffer for _, d := range diffs { if d.Type != diffmatchpatch.DiffInsert { text.WriteString(d.Text) } } return text.String() }
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/utils/trace/trace.go
vendor/github.com/jesseduffield/go-git/v5/utils/trace/trace.go
package trace

import (
	"fmt"
	"log"
	"os"
	"sync/atomic"
)

var (
	// logger is the logger to use for tracing.
	logger = newLogger()

	// current is the bitmask of targets that are enabled for tracing.
	current atomic.Int32
)

// newLogger builds the default trace logger: stderr with time and
// caller information.
func newLogger() *log.Logger {
	const flags = log.Ltime | log.Lmicroseconds | log.Lshortfile
	return log.New(os.Stderr, "", flags)
}

// Target is a tracing target.
type Target int32

const (
	// General traces general operations.
	General Target = 1 << iota

	// Packet traces git packets.
	Packet
)

// SetTarget sets the tracing targets.
func SetTarget(target Target) {
	current.Store(int32(target))
}

// SetLogger sets the logger to use for tracing.
func SetLogger(l *log.Logger) {
	logger = l
}

// enabled reports whether tracing for t is currently switched on.
func (t Target) enabled() bool {
	return current.Load()&int32(t) != 0
}

// Print prints the given message only if the target is enabled.
func (t Target) Print(args ...interface{}) {
	if t.enabled() {
		// calldepth 2 attributes the log line to Print's caller.
		logger.Output(2, fmt.Sprint(args...)) // nolint: errcheck
	}
}

// Printf prints the given message only if the target is enabled.
func (t Target) Printf(format string, args ...interface{}) {
	if t.enabled() {
		// calldepth 2 attributes the log line to Printf's caller.
		logger.Output(2, fmt.Sprintf(format, args...)) // nolint: errcheck
	}
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false
jesseduffield/lazygit
https://github.com/jesseduffield/lazygit/blob/80dd695d7a8d32714603f5a6307f26f589802b1d/vendor/github.com/jesseduffield/go-git/v5/utils/merkletrie/change.go
vendor/github.com/jesseduffield/go-git/v5/utils/merkletrie/change.go
package merkletrie

import (
	"errors"
	"fmt"
	"io"

	"github.com/jesseduffield/go-git/v5/utils/merkletrie/noder"
)

var (
	ErrEmptyFileName = errors.New("empty filename in tree entry")
)

// Action values represent the kind of things a Change can represent:
// insertion, deletions or modifications of files.
type Action int

// The set of possible actions in a change.
const (
	_ Action = iota
	Insert
	Delete
	Modify
)

// String returns the action as a human readable text.
func (a Action) String() string {
	switch a {
	case Insert:
		return "Insert"
	case Delete:
		return "Delete"
	case Modify:
		return "Modify"
	}
	// Any other value is a programming error.
	panic(fmt.Sprintf("unsupported action: %d", a))
}

// A Change value represent how a noder has change between to merkletries.
type Change struct {
	// The noder before the change or nil if it was inserted.
	From noder.Path

	// The noder after the change or nil if it was deleted.
	To noder.Path
}

// Action is convenience method that returns what Action c represents.
func (c *Change) Action() (Action, error) {
	switch {
	case c.From == nil && c.To == nil:
		return Action(0), fmt.Errorf("malformed change: nil from and to")
	case c.From == nil:
		return Insert, nil
	case c.To == nil:
		return Delete, nil
	default:
		return Modify, nil
	}
}

// NewInsert returns a new Change representing the insertion of n.
func NewInsert(n noder.Path) Change {
	return Change{To: n}
}

// NewDelete returns a new Change representing the deletion of n.
func NewDelete(n noder.Path) Change {
	return Change{From: n}
}

// NewModify returns a new Change representing that a has been modified and
// it is now b.
func NewModify(a, b noder.Path) Change {
	return Change{From: a, To: b}
}

// String returns a single change in human readable form, using the
// format: '<' + action + space + path + '>'. The contents of the file
// before or after the change are not included in this format.
//
// Example: inserting a file at the path a/b/c.txt will return "<Insert
// a/b/c.txt>".
func (c Change) String() string {
	action, err := c.Action()
	if err != nil {
		panic(err)
	}

	// Deletions are the only case where the path comes from From.
	path := c.To
	if action == Delete {
		path = c.From
	}

	return fmt.Sprintf("<%s %s>", action, path.String())
}

// Changes is a list of changes between to merkletries.
type Changes []Change

// NewChanges returns an empty list of changes.
func NewChanges() Changes {
	return Changes{}
}

// Add adds the change c to the list of changes.
func (l *Changes) Add(c Change) {
	*l = append(*l, c)
}

// AddRecursiveInsert adds the required changes to insert all the
// file-like noders found in root, recursively.
func (l *Changes) AddRecursiveInsert(root noder.Path) error {
	return l.addRecursive(root, NewInsert)
}

// AddRecursiveDelete adds the required changes to delete all the
// file-like noders found in root, recursively.
func (l *Changes) AddRecursiveDelete(root noder.Path) error {
	return l.addRecursive(root, NewDelete)
}

type noderToChangeFn func(noder.Path) Change // NewInsert or NewDelete

// addRecursive walks root and adds ctor(p) for every non-directory noder p
// found under it (or for root itself when root is a file).
func (l *Changes) addRecursive(root noder.Path, ctor noderToChangeFn) error {
	if root.String() == "" {
		return ErrEmptyFileName
	}

	if !root.IsDir() {
		l.Add(ctor(root))
		return nil
	}

	iter, err := NewIterFromPath(root)
	if err != nil {
		return err
	}

	for {
		p, err := iter.Step()
		if err == io.EOF {
			// Iteration exhausted: all file-like noders were added.
			return nil
		}
		if err != nil {
			return err
		}
		if !p.IsDir() {
			l.Add(ctor(p))
		}
	}
}
go
MIT
80dd695d7a8d32714603f5a6307f26f589802b1d
2026-01-07T08:35:43.445894Z
false